code
stringlengths
1
2.01M
repo_name
stringlengths
3
62
path
stringlengths
1
267
language
stringclasses
231 values
license
stringclasses
13 values
size
int64
1
2.01M
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_ALPHA_DSPUTIL_ALPHA_H
#define AVCODEC_ALPHA_DSPUTIL_ALPHA_H

#include "libavcodec/dsputil.h"

/* Alpha (AXP) optimized simple IDCT: in-place transform, plus variants that
 * write (put) or accumulate (add) the clamped result into a pixel buffer. */
void ff_simple_idct_axp(DCTELEM *block);
void ff_simple_idct_put_axp(uint8_t *dest, int line_size, DCTELEM *block);
void ff_simple_idct_add_axp(uint8_t *dest, int line_size, DCTELEM *block);

/* Assembly-implemented pixel copy (h rows of 8 bytes each, stride line_size). */
void put_pixels_axp_asm(uint8_t *block, const uint8_t *pixels,
                        int line_size, int h);
/* MVI (Motion Video Instructions) assembly: store/accumulate DCT coefficients
 * into a pixel block with clamping to the 0..255 range. */
void put_pixels_clamped_mvi_asm(const DCTELEM *block, uint8_t *pixels,
                                int line_size);
void add_pixels_clamped_mvi_asm(const DCTELEM *block, uint8_t *pixels,
                                int line_size);
/* Runtime-selected implementations (e.g. MVI vs. generic); the definitions
 * that assign these pointers live elsewhere in the alpha/ directory. */
extern void (*put_pixels_clamped_axp_p)(const DCTELEM *block, uint8_t *pixels,
                                        int line_size);
extern void (*add_pixels_clamped_axp_p)(const DCTELEM *block, uint8_t *pixels,
                                        int line_size);

/* Read an 8x8 pixel block into DCT coefficients / form the difference of two
 * pixel blocks, MVI-accelerated. */
void get_pixels_mvi(DCTELEM *restrict block,
                    const uint8_t *restrict pixels, int line_size);
void diff_pixels_mvi(DCTELEM *block, const uint8_t *s1, const uint8_t *s2,
                     int stride);

/* Block comparison metrics for motion estimation.  NOTE(review): the pix_abs
 * naming suggests sum-of-absolute-differences with half-pel (x2/y2/xy2)
 * interpolation variants — confirm against the C implementations. */
int pix_abs8x8_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int pix_abs16x16_mvi_asm(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int pix_abs16x16_x2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int pix_abs16x16_y2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int pix_abs16x16_xy2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);

#endif /* AVCODEC_ALPHA_DSPUTIL_ALPHA_H */
123linslouis-android-video-cutter
jni/libavcodec/alpha/dsputil_alpha.h
C
asf20
2,379
/*
 * Simple IDCT (Alpha optimized)
 *
 * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
 *
 * based upon some outcommented C code from mpeg2dec (idct_mmx.c
 * written by Aaron Holtzman <aholtzma@ess.engr.uvic.ca>)
 *
 * Alpha optimizations by Måns Rullgård <mans@mansr.com>
 * and Falk Hueffner <falk@debian.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "dsputil_alpha.h"
#include "asm.h"

// cos(i * M_PI / 16) * sqrt(2) * (1 << 14)
// W4 is actually exactly 16384, but using 16383 works around
// accumulating rounding errors for some encoders
#define W1 ((int_fast32_t) 22725)
#define W2 ((int_fast32_t) 21407)
#define W3 ((int_fast32_t) 19266)
#define W4 ((int_fast32_t) 16383)
#define W5 ((int_fast32_t) 12873)
#define W6 ((int_fast32_t)  8867)
#define W7 ((int_fast32_t)  4520)
#define ROW_SHIFT 11
#define COL_SHIFT 20

/* 1-D IDCT over one 8-coefficient row, done in place.
 * Returns a sparseness code used by ff_simple_idct_axp() to pick a cheaper
 * column pass:
 * 0: all entries 0, 1: only first entry nonzero, 2: otherwise */
static inline int idct_row(DCTELEM *row)
{
    int_fast32_t a0, a1, a2, a3, b0, b1, b2, b3, t;
    uint64_t l, r, t2;
    /* Load the whole row as two 64-bit words (4 DCTELEMs each). */
    l = ldq(row);
    r = ldq(row + 4);

    if (l == 0 && r == 0)
        return 0;

    /* DC contribution, with rounding bias for the >> ROW_SHIFT below. */
    a0 = W4 * sextw(l) + (1 << (ROW_SHIFT - 1));

    if (((l & ~0xffffUL) | r) == 0) {
        /* Only the DC coefficient is nonzero: the row output is the DC value
         * replicated; build it once and splat it across the 8 elements. */
        a0 >>= ROW_SHIFT;
        t2 = (uint16_t) a0;
        t2 |= t2 << 16;
        t2 |= t2 << 32;

        stq(t2, row);
        stq(t2, row + 4);
        return 1;
    }

    a1 = a0;
    a2 = a0;
    a3 = a0;

    /* Even coefficients: each extwl() extracts one 16-bit element from the
     * loaded quadword; multiplies are skipped when the coefficient is zero. */
    t = extwl(l, 4);            /* row[2] */
    if (t != 0) {
        t = sextw(t);
        a0 += W2 * t;
        a1 += W6 * t;
        a2 -= W6 * t;
        a3 -= W2 * t;
    }

    t = extwl(r, 0);            /* row[4] */
    if (t != 0) {
        t = sextw(t);
        a0 += W4 * t;
        a1 -= W4 * t;
        a2 -= W4 * t;
        a3 += W4 * t;
    }

    t = extwl(r, 4);            /* row[6] */
    if (t != 0) {
        t = sextw(t);
        a0 += W6 * t;
        a1 -= W2 * t;
        a2 += W2 * t;
        a3 -= W6 * t;
    }

    /* Odd coefficients accumulate into b0..b3. */
    t = extwl(l, 2);            /* row[1] */
    if (t != 0) {
        t = sextw(t);
        b0 = W1 * t;
        b1 = W3 * t;
        b2 = W5 * t;
        b3 = W7 * t;
    } else {
        b0 = 0;
        b1 = 0;
        b2 = 0;
        b3 = 0;
    }

    t = extwl(l, 6);            /* row[3] */
    if (t) {
        t = sextw(t);
        b0 += W3 * t;
        b1 -= W7 * t;
        b2 -= W1 * t;
        b3 -= W5 * t;
    }

    t = extwl(r, 2);            /* row[5] */
    if (t) {
        t = sextw(t);
        b0 += W5 * t;
        b1 -= W1 * t;
        b2 += W7 * t;
        b3 += W3 * t;
    }

    t = extwl(r, 6);            /* row[7] */
    if (t) {
        t = sextw(t);
        b0 += W7 * t;
        b1 -= W5 * t;
        b2 += W3 * t;
        b3 -= W1 * t;
    }

    /* Butterfly: even part a +/- odd part b gives the 8 outputs. */
    row[0] = (a0 + b0) >> ROW_SHIFT;
    row[1] = (a1 + b1) >> ROW_SHIFT;
    row[2] = (a2 + b2) >> ROW_SHIFT;
    row[3] = (a3 + b3) >> ROW_SHIFT;
    row[4] = (a3 - b3) >> ROW_SHIFT;
    row[5] = (a2 - b2) >> ROW_SHIFT;
    row[6] = (a1 - b1) >> ROW_SHIFT;
    row[7] = (a0 - b0) >> ROW_SHIFT;

    return 2;
}

/* 1-D IDCT over one column (stride 8 between elements), done in place. */
static inline void idct_col(DCTELEM *col)
{
    int_fast32_t a0, a1, a2, a3, b0, b1, b2, b3;

    /* Rounding bias folded into the DC coefficient before scaling by W4. */
    col[0] += (1 << (COL_SHIFT - 1)) / W4;

    a0 = W4 * col[8 * 0];
    a1 = W4 * col[8 * 0];
    a2 = W4 * col[8 * 0];
    a3 = W4 * col[8 * 0];

    if (col[8 * 2]) {
        a0 += W2 * col[8 * 2];
        a1 += W6 * col[8 * 2];
        a2 -= W6 * col[8 * 2];
        a3 -= W2 * col[8 * 2];
    }

    if (col[8 * 4]) {
        a0 += W4 * col[8 * 4];
        a1 -= W4 * col[8 * 4];
        a2 -= W4 * col[8 * 4];
        a3 += W4 * col[8 * 4];
    }

    if (col[8 * 6]) {
        a0 += W6 * col[8 * 6];
        a1 -= W2 * col[8 * 6];
        a2 += W2 * col[8 * 6];
        a3 -= W6 * col[8 * 6];
    }

    if (col[8 * 1]) {
        b0 = W1 * col[8 * 1];
        b1 = W3 * col[8 * 1];
        b2 = W5 * col[8 * 1];
        b3 = W7 * col[8 * 1];
    } else {
        b0 = 0;
        b1 = 0;
        b2 = 0;
        b3 = 0;
    }

    if (col[8 * 3]) {
        b0 += W3 * col[8 * 3];
        b1 -= W7 * col[8 * 3];
        b2 -= W1 * col[8 * 3];
        b3 -= W5 * col[8 * 3];
    }

    if (col[8 * 5]) {
        b0 += W5 * col[8 * 5];
        b1 -= W1 * col[8 * 5];
        b2 += W7 * col[8 * 5];
        b3 += W3 * col[8 * 5];
    }

    if (col[8 * 7]) {
        b0 += W7 * col[8 * 7];
        b1 -= W5 * col[8 * 7];
        b2 += W3 * col[8 * 7];
        b3 -= W1 * col[8 * 7];
    }

    col[8 * 0] = (a0 + b0) >> COL_SHIFT;
    col[8 * 7] = (a0 - b0) >> COL_SHIFT;
    col[8 * 1] = (a1 + b1) >> COL_SHIFT;
    col[8 * 6] = (a1 - b1) >> COL_SHIFT;
    col[8 * 2] = (a2 + b2) >> COL_SHIFT;
    col[8 * 5] = (a2 - b2) >> COL_SHIFT;
    col[8 * 3] = (a3 + b3) >> COL_SHIFT;
    col[8 * 4] = (a3 - b3) >> COL_SHIFT;
}

/* If all rows but the first one are zero after row transformation,
   all rows will be identical after column transformation.  So transform
   row 0 only, then replicate it into rows 1..7 with quadword stores
   (each row is two 64-bit words l and r). */
static inline void idct_col2(DCTELEM *col)
{
    int i;
    uint64_t l, r;

    for (i = 0; i < 8; ++i) {
        int_fast32_t a0 = col[i] + (1 << (COL_SHIFT - 1)) / W4;
        a0 *= W4;
        col[i] = a0 >> COL_SHIFT;
    }

    l = ldq(col + 0 * 4);  r = ldq(col + 1 * 4);
    stq(l, col +  2 * 4);  stq(r, col +  3 * 4);
    stq(l, col +  4 * 4);  stq(r, col +  5 * 4);
    stq(l, col +  6 * 4);  stq(r, col +  7 * 4);
    stq(l, col +  8 * 4);  stq(r, col +  9 * 4);
    stq(l, col + 10 * 4);  stq(r, col + 11 * 4);
    stq(l, col + 12 * 4);  stq(r, col + 13 * 4);
    stq(l, col + 14 * 4);  stq(r, col + 15 * 4);
}

/* Full 8x8 2-D IDCT, in place: row pass, then a column pass chosen
 * according to how sparse the rows turned out to be. */
void ff_simple_idct_axp(DCTELEM *block)
{
    int i;
    int rowsZero = 1;           /* all rows except row 0 zero */
    int rowsConstant = 1;       /* all rows consist of a constant value */

    for (i = 0; i < 8; i++) {
        int sparseness = idct_row(block + 8 * i);

        if (i > 0 && sparseness > 0)
            rowsZero = 0;
        if (sparseness == 2)
            rowsConstant = 0;
    }

    if (rowsZero) {
        idct_col2(block);
    } else if (rowsConstant) {
        /* Every row is constant: one column transform determines all
         * columns; splat each output row from its first two elements. */
        idct_col(block);
        for (i = 0; i < 8; i += 2) {
            uint64_t v = (uint16_t) block[0];
            uint64_t w = (uint16_t) block[8];

            v |= v << 16;
            w |= w << 16;
            v |= v << 32;
            w |= w << 32;
            stq(v, block + 0 * 4);
            stq(v, block + 1 * 4);
            stq(w, block + 2 * 4);
            stq(w, block + 3 * 4);
            block += 4 * 4;
        }
    } else {
        for (i = 0; i < 8; i++)
            idct_col(block + i);
    }
}

/* IDCT + clamped store into the destination picture. */
void ff_simple_idct_put_axp(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_simple_idct_axp(block);
    put_pixels_clamped_axp_p(block, dest, line_size);
}

/* IDCT + clamped accumulate into the destination picture. */
void ff_simple_idct_add_axp(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_simple_idct_axp(block);
    add_pixels_clamped_axp_p(block, dest, line_size);
}
123linslouis-android-video-cutter
jni/libavcodec/alpha/simple_idct_alpha.c
C
asf20
7,632
/*
 * Alpha optimized DSP utils
 * Copyright (c) 2002 Falk Hueffner <falk@debian.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
#include "asm.h"

/* H.263 dequantization over coefficients 0..n_coeffs, four 16-bit levels at a
 * time packed in one 64-bit word (SIMD-within-a-register).  For each nonzero
 * level the result is level * (2*qscale) + qadd for positive levels and
 * level * (2*qscale) - qadd for negative ones; zero levels stay zero.
 * NOTE(review): WORD_VEC() comes from asm.h and appears to replicate a 16-bit
 * value into all four word lanes — confirm there. */
static void dct_unquantize_h263_axp(DCTELEM *block, int n_coeffs,
                                    uint64_t qscale, uint64_t qadd)
{
    uint64_t qmul = qscale << 1;
    /* qmul*255 >> 8 ~= qmul, used below to turn the unsigned lane multiply
     * into the signed result for negative levels. */
    uint64_t correction = WORD_VEC(qmul * 255 >> 8);
    int i;

    qadd = WORD_VEC(qadd);

    for(i = 0; i <= n_coeffs; block += 4, i += 4) {
        uint64_t levels, negmask, zeros, add, sub;

        levels = ldq(block);
        if (levels == 0)
            continue;

#ifdef __alpha_max__
        /* I don't think the speed difference justifies runtime
           detection. */
        negmask = maxsw4(levels, -1); /* negative -> ffff (-1) */
        negmask = minsw4(negmask, 0); /* positive -> 0000 (0) */
#else
        /* Without MAX instructions, derive the per-lane sign mask from
         * byte-wise unsigned compares. */
        negmask = cmpbge(WORD_VEC(0x7fff), levels);
        negmask &= (negmask >> 1) | (1 << 7);
        negmask = zap(-1, negmask);
#endif

        /* Per-lane "level == 0" byte mask. */
        zeros = cmpbge(0, levels);
        zeros &= zeros >> 1;
        /* zeros |= zeros << 1 is not needed since qadd <= 255, so
           zapping the lower byte suffices.  */

        levels *= qmul;
        levels -= correction & (negmask << 16);

        /* Negate qadd for negative levels by splitting it into an added and
         * a subtracted part selected by the sign mask. */
        add = qadd & ~negmask;
        sub = qadd & negmask;
        /* Set qadd to 0 for levels == 0.  */
        add = zap(add, zeros);
        levels += add;
        levels -= sub;

        stq(levels, block);
    }
}

/* Intra-block wrapper: rescales the DC coefficient separately (unless AC
 * prediction is in use, h263_aic), then restores it after the vector pass. */
static void dct_unquantize_h263_intra_axp(MpegEncContext *s, DCTELEM *block,
                                          int n, int qscale)
{
    int n_coeffs;
    uint64_t qadd;
    DCTELEM block0 = block[0];

    if (!s->h263_aic) {
        if (n < 4)
            block0 *= s->y_dc_scale;    /* luma DC scale */
        else
            block0 *= s->c_dc_scale;    /* chroma DC scale */
        qadd = (qscale - 1) | 1;
    } else {
        qadd = 0;
    }

    if(s->ac_pred)
        n_coeffs = 63;
    else
        n_coeffs = s->inter_scantable.raster_end[s->block_last_index[n]];

    dct_unquantize_h263_axp(block, n_coeffs, qscale, qadd);

    block[0] = block0;
}

/* Inter-block wrapper: all coefficients, including DC, use the same scale. */
static void dct_unquantize_h263_inter_axp(MpegEncContext *s, DCTELEM *block,
                                          int n, int qscale)
{
    int n_coeffs = s->inter_scantable.raster_end[s->block_last_index[n]];
    dct_unquantize_h263_axp(block, n_coeffs, qscale, (qscale - 1) | 1);
}

/* Install the Alpha-optimized dequantizers into the MpegEncContext vtable. */
void MPV_common_init_axp(MpegEncContext *s)
{
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_axp;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_axp;
}
123linslouis-android-video-cutter
jni/libavcodec/alpha/mpegvideo_alpha.c
C
asf20
3,323
/* * Sunplus JPEG tables * Copyright (c) 2003 the ffmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_SP5X_H #define AVCODEC_SP5X_H #include <stdint.h> static const uint8_t sp5x_data_sof[] = { 0xFF, 0xC0, /* SOF */ 0x00, 0x11, /* len */ 0x08, /* bits */ 0x00, 0xf0, /* height (default: 240) */ 0x01, 0x40, /* width (default: 240) */ 0x03, /* nb components */ 0x01, 0x22, 0x00, /* 21 vs 22 ? 
*/ 0x02, 0x11, 0x01, 0x03, 0x11, 0x01 }; static const uint8_t sp5x_data_sos[] = { 0xFF, 0xDA, /* SOS */ 0x00, 0x0C, /* len */ 0x03, /* nb components */ 0x01, 0x00, 0x02, 0x11, 0x03, 0x11, 0x00, /* Ss */ 0x3F, /* Se */ 0x00 /* Ah/Al */ }; static const uint8_t sp5x_data_dqt[] = { 0xFF, 0xDB, /* DQT */ 0x00, 0x84, /* len */ 0x00, 0x05, 0x03, 0x04, 0x04, 0x04, 0x03, 0x05, 0x04, 0x04, 0x04, 0x06, 0x05, 0x05, 0x06, 0x08, 0x0D, 0x08, 0x08, 0x07, 0x07, 0x08, 0x10, 0x0C, 0x0C, 0x0A, 0x0D, 0x14, 0x11, 0x15, 0x14, 0x13, 0x11, 0x13, 0x13, 0x16, 0x18, 0x1F, 0x1A, 0x16, 0x17, 0x1E, 0x17, 0x13, 0x13, 0x1B, 0x25, 0x1C, 0x1E, 0x20, 0x21, 0x23, 0x23, 0x23, 0x15, 0x1A, 0x27, 0x29, 0x26, 0x22, 0x29, 0x1F, 0x22, 0x23, 0x22, 0x01, 0x05, 0x06, 0x06, 0x08, 0x07, 0x08, 0x10, 0x08, 0x08, 0x10, 0x22, 0x16, 0x13, 0x16, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22 }; static const uint8_t sp5x_data_dht[] = { 0xFF, 0xC4, /* DHT */ 0x01, 0xA2, /* len */ 0x00, 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x01, 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x10, 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 
0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0x11, 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA }; static const uint8_t sp5x_quant_table[20][64]= { /* index 0, Q50 */ { 16, 11, 12, 14, 12, 10, 16, 14, 13, 14, 18, 17, 16, 19, 24, 40, 26, 24, 22, 22, 24, 49, 35, 37, 29, 40, 58, 51, 61, 60, 57, 51, 56, 55, 64, 72, 92, 78, 64, 68, 87, 69, 55, 56, 80,109, 81, 87, 95, 98,103,104,103, 62, 77,113,121,112,100,120, 
92,101,103, 99 }, { 17, 18, 18, 24, 21, 24, 47, 26, 26, 47, 99, 66, 56, 66, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99 }, /* index 1, Q70 */ { 10, 7, 7, 8, 7, 6, 10, 8, 8, 8, 11, 10, 10, 11, 14, 24, 16, 14, 13, 13, 14, 29, 21, 22, 17, 24, 35, 31, 37, 36, 34, 31, 34, 33, 38, 43, 55, 47, 38, 41, 52, 41, 33, 34, 48, 65, 49, 52, 57, 59, 62, 62, 62, 37, 46, 68, 73, 67, 60, 72, 55, 61, 62, 59 }, { 10, 11, 11, 14, 13, 14, 28, 16, 16, 28, 59, 40, 34, 40, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59 }, /* index 2, Q80 */ { 6, 4, 5, 6, 5, 4, 6, 6, 5, 6, 7, 7, 6, 8, 10, 16, 10, 10, 9, 9, 10, 20, 14, 15, 12, 16, 23, 20, 24, 24, 23, 20, 22, 22, 26, 29, 37, 31, 26, 27, 35, 28, 22, 22, 32, 44, 32, 35, 38, 39, 41, 42, 41, 25, 31, 45, 48, 45, 40, 48, 37, 40, 41, 40 }, { 7, 7, 7, 10, 8, 10, 19, 10, 10, 19, 40, 26, 22, 26, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40 }, /* index 3, Q85 */ { 5, 3, 4, 4, 4, 3, 5, 4, 4, 4, 5, 5, 5, 6, 7, 12, 8, 7, 7, 7, 7, 15, 11, 11, 9, 12, 17, 15, 18, 18, 17, 15, 17, 17, 19, 22, 28, 23, 19, 20, 26, 21, 17, 17, 24, 33, 24, 26, 29, 29, 31, 31, 31, 19, 23, 34, 36, 34, 30, 36, 28, 30, 31, 30 }, { 5, 5, 5, 7, 6, 7, 14, 8, 8, 14, 30, 20, 17, 20, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30 }, /* index 4, Q90 */ { 3, 2, 2, 3, 2, 2, 3, 3, 3, 3, 4, 3, 3, 4, 5, 8, 5, 5, 4, 4, 5, 10, 7, 7, 6, 8, 12, 10, 12, 12, 11, 10, 11, 11, 13, 14, 18, 16, 13, 14, 17, 
14, 11, 11, 16, 22, 16, 17, 19, 20, 21, 21, 21, 12, 15, 23, 24, 22, 20, 24, 18, 20, 21, 20 }, { 3, 4, 4, 5, 4, 5, 9, 5, 5, 9, 20, 13, 11, 13, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20 }, /* index 5, Q60 */ { 13, 9, 10, 11, 10, 8, 13, 11, 10, 11, 14, 14, 13, 15, 19, 32, 21, 19, 18, 18, 19, 39, 28, 30, 23, 32, 46, 41, 49, 48, 46, 41, 45, 44, 51, 58, 74, 62, 51, 54, 70, 55, 44, 45, 64, 87, 65, 70, 76, 78, 82, 83, 82, 50, 62, 90, 97, 90, 80, 96, 74, 81, 82, 79 }, { 14, 14, 14, 19, 17, 19, 38, 21, 21, 38, 79, 53, 45, 53, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79 }, /* index 6, Q25 */ { 32, 22, 24, 28, 24, 20, 32, 28, 26, 28, 36, 34, 32, 38, 48, 80, 52, 48, 44, 44, 48, 98, 70, 74, 58, 80,116,102,122,120,114,102, 112,110,128,144,184,156,128,136,174,138,110,112,160,218,162,174, 190,196,206,208,206,124,154,226,242,224,200,240,184,202,206,198 }, { 34, 36, 36, 48, 42, 48, 94, 52, 52, 94,198,132,112,132,198,198, 198,198,198,198,198,198,198,198,198,198,198,198,198,198,198,198, 198,198,198,198,198,198,198,198,198,198,198,198,198,198,198,198, 198,198,198,198,198,198,198,198,198,198,198,198,198,198,198,198 }, /* index 7, Q95 */ { 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 2, 2, 4, 3, 2, 2, 2, 2, 5, 4, 4, 3, 4, 6, 5, 6, 6, 6, 5, 6, 6, 6, 7, 9, 8, 6, 7, 9, 7, 6, 6, 8, 11, 8, 9, 10, 10, 10, 10, 10, 6, 8, 11, 12, 11, 10, 12, 9, 10, 10, 10 }, { 2, 2, 2, 2, 2, 2, 5, 3, 3, 5, 10, 7, 6, 7, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10 }, /* index 8, Q93 */ { 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 3, 2, 2, 3, 3, 6, 4, 3, 3, 3, 3, 7, 5, 5, 4, 6, 
8, 7, 9, 8, 8, 7, 8, 8, 9, 10, 13, 11, 9, 10, 12, 10, 8, 8, 11, 15, 11, 12, 13, 14, 14, 15, 14, 9, 11, 16, 17, 16, 14, 17, 13, 14, 14, 14 }, { 2, 3, 3, 3, 3, 3, 7, 4, 4, 7, 14, 9, 8, 9, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14 }, /* index 9, Q40 */ { 20, 14, 15, 18, 15, 13, 20, 18, 16, 18, 23, 21, 20, 24, 30, 50, 33, 30, 28, 28, 30, 61, 44, 46, 36, 50, 73, 64, 76, 75, 71, 64, 70, 69, 80, 90,115, 98, 80, 85,109, 86, 69, 70,100,136,101,109, 119,123,129,130,129, 78, 96,141,151,140,125,150,115,126,129,124 }, { 21, 23, 23, 30, 26, 30, 59, 33, 33, 59,124, 83, 70, 83,124,124, 124,124,124,124,124,124,124,124,124,124,124,124,124,124,124,124, 124,124,124,124,124,124,124,124,124,124,124,124,124,124,124,124, 124,124,124,124,124,124,124,124,124,124,124,124,124,124,124,124 } }; #if 0 /* 4NF-M, not ZigZag */ static const uint8_t sp5x_quant_table_orig[18][64] = { /* index 0, Q50 */ { 16, 11, 10, 16, 24, 40, 51, 61, 12, 12, 14, 19, 26, 58, 60, 55, 14, 13, 16, 24, 40, 57, 69, 56, 14, 17, 22, 29, 51, 87, 80, 62, 18, 22, 37, 56, 68,109,103, 77, 24, 35, 55, 64, 81,104,113, 92, 49, 64, 78, 87,103,121,120,101, 72, 92, 95, 98,112,100,103, 99 }, { 17, 18, 24, 47, 99, 99, 99, 99, 18, 21, 26, 66, 99, 99, 99, 99, 24, 26, 56, 99, 99, 99, 99, 99, 47, 66, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99 }, /* index 1, Q70 */ { 10, 7, 6, 10, 14, 24, 31, 37, 7, 7, 8, 11, 16, 35, 36, 33, 8, 8, 10, 14, 24, 34, 41, 34, 8, 10, 13, 17, 31, 52, 48, 37, 11, 13, 22, 34, 41, 65, 62, 46, 14, 21, 33, 38, 49, 62, 68, 55, 29, 38, 47, 52, 62, 73, 72, 61, 43, 55, 57, 59, 67, 60, 62, 59 }, { 10, 11, 14, 28, 59, 59, 59, 59, 11, 13, 16, 40, 59, 59, 59, 59, 14, 16, 34, 59, 59, 59, 59, 59, 28, 40, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 
59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59 }, /* index 2, Q80 */ { 6, 4, 4, 6, 10, 16, 20, 24, 5, 5, 6, 8, 10, 23, 24, 22, 6, 5, 6, 10, 16, 23, 28, 22, 6, 7, 9, 12, 20, 35, 32, 25, 7, 9, 15, 22, 27, 44, 41, 31, 10, 14, 22, 26, 32, 42, 45, 37, 20, 26, 31, 35, 41, 48, 48, 40, 29, 37, 38, 39, 45, 40, 41, 40 }, { 7, 7, 10, 19, 40, 40, 40, 40, 7, 8, 10, 26, 40, 40, 40, 40, 10, 10, 22, 40, 40, 40, 40, 40, 19, 26, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40 }, /* index 3, Q85 */ { 5, 3, 3, 5, 7, 12, 15, 18, 4, 4, 4, 6, 8, 17, 18, 17, 4, 4, 5, 7, 12, 17, 21, 17, 4, 5, 7, 9, 15, 26, 24, 19, 5, 7, 11, 17, 20, 33, 31, 23, 7, 11, 17, 19, 24, 31, 34, 28, 15, 19, 23, 26, 31, 36, 36, 30, 22, 28, 29, 29, 34, 30, 31, 30 }, { 5, 5, 7, 14, 30, 30, 30, 30, 5, 6, 8, 20, 30, 30, 30, 30, 7, 8, 17, 30, 30, 30, 30, 30, 14, 20, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30 }, /* index 4, Q90 */ { 3, 2, 2, 3, 5, 8, 10, 12, 2, 2, 3, 4, 5, 12, 12, 11, 3, 3, 3, 5, 8, 11, 14, 11, 3, 3, 4, 6, 10, 17, 16, 12, 4, 4, 7, 11, 14, 22, 21, 15, 5, 7, 11, 13, 16, 21, 23, 18, 10, 13, 16, 17, 21, 24, 24, 20, 14, 18, 19, 20, 22, 20, 21, 20 }, { 3, 4, 5, 9, 20, 20, 20, 20, 4, 4, 5, 13, 20, 20, 20, 20, 5, 5, 11, 20, 20, 20, 20, 20, 9, 13, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20 }, /* index 5, Q60 */ { 13, 9, 8, 13, 19, 32, 41, 49, 10, 10, 11, 15, 21, 46, 48, 44, 11, 10, 13, 19, 32, 46, 55, 45, 11, 14, 18, 23, 41, 70, 64, 50, 14, 18, 30, 45, 54, 87, 82, 62, 19, 28, 44, 51, 65, 83, 90, 74, 39, 51, 62, 70, 82, 97, 96, 81, 58, 74, 76, 78, 90, 80, 82, 79 }, { 14, 14, 19, 38, 79, 79, 79, 79, 14, 17, 21, 53, 79, 79, 79, 79, 19, 21, 45, 79, 79, 79, 79, 79, 
38, 53, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79 }, /* index 6, Q25 */ { 32, 22, 20, 32, 48, 80,102,122, 24, 24, 28, 38, 52,116,120,110, 28, 26, 32, 48, 80,114,138,112, 28, 34, 44, 58,102,174,160,124, 36, 44, 74,112,136,218,206,154, 48, 70,110,128,162,208,226,184, 98,128,156,174,206,242,240,202,144,184,190,196,224,200,206,198 }, { 34, 36, 48, 94,198,198,198,198, 36, 42, 52,132,198,198,198,198, 48, 52,112,198,198,198,198,198, 94,132,198,198,198,198,198,198, 198,198,198,198,198,198,198,198,198,198,198,198,198,198,198,198, 198,198,198,198,198,198,198,198,198,198,198,198,198,198,198,198 }, /* index 7, Q95 */ { 2, 1, 1, 2, 2, 4, 5, 6, 1, 1, 1, 2, 3, 6, 6, 6, 1, 1, 2, 2, 4, 6, 7, 6, 1, 2, 2, 3, 5, 9, 8, 6, 2, 2, 4, 6, 7, 11, 10, 8, 2, 4, 6, 6, 8, 10, 11, 9, 5, 6, 8, 9, 10, 12, 12, 10, 7, 9, 10, 10, 11, 10, 10, 10 }, { 2, 2, 2, 5, 10, 10, 10, 10, 2, 2, 3, 7, 10, 10, 10, 10, 2, 3, 6, 10, 10, 10, 10, 10, 5, 7, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10 }, /* index 8, Q93 */ { 2, 2, 1, 2, 3, 6, 7, 9, 2, 2, 2, 3, 4, 8, 8, 8, 2, 2, 2, 3, 6, 8, 10, 8, 2, 2, 3, 4, 7, 12, 11, 9, 3, 3, 5, 8, 10, 15, 14, 11, 3, 5, 8, 9, 11, 15, 16, 13, 7, 9, 11, 12, 14, 17, 17, 14, 10, 13, 13, 14, 16, 14, 14, 14 }, { 2, 3, 3, 7, 14, 14, 14, 14, 3, 3, 4, 9, 14, 14, 14, 14, 3, 4, 8, 14, 14, 14, 14, 14, 7, 9, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14 } }; #endif #endif /* AVCODEC_SP5X_H */
123linslouis-android-video-cutter
jni/libavcodec/sp5x.h
C
asf20
16,805
/*
 * copyright (c) 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * mpeg audio declarations for both encoder and decoder.
 */

#ifndef AVCODEC_MPEGAUDIO_H
#define AVCODEC_MPEGAUDIO_H

#include "avcodec.h"
#include "get_bits.h"
#include "dsputil.h"

#define CONFIG_AUDIO_NONSHORT 0

/* max frame size, in samples */
#define MPA_FRAME_SIZE 1152

/* max compressed frame size */
#define MPA_MAX_CODED_FRAME_SIZE 1792

#define MPA_MAX_CHANNELS 2

#define SBLIMIT 32 /* number of subbands */

/* channel-mode values as coded in the frame header */
#define MPA_STEREO  0
#define MPA_JSTEREO 1
#define MPA_DUAL    2
#define MPA_MONO    3

/* header + layer + bitrate + freq + lsf/mpeg25 */
#define SAME_HEADER_MASK \
   (0xffe00000 | (3 << 17) | (0xf << 12) | (3 << 10) | (3 << 19))

#define MP3_MASK 0xFFFE0CCF

#if CONFIG_MPEGAUDIO_HP
#define FRAC_BITS   23   /* fractional bits for sb_samples and dct */
#define WFRAC_BITS  16   /* fractional bits for window */
#else
#define FRAC_BITS   15   /* fractional bits for sb_samples and dct */
#define WFRAC_BITS  14   /* fractional bits for window */
#endif

#define FRAC_ONE    (1 << FRAC_BITS)

#define FIX(a)   ((int)((a) * FRAC_ONE))

/* Output sample type: 32-bit only in the high-precision non-short build. */
#if CONFIG_MPEGAUDIO_HP && CONFIG_AUDIO_NONSHORT
typedef int32_t OUT_INT;
#define OUT_MAX INT32_MAX
#define OUT_MIN INT32_MIN
#define OUT_SHIFT (WFRAC_BITS + FRAC_BITS - 31)
#define OUT_FMT SAMPLE_FMT_S32
#else
typedef int16_t OUT_INT;
#define OUT_MAX INT16_MAX
#define OUT_MIN INT16_MIN
#define OUT_SHIFT (WFRAC_BITS + FRAC_BITS - 15)
#define OUT_FMT SAMPLE_FMT_S16
#endif

/* Internal fixed-point sample type, sized to hold FRAC_BITS fractions. */
#if FRAC_BITS <= 15
typedef int16_t MPA_INT;
#else
typedef int32_t MPA_INT;
#endif

#define BACKSTEP_SIZE 512
#define EXTRABYTES 24

/* layer 3 "granule" */
typedef struct GranuleDef {
    uint8_t scfsi;
    int part2_3_length;
    int big_values;
    int global_gain;
    int scalefac_compress;
    uint8_t block_type;
    uint8_t switch_point;
    int table_select[3];
    int subblock_gain[3];
    uint8_t scalefac_scale;
    uint8_t count1table_select;
    int region_size[3]; /* number of huffman codes in each region */
    int preflag;
    int short_start, long_end; /* long/short band indexes */
    uint8_t scale_factors[40];
    int32_t sb_hybrid[SBLIMIT * 18]; /* 576 samples */
} GranuleDef;

/* Fields shared by MPADecodeHeader and MPADecodeContext (kept as a macro so
 * the context can embed the parsed-header fields directly). */
#define MPA_DECODE_HEADER \
    int frame_size; \
    int error_protection; \
    int layer; \
    int sample_rate; \
    int sample_rate_index; /* between 0 and 8 */ \
    int bit_rate; \
    int nb_channels; \
    int mode; \
    int mode_ext; \
    int lsf;

typedef struct MPADecodeHeader {
  MPA_DECODE_HEADER
} MPADecodeHeader;

typedef struct MPADecodeContext {
    MPA_DECODE_HEADER
    uint8_t last_buf[2*BACKSTEP_SIZE + EXTRABYTES];
    int last_buf_size;
    /* next header (used in free format parsing) */
    uint32_t free_format_next_header;
    GetBitContext gb;
    GetBitContext in_gb;
    DECLARE_ALIGNED(16, MPA_INT, synth_buf)[MPA_MAX_CHANNELS][512 * 2];
    int synth_buf_offset[MPA_MAX_CHANNELS];
    DECLARE_ALIGNED(16, int32_t, sb_samples)[MPA_MAX_CHANNELS][36][SBLIMIT];
    int32_t mdct_buf[MPA_MAX_CHANNELS][SBLIMIT * 18]; /* previous samples, for layer 3 MDCT */
    GranuleDef granules[2][2]; /* Used in Layer 3 */
#ifdef DEBUG
    int frame_count;
#endif
    void (*compute_antialias)(struct MPADecodeContext *s, struct GranuleDef *g);
    int adu_mode; ///< 0 for standard mp3, 1 for adu formatted mp3
    int dither_state;
    int error_recognition;
    AVCodecContext* avctx;
} MPADecodeContext;

/* layer 3 huffman tables */
typedef struct HuffTable {
    int xsize;
    const uint8_t *bits;
    const uint16_t *codes;
} HuffTable;

int ff_mpa_l2_select_table(int bitrate, int nb_channels, int freq, int lsf);
int ff_mpa_decode_header(AVCodecContext *avctx, uint32_t head, int *sample_rate,
                         int *channels, int *frame_size, int *bitrate);

extern MPA_INT ff_mpa_synth_window[];
void ff_mpa_synth_init(MPA_INT *window);
void ff_mpa_synth_filter(MPA_INT *synth_buf_ptr, int *synth_buf_offset,
                         MPA_INT *window, int *dither_state,
                         OUT_INT *samples, int incr,
                         int32_t sb_samples[SBLIMIT]);

/* fast header check for resync */
static inline int ff_mpa_check_header(uint32_t header){
    /* header */
    if ((header & 0xffe00000) != 0xffe00000)
        return -1;
    /* layer check */
    if ((header & (3<<17)) == 0)
        return -1;
    /* bit rate */
    if ((header & (0xf<<12)) == 0xf<<12)
        return -1;
    /* frequency */
    if ((header & (3<<10)) == 3<<10)
        return -1;
    return 0;
}

#endif /* AVCODEC_MPEGAUDIO_H */
123linslouis-android-video-cutter
jni/libavcodec/mpegaudio.h
C
asf20
5,302
/*
 * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
 * Copyright (c) 2003-2010 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG4 part10 DSP functions.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include <stdint.h>
#include "avcodec.h"
#include "h264dsp.h"

/* Weighted-prediction pixel ops: single-source scaling and two-source
 * (bi-predictive) blending, both clipped to 0..255. */
#define op_scale1(x)  block[x] = av_clip_uint8( (block[x]*weight + offset) >> log2_denom )
#define op_scale2(x)  dst[x] = av_clip_uint8( (src[x]*weights + dst[x]*weightd + offset) >> (log2_denom+1))

/* Generates weight_h264_pixels_WxH_c and biweight_h264_pixels_WxH_c.
 * The `if(W==n) continue;` lines cut each row short for narrow blocks, so
 * one macro body serves widths 2, 4, 8 and 16. */
#define H264_WEIGHT(W,H) \
static void weight_h264_pixels ## W ## x ## H ## _c(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \
    int y; \
    offset <<= log2_denom; \
    if(log2_denom) offset += 1<<(log2_denom-1); /* rounding bias */ \
    for(y=0; y<H; y++, block += stride){ \
        op_scale1(0); \
        op_scale1(1); \
        if(W==2) continue; \
        op_scale1(2); \
        op_scale1(3); \
        if(W==4) continue; \
        op_scale1(4); \
        op_scale1(5); \
        op_scale1(6); \
        op_scale1(7); \
        if(W==8) continue; \
        op_scale1(8); \
        op_scale1(9); \
        op_scale1(10); \
        op_scale1(11); \
        op_scale1(12); \
        op_scale1(13); \
        op_scale1(14); \
        op_scale1(15); \
    } \
} \
static void biweight_h264_pixels ## W ## x ## H ## _c(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    int y; \
    offset = ((offset + 1) | 1) << log2_denom; \
    for(y=0; y<H; y++, dst += stride, src += stride){ \
        op_scale2(0); \
        op_scale2(1); \
        if(W==2) continue; \
        op_scale2(2); \
        op_scale2(3); \
        if(W==4) continue; \
        op_scale2(4); \
        op_scale2(5); \
        op_scale2(6); \
        op_scale2(7); \
        if(W==8) continue; \
        op_scale2(8); \
        op_scale2(9); \
        op_scale2(10); \
        op_scale2(11); \
        op_scale2(12); \
        op_scale2(13); \
        op_scale2(14); \
        op_scale2(15); \
    } \
}

/* All block sizes used by H.264 weighted prediction. */
H264_WEIGHT(16,16)
H264_WEIGHT(16,8)
H264_WEIGHT(8,16)
H264_WEIGHT(8,8)
H264_WEIGHT(8,4)
H264_WEIGHT(4,8)
H264_WEIGHT(4,4)
H264_WEIGHT(4,2)
H264_WEIGHT(2,4)
H264_WEIGHT(2,2)

#undef op_scale1
#undef op_scale2
#undef H264_WEIGHT

/* Normal (bS < 4) luma deblocking across one 16-pixel edge, processed as
 * four groups of 4 lines, each with its own clipping strength tc0[i];
 * tc0[i] < 0 skips the group.  xstride steps across the edge, ystride
 * along it, so the same code serves vertical and horizontal edges. */
static av_always_inline av_flatten void h264_loop_filter_luma_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0)
{
    int i, d;
    for( i = 0; i < 4; i++ ) {
        if( tc0[i] < 0 ) {
            pix += 4*ystride;
            continue;
        }
        for( d = 0; d < 4; d++ ) {
            const int p0 = pix[-1*xstride];
            const int p1 = pix[-2*xstride];
            const int p2 = pix[-3*xstride];
            const int q0 = pix[0];
            const int q1 = pix[1*xstride];
            const int q2 = pix[2*xstride];

            /* Filter only where the edge looks like a blocking artifact
             * rather than a real image edge. */
            if( FFABS( p0 - q0 ) < alpha &&
                FFABS( p1 - p0 ) < beta &&
                FFABS( q1 - q0 ) < beta ) {

                int tc = tc0[i];
                int i_delta;

                /* Flat on the p side: also adjust p1, and widen tc. */
                if( FFABS( p2 - p0 ) < beta ) {
                    if(tc0[i])
                        pix[-2*xstride] = p1 + av_clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] );
                    tc++;
                }
                /* Flat on the q side: also adjust q1, and widen tc. */
                if( FFABS( q2 - q0 ) < beta ) {
                    if(tc0[i])
                        pix[   xstride] = q1 + av_clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] );
                    tc++;
                }

                i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
                pix[-xstride] = av_clip_uint8( p0 + i_delta );    /* p0' */
                pix[0]        = av_clip_uint8( q0 - i_delta );    /* q0' */
            }
            pix += ystride;
        }
    }
}

/* Vertical edge: step across with `stride`, along with 1. */
static void h264_v_loop_filter_luma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    h264_loop_filter_luma_c(pix, stride, 1, alpha, beta, tc0);
}

/* Horizontal edge: strides swapped relative to the vertical case. */
static void h264_h_loop_filter_luma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    h264_loop_filter_luma_c(pix, 1, stride,
alpha, beta, tc0); } static av_always_inline av_flatten void h264_loop_filter_luma_intra_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta) { int d; for( d = 0; d < 16; d++ ) { const int p2 = pix[-3*xstride]; const int p1 = pix[-2*xstride]; const int p0 = pix[-1*xstride]; const int q0 = pix[ 0*xstride]; const int q1 = pix[ 1*xstride]; const int q2 = pix[ 2*xstride]; if( FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { if(FFABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){ if( FFABS( p2 - p0 ) < beta) { const int p3 = pix[-4*xstride]; /* p0', p1', p2' */ pix[-1*xstride] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3; pix[-2*xstride] = ( p2 + p1 + p0 + q0 + 2 ) >> 2; pix[-3*xstride] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3; } else { /* p0' */ pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2; } if( FFABS( q2 - q0 ) < beta) { const int q3 = pix[3*xstride]; /* q0', q1', q2' */ pix[0*xstride] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3; pix[1*xstride] = ( p0 + q0 + q1 + q2 + 2 ) >> 2; pix[2*xstride] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3; } else { /* q0' */ pix[0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2; } }else{ /* p0', q0' */ pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2; pix[ 0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2; } } pix += ystride; } } static void h264_v_loop_filter_luma_intra_c(uint8_t *pix, int stride, int alpha, int beta) { h264_loop_filter_luma_intra_c(pix, stride, 1, alpha, beta); } static void h264_h_loop_filter_luma_intra_c(uint8_t *pix, int stride, int alpha, int beta) { h264_loop_filter_luma_intra_c(pix, 1, stride, alpha, beta); } static av_always_inline av_flatten void h264_loop_filter_chroma_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0) { int i, d; for( i = 0; i < 4; i++ ) { const int tc = tc0[i]; if( tc <= 0 ) { pix += 2*ystride; continue; } for( d = 0; d < 2; d++ ) { const int p0 = pix[-1*xstride]; const int p1 = pix[-2*xstride]; const int q0 = pix[0]; const int q1 = pix[1*xstride]; if( 
FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { int delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); pix[-xstride] = av_clip_uint8( p0 + delta ); /* p0' */ pix[0] = av_clip_uint8( q0 - delta ); /* q0' */ } pix += ystride; } } } static void h264_v_loop_filter_chroma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) { h264_loop_filter_chroma_c(pix, stride, 1, alpha, beta, tc0); } static void h264_h_loop_filter_chroma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) { h264_loop_filter_chroma_c(pix, 1, stride, alpha, beta, tc0); } static av_always_inline av_flatten void h264_loop_filter_chroma_intra_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta) { int d; for( d = 0; d < 8; d++ ) { const int p0 = pix[-1*xstride]; const int p1 = pix[-2*xstride]; const int q0 = pix[0]; const int q1 = pix[1*xstride]; if( FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { pix[-xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2; /* p0' */ pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; /* q0' */ } pix += ystride; } } static void h264_v_loop_filter_chroma_intra_c(uint8_t *pix, int stride, int alpha, int beta) { h264_loop_filter_chroma_intra_c(pix, stride, 1, alpha, beta); } static void h264_h_loop_filter_chroma_intra_c(uint8_t *pix, int stride, int alpha, int beta) { h264_loop_filter_chroma_intra_c(pix, 1, stride, alpha, beta); } void ff_h264dsp_init(H264DSPContext *c) { c->h264_idct_add= ff_h264_idct_add_c; c->h264_idct8_add= ff_h264_idct8_add_c; c->h264_idct_dc_add= ff_h264_idct_dc_add_c; c->h264_idct8_dc_add= ff_h264_idct8_dc_add_c; c->h264_idct_add16 = ff_h264_idct_add16_c; c->h264_idct8_add4 = ff_h264_idct8_add4_c; c->h264_idct_add8 = ff_h264_idct_add8_c; c->h264_idct_add16intra= ff_h264_idct_add16intra_c; c->weight_h264_pixels_tab[0]= weight_h264_pixels16x16_c; c->weight_h264_pixels_tab[1]= weight_h264_pixels16x8_c; c->weight_h264_pixels_tab[2]= weight_h264_pixels8x16_c; 
c->weight_h264_pixels_tab[3]= weight_h264_pixels8x8_c; c->weight_h264_pixels_tab[4]= weight_h264_pixels8x4_c; c->weight_h264_pixels_tab[5]= weight_h264_pixels4x8_c; c->weight_h264_pixels_tab[6]= weight_h264_pixels4x4_c; c->weight_h264_pixels_tab[7]= weight_h264_pixels4x2_c; c->weight_h264_pixels_tab[8]= weight_h264_pixels2x4_c; c->weight_h264_pixels_tab[9]= weight_h264_pixels2x2_c; c->biweight_h264_pixels_tab[0]= biweight_h264_pixels16x16_c; c->biweight_h264_pixels_tab[1]= biweight_h264_pixels16x8_c; c->biweight_h264_pixels_tab[2]= biweight_h264_pixels8x16_c; c->biweight_h264_pixels_tab[3]= biweight_h264_pixels8x8_c; c->biweight_h264_pixels_tab[4]= biweight_h264_pixels8x4_c; c->biweight_h264_pixels_tab[5]= biweight_h264_pixels4x8_c; c->biweight_h264_pixels_tab[6]= biweight_h264_pixels4x4_c; c->biweight_h264_pixels_tab[7]= biweight_h264_pixels4x2_c; c->biweight_h264_pixels_tab[8]= biweight_h264_pixels2x4_c; c->biweight_h264_pixels_tab[9]= biweight_h264_pixels2x2_c; c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_c; c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_c; c->h264_v_loop_filter_luma_intra= h264_v_loop_filter_luma_intra_c; c->h264_h_loop_filter_luma_intra= h264_h_loop_filter_luma_intra_c; c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_c; c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_c; c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_c; c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_c; c->h264_loop_filter_strength= NULL; if (ARCH_ARM) ff_h264dsp_init_arm(c); if (HAVE_ALTIVEC) ff_h264dsp_init_ppc(c); if (HAVE_MMX) ff_h264dsp_init_x86(c); }
123linslouis-android-video-cutter
jni/libavcodec/h264dsp.c
C
asf20
11,688
/*
 * KMVC decoder
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Karl Morton's Video Codec decoder
 */

#include <stdio.h>
#include <stdlib.h>

#include "avcodec.h"
#include "bytestream.h"

/* frame header flag bits */
#define KMVC_KEYFRAME 0x80
#define KMVC_PALETTE  0x40
#define KMVC_METHOD   0x0F

/*
 * Decoder context
 */
typedef struct KmvcContext {
    AVCodecContext *avctx;
    AVFrame pic;

    int setpal;             /* nonzero when the palette must be re-exported */
    int palsize;            /* number of palette entries carried per frame */
    uint32_t pal[256];      /* current palette, 0xRRGGBB */
    uint8_t *cur, *prev;    /* current/previous 320x200 work buffers */
    uint8_t *frm0, *frm1;   /* the two buffers cur/prev flip between */
} KmvcContext;

/* tiny MSB-first bit reader over a byte pointer */
typedef struct BitBuf {
    int bits;
    int bitbuf;
} BitBuf;

/* pixel access into a 320-pixel-wide work buffer */
#define BLK(data, x, y)  data[(x) + (y) * 320]

#define kmvc_init_getbits(bb, src)  bb.bits = 7; bb.bitbuf = *src++;

/* read one bit (MSB first), refilling from src when the byte is drained */
#define kmvc_getbit(bb, src, res) {\
    res = 0; \
    if (bb.bitbuf & (1 << bb.bits)) res = 1; \
    bb.bits--; \
    if(bb.bits == -1) { \
        bb.bitbuf = *src++; \
        bb.bits = 7; \
    } \
}

/*
 * Decode an intra frame: a quadtree of 8x8 -> 4x4 -> 2x2 blocks where each
 * node is either filled with one value, copied from an already decoded
 * location of the SAME frame (motion packed as 4+4 bits), or, at 2x2 level,
 * read as four literal pixels.
 */
static void kmvc_decode_intra_8x8(KmvcContext * ctx, const uint8_t * src, int w, int h)
{
    BitBuf bb;
    int res, val;
    int i, j;
    int bx, by;
    int l0x, l1x, l0y, l1y;
    int mx, my;

    kmvc_init_getbits(bb, src);

    for (by = 0; by < h; by += 8)
        for (bx = 0; bx < w; bx += 8) {
            kmvc_getbit(bb, src, res);
            if (!res) {         // fill whole 8x8 block
                val = *src++;
                for (i = 0; i < 64; i++)
                    BLK(ctx->cur, bx + (i & 0x7), by + (i >> 3)) = val;
            } else {            // handle four 4x4 subblocks
                for (i = 0; i < 4; i++) {
                    l0x = bx + (i & 1) * 4;
                    l0y = by + (i & 2) * 2;
                    kmvc_getbit(bb, src, res);
                    if (!res) {
                        kmvc_getbit(bb, src, res);
                        if (!res) {     // fill whole 4x4 block
                            val = *src++;
                            for (j = 0; j < 16; j++)
                                BLK(ctx->cur, l0x + (j & 3), l0y + (j >> 2)) = val;
                        } else {        // copy block from already decoded place
                            val = *src++;
                            mx = val & 0xF;
                            my = val >> 4;
                            for (j = 0; j < 16; j++)
                                BLK(ctx->cur, l0x + (j & 3), l0y + (j >> 2)) =
                                    BLK(ctx->cur, l0x + (j & 3) - mx, l0y + (j >> 2) - my);
                        }
                    } else {    // descend to 2x2 sub-sub-blocks
                        for (j = 0; j < 4; j++) {
                            l1x = l0x + (j & 1) * 2;
                            l1y = l0y + (j & 2);
                            kmvc_getbit(bb, src, res);
                            if (!res) {
                                kmvc_getbit(bb, src, res);
                                if (!res) {     // fill whole 2x2 block
                                    val = *src++;
                                    BLK(ctx->cur, l1x, l1y) = val;
                                    BLK(ctx->cur, l1x + 1, l1y) = val;
                                    BLK(ctx->cur, l1x, l1y + 1) = val;
                                    BLK(ctx->cur, l1x + 1, l1y + 1) = val;
                                } else {        // copy block from already decoded place
                                    val = *src++;
                                    mx = val & 0xF;
                                    my = val >> 4;
                                    BLK(ctx->cur, l1x, l1y) = BLK(ctx->cur, l1x - mx, l1y - my);
                                    BLK(ctx->cur, l1x + 1, l1y) =
                                        BLK(ctx->cur, l1x + 1 - mx, l1y - my);
                                    BLK(ctx->cur, l1x, l1y + 1) =
                                        BLK(ctx->cur, l1x - mx, l1y + 1 - my);
                                    BLK(ctx->cur, l1x + 1, l1y + 1) =
                                        BLK(ctx->cur, l1x + 1 - mx, l1y + 1 - my);
                                }
                            } else {    // read values for block
                                BLK(ctx->cur, l1x, l1y) = *src++;
                                BLK(ctx->cur, l1x + 1, l1y) = *src++;
                                BLK(ctx->cur, l1x, l1y + 1) = *src++;
                                BLK(ctx->cur, l1x + 1, l1y + 1) = *src++;
                            }
                        }
                    }
                }
            }
        }
}

/*
 * Decode an inter frame. Same quadtree as the intra case, but "copy" nodes
 * reference the PREVIOUS frame with signed motion (each 4-bit component is
 * biased by -8), and an extra top-level mode copies an 8x8 block unchanged
 * from the previous frame.
 */
static void kmvc_decode_inter_8x8(KmvcContext * ctx, const uint8_t * src, int w, int h)
{
    BitBuf bb;
    int res, val;
    int i, j;
    int bx, by;
    int l0x, l1x, l0y, l1y;
    int mx, my;

    kmvc_init_getbits(bb, src);

    for (by = 0; by < h; by += 8)
        for (bx = 0; bx < w; bx += 8) {
            kmvc_getbit(bb, src, res);
            if (!res) {
                kmvc_getbit(bb, src, res);
                if (!res) {     // fill whole 8x8 block
                    val = *src++;
                    for (i = 0; i < 64; i++)
                        BLK(ctx->cur, bx + (i & 0x7), by + (i >> 3)) = val;
                } else {        // copy block from previous frame
                    for (i = 0; i < 64; i++)
                        BLK(ctx->cur, bx + (i & 0x7), by + (i >> 3)) =
                            BLK(ctx->prev, bx + (i & 0x7), by + (i >> 3));
                }
            } else {            // handle four 4x4 subblocks
                for (i = 0; i < 4; i++) {
                    l0x = bx + (i & 1) * 4;
                    l0y = by + (i & 2) * 2;
                    kmvc_getbit(bb, src, res);
                    if (!res) {
                        kmvc_getbit(bb, src, res);
                        if (!res) {     // fill whole 4x4 block
                            val = *src++;
                            for (j = 0; j < 16; j++)
                                BLK(ctx->cur, l0x + (j & 3), l0y + (j >> 2)) = val;
                        } else {        // copy block
                            val = *src++;
                            mx = (val & 0xF) - 8;
                            my = (val >> 4) - 8;
                            for (j = 0; j < 16; j++)
                                BLK(ctx->cur, l0x + (j & 3), l0y + (j >> 2)) =
                                    BLK(ctx->prev, l0x + (j & 3) + mx, l0y + (j >> 2) + my);
                        }
                    } else {    // descend to 2x2 sub-sub-blocks
                        for (j = 0; j < 4; j++) {
                            l1x = l0x + (j & 1) * 2;
                            l1y = l0y + (j & 2);
                            kmvc_getbit(bb, src, res);
                            if (!res) {
                                kmvc_getbit(bb, src, res);
                                if (!res) {     // fill whole 2x2 block
                                    val = *src++;
                                    BLK(ctx->cur, l1x, l1y) = val;
                                    BLK(ctx->cur, l1x + 1, l1y) = val;
                                    BLK(ctx->cur, l1x, l1y + 1) = val;
                                    BLK(ctx->cur, l1x + 1, l1y + 1) = val;
                                } else {        // copy block
                                    val = *src++;
                                    mx = (val & 0xF) - 8;
                                    my = (val >> 4) - 8;
                                    BLK(ctx->cur, l1x, l1y) = BLK(ctx->prev, l1x + mx, l1y + my);
                                    BLK(ctx->cur, l1x + 1, l1y) =
                                        BLK(ctx->prev, l1x + 1 + mx, l1y + my);
                                    BLK(ctx->cur, l1x, l1y + 1) =
                                        BLK(ctx->prev, l1x + mx, l1y + 1 + my);
                                    BLK(ctx->cur, l1x + 1, l1y + 1) =
                                        BLK(ctx->prev, l1x + 1 + mx, l1y + 1 + my);
                                }
                            } else {    // read values for block
                                BLK(ctx->cur, l1x, l1y) = *src++;
                                BLK(ctx->cur, l1x + 1, l1y) = *src++;
                                BLK(ctx->cur, l1x, l1y + 1) = *src++;
                                BLK(ctx->cur, l1x + 1, l1y + 1) = *src++;
                            }
                        }
                    }
                }
            }
        }
}

/*
 * Decode one packet: parse the header byte, handle palette updates, decode
 * pixels into the 320x200 work buffer with the selected method, copy the
 * visible WxH region into the output frame, and flip the work buffers.
 */
static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    KmvcContext *const ctx = avctx->priv_data;
    uint8_t *out, *src;
    int i;
    int header;
    int blocksize;

    if (ctx->pic.data[0])
        avctx->release_buffer(avctx, &ctx->pic);

    ctx->pic.reference = 1;
    ctx->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
    if (avctx->get_buffer(avctx, &ctx->pic) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    header = *buf++;

    /* blocksize 127 is really palette change event */
    if (buf[0] == 127) {
        buf += 3;
        for (i = 0; i < 127; i++) {
            /* NOTE(review): index base (header & 0x81) and the RB24 read with
             * a 4-byte step look suspicious — verify against reference files */
            ctx->pal[i + (header & 0x81)] = AV_RB24(buf);
            buf += 4;
        }
        buf -= 127 * 4 + 3;     /* rewind; the event is re-read as payload */
    }

    if (header & KMVC_KEYFRAME) {
        ctx->pic.key_frame = 1;
        ctx->pic.pict_type = FF_I_TYPE;
    } else {
        ctx->pic.key_frame = 0;
        ctx->pic.pict_type = FF_P_TYPE;
    }

    /* if palette has been changed, copy it from palctrl */
    if (ctx->avctx->palctrl && ctx->avctx->palctrl->palette_changed) {
        memcpy(ctx->pal, ctx->avctx->palctrl->palette, AVPALETTE_SIZE);
        ctx->setpal = 1;
        ctx->avctx->palctrl->palette_changed = 0;
    }

    if (header & KMVC_PALETTE) {
        ctx->pic.palette_has_changed = 1;
        // palette starts from index 1 and has 127 entries
        for (i = 1; i <= ctx->palsize; i++) {
            ctx->pal[i] = bytestream_get_be24(&buf);
        }
    }

    if (ctx->setpal) {
        ctx->setpal = 0;
        ctx->pic.palette_has_changed = 1;
    }

    /* make the palette available on the way out */
    memcpy(ctx->pic.data[1], ctx->pal, 1024);

    blocksize = *buf++;

    if (blocksize != 8 && blocksize != 127) {
        av_log(avctx, AV_LOG_ERROR, "Block size = %i\n", blocksize);
        return -1;
    }
    memset(ctx->cur, 0, 320 * 200);
    switch (header & KMVC_METHOD) {
    case 0:
    case 1: // used in palette changed event
        memcpy(ctx->cur, ctx->prev, 320 * 200);
        break;
    case 3:
        kmvc_decode_intra_8x8(ctx, buf, avctx->width, avctx->height);
        break;
    case 4:
        kmvc_decode_inter_8x8(ctx, buf, avctx->width, avctx->height);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown compression method %i\n", header & KMVC_METHOD);
        return -1;
    }

    /* copy the visible region out of the fixed-pitch work buffer */
    out = ctx->pic.data[0];
    src = ctx->cur;
    for (i = 0; i < avctx->height; i++) {
        memcpy(out, src, avctx->width);
        src += 320;
        out += ctx->pic.linesize[0];
    }

    /* flip buffers */
    if (ctx->cur == ctx->frm0) {
        ctx->cur = ctx->frm1;
        ctx->prev = ctx->frm0;
    } else {
        ctx->cur = ctx->frm0;
        ctx->prev = ctx->frm1;
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame *) data = ctx->pic;

    /* always report that the buffer was completely consumed */
    return buf_size;
}

/*
 * Init kmvc decoder: allocate the two 320x200 work buffers, build a default
 * greyscale palette, and read palette size / initial palette from extradata.
 */
static av_cold int decode_init(AVCodecContext * avctx)
{
    KmvcContext *const c = avctx->priv_data;
    int i;

    c->avctx = avctx;

    if (avctx->width > 320 || avctx->height > 200) {
        av_log(avctx, AV_LOG_ERROR, "KMVC supports frames <= 320x200\n");
        return -1;
    }

    c->frm0 = av_mallocz(320 * 200);
    c->frm1 = av_mallocz(320 * 200);
    c->cur = c->frm0;
    c->prev = c->frm1;

    for (i = 0; i < 256; i++) {
        c->pal[i] = i * 0x10101;
    }

    if (avctx->extradata_size < 12) {
        av_log(NULL, 0, "Extradata missing, decoding may not work properly...\n");
        c->palsize = 127;
    } else {
        c->palsize = AV_RL16(avctx->extradata + 10);
    }

    if (avctx->extradata_size == 1036) {        // palette in extradata
        uint8_t *src = avctx->extradata + 12;
        for (i = 0; i < 256; i++) {
            c->pal[i] = AV_RL32(src);
            src += 4;
        }
        c->setpal = 1;
        if (c->avctx->palctrl) {
            c->avctx->palctrl->palette_changed = 0;
        }
    }

    avctx->pix_fmt = PIX_FMT_PAL8;

    return 0;
}

/*
 * Uninit kmvc decoder: free work buffers and release any pending frame.
 */
static av_cold int decode_end(AVCodecContext * avctx)
{
    KmvcContext *const c = avctx->priv_data;

    av_freep(&c->frm0);
    av_freep(&c->frm1);
    if (c->pic.data[0])
        avctx->release_buffer(avctx, &c->pic);

    return 0;
}

AVCodec kmvc_decoder = {
    "kmvc",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_KMVC,
    sizeof(KmvcContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1,
    .long_name = NULL_IF_CONFIG_SMALL("Karl Morton's video codec"),
};
123linslouis-android-video-cutter
jni/libavcodec/kmvc.c
C
asf20
13,926
/*
 * Copyright (C) 2008 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "parser.h"

/**
 * Pass-through parser for VP3/Theora/VP5/VP6 packets.
 *
 * Each packet already contains a whole frame, so the only job here is to
 * derive the picture type from the first byte (Theora uses bit 0x40 as the
 * inter-frame flag, the VP codecs use bit 0x80) and hand the buffer through
 * unmodified.
 */
static int parse(AVCodecParserContext *s,
                 AVCodecContext *avctx,
                 const uint8_t **poutbuf, int *poutbuf_size,
                 const uint8_t *buf, int buf_size)
{
    /* Parsers are invoked with buf_size == 0 (and possibly buf == NULL) to
     * flush at end of stream; the previous code dereferenced buf[0]
     * unconditionally, reading outside the (empty) buffer in that case. */
    if (buf_size == 0) {
        *poutbuf      = NULL;
        *poutbuf_size = 0;
        return 0;
    }

    if (avctx->codec_id == CODEC_ID_THEORA)
        s->pict_type = (buf[0] & 0x40) ? FF_P_TYPE : FF_I_TYPE;
    else
        s->pict_type = (buf[0] & 0x80) ? FF_P_TYPE : FF_I_TYPE;

    *poutbuf      = buf;
    *poutbuf_size = buf_size;
    return buf_size;
}

AVCodecParser vp3_parser = {
    { CODEC_ID_THEORA, CODEC_ID_VP3,
      CODEC_ID_VP6,    CODEC_ID_VP6F, CODEC_ID_VP6A },
    0,
    NULL,
    parse,
};
123linslouis-android-video-cutter
jni/libavcodec/vp3_parser.c
C
asf20
1,457
/*
 * DSP Group TrueSpeech compatible decoder
 * Copyright (c) 2005 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "avcodec.h"

#include "truespeech_data.h"

/**
 * @file
 * TrueSpeech decoder.
 */

/**
 * TrueSpeech decoder context
 */
typedef struct {
    /* input data */
    int16_t vector[8];      //< input vector: 5/5/4/4/4/3/3/3
    int offset1[2];         //< 8-bit value, used in one copying offset
    int offset2[4];         //< 7-bit value, encodes offsets for copying and for two-point filter
    int pulseoff[4];        //< 4-bit offset of pulse values block
    int pulsepos[4];        //< 27-bit variable, encodes 7 pulse positions
    int pulseval[4];        //< 7x2-bit pulse values
    int flag;               //< 1-bit flag, shows how to choose filters
    /* temporary data */
    int filtbuf[146];       // some big vector used for storing filters
    int prevfilt[8];        // filter from previous frame
    int16_t tmp1[8];        // coefficients for adding to out
    int16_t tmp2[8];        // coefficients for adding to out
    int16_t tmp3[8];        // coefficients for adding to out
    int16_t cvector[8];     // correlated input vector
    int filtval;            // gain value for one function
    int16_t newvec[60];     // tmp vector
    int16_t filters[32];    // filters for every subframe
} TSContext;

/* Select the output sample format; no per-stream state to set up. */
static av_cold int truespeech_decode_init(AVCodecContext * avctx)
{
//    TSContext *c = avctx->priv_data;

    avctx->sample_fmt = SAMPLE_FMT_S16;
    return 0;
}

/*
 * Unpack one 32-byte TrueSpeech frame (eight little-endian dwords) into the
 * decoder context fields: LSP codebook indices, copy offsets, pulse
 * positions/values and the filter interpolation flag.
 */
static void truespeech_read_frame(TSContext *dec, const uint8_t *input)
{
    uint32_t t;

    /* first dword */
    t = AV_RL32(input);
    input += 4;

    dec->flag = t & 1;

    dec->vector[0] = ts_codebook[0][(t >>  1) & 0x1F];
    dec->vector[1] = ts_codebook[1][(t >>  6) & 0x1F];
    dec->vector[2] = ts_codebook[2][(t >> 11) & 0xF];
    dec->vector[3] = ts_codebook[3][(t >> 15) & 0xF];
    dec->vector[4] = ts_codebook[4][(t >> 19) & 0xF];
    dec->vector[5] = ts_codebook[5][(t >> 23) & 0x7];
    dec->vector[6] = ts_codebook[6][(t >> 26) & 0x7];
    dec->vector[7] = ts_codebook[7][(t >> 29) & 0x7];

    /* second dword */
    t = AV_RL32(input);
    input += 4;

    dec->offset2[0] = (t >>  0) & 0x7F;
    dec->offset2[1] = (t >>  7) & 0x7F;
    dec->offset2[2] = (t >> 14) & 0x7F;
    dec->offset2[3] = (t >> 21) & 0x7F;

    dec->offset1[0] = ((t >> 28) & 0xF) << 4;

    /* third dword */
    t = AV_RL32(input);
    input += 4;

    dec->pulseval[0] = (t >>  0) & 0x3FFF;
    dec->pulseval[1] = (t >> 14) & 0x3FFF;

    dec->offset1[1] = (t >> 28) & 0x0F;

    /* fourth dword */
    t = AV_RL32(input);
    input += 4;

    dec->pulseval[2] = (t >>  0) & 0x3FFF;
    dec->pulseval[3] = (t >> 14) & 0x3FFF;

    dec->offset1[1] |= ((t >> 28) & 0x0F) << 4;

    /* fifth dword */
    t = AV_RL32(input);
    input += 4;

    dec->pulsepos[0] = (t >> 4) & 0x7FFFFFF;

    dec->pulseoff[0] = (t >> 0) & 0xF;

    dec->offset1[0] |= (t >> 31) & 1;

    /* sixth dword */
    t = AV_RL32(input);
    input += 4;

    dec->pulsepos[1] = (t >> 4) & 0x7FFFFFF;

    dec->pulseoff[1] = (t >> 0) & 0xF;

    dec->offset1[0] |= ((t >> 31) & 1) << 1;

    /* seventh dword */
    t = AV_RL32(input);
    input += 4;

    dec->pulsepos[2] = (t >> 4) & 0x7FFFFFF;

    dec->pulseoff[2] = (t >> 0) & 0xF;

    dec->offset1[0] |= ((t >> 31) & 1) << 2;

    /* eighth dword */
    t = AV_RL32(input);
    input += 4;

    dec->pulsepos[3] = (t >> 4) & 0x7FFFFFF;

    dec->pulseoff[3] = (t >> 0) & 0xF;

    dec->offset1[0] |= ((t >> 31) & 1) << 3;
}

/*
 * Convert the decoded reflection-style vector into the correlated filter
 * coefficients (cvector) used for this frame, scaled by the ts_230 table;
 * also latches vector[0] as the gain value.
 */
static void truespeech_correlate_filter(TSContext *dec)
{
    int16_t tmp[8];
    int i, j;

    for(i = 0; i < 8; i++){
        if(i > 0){
            memcpy(tmp, dec->cvector, i * 2);
            for(j = 0; j < i; j++)
                dec->cvector[j] = ((tmp[i - j - 1] * dec->vector[i]) +
                                   (dec->cvector[j] << 15) + 0x4000) >> 15;
        }
        dec->cvector[i] = (8 - dec->vector[i]) >> 3;
    }
    for(i = 0; i < 8; i++)
        dec->cvector[i] = (dec->cvector[i] * ts_230[i]) >> 15;

    dec->filtval = dec->vector[0];
}

/*
 * Build the four per-subframe filters (8 coefficients each) by
 * interpolating between the previous frame's filter and the current one;
 * `flag` selects whether the first half interpolates or just repeats the
 * previous filter.
 */
static void truespeech_filters_merge(TSContext *dec)
{
    int i;

    if(!dec->flag){
        for(i = 0; i < 8; i++){
            dec->filters[i + 0] = dec->prevfilt[i];
            dec->filters[i + 8] = dec->prevfilt[i];
        }
    }else{
        for(i = 0; i < 8; i++){
            dec->filters[i + 0]=(dec->cvector[i] * 21846 + dec->prevfilt[i] * 10923 + 16384) >> 15;
            dec->filters[i + 8]=(dec->cvector[i] * 10923 + dec->prevfilt[i] * 21846 + 16384) >> 15;
        }
    }
    for(i = 0; i < 8; i++){
        dec->filters[i + 16] = dec->cvector[i];
        dec->filters[i + 24] = dec->cvector[i];
    }
}

/*
 * Long-term prediction for one 60-sample subframe (quart): apply a
 * two-point filter over the history buffer at the decoded lag, producing
 * newvec. Offset 127 means "no contribution" (newvec zeroed).
 */
static void truespeech_apply_twopoint_filter(TSContext *dec, int quart)
{
    int16_t tmp[146 + 60], *ptr0, *ptr1;
    const int16_t *filter;
    int i, t, off;

    t = dec->offset2[quart];
    if(t == 127){
        memset(dec->newvec, 0, 60 * 2);
        return;
    }
    for(i = 0; i < 146; i++)
        tmp[i] = dec->filtbuf[i];
    off = (t / 25) + dec->offset1[quart >> 1] + 18;
    ptr0 = tmp + 145 - off;
    ptr1 = tmp + 146;
    filter = (const int16_t*)ts_240 + (t % 25) * 2;
    for(i = 0; i < 60; i++){
        t = (ptr0[0] * filter[0] + ptr0[1] * filter[1] + 0x2000) >> 14;
        ptr0++;
        dec->newvec[i] = t;
        ptr1[i] = t;
    }
}

/*
 * Place the seven excitation pulses for one subframe into out[0..59].
 * Pulse amplitudes come from ts_562; positions are decoded from the
 * 27-bit pulsepos field, split 3 pulses / 4 pulses over the two halves.
 */
static void truespeech_place_pulses(TSContext *dec, int16_t *out, int quart)
{
    int16_t tmp[7];
    int i, j, t;
    const int16_t *ptr1;
    int16_t *ptr2;
    int coef;

    memset(out, 0, 60 * 2);
    for(i = 0; i < 7; i++) {
        t = dec->pulseval[quart] & 3;
        dec->pulseval[quart] >>= 2;
        tmp[6 - i] = ts_562[dec->pulseoff[quart] * 4 + t];
    }

    coef = dec->pulsepos[quart] >> 15;
    ptr1 = (const int16_t*)ts_140 + 30;
    ptr2 = tmp;
    for(i = 0, j = 3; (i < 30) && (j > 0); i++){
        t = *ptr1++;
        if(coef >= t)
            coef -= t;
        else{
            out[i] = *ptr2++;
            ptr1 += 30;
            j--;
        }
    }
    coef = dec->pulsepos[quart] & 0x7FFF;
    ptr1 = (const int16_t*)ts_140;
    for(i = 30, j = 4; (i < 60) && (j > 0); i++){
        t = *ptr1++;
        if(coef >= t)
            coef -= t;
        else{
            out[i] = *ptr2++;
            ptr1 += 30;
            j--;
        }
    }
}

/*
 * Shift the LTP history buffer by one subframe and append the new
 * excitation (pulses + attenuated two-point-filter output); also adds the
 * LTP contribution into out.
 */
static void truespeech_update_filters(TSContext *dec, int16_t *out, int quart)
{
    int i;

    for(i = 0; i < 86; i++)
        dec->filtbuf[i] = dec->filtbuf[i + 60];
    for(i = 0; i < 60; i++){
        dec->filtbuf[i + 86] = out[i] + dec->newvec[i] - (dec->newvec[i] >> 3);
        out[i] += dec->newvec[i];
    }
}

/*
 * Short-term synthesis for one subframe: three cascaded 8-tap fixed-point
 * filter passes (states tmp1/tmp2/tmp3) over out[0..59], using the
 * subframe's coefficients and the frame gain (filtval).
 */
static void truespeech_synth(TSContext *dec, int16_t *out, int quart)
{
    int i,k;
    int t[8];
    int16_t *ptr0, *ptr1;

    ptr0 = dec->tmp1;
    ptr1 = dec->filters + quart * 8;
    for(i = 0; i < 60; i++){
        int sum = 0;
        for(k = 0; k < 8; k++)
            sum += ptr0[k] * ptr1[k];
        sum = (sum + (out[i] << 12) + 0x800) >> 12;
        out[i] = av_clip(sum, -0x7FFE, 0x7FFE);
        for(k = 7; k > 0; k--)
            ptr0[k] = ptr0[k - 1];
        ptr0[0] = out[i];
    }

    for(i = 0; i < 8; i++)
        t[i] = (ts_5E2[i] * ptr1[i]) >> 15;

    ptr0 = dec->tmp2;
    for(i = 0; i < 60; i++){
        int sum = 0;
        for(k = 0; k < 8; k++)
            sum += ptr0[k] * t[k];
        for(k = 7; k > 0; k--)
            ptr0[k] = ptr0[k - 1];
        ptr0[0] = out[i];
        out[i] = ((out[i] << 12) - sum) >> 12;
    }

    for(i = 0; i < 8; i++)
        t[i] = (ts_5F2[i] * ptr1[i]) >> 15;

    ptr0 = dec->tmp3;
    for(i = 0; i < 60; i++){
        int sum = out[i] << 12;
        for(k = 0; k < 8; k++)
            sum += ptr0[k] * t[k];
        for(k = 7; k > 0; k--)
            ptr0[k] = ptr0[k - 1];
        ptr0[0] = av_clip((sum + 0x800) >> 12, -0x7FFE, 0x7FFE);

        sum = ((ptr0[1] * (dec->filtval - (dec->filtval >> 2))) >> 4) + sum;
        sum = sum - (sum >> 3);
        out[i] = av_clip((sum + 0x800) >> 12, -0x7FFE, 0x7FFE);
    }
}

/* Remember this frame's correlated filter for next frame's interpolation. */
static void truespeech_save_prevvec(TSContext *c)
{
    int i;

    for(i = 0; i < 8; i++)
        c->prevfilt[i] = c->cvector[i];
}

/*
 * Decode as many whole 32-byte frames as fit in both the input packet and
 * the caller's output buffer; each frame yields 240 16-bit samples
 * (4 subframes of 60). Returns the number of input bytes consumed.
 */
static int truespeech_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    TSContext *c = avctx->priv_data;

    int i, j;
    short *samples = data;
    int consumed = 0;
    int16_t out_buf[240];
    int iterations;

    if (!buf_size)
        return 0;

    /* 32 input bytes -> 480 output bytes per frame */
    iterations = FFMIN(buf_size / 32, *data_size / 480);
    for(j = 0; j < iterations; j++) {
        truespeech_read_frame(c, buf + consumed);
        consumed += 32;

        truespeech_correlate_filter(c);
        truespeech_filters_merge(c);

        memset(out_buf, 0, 240 * 2);
        for(i = 0; i < 4; i++) {
            truespeech_apply_twopoint_filter(c, i);
            truespeech_place_pulses(c, out_buf + i * 60, i);
            truespeech_update_filters(c, out_buf + i * 60, i);
            truespeech_synth(c, out_buf + i * 60, i);
        }

        truespeech_save_prevvec(c);

        /* finally output decoded frame */
        for(i = 0; i < 240; i++)
            *samples++ = out_buf[i];
    }

    /* 32 bytes in -> 480 bytes out, i.e. consumed * 15 */
    *data_size = consumed * 15;

    return consumed;
}

AVCodec truespeech_decoder = {
    "truespeech",
    AVMEDIA_TYPE_AUDIO,
    CODEC_ID_TRUESPEECH,
    sizeof(TSContext),
    truespeech_decode_init,
    NULL,
    NULL,
    truespeech_decode_frame,
    .long_name = NULL_IF_CONFIG_SMALL("DSP Group TrueSpeech"),
};
123linslouis-android-video-cutter
jni/libavcodec/truespeech.c
C
asf20
10,402
/*
 * CCITT Fax Group 3 and 4 decompression
 * Copyright (c) 2008 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * CCITT Fax Group 3 and 4 decompression
 * @file
 * @author Konstantin Shishkov
 */
#ifndef AVCODEC_FAXCOMPR_H
#define AVCODEC_FAXCOMPR_H

#include "avcodec.h"
#include "tiff.h"

/**
 * initialize the unpacker's static VLC tables (idempotent, call once)
 */
void ff_ccitt_unpack_init(void);

/**
 * unpack data compressed with CCITT Group 3 1/2-D or Group 4 method
 * into dst (one byte per pixel, `height` rows of `stride` bytes);
 * `compr` selects the variant and `opts` carries TIFF T4/T6 options
 */
int ff_ccitt_unpack(AVCodecContext *avctx,
                    const uint8_t *src, int srcsize,
                    uint8_t *dst, int height, int stride,
                    enum TiffCompr compr, int opts);

#endif /* AVCODEC_FAXCOMPR_H */
123linslouis-android-video-cutter
jni/libavcodec/faxcompr.h
C
asf20
1,420
/* * FLI/FLC Animation Video Decoder * Copyright (C) 2003, 2004 the ffmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Autodesk Animator FLI/FLC Video Decoder * by Mike Melanson (melanson@pcisys.net) * for more information on the .fli/.flc file format and all of its many * variations, visit: * http://www.compuphase.com/flic.htm * * This decoder outputs PAL8/RGB555/RGB565 and maybe one day RGB24 * colorspace data, depending on the FLC. To use this decoder, be * sure that your demuxer sends the FLI file header to the decoder via * the extradata chunk in AVCodecContext. The chunk should be 128 bytes * large. The only exception is for FLI files from the game "Magic Carpet", * in which the header is only 12 bytes. 
*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "libavutil/intreadwrite.h" #include "avcodec.h" #define FLI_256_COLOR 4 #define FLI_DELTA 7 #define FLI_COLOR 11 #define FLI_LC 12 #define FLI_BLACK 13 #define FLI_BRUN 15 #define FLI_COPY 16 #define FLI_MINI 18 #define FLI_DTA_BRUN 25 #define FLI_DTA_COPY 26 #define FLI_DTA_LC 27 #define FLI_TYPE_CODE (0xAF11) #define FLC_FLX_TYPE_CODE (0xAF12) #define FLC_DTA_TYPE_CODE (0xAF44) /* Marks an "Extended FLC" comes from Dave's Targa Animator (DTA) */ #define FLC_MAGIC_CARPET_SYNTHETIC_TYPE_CODE (0xAF13) #define CHECK_PIXEL_PTR(n) \ if (pixel_ptr + n > pixel_limit) { \ av_log (s->avctx, AV_LOG_INFO, "Problem: pixel_ptr >= pixel_limit (%d >= %d)\n", \ pixel_ptr + n, pixel_limit); \ return -1; \ } \ typedef struct FlicDecodeContext { AVCodecContext *avctx; AVFrame frame; unsigned int palette[256]; int new_palette; int fli_type; /* either 0xAF11 or 0xAF12, affects palette resolution */ } FlicDecodeContext; static av_cold int flic_decode_init(AVCodecContext *avctx) { FlicDecodeContext *s = avctx->priv_data; unsigned char *fli_header = (unsigned char *)avctx->extradata; int depth; s->avctx = avctx; s->fli_type = AV_RL16(&fli_header[4]); /* Might be overridden if a Magic Carpet FLC */ depth = 0; if (s->avctx->extradata_size == 12) { /* special case for magic carpet FLIs */ s->fli_type = FLC_MAGIC_CARPET_SYNTHETIC_TYPE_CODE; depth = 8; } else if (s->avctx->extradata_size != 128) { av_log(avctx, AV_LOG_ERROR, "Expected extradata of 12 or 128 bytes\n"); return -1; } else { depth = AV_RL16(&fli_header[12]); } if (depth == 0) { depth = 8; /* Some FLC generators set depth to zero, when they mean 8Bpp. 
Fix up here */ } if ((s->fli_type == FLC_FLX_TYPE_CODE) && (depth == 16)) { depth = 15; /* Original Autodesk FLX's say the depth is 16Bpp when it is really 15Bpp */ } switch (depth) { case 8 : avctx->pix_fmt = PIX_FMT_PAL8; break; case 15 : avctx->pix_fmt = PIX_FMT_RGB555; break; case 16 : avctx->pix_fmt = PIX_FMT_RGB565; break; case 24 : avctx->pix_fmt = PIX_FMT_BGR24; /* Supposedly BGR, but havent any files to test with */ av_log(avctx, AV_LOG_ERROR, "24Bpp FLC/FLX is unsupported due to no test files.\n"); return -1; break; default : av_log(avctx, AV_LOG_ERROR, "Unknown FLC/FLX depth of %d Bpp is unsupported.\n",depth); return -1; } s->frame.data[0] = NULL; s->new_palette = 0; return 0; } static int flic_decode_frame_8BPP(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size) { FlicDecodeContext *s = avctx->priv_data; int stream_ptr = 0; int stream_ptr_after_color_chunk; int pixel_ptr; int palette_ptr; unsigned char palette_idx1; unsigned char palette_idx2; unsigned int frame_size; int num_chunks; unsigned int chunk_size; int chunk_type; int i, j; int color_packets; int color_changes; int color_shift; unsigned char r, g, b; int lines; int compressed_lines; int starting_line; signed short line_packets; int y_ptr; int byte_run; int pixel_skip; int pixel_countdown; unsigned char *pixels; unsigned int pixel_limit; s->frame.reference = 1; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if (avctx->reget_buffer(avctx, &s->frame) < 0) { av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return -1; } pixels = s->frame.data[0]; pixel_limit = s->avctx->height * s->frame.linesize[0]; frame_size = AV_RL32(&buf[stream_ptr]); stream_ptr += 6; /* skip the magic number */ num_chunks = AV_RL16(&buf[stream_ptr]); stream_ptr += 10; /* skip padding */ frame_size -= 16; /* iterate through the chunks */ while ((frame_size > 0) && (num_chunks > 0)) { chunk_size = AV_RL32(&buf[stream_ptr]); stream_ptr 
+= 4; chunk_type = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; switch (chunk_type) { case FLI_256_COLOR: case FLI_COLOR: stream_ptr_after_color_chunk = stream_ptr + chunk_size - 6; /* check special case: If this file is from the Magic Carpet * game and uses 6-bit colors even though it reports 256-color * chunks in a 0xAF12-type file (fli_type is set to 0xAF13 during * initialization) */ if ((chunk_type == FLI_256_COLOR) && (s->fli_type != FLC_MAGIC_CARPET_SYNTHETIC_TYPE_CODE)) color_shift = 0; else color_shift = 2; /* set up the palette */ color_packets = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; palette_ptr = 0; for (i = 0; i < color_packets; i++) { /* first byte is how many colors to skip */ palette_ptr += buf[stream_ptr++]; /* next byte indicates how many entries to change */ color_changes = buf[stream_ptr++]; /* if there are 0 color changes, there are actually 256 */ if (color_changes == 0) color_changes = 256; for (j = 0; j < color_changes; j++) { unsigned int entry; /* wrap around, for good measure */ if ((unsigned)palette_ptr >= 256) palette_ptr = 0; r = buf[stream_ptr++] << color_shift; g = buf[stream_ptr++] << color_shift; b = buf[stream_ptr++] << color_shift; entry = (r << 16) | (g << 8) | b; if (s->palette[palette_ptr] != entry) s->new_palette = 1; s->palette[palette_ptr++] = entry; } } /* color chunks sometimes have weird 16-bit alignment issues; * therefore, take the hardline approach and set the stream_ptr * to the value calculated w.r.t. 
the size specified by the color * chunk header */ stream_ptr = stream_ptr_after_color_chunk; break; case FLI_DELTA: y_ptr = 0; compressed_lines = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; while (compressed_lines > 0) { line_packets = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; if ((line_packets & 0xC000) == 0xC000) { // line skip opcode line_packets = -line_packets; y_ptr += line_packets * s->frame.linesize[0]; } else if ((line_packets & 0xC000) == 0x4000) { av_log(avctx, AV_LOG_ERROR, "Undefined opcode (%x) in DELTA_FLI\n", line_packets); } else if ((line_packets & 0xC000) == 0x8000) { // "last byte" opcode pixel_ptr= y_ptr + s->frame.linesize[0] - 1; CHECK_PIXEL_PTR(0); pixels[pixel_ptr] = line_packets & 0xff; } else { compressed_lines--; pixel_ptr = y_ptr; CHECK_PIXEL_PTR(0); pixel_countdown = s->avctx->width; for (i = 0; i < line_packets; i++) { /* account for the skip bytes */ pixel_skip = buf[stream_ptr++]; pixel_ptr += pixel_skip; pixel_countdown -= pixel_skip; byte_run = (signed char)(buf[stream_ptr++]); if (byte_run < 0) { byte_run = -byte_run; palette_idx1 = buf[stream_ptr++]; palette_idx2 = buf[stream_ptr++]; CHECK_PIXEL_PTR(byte_run * 2); for (j = 0; j < byte_run; j++, pixel_countdown -= 2) { pixels[pixel_ptr++] = palette_idx1; pixels[pixel_ptr++] = palette_idx2; } } else { CHECK_PIXEL_PTR(byte_run * 2); for (j = 0; j < byte_run * 2; j++, pixel_countdown--) { palette_idx1 = buf[stream_ptr++]; pixels[pixel_ptr++] = palette_idx1; } } } y_ptr += s->frame.linesize[0]; } } break; case FLI_LC: /* line compressed */ starting_line = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; y_ptr = 0; y_ptr += starting_line * s->frame.linesize[0]; compressed_lines = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; while (compressed_lines > 0) { pixel_ptr = y_ptr; CHECK_PIXEL_PTR(0); pixel_countdown = s->avctx->width; line_packets = buf[stream_ptr++]; if (line_packets > 0) { for (i = 0; i < line_packets; i++) { /* account for the skip bytes */ pixel_skip = buf[stream_ptr++]; 
pixel_ptr += pixel_skip; pixel_countdown -= pixel_skip; byte_run = (signed char)(buf[stream_ptr++]); if (byte_run > 0) { CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++, pixel_countdown--) { palette_idx1 = buf[stream_ptr++]; pixels[pixel_ptr++] = palette_idx1; } } else if (byte_run < 0) { byte_run = -byte_run; palette_idx1 = buf[stream_ptr++]; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++, pixel_countdown--) { pixels[pixel_ptr++] = palette_idx1; } } } } y_ptr += s->frame.linesize[0]; compressed_lines--; } break; case FLI_BLACK: /* set the whole frame to color 0 (which is usually black) */ memset(pixels, 0, s->frame.linesize[0] * s->avctx->height); break; case FLI_BRUN: /* Byte run compression: This chunk type only occurs in the first * FLI frame and it will update the entire frame. */ y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ stream_ptr++; pixel_countdown = s->avctx->width; while (pixel_countdown > 0) { byte_run = (signed char)(buf[stream_ptr++]); if (byte_run > 0) { palette_idx1 = buf[stream_ptr++]; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n", pixel_countdown, lines); } } else { /* copy bytes if byte_run < 0 */ byte_run = -byte_run; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { palette_idx1 = buf[stream_ptr++]; pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n", pixel_countdown, lines); } } } y_ptr += s->frame.linesize[0]; } break; case FLI_COPY: /* copy the chunk (uncompressed frame) */ if (chunk_size - 6 > s->avctx->width * s->avctx->height) { av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \ "bigger than image, skipping 
chunk\n", chunk_size - 6); stream_ptr += chunk_size - 6; } else { for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height; y_ptr += s->frame.linesize[0]) { memcpy(&pixels[y_ptr], &buf[stream_ptr], s->avctx->width); stream_ptr += s->avctx->width; } } break; case FLI_MINI: /* some sort of a thumbnail? disregard this chunk... */ stream_ptr += chunk_size - 6; break; default: av_log(avctx, AV_LOG_ERROR, "Unrecognized chunk type: %d\n", chunk_type); break; } frame_size -= chunk_size; num_chunks--; } /* by the end of the chunk, the stream ptr should equal the frame * size (minus 1, possibly); if it doesn't, issue a warning */ if ((stream_ptr != buf_size) && (stream_ptr != buf_size - 1)) av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \ "and final chunk ptr = %d\n", buf_size, stream_ptr); /* make the palette available on the way out */ memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE); if (s->new_palette) { s->frame.palette_has_changed = 1; s->new_palette = 0; } *data_size=sizeof(AVFrame); *(AVFrame*)data = s->frame; return buf_size; } static int flic_decode_frame_15_16BPP(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size) { /* Note, the only difference between the 15Bpp and 16Bpp */ /* Format is the pixel format, the packets are processed the same. 
*/ FlicDecodeContext *s = avctx->priv_data; int stream_ptr = 0; int pixel_ptr; unsigned char palette_idx1; unsigned int frame_size; int num_chunks; unsigned int chunk_size; int chunk_type; int i, j; int lines; int compressed_lines; signed short line_packets; int y_ptr; int byte_run; int pixel_skip; int pixel_countdown; unsigned char *pixels; int pixel; unsigned int pixel_limit; s->frame.reference = 1; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if (avctx->reget_buffer(avctx, &s->frame) < 0) { av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return -1; } pixels = s->frame.data[0]; pixel_limit = s->avctx->height * s->frame.linesize[0]; frame_size = AV_RL32(&buf[stream_ptr]); stream_ptr += 6; /* skip the magic number */ num_chunks = AV_RL16(&buf[stream_ptr]); stream_ptr += 10; /* skip padding */ frame_size -= 16; /* iterate through the chunks */ while ((frame_size > 0) && (num_chunks > 0)) { chunk_size = AV_RL32(&buf[stream_ptr]); stream_ptr += 4; chunk_type = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; switch (chunk_type) { case FLI_256_COLOR: case FLI_COLOR: /* For some reason, it seems that non-palettized flics do * include one of these chunks in their first frame. * Why I do not know, it seems rather extraneous. 
*/ /* av_log(avctx, AV_LOG_ERROR, "Unexpected Palette chunk %d in non-paletised FLC\n",chunk_type);*/ stream_ptr = stream_ptr + chunk_size - 6; break; case FLI_DELTA: case FLI_DTA_LC: y_ptr = 0; compressed_lines = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; while (compressed_lines > 0) { line_packets = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; if (line_packets < 0) { line_packets = -line_packets; y_ptr += line_packets * s->frame.linesize[0]; } else { compressed_lines--; pixel_ptr = y_ptr; CHECK_PIXEL_PTR(0); pixel_countdown = s->avctx->width; for (i = 0; i < line_packets; i++) { /* account for the skip bytes */ pixel_skip = buf[stream_ptr++]; pixel_ptr += (pixel_skip*2); /* Pixel is 2 bytes wide */ pixel_countdown -= pixel_skip; byte_run = (signed char)(buf[stream_ptr++]); if (byte_run < 0) { byte_run = -byte_run; pixel = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++, pixel_countdown -= 2) { *((signed short*)(&pixels[pixel_ptr])) = pixel; pixel_ptr += 2; } } else { CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++, pixel_countdown--) { *((signed short*)(&pixels[pixel_ptr])) = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; pixel_ptr += 2; } } } y_ptr += s->frame.linesize[0]; } } break; case FLI_LC: av_log(avctx, AV_LOG_ERROR, "Unexpected FLI_LC chunk in non-paletised FLC\n"); stream_ptr = stream_ptr + chunk_size - 6; break; case FLI_BLACK: /* set the whole frame to 0x0000 which is black in both 15Bpp and 16Bpp modes. 
*/ memset(pixels, 0x0000, s->frame.linesize[0] * s->avctx->height); break; case FLI_BRUN: y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ stream_ptr++; pixel_countdown = (s->avctx->width * 2); while (pixel_countdown > 0) { byte_run = (signed char)(buf[stream_ptr++]); if (byte_run > 0) { palette_idx1 = buf[stream_ptr++]; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) (linea%d)\n", pixel_countdown, lines); } } else { /* copy bytes if byte_run < 0 */ byte_run = -byte_run; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { palette_idx1 = buf[stream_ptr++]; pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n", pixel_countdown, lines); } } } /* Now FLX is strange, in that it is "byte" as opposed to "pixel" run length compressed. * This does not give us any good oportunity to perform word endian conversion * during decompression. So if it is required (i.e., this is not a LE target, we do * a second pass over the line here, swapping the bytes. 
*/ #if HAVE_BIGENDIAN pixel_ptr = y_ptr; pixel_countdown = s->avctx->width; while (pixel_countdown > 0) { *((signed short*)(&pixels[pixel_ptr])) = AV_RL16(&buf[pixel_ptr]); pixel_ptr += 2; } #endif y_ptr += s->frame.linesize[0]; } break; case FLI_DTA_BRUN: y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ stream_ptr++; pixel_countdown = s->avctx->width; /* Width is in pixels, not bytes */ while (pixel_countdown > 0) { byte_run = (signed char)(buf[stream_ptr++]); if (byte_run > 0) { pixel = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++) { *((signed short*)(&pixels[pixel_ptr])) = pixel; pixel_ptr += 2; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n", pixel_countdown); } } else { /* copy pixels if byte_run < 0 */ byte_run = -byte_run; CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++) { *((signed short*)(&pixels[pixel_ptr])) = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; pixel_ptr += 2; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n", pixel_countdown); } } } y_ptr += s->frame.linesize[0]; } break; case FLI_COPY: case FLI_DTA_COPY: /* copy the chunk (uncompressed frame) */ if (chunk_size - 6 > (unsigned int)(s->avctx->width * s->avctx->height)*2) { av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \ "bigger than image, skipping chunk\n", chunk_size - 6); stream_ptr += chunk_size - 6; } else { for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height; y_ptr += s->frame.linesize[0]) { pixel_countdown = s->avctx->width; pixel_ptr = 0; while (pixel_countdown > 0) { *((signed short*)(&pixels[y_ptr + pixel_ptr])) = AV_RL16(&buf[stream_ptr+pixel_ptr]); pixel_ptr += 2; pixel_countdown--; } stream_ptr += s->avctx->width*2; } } break; case FLI_MINI: /* some sort of 
a thumbnail? disregard this chunk... */ stream_ptr += chunk_size - 6; break; default: av_log(avctx, AV_LOG_ERROR, "Unrecognized chunk type: %d\n", chunk_type); break; } frame_size -= chunk_size; num_chunks--; } /* by the end of the chunk, the stream ptr should equal the frame * size (minus 1, possibly); if it doesn't, issue a warning */ if ((stream_ptr != buf_size) && (stream_ptr != buf_size - 1)) av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \ "and final chunk ptr = %d\n", buf_size, stream_ptr); *data_size=sizeof(AVFrame); *(AVFrame*)data = s->frame; return buf_size; } static int flic_decode_frame_24BPP(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size) { av_log(avctx, AV_LOG_ERROR, "24Bpp FLC Unsupported due to lack of test files.\n"); return -1; } static int flic_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; if (avctx->pix_fmt == PIX_FMT_PAL8) { return flic_decode_frame_8BPP(avctx, data, data_size, buf, buf_size); } else if ((avctx->pix_fmt == PIX_FMT_RGB555) || (avctx->pix_fmt == PIX_FMT_RGB565)) { return flic_decode_frame_15_16BPP(avctx, data, data_size, buf, buf_size); } else if (avctx->pix_fmt == PIX_FMT_BGR24) { return flic_decode_frame_24BPP(avctx, data, data_size, buf, buf_size); } /* Should not get here, ever as the pix_fmt is processed */ /* in flic_decode_init and the above if should deal with */ /* the finite set of possibilites allowable by here. */ /* But in case we do, just error out. 
*/ av_log(avctx, AV_LOG_ERROR, "Unknown FLC format, my science cannot explain how this happened.\n"); return -1; } static av_cold int flic_decode_end(AVCodecContext *avctx) { FlicDecodeContext *s = avctx->priv_data; if (s->frame.data[0]) avctx->release_buffer(avctx, &s->frame); return 0; } AVCodec flic_decoder = { "flic", AVMEDIA_TYPE_VIDEO, CODEC_ID_FLIC, sizeof(FlicDecodeContext), flic_decode_init, NULL, flic_decode_end, flic_decode_frame, CODEC_CAP_DR1, NULL, NULL, NULL, NULL, .long_name = NULL_IF_CONFIG_SMALL("Autodesk Animator Flic video"), };
123linslouis-android-video-cutter
jni/libavcodec/flicvideo.c
C
asf20
28,997
/* * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * H.264 / AVC / MPEG4 part10 prediction functions. * @author Michael Niedermayer <michaelni@gmx.at> */ #include "avcodec.h" #include "mpegvideo.h" #include "h264pred.h" static void pred4x4_vertical_c(uint8_t *src, uint8_t *topright, int stride){ const uint32_t a= ((uint32_t*)(src-stride))[0]; ((uint32_t*)(src+0*stride))[0]= a; ((uint32_t*)(src+1*stride))[0]= a; ((uint32_t*)(src+2*stride))[0]= a; ((uint32_t*)(src+3*stride))[0]= a; } static void pred4x4_horizontal_c(uint8_t *src, uint8_t *topright, int stride){ ((uint32_t*)(src+0*stride))[0]= src[-1+0*stride]*0x01010101; ((uint32_t*)(src+1*stride))[0]= src[-1+1*stride]*0x01010101; ((uint32_t*)(src+2*stride))[0]= src[-1+2*stride]*0x01010101; ((uint32_t*)(src+3*stride))[0]= src[-1+3*stride]*0x01010101; } static void pred4x4_dc_c(uint8_t *src, uint8_t *topright, int stride){ const int dc= ( src[-stride] + src[1-stride] + src[2-stride] + src[3-stride] + src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 4) >>3; ((uint32_t*)(src+0*stride))[0]= ((uint32_t*)(src+1*stride))[0]= ((uint32_t*)(src+2*stride))[0]= ((uint32_t*)(src+3*stride))[0]= dc* 
0x01010101; } static void pred4x4_left_dc_c(uint8_t *src, uint8_t *topright, int stride){ const int dc= ( src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 2) >>2; ((uint32_t*)(src+0*stride))[0]= ((uint32_t*)(src+1*stride))[0]= ((uint32_t*)(src+2*stride))[0]= ((uint32_t*)(src+3*stride))[0]= dc* 0x01010101; } static void pred4x4_top_dc_c(uint8_t *src, uint8_t *topright, int stride){ const int dc= ( src[-stride] + src[1-stride] + src[2-stride] + src[3-stride] + 2) >>2; ((uint32_t*)(src+0*stride))[0]= ((uint32_t*)(src+1*stride))[0]= ((uint32_t*)(src+2*stride))[0]= ((uint32_t*)(src+3*stride))[0]= dc* 0x01010101; } static void pred4x4_128_dc_c(uint8_t *src, uint8_t *topright, int stride){ ((uint32_t*)(src+0*stride))[0]= ((uint32_t*)(src+1*stride))[0]= ((uint32_t*)(src+2*stride))[0]= ((uint32_t*)(src+3*stride))[0]= 128U*0x01010101U; } #define LOAD_TOP_RIGHT_EDGE\ const int av_unused t4= topright[0];\ const int av_unused t5= topright[1];\ const int av_unused t6= topright[2];\ const int av_unused t7= topright[3];\ #define LOAD_DOWN_LEFT_EDGE\ const int av_unused l4= src[-1+4*stride];\ const int av_unused l5= src[-1+5*stride];\ const int av_unused l6= src[-1+6*stride];\ const int av_unused l7= src[-1+7*stride];\ #define LOAD_LEFT_EDGE\ const int av_unused l0= src[-1+0*stride];\ const int av_unused l1= src[-1+1*stride];\ const int av_unused l2= src[-1+2*stride];\ const int av_unused l3= src[-1+3*stride];\ #define LOAD_TOP_EDGE\ const int av_unused t0= src[ 0-1*stride];\ const int av_unused t1= src[ 1-1*stride];\ const int av_unused t2= src[ 2-1*stride];\ const int av_unused t3= src[ 3-1*stride];\ static void pred4x4_down_right_c(uint8_t *src, uint8_t *topright, int stride){ const int lt= src[-1-1*stride]; LOAD_TOP_EDGE LOAD_LEFT_EDGE src[0+3*stride]=(l3 + 2*l2 + l1 + 2)>>2; src[0+2*stride]= src[1+3*stride]=(l2 + 2*l1 + l0 + 2)>>2; src[0+1*stride]= src[1+2*stride]= src[2+3*stride]=(l1 + 2*l0 + lt + 2)>>2; src[0+0*stride]= src[1+1*stride]= 
src[2+2*stride]= src[3+3*stride]=(l0 + 2*lt + t0 + 2)>>2; src[1+0*stride]= src[2+1*stride]= src[3+2*stride]=(lt + 2*t0 + t1 + 2)>>2; src[2+0*stride]= src[3+1*stride]=(t0 + 2*t1 + t2 + 2)>>2; src[3+0*stride]=(t1 + 2*t2 + t3 + 2)>>2; } static void pred4x4_down_left_c(uint8_t *src, uint8_t *topright, int stride){ LOAD_TOP_EDGE LOAD_TOP_RIGHT_EDGE // LOAD_LEFT_EDGE src[0+0*stride]=(t0 + t2 + 2*t1 + 2)>>2; src[1+0*stride]= src[0+1*stride]=(t1 + t3 + 2*t2 + 2)>>2; src[2+0*stride]= src[1+1*stride]= src[0+2*stride]=(t2 + t4 + 2*t3 + 2)>>2; src[3+0*stride]= src[2+1*stride]= src[1+2*stride]= src[0+3*stride]=(t3 + t5 + 2*t4 + 2)>>2; src[3+1*stride]= src[2+2*stride]= src[1+3*stride]=(t4 + t6 + 2*t5 + 2)>>2; src[3+2*stride]= src[2+3*stride]=(t5 + t7 + 2*t6 + 2)>>2; src[3+3*stride]=(t6 + 3*t7 + 2)>>2; } static void pred4x4_down_left_svq3_c(uint8_t *src, uint8_t *topright, int stride){ LOAD_TOP_EDGE LOAD_LEFT_EDGE const av_unused int unu0= t0; const av_unused int unu1= l0; src[0+0*stride]=(l1 + t1)>>1; src[1+0*stride]= src[0+1*stride]=(l2 + t2)>>1; src[2+0*stride]= src[1+1*stride]= src[0+2*stride]= src[3+0*stride]= src[2+1*stride]= src[1+2*stride]= src[0+3*stride]= src[3+1*stride]= src[2+2*stride]= src[1+3*stride]= src[3+2*stride]= src[2+3*stride]= src[3+3*stride]=(l3 + t3)>>1; } static void pred4x4_down_left_rv40_c(uint8_t *src, uint8_t *topright, int stride){ LOAD_TOP_EDGE LOAD_TOP_RIGHT_EDGE LOAD_LEFT_EDGE LOAD_DOWN_LEFT_EDGE src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3; src[1+0*stride]= src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3; src[2+0*stride]= src[1+1*stride]= src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + l4 + 2*l3 + 2)>>3; src[3+0*stride]= src[2+1*stride]= src[1+2*stride]= src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3 + l5 + 2*l4 + 2)>>3; src[3+1*stride]= src[2+2*stride]= src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l4 + l6 + 2*l5 + 2)>>3; src[3+2*stride]= src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l5 + l7 + 2*l6 + 2)>>3; src[3+3*stride]=(t6 + t7 + 1 + l6 + 
l7 + 1)>>2; } static void pred4x4_down_left_rv40_nodown_c(uint8_t *src, uint8_t *topright, int stride){ LOAD_TOP_EDGE LOAD_TOP_RIGHT_EDGE LOAD_LEFT_EDGE src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3; src[1+0*stride]= src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3; src[2+0*stride]= src[1+1*stride]= src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + 3*l3 + 2)>>3; src[3+0*stride]= src[2+1*stride]= src[1+2*stride]= src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3*4 + 2)>>3; src[3+1*stride]= src[2+2*stride]= src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l3*4 + 2)>>3; src[3+2*stride]= src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l3*4 + 2)>>3; src[3+3*stride]=(t6 + t7 + 1 + 2*l3 + 1)>>2; } static void pred4x4_vertical_right_c(uint8_t *src, uint8_t *topright, int stride){ const int lt= src[-1-1*stride]; LOAD_TOP_EDGE LOAD_LEFT_EDGE src[0+0*stride]= src[1+2*stride]=(lt + t0 + 1)>>1; src[1+0*stride]= src[2+2*stride]=(t0 + t1 + 1)>>1; src[2+0*stride]= src[3+2*stride]=(t1 + t2 + 1)>>1; src[3+0*stride]=(t2 + t3 + 1)>>1; src[0+1*stride]= src[1+3*stride]=(l0 + 2*lt + t0 + 2)>>2; src[1+1*stride]= src[2+3*stride]=(lt + 2*t0 + t1 + 2)>>2; src[2+1*stride]= src[3+3*stride]=(t0 + 2*t1 + t2 + 2)>>2; src[3+1*stride]=(t1 + 2*t2 + t3 + 2)>>2; src[0+2*stride]=(lt + 2*l0 + l1 + 2)>>2; src[0+3*stride]=(l0 + 2*l1 + l2 + 2)>>2; } static void pred4x4_vertical_left_c(uint8_t *src, uint8_t *topright, int stride){ LOAD_TOP_EDGE LOAD_TOP_RIGHT_EDGE src[0+0*stride]=(t0 + t1 + 1)>>1; src[1+0*stride]= src[0+2*stride]=(t1 + t2 + 1)>>1; src[2+0*stride]= src[1+2*stride]=(t2 + t3 + 1)>>1; src[3+0*stride]= src[2+2*stride]=(t3 + t4+ 1)>>1; src[3+2*stride]=(t4 + t5+ 1)>>1; src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2; src[1+1*stride]= src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2; src[2+1*stride]= src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2; src[3+1*stride]= src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2; src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2; } static void pred4x4_vertical_left_rv40(uint8_t *src, uint8_t *topright, int stride, 
const int l0, const int l1, const int l2, const int l3, const int l4){ LOAD_TOP_EDGE LOAD_TOP_RIGHT_EDGE src[0+0*stride]=(2*t0 + 2*t1 + l1 + 2*l2 + l3 + 4)>>3; src[1+0*stride]= src[0+2*stride]=(t1 + t2 + 1)>>1; src[2+0*stride]= src[1+2*stride]=(t2 + t3 + 1)>>1; src[3+0*stride]= src[2+2*stride]=(t3 + t4+ 1)>>1; src[3+2*stride]=(t4 + t5+ 1)>>1; src[0+1*stride]=(t0 + 2*t1 + t2 + l2 + 2*l3 + l4 + 4)>>3; src[1+1*stride]= src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2; src[2+1*stride]= src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2; src[3+1*stride]= src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2; src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2; } static void pred4x4_vertical_left_rv40_c(uint8_t *src, uint8_t *topright, int stride){ LOAD_LEFT_EDGE LOAD_DOWN_LEFT_EDGE pred4x4_vertical_left_rv40(src, topright, stride, l0, l1, l2, l3, l4); } static void pred4x4_vertical_left_rv40_nodown_c(uint8_t *src, uint8_t *topright, int stride){ LOAD_LEFT_EDGE pred4x4_vertical_left_rv40(src, topright, stride, l0, l1, l2, l3, l3); } static void pred4x4_horizontal_up_c(uint8_t *src, uint8_t *topright, int stride){ LOAD_LEFT_EDGE src[0+0*stride]=(l0 + l1 + 1)>>1; src[1+0*stride]=(l0 + 2*l1 + l2 + 2)>>2; src[2+0*stride]= src[0+1*stride]=(l1 + l2 + 1)>>1; src[3+0*stride]= src[1+1*stride]=(l1 + 2*l2 + l3 + 2)>>2; src[2+1*stride]= src[0+2*stride]=(l2 + l3 + 1)>>1; src[3+1*stride]= src[1+2*stride]=(l2 + 2*l3 + l3 + 2)>>2; src[3+2*stride]= src[1+3*stride]= src[0+3*stride]= src[2+2*stride]= src[2+3*stride]= src[3+3*stride]=l3; } static void pred4x4_horizontal_up_rv40_c(uint8_t *src, uint8_t *topright, int stride){ LOAD_LEFT_EDGE LOAD_DOWN_LEFT_EDGE LOAD_TOP_EDGE LOAD_TOP_RIGHT_EDGE src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3; src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3; src[2+0*stride]= src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3; src[3+0*stride]= src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3; src[2+1*stride]= src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3; src[3+1*stride]= 
src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3; src[3+2*stride]= src[1+3*stride]=(l3 + 2*l4 + l5 + 2)>>2; src[0+3*stride]= src[2+2*stride]=(t6 + t7 + l3 + l4 + 2)>>2; src[2+3*stride]=(l4 + l5 + 1)>>1; src[3+3*stride]=(l4 + 2*l5 + l6 + 2)>>2; } static void pred4x4_horizontal_up_rv40_nodown_c(uint8_t *src, uint8_t *topright, int stride){ LOAD_LEFT_EDGE LOAD_TOP_EDGE LOAD_TOP_RIGHT_EDGE src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3; src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3; src[2+0*stride]= src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3; src[3+0*stride]= src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3; src[2+1*stride]= src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3; src[3+1*stride]= src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3; src[3+2*stride]= src[1+3*stride]=l3; src[0+3*stride]= src[2+2*stride]=(t6 + t7 + 2*l3 + 2)>>2; src[2+3*stride]= src[3+3*stride]=l3; } static void pred4x4_horizontal_down_c(uint8_t *src, uint8_t *topright, int stride){ const int lt= src[-1-1*stride]; LOAD_TOP_EDGE LOAD_LEFT_EDGE src[0+0*stride]= src[2+1*stride]=(lt + l0 + 1)>>1; src[1+0*stride]= src[3+1*stride]=(l0 + 2*lt + t0 + 2)>>2; src[2+0*stride]=(lt + 2*t0 + t1 + 2)>>2; src[3+0*stride]=(t0 + 2*t1 + t2 + 2)>>2; src[0+1*stride]= src[2+2*stride]=(l0 + l1 + 1)>>1; src[1+1*stride]= src[3+2*stride]=(lt + 2*l0 + l1 + 2)>>2; src[0+2*stride]= src[2+3*stride]=(l1 + l2+ 1)>>1; src[1+2*stride]= src[3+3*stride]=(l0 + 2*l1 + l2 + 2)>>2; src[0+3*stride]=(l2 + l3 + 1)>>1; src[1+3*stride]=(l1 + 2*l2 + l3 + 2)>>2; } static void pred16x16_vertical_c(uint8_t *src, int stride){ int i; const uint32_t a= ((uint32_t*)(src-stride))[0]; const uint32_t b= ((uint32_t*)(src-stride))[1]; const uint32_t c= ((uint32_t*)(src-stride))[2]; const uint32_t d= ((uint32_t*)(src-stride))[3]; for(i=0; i<16; i++){ ((uint32_t*)(src+i*stride))[0]= a; ((uint32_t*)(src+i*stride))[1]= b; ((uint32_t*)(src+i*stride))[2]= c; ((uint32_t*)(src+i*stride))[3]= d; } } static void 
pred16x16_horizontal_c(uint8_t *src, int stride){ int i; for(i=0; i<16; i++){ ((uint32_t*)(src+i*stride))[0]= ((uint32_t*)(src+i*stride))[1]= ((uint32_t*)(src+i*stride))[2]= ((uint32_t*)(src+i*stride))[3]= src[-1+i*stride]*0x01010101; } } static void pred16x16_dc_c(uint8_t *src, int stride){ int i, dc=0; for(i=0;i<16; i++){ dc+= src[-1+i*stride]; } for(i=0;i<16; i++){ dc+= src[i-stride]; } dc= 0x01010101*((dc + 16)>>5); for(i=0; i<16; i++){ ((uint32_t*)(src+i*stride))[0]= ((uint32_t*)(src+i*stride))[1]= ((uint32_t*)(src+i*stride))[2]= ((uint32_t*)(src+i*stride))[3]= dc; } } static void pred16x16_left_dc_c(uint8_t *src, int stride){ int i, dc=0; for(i=0;i<16; i++){ dc+= src[-1+i*stride]; } dc= 0x01010101*((dc + 8)>>4); for(i=0; i<16; i++){ ((uint32_t*)(src+i*stride))[0]= ((uint32_t*)(src+i*stride))[1]= ((uint32_t*)(src+i*stride))[2]= ((uint32_t*)(src+i*stride))[3]= dc; } } static void pred16x16_top_dc_c(uint8_t *src, int stride){ int i, dc=0; for(i=0;i<16; i++){ dc+= src[i-stride]; } dc= 0x01010101*((dc + 8)>>4); for(i=0; i<16; i++){ ((uint32_t*)(src+i*stride))[0]= ((uint32_t*)(src+i*stride))[1]= ((uint32_t*)(src+i*stride))[2]= ((uint32_t*)(src+i*stride))[3]= dc; } } static void pred16x16_128_dc_c(uint8_t *src, int stride){ int i; for(i=0; i<16; i++){ ((uint32_t*)(src+i*stride))[0]= ((uint32_t*)(src+i*stride))[1]= ((uint32_t*)(src+i*stride))[2]= ((uint32_t*)(src+i*stride))[3]= 0x01010101U*128U; } } static inline void pred16x16_plane_compat_c(uint8_t *src, int stride, const int svq3, const int rv40){ int i, j, k; int a; uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; const uint8_t * const src0 = src+7-stride; const uint8_t *src1 = src+8*stride-1; const uint8_t *src2 = src1-2*stride; // == src+6*stride-1; int H = src0[1] - src0[-1]; int V = src1[0] - src2[ 0]; for(k=2; k<=8; ++k) { src1 += stride; src2 -= stride; H += k*(src0[k] - src0[-k]); V += k*(src1[0] - src2[ 0]); } if(svq3){ H = ( 5*(H/4) ) / 16; V = ( 5*(V/4) ) / 16; /* required for 100% accuracy */ i = H; H = V; V = 
i; }else if(rv40){ H = ( H + (H>>2) ) >> 4; V = ( V + (V>>2) ) >> 4; }else{ H = ( 5*H+32 ) >> 6; V = ( 5*V+32 ) >> 6; } a = 16*(src1[0] + src2[16] + 1) - 7*(V+H); for(j=16; j>0; --j) { int b = a; a += V; for(i=-16; i<0; i+=4) { src[16+i] = cm[ (b ) >> 5 ]; src[17+i] = cm[ (b+ H) >> 5 ]; src[18+i] = cm[ (b+2*H) >> 5 ]; src[19+i] = cm[ (b+3*H) >> 5 ]; b += 4*H; } src += stride; } } static void pred16x16_plane_c(uint8_t *src, int stride){ pred16x16_plane_compat_c(src, stride, 0, 0); } static void pred16x16_plane_svq3_c(uint8_t *src, int stride){ pred16x16_plane_compat_c(src, stride, 1, 0); } static void pred16x16_plane_rv40_c(uint8_t *src, int stride){ pred16x16_plane_compat_c(src, stride, 0, 1); } static void pred8x8_vertical_c(uint8_t *src, int stride){ int i; const uint32_t a= ((uint32_t*)(src-stride))[0]; const uint32_t b= ((uint32_t*)(src-stride))[1]; for(i=0; i<8; i++){ ((uint32_t*)(src+i*stride))[0]= a; ((uint32_t*)(src+i*stride))[1]= b; } } static void pred8x8_horizontal_c(uint8_t *src, int stride){ int i; for(i=0; i<8; i++){ ((uint32_t*)(src+i*stride))[0]= ((uint32_t*)(src+i*stride))[1]= src[-1+i*stride]*0x01010101; } } static void pred8x8_128_dc_c(uint8_t *src, int stride){ int i; for(i=0; i<8; i++){ ((uint32_t*)(src+i*stride))[0]= ((uint32_t*)(src+i*stride))[1]= 0x01010101U*128U; } } static void pred8x8_left_dc_c(uint8_t *src, int stride){ int i; int dc0, dc2; dc0=dc2=0; for(i=0;i<4; i++){ dc0+= src[-1+i*stride]; dc2+= src[-1+(i+4)*stride]; } dc0= 0x01010101*((dc0 + 2)>>2); dc2= 0x01010101*((dc2 + 2)>>2); for(i=0; i<4; i++){ ((uint32_t*)(src+i*stride))[0]= ((uint32_t*)(src+i*stride))[1]= dc0; } for(i=4; i<8; i++){ ((uint32_t*)(src+i*stride))[0]= ((uint32_t*)(src+i*stride))[1]= dc2; } } static void pred8x8_left_dc_rv40_c(uint8_t *src, int stride){ int i; int dc0; dc0=0; for(i=0;i<8; i++) dc0+= src[-1+i*stride]; dc0= 0x01010101*((dc0 + 4)>>3); for(i=0; i<8; i++){ ((uint32_t*)(src+i*stride))[0]= ((uint32_t*)(src+i*stride))[1]= dc0; } } static void 
pred8x8_top_dc_c(uint8_t *src, int stride){ int i; int dc0, dc1; dc0=dc1=0; for(i=0;i<4; i++){ dc0+= src[i-stride]; dc1+= src[4+i-stride]; } dc0= 0x01010101*((dc0 + 2)>>2); dc1= 0x01010101*((dc1 + 2)>>2); for(i=0; i<4; i++){ ((uint32_t*)(src+i*stride))[0]= dc0; ((uint32_t*)(src+i*stride))[1]= dc1; } for(i=4; i<8; i++){ ((uint32_t*)(src+i*stride))[0]= dc0; ((uint32_t*)(src+i*stride))[1]= dc1; } } static void pred8x8_top_dc_rv40_c(uint8_t *src, int stride){ int i; int dc0; dc0=0; for(i=0;i<8; i++) dc0+= src[i-stride]; dc0= 0x01010101*((dc0 + 4)>>3); for(i=0; i<8; i++){ ((uint32_t*)(src+i*stride))[0]= ((uint32_t*)(src+i*stride))[1]= dc0; } } static void pred8x8_dc_c(uint8_t *src, int stride){ int i; int dc0, dc1, dc2, dc3; dc0=dc1=dc2=0; for(i=0;i<4; i++){ dc0+= src[-1+i*stride] + src[i-stride]; dc1+= src[4+i-stride]; dc2+= src[-1+(i+4)*stride]; } dc3= 0x01010101*((dc1 + dc2 + 4)>>3); dc0= 0x01010101*((dc0 + 4)>>3); dc1= 0x01010101*((dc1 + 2)>>2); dc2= 0x01010101*((dc2 + 2)>>2); for(i=0; i<4; i++){ ((uint32_t*)(src+i*stride))[0]= dc0; ((uint32_t*)(src+i*stride))[1]= dc1; } for(i=4; i<8; i++){ ((uint32_t*)(src+i*stride))[0]= dc2; ((uint32_t*)(src+i*stride))[1]= dc3; } } //the following 4 function should not be optimized! 
static void pred8x8_mad_cow_dc_l0t(uint8_t *src, int stride){ pred8x8_top_dc_c(src, stride); pred4x4_dc_c(src, NULL, stride); } static void pred8x8_mad_cow_dc_0lt(uint8_t *src, int stride){ pred8x8_dc_c(src, stride); pred4x4_top_dc_c(src, NULL, stride); } static void pred8x8_mad_cow_dc_l00(uint8_t *src, int stride){ pred8x8_left_dc_c(src, stride); pred4x4_128_dc_c(src + 4*stride , NULL, stride); pred4x4_128_dc_c(src + 4*stride + 4, NULL, stride); } static void pred8x8_mad_cow_dc_0l0(uint8_t *src, int stride){ pred8x8_left_dc_c(src, stride); pred4x4_128_dc_c(src , NULL, stride); pred4x4_128_dc_c(src + 4, NULL, stride); } static void pred8x8_dc_rv40_c(uint8_t *src, int stride){ int i; int dc0=0; for(i=0;i<4; i++){ dc0+= src[-1+i*stride] + src[i-stride]; dc0+= src[4+i-stride]; dc0+= src[-1+(i+4)*stride]; } dc0= 0x01010101*((dc0 + 8)>>4); for(i=0; i<4; i++){ ((uint32_t*)(src+i*stride))[0]= dc0; ((uint32_t*)(src+i*stride))[1]= dc0; } for(i=4; i<8; i++){ ((uint32_t*)(src+i*stride))[0]= dc0; ((uint32_t*)(src+i*stride))[1]= dc0; } } static void pred8x8_plane_c(uint8_t *src, int stride){ int j, k; int a; uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; const uint8_t * const src0 = src+3-stride; const uint8_t *src1 = src+4*stride-1; const uint8_t *src2 = src1-2*stride; // == src+2*stride-1; int H = src0[1] - src0[-1]; int V = src1[0] - src2[ 0]; for(k=2; k<=4; ++k) { src1 += stride; src2 -= stride; H += k*(src0[k] - src0[-k]); V += k*(src1[0] - src2[ 0]); } H = ( 17*H+16 ) >> 5; V = ( 17*V+16 ) >> 5; a = 16*(src1[0] + src2[8]+1) - 3*(V+H); for(j=8; j>0; --j) { int b = a; a += V; src[0] = cm[ (b ) >> 5 ]; src[1] = cm[ (b+ H) >> 5 ]; src[2] = cm[ (b+2*H) >> 5 ]; src[3] = cm[ (b+3*H) >> 5 ]; src[4] = cm[ (b+4*H) >> 5 ]; src[5] = cm[ (b+5*H) >> 5 ]; src[6] = cm[ (b+6*H) >> 5 ]; src[7] = cm[ (b+7*H) >> 5 ]; src += stride; } } #define SRC(x,y) src[(x)+(y)*stride] #define PL(y) \ const int l##y = (SRC(-1,y-1) + 2*SRC(-1,y) + SRC(-1,y+1) + 2) >> 2; #define PREDICT_8x8_LOAD_LEFT \ const int 
l0 = ((has_topleft ? SRC(-1,-1) : SRC(-1,0)) \ + 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2; \ PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) \ const int l7 av_unused = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2 #define PT(x) \ const int t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2; #define PREDICT_8x8_LOAD_TOP \ const int t0 = ((has_topleft ? SRC(-1,-1) : SRC(0,-1)) \ + 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2; \ PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) \ const int t7 av_unused = ((has_topright ? SRC(8,-1) : SRC(7,-1)) \ + 2*SRC(7,-1) + SRC(6,-1) + 2) >> 2 #define PTR(x) \ t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2; #define PREDICT_8x8_LOAD_TOPRIGHT \ int t8, t9, t10, t11, t12, t13, t14, t15; \ if(has_topright) { \ PTR(8) PTR(9) PTR(10) PTR(11) PTR(12) PTR(13) PTR(14) \ t15 = (SRC(14,-1) + 3*SRC(15,-1) + 2) >> 2; \ } else t8=t9=t10=t11=t12=t13=t14=t15= SRC(7,-1); #define PREDICT_8x8_LOAD_TOPLEFT \ const int lt = (SRC(-1,0) + 2*SRC(-1,-1) + SRC(0,-1) + 2) >> 2 #define PREDICT_8x8_DC(v) \ int y; \ for( y = 0; y < 8; y++ ) { \ ((uint32_t*)src)[0] = \ ((uint32_t*)src)[1] = v; \ src += stride; \ } static void pred8x8l_128_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride) { PREDICT_8x8_DC(0x80808080); } static void pred8x8l_left_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride) { PREDICT_8x8_LOAD_LEFT; const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3) * 0x01010101; PREDICT_8x8_DC(dc); } static void pred8x8l_top_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride) { PREDICT_8x8_LOAD_TOP; const uint32_t dc = ((t0+t1+t2+t3+t4+t5+t6+t7+4) >> 3) * 0x01010101; PREDICT_8x8_DC(dc); } static void pred8x8l_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride) { PREDICT_8x8_LOAD_LEFT; PREDICT_8x8_LOAD_TOP; const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7 +t0+t1+t2+t3+t4+t5+t6+t7+8) >> 4) * 0x01010101; PREDICT_8x8_DC(dc); } static void pred8x8l_horizontal_c(uint8_t *src, int has_topleft, int has_topright, int stride) { 
PREDICT_8x8_LOAD_LEFT; #define ROW(y) ((uint32_t*)(src+y*stride))[0] =\ ((uint32_t*)(src+y*stride))[1] = 0x01010101 * l##y ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7); #undef ROW } static void pred8x8l_vertical_c(uint8_t *src, int has_topleft, int has_topright, int stride) { int y; PREDICT_8x8_LOAD_TOP; src[0] = t0; src[1] = t1; src[2] = t2; src[3] = t3; src[4] = t4; src[5] = t5; src[6] = t6; src[7] = t7; for( y = 1; y < 8; y++ ) *(uint64_t*)(src+y*stride) = *(uint64_t*)src; } static void pred8x8l_down_left_c(uint8_t *src, int has_topleft, int has_topright, int stride) { PREDICT_8x8_LOAD_TOP; PREDICT_8x8_LOAD_TOPRIGHT; SRC(0,0)= (t0 + 2*t1 + t2 + 2) >> 2; SRC(0,1)=SRC(1,0)= (t1 + 2*t2 + t3 + 2) >> 2; SRC(0,2)=SRC(1,1)=SRC(2,0)= (t2 + 2*t3 + t4 + 2) >> 2; SRC(0,3)=SRC(1,2)=SRC(2,1)=SRC(3,0)= (t3 + 2*t4 + t5 + 2) >> 2; SRC(0,4)=SRC(1,3)=SRC(2,2)=SRC(3,1)=SRC(4,0)= (t4 + 2*t5 + t6 + 2) >> 2; SRC(0,5)=SRC(1,4)=SRC(2,3)=SRC(3,2)=SRC(4,1)=SRC(5,0)= (t5 + 2*t6 + t7 + 2) >> 2; SRC(0,6)=SRC(1,5)=SRC(2,4)=SRC(3,3)=SRC(4,2)=SRC(5,1)=SRC(6,0)= (t6 + 2*t7 + t8 + 2) >> 2; SRC(0,7)=SRC(1,6)=SRC(2,5)=SRC(3,4)=SRC(4,3)=SRC(5,2)=SRC(6,1)=SRC(7,0)= (t7 + 2*t8 + t9 + 2) >> 2; SRC(1,7)=SRC(2,6)=SRC(3,5)=SRC(4,4)=SRC(5,3)=SRC(6,2)=SRC(7,1)= (t8 + 2*t9 + t10 + 2) >> 2; SRC(2,7)=SRC(3,6)=SRC(4,5)=SRC(5,4)=SRC(6,3)=SRC(7,2)= (t9 + 2*t10 + t11 + 2) >> 2; SRC(3,7)=SRC(4,6)=SRC(5,5)=SRC(6,4)=SRC(7,3)= (t10 + 2*t11 + t12 + 2) >> 2; SRC(4,7)=SRC(5,6)=SRC(6,5)=SRC(7,4)= (t11 + 2*t12 + t13 + 2) >> 2; SRC(5,7)=SRC(6,6)=SRC(7,5)= (t12 + 2*t13 + t14 + 2) >> 2; SRC(6,7)=SRC(7,6)= (t13 + 2*t14 + t15 + 2) >> 2; SRC(7,7)= (t14 + 3*t15 + 2) >> 2; } static void pred8x8l_down_right_c(uint8_t *src, int has_topleft, int has_topright, int stride) { PREDICT_8x8_LOAD_TOP; PREDICT_8x8_LOAD_LEFT; PREDICT_8x8_LOAD_TOPLEFT; SRC(0,7)= (l7 + 2*l6 + l5 + 2) >> 2; SRC(0,6)=SRC(1,7)= (l6 + 2*l5 + l4 + 2) >> 2; SRC(0,5)=SRC(1,6)=SRC(2,7)= (l5 + 2*l4 + l3 + 2) >> 2; SRC(0,4)=SRC(1,5)=SRC(2,6)=SRC(3,7)= 
(l4 + 2*l3 + l2 + 2) >> 2; SRC(0,3)=SRC(1,4)=SRC(2,5)=SRC(3,6)=SRC(4,7)= (l3 + 2*l2 + l1 + 2) >> 2; SRC(0,2)=SRC(1,3)=SRC(2,4)=SRC(3,5)=SRC(4,6)=SRC(5,7)= (l2 + 2*l1 + l0 + 2) >> 2; SRC(0,1)=SRC(1,2)=SRC(2,3)=SRC(3,4)=SRC(4,5)=SRC(5,6)=SRC(6,7)= (l1 + 2*l0 + lt + 2) >> 2; SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)=SRC(4,4)=SRC(5,5)=SRC(6,6)=SRC(7,7)= (l0 + 2*lt + t0 + 2) >> 2; SRC(1,0)=SRC(2,1)=SRC(3,2)=SRC(4,3)=SRC(5,4)=SRC(6,5)=SRC(7,6)= (lt + 2*t0 + t1 + 2) >> 2; SRC(2,0)=SRC(3,1)=SRC(4,2)=SRC(5,3)=SRC(6,4)=SRC(7,5)= (t0 + 2*t1 + t2 + 2) >> 2; SRC(3,0)=SRC(4,1)=SRC(5,2)=SRC(6,3)=SRC(7,4)= (t1 + 2*t2 + t3 + 2) >> 2; SRC(4,0)=SRC(5,1)=SRC(6,2)=SRC(7,3)= (t2 + 2*t3 + t4 + 2) >> 2; SRC(5,0)=SRC(6,1)=SRC(7,2)= (t3 + 2*t4 + t5 + 2) >> 2; SRC(6,0)=SRC(7,1)= (t4 + 2*t5 + t6 + 2) >> 2; SRC(7,0)= (t5 + 2*t6 + t7 + 2) >> 2; } static void pred8x8l_vertical_right_c(uint8_t *src, int has_topleft, int has_topright, int stride) { PREDICT_8x8_LOAD_TOP; PREDICT_8x8_LOAD_LEFT; PREDICT_8x8_LOAD_TOPLEFT; SRC(0,6)= (l5 + 2*l4 + l3 + 2) >> 2; SRC(0,7)= (l6 + 2*l5 + l4 + 2) >> 2; SRC(0,4)=SRC(1,6)= (l3 + 2*l2 + l1 + 2) >> 2; SRC(0,5)=SRC(1,7)= (l4 + 2*l3 + l2 + 2) >> 2; SRC(0,2)=SRC(1,4)=SRC(2,6)= (l1 + 2*l0 + lt + 2) >> 2; SRC(0,3)=SRC(1,5)=SRC(2,7)= (l2 + 2*l1 + l0 + 2) >> 2; SRC(0,1)=SRC(1,3)=SRC(2,5)=SRC(3,7)= (l0 + 2*lt + t0 + 2) >> 2; SRC(0,0)=SRC(1,2)=SRC(2,4)=SRC(3,6)= (lt + t0 + 1) >> 1; SRC(1,1)=SRC(2,3)=SRC(3,5)=SRC(4,7)= (lt + 2*t0 + t1 + 2) >> 2; SRC(1,0)=SRC(2,2)=SRC(3,4)=SRC(4,6)= (t0 + t1 + 1) >> 1; SRC(2,1)=SRC(3,3)=SRC(4,5)=SRC(5,7)= (t0 + 2*t1 + t2 + 2) >> 2; SRC(2,0)=SRC(3,2)=SRC(4,4)=SRC(5,6)= (t1 + t2 + 1) >> 1; SRC(3,1)=SRC(4,3)=SRC(5,5)=SRC(6,7)= (t1 + 2*t2 + t3 + 2) >> 2; SRC(3,0)=SRC(4,2)=SRC(5,4)=SRC(6,6)= (t2 + t3 + 1) >> 1; SRC(4,1)=SRC(5,3)=SRC(6,5)=SRC(7,7)= (t2 + 2*t3 + t4 + 2) >> 2; SRC(4,0)=SRC(5,2)=SRC(6,4)=SRC(7,6)= (t3 + t4 + 1) >> 1; SRC(5,1)=SRC(6,3)=SRC(7,5)= (t3 + 2*t4 + t5 + 2) >> 2; SRC(5,0)=SRC(6,2)=SRC(7,4)= (t4 + t5 + 1) >> 1; SRC(6,1)=SRC(7,3)= 
(t4 + 2*t5 + t6 + 2) >> 2; SRC(6,0)=SRC(7,2)= (t5 + t6 + 1) >> 1; SRC(7,1)= (t5 + 2*t6 + t7 + 2) >> 2; SRC(7,0)= (t6 + t7 + 1) >> 1; } static void pred8x8l_horizontal_down_c(uint8_t *src, int has_topleft, int has_topright, int stride) { PREDICT_8x8_LOAD_TOP; PREDICT_8x8_LOAD_LEFT; PREDICT_8x8_LOAD_TOPLEFT; SRC(0,7)= (l6 + l7 + 1) >> 1; SRC(1,7)= (l5 + 2*l6 + l7 + 2) >> 2; SRC(0,6)=SRC(2,7)= (l5 + l6 + 1) >> 1; SRC(1,6)=SRC(3,7)= (l4 + 2*l5 + l6 + 2) >> 2; SRC(0,5)=SRC(2,6)=SRC(4,7)= (l4 + l5 + 1) >> 1; SRC(1,5)=SRC(3,6)=SRC(5,7)= (l3 + 2*l4 + l5 + 2) >> 2; SRC(0,4)=SRC(2,5)=SRC(4,6)=SRC(6,7)= (l3 + l4 + 1) >> 1; SRC(1,4)=SRC(3,5)=SRC(5,6)=SRC(7,7)= (l2 + 2*l3 + l4 + 2) >> 2; SRC(0,3)=SRC(2,4)=SRC(4,5)=SRC(6,6)= (l2 + l3 + 1) >> 1; SRC(1,3)=SRC(3,4)=SRC(5,5)=SRC(7,6)= (l1 + 2*l2 + l3 + 2) >> 2; SRC(0,2)=SRC(2,3)=SRC(4,4)=SRC(6,5)= (l1 + l2 + 1) >> 1; SRC(1,2)=SRC(3,3)=SRC(5,4)=SRC(7,5)= (l0 + 2*l1 + l2 + 2) >> 2; SRC(0,1)=SRC(2,2)=SRC(4,3)=SRC(6,4)= (l0 + l1 + 1) >> 1; SRC(1,1)=SRC(3,2)=SRC(5,3)=SRC(7,4)= (lt + 2*l0 + l1 + 2) >> 2; SRC(0,0)=SRC(2,1)=SRC(4,2)=SRC(6,3)= (lt + l0 + 1) >> 1; SRC(1,0)=SRC(3,1)=SRC(5,2)=SRC(7,3)= (l0 + 2*lt + t0 + 2) >> 2; SRC(2,0)=SRC(4,1)=SRC(6,2)= (t1 + 2*t0 + lt + 2) >> 2; SRC(3,0)=SRC(5,1)=SRC(7,2)= (t2 + 2*t1 + t0 + 2) >> 2; SRC(4,0)=SRC(6,1)= (t3 + 2*t2 + t1 + 2) >> 2; SRC(5,0)=SRC(7,1)= (t4 + 2*t3 + t2 + 2) >> 2; SRC(6,0)= (t5 + 2*t4 + t3 + 2) >> 2; SRC(7,0)= (t6 + 2*t5 + t4 + 2) >> 2; } static void pred8x8l_vertical_left_c(uint8_t *src, int has_topleft, int has_topright, int stride) { PREDICT_8x8_LOAD_TOP; PREDICT_8x8_LOAD_TOPRIGHT; SRC(0,0)= (t0 + t1 + 1) >> 1; SRC(0,1)= (t0 + 2*t1 + t2 + 2) >> 2; SRC(0,2)=SRC(1,0)= (t1 + t2 + 1) >> 1; SRC(0,3)=SRC(1,1)= (t1 + 2*t2 + t3 + 2) >> 2; SRC(0,4)=SRC(1,2)=SRC(2,0)= (t2 + t3 + 1) >> 1; SRC(0,5)=SRC(1,3)=SRC(2,1)= (t2 + 2*t3 + t4 + 2) >> 2; SRC(0,6)=SRC(1,4)=SRC(2,2)=SRC(3,0)= (t3 + t4 + 1) >> 1; SRC(0,7)=SRC(1,5)=SRC(2,3)=SRC(3,1)= (t3 + 2*t4 + t5 + 2) >> 2; 
SRC(1,6)=SRC(2,4)=SRC(3,2)=SRC(4,0)= (t4 + t5 + 1) >> 1; SRC(1,7)=SRC(2,5)=SRC(3,3)=SRC(4,1)= (t4 + 2*t5 + t6 + 2) >> 2; SRC(2,6)=SRC(3,4)=SRC(4,2)=SRC(5,0)= (t5 + t6 + 1) >> 1; SRC(2,7)=SRC(3,5)=SRC(4,3)=SRC(5,1)= (t5 + 2*t6 + t7 + 2) >> 2; SRC(3,6)=SRC(4,4)=SRC(5,2)=SRC(6,0)= (t6 + t7 + 1) >> 1; SRC(3,7)=SRC(4,5)=SRC(5,3)=SRC(6,1)= (t6 + 2*t7 + t8 + 2) >> 2; SRC(4,6)=SRC(5,4)=SRC(6,2)=SRC(7,0)= (t7 + t8 + 1) >> 1; SRC(4,7)=SRC(5,5)=SRC(6,3)=SRC(7,1)= (t7 + 2*t8 + t9 + 2) >> 2; SRC(5,6)=SRC(6,4)=SRC(7,2)= (t8 + t9 + 1) >> 1; SRC(5,7)=SRC(6,5)=SRC(7,3)= (t8 + 2*t9 + t10 + 2) >> 2; SRC(6,6)=SRC(7,4)= (t9 + t10 + 1) >> 1; SRC(6,7)=SRC(7,5)= (t9 + 2*t10 + t11 + 2) >> 2; SRC(7,6)= (t10 + t11 + 1) >> 1; SRC(7,7)= (t10 + 2*t11 + t12 + 2) >> 2; } static void pred8x8l_horizontal_up_c(uint8_t *src, int has_topleft, int has_topright, int stride) { PREDICT_8x8_LOAD_LEFT; SRC(0,0)= (l0 + l1 + 1) >> 1; SRC(1,0)= (l0 + 2*l1 + l2 + 2) >> 2; SRC(0,1)=SRC(2,0)= (l1 + l2 + 1) >> 1; SRC(1,1)=SRC(3,0)= (l1 + 2*l2 + l3 + 2) >> 2; SRC(0,2)=SRC(2,1)=SRC(4,0)= (l2 + l3 + 1) >> 1; SRC(1,2)=SRC(3,1)=SRC(5,0)= (l2 + 2*l3 + l4 + 2) >> 2; SRC(0,3)=SRC(2,2)=SRC(4,1)=SRC(6,0)= (l3 + l4 + 1) >> 1; SRC(1,3)=SRC(3,2)=SRC(5,1)=SRC(7,0)= (l3 + 2*l4 + l5 + 2) >> 2; SRC(0,4)=SRC(2,3)=SRC(4,2)=SRC(6,1)= (l4 + l5 + 1) >> 1; SRC(1,4)=SRC(3,3)=SRC(5,2)=SRC(7,1)= (l4 + 2*l5 + l6 + 2) >> 2; SRC(0,5)=SRC(2,4)=SRC(4,3)=SRC(6,2)= (l5 + l6 + 1) >> 1; SRC(1,5)=SRC(3,4)=SRC(5,3)=SRC(7,2)= (l5 + 2*l6 + l7 + 2) >> 2; SRC(0,6)=SRC(2,5)=SRC(4,4)=SRC(6,3)= (l6 + l7 + 1) >> 1; SRC(1,6)=SRC(3,5)=SRC(5,4)=SRC(7,3)= (l6 + 3*l7 + 2) >> 2; SRC(0,7)=SRC(1,7)=SRC(2,6)=SRC(2,7)=SRC(3,6)= SRC(3,7)=SRC(4,5)=SRC(4,6)=SRC(4,7)=SRC(5,5)= SRC(5,6)=SRC(5,7)=SRC(6,4)=SRC(6,5)=SRC(6,6)= SRC(6,7)=SRC(7,4)=SRC(7,5)=SRC(7,6)=SRC(7,7)= l7; } #undef PREDICT_8x8_LOAD_LEFT #undef PREDICT_8x8_LOAD_TOP #undef PREDICT_8x8_LOAD_TOPLEFT #undef PREDICT_8x8_LOAD_TOPRIGHT #undef PREDICT_8x8_DC #undef PTR #undef PT #undef PL #undef SRC static void 
pred4x4_vertical_add_c(uint8_t *pix, const DCTELEM *block, int stride){ int i; pix -= stride; for(i=0; i<4; i++){ uint8_t v = pix[0]; pix[1*stride]= v += block[0]; pix[2*stride]= v += block[4]; pix[3*stride]= v += block[8]; pix[4*stride]= v + block[12]; pix++; block++; } } static void pred4x4_horizontal_add_c(uint8_t *pix, const DCTELEM *block, int stride){ int i; for(i=0; i<4; i++){ uint8_t v = pix[-1]; pix[0]= v += block[0]; pix[1]= v += block[1]; pix[2]= v += block[2]; pix[3]= v + block[3]; pix+= stride; block+= 4; } } static void pred8x8l_vertical_add_c(uint8_t *pix, const DCTELEM *block, int stride){ int i; pix -= stride; for(i=0; i<8; i++){ uint8_t v = pix[0]; pix[1*stride]= v += block[0]; pix[2*stride]= v += block[8]; pix[3*stride]= v += block[16]; pix[4*stride]= v += block[24]; pix[5*stride]= v += block[32]; pix[6*stride]= v += block[40]; pix[7*stride]= v += block[48]; pix[8*stride]= v + block[56]; pix++; block++; } } static void pred8x8l_horizontal_add_c(uint8_t *pix, const DCTELEM *block, int stride){ int i; for(i=0; i<8; i++){ uint8_t v = pix[-1]; pix[0]= v += block[0]; pix[1]= v += block[1]; pix[2]= v += block[2]; pix[3]= v += block[3]; pix[4]= v += block[4]; pix[5]= v += block[5]; pix[6]= v += block[6]; pix[7]= v + block[7]; pix+= stride; block+= 8; } } static void pred16x16_vertical_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){ int i; for(i=0; i<16; i++) pred4x4_vertical_add_c(pix + block_offset[i], block + i*16, stride); } static void pred16x16_horizontal_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){ int i; for(i=0; i<16; i++) pred4x4_horizontal_add_c(pix + block_offset[i], block + i*16, stride); } static void pred8x8_vertical_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){ int i; for(i=0; i<4; i++) pred4x4_vertical_add_c(pix + block_offset[i], block + i*16, stride); } static void pred8x8_horizontal_add_c(uint8_t *pix, const int *block_offset, const 
DCTELEM *block, int stride){ int i; for(i=0; i<4; i++) pred4x4_horizontal_add_c(pix + block_offset[i], block + i*16, stride); } /** * Sets the intra prediction function pointers. */ void ff_h264_pred_init(H264PredContext *h, int codec_id){ // MpegEncContext * const s = &h->s; if(codec_id != CODEC_ID_RV40){ h->pred4x4[VERT_PRED ]= pred4x4_vertical_c; h->pred4x4[HOR_PRED ]= pred4x4_horizontal_c; h->pred4x4[DC_PRED ]= pred4x4_dc_c; if(codec_id == CODEC_ID_SVQ3) h->pred4x4[DIAG_DOWN_LEFT_PRED ]= pred4x4_down_left_svq3_c; else h->pred4x4[DIAG_DOWN_LEFT_PRED ]= pred4x4_down_left_c; h->pred4x4[DIAG_DOWN_RIGHT_PRED]= pred4x4_down_right_c; h->pred4x4[VERT_RIGHT_PRED ]= pred4x4_vertical_right_c; h->pred4x4[HOR_DOWN_PRED ]= pred4x4_horizontal_down_c; h->pred4x4[VERT_LEFT_PRED ]= pred4x4_vertical_left_c; h->pred4x4[HOR_UP_PRED ]= pred4x4_horizontal_up_c; h->pred4x4[LEFT_DC_PRED ]= pred4x4_left_dc_c; h->pred4x4[TOP_DC_PRED ]= pred4x4_top_dc_c; h->pred4x4[DC_128_PRED ]= pred4x4_128_dc_c; }else{ h->pred4x4[VERT_PRED ]= pred4x4_vertical_c; h->pred4x4[HOR_PRED ]= pred4x4_horizontal_c; h->pred4x4[DC_PRED ]= pred4x4_dc_c; h->pred4x4[DIAG_DOWN_LEFT_PRED ]= pred4x4_down_left_rv40_c; h->pred4x4[DIAG_DOWN_RIGHT_PRED]= pred4x4_down_right_c; h->pred4x4[VERT_RIGHT_PRED ]= pred4x4_vertical_right_c; h->pred4x4[HOR_DOWN_PRED ]= pred4x4_horizontal_down_c; h->pred4x4[VERT_LEFT_PRED ]= pred4x4_vertical_left_rv40_c; h->pred4x4[HOR_UP_PRED ]= pred4x4_horizontal_up_rv40_c; h->pred4x4[LEFT_DC_PRED ]= pred4x4_left_dc_c; h->pred4x4[TOP_DC_PRED ]= pred4x4_top_dc_c; h->pred4x4[DC_128_PRED ]= pred4x4_128_dc_c; h->pred4x4[DIAG_DOWN_LEFT_PRED_RV40_NODOWN]= pred4x4_down_left_rv40_nodown_c; h->pred4x4[HOR_UP_PRED_RV40_NODOWN]= pred4x4_horizontal_up_rv40_nodown_c; h->pred4x4[VERT_LEFT_PRED_RV40_NODOWN]= pred4x4_vertical_left_rv40_nodown_c; } h->pred8x8l[VERT_PRED ]= pred8x8l_vertical_c; h->pred8x8l[HOR_PRED ]= pred8x8l_horizontal_c; h->pred8x8l[DC_PRED ]= pred8x8l_dc_c; h->pred8x8l[DIAG_DOWN_LEFT_PRED ]= 
pred8x8l_down_left_c; h->pred8x8l[DIAG_DOWN_RIGHT_PRED]= pred8x8l_down_right_c; h->pred8x8l[VERT_RIGHT_PRED ]= pred8x8l_vertical_right_c; h->pred8x8l[HOR_DOWN_PRED ]= pred8x8l_horizontal_down_c; h->pred8x8l[VERT_LEFT_PRED ]= pred8x8l_vertical_left_c; h->pred8x8l[HOR_UP_PRED ]= pred8x8l_horizontal_up_c; h->pred8x8l[LEFT_DC_PRED ]= pred8x8l_left_dc_c; h->pred8x8l[TOP_DC_PRED ]= pred8x8l_top_dc_c; h->pred8x8l[DC_128_PRED ]= pred8x8l_128_dc_c; h->pred8x8[VERT_PRED8x8 ]= pred8x8_vertical_c; h->pred8x8[HOR_PRED8x8 ]= pred8x8_horizontal_c; h->pred8x8[PLANE_PRED8x8 ]= pred8x8_plane_c; if(codec_id != CODEC_ID_RV40){ h->pred8x8[DC_PRED8x8 ]= pred8x8_dc_c; h->pred8x8[LEFT_DC_PRED8x8]= pred8x8_left_dc_c; h->pred8x8[TOP_DC_PRED8x8 ]= pred8x8_top_dc_c; h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8 ]= pred8x8_mad_cow_dc_l0t; h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8 ]= pred8x8_mad_cow_dc_0lt; h->pred8x8[ALZHEIMER_DC_L00_PRED8x8 ]= pred8x8_mad_cow_dc_l00; h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8 ]= pred8x8_mad_cow_dc_0l0; }else{ h->pred8x8[DC_PRED8x8 ]= pred8x8_dc_rv40_c; h->pred8x8[LEFT_DC_PRED8x8]= pred8x8_left_dc_rv40_c; h->pred8x8[TOP_DC_PRED8x8 ]= pred8x8_top_dc_rv40_c; } h->pred8x8[DC_128_PRED8x8 ]= pred8x8_128_dc_c; h->pred16x16[DC_PRED8x8 ]= pred16x16_dc_c; h->pred16x16[VERT_PRED8x8 ]= pred16x16_vertical_c; h->pred16x16[HOR_PRED8x8 ]= pred16x16_horizontal_c; h->pred16x16[PLANE_PRED8x8 ]= pred16x16_plane_c; switch(codec_id){ case CODEC_ID_SVQ3: h->pred16x16[PLANE_PRED8x8 ]= pred16x16_plane_svq3_c; break; case CODEC_ID_RV40: h->pred16x16[PLANE_PRED8x8 ]= pred16x16_plane_rv40_c; break; default: h->pred16x16[PLANE_PRED8x8 ]= pred16x16_plane_c; } h->pred16x16[LEFT_DC_PRED8x8]= pred16x16_left_dc_c; h->pred16x16[TOP_DC_PRED8x8 ]= pred16x16_top_dc_c; h->pred16x16[DC_128_PRED8x8 ]= pred16x16_128_dc_c; //special lossless h/v prediction for h264 h->pred4x4_add [VERT_PRED ]= pred4x4_vertical_add_c; h->pred4x4_add [ HOR_PRED ]= pred4x4_horizontal_add_c; h->pred8x8l_add [VERT_PRED ]= 
pred8x8l_vertical_add_c; h->pred8x8l_add [ HOR_PRED ]= pred8x8l_horizontal_add_c; h->pred8x8_add [VERT_PRED8x8]= pred8x8_vertical_add_c; h->pred8x8_add [ HOR_PRED8x8]= pred8x8_horizontal_add_c; h->pred16x16_add[VERT_PRED8x8]= pred16x16_vertical_add_c; h->pred16x16_add[ HOR_PRED8x8]= pred16x16_horizontal_add_c; if (ARCH_ARM) ff_h264_pred_init_arm(h, codec_id); }
123linslouis-android-video-cutter
jni/libavcodec/h264pred.c
C
asf20
40,055
/*
 * XVideo Motion Compensation internal functions
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_INTERNAL_XVMC_H
#define AVCODEC_INTERNAL_XVMC_H

#include "avcodec.h"
#include "mpegvideo.h"

/*
 * Internal entry points for XvMC (X-Video Motion Compensation) hardware
 * acceleration of MPEG video decoding. All functions operate on the
 * decoder's MpegEncContext; implementations live elsewhere in libavcodec
 * (presumably mpegvideo_xvmc.c — confirm against the build).
 */

/* Per-macroblock block-pointer setup for XvMC rendering. */
void ff_xvmc_init_block(MpegEncContext *s);

/* Pack the coded blocks selected by the coded-block pattern @cbp. */
void ff_xvmc_pack_pblocks(MpegEncContext *s, int cbp);

/* Called at the start of each field/frame; returns an error code
 * (int, unlike the other hooks — callers must check it). */
int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx);

/* Called when decoding of the current field/frame is complete. */
void ff_xvmc_field_end(MpegEncContext *s);

/* Hand one decoded macroblock over to the XvMC surface. */
void ff_xvmc_decode_mb(MpegEncContext *s);

#endif /* AVCODEC_INTERNAL_XVMC_H */
123linslouis-android-video-cutter
jni/libavcodec/xvmc_internal.h
C
asf20
1,216
/*
 * SIPR / ACELP.NET decoder
 *
 * Copyright (c) 2008 Vladimir Voroshilov
 * Copyright (c) 2009 Vitor Sessak
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_SIPR_H
#define AVCODEC_SIPR_H

#include "avcodec.h"
#include "dsputil.h"
#include "acelp_pitch_delay.h"

/* LPC filter order and subframe length for the 16k mode. */
#define LP_FILTER_ORDER_16k  16
#define L_SUBFR_16k          80

/* Pitch-delay search range, in samples. */
#define PITCH_MIN            30
#define PITCH_MAX            281

/* Minimum distance enforced between consecutive quantized LSFs. */
#define LSFQ_DIFF_MIN        (0.0125 * M_PI)

/* LPC filter order for all modes except 16k. */
#define LP_FILTER_ORDER      10

/** Number of past samples needed for excitation interpolation */
#define L_INTERPOL           (LP_FILTER_ORDER + 1)

/**  Subframe size for all modes except 16k  */
#define SUBFR_SIZE           48

#define SUBFRAME_COUNT_16k   2

/* Bitrate modes supported by the decoder. */
typedef enum {
    MODE_16k,
    MODE_8k5,
    MODE_6k5,
    MODE_5k0,
    MODE_COUNT
} SiprMode;

/* Persistent decoder state, carried across frames. */
typedef struct {
    AVCodecContext *avctx;
    DSPContext dsp;

    SiprMode mode;

    float past_pitch_gain;
    float lsf_history[LP_FILTER_ORDER_16k];

    float excitation[L_INTERPOL + PITCH_MAX + 2 * L_SUBFR_16k];

    DECLARE_ALIGNED(16, float, synth_buf)[LP_FILTER_ORDER + 5*SUBFR_SIZE + 6];

    float lsp_history[LP_FILTER_ORDER];
    float gain_mem;
    float energy_history[4];
    float highpass_filt_mem[2];
    float postfilter_mem[PITCH_DELAY_MAX + LP_FILTER_ORDER];

    /* 5k0 */
    float tilt_mem;
    float postfilter_agc;
    float postfilter_mem5k0[PITCH_DELAY_MAX + LP_FILTER_ORDER];
    float postfilter_syn5k0[LP_FILTER_ORDER + SUBFR_SIZE*5];

    /* 16k */
    int pitch_lag_prev;
    float iir_mem[LP_FILTER_ORDER_16k+1];
    float filt_buf[2][LP_FILTER_ORDER_16k+1];
    float *filt_mem[2];
    float mem_preemph[LP_FILTER_ORDER_16k];
    float synth[LP_FILTER_ORDER_16k];
    double lsp_history_16k[16];
} SiprContext;

/* Parameters parsed from one frame's bitstream
 * (arrays are sized for the maximum of 5 subframes). */
typedef struct {
    int ma_pred_switch;        ///< switched moving average predictor
    int vq_indexes[5];
    int pitch_delay[5];        ///< pitch delay
    int gp_index[5];           ///< adaptive-codebook gain indexes
    int16_t fc_indexes[5][10]; ///< fixed-codebook indexes
    int gc_index[5];           ///< fixed-codebook gain indexes
} SiprParameters;

/* Table of 0.5^n values shared with the 16k decoder. */
extern const float ff_pow_0_5[16];

/* One-time initialization of the 16k-mode state within @ctx. */
void ff_sipr_init_16k(SiprContext *ctx);

/* Decode a single 16k-mode frame described by @params into @out_data. */
void ff_sipr_decode_frame_16k(SiprContext *ctx, SiprParameters *params,
                              float *out_data);

#endif /* AVCODEC_SIPR_H */
123linslouis-android-video-cutter
jni/libavcodec/sipr.h
C
asf20
3,079
/*
 * IIR filter
 * Copyright (c) 2008 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * different IIR filters implementation
 */

#include "iirfilter.h"
#include <math.h>

/**
 * IIR filter global parameters
 */
typedef struct FFIIRFilterCoeffs{
    int   order;  ///< filter order (even, 2..MAXORDER)
    float gain;   ///< input gain compensating for the filter's DC gain
    int   *cx;    ///< binomial numerator (FIR) coefficients, (order/2)+1 entries
    float *cy;    ///< denominator (feedback) coefficients, order entries
}FFIIRFilterCoeffs;

/**
 * IIR filter state: history of the last 'order' intermediate samples.
 * x[] is over-allocated at init time (flexible-array idiom via x[1]).
 */
typedef struct FFIIRFilterState{
    float x[1];
}FFIIRFilterState;

/// maximum supported filter order
#define MAXORDER 30

/**
 * Initialize Butterworth lowpass filter coefficients.
 *
 * Only FF_FILTER_TYPE_BUTTERWORTH / FF_FILTER_MODE_LOWPASS with an even
 * order in (1, MAXORDER] and cutoff_ratio < 1.0 are supported; the
 * stopband and ripple parameters are accepted for interface symmetry but
 * unused by this design.
 *
 * @return newly allocated coefficient set, or NULL on invalid parameters
 *         or allocation failure (caller frees with ff_iir_filter_free_coeffs)
 */
av_cold struct FFIIRFilterCoeffs* ff_iir_filter_init_coeffs(enum IIRFilterType filt_type,
                                                            enum IIRFilterMode filt_mode,
                                                            int order, float cutoff_ratio,
                                                            float stopband, float ripple)
{
    int i, j;
    FFIIRFilterCoeffs *c;
    double wa;
    double p[MAXORDER + 1][2];

    if(filt_type != FF_FILTER_TYPE_BUTTERWORTH || filt_mode != FF_FILTER_MODE_LOWPASS)
        return NULL;
    if(order <= 1 || (order & 1) || order > MAXORDER || cutoff_ratio >= 1.0)
        return NULL;

    c = av_malloc(sizeof(FFIIRFilterCoeffs));
    if(!c)
        return NULL;
    c->cx = av_malloc(sizeof(c->cx[0]) * ((order >> 1) + 1));
    c->cy = av_malloc(sizeof(c->cy[0]) * order);
    if(!c->cx || !c->cy){
        // allocation failure: release whatever was obtained (av_free(NULL) is a no-op)
        av_free(c->cx);
        av_free(c->cy);
        av_free(c);
        return NULL;
    }
    c->order = order;

    // pre-warped analog cutoff frequency for the bilinear transform
    wa = 2 * tan(M_PI * 0.5 * cutoff_ratio);

    // numerator: binomial coefficients C(order, i) (1LL avoids int overflow mid-product)
    c->cx[0] = 1;
    for(i = 1; i < (order >> 1) + 1; i++)
        c->cx[i] = c->cx[i - 1] * (order - i + 1LL) / i;

    // expand the denominator polynomial from the Butterworth poles,
    // each mapped to the z-plane via the bilinear transform
    p[0][0] = 1.0;
    p[0][1] = 0.0;
    for(i = 1; i <= order; i++)
        p[i][0] = p[i][1] = 0.0;
    for(i = 0; i < order; i++){
        double zp[2];
        double th = (i + (order >> 1) + 0.5) * M_PI / order;
        double a_re, a_im, c_re, c_im;
        zp[0] = cos(th) * wa;
        zp[1] = sin(th) * wa;
        a_re = zp[0] + 2.0;
        c_re = zp[0] - 2.0;
        a_im = c_im = zp[1];
        // complex division (zp + 2) / (zp - 2)
        zp[0] = (a_re * c_re + a_im * c_im) / (c_re * c_re + c_im * c_im);
        zp[1] = (a_im * c_re - a_re * c_im) / (c_re * c_re + c_im * c_im);

        // multiply the running polynomial by (z - zp)
        for(j = order; j >= 1; j--)
        {
            a_re = p[j][0];
            a_im = p[j][1];
            p[j][0] = a_re*zp[0] - a_im*zp[1] + p[j-1][0];
            p[j][1] = a_re*zp[1] + a_im*zp[0] + p[j-1][1];
        }
        a_re    = p[0][0]*zp[0] - p[0][1]*zp[1];
        p[0][1] = p[0][0]*zp[1] + p[0][1]*zp[0];
        p[0][0] = a_re;
    }
    c->gain = p[order][0];
    for(i = 0; i < order; i++){
        c->gain += p[i][0];
        // feedback coefficients, normalized by the leading coefficient
        c->cy[i] = (-p[i][0] * p[order][0] + -p[i][1] * p[order][1]) /
                   (p[order][0] * p[order][0] + p[order][1] * p[order][1]);
    }
    c->gain /= 1 << order;

    return c;
}

/**
 * Allocate zero-initialized filter state for the given order.
 * @return new state, or NULL on allocation failure
 *         (caller frees with ff_iir_filter_free_state)
 */
av_cold struct FFIIRFilterState* ff_iir_filter_init_state(int order)
{
    // the struct already contains one float of history, hence (order - 1) extra
    FFIIRFilterState* s = av_mallocz(sizeof(FFIIRFilterState) + sizeof(s->x[0]) * (order - 1));
    return s;
}

// one Direct Form II step for the unrolled order-4 path; i0..i3 index the
// circular history so that i0 is the oldest sample (overwritten last)
#define FILTER(i0, i1, i2, i3)                    \
    in =   *src * c->gain                         \
         + c->cy[0]*s->x[i0] + c->cy[1]*s->x[i1]  \
         + c->cy[2]*s->x[i2] + c->cy[3]*s->x[i3]; \
    res =  (s->x[i0] + in      )*1                \
         + (s->x[i1] + s->x[i3])*4                \
         +  s->x[i2]            *6;               \
    *dst = av_clip_int16(lrintf(res));            \
    s->x[i0] = in;                                \
    src += sstep;                                 \
    dst += dstep;

/**
 * Filter 'size' int16 samples from src (stride sstep) into dst (stride dstep).
 * The order-4 case is unrolled; other orders take the generic path.
 * For order 4, 'size' is assumed to be a multiple of 4 (loop steps by 4).
 */
void ff_iir_filter(const struct FFIIRFilterCoeffs *c, struct FFIIRFilterState *s,
                   int size, const int16_t *src, int sstep, int16_t *dst, int dstep)
{
    int i;

    if(c->order == 4){
        for(i = 0; i < size; i += 4){
            float in, res;

            FILTER(0, 1, 2, 3);
            FILTER(1, 2, 3, 0);
            FILTER(2, 3, 0, 1);
            FILTER(3, 0, 1, 2);
        }
    }else{
        for(i = 0; i < size; i++){
            int j;
            float in, res;
            in = *src * c->gain;
            for(j = 0; j < c->order; j++)
                in += c->cy[j] * s->x[j];
            res = s->x[0] + in + s->x[c->order >> 1] * c->cx[c->order >> 1];
            for(j = 1; j < c->order >> 1; j++)
                res += (s->x[j] + s->x[c->order - j]) * c->cx[j];
            for(j = 0; j < c->order - 1; j++)
                s->x[j] = s->x[j + 1];
            *dst = av_clip_int16(lrintf(res));
            s->x[c->order - 1] = in;
            src += sstep;
            dst += dstep;   // FIX: was 'dst += sstep', which used the source
                            // stride for the destination and corrupted output
                            // whenever sstep != dstep
        }
    }
}

/** Free filter state allocated by ff_iir_filter_init_state(). */
av_cold void ff_iir_filter_free_state(struct FFIIRFilterState *state)
{
    av_free(state);
}

/** Free coefficients allocated by ff_iir_filter_init_coeffs(). NULL is OK. */
av_cold void ff_iir_filter_free_coeffs(struct FFIIRFilterCoeffs *coeffs)
{
    if(coeffs){
        av_free(coeffs->cx);
        av_free(coeffs->cy);
    }
    av_free(coeffs);
}

#ifdef TEST
#define FILT_ORDER 4
#define SIZE 1024
int main(void)
{
    struct FFIIRFilterCoeffs *fcoeffs = NULL;
    struct FFIIRFilterState  *fstate  = NULL;
    float cutoff_coeff = 0.4;
    int16_t x[SIZE], y[SIZE];
    int i;
    FILE* fd;

    fcoeffs = ff_iir_filter_init_coeffs(FF_FILTER_TYPE_BUTTERWORTH,
                                        FF_FILTER_MODE_LOWPASS, FILT_ORDER,
                                        cutoff_coeff, 0.0, 0.0);
    fstate  = ff_iir_filter_init_state(FILT_ORDER);

    // chirp test signal
    for (i = 0; i < SIZE; i++) {
        x[i] = lrint(0.75 * INT16_MAX * sin(0.5*M_PI*i*i/SIZE));
    }

    ff_iir_filter(fcoeffs, fstate, SIZE, x, 1, y, 1);

    fd = fopen("in.bin", "w");
    fwrite(x, sizeof(x[0]), SIZE, fd);
    fclose(fd);

    fd = fopen("out.bin", "w");
    fwrite(y, sizeof(y[0]), SIZE, fd);
    fclose(fd);

    ff_iir_filter_free_coeffs(fcoeffs);
    ff_iir_filter_free_state(fstate);
    return 0;
}
#endif /* TEST */
123linslouis-android-video-cutter
jni/libavcodec/iirfilter.c
C
asf20
6,429
/* * DVD subtitle encoding for ffmpeg * Copyright (c) 2005 Wolfram Gloger * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avcodec.h" #include "bytestream.h" #undef NDEBUG #include <assert.h> // ncnt is the nibble counter #define PUTNIBBLE(val)\ do {\ if (ncnt++ & 1)\ *q++ = bitbuf | ((val) & 0x0f);\ else\ bitbuf = (val) << 4;\ } while(0) static void dvd_encode_rle(uint8_t **pq, const uint8_t *bitmap, int linesize, int w, int h, const int cmap[256]) { uint8_t *q; unsigned int bitbuf = 0; int ncnt; int x, y, len, color; q = *pq; for (y = 0; y < h; ++y) { ncnt = 0; for(x = 0; x < w; x += len) { color = bitmap[x]; for (len=1; x+len < w; ++len) if (bitmap[x+len] != color) break; color = cmap[color]; assert(color < 4); if (len < 0x04) { PUTNIBBLE((len << 2)|color); } else if (len < 0x10) { PUTNIBBLE(len >> 2); PUTNIBBLE((len << 2)|color); } else if (len < 0x40) { PUTNIBBLE(0); PUTNIBBLE(len >> 2); PUTNIBBLE((len << 2)|color); } else if (x+len == w) { PUTNIBBLE(0); PUTNIBBLE(0); PUTNIBBLE(0); PUTNIBBLE(color); } else { if (len > 0xff) len = 0xff; PUTNIBBLE(0); PUTNIBBLE(len >> 6); PUTNIBBLE(len >> 2); PUTNIBBLE((len << 2)|color); } } /* end of line */ if (ncnt & 1) PUTNIBBLE(0); bitmap += linesize; } *pq = q; } static int encode_dvd_subtitles(uint8_t *outbuf, int outbuf_size, 
const AVSubtitle *h) { uint8_t *q, *qq; int object_id; int offset1[20], offset2[20]; int i, imax, color, alpha, rects = h->num_rects; unsigned long hmax; unsigned long hist[256]; int cmap[256]; if (rects == 0 || h->rects == NULL) return -1; if (rects > 20) rects = 20; // analyze bitmaps, compress to 4 colors for (i=0; i<256; ++i) { hist[i] = 0; cmap[i] = 0; } for (object_id = 0; object_id < rects; object_id++) for (i=0; i<h->rects[object_id]->w*h->rects[object_id]->h; ++i) { color = h->rects[object_id]->pict.data[0][i]; // only count non-transparent pixels alpha = ((uint32_t*)h->rects[object_id]->pict.data[1])[color] >> 24; hist[color] += alpha; } for (color=3;; --color) { hmax = 0; imax = 0; for (i=0; i<256; ++i) if (hist[i] > hmax) { imax = i; hmax = hist[i]; } if (hmax == 0) break; if (color == 0) color = 3; av_log(NULL, AV_LOG_DEBUG, "dvd_subtitle hist[%d]=%ld -> col %d\n", imax, hist[imax], color); cmap[imax] = color; hist[imax] = 0; } // encode data block q = outbuf + 4; for (object_id = 0; object_id < rects; object_id++) { offset1[object_id] = q - outbuf; // worst case memory requirement: 1 nibble per pixel.. 
if ((q - outbuf) + h->rects[object_id]->w*h->rects[object_id]->h/2 + 17*rects + 21 > outbuf_size) { av_log(NULL, AV_LOG_ERROR, "dvd_subtitle too big\n"); return -1; } dvd_encode_rle(&q, h->rects[object_id]->pict.data[0], h->rects[object_id]->w*2, h->rects[object_id]->w, h->rects[object_id]->h >> 1, cmap); offset2[object_id] = q - outbuf; dvd_encode_rle(&q, h->rects[object_id]->pict.data[0] + h->rects[object_id]->w, h->rects[object_id]->w*2, h->rects[object_id]->w, h->rects[object_id]->h >> 1, cmap); } // set data packet size qq = outbuf + 2; bytestream_put_be16(&qq, q - outbuf); // send start display command bytestream_put_be16(&q, (h->start_display_time*90) >> 10); bytestream_put_be16(&q, (q - outbuf) /*- 2 */ + 8 + 12*rects + 2); *q++ = 0x03; // palette - 4 nibbles *q++ = 0x03; *q++ = 0x7f; *q++ = 0x04; // alpha - 4 nibbles *q++ = 0xf0; *q++ = 0x00; //*q++ = 0x0f; *q++ = 0xff; // XXX not sure if more than one rect can really be encoded.. // 12 bytes per rect for (object_id = 0; object_id < rects; object_id++) { int x2 = h->rects[object_id]->x + h->rects[object_id]->w - 1; int y2 = h->rects[object_id]->y + h->rects[object_id]->h - 1; *q++ = 0x05; // x1 x2 -> 6 nibbles *q++ = h->rects[object_id]->x >> 4; *q++ = (h->rects[object_id]->x << 4) | ((x2 >> 8) & 0xf); *q++ = x2; // y1 y2 -> 6 nibbles *q++ = h->rects[object_id]->y >> 4; *q++ = (h->rects[object_id]->y << 4) | ((y2 >> 8) & 0xf); *q++ = y2; *q++ = 0x06; // offset1, offset2 bytestream_put_be16(&q, offset1[object_id]); bytestream_put_be16(&q, offset2[object_id]); } *q++ = 0x01; // start command *q++ = 0xff; // terminating command // send stop display command last bytestream_put_be16(&q, (h->end_display_time*90) >> 10); bytestream_put_be16(&q, (q - outbuf) - 2 /*+ 4*/); *q++ = 0x02; // set end *q++ = 0xff; // terminating command qq = outbuf; bytestream_put_be16(&qq, q - outbuf); av_log(NULL, AV_LOG_DEBUG, "subtitle_packet size=%td\n", q - outbuf); return q - outbuf; } static int dvdsub_encode(AVCodecContext 
*avctx, unsigned char *buf, int buf_size, void *data) { //DVDSubtitleContext *s = avctx->priv_data; AVSubtitle *sub = data; int ret; ret = encode_dvd_subtitles(buf, buf_size, sub); return ret; } AVCodec dvdsub_encoder = { "dvdsub", AVMEDIA_TYPE_SUBTITLE, CODEC_ID_DVD_SUBTITLE, 0, NULL, dvdsub_encode, .long_name = NULL_IF_CONFIG_SMALL("DVD subtitles"), };
123linslouis-android-video-cutter
jni/libavcodec/dvdsubenc.c
C
asf20
7,038
/* * Copyright (c) 2006 Paul Richards <paul.richards@gmail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * @brief Theora encoder using libtheora. * @author Paul Richards <paul.richards@gmail.com> * * A lot of this is copy / paste from other output codecs in * libavcodec or pure guesswork (or both). * * I have used t_ prefixes on variables which are libtheora types * and o_ prefixes on variables which are libogg types. */ /* FFmpeg includes */ #include "libavutil/intreadwrite.h" #include "libavutil/log.h" #include "libavutil/base64.h" #include "avcodec.h" /* libtheora includes */ #include <theora/theoraenc.h> typedef struct TheoraContext { th_enc_ctx *t_state; uint8_t *stats; int stats_size; int stats_offset; int uv_hshift; int uv_vshift; int keyframe_mask; } TheoraContext; /** Concatenates an ogg_packet into the extradata. 
*/ static int concatenate_packet(unsigned int* offset, AVCodecContext* avc_context, const ogg_packet* packet) { const char* message = NULL; uint8_t* newdata = NULL; int newsize = avc_context->extradata_size + 2 + packet->bytes; if (packet->bytes < 0) { message = "ogg_packet has negative size"; } else if (packet->bytes > 0xffff) { message = "ogg_packet is larger than 65535 bytes"; } else if (newsize < avc_context->extradata_size) { message = "extradata_size would overflow"; } else { newdata = av_realloc(avc_context->extradata, newsize); if (!newdata) message = "av_realloc failed"; } if (message) { av_log(avc_context, AV_LOG_ERROR, "concatenate_packet failed: %s\n", message); return -1; } avc_context->extradata = newdata; avc_context->extradata_size = newsize; AV_WB16(avc_context->extradata + (*offset), packet->bytes); *offset += 2; memcpy(avc_context->extradata + (*offset), packet->packet, packet->bytes); (*offset) += packet->bytes; return 0; } static int get_stats(AVCodecContext *avctx, int eos) { #ifdef TH_ENCCTL_2PASS_OUT TheoraContext *h = avctx->priv_data; uint8_t *buf; int bytes; bytes = th_encode_ctl(h->t_state, TH_ENCCTL_2PASS_OUT, &buf, sizeof(buf)); if (bytes < 0) { av_log(avctx, AV_LOG_ERROR, "Error getting first pass stats\n"); return -1; } if (!eos) { h->stats = av_fast_realloc(h->stats, &h->stats_size, h->stats_offset + bytes); memcpy(h->stats + h->stats_offset, buf, bytes); h->stats_offset += bytes; } else { int b64_size = ((h->stats_offset + 2) / 3) * 4 + 1; // libtheora generates a summary header at the end memcpy(h->stats, buf, bytes); avctx->stats_out = av_malloc(b64_size); av_base64_encode(avctx->stats_out, b64_size, h->stats, h->stats_offset); } return 0; #else av_log(avctx, AV_LOG_ERROR, "libtheora too old to support 2pass\n"); return -1; #endif } // libtheora won't read the entire buffer we give it at once, so we have to // repeatedly submit it... 
static int submit_stats(AVCodecContext *avctx) { #ifdef TH_ENCCTL_2PASS_IN TheoraContext *h = avctx->priv_data; int bytes; if (!h->stats) { if (!avctx->stats_in) { av_log(avctx, AV_LOG_ERROR, "No statsfile for second pass\n"); return -1; } h->stats_size = strlen(avctx->stats_in) * 3/4; h->stats = av_malloc(h->stats_size); h->stats_size = av_base64_decode(h->stats, avctx->stats_in, h->stats_size); } while (h->stats_size - h->stats_offset > 0) { bytes = th_encode_ctl(h->t_state, TH_ENCCTL_2PASS_IN, h->stats + h->stats_offset, h->stats_size - h->stats_offset); if (bytes < 0) { av_log(avctx, AV_LOG_ERROR, "Error submitting stats\n"); return -1; } if (!bytes) return 0; h->stats_offset += bytes; } return 0; #else av_log(avctx, AV_LOG_ERROR, "libtheora too old to support 2pass\n"); return -1; #endif } static av_cold int encode_init(AVCodecContext* avc_context) { th_info t_info; th_comment t_comment; ogg_packet o_packet; unsigned int offset; TheoraContext *h = avc_context->priv_data; uint32_t gop_size = avc_context->gop_size; /* Set up the theora_info struct */ th_info_init(&t_info); t_info.frame_width = FFALIGN(avc_context->width, 16); t_info.frame_height = FFALIGN(avc_context->height, 16); t_info.pic_width = avc_context->width; t_info.pic_height = avc_context->height; t_info.pic_x = 0; t_info.pic_y = 0; /* Swap numerator and denominator as time_base in AVCodecContext gives the * time period between frames, but theora_info needs the framerate. 
*/ t_info.fps_numerator = avc_context->time_base.den; t_info.fps_denominator = avc_context->time_base.num; if (avc_context->sample_aspect_ratio.num) { t_info.aspect_numerator = avc_context->sample_aspect_ratio.num; t_info.aspect_denominator = avc_context->sample_aspect_ratio.den; } else { t_info.aspect_numerator = 1; t_info.aspect_denominator = 1; } if (avc_context->color_primaries == AVCOL_PRI_BT470M) t_info.colorspace = TH_CS_ITU_REC_470M; else if (avc_context->color_primaries == AVCOL_PRI_BT470BG) t_info.colorspace = TH_CS_ITU_REC_470BG; else t_info.colorspace = TH_CS_UNSPECIFIED; if (avc_context->pix_fmt == PIX_FMT_YUV420P) t_info.pixel_fmt = TH_PF_420; else if (avc_context->pix_fmt == PIX_FMT_YUV422P) t_info.pixel_fmt = TH_PF_422; else if (avc_context->pix_fmt == PIX_FMT_YUV444P) t_info.pixel_fmt = TH_PF_444; else { av_log(avc_context, AV_LOG_ERROR, "Unsupported pix_fmt\n"); return -1; } avcodec_get_chroma_sub_sample(avc_context->pix_fmt, &h->uv_hshift, &h->uv_vshift); if (avc_context->flags & CODEC_FLAG_QSCALE) { /* to be constant with the libvorbis implementation, clip global_quality to 0 - 10 Theora accepts a quality parameter p, which is: * 0 <= p <=63 * an int value */ t_info.quality = av_clip(avc_context->global_quality / (float)FF_QP2LAMBDA, 0, 10) * 6.3; t_info.target_bitrate = 0; } else { t_info.target_bitrate = avc_context->bit_rate; t_info.quality = 0; } /* Now initialise libtheora */ h->t_state = th_encode_alloc(&t_info); if (!h->t_state) { av_log(avc_context, AV_LOG_ERROR, "theora_encode_init failed\n"); return -1; } h->keyframe_mask = (1 << t_info.keyframe_granule_shift) - 1; /* Clear up theora_info struct */ th_info_clear(&t_info); if (th_encode_ctl(h->t_state, TH_ENCCTL_SET_KEYFRAME_FREQUENCY_FORCE, &gop_size, sizeof(gop_size))) { av_log(avc_context, AV_LOG_ERROR, "Error setting GOP size\n"); return -1; } // need to enable 2 pass (via TH_ENCCTL_2PASS_) before encoding headers if (avc_context->flags & CODEC_FLAG_PASS1) { if 
(get_stats(avc_context, 0)) return -1; } else if (avc_context->flags & CODEC_FLAG_PASS2) { if (submit_stats(avc_context)) return -1; } /* Output first header packet consisting of theora header, comment, and tables. Each one is prefixed with a 16bit size, then they are concatenated together into ffmpeg's extradata. */ offset = 0; /* Headers */ th_comment_init(&t_comment); while (th_encode_flushheader(h->t_state, &t_comment, &o_packet)) if (concatenate_packet(&offset, avc_context, &o_packet)) return -1; th_comment_clear(&t_comment); /* Set up the output AVFrame */ avc_context->coded_frame= avcodec_alloc_frame(); return 0; } static int encode_frame(AVCodecContext* avc_context, uint8_t *outbuf, int buf_size, void *data) { th_ycbcr_buffer t_yuv_buffer; TheoraContext *h = avc_context->priv_data; AVFrame *frame = data; ogg_packet o_packet; int result, i; // EOS, finish and get 1st pass stats if applicable if (!frame) { th_encode_packetout(h->t_state, 1, &o_packet); if (avc_context->flags & CODEC_FLAG_PASS1) if (get_stats(avc_context, 1)) return -1; return 0; } /* Copy planes to the theora yuv_buffer */ for (i = 0; i < 3; i++) { t_yuv_buffer[i].width = FFALIGN(avc_context->width, 16) >> (i && h->uv_hshift); t_yuv_buffer[i].height = FFALIGN(avc_context->height, 16) >> (i && h->uv_vshift); t_yuv_buffer[i].stride = frame->linesize[i]; t_yuv_buffer[i].data = frame->data[i]; } if (avc_context->flags & CODEC_FLAG_PASS2) if (submit_stats(avc_context)) return -1; /* Now call into theora_encode_YUVin */ result = th_encode_ycbcr_in(h->t_state, t_yuv_buffer); if (result) { const char* message; switch (result) { case -1: message = "differing frame sizes"; break; case TH_EINVAL: message = "encoder is not ready or is finished"; break; default: message = "unknown reason"; break; } av_log(avc_context, AV_LOG_ERROR, "theora_encode_YUVin failed (%s) [%d]\n", message, result); return -1; } if (avc_context->flags & CODEC_FLAG_PASS1) if (get_stats(avc_context, 0)) return -1; /* Pick up 
returned ogg_packet */ result = th_encode_packetout(h->t_state, 0, &o_packet); switch (result) { case 0: /* No packet is ready */ return 0; case 1: /* Success, we have a packet */ break; default: av_log(avc_context, AV_LOG_ERROR, "theora_encode_packetout failed [%d]\n", result); return -1; } /* Copy ogg_packet content out to buffer */ if (buf_size < o_packet.bytes) { av_log(avc_context, AV_LOG_ERROR, "encoded frame too large\n"); return -1; } memcpy(outbuf, o_packet.packet, o_packet.bytes); // HACK: assumes no encoder delay, this is true until libtheora becomes // multithreaded (which will be disabled unless explictly requested) avc_context->coded_frame->pts = frame->pts; avc_context->coded_frame->key_frame = !(o_packet.granulepos & h->keyframe_mask); return o_packet.bytes; } static av_cold int encode_close(AVCodecContext* avc_context) { TheoraContext *h = avc_context->priv_data; th_encode_free(h->t_state); av_freep(&h->stats); av_freep(&avc_context->coded_frame); av_freep(&avc_context->stats_out); av_freep(&avc_context->extradata); avc_context->extradata_size = 0; return 0; } /** AVCodec struct exposed to libavcodec */ AVCodec libtheora_encoder = { .name = "libtheora", .type = AVMEDIA_TYPE_VIDEO, .id = CODEC_ID_THEORA, .priv_data_size = sizeof(TheoraContext), .init = encode_init, .close = encode_close, .encode = encode_frame, .capabilities = CODEC_CAP_DELAY, // needed to get the statsfile summary .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("libtheora Theora"), };
123linslouis-android-video-cutter
jni/libavcodec/libtheoraenc.c
C
asf20
12,372
/* * QuickDraw (qdrw) codec * Copyright (c) 2004 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Apple QuickDraw codec. */ #include "libavutil/intreadwrite.h" #include "avcodec.h" typedef struct QdrawContext{ AVCodecContext *avctx; AVFrame pic; } QdrawContext; static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; QdrawContext * const a = avctx->priv_data; AVFrame * const p= (AVFrame*)&a->pic; uint8_t* outdata; int colors; int i; uint32_t *pal; int r, g, b; if(p->data[0]) avctx->release_buffer(avctx, p); p->reference= 0; if(avctx->get_buffer(avctx, p) < 0){ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } p->pict_type= FF_I_TYPE; p->key_frame= 1; outdata = a->pic.data[0]; buf += 0x68; /* jump to palette */ colors = AV_RB32(buf); buf += 4; if(colors < 0 || colors > 256) { av_log(avctx, AV_LOG_ERROR, "Error color count - %i(0x%X)\n", colors, colors); return -1; } pal = (uint32_t*)p->data[1]; for (i = 0; i <= colors; i++) { unsigned int idx; idx = AV_RB16(buf); /* color index */ buf += 2; if (idx > 255) { av_log(avctx, AV_LOG_ERROR, "Palette index out of range: %u\n", idx); buf += 6; continue; } r = *buf++; buf++; g = *buf++; buf++; b 
= *buf++; buf++; pal[idx] = (r << 16) | (g << 8) | b; } p->palette_has_changed = 1; buf += 18; /* skip unneeded data */ for (i = 0; i < avctx->height; i++) { int size, left, code, pix; const uint8_t *next; uint8_t *out; int tsize = 0; /* decode line */ out = outdata; size = AV_RB16(buf); /* size of packed line */ buf += 2; left = size; next = buf + size; while (left > 0) { code = *buf++; if (code & 0x80 ) { /* run */ pix = *buf++; if ((out + (257 - code)) > (outdata + a->pic.linesize[0])) break; memset(out, pix, 257 - code); out += 257 - code; tsize += 257 - code; left -= 2; } else { /* copy */ if ((out + code) > (outdata + a->pic.linesize[0])) break; memcpy(out, buf, code + 1); out += code + 1; buf += code + 1; left -= 2 + code; tsize += code + 1; } } buf = next; outdata += a->pic.linesize[0]; } *data_size = sizeof(AVFrame); *(AVFrame*)data = a->pic; return buf_size; } static av_cold int decode_init(AVCodecContext *avctx){ // QdrawContext * const a = avctx->priv_data; avctx->pix_fmt= PIX_FMT_PAL8; return 0; } static av_cold int decode_end(AVCodecContext *avctx){ QdrawContext * const a = avctx->priv_data; AVFrame *pic = &a->pic; if (pic->data[0]) avctx->release_buffer(avctx, pic); return 0; } AVCodec qdraw_decoder = { "qdraw", AVMEDIA_TYPE_VIDEO, CODEC_ID_QDRAW, sizeof(QdrawContext), decode_init, NULL, decode_end, decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Apple QuickDraw"), };
123linslouis-android-video-cutter
jni/libavcodec/qdrw.c
C
asf20
4,288
/* * Dirac encoder support via Schroedinger libraries * Copyright (c) 2008 BBC, Anuradha Suraparaju <asuraparaju at gmail dot com > * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Dirac encoder support via libschroedinger-1.0 libraries. More details about * the Schroedinger project can be found at http://www.diracvideo.org/. * The library implements Dirac Specification Version 2.2 * (http://dirac.sourceforge.net/specification.html). 
*/ #undef NDEBUG #include <assert.h> #include <schroedinger/schro.h> #include <schroedinger/schrodebug.h> #include <schroedinger/schrovideoformat.h> #include "avcodec.h" #include "libdirac_libschro.h" #include "libschroedinger.h" /** libschroedinger encoder private data */ typedef struct FfmpegSchroEncoderParams { /** Schroedinger video format */ SchroVideoFormat *format; /** Schroedinger frame format */ SchroFrameFormat frame_format; /** frame being encoded */ AVFrame picture; /** frame size */ int frame_size; /** Schroedinger encoder handle*/ SchroEncoder* encoder; /** buffer to store encoder output before writing it to the frame queue*/ unsigned char *enc_buf; /** Size of encoder buffer*/ int enc_buf_size; /** queue storing encoded frames */ FfmpegDiracSchroQueue enc_frame_queue; /** end of sequence signalled */ int eos_signalled; /** end of sequence pulled */ int eos_pulled; } FfmpegSchroEncoderParams; /** * Works out Schro-compatible chroma format. */ static int SetSchroChromaFormat(AVCodecContext *avccontext) { int num_formats = sizeof(ffmpeg_schro_pixel_format_map) / sizeof(ffmpeg_schro_pixel_format_map[0]); int idx; FfmpegSchroEncoderParams* p_schro_params = avccontext->priv_data; for (idx = 0; idx < num_formats; ++idx) { if (ffmpeg_schro_pixel_format_map[idx].ff_pix_fmt == avccontext->pix_fmt) { p_schro_params->format->chroma_format = ffmpeg_schro_pixel_format_map[idx].schro_pix_fmt; return 0; } } av_log(avccontext, AV_LOG_ERROR, "This codec currently only supports planar YUV 4:2:0, 4:2:2" " and 4:4:4 formats.\n"); return -1; } static int libschroedinger_encode_init(AVCodecContext *avccontext) { FfmpegSchroEncoderParams* p_schro_params = avccontext->priv_data; SchroVideoFormatEnum preset; /* Initialize the libraries that libschroedinger depends on. */ schro_init(); /* Create an encoder object. */ p_schro_params->encoder = schro_encoder_new(); if (!p_schro_params->encoder) { av_log(avccontext, AV_LOG_ERROR, "Unrecoverable Error: schro_encoder_new failed. 
"); return -1; } /* Initialize the format. */ preset = ff_get_schro_video_format_preset(avccontext); p_schro_params->format = schro_encoder_get_video_format(p_schro_params->encoder); schro_video_format_set_std_video_format(p_schro_params->format, preset); p_schro_params->format->width = avccontext->width; p_schro_params->format->height = avccontext->height; if (SetSchroChromaFormat(avccontext) == -1) return -1; if (ff_get_schro_frame_format(p_schro_params->format->chroma_format, &p_schro_params->frame_format) == -1) { av_log(avccontext, AV_LOG_ERROR, "This codec currently supports only planar YUV 4:2:0, 4:2:2" " and 4:4:4 formats.\n"); return -1; } p_schro_params->format->frame_rate_numerator = avccontext->time_base.den; p_schro_params->format->frame_rate_denominator = avccontext->time_base.num; p_schro_params->frame_size = avpicture_get_size(avccontext->pix_fmt, avccontext->width, avccontext->height); avccontext->coded_frame = &p_schro_params->picture; if (!avccontext->gop_size) { schro_encoder_setting_set_double(p_schro_params->encoder, "gop_structure", SCHRO_ENCODER_GOP_INTRA_ONLY); if (avccontext->coder_type == FF_CODER_TYPE_VLC) schro_encoder_setting_set_double(p_schro_params->encoder, "enable_noarith", 1); } else { schro_encoder_setting_set_double(p_schro_params->encoder, "gop_structure", SCHRO_ENCODER_GOP_BIREF); avccontext->has_b_frames = 1; } /* FIXME - Need to handle SCHRO_ENCODER_RATE_CONTROL_LOW_DELAY. 
*/ if (avccontext->flags & CODEC_FLAG_QSCALE) { if (!avccontext->global_quality) { /* lossless coding */ schro_encoder_setting_set_double(p_schro_params->encoder, "rate_control", SCHRO_ENCODER_RATE_CONTROL_LOSSLESS); } else { int noise_threshold; schro_encoder_setting_set_double(p_schro_params->encoder, "rate_control", SCHRO_ENCODER_RATE_CONTROL_CONSTANT_NOISE_THRESHOLD); noise_threshold = avccontext->global_quality / FF_QP2LAMBDA; if (noise_threshold > 100) noise_threshold = 100; schro_encoder_setting_set_double(p_schro_params->encoder, "noise_threshold", noise_threshold); } } else { schro_encoder_setting_set_double(p_schro_params->encoder, "rate_control", SCHRO_ENCODER_RATE_CONTROL_CONSTANT_BITRATE); schro_encoder_setting_set_double(p_schro_params->encoder, "bitrate", avccontext->bit_rate); } if (avccontext->flags & CODEC_FLAG_INTERLACED_ME) /* All material can be coded as interlaced or progressive irrespective of the type of source material. */ schro_encoder_setting_set_double(p_schro_params->encoder, "interlaced_coding", 1); /* FIXME: Signal range hardcoded to 8-bit data until both libschroedinger * and libdirac support other bit-depth data. */ schro_video_format_set_std_signal_range(p_schro_params->format, SCHRO_SIGNAL_RANGE_8BIT_VIDEO); /* Set the encoder format. */ schro_encoder_set_video_format(p_schro_params->encoder, p_schro_params->format); /* Set the debug level. */ schro_debug_set_level(avccontext->debug); schro_encoder_start(p_schro_params->encoder); /* Initialize the encoded frame queue. */ ff_dirac_schro_queue_init(&p_schro_params->enc_frame_queue); return 0; } static SchroFrame *libschroedinger_frame_from_data(AVCodecContext *avccontext, void *in_data) { FfmpegSchroEncoderParams* p_schro_params = avccontext->priv_data; SchroFrame *in_frame; /* Input line size may differ from what the codec supports. Especially * when transcoding from one format to another. So use avpicture_layout * to copy the frame. 
*/ in_frame = ff_create_schro_frame(avccontext, p_schro_params->frame_format); if (in_frame) avpicture_layout((AVPicture *)in_data, avccontext->pix_fmt, avccontext->width, avccontext->height, in_frame->components[0].data, p_schro_params->frame_size); return in_frame; } static void SchroedingerFreeFrame(void *data) { FfmpegDiracSchroEncodedFrame *enc_frame = data; av_freep(&(enc_frame->p_encbuf)); av_free(enc_frame); } static int libschroedinger_encode_frame(AVCodecContext *avccontext, unsigned char *frame, int buf_size, void *data) { int enc_size = 0; FfmpegSchroEncoderParams* p_schro_params = avccontext->priv_data; SchroEncoder *encoder = p_schro_params->encoder; struct FfmpegDiracSchroEncodedFrame* p_frame_output = NULL; int go = 1; SchroBuffer *enc_buf; int presentation_frame; int parse_code; int last_frame_in_sequence = 0; if (!data) { /* Push end of sequence if not already signalled. */ if (!p_schro_params->eos_signalled) { schro_encoder_end_of_stream(encoder); p_schro_params->eos_signalled = 1; } } else { /* Allocate frame data to schro input buffer. */ SchroFrame *in_frame = libschroedinger_frame_from_data(avccontext, data); /* Load next frame. */ schro_encoder_push_frame(encoder, in_frame); } if (p_schro_params->eos_pulled) go = 0; /* Now check to see if we have any output from the encoder. */ while (go) { SchroStateEnum state; state = schro_encoder_wait(encoder); switch (state) { case SCHRO_STATE_HAVE_BUFFER: case SCHRO_STATE_END_OF_STREAM: enc_buf = schro_encoder_pull(encoder, &presentation_frame); assert(enc_buf->length > 0); assert(enc_buf->length <= buf_size); parse_code = enc_buf->data[4]; /* All non-frame data is prepended to actual frame data to * be able to set the pts correctly. 
So we don't write data * to the frame output queue until we actually have a frame */ p_schro_params->enc_buf = av_realloc(p_schro_params->enc_buf, p_schro_params->enc_buf_size + enc_buf->length); memcpy(p_schro_params->enc_buf + p_schro_params->enc_buf_size, enc_buf->data, enc_buf->length); p_schro_params->enc_buf_size += enc_buf->length; if (state == SCHRO_STATE_END_OF_STREAM) { p_schro_params->eos_pulled = 1; go = 0; } if (!SCHRO_PARSE_CODE_IS_PICTURE(parse_code)) { schro_buffer_unref(enc_buf); break; } /* Create output frame. */ p_frame_output = av_mallocz(sizeof(FfmpegDiracSchroEncodedFrame)); /* Set output data. */ p_frame_output->size = p_schro_params->enc_buf_size; p_frame_output->p_encbuf = p_schro_params->enc_buf; if (SCHRO_PARSE_CODE_IS_INTRA(parse_code) && SCHRO_PARSE_CODE_IS_REFERENCE(parse_code)) p_frame_output->key_frame = 1; /* Parse the coded frame number from the bitstream. Bytes 14 * through 17 represesent the frame number. */ p_frame_output->frame_num = (enc_buf->data[13] << 24) + (enc_buf->data[14] << 16) + (enc_buf->data[15] << 8) + enc_buf->data[16]; ff_dirac_schro_queue_push_back(&p_schro_params->enc_frame_queue, p_frame_output); p_schro_params->enc_buf_size = 0; p_schro_params->enc_buf = NULL; schro_buffer_unref(enc_buf); break; case SCHRO_STATE_NEED_FRAME: go = 0; break; case SCHRO_STATE_AGAIN: break; default: av_log(avccontext, AV_LOG_ERROR, "Unknown Schro Encoder state\n"); return -1; } } /* Copy 'next' frame in queue. */ if (p_schro_params->enc_frame_queue.size == 1 && p_schro_params->eos_pulled) last_frame_in_sequence = 1; p_frame_output = ff_dirac_schro_queue_pop(&p_schro_params->enc_frame_queue); if (!p_frame_output) return 0; memcpy(frame, p_frame_output->p_encbuf, p_frame_output->size); avccontext->coded_frame->key_frame = p_frame_output->key_frame; /* Use the frame number of the encoded frame as the pts. It is OK to * do so since Dirac is a constant frame rate codec. It expects input * to be of constant frame rate. 
*/ avccontext->coded_frame->pts = p_frame_output->frame_num; enc_size = p_frame_output->size; /* Append the end of sequence information to the last frame in the * sequence. */ if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0) { memcpy(frame + enc_size, p_schro_params->enc_buf, p_schro_params->enc_buf_size); enc_size += p_schro_params->enc_buf_size; av_freep(&p_schro_params->enc_buf); p_schro_params->enc_buf_size = 0; } /* free frame */ SchroedingerFreeFrame(p_frame_output); return enc_size; } static int libschroedinger_encode_close(AVCodecContext *avccontext) { FfmpegSchroEncoderParams* p_schro_params = avccontext->priv_data; /* Close the encoder. */ schro_encoder_free(p_schro_params->encoder); /* Free data in the output frame queue. */ ff_dirac_schro_queue_free(&p_schro_params->enc_frame_queue, SchroedingerFreeFrame); /* Free the encoder buffer. */ if (p_schro_params->enc_buf_size) av_freep(&p_schro_params->enc_buf); /* Free the video format structure. */ av_freep(&p_schro_params->format); return 0; } AVCodec libschroedinger_encoder = { "libschroedinger", AVMEDIA_TYPE_VIDEO, CODEC_ID_DIRAC, sizeof(FfmpegSchroEncoderParams), libschroedinger_encode_init, libschroedinger_encode_frame, libschroedinger_encode_close, .capabilities = CODEC_CAP_DELAY, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("libschroedinger Dirac 2.2"), };
123linslouis-android-video-cutter
jni/libavcodec/libschroedingerenc.c
C
asf20
14,994
/* * Electronic Arts TQI Video Decoder * Copyright (c) 2007-2009 Peter Ross <pross@xvid.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Electronic Arts TQI Video Decoder * by Peter Ross <pross@xvid.org> * * Technical details here: * http://wiki.multimedia.cx/index.php?title=Electronic_Arts_TQI */ #include "avcodec.h" #include "get_bits.h" #include "dsputil.h" #include "aandcttab.h" #include "mpeg12.h" #include "mpegvideo.h" typedef struct TqiContext { MpegEncContext s; AVFrame frame; void *bitstream_buf; unsigned int bitstream_buf_size; DECLARE_ALIGNED(16, DCTELEM, block)[6][64]; } TqiContext; static av_cold int tqi_decode_init(AVCodecContext *avctx) { TqiContext *t = avctx->priv_data; MpegEncContext *s = &t->s; s->avctx = avctx; if(avctx->idct_algo==FF_IDCT_AUTO) avctx->idct_algo=FF_IDCT_EA; dsputil_init(&s->dsp, avctx); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct); s->qscale = 1; avctx->time_base = (AVRational){1, 15}; avctx->pix_fmt = PIX_FMT_YUV420P; ff_mpeg12_init_vlcs(); return 0; } static void tqi_decode_mb(MpegEncContext *s, DCTELEM (*block)[64]) { int n; s->dsp.clear_blocks(block[0]); for (n=0; n<6; n++) ff_mpeg1_decode_block_intra(s, block[n], n); } static inline void tqi_idct_put(TqiContext *t, DCTELEM (*block)[64]) { 
MpegEncContext *s = &t->s; int linesize= t->frame.linesize[0]; uint8_t *dest_y = t->frame.data[0] + (s->mb_y * 16* linesize ) + s->mb_x * 16; uint8_t *dest_cb = t->frame.data[1] + (s->mb_y * 8 * t->frame.linesize[1]) + s->mb_x * 8; uint8_t *dest_cr = t->frame.data[2] + (s->mb_y * 8 * t->frame.linesize[2]) + s->mb_x * 8; s->dsp.idct_put(dest_y , linesize, block[0]); s->dsp.idct_put(dest_y + 8, linesize, block[1]); s->dsp.idct_put(dest_y + 8*linesize , linesize, block[2]); s->dsp.idct_put(dest_y + 8*linesize + 8, linesize, block[3]); if(!(s->avctx->flags&CODEC_FLAG_GRAY)) { s->dsp.idct_put(dest_cb, t->frame.linesize[1], block[4]); s->dsp.idct_put(dest_cr, t->frame.linesize[2], block[5]); } } static void tqi_calculate_qtable(MpegEncContext *s, int quant) { const int qscale = (215 - 2*quant)*5; int i; if (s->avctx->idct_algo==FF_IDCT_EA) { s->intra_matrix[0] = (ff_inv_aanscales[0]*ff_mpeg1_default_intra_matrix[0])>>11; for(i=1; i<64; i++) s->intra_matrix[i] = (ff_inv_aanscales[i]*ff_mpeg1_default_intra_matrix[i]*qscale + 32)>>14; }else{ s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0]; for(i=1; i<64; i++) s->intra_matrix[i] = (ff_mpeg1_default_intra_matrix[i]*qscale + 32)>>3; } } static int tqi_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; const uint8_t *buf_end = buf+buf_size; TqiContext *t = avctx->priv_data; MpegEncContext *s = &t->s; s->width = AV_RL16(&buf[0]); s->height = AV_RL16(&buf[2]); tqi_calculate_qtable(s, buf[4]); buf += 8; if (t->frame.data[0]) avctx->release_buffer(avctx, &t->frame); if (s->avctx->width!=s->width || s->avctx->height!=s->height) avcodec_set_dimensions(s->avctx, s->width, s->height); if(avctx->get_buffer(avctx, &t->frame) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } av_fast_malloc(&t->bitstream_buf, &t->bitstream_buf_size, (buf_end-buf) + FF_INPUT_BUFFER_PADDING_SIZE); if (!t->bitstream_buf) return 
AVERROR(ENOMEM); s->dsp.bswap_buf(t->bitstream_buf, (const uint32_t*)buf, (buf_end-buf)/4); init_get_bits(&s->gb, t->bitstream_buf, 8*(buf_end-buf)); s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 0; for (s->mb_y=0; s->mb_y<(avctx->height+15)/16; s->mb_y++) for (s->mb_x=0; s->mb_x<(avctx->width+15)/16; s->mb_x++) { tqi_decode_mb(s, t->block); tqi_idct_put(t, t->block); } *data_size = sizeof(AVFrame); *(AVFrame*)data = t->frame; return buf_size; } static av_cold int tqi_decode_end(AVCodecContext *avctx) { TqiContext *t = avctx->priv_data; if(t->frame.data[0]) avctx->release_buffer(avctx, &t->frame); av_free(t->bitstream_buf); return 0; } AVCodec eatqi_decoder = { "eatqi", AVMEDIA_TYPE_VIDEO, CODEC_ID_TQI, sizeof(TqiContext), tqi_decode_init, NULL, tqi_decode_end, tqi_decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Electronic Arts TQI Video"), };
123linslouis-android-video-cutter
jni/libavcodec/eatqi.c
C
asf20
5,458
/* * FLAC (Free Lossless Audio Codec) decoder * Copyright (c) 2003 Alex Beregszaszi * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * FLAC (Free Lossless Audio Codec) decoder * @author Alex Beregszaszi * * For more information on the FLAC format, visit: * http://flac.sourceforge.net/ * * This decoder can be used in 1 of 2 ways: Either raw FLAC data can be fed * through, starting from the initial 'fLaC' signature; or by passing the * 34-byte streaminfo structure through avctx->extradata[_size] followed * by data starting with the 0xFFF8 marker. 
*/ #include <limits.h> #include "libavutil/crc.h" #include "avcodec.h" #include "internal.h" #include "get_bits.h" #include "bytestream.h" #include "golomb.h" #include "flac.h" #include "flacdata.h" #undef NDEBUG #include <assert.h> typedef struct FLACContext { FLACSTREAMINFO AVCodecContext *avctx; ///< parent AVCodecContext GetBitContext gb; ///< GetBitContext initialized to start at the current frame int blocksize; ///< number of samples in the current frame int curr_bps; ///< bps for current subframe, adjusted for channel correlation and wasted bits int sample_shift; ///< shift required to make output samples 16-bit or 32-bit int is32; ///< flag to indicate if output should be 32-bit instead of 16-bit int ch_mode; ///< channel decorrelation type in the current frame int got_streaminfo; ///< indicates if the STREAMINFO has been read int32_t *decoded[FLAC_MAX_CHANNELS]; ///< decoded samples uint8_t *bitstream; unsigned int bitstream_size; unsigned int bitstream_index; unsigned int allocated_bitstream_size; } FLACContext; static const int sample_size_table[] = { 0, 8, 12, 0, 16, 20, 24, 0 }; static int64_t get_utf8(GetBitContext *gb) { int64_t val; GET_UTF8(val, get_bits(gb, 8), return -1;) return val; } static void allocate_buffers(FLACContext *s); int ff_flac_is_extradata_valid(AVCodecContext *avctx, enum FLACExtradataFormat *format, uint8_t **streaminfo_start) { if (!avctx->extradata || avctx->extradata_size < FLAC_STREAMINFO_SIZE) { av_log(avctx, AV_LOG_ERROR, "extradata NULL or too small.\n"); return 0; } if (AV_RL32(avctx->extradata) != MKTAG('f','L','a','C')) { /* extradata contains STREAMINFO only */ if (avctx->extradata_size != FLAC_STREAMINFO_SIZE) { av_log(avctx, AV_LOG_WARNING, "extradata contains %d bytes too many.\n", FLAC_STREAMINFO_SIZE-avctx->extradata_size); } *format = FLAC_EXTRADATA_FORMAT_STREAMINFO; *streaminfo_start = avctx->extradata; } else { if (avctx->extradata_size < 8+FLAC_STREAMINFO_SIZE) { av_log(avctx, AV_LOG_ERROR, "extradata too 
small.\n"); return 0; } *format = FLAC_EXTRADATA_FORMAT_FULL_HEADER; *streaminfo_start = &avctx->extradata[8]; } return 1; } static av_cold int flac_decode_init(AVCodecContext *avctx) { enum FLACExtradataFormat format; uint8_t *streaminfo; FLACContext *s = avctx->priv_data; s->avctx = avctx; avctx->sample_fmt = SAMPLE_FMT_S16; /* for now, the raw FLAC header is allowed to be passed to the decoder as frame data instead of extradata. */ if (!avctx->extradata) return 0; if (!ff_flac_is_extradata_valid(avctx, &format, &streaminfo)) return -1; /* initialize based on the demuxer-supplied streamdata header */ ff_flac_parse_streaminfo(avctx, (FLACStreaminfo *)s, streaminfo); if (s->bps > 16) avctx->sample_fmt = SAMPLE_FMT_S32; else avctx->sample_fmt = SAMPLE_FMT_S16; allocate_buffers(s); s->got_streaminfo = 1; return 0; } static void dump_headers(AVCodecContext *avctx, FLACStreaminfo *s) { av_log(avctx, AV_LOG_DEBUG, " Max Blocksize: %d\n", s->max_blocksize); av_log(avctx, AV_LOG_DEBUG, " Max Framesize: %d\n", s->max_framesize); av_log(avctx, AV_LOG_DEBUG, " Samplerate: %d\n", s->samplerate); av_log(avctx, AV_LOG_DEBUG, " Channels: %d\n", s->channels); av_log(avctx, AV_LOG_DEBUG, " Bits: %d\n", s->bps); } static void allocate_buffers(FLACContext *s) { int i; assert(s->max_blocksize); if (s->max_framesize == 0 && s->max_blocksize) { s->max_framesize = ff_flac_get_max_frame_size(s->max_blocksize, s->channels, s->bps); } for (i = 0; i < s->channels; i++) { s->decoded[i] = av_realloc(s->decoded[i], sizeof(int32_t)*s->max_blocksize); } if (s->allocated_bitstream_size < s->max_framesize) s->bitstream= av_fast_realloc(s->bitstream, &s->allocated_bitstream_size, s->max_framesize); } void ff_flac_parse_streaminfo(AVCodecContext *avctx, struct FLACStreaminfo *s, const uint8_t *buffer) { GetBitContext gb; init_get_bits(&gb, buffer, FLAC_STREAMINFO_SIZE*8); skip_bits(&gb, 16); /* skip min blocksize */ s->max_blocksize = get_bits(&gb, 16); if (s->max_blocksize < FLAC_MIN_BLOCKSIZE) { 
av_log(avctx, AV_LOG_WARNING, "invalid max blocksize: %d\n", s->max_blocksize); s->max_blocksize = 16; } skip_bits(&gb, 24); /* skip min frame size */ s->max_framesize = get_bits_long(&gb, 24); s->samplerate = get_bits_long(&gb, 20); s->channels = get_bits(&gb, 3) + 1; s->bps = get_bits(&gb, 5) + 1; avctx->channels = s->channels; avctx->sample_rate = s->samplerate; avctx->bits_per_raw_sample = s->bps; s->samples = get_bits_long(&gb, 32) << 4; s->samples |= get_bits(&gb, 4); skip_bits_long(&gb, 64); /* md5 sum */ skip_bits_long(&gb, 64); /* md5 sum */ dump_headers(avctx, s); } void ff_flac_parse_block_header(const uint8_t *block_header, int *last, int *type, int *size) { int tmp = bytestream_get_byte(&block_header); if (last) *last = tmp & 0x80; if (type) *type = tmp & 0x7F; if (size) *size = bytestream_get_be24(&block_header); } /** * Parse the STREAMINFO from an inline header. * @param s the flac decoding context * @param buf input buffer, starting with the "fLaC" marker * @param buf_size buffer size * @return non-zero if metadata is invalid */ static int parse_streaminfo(FLACContext *s, const uint8_t *buf, int buf_size) { int metadata_type, metadata_size; if (buf_size < FLAC_STREAMINFO_SIZE+8) { /* need more data */ return 0; } ff_flac_parse_block_header(&buf[4], NULL, &metadata_type, &metadata_size); if (metadata_type != FLAC_METADATA_TYPE_STREAMINFO || metadata_size != FLAC_STREAMINFO_SIZE) { return AVERROR_INVALIDDATA; } ff_flac_parse_streaminfo(s->avctx, (FLACStreaminfo *)s, &buf[8]); allocate_buffers(s); s->got_streaminfo = 1; return 0; } /** * Determine the size of an inline header. 
* @param buf input buffer, starting with the "fLaC" marker * @param buf_size buffer size * @return number of bytes in the header, or 0 if more data is needed */ static int get_metadata_size(const uint8_t *buf, int buf_size) { int metadata_last, metadata_size; const uint8_t *buf_end = buf + buf_size; buf += 4; do { ff_flac_parse_block_header(buf, &metadata_last, NULL, &metadata_size); buf += 4; if (buf + metadata_size > buf_end) { /* need more data in order to read the complete header */ return 0; } buf += metadata_size; } while (!metadata_last); return buf_size - (buf_end - buf); } static int decode_residuals(FLACContext *s, int channel, int pred_order) { int i, tmp, partition, method_type, rice_order; int sample = 0, samples; method_type = get_bits(&s->gb, 2); if (method_type > 1) { av_log(s->avctx, AV_LOG_ERROR, "illegal residual coding method %d\n", method_type); return -1; } rice_order = get_bits(&s->gb, 4); samples= s->blocksize >> rice_order; if (pred_order > samples) { av_log(s->avctx, AV_LOG_ERROR, "invalid predictor order: %i > %i\n", pred_order, samples); return -1; } sample= i= pred_order; for (partition = 0; partition < (1 << rice_order); partition++) { tmp = get_bits(&s->gb, method_type == 0 ? 4 : 5); if (tmp == (method_type == 0 ? 
15 : 31)) { tmp = get_bits(&s->gb, 5); for (; i < samples; i++, sample++) s->decoded[channel][sample] = get_sbits_long(&s->gb, tmp); } else { for (; i < samples; i++, sample++) { s->decoded[channel][sample] = get_sr_golomb_flac(&s->gb, tmp, INT_MAX, 0); } } i= 0; } return 0; } static int decode_subframe_fixed(FLACContext *s, int channel, int pred_order) { const int blocksize = s->blocksize; int32_t *decoded = s->decoded[channel]; int av_uninit(a), av_uninit(b), av_uninit(c), av_uninit(d), i; /* warm up samples */ for (i = 0; i < pred_order; i++) { decoded[i] = get_sbits_long(&s->gb, s->curr_bps); } if (decode_residuals(s, channel, pred_order) < 0) return -1; if (pred_order > 0) a = decoded[pred_order-1]; if (pred_order > 1) b = a - decoded[pred_order-2]; if (pred_order > 2) c = b - decoded[pred_order-2] + decoded[pred_order-3]; if (pred_order > 3) d = c - decoded[pred_order-2] + 2*decoded[pred_order-3] - decoded[pred_order-4]; switch (pred_order) { case 0: break; case 1: for (i = pred_order; i < blocksize; i++) decoded[i] = a += decoded[i]; break; case 2: for (i = pred_order; i < blocksize; i++) decoded[i] = a += b += decoded[i]; break; case 3: for (i = pred_order; i < blocksize; i++) decoded[i] = a += b += c += decoded[i]; break; case 4: for (i = pred_order; i < blocksize; i++) decoded[i] = a += b += c += d += decoded[i]; break; default: av_log(s->avctx, AV_LOG_ERROR, "illegal pred order %d\n", pred_order); return -1; } return 0; } static int decode_subframe_lpc(FLACContext *s, int channel, int pred_order) { int i, j; int coeff_prec, qlevel; int coeffs[32]; int32_t *decoded = s->decoded[channel]; /* warm up samples */ for (i = 0; i < pred_order; i++) { decoded[i] = get_sbits_long(&s->gb, s->curr_bps); } coeff_prec = get_bits(&s->gb, 4) + 1; if (coeff_prec == 16) { av_log(s->avctx, AV_LOG_ERROR, "invalid coeff precision\n"); return -1; } qlevel = get_sbits(&s->gb, 5); if (qlevel < 0) { av_log(s->avctx, AV_LOG_ERROR, "qlevel %d not supported, maybe buggy stream\n", 
qlevel); return -1; } for (i = 0; i < pred_order; i++) { coeffs[i] = get_sbits(&s->gb, coeff_prec); } if (decode_residuals(s, channel, pred_order) < 0) return -1; if (s->bps > 16) { int64_t sum; for (i = pred_order; i < s->blocksize; i++) { sum = 0; for (j = 0; j < pred_order; j++) sum += (int64_t)coeffs[j] * decoded[i-j-1]; decoded[i] += sum >> qlevel; } } else { for (i = pred_order; i < s->blocksize-1; i += 2) { int c; int d = decoded[i-pred_order]; int s0 = 0, s1 = 0; for (j = pred_order-1; j > 0; j--) { c = coeffs[j]; s0 += c*d; d = decoded[i-j]; s1 += c*d; } c = coeffs[0]; s0 += c*d; d = decoded[i] += s0 >> qlevel; s1 += c*d; decoded[i+1] += s1 >> qlevel; } if (i < s->blocksize) { int sum = 0; for (j = 0; j < pred_order; j++) sum += coeffs[j] * decoded[i-j-1]; decoded[i] += sum >> qlevel; } } return 0; } static inline int decode_subframe(FLACContext *s, int channel) { int type, wasted = 0; int i, tmp; s->curr_bps = s->bps; if (channel == 0) { if (s->ch_mode == FLAC_CHMODE_RIGHT_SIDE) s->curr_bps++; } else { if (s->ch_mode == FLAC_CHMODE_LEFT_SIDE || s->ch_mode == FLAC_CHMODE_MID_SIDE) s->curr_bps++; } if (get_bits1(&s->gb)) { av_log(s->avctx, AV_LOG_ERROR, "invalid subframe padding\n"); return -1; } type = get_bits(&s->gb, 6); if (get_bits1(&s->gb)) { wasted = 1; while (!get_bits1(&s->gb)) wasted++; s->curr_bps -= wasted; } if (s->curr_bps > 32) { av_log_missing_feature(s->avctx, "decorrelated bit depth > 32", 0); return -1; } //FIXME use av_log2 for types if (type == 0) { tmp = get_sbits_long(&s->gb, s->curr_bps); for (i = 0; i < s->blocksize; i++) s->decoded[channel][i] = tmp; } else if (type == 1) { for (i = 0; i < s->blocksize; i++) s->decoded[channel][i] = get_sbits_long(&s->gb, s->curr_bps); } else if ((type >= 8) && (type <= 12)) { if (decode_subframe_fixed(s, channel, type & ~0x8) < 0) return -1; } else if (type >= 32) { if (decode_subframe_lpc(s, channel, (type & ~0x20)+1) < 0) return -1; } else { av_log(s->avctx, AV_LOG_ERROR, "invalid coding 
type\n"); return -1; } if (wasted) { int i; for (i = 0; i < s->blocksize; i++) s->decoded[channel][i] <<= wasted; } return 0; } /** * Validate and decode a frame header. * @param avctx AVCodecContext to use as av_log() context * @param gb GetBitContext from which to read frame header * @param[out] fi frame information * @return non-zero on error, 0 if ok */ static int decode_frame_header(AVCodecContext *avctx, GetBitContext *gb, FLACFrameInfo *fi) { int bs_code, sr_code, bps_code; /* frame sync code */ skip_bits(gb, 16); /* block size and sample rate codes */ bs_code = get_bits(gb, 4); sr_code = get_bits(gb, 4); /* channels and decorrelation */ fi->ch_mode = get_bits(gb, 4); if (fi->ch_mode < FLAC_MAX_CHANNELS) { fi->channels = fi->ch_mode + 1; fi->ch_mode = FLAC_CHMODE_INDEPENDENT; } else if (fi->ch_mode <= FLAC_CHMODE_MID_SIDE) { fi->channels = 2; } else { av_log(avctx, AV_LOG_ERROR, "invalid channel mode: %d\n", fi->ch_mode); return -1; } /* bits per sample */ bps_code = get_bits(gb, 3); if (bps_code == 3 || bps_code == 7) { av_log(avctx, AV_LOG_ERROR, "invalid sample size code (%d)\n", bps_code); return -1; } fi->bps = sample_size_table[bps_code]; /* reserved bit */ if (get_bits1(gb)) { av_log(avctx, AV_LOG_ERROR, "broken stream, invalid padding\n"); return -1; } /* sample or frame count */ if (get_utf8(gb) < 0) { av_log(avctx, AV_LOG_ERROR, "utf8 fscked\n"); return -1; } /* blocksize */ if (bs_code == 0) { av_log(avctx, AV_LOG_ERROR, "reserved blocksize code: 0\n"); return -1; } else if (bs_code == 6) { fi->blocksize = get_bits(gb, 8) + 1; } else if (bs_code == 7) { fi->blocksize = get_bits(gb, 16) + 1; } else { fi->blocksize = ff_flac_blocksize_table[bs_code]; } /* sample rate */ if (sr_code < 12) { fi->samplerate = ff_flac_sample_rate_table[sr_code]; } else if (sr_code == 12) { fi->samplerate = get_bits(gb, 8) * 1000; } else if (sr_code == 13) { fi->samplerate = get_bits(gb, 16); } else if (sr_code == 14) { fi->samplerate = get_bits(gb, 16) * 10; } else { 
av_log(avctx, AV_LOG_ERROR, "illegal sample rate code %d\n", sr_code); return -1; } /* header CRC-8 check */ skip_bits(gb, 8); if (av_crc(av_crc_get_table(AV_CRC_8_ATM), 0, gb->buffer, get_bits_count(gb)/8)) { av_log(avctx, AV_LOG_ERROR, "header crc mismatch\n"); return -1; } return 0; } static int decode_frame(FLACContext *s) { int i; GetBitContext *gb = &s->gb; FLACFrameInfo fi; if (decode_frame_header(s->avctx, gb, &fi)) { av_log(s->avctx, AV_LOG_ERROR, "invalid frame header\n"); return -1; } if (fi.channels != s->channels) { av_log(s->avctx, AV_LOG_ERROR, "switching channel layout mid-stream " "is not supported\n"); return -1; } s->ch_mode = fi.ch_mode; if (fi.bps && fi.bps != s->bps) { av_log(s->avctx, AV_LOG_ERROR, "switching bps mid-stream is not " "supported\n"); return -1; } if (s->bps > 16) { s->avctx->sample_fmt = SAMPLE_FMT_S32; s->sample_shift = 32 - s->bps; s->is32 = 1; } else { s->avctx->sample_fmt = SAMPLE_FMT_S16; s->sample_shift = 16 - s->bps; s->is32 = 0; } if (fi.blocksize > s->max_blocksize) { av_log(s->avctx, AV_LOG_ERROR, "blocksize %d > %d\n", fi.blocksize, s->max_blocksize); return -1; } s->blocksize = fi.blocksize; if (fi.samplerate == 0) { fi.samplerate = s->samplerate; } else if (fi.samplerate != s->samplerate) { av_log(s->avctx, AV_LOG_WARNING, "sample rate changed from %d to %d\n", s->samplerate, fi.samplerate); } s->samplerate = s->avctx->sample_rate = fi.samplerate; // dump_headers(s->avctx, (FLACStreaminfo *)s); /* subframes */ for (i = 0; i < s->channels; i++) { if (decode_subframe(s, i) < 0) return -1; } align_get_bits(gb); /* frame footer */ skip_bits(gb, 16); /* data crc */ return 0; } static int flac_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; FLACContext *s = avctx->priv_data; int i, j = 0, input_buf_size = 0, bytes_read = 0; int16_t *samples_16 = data; int32_t *samples_32 = data; int alloc_data_size= *data_size; int 
output_size; *data_size=0; if (s->max_framesize == 0) { s->max_framesize= FFMAX(4, buf_size); // should hopefully be enough for the first header s->bitstream= av_fast_realloc(s->bitstream, &s->allocated_bitstream_size, s->max_framesize); } if (1 && s->max_framesize) { //FIXME truncated if (s->bitstream_size < 4 || AV_RL32(s->bitstream) != MKTAG('f','L','a','C')) buf_size= FFMIN(buf_size, s->max_framesize - FFMIN(s->bitstream_size, s->max_framesize)); input_buf_size= buf_size; if (s->bitstream_size + buf_size < buf_size || s->bitstream_index + s->bitstream_size + buf_size < s->bitstream_index) return -1; if (s->allocated_bitstream_size < s->bitstream_size + buf_size) s->bitstream= av_fast_realloc(s->bitstream, &s->allocated_bitstream_size, s->bitstream_size + buf_size); if (s->bitstream_index + s->bitstream_size + buf_size > s->allocated_bitstream_size) { memmove(s->bitstream, &s->bitstream[s->bitstream_index], s->bitstream_size); s->bitstream_index=0; } memcpy(&s->bitstream[s->bitstream_index + s->bitstream_size], buf, buf_size); buf= &s->bitstream[s->bitstream_index]; buf_size += s->bitstream_size; s->bitstream_size= buf_size; if (buf_size < s->max_framesize && input_buf_size) { return input_buf_size; } } /* check that there is at least the smallest decodable amount of data. this amount corresponds to the smallest valid FLAC frame possible. 
FF F8 69 02 00 00 9A 00 00 34 46 */ if (buf_size < 11) goto end; /* check for inline header */ if (AV_RB32(buf) == MKBETAG('f','L','a','C')) { if (!s->got_streaminfo && parse_streaminfo(s, buf, buf_size)) { av_log(s->avctx, AV_LOG_ERROR, "invalid header\n"); return -1; } bytes_read = get_metadata_size(buf, buf_size); goto end; } /* check for frame sync code and resync stream if necessary */ if ((AV_RB16(buf) & 0xFFFE) != 0xFFF8) { const uint8_t *buf_end = buf + buf_size; av_log(s->avctx, AV_LOG_ERROR, "FRAME HEADER not here\n"); while (buf+2 < buf_end && (AV_RB16(buf) & 0xFFFE) != 0xFFF8) buf++; bytes_read = buf_size - (buf_end - buf); goto end; // we may not have enough bits left to decode a frame, so try next time } /* decode frame */ init_get_bits(&s->gb, buf, buf_size*8); if (decode_frame(s) < 0) { av_log(s->avctx, AV_LOG_ERROR, "decode_frame() failed\n"); s->bitstream_size=0; s->bitstream_index=0; return -1; } bytes_read = (get_bits_count(&s->gb)+7)/8; /* check if allocated data size is large enough for output */ output_size = s->blocksize * s->channels * (s->is32 ? 
4 : 2); if (output_size > alloc_data_size) { av_log(s->avctx, AV_LOG_ERROR, "output data size is larger than " "allocated data size\n"); goto end; } *data_size = output_size; #define DECORRELATE(left, right)\ assert(s->channels == 2);\ for (i = 0; i < s->blocksize; i++) {\ int a= s->decoded[0][i];\ int b= s->decoded[1][i];\ if (s->is32) {\ *samples_32++ = (left) << s->sample_shift;\ *samples_32++ = (right) << s->sample_shift;\ } else {\ *samples_16++ = (left) << s->sample_shift;\ *samples_16++ = (right) << s->sample_shift;\ }\ }\ break; switch (s->ch_mode) { case FLAC_CHMODE_INDEPENDENT: for (j = 0; j < s->blocksize; j++) { for (i = 0; i < s->channels; i++) { if (s->is32) *samples_32++ = s->decoded[i][j] << s->sample_shift; else *samples_16++ = s->decoded[i][j] << s->sample_shift; } } break; case FLAC_CHMODE_LEFT_SIDE: DECORRELATE(a,a-b) case FLAC_CHMODE_RIGHT_SIDE: DECORRELATE(a+b,b) case FLAC_CHMODE_MID_SIDE: DECORRELATE( (a-=b>>1) + b, a) } end: if (bytes_read > buf_size) { av_log(s->avctx, AV_LOG_ERROR, "overread: %d\n", bytes_read - buf_size); s->bitstream_size=0; s->bitstream_index=0; return -1; } if (s->bitstream_size) { s->bitstream_index += bytes_read; s->bitstream_size -= bytes_read; return input_buf_size; } else return bytes_read; } static av_cold int flac_decode_close(AVCodecContext *avctx) { FLACContext *s = avctx->priv_data; int i; for (i = 0; i < s->channels; i++) { av_freep(&s->decoded[i]); } av_freep(&s->bitstream); return 0; } static void flac_flush(AVCodecContext *avctx) { FLACContext *s = avctx->priv_data; s->bitstream_size= s->bitstream_index= 0; } AVCodec flac_decoder = { "flac", AVMEDIA_TYPE_AUDIO, CODEC_ID_FLAC, sizeof(FLACContext), flac_decode_init, NULL, flac_decode_close, flac_decode_frame, CODEC_CAP_DELAY | CODEC_CAP_SUBFRAMES, /* FIXME: add a FLAC parser so that we will not need to use either of these capabilities */ .flush= flac_flush, .long_name= NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"), };
123linslouis-android-video-cutter
jni/libavcodec/flacdec.c
C
asf20
25,292
/* * copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * bitstream reader API header. */ #ifndef AVCODEC_GET_BITS_H #define AVCODEC_GET_BITS_H #include <stdint.h> #include <stdlib.h> #include <assert.h> #include "libavutil/bswap.h" #include "libavutil/common.h" #include "libavutil/intreadwrite.h" #include "libavutil/log.h" #include "mathops.h" #if defined(ALT_BITSTREAM_READER_LE) && !defined(ALT_BITSTREAM_READER) # define ALT_BITSTREAM_READER #endif #if !defined(LIBMPEG2_BITSTREAM_READER) && !defined(A32_BITSTREAM_READER) && !defined(ALT_BITSTREAM_READER) # if ARCH_ARM && !HAVE_FAST_UNALIGNED # define A32_BITSTREAM_READER # else # define ALT_BITSTREAM_READER //#define LIBMPEG2_BITSTREAM_READER //#define A32_BITSTREAM_READER # endif #endif /* bit input */ /* buffer, buffer_end and size_in_bits must be present and used by every reader */ typedef struct GetBitContext { const uint8_t *buffer, *buffer_end; #ifdef ALT_BITSTREAM_READER int index; #elif defined LIBMPEG2_BITSTREAM_READER uint8_t *buffer_ptr; uint32_t cache; int bit_count; #elif defined A32_BITSTREAM_READER uint32_t *buffer_ptr; uint32_t cache0; uint32_t cache1; int bit_count; #endif int size_in_bits; } GetBitContext; #define VLC_TYPE int16_t typedef struct 
VLC { int bits; VLC_TYPE (*table)[2]; ///< code, bits int table_size, table_allocated; } VLC; typedef struct RL_VLC_ELEM { int16_t level; int8_t len; uint8_t run; } RL_VLC_ELEM; /* Bitstream reader API docs: name arbitrary name which is used as prefix for the internal variables gb getbitcontext OPEN_READER(name, gb) loads gb into local variables CLOSE_READER(name, gb) stores local vars in gb UPDATE_CACHE(name, gb) refills the internal cache from the bitstream after this call at least MIN_CACHE_BITS will be available, GET_CACHE(name, gb) will output the contents of the internal cache, next bit is MSB of 32 or 64 bit (FIXME 64bit) SHOW_UBITS(name, gb, num) will return the next num bits SHOW_SBITS(name, gb, num) will return the next num bits and do sign extension SKIP_BITS(name, gb, num) will skip over the next num bits note, this is equivalent to SKIP_CACHE; SKIP_COUNTER SKIP_CACHE(name, gb, num) will remove the next num bits from the cache (note SKIP_COUNTER MUST be called before UPDATE_CACHE / CLOSE_READER) SKIP_COUNTER(name, gb, num) will increment the internal bit counter (see SKIP_CACHE & SKIP_BITS) LAST_SKIP_CACHE(name, gb, num) will remove the next num bits from the cache if it is needed for UPDATE_CACHE otherwise it will do nothing LAST_SKIP_BITS(name, gb, num) is equivalent to LAST_SKIP_CACHE; SKIP_COUNTER for examples see get_bits, show_bits, skip_bits, get_vlc */ #ifdef ALT_BITSTREAM_READER # define MIN_CACHE_BITS 25 # define OPEN_READER(name, gb)\ unsigned int name##_index= (gb)->index;\ int name##_cache= 0;\ # define CLOSE_READER(name, gb)\ (gb)->index= name##_index;\ # ifdef ALT_BITSTREAM_READER_LE # define UPDATE_CACHE(name, gb)\ name##_cache= AV_RL32( ((const uint8_t *)(gb)->buffer)+(name##_index>>3) ) >> (name##_index&0x07);\ # define SKIP_CACHE(name, gb, num)\ name##_cache >>= (num); # else # define UPDATE_CACHE(name, gb)\ name##_cache= AV_RB32( ((const uint8_t *)(gb)->buffer)+(name##_index>>3) ) << (name##_index&0x07);\ # define SKIP_CACHE(name, 
gb, num)\ name##_cache <<= (num); # endif // FIXME name? # define SKIP_COUNTER(name, gb, num)\ name##_index += (num);\ # define SKIP_BITS(name, gb, num)\ {\ SKIP_CACHE(name, gb, num)\ SKIP_COUNTER(name, gb, num)\ }\ # define LAST_SKIP_BITS(name, gb, num) SKIP_COUNTER(name, gb, num) # define LAST_SKIP_CACHE(name, gb, num) ; # ifdef ALT_BITSTREAM_READER_LE # define SHOW_UBITS(name, gb, num)\ zero_extend(name##_cache, num) # define SHOW_SBITS(name, gb, num)\ sign_extend(name##_cache, num) # else # define SHOW_UBITS(name, gb, num)\ NEG_USR32(name##_cache, num) # define SHOW_SBITS(name, gb, num)\ NEG_SSR32(name##_cache, num) # endif # define GET_CACHE(name, gb)\ ((uint32_t)name##_cache) static inline int get_bits_count(const GetBitContext *s){ return s->index; } static inline void skip_bits_long(GetBitContext *s, int n){ s->index += n; } #elif defined LIBMPEG2_BITSTREAM_READER //libmpeg2 like reader # define MIN_CACHE_BITS 17 # define OPEN_READER(name, gb)\ int name##_bit_count=(gb)->bit_count;\ int name##_cache= (gb)->cache;\ uint8_t * name##_buffer_ptr=(gb)->buffer_ptr;\ # define CLOSE_READER(name, gb)\ (gb)->bit_count= name##_bit_count;\ (gb)->cache= name##_cache;\ (gb)->buffer_ptr= name##_buffer_ptr;\ # define UPDATE_CACHE(name, gb)\ if(name##_bit_count >= 0){\ name##_cache+= AV_RB16(name##_buffer_ptr) << name##_bit_count; \ name##_buffer_ptr+=2;\ name##_bit_count-= 16;\ }\ # define SKIP_CACHE(name, gb, num)\ name##_cache <<= (num);\ # define SKIP_COUNTER(name, gb, num)\ name##_bit_count += (num);\ # define SKIP_BITS(name, gb, num)\ {\ SKIP_CACHE(name, gb, num)\ SKIP_COUNTER(name, gb, num)\ }\ # define LAST_SKIP_BITS(name, gb, num) SKIP_BITS(name, gb, num) # define LAST_SKIP_CACHE(name, gb, num) SKIP_CACHE(name, gb, num) # define SHOW_UBITS(name, gb, num)\ NEG_USR32(name##_cache, num) # define SHOW_SBITS(name, gb, num)\ NEG_SSR32(name##_cache, num) # define GET_CACHE(name, gb)\ ((uint32_t)name##_cache) static inline int get_bits_count(const GetBitContext *s){ return 
(s->buffer_ptr - s->buffer)*8 - 16 + s->bit_count; } static inline void skip_bits_long(GetBitContext *s, int n){ OPEN_READER(re, s) re_bit_count += n; re_buffer_ptr += 2*(re_bit_count>>4); re_bit_count &= 15; re_cache = ((re_buffer_ptr[-2]<<8) + re_buffer_ptr[-1]) << (16+re_bit_count); UPDATE_CACHE(re, s) CLOSE_READER(re, s) } #elif defined A32_BITSTREAM_READER # define MIN_CACHE_BITS 32 # define OPEN_READER(name, gb)\ int name##_bit_count=(gb)->bit_count;\ uint32_t name##_cache0= (gb)->cache0;\ uint32_t name##_cache1= (gb)->cache1;\ uint32_t * name##_buffer_ptr=(gb)->buffer_ptr;\ # define CLOSE_READER(name, gb)\ (gb)->bit_count= name##_bit_count;\ (gb)->cache0= name##_cache0;\ (gb)->cache1= name##_cache1;\ (gb)->buffer_ptr= name##_buffer_ptr;\ # define UPDATE_CACHE(name, gb)\ if(name##_bit_count > 0){\ const uint32_t next= be2me_32( *name##_buffer_ptr );\ name##_cache0 |= NEG_USR32(next,name##_bit_count);\ name##_cache1 |= next<<name##_bit_count;\ name##_buffer_ptr++;\ name##_bit_count-= 32;\ }\ #if ARCH_X86 # define SKIP_CACHE(name, gb, num)\ __asm__(\ "shldl %2, %1, %0 \n\t"\ "shll %2, %1 \n\t"\ : "+r" (name##_cache0), "+r" (name##_cache1)\ : "Ic" ((uint8_t)(num))\ ); #else # define SKIP_CACHE(name, gb, num)\ name##_cache0 <<= (num);\ name##_cache0 |= NEG_USR32(name##_cache1,num);\ name##_cache1 <<= (num); #endif # define SKIP_COUNTER(name, gb, num)\ name##_bit_count += (num);\ # define SKIP_BITS(name, gb, num)\ {\ SKIP_CACHE(name, gb, num)\ SKIP_COUNTER(name, gb, num)\ }\ # define LAST_SKIP_BITS(name, gb, num) SKIP_BITS(name, gb, num) # define LAST_SKIP_CACHE(name, gb, num) SKIP_CACHE(name, gb, num) # define SHOW_UBITS(name, gb, num)\ NEG_USR32(name##_cache0, num) # define SHOW_SBITS(name, gb, num)\ NEG_SSR32(name##_cache0, num) # define GET_CACHE(name, gb)\ (name##_cache0) static inline int get_bits_count(const GetBitContext *s){ return ((uint8_t*)s->buffer_ptr - s->buffer)*8 - 32 + s->bit_count; } static inline void skip_bits_long(GetBitContext *s, int n){ 
OPEN_READER(re, s) re_bit_count += n; re_buffer_ptr += re_bit_count>>5; re_bit_count &= 31; re_cache0 = be2me_32( re_buffer_ptr[-1] ) << re_bit_count; re_cache1 = 0; UPDATE_CACHE(re, s) CLOSE_READER(re, s) } #endif /** * read mpeg1 dc style vlc (sign bit + mantisse with no MSB). * if MSB not set it is negative * @param n length in bits * @author BERO */ static inline int get_xbits(GetBitContext *s, int n){ register int sign; register int32_t cache; OPEN_READER(re, s) UPDATE_CACHE(re, s) cache = GET_CACHE(re,s); sign=(~cache)>>31; LAST_SKIP_BITS(re, s, n) CLOSE_READER(re, s) return (NEG_USR32(sign ^ cache, n) ^ sign) - sign; } static inline int get_sbits(GetBitContext *s, int n){ register int tmp; OPEN_READER(re, s) UPDATE_CACHE(re, s) tmp= SHOW_SBITS(re, s, n); LAST_SKIP_BITS(re, s, n) CLOSE_READER(re, s) return tmp; } /** * reads 1-17 bits. * Note, the alt bitstream reader can read up to 25 bits, but the libmpeg2 reader can't */ static inline unsigned int get_bits(GetBitContext *s, int n){ register int tmp; OPEN_READER(re, s) UPDATE_CACHE(re, s) tmp= SHOW_UBITS(re, s, n); LAST_SKIP_BITS(re, s, n) CLOSE_READER(re, s) return tmp; } /** * shows 1-17 bits. 
* Note, the alt bitstream reader can read up to 25 bits, but the libmpeg2 reader can't */ static inline unsigned int show_bits(GetBitContext *s, int n){ register int tmp; OPEN_READER(re, s) UPDATE_CACHE(re, s) tmp= SHOW_UBITS(re, s, n); // CLOSE_READER(re, s) return tmp; } static inline void skip_bits(GetBitContext *s, int n){ //Note gcc seems to optimize this to s->index+=n for the ALT_READER :)) OPEN_READER(re, s) UPDATE_CACHE(re, s) LAST_SKIP_BITS(re, s, n) CLOSE_READER(re, s) } static inline unsigned int get_bits1(GetBitContext *s){ #ifdef ALT_BITSTREAM_READER unsigned int index= s->index; uint8_t result= s->buffer[ index>>3 ]; #ifdef ALT_BITSTREAM_READER_LE result>>= (index&0x07); result&= 1; #else result<<= (index&0x07); result>>= 8 - 1; #endif index++; s->index= index; return result; #else return get_bits(s, 1); #endif } static inline unsigned int show_bits1(GetBitContext *s){ return show_bits(s, 1); } static inline void skip_bits1(GetBitContext *s){ skip_bits(s, 1); } /** * reads 0-32 bits. */ static inline unsigned int get_bits_long(GetBitContext *s, int n){ if(n<=MIN_CACHE_BITS) return get_bits(s, n); else{ #ifdef ALT_BITSTREAM_READER_LE int ret= get_bits(s, 16); return ret | (get_bits(s, n-16) << 16); #else int ret= get_bits(s, 16) << (n-16); return ret | get_bits(s, n-16); #endif } } /** * reads 0-32 bits as a signed integer. */ static inline int get_sbits_long(GetBitContext *s, int n) { return sign_extend(get_bits_long(s, n), n); } /** * shows 0-32 bits. */ static inline unsigned int show_bits_long(GetBitContext *s, int n){ if(n<=MIN_CACHE_BITS) return show_bits(s, n); else{ GetBitContext gb= *s; return get_bits_long(&gb, n); } } static inline int check_marker(GetBitContext *s, const char *msg) { int bit= get_bits1(s); if(!bit) av_log(NULL, AV_LOG_INFO, "Marker bit missing %s\n", msg); return bit; } /** * init GetBitContext. 
 * @param buffer bitstream buffer, must be FF_INPUT_BUFFER_PADDING_SIZE bytes larger then the actual read bits
 * because some optimized bitstream readers read 32 or 64 bit at once and could read over the end
 * @param bit_size the size of the buffer in bits
 *
 * While GetBitContext stores the buffer size, for performance reasons you are
 * responsible for checking for the buffer end yourself (take advantage of the padding)!
 */
static inline void init_get_bits(GetBitContext *s,
                   const uint8_t *buffer, int bit_size)
{
    int buffer_size= (bit_size+7)>>3;
    /* degrade to an empty bitstream on negative sizes (also catches
     * bit_size values large enough to overflow buffer_size) */
    if(buffer_size < 0 || bit_size < 0) {
        buffer_size = bit_size = 0;
        buffer = NULL;
    }

    s->buffer= buffer;
    s->size_in_bits= bit_size;
    s->buffer_end= buffer + buffer_size;
#ifdef ALT_BITSTREAM_READER
    s->index=0;
#elif defined LIBMPEG2_BITSTREAM_READER
    /* align the read pointer down to 16 bits; the skipped byte (if any) is
     * compensated for in bit_count */
    s->buffer_ptr = (uint8_t*)((intptr_t)buffer&(~1));
    s->bit_count = 16 + 8*((intptr_t)buffer&1);
    skip_bits_long(s, 0);
#elif defined A32_BITSTREAM_READER
    /* align the read pointer down to 32 bits; the skipped bytes are
     * compensated for in bit_count */
    s->buffer_ptr = (uint32_t*)((intptr_t)buffer&(~3));
    s->bit_count = 32 + 8*((intptr_t)buffer&3);
    skip_bits_long(s, 0);
#endif
}

/** skips forward to the next byte boundary (0-7 bits). */
static inline void align_get_bits(GetBitContext *s)
{
    int n= (-get_bits_count(s)) & 7;
    if(n) skip_bits(s, n);
}

/* convenience wrapper around init_vlc_sparse() without a symbol table */
#define init_vlc(vlc, nb_bits, nb_codes,\
                 bits, bits_wrap, bits_size,\
                 codes, codes_wrap, codes_size,\
                 flags)\
        init_vlc_sparse(vlc, nb_bits, nb_codes,\
                 bits, bits_wrap, bits_size,\
                 codes, codes_wrap, codes_size,\
                 NULL, 0, 0, flags)

int init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
             const void *bits, int bits_wrap, int bits_size,
             const void *codes, int codes_wrap, int codes_size,
             const void *symbols, int symbols_wrap, int symbols_size,
             int flags);
#define INIT_VLC_LE             2
#define INIT_VLC_USE_NEW_STATIC 4
void free_vlc(VLC *vlc);

/* declares a function-local static VLC table of static_size entries and
 * initializes it once via init_vlc() */
#define INIT_VLC_STATIC(vlc, bits, a,b,c,d,e,f,g, static_size)\
{\
    static VLC_TYPE table[static_size][2];\
    (vlc)->table= table;\
    (vlc)->table_allocated= static_size;\
    init_vlc(vlc, bits, a,b,c,d,e,f,g, INIT_VLC_USE_NEW_STATIC);\
}

/**
 *
 * If the vlc code is invalid and
   max_depth=1, then no bits will be removed.
 * If the vlc code is invalid and max_depth>1, then the number of bits removed
 * is undefined.
 */
/* Decodes one VLC code from the cache using a multi-level table: a negative
 * table "length" entry (-n) means the code is longer than the first lookup
 * and the matching sub-table (offset stored in code) must be consulted with
 * n more bits, up to max_depth levels. */
#define GET_VLC(code, name, gb, table, bits, max_depth)\
{\
    int n, nb_bits;\
    unsigned int index;\
\
    index= SHOW_UBITS(name, gb, bits);\
    code = table[index][0];\
    n    = table[index][1];\
\
    if(max_depth > 1 && n < 0){\
        LAST_SKIP_BITS(name, gb, bits)\
        UPDATE_CACHE(name, gb)\
\
        nb_bits = -n;\
\
        index= SHOW_UBITS(name, gb, nb_bits) + code;\
        code = table[index][0];\
        n    = table[index][1];\
        if(max_depth > 2 && n < 0){\
            LAST_SKIP_BITS(name, gb, nb_bits)\
            UPDATE_CACHE(name, gb)\
\
            nb_bits = -n;\
\
            index= SHOW_UBITS(name, gb, nb_bits) + code;\
            code = table[index][0];\
            n    = table[index][1];\
        }\
    }\
    SKIP_BITS(name, gb, n)\
}

/* Same multi-level scheme as GET_VLC but for run-level tables (entries with
 * .level/.run/.len); need_update controls whether the cache is refilled
 * between lookup levels. */
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)\
{\
    int n, nb_bits;\
    unsigned int index;\
\
    index= SHOW_UBITS(name, gb, bits);\
    level = table[index].level;\
    n     = table[index].len;\
\
    if(max_depth > 1 && n < 0){\
        SKIP_BITS(name, gb, bits)\
        if(need_update){\
            UPDATE_CACHE(name, gb)\
        }\
\
        nb_bits = -n;\
\
        index= SHOW_UBITS(name, gb, nb_bits) + level;\
        level = table[index].level;\
        n     = table[index].len;\
    }\
    run= table[index].run;\
    SKIP_BITS(name, gb, n)\
}

/**
 * parses a vlc code, faster then get_vlc()
 * @param bits is the number of bits which will be read at once, must be
 *             identical to nb_bits in init_vlc()
 * @param max_depth is the number of times bits bits must be read to completely
 *                  read the longest vlc code
 *                  = (max_vlc_length + bits - 1) / bits
 */
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE (*table)[2],
                                  int bits, int max_depth)
{
    int code;

    OPEN_READER(re, s)
    UPDATE_CACHE(re, s)

    GET_VLC(code, re, s, table, bits, max_depth)

    CLOSE_READER(re, s)
    return code;
}

//#define TRACE

#ifdef TRACE
/* prints the low n bits of 'bits' in binary, left-padded to 24 columns */
static inline void print_bin(int bits, int n){
    int i;

    for(i=n-1; i>=0; i--){
        av_log(NULL, AV_LOG_DEBUG, "%d", (bits>>i)&1);
    }
    for(i=n; i<24; i++)
        av_log(NULL, AV_LOG_DEBUG, " ");
}

/* tracing wrapper around get_bits(): logs value, length, and bit position */
static inline int get_bits_trace(GetBitContext *s, int n, char *file, const char *func, int line){
    int r= get_bits(s, n);

    print_bin(r, n);
    av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d bit @%5d in %s %s:%d\n", r, n, r, get_bits_count(s)-n, file, func, line);
    return r;
}

/* tracing wrapper around get_vlc2(): reconstructs the consumed code bits by
 * peeking 24 bits before decoding and diffing the bit position afterwards */
static inline int get_vlc_trace(GetBitContext *s, VLC_TYPE (*table)[2], int bits, int max_depth, char *file, const char *func, int line){
    int show= show_bits(s, 24);
    int pos= get_bits_count(s);
    int r= get_vlc2(s, table, bits, max_depth);
    int len= get_bits_count(s) - pos;
    int bits2= show>>(24-len);

    print_bin(bits2, len);

    av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d vlc @%5d in %s %s:%d\n", bits2, len, r, pos, file, func, line);
    return r;
}

/* tracing wrapper around get_xbits() */
static inline int get_xbits_trace(GetBitContext *s, int n, char *file, const char *func, int line){
    int show= show_bits(s, n);
    int r= get_xbits(s, n);

    print_bin(show, n);
    av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d xbt @%5d in %s %s:%d\n", show, n, r, get_bits_count(s)-n, file, func, line);
    return r;
}

#define get_bits(s, n)  get_bits_trace(s, n, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define get_bits1(s)    get_bits_trace(s, 1, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define get_xbits(s, n) get_xbits_trace(s, n, __FILE__, __PRETTY_FUNCTION__, __LINE__)

#define get_vlc(s, vlc)             get_vlc_trace(s, (vlc)->table, (vlc)->bits, 3, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define get_vlc2(s, tab, bits, max) get_vlc_trace(s, tab, bits, max, __FILE__, __PRETTY_FUNCTION__, __LINE__)

#define tprintf(p, ...) av_log(p, AV_LOG_DEBUG, __VA_ARGS__)

#else //TRACE
#define tprintf(p, ...) {}
#endif

/* decodes 0 as '0', 1 as '10', 2 as '11' */
static inline int decode012(GetBitContext *gb){
    int n;
    n = get_bits1(gb);
    if (n == 0)
        return 0;
    else
        return get_bits1(gb) + 1;
}

/* decodes 0 as '1', 2 as '00', 1 as '01' */
static inline int decode210(GetBitContext *gb){
    if (get_bits1(gb))
        return 0;
    else
        return 2 - get_bits1(gb);
}

/* number of bits remaining in the bitstream (may go negative on overread) */
static inline int get_bits_left(GetBitContext *gb)
{
    return gb->size_in_bits - get_bits_count(gb);
}

#endif /* AVCODEC_GET_BITS_H */
123linslouis-android-video-cutter
jni/libavcodec/get_bits.h
C
asf20
19,176
/*
 * rectangle filling function
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * useful rectangle filling function
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#ifndef AVCODEC_RECTANGLE_H
#define AVCODEC_RECTANGLE_H

#include <assert.h>
#include "config.h"
#include "libavutil/common.h"
#include "dsputil.h"

/**
 * fill a rectangle.
 * @param h height of the rectangle, should be a constant
 * @param w width of the rectangle, should be a constant
 * @param size the size of val (1, 2 or 4), should be a constant
 *
 * The element value val is replicated across a machine word and stored with
 * the widest aligned accesses available; after scaling, w is the row width
 * in BYTES (2, 4, 8 or 16) and each branch unrolls up to 4 rows, returning
 * early for h==1 or h==2.
 */
static av_always_inline void fill_rectangle(void *vp, int w, int h, int stride, uint32_t val, int size){
    uint8_t *p= (uint8_t*)vp;
    assert(size==1 || size==2 || size==4);
    assert(w<=4);

    /* convert element counts to byte counts */
    w      *= size;
    stride *= size;

    /* destination must be aligned to the access width used below */
    assert((((long)vp)&(FFMIN(w, STRIDE_ALIGN)-1)) == 0);
    assert((stride&(w-1))==0);
    if(w==2){
        /* 2 bytes per row: either one 16-bit element or two replicated bytes */
        const uint16_t v= size==4 ? val : val*0x0101;
        *(uint16_t*)(p + 0*stride)= v;
        if(h==1) return;
        *(uint16_t*)(p + 1*stride)= v;
        if(h==2) return;
        *(uint16_t*)(p + 2*stride)= v;
        *(uint16_t*)(p + 3*stride)= v;
    }else if(w==4){
        /* 4 bytes per row: replicate val to fill a 32-bit word */
        const uint32_t v= size==4 ? val : size==2 ? val*0x00010001 : val*0x01010101;
        *(uint32_t*)(p + 0*stride)= v;
        if(h==1) return;
        *(uint32_t*)(p + 1*stride)= v;
        if(h==2) return;
        *(uint32_t*)(p + 2*stride)= v;
        *(uint32_t*)(p + 3*stride)= v;
    }else if(w==8){
    //gcc can't optimize 64bit math on x86_32
#if HAVE_FAST_64BIT
        /* 8 bytes per row with native 64-bit stores */
        const uint64_t v= size==2 ? val*0x0001000100010001ULL : val*0x0100000001ULL;
        *(uint64_t*)(p + 0*stride)= v;
        if(h==1) return;
        *(uint64_t*)(p + 1*stride)= v;
        if(h==2) return;
        *(uint64_t*)(p + 2*stride)= v;
        *(uint64_t*)(p + 3*stride)= v;
    }else if(w==16){
        /* 16 bytes per row: two 64-bit stores per row */
        const uint64_t v= val*0x0100000001ULL;
        *(uint64_t*)(p + 0+0*stride)= v;
        *(uint64_t*)(p + 8+0*stride)= v;
        *(uint64_t*)(p + 0+1*stride)= v;
        *(uint64_t*)(p + 8+1*stride)= v;
        if(h==2) return;
        *(uint64_t*)(p + 0+2*stride)= v;
        *(uint64_t*)(p + 8+2*stride)= v;
        *(uint64_t*)(p + 0+3*stride)= v;
        *(uint64_t*)(p + 8+3*stride)= v;
#else
        /* 8 bytes per row emulated with pairs of 32-bit stores */
        const uint32_t v= size==2 ? val*0x00010001 : val;
        *(uint32_t*)(p + 0+0*stride)= v;
        *(uint32_t*)(p + 4+0*stride)= v;
        if(h==1) return;
        *(uint32_t*)(p + 0+1*stride)= v;
        *(uint32_t*)(p + 4+1*stride)= v;
        if(h==2) return;
        *(uint32_t*)(p + 0+2*stride)= v;
        *(uint32_t*)(p + 4+2*stride)= v;
        *(uint32_t*)(p + 0+3*stride)= v;
        *(uint32_t*)(p + 4+3*stride)= v;
    }else if(w==16){
        /* 16 bytes per row: four 32-bit stores per row (val is already
         * a full 32-bit element here) */
        *(uint32_t*)(p + 0+0*stride)= val;
        *(uint32_t*)(p + 4+0*stride)= val;
        *(uint32_t*)(p + 8+0*stride)= val;
        *(uint32_t*)(p +12+0*stride)= val;
        *(uint32_t*)(p + 0+1*stride)= val;
        *(uint32_t*)(p + 4+1*stride)= val;
        *(uint32_t*)(p + 8+1*stride)= val;
        *(uint32_t*)(p +12+1*stride)= val;
        if(h==2) return;
        *(uint32_t*)(p + 0+2*stride)= val;
        *(uint32_t*)(p + 4+2*stride)= val;
        *(uint32_t*)(p + 8+2*stride)= val;
        *(uint32_t*)(p +12+2*stride)= val;
        *(uint32_t*)(p + 0+3*stride)= val;
        *(uint32_t*)(p + 4+3*stride)= val;
        *(uint32_t*)(p + 8+3*stride)= val;
        *(uint32_t*)(p +12+3*stride)= val;
#endif
    }else assert(0);
    assert(h==4);
}

#endif /* AVCODEC_RECTANGLE_H */
123linslouis-android-video-cutter
jni/libavcodec/rectangle.h
C
asf20
4,298
/* * Copyright (C) 2003-2004 the ffmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * On2 VP3 Video Decoder * * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx) * For more information about the VP3 coding process, visit: * http://wiki.multimedia.cx/index.php?title=On2_VP3 * * Theora decoder by Alex Beregszaszi */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "avcodec.h" #include "dsputil.h" #include "get_bits.h" #include "vp3data.h" #include "xiph.h" #define FRAGMENT_PIXELS 8 static av_cold int vp3_decode_end(AVCodecContext *avctx); //FIXME split things out into their own arrays typedef struct Vp3Fragment { int16_t dc; uint8_t coding_method; uint8_t qpi; } Vp3Fragment; #define SB_NOT_CODED 0 #define SB_PARTIALLY_CODED 1 #define SB_FULLY_CODED 2 // This is the maximum length of a single long bit run that can be encoded // for superblock coding or block qps. Theora special-cases this to read a // bit instead of flipping the current bit to allow for runs longer than 4129. 
#define MAXIMUM_LONG_BIT_RUN 4129 #define MODE_INTER_NO_MV 0 #define MODE_INTRA 1 #define MODE_INTER_PLUS_MV 2 #define MODE_INTER_LAST_MV 3 #define MODE_INTER_PRIOR_LAST 4 #define MODE_USING_GOLDEN 5 #define MODE_GOLDEN_MV 6 #define MODE_INTER_FOURMV 7 #define CODING_MODE_COUNT 8 /* special internal mode */ #define MODE_COPY 8 /* There are 6 preset schemes, plus a free-form scheme */ static const int ModeAlphabet[6][CODING_MODE_COUNT] = { /* scheme 1: Last motion vector dominates */ { MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST, MODE_INTER_PLUS_MV, MODE_INTER_NO_MV, MODE_INTRA, MODE_USING_GOLDEN, MODE_GOLDEN_MV, MODE_INTER_FOURMV }, /* scheme 2 */ { MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST, MODE_INTER_NO_MV, MODE_INTER_PLUS_MV, MODE_INTRA, MODE_USING_GOLDEN, MODE_GOLDEN_MV, MODE_INTER_FOURMV }, /* scheme 3 */ { MODE_INTER_LAST_MV, MODE_INTER_PLUS_MV, MODE_INTER_PRIOR_LAST, MODE_INTER_NO_MV, MODE_INTRA, MODE_USING_GOLDEN, MODE_GOLDEN_MV, MODE_INTER_FOURMV }, /* scheme 4 */ { MODE_INTER_LAST_MV, MODE_INTER_PLUS_MV, MODE_INTER_NO_MV, MODE_INTER_PRIOR_LAST, MODE_INTRA, MODE_USING_GOLDEN, MODE_GOLDEN_MV, MODE_INTER_FOURMV }, /* scheme 5: No motion vector dominates */ { MODE_INTER_NO_MV, MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST, MODE_INTER_PLUS_MV, MODE_INTRA, MODE_USING_GOLDEN, MODE_GOLDEN_MV, MODE_INTER_FOURMV }, /* scheme 6 */ { MODE_INTER_NO_MV, MODE_USING_GOLDEN, MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST, MODE_INTER_PLUS_MV, MODE_INTRA, MODE_GOLDEN_MV, MODE_INTER_FOURMV }, }; static const uint8_t hilbert_offset[16][2] = { {0,0}, {1,0}, {1,1}, {0,1}, {0,2}, {0,3}, {1,3}, {1,2}, {2,2}, {2,3}, {3,3}, {3,2}, {3,1}, {2,1}, {2,0}, {3,0} }; #define MIN_DEQUANT_VAL 2 typedef struct Vp3DecodeContext { AVCodecContext *avctx; int theora, theora_tables; int version; int width, height; int chroma_x_shift, chroma_y_shift; AVFrame golden_frame; AVFrame last_frame; AVFrame current_frame; int keyframe; DSPContext dsp; int flipped_image; int last_slice_end; int qps[3]; int nqps; int 
last_qps[3]; int superblock_count; int y_superblock_width; int y_superblock_height; int y_superblock_count; int c_superblock_width; int c_superblock_height; int c_superblock_count; int u_superblock_start; int v_superblock_start; unsigned char *superblock_coding; int macroblock_count; int macroblock_width; int macroblock_height; int fragment_count; int fragment_width[2]; int fragment_height[2]; Vp3Fragment *all_fragments; int fragment_start[3]; int data_offset[3]; int8_t (*motion_val[2])[2]; ScanTable scantable; /* tables */ uint16_t coded_dc_scale_factor[64]; uint32_t coded_ac_scale_factor[64]; uint8_t base_matrix[384][64]; uint8_t qr_count[2][3]; uint8_t qr_size [2][3][64]; uint16_t qr_base[2][3][64]; /** * This is a list of all tokens in bitstream order. Reordering takes place * by pulling from each level during IDCT. As a consequence, IDCT must be * in Hilbert order, making the minimum slice height 64 for 4:2:0 and 32 * otherwise. The 32 different tokens with up to 12 bits of extradata are * collapsed into 3 types, packed as follows: * (from the low to high bits) * * 2 bits: type (0,1,2) * 0: EOB run, 14 bits for run length (12 needed) * 1: zero run, 7 bits for run length * 7 bits for the next coefficient (3 needed) * 2: coefficient, 14 bits (11 needed) * * Coefficients are signed, so are packed in the highest bits for automatic * sign extension. 
*/ int16_t *dct_tokens[3][64]; int16_t *dct_tokens_base; #define TOKEN_EOB(eob_run) ((eob_run) << 2) #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) << 9) + ((zero_run) << 2) + 1) #define TOKEN_COEFF(coeff) (((coeff) << 2) + 2) /** * number of blocks that contain DCT coefficients at the given level or higher */ int num_coded_frags[3][64]; int total_num_coded_frags; /* this is a list of indexes into the all_fragments array indicating * which of the fragments are coded */ int *coded_fragment_list[3]; VLC dc_vlc[16]; VLC ac_vlc_1[16]; VLC ac_vlc_2[16]; VLC ac_vlc_3[16]; VLC ac_vlc_4[16]; VLC superblock_run_length_vlc; VLC fragment_run_length_vlc; VLC mode_code_vlc; VLC motion_vector_vlc; /* these arrays need to be on 16-byte boundaries since SSE2 operations * index into them */ DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64]; //<qmat[qpi][is_inter][plane] /* This table contains superblock_count * 16 entries. Each set of 16 * numbers corresponds to the fragment indexes 0..15 of the superblock. * An entry will be -1 to indicate that no entry corresponds to that * index. */ int *superblock_fragments; /* This is an array that indicates how a particular macroblock * is coded. */ unsigned char *macroblock_coding; uint8_t edge_emu_buffer[9*2048]; //FIXME dynamic alloc int8_t qscale_table[2048]; //FIXME dynamic alloc (width+15)/16 /* Huffman decode */ int hti; unsigned int hbits; int entries; int huff_code_size; uint32_t huffman_table[80][32][2]; uint8_t filter_limit_values[64]; DECLARE_ALIGNED(8, int, bounding_values_array)[256+2]; } Vp3DecodeContext; /************************************************************************ * VP3 specific functions ************************************************************************/ /* * This function sets up all of the various blocks mappings: * superblocks <-> fragments, macroblocks <-> fragments, * superblocks <-> macroblocks * * Returns 0 is successful; returns 1 if *anything* went wrong. 
 */
static int init_block_mapping(Vp3DecodeContext *s)
{
    int sb_x, sb_y, plane;
    int x, y, i, j = 0;

    /* For each plane, walk every superblock and record which fragment each
     * of its 16 Hilbert-ordered cells maps to (-1 for out-of-bounds cells). */
    for (plane = 0; plane < 3; plane++) {
        int sb_width    = plane ? s->c_superblock_width  : s->y_superblock_width;
        int sb_height   = plane ? s->c_superblock_height : s->y_superblock_height;
        int frag_width  = s->fragment_width[!!plane];
        int frag_height = s->fragment_height[!!plane];

        for (sb_y = 0; sb_y < sb_height; sb_y++)
            for (sb_x = 0; sb_x < sb_width; sb_x++)
                for (i = 0; i < 16; i++) {
                    x = 4*sb_x + hilbert_offset[i][0];
                    y = 4*sb_y + hilbert_offset[i][1];

                    if (x < frag_width && y < frag_height)
                        s->superblock_fragments[j++] = s->fragment_start[plane] + y*frag_width + x;
                    else
                        s->superblock_fragments[j++] = -1;
                }
    }

    return 0;  /* successful path out */
}

/*
 * This function sets up the dequantization tables used for a particular
 * frame.
 */
static void init_dequantizer(Vp3DecodeContext *s, int qpi)
{
    int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
    int dc_scale_factor = s->coded_dc_scale_factor[s->qps[qpi]];
    int i, plane, inter, qri, bmi, bmj, qistart;

    for(inter=0; inter<2; inter++){
        for(plane=0; plane<3; plane++){
            int sum=0;
            /* find the qi range [qistart, sum] that contains this frame's qp */
            for(qri=0; qri<s->qr_count[inter][plane]; qri++){
                sum+= s->qr_size[inter][plane][qri];
                if(s->qps[qpi] <= sum)
                    break;
            }
            qistart= sum - s->qr_size[inter][plane][qri];
            bmi= s->qr_base[inter][plane][qri  ];
            bmj= s->qr_base[inter][plane][qri+1];
            for(i=0; i<64; i++){
                /* linearly interpolate between the two base matrices bounding
                 * this qi range, with rounding */
                int coeff= (  2*(sum    -s->qps[qpi])*s->base_matrix[bmi][i]
                            - 2*(qistart-s->qps[qpi])*s->base_matrix[bmj][i]
                            + s->qr_size[inter][plane][qri])
                           / (2*s->qr_size[inter][plane][qri]);

                int qmin= 8<<(inter + !i);
                int qscale= i ? ac_scale_factor : dc_scale_factor;

                s->qmat[qpi][inter][plane][s->dsp.idct_permutation[i]]= av_clip((qscale * coeff)/100 * 4, qmin, 4096);
            }
            // all DC coefficients use the same quant so as not to interfere with DC prediction
            s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
        }
    }

    memset(s->qscale_table, (FFMAX(s->qmat[0][0][0][1], s->qmat[0][0][1][1])+8)/16, 512); //FIXME finetune
}

/*
 * This function initializes the loop filter boundary limits if the frame's
 * quality index is different from the previous frame's.
 *
 * The filter_limit_values may not be larger than 127.
 */
static void init_loop_filter(Vp3DecodeContext *s)
{
    int *bounding_values= s->bounding_values_array+127;
    int filter_limit;
    int x;
    int value;

    filter_limit = s->filter_limit_values[s->qps[0]];

    /* set up the bounding values */
    memset(s->bounding_values_array, 0, 256 * sizeof(int));
    for (x = 0; x < filter_limit; x++) {
        bounding_values[-x] = -x;
        bounding_values[x] = x;
    }
    /* ramp back down to zero beyond the filter limit */
    for (x = value = filter_limit; x < 128 && value; x++, value--) {
        bounding_values[ x] = value;
        bounding_values[-x] = -value;
    }
    if (value)
        bounding_values[128] = value;
    /* replicated filter limit used by SIMD loop-filter code */
    bounding_values[129] = bounding_values[130] = filter_limit * 0x02020202;
}

/*
 * This function unpacks all of the superblock/macroblock/fragment coding
 * information from the bitstream.
 */
static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
{
    int superblock_starts[3] = { 0, s->u_superblock_start, s->v_superblock_start };
    int bit = 0;
    int current_superblock = 0;
    int current_run = 0;
    int num_partial_superblocks = 0;

    int i, j;
    int current_fragment;
    int plane;

    if (s->keyframe) {
        /* keyframes code every fragment */
        memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);

    } else {

        /* unpack the list of partially-coded superblocks */
        bit = get_bits1(gb);
        while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
            current_run = get_vlc2(gb,
                s->superblock_run_length_vlc.table, 6, 2) + 1;
            /* run value 34 escapes to a 12-bit extension */
            if (current_run == 34)
                current_run += get_bits(gb, 12);

            if (current_superblock + current_run > s->superblock_count) {
                av_log(s->avctx, AV_LOG_ERROR, "Invalid partially coded superblock run length\n");
                return -1;
            }

            memset(s->superblock_coding + current_superblock, bit, current_run);

            current_superblock += current_run;
            if (bit)
                num_partial_superblocks += current_run;

            /* Theora re-reads the bit after a maximum-length run instead of
             * toggling, allowing runs longer than MAXIMUM_LONG_BIT_RUN */
            if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
                bit = get_bits1(gb);
            else
                bit ^= 1;
        }

        /* unpack the list of fully coded superblocks if any of the blocks were
         * not marked as partially coded in the previous step */
        if (num_partial_superblocks < s->superblock_count) {
            int superblocks_decoded = 0;

            current_superblock = 0;
            bit = get_bits1(gb);
            while (superblocks_decoded < s->superblock_count - num_partial_superblocks
                   && get_bits_left(gb) > 0) {
                current_run = get_vlc2(gb,
                    s->superblock_run_length_vlc.table, 6, 2) + 1;
                if (current_run == 34)
                    current_run += get_bits(gb, 12);

                for (j = 0; j < current_run; current_superblock++) {
                    if (current_superblock >= s->superblock_count) {
                        av_log(s->avctx, AV_LOG_ERROR, "Invalid fully coded superblock run length\n");
                        return -1;
                    }

                    /* skip any superblocks already marked as partially coded */
                    if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
                        /* 2*bit maps 1 -> SB_FULLY_CODED, 0 -> SB_NOT_CODED */
                        s->superblock_coding[current_superblock] = 2*bit;
                        j++;
                    }
                }
                superblocks_decoded += current_run;

                if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
                    bit = get_bits1(gb);
                else
                    bit ^= 1;
            }
        }

        /* if there were partial blocks, initialize bitstream for
         * unpacking fragment codings */
        if (num_partial_superblocks) {

            current_run = 0;
            bit = get_bits1(gb);
            /* toggle the bit because as soon as the first run length is
             * fetched the bit will be toggled again */
            bit ^= 1;
        }
    }

    /* figure out which fragments are coded; iterate through each
     * superblock (all planes) */
    s->total_num_coded_frags = 0;
    memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);

    for (plane = 0; plane < 3; plane++) {
        int sb_start = superblock_starts[plane];
        int sb_end = sb_start + (plane ? s->c_superblock_count : s->y_superblock_count);
        int num_coded_frags = 0;

        for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {

            /* iterate through all 16 fragments in a superblock */
            for (j = 0; j < 16; j++) {

                /* if the fragment is in bounds, check its coding status */
                current_fragment = s->superblock_fragments[i * 16 + j];
                if (current_fragment != -1) {
                    int coded = s->superblock_coding[i];

                    if (s->superblock_coding[i] == SB_PARTIALLY_CODED) {

                        /* fragment may or may not be coded; this is the case
                         * that cares about the fragment coding runs */
                        if (current_run-- == 0) {
                            bit ^= 1;
                            current_run = get_vlc2(gb,
                                s->fragment_run_length_vlc.table, 5, 2);
                        }
                        coded = bit;
                    }

                    if (coded) {
                        /* default mode; actual mode will be decoded in
                         * the next phase */
                        s->all_fragments[current_fragment].coding_method =
                            MODE_INTER_NO_MV;
                        s->coded_fragment_list[plane][num_coded_frags++] =
                            current_fragment;
                    } else {
                        /* not coded; copy this fragment from the prior frame */
                        s->all_fragments[current_fragment].coding_method =
                            MODE_COPY;
                    }
                }
            }
        }
        s->total_num_coded_frags += num_coded_frags;
        for (i = 0; i < 64; i++)
            s->num_coded_frags[plane][i] = num_coded_frags;
        /* each plane's coded list immediately follows the previous plane's */
        if (plane < 2)
            s->coded_fragment_list[plane+1] = s->coded_fragment_list[plane] + num_coded_frags;
    }
    return 0;
}

/*
 * This function unpacks all the coding mode data for individual macroblocks
 * from the
   bitstream.
 */
static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
{
    int i, j, k, sb_x, sb_y;
    int scheme;
    int current_macroblock;
    int current_fragment;
    int coding_mode;
    int custom_mode_alphabet[CODING_MODE_COUNT];
    const int *alphabet;
    Vp3Fragment *frag;

    if (s->keyframe) {
        /* keyframes are all intra by definition */
        for (i = 0; i < s->fragment_count; i++)
            s->all_fragments[i].coding_method = MODE_INTRA;

    } else {

        /* fetch the mode coding scheme for this frame */
        scheme = get_bits(gb, 3);

        /* is it a custom coding scheme? */
        if (scheme == 0) {
            for (i = 0; i < 8; i++)
                custom_mode_alphabet[i] = MODE_INTER_NO_MV;
            for (i = 0; i < 8; i++)
                custom_mode_alphabet[get_bits(gb, 3)] = i;
            alphabet = custom_mode_alphabet;
        } else
            alphabet = ModeAlphabet[scheme-1];

        /* iterate through all of the macroblocks that contain 1 or more
         * coded fragments */
        for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
        for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
            if (get_bits_left(gb) <= 0)
                return -1;

            for (j = 0; j < 4; j++) {
                int mb_x = 2*sb_x +   (j>>1);
                int mb_y = 2*sb_y + (((j>>1)+j)&1);

                current_macroblock = mb_y * s->macroblock_width + mb_x;

                if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height)
                    continue;

#define BLOCK_X (2*mb_x + (k&1))
#define BLOCK_Y (2*mb_y + (k>>1))
                /* coding modes are only stored if the macroblock has at least one
                 * luma block coded, otherwise it must be INTER_NO_MV */
                for (k = 0; k < 4; k++) {
                    current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                    if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
                        break;
                }
                if (k == 4) {
                    s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV;
                    continue;
                }

                /* mode 7 means get 3 bits for each coding mode */
                if (scheme == 7)
                    coding_mode = get_bits(gb, 3);
                else
                    coding_mode = alphabet
                        [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];

                s->macroblock_coding[current_macroblock] = coding_mode;
                for (k = 0; k < 4; k++) {
                    frag = s->all_fragments + BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                    if (frag->coding_method != MODE_COPY)
                        frag->coding_method = coding_mode;
                }

/* propagates the mode to the co-located chroma fragments of both C planes */
#define SET_CHROMA_MODES \
    if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
        frag[s->fragment_start[1]].coding_method = coding_mode;\
    if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
        frag[s->fragment_start[2]].coding_method = coding_mode;

                if (s->chroma_y_shift) {
                    /* 4:2:0 -- one chroma fragment per macroblock */
                    frag = s->all_fragments + mb_y*s->fragment_width[1] + mb_x;
                    SET_CHROMA_MODES
                } else if (s->chroma_x_shift) {
                    /* 4:2:2 -- two vertically stacked chroma fragments */
                    frag = s->all_fragments + 2*mb_y*s->fragment_width[1] + mb_x;
                    for (k = 0; k < 2; k++) {
                        SET_CHROMA_MODES
                        frag += s->fragment_width[1];
                    }
                } else {
                    /* 4:4:4 -- four chroma fragments, same layout as luma */
                    for (k = 0; k < 4; k++) {
                        frag = s->all_fragments + BLOCK_Y*s->fragment_width[1] + BLOCK_X;
                        SET_CHROMA_MODES
                    }
                }
            }
        }
        }
    }

    return 0;
}

/*
 * This function unpacks all the motion vectors for the individual
 * macroblocks from the bitstream.
 */
static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
{
    int j, k, sb_x, sb_y;
    int coding_mode;
    int motion_x[4];
    int motion_y[4];
    int last_motion_x = 0;
    int last_motion_y = 0;
    int prior_last_motion_x = 0;
    int prior_last_motion_y = 0;
    int current_macroblock;
    int current_fragment;
    int frag;

    if (s->keyframe)
        return 0;

    /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */
    coding_mode = get_bits1(gb);

    /* iterate through all of the macroblocks that contain 1 or more
     * coded fragments */
    for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
    for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
        if (get_bits_left(gb) <= 0)
            return -1;

        for (j = 0; j < 4; j++) {
            int mb_x = 2*sb_x +   (j>>1);
            int mb_y = 2*sb_y + (((j>>1)+j)&1);
            current_macroblock = mb_y * s->macroblock_width + mb_x;

            if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height ||
                (s->macroblock_coding[current_macroblock] == MODE_COPY))
                continue;

            switch (s->macroblock_coding[current_macroblock]) {
            case MODE_INTER_PLUS_MV:
            case MODE_GOLDEN_MV:
                /* all 6 fragments use the same motion vector */
                if (coding_mode == 0) {
                    motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                    motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                } else {
                    motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
                    motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
                }

                /* vector maintenance, only on MODE_INTER_PLUS_MV */
                if (s->macroblock_coding[current_macroblock] ==
                    MODE_INTER_PLUS_MV) {
                    prior_last_motion_x = last_motion_x;
                    prior_last_motion_y = last_motion_y;
                    last_motion_x = motion_x[0];
                    last_motion_y = motion_y[0];
                }
                break;

            case MODE_INTER_FOURMV:
                /* vector maintenance */
                prior_last_motion_x = last_motion_x;
                prior_last_motion_y = last_motion_y;

                /* fetch 4 vectors from the bitstream, one for each
                 * Y fragment, then average for the C fragment vectors */
                for (k = 0; k < 4; k++) {
                    current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                    if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
                        if (coding_mode == 0) {
                            motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                            motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                        } else {
                            motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
                            motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
                        }
                        last_motion_x = motion_x[k];
                        last_motion_y = motion_y[k];
                    } else {
                        motion_x[k] = 0;
                        motion_y[k] = 0;
                    }
                }
                break;

            case MODE_INTER_LAST_MV:
                /* all 6 fragments use the last motion vector */
                motion_x[0] = last_motion_x;
                motion_y[0] = last_motion_y;

                /* no vector maintenance (last vector remains the
                 * last vector) */
                break;

            case MODE_INTER_PRIOR_LAST:
                /* all 6 fragments use the motion vector prior to the
                 * last motion vector */
                motion_x[0] = prior_last_motion_x;
                motion_y[0] = prior_last_motion_y;

                /* vector maintenance */
                prior_last_motion_x = last_motion_x;
                prior_last_motion_y = last_motion_y;
                last_motion_x = motion_x[0];
                last_motion_y = motion_y[0];
                break;

            default:
                /* covers intra, inter without MV, golden without MV */
                motion_x[0] = 0;
                motion_y[0] = 0;

                /* no vector maintenance */
                break;
            }

            /* assign the motion vectors to the correct fragments */
            for (k = 0; k < 4; k++) {
                current_fragment =
                    BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                    s->motion_val[0][current_fragment][0] = motion_x[k];
                    s->motion_val[0][current_fragment][1] = motion_y[k];
                } else {
                    s->motion_val[0][current_fragment][0] = motion_x[0];
                    s->motion_val[0][current_fragment][1] = motion_y[0];
                }
            }

            if (s->chroma_y_shift) {
                /* 4:2:0 -- average the four luma vectors, then halve with
                 * round-toward-odd ((v>>1)|(v&1)) for the chroma vector */
                if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                    motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] + motion_x[2] + motion_x[3], 2);
                    motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] + motion_y[2] + motion_y[3], 2);
                }
                motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
                motion_y[0] = (motion_y[0]>>1) | (motion_y[0]&1);
                frag = mb_y*s->fragment_width[1] + mb_x;
                s->motion_val[1][frag][0] = motion_x[0];
                s->motion_val[1][frag][1] = motion_y[0];
            } else if (s->chroma_x_shift) {
                /* 4:2:2 -- average vector pairs; only x is subsampled */
                if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                    motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
                    motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
                    motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
                    motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
                } else {
                    motion_x[1] = motion_x[0];
                    motion_y[1] = motion_y[0];
                }
                motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
                motion_x[1] = (motion_x[1]>>1) | (motion_x[1]&1);

                frag = 2*mb_y*s->fragment_width[1] + mb_x;
                for (k = 0; k < 2; k++) {
                    s->motion_val[1][frag][0] = motion_x[k];
                    s->motion_val[1][frag][1] = motion_y[k];
                    frag += s->fragment_width[1];
                }
            } else {
                /* 4:4:4 -- chroma uses the luma vectors directly */
                for (k = 0; k < 4; k++) {
                    frag = BLOCK_Y*s->fragment_width[1] + BLOCK_X;
                    if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                        s->motion_val[1][frag][0] = motion_x[k];
                        s->motion_val[1][frag][1] = motion_y[k];
                    } else {
                        s->motion_val[1][frag][0] = motion_x[0];
                        s->motion_val[1][frag][1] = motion_y[0];
                    }
                }
            }
        }
    }
    }

    return 0;
}

static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
{
    int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi;
    int num_blocks = s->total_num_coded_frags;

    /* for each extra qp, read run-coded flags selecting which of the still
     * eligible coded blocks move up to the next qp index */
    for (qpi = 0; qpi < s->nqps-1 && num_blocks > 0; qpi++) {
        i = blocks_decoded = num_blocks_at_qpi = 0;

        bit = get_bits1(gb);

        do {
            run_length = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1;
            /* run value 34 escapes to a 12-bit extension */
            if (run_length == 34)
                run_length += get_bits(gb, 12);
            blocks_decoded += run_length;

            if (!bit)
                num_blocks_at_qpi += run_length;

            /* apply the flag to the next run_length blocks still at qpi */
            for (j = 0; j < run_length; i++) {
                if (i >= s->total_num_coded_frags)
                    return -1;

                if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
                    s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
                    j++;
                }
            }

            /* re-read instead of toggling after a maximum-length run */
            if (run_length == MAXIMUM_LONG_BIT_RUN)
                bit = get_bits1(gb);
            else
                bit ^= 1;
        } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);

        num_blocks -= num_blocks_at_qpi;
    }

    return 0;
}

/*
 * This function is called by unpack_dct_coeffs() to extract the VLCs from
 * the bitstream. The VLCs encode tokens which are used to unpack DCT
 * data. This function unpacks all the VLCs for either the Y plane or both
 * C planes, and is called for DC coefficients or different AC coefficient
 * levels (since different coefficient types require different VLC tables.
 *
 * This function returns a residual eob run. E.g, if a particular token gave
 * instructions to EOB the next 5 fragments and there were only 2 fragments
 * left in the current fragment range, 3 would be returned so that it could
 * be passed into the next call to this same function.
*/
static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
                        VLC *table, int coeff_index,
                        int plane,
                        int eob_run)
{
    int i, j = 0;
    int token;
    int zero_run = 0;
    DCTELEM coeff = 0;
    int bits_to_get;
    int blocks_ended;
    int coeff_i = 0;
    int num_coeffs = s->num_coded_frags[plane][coeff_index];
    int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];

    /* local references to structure members to avoid repeated dereferences */
    int *coded_fragment_list = s->coded_fragment_list[plane];
    Vp3Fragment *all_fragments = s->all_fragments;
    VLC_TYPE (*vlc_table)[2] = table->table;

    /* NOTE(review): this path only logs and then keeps decoding with a
     * negative num_coeffs — verify whether an early error return is safer */
    if (num_coeffs < 0)
        av_log(s->avctx, AV_LOG_ERROR, "Invalid number of coefficents at level %d\n", coeff_index);

    /* consume any EOB run left over from the previous plane/level first */
    if (eob_run > num_coeffs) {
        coeff_i = blocks_ended = num_coeffs;
        eob_run -= num_coeffs;
    } else {
        coeff_i = blocks_ended = eob_run;
        eob_run = 0;
    }

    // insert fake EOB token to cover the split between planes or zzi
    if (blocks_ended)
        dct_tokens[j++] = blocks_ended << 2;

    while (coeff_i < num_coeffs && get_bits_left(gb) > 0) {
            /* decode a VLC into a token */
        token = get_vlc2(gb, vlc_table, 11, 3);
        /* use the token to get a zero run, a coefficient, and an eob run */
        if (token <= 6) {
            eob_run = eob_run_base[token];
            if (eob_run_get_bits[token])
                eob_run += get_bits(gb, eob_run_get_bits[token]);

            // record only the number of blocks ended in this plane,
            // any spill will be recorded in the next plane.
            if (eob_run > num_coeffs - coeff_i) {
                dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
                blocks_ended   += num_coeffs - coeff_i;
                eob_run        -= num_coeffs - coeff_i;
                coeff_i         = num_coeffs;
            } else {
                dct_tokens[j++] = TOKEN_EOB(eob_run);
                blocks_ended   += eob_run;
                coeff_i        += eob_run;
                eob_run = 0;
            }
        } else {
            /* coefficient token: optional extra magnitude bits select the
             * actual value from the per-token coefficient table */
            bits_to_get = coeff_get_bits[token];
            if (bits_to_get)
                bits_to_get = get_bits(gb, bits_to_get);
            coeff = coeff_tables[token][bits_to_get];

            zero_run = zero_run_base[token];
            if (zero_run_get_bits[token])
                zero_run += get_bits(gb, zero_run_get_bits[token]);

            if (zero_run) {
                dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run);
            } else {
                // Save DC into the fragment structure. DC prediction is
                // done in raster order, so the actual DC can't be in with
                // other tokens. We still need the token in dct_tokens[]
                // however, or else the structure collapses on itself.
                if (!coeff_index)
                    all_fragments[coded_fragment_list[coeff_i]].dc = coeff;

                dct_tokens[j++] = TOKEN_COEFF(coeff);
            }

            if (coeff_index + zero_run > 64) {
                av_log(s->avctx, AV_LOG_DEBUG, "Invalid zero run of %d with"
                       " %d coeffs left\n", zero_run, 64-coeff_index);
                zero_run = 64 - coeff_index;
            }

            // zero runs code multiple coefficients,
            // so don't try to decode coeffs for those higher levels
            for (i = coeff_index+1; i <= coeff_index+zero_run; i++)
                s->num_coded_frags[plane][i]--;
            coeff_i++;
        }
    }

    if (blocks_ended > s->num_coded_frags[plane][coeff_index])
        av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n");

    // decrement the number of blocks that have higher coefficients for each
    // EOB run at this level
    if (blocks_ended)
        for (i = coeff_index+1; i < 64; i++)
            s->num_coded_frags[plane][i] -= blocks_ended;

    // setup the next buffer
    if (plane < 2)
        s->dct_tokens[plane+1][coeff_index] = dct_tokens + j;
    else if (coeff_index < 63)
        s->dct_tokens[0][coeff_index+1] = dct_tokens + j;

    return eob_run;
}

static void reverse_dc_prediction(Vp3DecodeContext *s,
                                  int first_fragment,
                                  int fragment_width,
                                  int fragment_height);
/*
 * This function
unpacks all of the DCT coefficient data from the
 * bitstream.
 */
static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
{
    int i;
    int dc_y_table;
    int dc_c_table;
    int ac_y_table;
    int ac_c_table;
    int residual_eob_run = 0;
    VLC *y_tables[64];
    VLC *c_tables[64];

    s->dct_tokens[0][0] = s->dct_tokens_base;

    /* fetch the DC table indexes */
    dc_y_table = get_bits(gb, 4);
    dc_c_table = get_bits(gb, 4);

    /* unpack the Y plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
        0, residual_eob_run);

    /* reverse prediction of the Y-plane DC coefficients */
    reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);

    /* unpack the C plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
        1, residual_eob_run);
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
        2, residual_eob_run);

    /* reverse prediction of the C-plane DC coefficients */
    if (!(s->avctx->flags & CODEC_FLAG_GRAY))
    {
        reverse_dc_prediction(s, s->fragment_start[1],
            s->fragment_width[1], s->fragment_height[1]);
        reverse_dc_prediction(s, s->fragment_start[2],
            s->fragment_width[1], s->fragment_height[1]);
    }

    /* fetch the AC table indexes */
    ac_y_table = get_bits(gb, 4);
    ac_c_table = get_bits(gb, 4);

    /* build tables of AC VLC tables: levels 1-5, 6-14, 15-27 and 28-63
     * each use a different VLC group */
    for (i = 1; i <= 5; i++) {
        y_tables[i] = &s->ac_vlc_1[ac_y_table];
        c_tables[i] = &s->ac_vlc_1[ac_c_table];
    }
    for (i = 6; i <= 14; i++) {
        y_tables[i] = &s->ac_vlc_2[ac_y_table];
        c_tables[i] = &s->ac_vlc_2[ac_c_table];
    }
    for (i = 15; i <= 27; i++) {
        y_tables[i] = &s->ac_vlc_3[ac_y_table];
        c_tables[i] = &s->ac_vlc_3[ac_c_table];
    }
    for (i = 28; i <= 63; i++) {
        y_tables[i] = &s->ac_vlc_4[ac_y_table];
        c_tables[i] = &s->ac_vlc_4[ac_c_table];
    }

    /* decode all AC coefficients */
    for (i = 1; i <= 63; i++) {
        residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
            0, residual_eob_run);

        residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
            1, residual_eob_run);
        residual_eob_run = unpack_vlcs(s, gb, c_tables[i],
            i, 2, residual_eob_run);
    }

    return 0;
}

/*
 * This function reverses the DC prediction for each coded fragment in
 * the frame. Much of this function is adapted directly from the original
 * VP3 source code.
 */
#define COMPATIBLE_FRAME(x) \
  (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
#define DC_COEFF(u) s->all_fragments[u].dc

static void reverse_dc_prediction(Vp3DecodeContext *s,
                                  int first_fragment,
                                  int fragment_width,
                                  int fragment_height)
{

#define PUL 8
#define PU 4
#define PUR 2
#define PL 1

    int x, y;
    int i = first_fragment;

    int predicted_dc;

    /* DC values for the left, up-left, up, and up-right fragments */
    int vl, vul, vu, vur;

    /* indexes for the left, up-left, up, and up-right fragments */
    int l, ul, u, ur;

    /*
     * The 6 fields mean:
     *   0: up-left multiplier
     *   1: up multiplier
     *   2: up-right multiplier
     *   3: left multiplier
     */
    static const int predictor_transform[16][4] = {
        {  0,  0,  0,  0},
        {  0,  0,  0,128},        // PL
        {  0,  0,128,  0},        // PUR
        {  0,  0, 53, 75},        // PUR|PL
        {  0,128,  0,  0},        // PU
        {  0, 64,  0, 64},        // PU|PL
        {  0,128,  0,  0},        // PU|PUR
        {  0,  0, 53, 75},        // PU|PUR|PL
        {128,  0,  0,  0},        // PUL
        {  0,  0,  0,128},        // PUL|PL
        { 64,  0, 64,  0},        // PUL|PUR
        {  0,  0, 53, 75},        // PUL|PUR|PL
        {  0,128,  0,  0},        // PUL|PU
        {-104,116,  0,116},       // PUL|PU|PL
        { 24, 80, 24,  0},        // PUL|PU|PUR
        {-104,116,  0,116}        // PUL|PU|PUR|PL
    };

    /* This table shows which types of blocks can use other blocks for
     * prediction. For example, INTRA is the only mode in this table to
     * have a frame number of 0. That means INTRA blocks can only predict
     * from other INTRA blocks. There are 2 golden frame coding types;
     * blocks encoding in these modes can only predict from other blocks
     * that were encoded with one of these 2 modes.
*/
    static const unsigned char compatible_frame[9] = {
        1,    /* MODE_INTER_NO_MV */
        0,    /* MODE_INTRA */
        1,    /* MODE_INTER_PLUS_MV */
        1,    /* MODE_INTER_LAST_MV */
        1,    /* MODE_INTER_PRIOR_MV */
        2,    /* MODE_USING_GOLDEN */
        2,    /* MODE_GOLDEN_MV */
        1,    /* MODE_INTER_FOUR_MV */
        3     /* MODE_COPY */
    };
    int current_frame_type;

    /* there is a last DC predictor for each of the 3 frame types */
    short last_dc[3];

    int transform = 0;

    vul = vu = vur = vl = 0;
    last_dc[0] = last_dc[1] = last_dc[2] = 0;

    /* for each fragment row... */
    for (y = 0; y < fragment_height; y++) {

        /* for each fragment in a row... */
        for (x = 0; x < fragment_width; x++, i++) {

            /* reverse prediction if this block was coded */
            if (s->all_fragments[i].coding_method != MODE_COPY) {

                current_frame_type =
                    compatible_frame[s->all_fragments[i].coding_method];

                /* collect DC values from the available, frame-compatible
                 * left / up / up-left / up-right neighbours */
                transform= 0;
                if(x){
                    l= i-1;
                    vl = DC_COEFF(l);
                    if(COMPATIBLE_FRAME(l))
                        transform |= PL;
                }
                if(y){
                    u= i-fragment_width;
                    vu = DC_COEFF(u);
                    if(COMPATIBLE_FRAME(u))
                        transform |= PU;
                    if(x){
                        ul= i-fragment_width-1;
                        vul = DC_COEFF(ul);
                        if(COMPATIBLE_FRAME(ul))
                            transform |= PUL;
                    }
                    if(x + 1 < fragment_width){
                        ur= i-fragment_width+1;
                        vur = DC_COEFF(ur);
                        if(COMPATIBLE_FRAME(ur))
                            transform |= PUR;
                    }
                }

                if (transform == 0) {

                    /* if there were no fragments to predict from, use last
                     * DC saved */
                    predicted_dc = last_dc[current_frame_type];
                } else {

                    /* apply the appropriate predictor transform */
                    predicted_dc =
                        (predictor_transform[transform][0] * vul) +
                        (predictor_transform[transform][1] * vu) +
                        (predictor_transform[transform][2] * vur) +
                        (predictor_transform[transform][3] * vl);

                    predicted_dc /= 128;

                    /* check for outranging on the [ul u l] and
                     * [ul u ur l] predictors */
                    if ((transform == 15) || (transform == 13)) {
                        if (FFABS(predicted_dc - vu) > 128)
                            predicted_dc = vu;
                        else if (FFABS(predicted_dc - vl) > 128)
                            predicted_dc = vl;
                        else if (FFABS(predicted_dc - vul) > 128)
                            predicted_dc = vul;
                    }
                }

                /* at long last, apply the predictor */
                DC_COEFF(i) += predicted_dc;
                /* save the DC */
last_dc[current_frame_type] = DC_COEFF(i);
            }
        }
    }
}

/* Deblock the coded fragments of one plane for fragment rows
 * [ystart, yend); operates on the current frame in place. */
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
{
    int x, y;
    int *bounding_values= s->bounding_values_array+127;

    int width           = s->fragment_width[!!plane];
    int height          = s->fragment_height[!!plane];
    int fragment        = s->fragment_start        [plane] + ystart * width;
    int stride          = s->current_frame.linesize[plane];
    uint8_t *plane_data = s->current_frame.data    [plane];
    if (!s->flipped_image) stride = -stride;
    plane_data += s->data_offset[plane] + 8*ystart*stride;

    for (y = ystart; y < yend; y++) {

        for (x = 0; x < width; x++) {
            /* This code basically just deblocks on the edges of coded blocks.
             * However, it has to be much more complicated because of the
             * braindamaged deblock ordering used in VP3/Theora. Order matters
             * because some pixels get filtered twice. */
            if( s->all_fragments[fragment].coding_method != MODE_COPY )
            {
                /* do not perform left edge filter for left columns frags */
                if (x > 0) {
                    s->dsp.vp3_h_loop_filter(
                        plane_data + 8*x,
                        stride, bounding_values);
                }

                /* do not perform top edge filter for top row fragments */
                if (y > 0) {
                    s->dsp.vp3_v_loop_filter(
                        plane_data + 8*x,
                        stride, bounding_values);
                }

                /* do not perform right edge filter for right column
                 * fragments or if right fragment neighbor is also coded
                 * in this frame (it will be filtered in next iteration) */
                if ((x < width - 1) &&
                    (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
                    s->dsp.vp3_h_loop_filter(
                        plane_data + 8*x + 8,
                        stride, bounding_values);
                }

                /* do not perform bottom edge filter for bottom row
                 * fragments or if bottom fragment neighbor is also coded
                 * in this frame (it will be filtered in the next row) */
                if ((y < height - 1) &&
                    (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
                    s->dsp.vp3_v_loop_filter(
                        plane_data + 8*x + 8*stride,
                        stride, bounding_values);
                }
            }

            fragment++;
        }
        plane_data += 8*stride;
    }
}

/**
 * Pulls DCT tokens from the 64 levels to decode and dequant the coefficients
 * for the
next block in coding order
 */
static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag,
                              int plane, int inter, DCTELEM block[64])
{
    int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
    uint8_t *perm = s->scantable.permutated;
    int i = 0;

    do {
        /* peek the next token of the current coefficient level; the low
         * 2 bits are the token type, the rest is the payload */
        int token = *s->dct_tokens[plane][i];
        switch (token & 3) {
        case 0: // EOB
            if (--token < 4) // 0-3 are token types, so the EOB run must now be 0
                s->dct_tokens[plane][i]++;
            else
                *s->dct_tokens[plane][i] = token & ~3;
            goto end;
        case 1: // zero run
            s->dct_tokens[plane][i]++;
            i += (token >> 2) & 0x7f;
            block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
            i++;
            break;
        case 2: // coeff
            block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
            s->dct_tokens[plane][i++]++;
            break;
        default: // shouldn't happen
            return i;
        }
    } while (i < 64);
end:
    // the actual DC+prediction is in the fragment structure
    block[0] = frag->dc * s->qmat[0][inter][plane][0];
    return i;
}

/**
 * called when all pixels up to row y are complete
 */
static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
{
    int h, cy;
    int offset[4];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;

    if (!s->flipped_image) {
        if (y == 0)
            h -= s->height - s->avctx->height;  // account for non-mod16
        y = s->height - y - h;
    }

    cy = y >> 1;
    offset[0] = s->current_frame.linesize[0]*y;
    offset[1] = s->current_frame.linesize[1]*cy;
    offset[2] = s->current_frame.linesize[2]*cy;
    offset[3] = 0;

    emms_c();
    s->avctx->draw_horiz_band(s->avctx, &s->current_frame, offset, y, 3, h);
    s->last_slice_end= y + h;
}

/*
 * Perform the final rendering for a particular slice of data.
 * The slice number ranges from 0..(c_superblock_height - 1).
*/
static void render_slice(Vp3DecodeContext *s, int slice)
{
    int x, y, i, j;
    LOCAL_ALIGNED_16(DCTELEM, block, [64]);
    int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef; /* poison values; set before use */
    int motion_halfpel_index;
    uint8_t *motion_source;
    int plane, first_pixel;

    if (slice >= s->c_superblock_height)
        return;

    for (plane = 0; plane < 3; plane++) {
        uint8_t *output_plane = s->current_frame.data    [plane] + s->data_offset[plane];
        uint8_t *  last_plane = s->   last_frame.data    [plane] + s->data_offset[plane];
        uint8_t *golden_plane = s-> golden_frame.data    [plane] + s->data_offset[plane];
        int stride            = s->current_frame.linesize[plane];
        int plane_width       = s->width  >> (plane && s->chroma_x_shift);
        int plane_height      = s->height >> (plane && s->chroma_y_shift);
        int8_t (*motion_val)[2] = s->motion_val[!!plane];

        int sb_x, sb_y        = slice << (!plane && s->chroma_y_shift);
        int slice_height      = sb_y + 1 + (!plane && s->chroma_y_shift);
        int slice_width       = plane ? s->c_superblock_width : s->y_superblock_width;

        int fragment_width    = s->fragment_width[!!plane];
        int fragment_height   = s->fragment_height[!!plane];
        int fragment_start    = s->fragment_start[plane];

        if (!s->flipped_image) stride = -stride;
        if (CONFIG_GRAY && plane && (s->avctx->flags & CODEC_FLAG_GRAY))
            continue;

        if(FFABS(stride) > 2048)
            return; //various tables are fixed size

        /* for each superblock row in the slice (both of them)... */
        for (; sb_y < slice_height; sb_y++) {

            /* for each superblock in a row... */
            for (sb_x = 0; sb_x < slice_width; sb_x++) {

                /* for each block in a superblock... */
                for (j = 0; j < 16; j++) {
                    /* blocks inside a superblock are visited in Hilbert
                     * curve order */
                    x = 4*sb_x + hilbert_offset[j][0];
                    y = 4*sb_y + hilbert_offset[j][1];

                    i = fragment_start + y*fragment_width + x;

                    // bounds check
                    if (x >= fragment_width || y >= fragment_height)
                        continue;

                first_pixel = 8*y*stride + 8*x;

                /* transform if this block was coded */
                if (s->all_fragments[i].coding_method != MODE_COPY) {

                    if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
                        (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
                        motion_source= golden_plane;
                    else
                        motion_source= last_plane;

                    motion_source += first_pixel;
                    motion_halfpel_index = 0;

                    /* sort out the motion vector if this fragment is coded
                     * using a motion vector method */
                    if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
                        (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
                        int src_x, src_y;
                        motion_x = motion_val[y*fragment_width + x][0];
                        motion_y = motion_val[y*fragment_width + x][1];

                        src_x= (motion_x>>1) + 8*x;
                        src_y= (motion_y>>1) + 8*y;

                        motion_halfpel_index = motion_x & 0x01;
                        motion_source += (motion_x >> 1);

                        motion_halfpel_index |= (motion_y & 0x01) << 1;
                        motion_source += ((motion_y >> 1) * stride);

                        /* source block reaches outside the plane: build an
                         * edge-extended copy in the emu buffer */
                        if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){
                            uint8_t *temp= s->edge_emu_buffer;
                            if(stride<0) temp -= 9*stride;
                            else temp += 9*stride;

                            ff_emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height);
                            motion_source= temp;
                        }
                    }

                    /* first, take care of copying a block from either the
                     * previous or the golden frame */
                    if (s->all_fragments[i].coding_method != MODE_INTRA) {
                        /* Note, it is possible to implement all MC cases with
                           put_no_rnd_pixels_l2 which would look more like the
                           VP3 source but this would be slower as
                           put_no_rnd_pixels_tab is better optimzed */
                        if(motion_halfpel_index != 3){
                            s->dsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
                                output_plane + first_pixel,
                                motion_source, stride, 8);
                        }else{
                            int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1
                            s->dsp.put_no_rnd_pixels_l2[1](
                                output_plane + first_pixel,
                                motion_source - d,
                                motion_source + stride + 1 + d,
                                stride, 8);
                        }
                    }

                    s->dsp.clear_block(block);

                    /* invert DCT and place (or add) in final output */

                    if (s->all_fragments[i].coding_method == MODE_INTRA) {
                        vp3_dequant(s, s->all_fragments + i, plane, 0, block);
                        if(s->avctx->idct_algo!=FF_IDCT_VP3)
                            block[0] += 128<<3;
                        s->dsp.idct_put(
                            output_plane + first_pixel,
                            stride,
                            block);
                    } else {
                        if (vp3_dequant(s, s->all_fragments + i, plane, 1, block)) {
                            s->dsp.idct_add(
                                output_plane + first_pixel,
                                stride,
                                block);
                        } else {
                            s->dsp.vp3_idct_dc_add(output_plane + first_pixel, stride, block);
                        }
                    }
                } else {

                    /* copy directly from the previous frame */
                    s->dsp.put_pixels_tab[1][0](
                        output_plane + first_pixel,
                        last_plane + first_pixel,
                        stride, 8);
                }
                }
            }

            // Filter up to the last row in the superblock row
            apply_loop_filter(s, plane, 4*sb_y - !!sb_y, FFMIN(4*sb_y+3, fragment_height-1));
        }
    }

     /* this looks like a good place for slice dispatch... */
     /* algorithm:
      *   if (slice == s->macroblock_height - 1)
      *     dispatch (both last slice & 2nd-to-last slice);
      *   else if (slice > 0)
      *     dispatch (slice - 1);
      */

    vp3_draw_horiz_band(s, FFMIN(64*slice + 64-16, s->height-16));
}

/*
 * This is the ffmpeg/libavcodec API init function.
*/
static av_cold int vp3_decode_init(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int i, inter, plane;
    int c_width;
    int c_height;
    int y_fragment_count, c_fragment_count;

    if (avctx->codec_tag == MKTAG('V','P','3','0'))
        s->version = 0;
    else
        s->version = 1;

    s->avctx = avctx;
    /* internal buffers are padded to multiples of 16 in each dimension */
    s->width = FFALIGN(avctx->width, 16);
    s->height = FFALIGN(avctx->height, 16);
    if (avctx->pix_fmt == PIX_FMT_NONE)
        avctx->pix_fmt = PIX_FMT_YUV420P;
    avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
    if(avctx->idct_algo==FF_IDCT_AUTO)
        avctx->idct_algo=FF_IDCT_VP3;
    dsputil_init(&s->dsp, avctx);

    ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);

    /* initialize to an impossible value which will force a recalculation
     * in the first frame decode */
    for (i = 0; i < 3; i++)
        s->qps[i] = -1;

    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);

    s->y_superblock_width = (s->width + 31) / 32;
    s->y_superblock_height = (s->height + 31) / 32;
    s->y_superblock_count = s->y_superblock_width * s->y_superblock_height;

    /* work out the dimensions for the C planes */
    c_width = s->width >> s->chroma_x_shift;
    c_height = s->height >> s->chroma_y_shift;
    s->c_superblock_width = (c_width + 31) / 32;
    s->c_superblock_height = (c_height + 31) / 32;
    s->c_superblock_count = s->c_superblock_width * s->c_superblock_height;

    s->superblock_count = s->y_superblock_count + (s->c_superblock_count * 2);
    s->u_superblock_start = s->y_superblock_count;
    s->v_superblock_start = s->u_superblock_start + s->c_superblock_count;
    s->superblock_coding = av_malloc(s->superblock_count);

    s->macroblock_width = (s->width + 15) / 16;
    s->macroblock_height = (s->height + 15) / 16;
    s->macroblock_count = s->macroblock_width * s->macroblock_height;

    s->fragment_width[0] = s->width / FRAGMENT_PIXELS;
    s->fragment_height[0] = s->height / FRAGMENT_PIXELS;
    s->fragment_width[1]  = s->fragment_width[0]  >> s->chroma_x_shift;
    s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift;

    /* fragment count covers all 8x8 blocks for all 3 planes */
    y_fragment_count     = s->fragment_width[0] * s->fragment_height[0];
    c_fragment_count     = s->fragment_width[1] * s->fragment_height[1];
    s->fragment_count    = y_fragment_count + 2*c_fragment_count;
    s->fragment_start[1] = y_fragment_count;
    s->fragment_start[2] = y_fragment_count + c_fragment_count;

    s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment));
    s->coded_fragment_list[0] = av_malloc(s->fragment_count * sizeof(int));
    s->dct_tokens_base = av_malloc(64*s->fragment_count * sizeof(*s->dct_tokens_base));
    s->motion_val[0] = av_malloc(y_fragment_count * sizeof(*s->motion_val[0]));
    s->motion_val[1] = av_malloc(c_fragment_count * sizeof(*s->motion_val[1]));

    if (!s->superblock_coding || !s->all_fragments || !s->dct_tokens_base ||
        !s->coded_fragment_list[0] || !s->motion_val[0] || !s->motion_val[1]) {
        vp3_decode_end(avctx);
        return -1;
    }

    if (!s->theora_tables)
    {
        /* no Theora setup headers were seen: fall back to the fixed
         * VP3.1 quantizer / filter-limit tables */
        for (i = 0; i < 64; i++) {
            s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i];
            s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i];
            s->base_matrix[0][i] = vp31_intra_y_dequant[i];
            s->base_matrix[1][i] = vp31_intra_c_dequant[i];
            s->base_matrix[2][i] = vp31_inter_dequant[i];
            s->filter_limit_values[i] = vp31_filter_limit_values[i];
        }

        for(inter=0; inter<2; inter++){
            for(plane=0; plane<3; plane++){
                s->qr_count[inter][plane]= 1;
                s->qr_size [inter][plane][0]= 63;
                s->qr_base [inter][plane][0]=
                s->qr_base [inter][plane][1]= 2*inter + (!!plane)*!inter;
            }
        }

        /* init VLC tables */
        for (i = 0; i < 16; i++) {

            /* DC histograms */
            init_vlc(&s->dc_vlc[i], 11, 32,
                &dc_bias[i][0][1], 4, 2,
                &dc_bias[i][0][0], 4, 2, 0);

            /* group 1 AC histograms */
            init_vlc(&s->ac_vlc_1[i], 11, 32,
                &ac_bias_0[i][0][1], 4, 2,
                &ac_bias_0[i][0][0], 4, 2, 0);

            /* group 2 AC histograms */
            init_vlc(&s->ac_vlc_2[i], 11, 32,
                &ac_bias_1[i][0][1], 4, 2,
                &ac_bias_1[i][0][0], 4, 2, 0);

            /* group 3 AC histograms */
            init_vlc(&s->ac_vlc_3[i], 11, 32,
                &ac_bias_2[i][0][1], 4, 2,
                &ac_bias_2[i][0][0], 4, 2, 0);

            /* group 4 AC histograms */
            init_vlc(&s->ac_vlc_4[i], 11, 32,
                &ac_bias_3[i][0][1], 4, 2,
                &ac_bias_3[i][0][0], 4, 2, 0);
        }
    } else {
        /* use the Huffman tables delivered in the Theora setup header */
        for (i = 0; i < 16; i++) {
            /* DC histograms */
            if (init_vlc(&s->dc_vlc[i], 11, 32,
                &s->huffman_table[i][0][1], 8, 4,
                &s->huffman_table[i][0][0], 8, 4, 0) < 0)
                goto vlc_fail;

            /* group 1 AC histograms */
            if (init_vlc(&s->ac_vlc_1[i], 11, 32,
                &s->huffman_table[i+16][0][1], 8, 4,
                &s->huffman_table[i+16][0][0], 8, 4, 0) < 0)
                goto vlc_fail;

            /* group 2 AC histograms */
            if (init_vlc(&s->ac_vlc_2[i], 11, 32,
                &s->huffman_table[i+16*2][0][1], 8, 4,
                &s->huffman_table[i+16*2][0][0], 8, 4, 0) < 0)
                goto vlc_fail;

            /* group 3 AC histograms */
            if (init_vlc(&s->ac_vlc_3[i], 11, 32,
                &s->huffman_table[i+16*3][0][1], 8, 4,
                &s->huffman_table[i+16*3][0][0], 8, 4, 0) < 0)
                goto vlc_fail;

            /* group 4 AC histograms */
            if (init_vlc(&s->ac_vlc_4[i], 11, 32,
                &s->huffman_table[i+16*4][0][1], 8, 4,
                &s->huffman_table[i+16*4][0][0], 8, 4, 0) < 0)
                goto vlc_fail;
        }
    }

    init_vlc(&s->superblock_run_length_vlc, 6, 34,
        &superblock_run_length_vlc_table[0][1], 4, 2,
        &superblock_run_length_vlc_table[0][0], 4, 2, 0);

    init_vlc(&s->fragment_run_length_vlc, 5, 30,
        &fragment_run_length_vlc_table[0][1], 4, 2,
        &fragment_run_length_vlc_table[0][0], 4, 2, 0);

    init_vlc(&s->mode_code_vlc, 3, 8,
        &mode_code_vlc_table[0][1], 2, 1,
        &mode_code_vlc_table[0][0], 2, 1, 0);

    init_vlc(&s->motion_vector_vlc, 6, 63,
        &motion_vector_vlc_table[0][1], 2, 1,
        &motion_vector_vlc_table[0][0], 2, 1, 0);

    /* work out the block mapping tables */
    s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int));
    s->macroblock_coding = av_malloc(s->macroblock_count + 1);
    if (!s->superblock_fragments || !s->macroblock_coding) {
        vp3_decode_end(avctx);
        return -1;
    }
    init_block_mapping(s);

    for (i = 0; i < 3; i++) {
        s->current_frame.data[i] = NULL;
        s->last_frame.data[i] = NULL;
        s->golden_frame.data[i] = NULL;
    }

    return 0;

vlc_fail:
    av_log(avctx, AV_LOG_FATAL, "Invalid huffman table\n");
    return -1;
}

/*
 *
This is the ffmpeg/libavcodec API frame decode function.
 */
static int vp3_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    static int counter = 0; /* NOTE(review): shared across all decoder instances */
    int i;

    init_get_bits(&gb, buf, buf_size * 8);

    if (s->theora && get_bits1(&gb))
    {
        av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
        return -1;
    }

    s->keyframe = !get_bits1(&gb);
    if (!s->theora)
        skip_bits(&gb, 1);
    for (i = 0; i < 3; i++)
        s->last_qps[i] = s->qps[i];

    /* read 1-3 quantizer indices (Theora >= 3.2 may chain extra ones) */
    s->nqps=0;
    do{
        s->qps[s->nqps++]= get_bits(&gb, 6);
    } while(s->theora >= 0x030200 && s->nqps<3 && get_bits1(&gb));
    for (i = s->nqps; i < 3; i++)
        s->qps[i] = -1;

    if (s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
            s->keyframe?"key":"", counter, s->qps[0]);
    counter++;

    if (s->qps[0] != s->last_qps[0])
        init_loop_filter(s);

    for (i = 0; i < s->nqps; i++)
        // reinit all dequantizers if the first one changed, because
        // the DC of the first quantizer must be used for all matrices
        if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
            init_dequantizer(s, i);

    if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
        return buf_size;

    s->current_frame.reference = 3;
    s->current_frame.pict_type = s->keyframe ? FF_I_TYPE : FF_P_TYPE;
    if (avctx->get_buffer(avctx, &s->current_frame) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        goto error;
    }

    if (s->keyframe) {
        if (!s->theora)
        {
            skip_bits(&gb, 4); /* width code */
            skip_bits(&gb, 4); /* height code */
            if (s->version)
            {
                s->version = get_bits(&gb, 5);
                if (counter == 1)
                    av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
            }
        }
        if (s->version || s->theora)
        {
            if (get_bits1(&gb))
                av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
            skip_bits(&gb, 2); /* reserved? */
        }
    } else {
        if (!s->golden_frame.data[0]) {
            /* stream starts on an inter frame: synthesize a golden frame
             * so motion compensation has something to reference */
            av_log(s->avctx, AV_LOG_WARNING, "vp3: first frame not a keyframe\n");

            s->golden_frame.reference = 3;
            s->golden_frame.pict_type = FF_I_TYPE;
            if (avctx->get_buffer(avctx, &s->golden_frame) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                goto error;
            }
            s->last_frame = s->golden_frame;
            s->last_frame.type = FF_BUFFER_TYPE_COPY;
        }
    }

    s->current_frame.qscale_table= s->qscale_table; //FIXME allocate individual tables per AVFrame
    s->current_frame.qstride= 0;

    memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));

    if (unpack_superblocks(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
        goto error;
    }
    if (unpack_modes(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
        goto error;
    }
    if (unpack_vectors(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
        goto error;
    }
    if (unpack_block_qpis(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
        goto error;
    }
    if (unpack_dct_coeffs(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
        goto error;
    }

    for (i = 0; i < 3; i++) {
        int height = s->height >> (i && s->chroma_y_shift);
        if (s->flipped_image)
            s->data_offset[i] = 0;
        else
            /* render bottom-up: offset to the last row of each plane */
            s->data_offset[i] = (height-1) * s->current_frame.linesize[i];
    }

    s->last_slice_end = 0;
    for (i = 0; i < s->c_superblock_height; i++)
        render_slice(s, i);

    // filter the last row
    for (i = 0; i < 3; i++) {
        int row = (s->height >> (3+(i && s->chroma_y_shift))) - 1;
        apply_loop_filter(s, i, row, row+1);
    }
    vp3_draw_horiz_band(s, s->height);

    *data_size=sizeof(AVFrame);
    *(AVFrame*)data= s->current_frame;

    /* release the last frame, if it is allocated and if it is not the
     * golden frame */
    if (s->last_frame.data[0] && s->last_frame.type != FF_BUFFER_TYPE_COPY)
        avctx->release_buffer(avctx, &s->last_frame);

    /* shuffle frames (last = current) */
    s->last_frame= s->current_frame;

    if (s->keyframe) {
        if (s->golden_frame.data[0])
            avctx->release_buffer(avctx, &s->golden_frame);
        s->golden_frame = s->current_frame;
        s->last_frame.type = FF_BUFFER_TYPE_COPY;
    }
    s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */

    return buf_size;

error:
    if (s->current_frame.data[0])
        avctx->release_buffer(avctx, &s->current_frame);

    return -1;
}

/*
 * This is the ffmpeg/libavcodec API module cleanup function.
 */
static av_cold int vp3_decode_end(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int i;

    av_free(s->superblock_coding);
    av_free(s->all_fragments);
    av_free(s->coded_fragment_list[0]);
    av_free(s->dct_tokens_base);
    av_free(s->superblock_fragments);
    av_free(s->macroblock_coding);
    av_free(s->motion_val[0]);
    av_free(s->motion_val[1]);

    for (i = 0; i < 16; i++) {
        free_vlc(&s->dc_vlc[i]);
        free_vlc(&s->ac_vlc_1[i]);
        free_vlc(&s->ac_vlc_2[i]);
        free_vlc(&s->ac_vlc_3[i]);
        free_vlc(&s->ac_vlc_4[i]);
    }

    free_vlc(&s->superblock_run_length_vlc);
    free_vlc(&s->fragment_run_length_vlc);
    free_vlc(&s->mode_code_vlc);
    free_vlc(&s->motion_vector_vlc);

    /* release all frames */
    if (s->golden_frame.data[0])
        avctx->release_buffer(avctx, &s->golden_frame);
    if (s->last_frame.data[0] && s->last_frame.type != FF_BUFFER_TYPE_COPY)
        avctx->release_buffer(avctx, &s->last_frame);
    /* no need to release the current_frame since it will always be pointing
     * to the same frame as either the golden or last frame */

    return 0;
}

/* Recursively read one Huffman tree from a Theora setup header into
 * s->huffman_table[s->hti]; returns 0 on success, -1 on overflow. */
static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;

    if (get_bits1(gb)) {
        int token;
        if (s->entries >= 32) { /* overflow */
            av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
            return -1;
        }
        token = get_bits(gb, 5);
        //av_log(avctx, AV_LOG_DEBUG, "hti %d hbits %x token %d entry : %d size %d\n", s->hti, s->hbits, token, s->entries, s->huff_code_size);
        s->huffman_table[s->hti][token][0] = s->hbits;
        s->huffman_table[s->hti][token][1] = s->huff_code_size;
        s->entries++;
    }
    else {
        if (s->huff_code_size >= 32) {/* overflow */
            av_log(avctx,
AV_LOG_ERROR, "huffman tree overflow\n");
            return -1;
        }
        /* internal node: descend into the 0 and 1 subtrees */
        s->huff_code_size++;
        s->hbits <<= 1;
        if (read_huffman_tree(avctx, gb))
            return -1;
        s->hbits |= 1;
        if (read_huffman_tree(avctx, gb))
            return -1;
        s->hbits >>= 1;
        s->huff_code_size--;
    }
    return 0;
}

#if CONFIG_THEORA_DECODER
static const enum PixelFormat theora_pix_fmts[4] =
     { PIX_FMT_YUV420P, PIX_FMT_NONE, PIX_FMT_YUV422P, PIX_FMT_YUV444P };

/* Parse the Theora identification header (frame size, fps, aspect,
 * pixel format, colorspace). */
static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int visible_width, visible_height, colorspace;
    int offset_x = 0, offset_y = 0;
    AVRational fps;

    s->theora = get_bits_long(gb, 24);
    av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);

    /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 */
    /* but previous versions have the image flipped relative to vp3 */
    if (s->theora < 0x030200)
    {
        s->flipped_image = 1;
        av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
    }

    visible_width  = s->width  = get_bits(gb, 16) << 4;
    visible_height = s->height = get_bits(gb, 16) << 4;

    if(avcodec_check_dimensions(avctx, s->width, s->height)){
        av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n", s->width, s->height);
        s->width= s->height= 0;
        return -1;
    }

    if (s->theora >= 0x030200) {
        visible_width  = get_bits_long(gb, 24);
        visible_height = get_bits_long(gb, 24);

        offset_x = get_bits(gb, 8); /* offset x */
        offset_y = get_bits(gb, 8); /* offset y, from bottom */
    }

    fps.num = get_bits_long(gb, 32);
    fps.den = get_bits_long(gb, 32);
    if (fps.num && fps.den) {
        av_reduce(&avctx->time_base.num, &avctx->time_base.den,
                  fps.den, fps.num, 1<<30);
    }

    avctx->sample_aspect_ratio.num = get_bits_long(gb, 24);
    avctx->sample_aspect_ratio.den = get_bits_long(gb, 24);

    if (s->theora < 0x030200)
        skip_bits(gb, 5); /* keyframe frequency force */
    colorspace = get_bits(gb, 8);
    skip_bits(gb, 24); /* bitrate */

    skip_bits(gb, 6); /* quality hint */

    if (s->theora >= 0x030200)
    {
        skip_bits(gb, 5); /* keyframe frequency force */
        avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
        skip_bits(gb, 3); /* reserved */
    }

//    align_get_bits(gb);

    /* only crop to the visible area when the crop can be expressed as a
     * simple bottom-left-anchored trim of the mod-16 padding */
    if (   visible_width  <= s->width  && visible_width  > s->width-16
        && visible_height <= s->height && visible_height > s->height-16
        && !offset_x && (offset_y == s->height - visible_height))
        avcodec_set_dimensions(avctx, visible_width, visible_height);
    else
        avcodec_set_dimensions(avctx, s->width, s->height);

    if (colorspace == 1) {
        avctx->color_primaries = AVCOL_PRI_BT470M;
    } else if (colorspace == 2) {
        avctx->color_primaries = AVCOL_PRI_BT470BG;
    }
    if (colorspace == 1 || colorspace == 2) {
        avctx->colorspace = AVCOL_SPC_BT470BG;
        avctx->color_trc  = AVCOL_TRC_BT709;
    }

    return 0;
}

/* Parse the Theora setup header: filter limits, scale factors, base
 * matrices, quant ranges and the 80 Huffman trees. */
static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int i, n, matrices, inter, plane;

    if (s->theora >= 0x030200) {
        n = get_bits(gb, 3);
        /* loop filter limit values table */
        for (i = 0; i < 64; i++) {
            s->filter_limit_values[i] = get_bits(gb, n);
            if (s->filter_limit_values[i] > 127) {
                av_log(avctx, AV_LOG_ERROR, "filter limit value too large (%i > 127), clamping\n", s->filter_limit_values[i]);
                s->filter_limit_values[i] = 127;
            }
        }
    }

    if (s->theora >= 0x030200)
        n = get_bits(gb, 4) + 1;
    else
        n = 16;
    /* quality threshold table */
    for (i = 0; i < 64; i++)
        s->coded_ac_scale_factor[i] = get_bits(gb, n);

    if (s->theora >= 0x030200)
        n = get_bits(gb, 4) + 1;
    else
        n = 16;
    /* dc scale factor table */
    for (i = 0; i < 64; i++)
        s->coded_dc_scale_factor[i] = get_bits(gb, n);

    if (s->theora >= 0x030200)
        matrices = get_bits(gb, 9) + 1;
    else
        matrices = 3;

    if(matrices > 384){
        av_log(avctx, AV_LOG_ERROR, "invalid number of base matrixes\n");
        return -1;
    }

    for(n=0; n<matrices; n++){
        for (i = 0; i < 64; i++)
            s->base_matrix[n][i]= get_bits(gb, 8);
    }

    for (inter = 0; inter <= 1; inter++) {
        for (plane = 0; plane <= 2; plane++) {
            int newqr= 1;
            if (inter || plane > 0)
                newqr = get_bits1(gb);
            if (!newqr) {
                /* reuse the quant ranges of a previously-read (inter,plane) */
                int qtj, plj;
                if(inter && get_bits1(gb)){
                    qtj = 0;
                    plj = plane;
                }else{
                    qtj= (3*inter + plane - 1) / 3;
                    plj= (plane + 2) % 3;
                }
                s->qr_count[inter][plane]= s->qr_count[qtj][plj];
                memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj], sizeof(s->qr_size[0][0]));
                memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj], sizeof(s->qr_base[0][0]));
            } else {
                int qri= 0;
                int qi = 0;

                for(;;){
                    i= get_bits(gb, av_log2(matrices-1)+1);
                    if(i>= matrices){
                        av_log(avctx, AV_LOG_ERROR, "invalid base matrix index\n");
                        return -1;
                    }
                    s->qr_base[inter][plane][qri]= i;
                    if(qi >= 63)
                        break;
                    i = get_bits(gb, av_log2(63-qi)+1) + 1;
                    s->qr_size[inter][plane][qri++]= i;
                    qi += i;
                }

                if (qi > 63) {
                    av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
                    return -1;
                }
                s->qr_count[inter][plane]= qri;
            }
        }
    }

    /* Huffman tables */
    for (s->hti = 0; s->hti < 80; s->hti++) {
        s->entries = 0;
        s->huff_code_size = 1;
        if (!get_bits1(gb)) {
            s->hbits = 0;
            if(read_huffman_tree(avctx, gb))
                return -1;
            s->hbits = 1;
            if(read_huffman_tree(avctx, gb))
                return -1;
        }
    }

    s->theora_tables = 1;

    return 0;
}

static av_cold int theora_decode_init(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    int ptype;
    uint8_t *header_start[3];
    int header_len[3];
    int i;

    s->theora = 1;

    if (!avctx->extradata_size)
    {
        av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
        return -1;
    }

    if (ff_split_xiph_headers(avctx->extradata, avctx->extradata_size,
                              42, header_start, header_len) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
        return -1;
    }

  for(i=0;i<3;i++) {
    init_get_bits(&gb, header_start[i], header_len[i] * 8);

    ptype = get_bits(&gb, 8);

     if (!(ptype & 0x80))
     {
        av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
//        return -1;
     }

    // FIXME: Check for this as well.
    skip_bits_long(&gb, 6*8); /* "theora" */

    switch(ptype)
    {
        case 0x80:
            theora_decode_header(avctx, &gb);
            break;
        case 0x81:
// FIXME: is this needed? it breaks sometimes
//            theora_decode_comments(avctx, gb);
            break;
        case 0x82:
            if (theora_decode_tables(avctx, &gb))
                return -1;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype&~0x80);
            break;
    }
    if(ptype != 0x81 && 8*header_len[i] != get_bits_count(&gb))
        av_log(avctx, AV_LOG_WARNING, "%d bits left in packet %X\n",
            8*header_len[i] - get_bits_count(&gb), ptype);
    if (s->theora < 0x030200)
        break;
  }

    return vp3_decode_init(avctx);
}

AVCodec theora_decoder = {
    "theora",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_THEORA,
    sizeof(Vp3DecodeContext),
    theora_decode_init,
    NULL,
    vp3_decode_end,
    vp3_decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Theora"),
};
#endif

AVCodec vp3_decoder = {
    "vp3",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_VP3,
    sizeof(Vp3DecodeContext),
    vp3_decode_init,
    NULL,
    vp3_decode_end,
    vp3_decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"),
};
123linslouis-android-video-cutter
jni/libavcodec/vp3.c
C
asf20
78,843
/*
 * Colorspace conversion defines
 * Copyright (c) 2001, 2002, 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Various defines for YUV<->RGB conversion
 *
 * All conversions use SCALEBITS bits of fixed-point fraction: FIX(x)
 * turns a floating-point coefficient into a rounded integer scaled by
 * 1<<SCALEBITS, and ONE_HALF is the matching rounding constant.  The
 * "CCIR" variants operate on limited-range video levels (luma 16..235,
 * chroma 16..240, i.e. 219/224 steps); the plain variants operate on
 * full-range (JPEG) 0..255 levels.  `cm` is expected to be a clamping
 * (crop) lookup table provided by the caller — see the NOTE on
 * C_JPEG_TO_CCIR below on why clamping is required.
 */

#ifndef AVCODEC_COLORSPACE_H
#define AVCODEC_COLORSPACE_H

#define SCALEBITS 10
#define ONE_HALF (1 << (SCALEBITS - 1))
#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))

/* Step 1 of YUV->RGB for limited-range input: compute the chroma
 * contributions r_add/g_add/b_add.  The 255.0/224.0 factor rescales the
 * 224-step chroma range to full range.  The caller must declare
 * cb, cr, r_add, g_add and b_add. */
#define YUV_TO_RGB1_CCIR(cb1, cr1)\
{\
    cb = (cb1) - 128;\
    cr = (cr1) - 128;\
    r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
    g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
            ONE_HALF;\
    b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
}

/* Step 2 for limited-range input: expand luma (y1 - 16, 219 steps -> 255)
 * and combine with the precomputed chroma adds, clamping through cm[]. */
#define YUV_TO_RGB2_CCIR(r, g, b, y1)\
{\
    y = ((y1) - 16) * FIX(255.0/219.0);\
    r = cm[(y + r_add) >> SCALEBITS];\
    g = cm[(y + g_add) >> SCALEBITS];\
    b = cm[(y + b_add) >> SCALEBITS];\
}

/* Step 1 of YUV->RGB for full-range (JPEG) input. */
#define YUV_TO_RGB1(cb1, cr1)\
{\
    cb = (cb1) - 128;\
    cr = (cr1) - 128;\
    r_add = FIX(1.40200) * cr + ONE_HALF;\
    g_add = - FIX(0.34414) * cb - FIX(0.71414) * cr + ONE_HALF;\
    b_add = FIX(1.77200) * cb + ONE_HALF;\
}

/* Step 2 for full-range input: luma is used as-is (only scaled up). */
#define YUV_TO_RGB2(r, g, b, y1)\
{\
    y = (y1) << SCALEBITS;\
    r = cm[(y + r_add) >> SCALEBITS];\
    g = cm[(y + g_add) >> SCALEBITS];\
    b = cm[(y + b_add) >> SCALEBITS];\
}

/* Level conversions between CCIR (limited) and JPEG (full) ranges. */
#define Y_CCIR_TO_JPEG(y)\
 cm[((y) * FIX(255.0/219.0) + (ONE_HALF - 16 * FIX(255.0/219.0))) >> SCALEBITS]

#define Y_JPEG_TO_CCIR(y)\
 (((y) * FIX(219.0/255.0) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define C_CCIR_TO_JPEG(y)\
 cm[(((y) - 128) * FIX(127.0/112.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS]

/* NOTE: the clamp is really necessary! */
static inline int C_JPEG_TO_CCIR(int y) {
    y = (((y - 128) * FIX(112.0/127.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS);
    if (y < 16)
        y = 16;
    return y;
}

/* RGB->YUV at full-range (JPEG) levels. */
#define RGB_TO_Y(r, g, b) \
((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
  FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)

/* The `shift` parameter allows passing pre-summed r1/g1/b1 values
 * (e.g. the sum of a 2x2 block with shift == 2) and averaging them
 * as part of the fixed-point downshift. */
#define RGB_TO_U(r1, g1, b1, shift)\
(((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
     FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V(r1, g1, b1, shift)\
(((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
   FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

/* RGB->YUV producing limited-range (CCIR) levels: coefficients are
 * pre-scaled by 219/255 (luma) or 224/255 (chroma), with the +16 luma
 * offset folded into the rounding constant. */
#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#endif /* AVCODEC_COLORSPACE_H */
123linslouis-android-video-cutter
jni/libavcodec/colorspace.h
C
asf20
3,674
/*
 * Copyright (c) 2004 Roman Shaposhnik
 *
 * Many thanks to Steven M. Schultz for providing clever ideas and
 * to Michael Niedermayer <michaelni@gmx.at> for writing initial
 * implementation.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* pthreads-based implementation of AVCodecContext.execute()/execute2():
 * a fixed pool of worker threads pulls job indices from a shared counter. */

#include <pthread.h>
#include "avcodec.h"

typedef int (action_func)(AVCodecContext *c, void *arg);
typedef int (action_func2)(AVCodecContext *c, void *arg, int jobnr, int threadnr);

typedef struct ThreadContext {
    pthread_t *workers;          /* array of avctx->thread_count worker threads */
    action_func *func;           /* per-job callback (execute); NULL => use func2 */
    action_func2 *func2;         /* per-job callback with job/thread ids (execute2) */
    void *args;                  /* job argument array (func) or opaque arg (func2) */
    int *rets;                   /* per-job return values (or &dummy_ret) */
    int rets_count;              /* number of slots in rets */
    int job_count;               /* jobs in the current batch; 0 when idle */
    int job_size;                /* byte stride between entries in args (func mode) */
    pthread_cond_t last_job_cond;    /* signaled when the whole batch is finished */
    pthread_cond_t current_job_cond; /* broadcast to wake workers for a new batch */
    pthread_mutex_t current_job_lock;/* guards every field above */
    /* Job dispenser, offset by thread_count: each woken worker first runs
     * job self_id, then takes current_job++ until it reads past job_count.
     * Once it reaches thread_count + job_count, every worker has gone idle
     * and the batch is complete. */
    int current_job;
    int done;                    /* set under the lock to make workers exit */
} ThreadContext;

/* Worker main loop: park until a batch is published, run jobs from the
 * shared dispenser, signal last_job_cond when the batch drains. */
static void* attribute_align_arg worker(void *v)
{
    AVCodecContext *avctx = v;
    ThreadContext *c = avctx->thread_opaque;
    int our_job = c->job_count;
    int thread_count = avctx->thread_count;
    int self_id;

    pthread_mutex_lock(&c->current_job_lock);
    self_id = c->current_job++;  /* unique 0..thread_count-1 id, handed out at startup */
    for (;;){
        while (our_job >= c->job_count) {
            /* this worker over-ran the dispenser; when all have, the batch is done */
            if (c->current_job == thread_count + c->job_count)
                pthread_cond_signal(&c->last_job_cond);
            pthread_cond_wait(&c->current_job_cond, &c->current_job_lock);
            our_job = self_id;   /* first job of a fresh batch is our own id */
            if (c->done) {
                pthread_mutex_unlock(&c->current_job_lock);
                return NULL;
            }
        }
        /* run the job outside the lock */
        pthread_mutex_unlock(&c->current_job_lock);
        c->rets[our_job%c->rets_count] = c->func ? c->func(avctx, (char*)c->args + our_job*c->job_size):
                                                   c->func2(avctx, c->args, our_job, self_id);
        pthread_mutex_lock(&c->current_job_lock);
        our_job = c->current_job++;
    }
}

/* Wait (with current_job_lock held on entry) until the workers signal that
 * the current batch has drained, then release the lock. */
static av_always_inline void avcodec_thread_park_workers(ThreadContext *c, int thread_count)
{
    pthread_cond_wait(&c->last_job_cond, &c->current_job_lock);
    pthread_mutex_unlock(&c->current_job_lock);
}

/* Tear down the pool: flag done under the lock, wake everyone, join, and
 * destroy all synchronization objects and allocations. */
void avcodec_thread_free(AVCodecContext *avctx)
{
    ThreadContext *c = avctx->thread_opaque;
    int i;

    pthread_mutex_lock(&c->current_job_lock);
    c->done = 1;
    pthread_cond_broadcast(&c->current_job_cond);
    pthread_mutex_unlock(&c->current_job_lock);

    for (i=0; i<avctx->thread_count; i++)
        pthread_join(c->workers[i], NULL);

    pthread_mutex_destroy(&c->current_job_lock);
    pthread_cond_destroy(&c->current_job_cond);
    pthread_cond_destroy(&c->last_job_cond);
    av_free(c->workers);
    av_freep(&avctx->thread_opaque);
}

/* Publish a batch of job_count jobs and block until all have completed.
 * Per-job return codes go into ret[] when provided (otherwise discarded
 * via dummy_ret).  Always returns 0. */
static int avcodec_thread_execute(AVCodecContext *avctx, action_func* func, void *arg, int *ret, int job_count, int job_size)
{
    ThreadContext *c= avctx->thread_opaque;
    int dummy_ret;

    if (job_count <= 0)
        return 0;

    pthread_mutex_lock(&c->current_job_lock);

    /* reset the dispenser; see the current_job field comment for the offset */
    c->current_job = avctx->thread_count;
    c->job_count = job_count;
    c->job_size = job_size;
    c->args = arg;
    c->func = func;
    if (ret) {
        c->rets = ret;
        c->rets_count = job_count;
    } else {
        c->rets = &dummy_ret;
        c->rets_count = 1;
    }
    pthread_cond_broadcast(&c->current_job_cond);

    avcodec_thread_park_workers(c, avctx->thread_count);

    return 0;
}

/* execute2() variant: stores func2 and funnels through avcodec_thread_execute
 * with func == NULL so the workers call func2(avctx, arg, jobnr, threadnr). */
static int avcodec_thread_execute2(AVCodecContext *avctx, action_func2* func2, void *arg, int *ret, int job_count)
{
    ThreadContext *c= avctx->thread_opaque;
    c->func2 = func2;
    return avcodec_thread_execute(avctx, NULL, arg, ret, job_count, 0);
}

/* Create the worker pool and install the execute callbacks.
 * Returns 0 on success (including the trivial thread_count <= 1 case,
 * where no pool is created), -1 on allocation or thread-creation failure. */
int avcodec_thread_init(AVCodecContext *avctx, int thread_count)
{
    int i;
    ThreadContext *c;

    avctx->thread_count = thread_count;

    if (thread_count <= 1)
        return 0;

    c = av_mallocz(sizeof(ThreadContext));
    if (!c)
        return -1;

    c->workers = av_mallocz(sizeof(pthread_t)*thread_count);
    if (!c->workers) {
        av_free(c);
        return -1;
    }

    avctx->thread_opaque = c;
    c->current_job = 0;
    c->job_count = 0;
    c->job_size = 0;
    c->done = 0;
    pthread_cond_init(&c->current_job_cond, NULL);
    pthread_cond_init(&c->last_job_cond, NULL);
    pthread_mutex_init(&c->current_job_lock, NULL);
    /* hold the lock while spawning so workers park deterministically */
    pthread_mutex_lock(&c->current_job_lock);
    for (i=0; i<thread_count; i++) {
        if(pthread_create(&c->workers[i], NULL, worker, avctx)) {
           /* join only the threads actually created, then clean up */
           avctx->thread_count = i;
           pthread_mutex_unlock(&c->current_job_lock);
           avcodec_thread_free(avctx);
           return -1;
        }
    }

    /* wait until every worker has taken its id and parked */
    avcodec_thread_park_workers(c, thread_count);

    avctx->execute = avcodec_thread_execute;
    avctx->execute2 = avcodec_thread_execute2;
    return 0;
}
123linslouis-android-video-cutter
jni/libavcodec/pthread.c
C
asf20
5,431
/* * Generate a header file for hardcoded AAC cube-root table * * Copyright (c) 2010 Reimar Döffinger <Reimar.Doeffinger@gmx.de> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdlib.h> #define CONFIG_HARDCODED_TABLES 0 #include "cbrt_tablegen.h" #include "tableprint.h" int main(void) { cbrt_tableinit(); write_fileheader(); printf("static const uint32_t cbrt_tab[1<<13] = {\n"); write_uint32_array(cbrt_tab, 1 << 13); printf("};\n"); return 0; }
123linslouis-android-video-cutter
jni/libavcodec/cbrt_tablegen.c
C
asf20
1,207
/* * imx dump header bitstream filter * Copyright (c) 2007 Baptiste Coudurier * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * imx dump header bitstream filter * modifies bitstream to fit in mov and be decoded by final cut pro decoder */ #include "avcodec.h" #include "bytestream.h" static int imx_dump_header(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe) { /* MXF essence element key */ static const uint8_t imx_header[16] = { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01,0x05,0x01,0x01,0x00 }; uint8_t *poutbufp; if (avctx->codec_id != CODEC_ID_MPEG2VIDEO) { av_log(avctx, AV_LOG_ERROR, "imx bitstream filter only applies to mpeg2video codec\n"); return 0; } *poutbuf = av_malloc(buf_size + 20 + FF_INPUT_BUFFER_PADDING_SIZE); poutbufp = *poutbuf; bytestream_put_buffer(&poutbufp, imx_header, 16); bytestream_put_byte(&poutbufp, 0x83); /* KLV BER long form */ bytestream_put_be24(&poutbufp, buf_size); bytestream_put_buffer(&poutbufp, buf, buf_size); *poutbuf_size = poutbufp - *poutbuf; return 1; } AVBitStreamFilter imx_dump_header_bsf = { "imxdump", 0, imx_dump_header, };
123linslouis-android-video-cutter
jni/libavcodec/imx_dump_header_bsf.c
C
asf20
2,081
/* * Misc image conversion routines * most functionality is exported to the public API, see avcodec.h * * Copyright (c) 2008 Vitor Sessak * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_IMGCONVERT_H #define AVCODEC_IMGCONVERT_H #include <stdint.h> #include "avcodec.h" int ff_fill_linesize(AVPicture *picture, enum PixelFormat pix_fmt, int width); int ff_fill_pointer(AVPicture *picture, uint8_t *ptr, enum PixelFormat pix_fmt, int height); int ff_get_plane_bytewidth(enum PixelFormat pix_fmt, int width, int plane); int ff_set_systematic_pal(uint32_t pal[256], enum PixelFormat pix_fmt); #endif /* AVCODEC_IMGCONVERT_H */
123linslouis-android-video-cutter
jni/libavcodec/imgconvert.h
C
asf20
1,365
/* * CCITT Fax Group 3 and 4 decompression * Copyright (c) 2008 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * CCITT Fax Group 3 and 4 decompression * @file * @author Konstantin Shishkov */ #include "avcodec.h" #include "get_bits.h" #include "put_bits.h" #include "faxcompr.h" #define CCITT_SYMS 104 static const uint16_t ccitt_syms[CCITT_SYMS] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 128, 192, 256, 320, 384, 448, 512, 576, 640, 704, 768, 832, 896, 960, 1024, 1088, 1152, 1216, 1280, 1344, 1408, 1472, 1536, 1600, 1664, 1728, 1792, 1856, 1920, 1984, 2048, 2112, 2176, 2240, 2304, 2368, 2432, 2496, 2560 }; static const uint8_t ccitt_codes_bits[2][CCITT_SYMS] = { { 0x35, 0x07, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, 0x13, 0x14, 0x07, 0x08, 0x08, 0x03, 0x34, 0x35, 0x2A, 0x2B, 0x27, 0x0C, 0x08, 0x17, 0x03, 0x04, 0x28, 0x2B, 0x13, 0x24, 0x18, 0x02, 0x03, 0x1A, 0x1B, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x04, 0x05, 0x0A, 0x0B, 0x52, 0x53, 0x54, 0x55, 0x24, 0x25, 0x58, 0x59, 0x5A, 0x5B, 0x4A, 0x4B, 0x32, 0x33, 0x34, 0x1B, 0x12, 
0x17, 0x37, 0x36, 0x37, 0x64, 0x65, 0x68, 0x67, 0xCC, 0xCD, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xDB, 0x98, 0x99, 0x9A, 0x18, 0x9B, 0x08, 0x0C, 0x0D, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F }, { 0x37, 0x02, 0x03, 0x02, 0x03, 0x03, 0x02, 0x03, 0x05, 0x04, 0x04, 0x05, 0x07, 0x04, 0x07, 0x18, 0x17, 0x18, 0x08, 0x67, 0x68, 0x6C, 0x37, 0x28, 0x17, 0x18, 0xCA, 0xCB, 0xCC, 0xCD, 0x68, 0x69, 0x6A, 0x6B, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0x6C, 0x6D, 0xDA, 0xDB, 0x54, 0x55, 0x56, 0x57, 0x64, 0x65, 0x52, 0x53, 0x24, 0x37, 0x38, 0x27, 0x28, 0x58, 0x59, 0x2B, 0x2C, 0x5A, 0x66, 0x67, 0x0F, 0xC8, 0xC9, 0x5B, 0x33, 0x34, 0x35, 0x6C, 0x6D, 0x4A, 0x4B, 0x4C, 0x4D, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x52, 0x53, 0x54, 0x55, 0x5A, 0x5B, 0x64, 0x65, 0x08, 0x0C, 0x0D, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F } }; static const uint8_t ccitt_codes_lens[2][CCITT_SYMS] = { { 8, 6, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 5, 6, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 6, 9, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12 }, { 10, 3, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 7, 8, 8, 9, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 10, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12 } }; static const uint8_t ccitt_group3_2d_bits[11] = { 1, 1, 2, 2, 2, 1, 3, 3, 3, 1, 1 }; static const uint8_t ccitt_group3_2d_lens[11] = { 4, 3, 7, 6, 3, 1, 3, 6, 7, 7, 9 }; static VLC ccitt_vlc[2], ccitt_group3_2d_vlc; av_cold void ff_ccitt_unpack_init(void) { static VLC_TYPE code_table1[528][2]; static VLC_TYPE code_table2[648][2]; int i; static 
int initialized = 0; if(initialized) return; ccitt_vlc[0].table = code_table1; ccitt_vlc[0].table_allocated = 528; ccitt_vlc[1].table = code_table2; ccitt_vlc[1].table_allocated = 648; for(i = 0; i < 2; i++){ init_vlc_sparse(&ccitt_vlc[i], 9, CCITT_SYMS, ccitt_codes_lens[i], 1, 1, ccitt_codes_bits[i], 1, 1, ccitt_syms, 2, 2, INIT_VLC_USE_NEW_STATIC); } INIT_VLC_STATIC(&ccitt_group3_2d_vlc, 9, 11, ccitt_group3_2d_lens, 1, 1, ccitt_group3_2d_bits, 1, 1, 512); initialized = 1; } static int decode_group3_1d_line(AVCodecContext *avctx, GetBitContext *gb, unsigned int pix_left, int *runs, const int *runend) { int mode = 0; unsigned int run=0; unsigned int t; for(;;){ t = get_vlc2(gb, ccitt_vlc[mode].table, 9, 2); run += t; if(t < 64){ *runs++ = run; if(runs >= runend){ av_log(avctx, AV_LOG_ERROR, "Run overrun\n"); return -1; } if(pix_left <= run){ if(pix_left == run) break; av_log(avctx, AV_LOG_ERROR, "Run went out of bounds\n"); return -1; } pix_left -= run; run = 0; mode = !mode; }else if((int)t == -1){ av_log(avctx, AV_LOG_ERROR, "Incorrect code\n"); return -1; } } *runs++ = 0; return 0; } static int decode_group3_2d_line(AVCodecContext *avctx, GetBitContext *gb, unsigned int width, int *runs, const int *runend, const int *ref) { int mode = 0, saved_run = 0, t; int run_off = *ref++; unsigned int offs=0, run= 0; runend--; // for the last written 0 while(offs < width){ int cmode = get_vlc2(gb, ccitt_group3_2d_vlc.table, 9, 1); if(cmode == -1){ av_log(avctx, AV_LOG_ERROR, "Incorrect mode VLC\n"); return -1; } if(!cmode){//pass mode run_off += *ref++; run = run_off - offs; offs= run_off; run_off += *ref++; if(offs > width){ av_log(avctx, AV_LOG_ERROR, "Run went out of bounds\n"); return -1; } saved_run += run; }else if(cmode == 1){//horizontal mode int k; for(k = 0; k < 2; k++){ run = 0; for(;;){ t = get_vlc2(gb, ccitt_vlc[mode].table, 9, 2); if(t == -1){ av_log(avctx, AV_LOG_ERROR, "Incorrect code\n"); return -1; } run += t; if(t < 64) break; } *runs++ = run + saved_run; 
if(runs >= runend){ av_log(avctx, AV_LOG_ERROR, "Run overrun\n"); return -1; } saved_run = 0; offs += run; if(offs > width || run > width){ av_log(avctx, AV_LOG_ERROR, "Run went out of bounds\n"); return -1; } mode = !mode; } }else if(cmode == 9 || cmode == 10){ av_log(avctx, AV_LOG_ERROR, "Special modes are not supported (yet)\n"); return -1; }else{//vertical mode run = run_off - offs + (cmode - 5); run_off -= *--ref; offs += run; if(offs > width || run > width){ av_log(avctx, AV_LOG_ERROR, "Run went out of bounds\n"); return -1; } *runs++ = run + saved_run; if(runs >= runend){ av_log(avctx, AV_LOG_ERROR, "Run overrun\n"); return -1; } saved_run = 0; mode = !mode; } //sync line pointers while(run_off <= offs){ run_off += *ref++; run_off += *ref++; } } *runs++ = saved_run; *runs++ = 0; return 0; } static void put_line(uint8_t *dst, int size, int width, const int *runs) { PutBitContext pb; int run, mode = ~0, pix_left = width, run_idx = 0; init_put_bits(&pb, dst, size*8); while(pix_left > 0){ run = runs[run_idx++]; mode = ~mode; pix_left -= run; for(; run > 16; run -= 16) put_sbits(&pb, 16, mode); if(run) put_sbits(&pb, run, mode); } flush_put_bits(&pb); } static int find_group3_syncmarker(GetBitContext *gb, int srcsize) { unsigned int state = -1; srcsize -= get_bits_count(gb); while(srcsize-- > 0){ state+= state + get_bits1(gb); if((state & 0xFFF) == 1) return 0; } return -1; } int ff_ccitt_unpack(AVCodecContext *avctx, const uint8_t *src, int srcsize, uint8_t *dst, int height, int stride, enum TiffCompr compr, int opts) { int j; GetBitContext gb; int *runs, *ref, *runend; int ret; int runsize= avctx->width + 2; runs = av_malloc(runsize * sizeof(runs[0])); ref = av_malloc(runsize * sizeof(ref[0])); ref[0] = avctx->width; ref[1] = 0; ref[2] = 0; init_get_bits(&gb, src, srcsize*8); for(j = 0; j < height; j++){ runend = runs + runsize; if(compr == TIFF_G4){ ret = decode_group3_2d_line(avctx, &gb, avctx->width, runs, runend, ref); if(ret < 0){ av_free(runs); 
av_free(ref); return -1; } }else{ int g3d1 = (compr == TIFF_G3) && !(opts & 1); if(compr!=TIFF_CCITT_RLE && find_group3_syncmarker(&gb, srcsize*8) < 0) break; if(compr==TIFF_CCITT_RLE || g3d1 || get_bits1(&gb)) ret = decode_group3_1d_line(avctx, &gb, avctx->width, runs, runend); else ret = decode_group3_2d_line(avctx, &gb, avctx->width, runs, runend, ref); if(compr==TIFF_CCITT_RLE) align_get_bits(&gb); } if(ret < 0){ put_line(dst, stride, avctx->width, ref); }else{ put_line(dst, stride, avctx->width, runs); FFSWAP(int*, runs, ref); } dst += stride; } av_free(runs); av_free(ref); return 0; }
123linslouis-android-video-cutter
jni/libavcodec/faxcompr.c
C
asf20
11,121
/*
 * Raw Video Decoder
 * Copyright (c) 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Raw Video Decoder
 */

#include "avcodec.h"
#include "raw.h"
#include "libavutil/intreadwrite.h"

typedef struct RawVideoContext {
    unsigned char * buffer;  /* block of memory for holding one frame */
    int             length;  /* number of bytes in buffer */
    int flip;                /* nonzero => image is stored bottom-up */
    AVFrame pic;             ///< AVCodecContext.coded_frame
} RawVideoContext;

/* bits_per_coded_sample -> pixel format, AVI flavor */
static const PixelFormatTag pixelFormatBpsAVI[] = {
    { PIX_FMT_PAL8,    4 },
    { PIX_FMT_PAL8,    8 },
    { PIX_FMT_RGB555, 15 },
    { PIX_FMT_RGB555, 16 },
    { PIX_FMT_BGR24,  24 },
    { PIX_FMT_RGB32,  32 },
    { PIX_FMT_NONE, 0 },
};

/* bits_per_coded_sample -> pixel format, QuickTime/MOV flavor */
static const PixelFormatTag pixelFormatBpsMOV[] = {
    { PIX_FMT_MONOWHITE, 1 },
    { PIX_FMT_PAL8,      2 },
    { PIX_FMT_PAL8,      4 },
    { PIX_FMT_PAL8,      8 },
    // FIXME swscale does not support 16 bit in .mov, sample 16bit.mov
    // http://developer.apple.com/documentation/QuickTime/QTFF/QTFFChap3/qtff3.html
    { PIX_FMT_RGB555BE, 16 },
    { PIX_FMT_RGB24,    24 },
    { PIX_FMT_ARGB,     32 },
    { PIX_FMT_NONE, 0 },
};

/* Look up fourcc (or bps value) in a PIX_FMT_NONE-terminated tag table;
 * falls back to PIX_FMT_YUV420P when no entry matches. */
static enum PixelFormat findPixelFormat(const PixelFormatTag *tags, unsigned int fourcc)
{
    while (tags->pix_fmt >= 0) {
        if (tags->fourcc == fourcc)
            return tags->pix_fmt;
        tags++;
    }
    return PIX_FMT_YUV420P;
}

/* Pick the pixel format from codec_tag / bits_per_coded_sample, allocate a
 * scratch buffer big enough for one frame, and set up coded_frame. */
static av_cold int raw_init_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;

    if (avctx->codec_tag == MKTAG('r','a','w',' '))
        avctx->pix_fmt = findPixelFormat(pixelFormatBpsMOV, avctx->bits_per_coded_sample);
    else if (avctx->codec_tag)
        avctx->pix_fmt = findPixelFormat(ff_raw_pixelFormatTags, avctx->codec_tag);
    else if (avctx->bits_per_coded_sample)
        avctx->pix_fmt = findPixelFormat(pixelFormatBpsAVI, avctx->bits_per_coded_sample);

    context->length = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
    context->buffer = av_malloc(context->length);
    context->pic.pict_type = FF_I_TYPE;
    context->pic.key_frame = 1;

    avctx->coded_frame= &context->pic;

    if (!context->buffer)
        return -1;

    /* BMP-style bottom-up storage: "BottomUp" marker in extradata, or tag 3 */
    if((avctx->extradata_size >= 9 && !memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) ||
       avctx->codec_tag == MKTAG( 3 , 0 , 0 , 0 ))
        context->flip=1;

    return 0;
}

/* Turn a top-down AVPicture into a bottom-up view by pointing data[0] at
 * the last row and negating the stride. */
static void flip(AVCodecContext *avctx, AVPicture * picture){
    picture->data[0] += picture->linesize[0] * (avctx->height-1);
    picture->linesize[0] *= -1;
}

/* Wrap the packet bytes into an AVPicture without copying where possible,
 * handling a pile of container-specific quirks (2/4 bpp expansion, tag-based
 * plane swaps, bottom-up flip, yuv2 chroma sign flip). */
static int raw_decode(AVCodecContext *avctx,
                      void *data, int *data_size,
                      AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    RawVideoContext *context = avctx->priv_data;

    AVFrame * frame = (AVFrame *) data;
    AVPicture * picture = (AVPicture *) data;

    frame->interlaced_frame = avctx->coded_frame->interlaced_frame;
    frame->top_field_first = avctx->coded_frame->top_field_first;

    //2bpp and 4bpp raw in avi and mov (yes this is ugly ...)
    /* expand packed 2/4-bit palette indices to one byte per pixel into the
     * scratch buffer; the first 256*4 bytes are reserved for the palette */
    if((avctx->bits_per_coded_sample == 4 || avctx->bits_per_coded_sample == 2) &&
       avctx->pix_fmt==PIX_FMT_PAL8 &&
       (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' '))){
        int i;
        uint8_t *dst = context->buffer + 256*4;
        buf_size = context->length - 256*4;
        if (avctx->bits_per_coded_sample == 4){
            for(i=0; 2*i+1 < buf_size; i++){
                dst[2*i+0]= buf[i]>>4;
                dst[2*i+1]= buf[i]&15;
            }
        } else
            for(i=0; 4*i+3 < buf_size; i++){
                dst[4*i+0]= buf[i]>>6;
                dst[4*i+1]= buf[i]>>4&3;
                dst[4*i+2]= buf[i]>>2&3;
                dst[4*i+3]= buf[i]   &3;
            }
        buf= dst;
    }

    /* NOTE(review): presumably skips leading padding so the image data sits
     * at the end of the packet for these tags — confirm with samples */
    if(avctx->codec_tag == MKTAG('A', 'V', '1', 'x') ||
       avctx->codec_tag == MKTAG('A', 'V', 'u', 'p'))
        buf += buf_size - context->length;

    /* a PAL8 packet may legitimately omit the 256*4-byte palette */
    if(buf_size < context->length - (avctx->pix_fmt==PIX_FMT_PAL8 ? 256*4 : 0))
        return -1;

    avpicture_fill(picture, buf, avctx->pix_fmt, avctx->width, avctx->height);
    if(avctx->pix_fmt==PIX_FMT_PAL8 && buf_size < context->length){
        /* palette missing from the packet: use the scratch buffer's copy */
        frame->data[1]= context->buffer;
    }
    if (avctx->palctrl && avctx->palctrl->palette_changed) {
        memcpy(frame->data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
        avctx->palctrl->palette_changed = 0;
    }
    /* BGR24 rows are often padded to 4-byte alignment; adopt the padded
     * stride when the buffer is large enough for it */
    if(avctx->pix_fmt==PIX_FMT_BGR24 && ((frame->linesize[0]+3)&~3)*avctx->height <= buf_size)
        frame->linesize[0] = (frame->linesize[0]+3)&~3;
    if(context->flip)
        flip(avctx, picture);

    /* these tags store chroma planes in V/U order */
    if (   avctx->codec_tag == MKTAG('Y', 'V', '1', '2')
        || avctx->codec_tag == MKTAG('Y', 'V', 'U', '9'))
        FFSWAP(uint8_t *, picture->data[1], picture->data[2]);

    /* 'yuv2' stores chroma as signed values: flip the sign bit in place */
    if(avctx->codec_tag == AV_RL32("yuv2") &&
       avctx->pix_fmt   == PIX_FMT_YUYV422) {
        int x, y;
        uint8_t *line = picture->data[0];
        for(y = 0; y < avctx->height; y++) {
            for(x = 0; x < avctx->width; x++)
                line[2*x + 1] ^= 0x80;
            line += picture->linesize[0];
        }
    }

    *data_size = sizeof(AVPicture);
    return buf_size;
}

/* Release the per-frame scratch buffer. */
static av_cold int raw_close_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;

    av_freep(&context->buffer);
    return 0;
}

AVCodec rawvideo_decoder = {
    "rawvideo",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_RAWVIDEO,
    sizeof(RawVideoContext),
    raw_init_decoder,
    NULL,
    raw_close_decoder,
    raw_decode,
    .long_name = NULL_IF_CONFIG_SMALL("raw video"),
};
123linslouis-android-video-cutter
jni/libavcodec/rawdec.c
C
asf20
6,443
/* * exp golomb vlc stuff * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at> * Copyright (c) 2004 Alex Beregszaszi * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * @brief * exp golomb vlc stuff * @author Michael Niedermayer <michaelni@gmx.at> and Alex Beregszaszi */ #ifndef AVCODEC_GOLOMB_H #define AVCODEC_GOLOMB_H #include <stdint.h> #include "get_bits.h" #include "put_bits.h" #define INVALID_VLC 0x80000000 extern const uint8_t ff_golomb_vlc_len[512]; extern const uint8_t ff_ue_golomb_vlc_code[512]; extern const int8_t ff_se_golomb_vlc_code[512]; extern const uint8_t ff_ue_golomb_len[256]; extern const uint8_t ff_interleaved_golomb_vlc_len[256]; extern const uint8_t ff_interleaved_ue_golomb_vlc_code[256]; extern const int8_t ff_interleaved_se_golomb_vlc_code[256]; extern const uint8_t ff_interleaved_dirac_golomb_vlc_code[256]; /** * read unsigned exp golomb code. 
*/ static inline int get_ue_golomb(GetBitContext *gb){ unsigned int buf; int log; OPEN_READER(re, gb); UPDATE_CACHE(re, gb); buf=GET_CACHE(re, gb); if(buf >= (1<<27)){ buf >>= 32 - 9; LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]); CLOSE_READER(re, gb); return ff_ue_golomb_vlc_code[buf]; }else{ log= 2*av_log2(buf) - 31; buf>>= log; buf--; LAST_SKIP_BITS(re, gb, 32 - log); CLOSE_READER(re, gb); return buf; } } /** * read unsigned exp golomb code, constraint to a max of 31. * the return value is undefined if the stored value exceeds 31. */ static inline int get_ue_golomb_31(GetBitContext *gb){ unsigned int buf; OPEN_READER(re, gb); UPDATE_CACHE(re, gb); buf=GET_CACHE(re, gb); buf >>= 32 - 9; LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]); CLOSE_READER(re, gb); return ff_ue_golomb_vlc_code[buf]; } static inline int svq3_get_ue_golomb(GetBitContext *gb){ uint32_t buf; OPEN_READER(re, gb); UPDATE_CACHE(re, gb); buf=GET_CACHE(re, gb); if(buf&0xAA800000){ buf >>= 32 - 8; LAST_SKIP_BITS(re, gb, ff_interleaved_golomb_vlc_len[buf]); CLOSE_READER(re, gb); return ff_interleaved_ue_golomb_vlc_code[buf]; }else{ int ret = 1; while (1) { buf >>= 32 - 8; LAST_SKIP_BITS(re, gb, FFMIN(ff_interleaved_golomb_vlc_len[buf], 8)); if (ff_interleaved_golomb_vlc_len[buf] != 9){ ret <<= (ff_interleaved_golomb_vlc_len[buf] - 1) >> 1; ret |= ff_interleaved_dirac_golomb_vlc_code[buf]; break; } ret = (ret << 4) | ff_interleaved_dirac_golomb_vlc_code[buf]; UPDATE_CACHE(re, gb); buf = GET_CACHE(re, gb); } CLOSE_READER(re, gb); return ret - 1; } } /** * read unsigned truncated exp golomb code. */ static inline int get_te0_golomb(GetBitContext *gb, int range){ assert(range >= 1); if(range==1) return 0; else if(range==2) return get_bits1(gb)^1; else return get_ue_golomb(gb); } /** * read unsigned truncated exp golomb code. 
*/
static inline int get_te_golomb(GetBitContext *gb, int range){
    assert(range >= 1);

    /* Unlike get_te0_golomb() there is no range==1 shortcut here;
     * range==1 falls through to a full ue(v) read. */
    if(range==2) return get_bits1(gb)^1;
    else         return get_ue_golomb(gb);
}

/**
 * read signed exp golomb code.
 */
static inline int get_se_golomb(GetBitContext *gb){
    unsigned int buf;
    int log;

    OPEN_READER(re, gb);
    UPDATE_CACHE(re, gb);
    buf=GET_CACHE(re, gb);

    if(buf >= (1<<27)){
        /* Short code: the signed lookup table already folds in the
         * ue->se value mapping. */
        buf >>= 32 - 9;
        LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]);
        CLOSE_READER(re, gb);

        return ff_se_golomb_vlc_code[buf];
    }else{
        log= 2*av_log2(buf) - 31;
        buf>>= log;

        LAST_SKIP_BITS(re, gb, 32 - log);
        CLOSE_READER(re, gb);

        /* Undo the zigzag: odd ue values map to negative, even to
         * positive (see the inverse in set_se_golomb()). */
        if(buf&1) buf= -(buf>>1);
        else      buf=  (buf>>1);

        return buf;
    }
}

/* Read an interleaved (SVQ3-style) signed exp-Golomb code. */
static inline int svq3_get_se_golomb(GetBitContext *gb){
    unsigned int buf;
    int log;

    OPEN_READER(re, gb);
    UPDATE_CACHE(re, gb);
    buf=GET_CACHE(re, gb);

    if(buf&0xAA800000){
        /* Code fits in the top 8 cache bits: table lookup. */
        buf >>= 32 - 8;
        LAST_SKIP_BITS(re, gb, ff_interleaved_golomb_vlc_len[buf]);
        CLOSE_READER(re, gb);

        return ff_interleaved_se_golomb_vlc_code[buf];
    }else{
        /* Long code: skip the 8 already-examined bits, splice in the next
         * cache refill, then de-interleave arithmetically. */
        LAST_SKIP_BITS(re, gb, 8);
        UPDATE_CACHE(re, gb);
        buf |= 1 | (GET_CACHE(re, gb) >> 8);

        if((buf & 0xAAAAAAAA) == 0)
            return INVALID_VLC;

        for(log=31; (buf & 0x80000000) == 0; log--){
            buf = (buf << 2) - ((buf << log) >> (log - 1)) + (buf >> 30);
        }

        LAST_SKIP_BITS(re, gb, 63 - 2*log - 8);
        CLOSE_READER(re, gb);

        return (signed) (((((buf << log) >> log) - 1) ^ -(buf & 0x1)) + 1) >> 1;
    }
}

/* Read a Dirac signed code: magnitude as interleaved ue, then a sign bit
 * (only present when the magnitude is nonzero). */
static inline int dirac_get_se_golomb(GetBitContext *gb){
    uint32_t buf;
    uint32_t ret;

    ret = svq3_get_ue_golomb(gb);

    if (ret) {
        OPEN_READER(re, gb);
        UPDATE_CACHE(re, gb);
        buf = SHOW_SBITS(re, gb, 1);
        LAST_SKIP_BITS(re, gb, 1);
        /* buf is 0 or all-ones; (ret ^ buf) - buf conditionally negates. */
        ret = (ret ^ buf) - buf;
        CLOSE_READER(re, gb);
    }

    return ret;
}

/**
 * read unsigned golomb rice code (ffv1).
*/
static inline int get_ur_golomb(GetBitContext *gb, int k, int limit, int esc_len){
    unsigned int buf;
    int log;

    OPEN_READER(re, gb);
    UPDATE_CACHE(re, gb);
    buf=GET_CACHE(re, gb);

    log= av_log2(buf);

    if(log > 31-limit){
        /* Regular case: unary prefix shorter than limit, followed by a
         * k-bit remainder, all visible in the current cache word. */
        buf >>= log - k;
        buf += (30-log)<<k;
        LAST_SKIP_BITS(re, gb, 32 + k - log);
        CLOSE_READER(re, gb);

        return buf;
    }else{
        /* Escape case: skip the full unary limit, then read an esc_len-bit
         * raw value. */
        LAST_SKIP_BITS(re, gb, limit);
        UPDATE_CACHE(re, gb);

        buf = SHOW_UBITS(re, gb, esc_len);
        LAST_SKIP_BITS(re, gb, esc_len);
        CLOSE_READER(re, gb);

        return buf + limit - 1;
    }
}

/**
 * read unsigned golomb rice code (jpegls).
 */
static inline int get_ur_golomb_jpegls(GetBitContext *gb, int k, int limit, int esc_len){
    unsigned int buf;
    int log;

    OPEN_READER(re, gb);
    UPDATE_CACHE(re, gb);
    buf=GET_CACHE(re, gb);

    log= av_log2(buf);

    /* Fast path only when the whole code is guaranteed to sit in the
     * cache (condition depends on the reader's MIN_CACHE_BITS). */
    if(log - k >= 32-MIN_CACHE_BITS+(MIN_CACHE_BITS==32) && 32-log < limit){
        buf >>= log - k;
        buf += (30-log)<<k;
        LAST_SKIP_BITS(re, gb, 32 + k - log);
        CLOSE_READER(re, gb);

        return buf;
    }else{
        int i;

        /* Count the unary prefix bit by bit, refilling as needed. */
        for(i=0; SHOW_UBITS(re, gb, 1) == 0; i++){
            LAST_SKIP_BITS(re, gb, 1);
            UPDATE_CACHE(re, gb);
        }
        SKIP_BITS(re, gb, 1);

        if(i < limit - 1){
            /* Normal code: k-bit remainder follows the unary prefix. */
            if(k){
                buf = SHOW_UBITS(re, gb, k);
                LAST_SKIP_BITS(re, gb, k);
            }else{
                buf=0;
            }

            CLOSE_READER(re, gb);
            return buf + (i<<k);
        }else if(i == limit - 1){
            /* Escape code: raw esc_len-bit value. */
            buf = SHOW_UBITS(re, gb, esc_len);
            LAST_SKIP_BITS(re, gb, esc_len);
            CLOSE_READER(re, gb);

            return buf + 1;
        }else
            /* Prefix longer than limit: invalid bitstream. */
            return -1;
    }
}

/**
 * read signed golomb rice code (ffv1).
 */
static inline int get_sr_golomb(GetBitContext *gb, int k, int limit, int esc_len){
    int v= get_ur_golomb(gb, k, limit, esc_len);

    /* ffv1 sign folding: odd (after increment) -> non-negative,
     * even -> negative. */
    v++;
    if (v&1) return v>>1;
    else return -(v>>1);

//    return (v>>1) ^ -(v&1);
}

/**
 * read signed golomb rice code (flac).
 */
static inline int get_sr_golomb_flac(GetBitContext *gb, int k, int limit, int esc_len){
    int v= get_ur_golomb_jpegls(gb, k, limit, esc_len);
    /* Standard zigzag decode: 0,1,2,3,... -> 0,-1,1,-2,... */
    return (v>>1) ^ -(v&1);
}

/**
 * read unsigned golomb rice code (shorten).
*/
static inline unsigned int get_ur_golomb_shorten(GetBitContext *gb, int k){
    /* Shorten uses an unlimited unary prefix and no escape code. */
    return get_ur_golomb_jpegls(gb, k, INT_MAX, 0);
}

/**
 * read signed golomb rice code (shorten).
 */
static inline int get_sr_golomb_shorten(GetBitContext* gb, int k)
{
    /* The sign bit is folded into the LSB; note the extra +1 on k. */
    int uvar = get_ur_golomb_jpegls(gb, k + 1, INT_MAX, 0);
    if (uvar & 1)
        return ~(uvar >> 1);
    else
        return uvar >> 1;
}

#ifdef TRACE
/* Debug wrappers: decode one element, then log its raw bits, length,
 * decoded value, bit position and call site.  The macros below redirect
 * the plain accessors to these wrappers when TRACE is defined. */

static inline int get_ue(GetBitContext *s, char *file, const char *func, int line){
    int show= show_bits(s, 24);
    int pos= get_bits_count(s);
    int i= get_ue_golomb(s);
    int len= get_bits_count(s) - pos;
    int bits= show>>(24-len);

    print_bin(bits, len);

    av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d ue @%5d in %s %s:%d\n", bits, len, i, pos, file, func, line);

    return i;
}

static inline int get_se(GetBitContext *s, char *file, const char *func, int line){
    int show= show_bits(s, 24);
    int pos= get_bits_count(s);
    int i= get_se_golomb(s);
    int len= get_bits_count(s) - pos;
    int bits= show>>(24-len);

    print_bin(bits, len);

    av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d se @%5d in %s %s:%d\n", bits, len, i, pos, file, func, line);

    return i;
}

static inline int get_te(GetBitContext *s, int r, char *file, const char *func, int line){
    int show= show_bits(s, 24);
    int pos= get_bits_count(s);
    int i= get_te0_golomb(s, r);
    int len= get_bits_count(s) - pos;
    int bits= show>>(24-len);

    print_bin(bits, len);

    av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d te @%5d in %s %s:%d\n", bits, len, i, pos, file, func, line);

    return i;
}

#define get_ue_golomb(a) get_ue(a, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define get_se_golomb(a) get_se(a, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define get_te_golomb(a, r) get_te(a, r, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define get_te0_golomb(a, r) get_te(a, r, __FILE__, __PRETTY_FUNCTION__, __LINE__)

#endif

/**
 * write unsigned exp golomb code.
*/
static inline void set_ue_golomb(PutBitContext *pb, int i){
    int e;

    assert(i>=0);

#if 0
    /* NOTE(review): disabled dead code; also contains an assignment
     * (i=0) where a comparison was presumably intended. */
    if(i=0){
        put_bits(pb, 1, 1);
        return;
    }
#endif
    if(i<256)
        /* Small values: code length comes from a lookup table. */
        put_bits(pb, ff_ue_golomb_len[i], i+1);
    else{
        /* General case: 2*e+1 bits encoding i+1 with a leading-zero prefix. */
        e= av_log2(i+1);

        put_bits(pb, 2*e+1, i+1);
    }
}

/**
 * write truncated unsigned exp golomb code.
 */
static inline void set_te_golomb(PutBitContext *pb, int i, int range){
    assert(range >= 1);
    assert(i<=range);

    /* range==2 is a single inverted bit; otherwise a plain ue(v) code.
     * (range==1 would code nothing, mirroring get_te0_golomb().) */
    if(range==2) put_bits(pb, 1, i^1);
    else         set_ue_golomb(pb, i);
}

/**
 * write signed exp golomb code. 16 bits at most.
 */
static inline void set_se_golomb(PutBitContext *pb, int i){
//    if (i>32767 || i<-32767)
//        av_log(NULL,AV_LOG_ERROR,"value out of range %d\n", i);
#if 0
    if(i<=0) i= -2*i;
    else     i= 2*i-1;
#elif 1
    /* Zigzag map to unsigned: positive i -> odd, negative -> even.
     * Inverse of the mapping decoded in get_se_golomb(). */
    i= 2*i-1;
    if(i<0) i^= -1; //FIXME check if gcc does the right thing
#else
    i= 2*i-1;
    i^= (i>>31);
#endif
    set_ue_golomb(pb, i);
}

/**
 * write unsigned golomb rice code (ffv1).
 */
static inline void set_ur_golomb(PutBitContext *pb, int i, int k, int limit, int esc_len){
    int e;

    assert(i>=0);

    e= i>>k;
    if(e<limit){
        /* Unary quotient, stop bit, then the k-bit remainder in one call. */
        put_bits(pb, e + k + 1, (1<<k) + (i&((1<<k)-1)));
    }else{
        /* Escape: limit zero bits followed by the raw value. */
        put_bits(pb, limit + esc_len, i - limit + 1);
    }
}

/**
 * write unsigned golomb rice code (jpegls).
 */
static inline void set_ur_golomb_jpegls(PutBitContext *pb, int i, int k, int limit, int esc_len){
    int e;

    assert(i>=0);

    e= (i>>k) + 1;
    if(e<limit){
        /* put_bits() handles at most 31 bits per call, so emit long
         * unary prefixes in 31-bit chunks of zeros. */
        while(e > 31) {
            put_bits(pb, 31, 0);
            e -= 31;
        }
        put_bits(pb, e, 1);
        if(k)
            put_sbits(pb, k, i);
    }else{
        /* Escape: limit-1 zeros, a one bit, then the raw value - 1. */
        while(limit > 31) {
            put_bits(pb, 31, 0);
            limit -= 31;
        }
        put_bits(pb, limit , 1);
        put_bits(pb, esc_len, i - 1);
    }
}

/**
 * write signed golomb rice code (ffv1).
 */
static inline void set_sr_golomb(PutBitContext *pb, int i, int k, int limit, int esc_len){
    int v;

    /* Zigzag map: 0,-1,1,-2,... -> 1,0,3,2,... (matches get_sr_golomb()). */
    v = -2*i-1;
    v ^= (v>>31);

    set_ur_golomb(pb, v, k, limit, esc_len);
}

/**
 * write signed golomb rice code (flac).
*/ static inline void set_sr_golomb_flac(PutBitContext *pb, int i, int k, int limit, int esc_len){ int v; v = -2*i-1; v ^= (v>>31); set_ur_golomb_jpegls(pb, v, k, limit, esc_len); } #endif /* AVCODEC_GOLOMB_H */
123linslouis-android-video-cutter
jni/libavcodec/golomb.h
C
asf20
12,757
/* * SVQ1 decoder * ported to MPlayer by Arpi <arpi@thot.banki.hu> * ported to libavcodec by Nick Kurshev <nickols_k@mail.ru> * * Copyright (C) 2002 the xine project * Copyright (C) 2002 the ffmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * svq1 code books. */ #ifndef AVCODEC_SVQ1_CB_H #define AVCODEC_SVQ1_CB_H #include <stdint.h> #include <stdlib.h> #include "libavutil/mem.h" /* 6x16-entry codebook for inter-coded 4x2 vectors */ DECLARE_ALIGNED(4, static const int8_t, svq1_inter_codebook_4x2)[768] = { 7, 2, -6, -7, 7, 3, -3, -4, -7, -2, 7, 8, -8, -4, 3, 4, 19, 17, 9, 3,-14,-16,-12, -8,-18,-16, -8, -3, 11, 14, 12, 8, 7,-16,-10, 20, 7,-17,-10, 20, -6, 18, 8,-21, -7, 18, 9,-20, 25, 3,-20,-14, 29, 7,-18,-13,-29, -4, 21, 14,-31, -6, 20, 14, -19,-26,-28,-24, 31, 32, 22, 10, 15, 24, 31, 28,-32,-32,-22,-13, 2, -8,-23,-26, -9, 3, 27, 35, 3, 11, 21, 21, 8, -4,-27,-34, -30,-31, 12, 47,-29,-30, 13, 47, 38, 30,-17,-46, 34, 26,-19,-46, -42,-50,-51,-43, 34, 48, 55, 48, 48, 54, 51, 42,-44,-52,-53,-47, 4, 5, 0, -6, -2, -2, 0, 1,-11, -6, -1, -2, 1, 8, 9, 1, 0, 1, -6, 5, 8, 1,-12, 2, 7,-14, -7, 8, 5, -8, 0, 8, 1, 4, 11, 8,-12, -8, 0, -5, -1, 1, 0, 4,-15, -8, 3, 16, 17, 8, -4, -6, 9, -4,-13, -8, 2, 6, 1,-18, -1, 11, 11,-12, 6, 0, 2, 0, 14, 6, -7,-21, 1, -1,-13,-20, 1, 1, 
10, 21, -22, -5, 7, 13,-11, -1, 4, 12, -7, 0, 14, 19, -4, 3, -5,-19, -26,-14, 10, 15, 18, 4, -6, -2, 25, 19, -5,-18,-20, -7, 4, 2, -13, -6, -1, -4, 25, 37, -2,-35, 5, 4, 1, 1,-21,-36, 2, 43, 2, -2, -1, 3, 8, -2, -6, -1, -2, -3, 2, 12, -5, -2, -2, -1, -3, -1, -1, -5, -1, 7, 8, -2, 2, 7, 5, -3, 1, 1, -3, -8, -3, -1, -3, -2, -2, -3, 2, 13, 15, 0,-11, -6, 3, 0, 0, 0, -6, -9, -5, -4, 18, 4, 1, 3, 12, 3, 0, 4,-16, -3, 3, -3, -17, 3, 18, 2, -1, -3, -1, -1, -6, 16, -8, 0, -9, 14, -7, 0, 3,-13, 14, -5, 3,-13, 14, -4, -7, 20, 14,-23, 8, -7, -8, 4, 8,-15,-19, 16,-10, 13, 11, -3, 9, -1, 1, 26, 5,-15,-27, 2, -20, 7, 16, -4,-40, 9, 31, 1, 26,-12,-30, -7, 40, -2,-19, 4, 6, 0, 0, 0, -6, -2, 1, 2, 0, -1, 0, -6, 9, 0, -2, -1, -7, 8, 2, -3, -1, 2, -3, 2, 7, -4, -2, 4, 2, 0, 0, -6, -3, -2, 9, 2, -2, -1, 0, -4, -3, -3, 0, -3, -6, 2, 10, 4, 3, 0,-10, 8, 0, 0, -4, 4, -1, 1, 4, 2, 3, -7, -9, 7, 2, 1, -9, -4, -1, 12, 0, 0, 3, -1, 7, -4, 3,-14, 4, 2, -12, -9, 1, 11, 2, 5, 1, 0, 3, 1, 0, 2, 0, 8, 6,-19, -6,-10, -7, -4, 9, 7, 5, 7, 6, 21, 3, -3,-11, -9, -5, -2, -4, -9,-16, -1, -2, -5, 1, 36, 8, 11, 19, 0, 2, 5, -4,-41, -1, -1, -2, -1, -2, -2, 1, 6, 0, 4, 1, -8, 1, 1, 1, 0, -2, -3, 4, 0, 2, -1, 3, -3, 1, 3, -4, 1, -1, 3, 0, -5, 3, 4, 2, 3, -2, -3, -6, -1, -2, -3, -2, 2, -4, 8, 1, 0, -7, 4, 2, 6, -7, -1, 1, 0, -2, 2, -4, 1, 8, -6, 2, -1, -6, 2, 0, 2, 5, 4, -8, -1, -1,-11, 0, 9, 0, -2, 2, 2, 17, -5, -4, -1, -1, -4, -2, -2, 0,-13, 9, -3, -1, 12, -7, 2, 0, -2, -5, 2, -7, -5, 20, -3, 7, 7, -1,-30, 3, 5, 8, 1, -6, 3, -1, -4, 2, -2,-11, 18, 0, -7, 3, 14, 20, -3,-18, -9, 7, -2, 0, -1, -2, 0, 0, -1, -4, -1, 1, 0, -2, 2, 0, 4, 1, -3, 2, 1, 3, 1, -5, 1, -3, 0, -1, -2, 7, 1, 0, -3, 2, 5, 0, -2, 2, -5, -1, 1, -1, -2, 4, -1, 0, -3, 5, 0, 0, 3, -1, -2, -4, 1, 5, -1, -1, 0, -1, 9, -1, -2, -1, -1, -2, 5, 5, -1, -2, 2, -3, -2, 1, 2,-11, 1, 2, 1, 3, 2, 2,-10, -1, -2, 4, 2, 4, 1, 4, 5, -5, 1, 0, 6,-11, 1, 1, 0, 6, 6, 0, 2, 1,-15, 7, 3, 5, 9,-30, 2, 2, 2, -34, 1, 9, 2, 5, 8, 8, 2, 7, 2, 6, 6, 2,-27, 1, 4 }; /* 
6x16-entry codebook for inter-coded 4x4 vectors */ DECLARE_ALIGNED(4, static const int8_t, svq1_inter_codebook_4x4)[1536] = { 4, 0, -6, -7, -4, -8,-13, -9, -8, -8, -1, 6, -2, 5, 22, 27, -16, -7, 11, 10,-18, -7, 13, 10,-15, -4, 12, 8, -9, -1, 9, 5, -2, 2, 15,-16, -3, 2, 19,-19, -3, 2, 19,-19, -2, 3, 15,-14, 17, 22, 22, 16, -6, -7, -5, -2,-12,-16,-16,-12, 1, 1, -1, -3, 11,-17, 0, 8, 14,-21, -1, 9, 14,-21, -2, 8, 11,-16, -2, 6, 7, -2,-16, 11, 9, -2,-21, 14, 10, -1,-22, 14, 8, -1,-18, 10, -10, 16, 3, -9,-13, 20, 4,-11,-14, 21, 4,-10,-11, 16, 3, -8, 11, 4, -9, -9, 15, 6,-12,-14, 17, 8,-12,-14, 16, 10, -7,-11, 4, 10, 14, 13, -1, 7, 15, 16,-12, -7, 3, 8,-20,-23,-18,-10, -10,-18,-26,-25, 4, 1, -6,-11, 13, 15, 11, 3, 12, 15, 13, 8, -16,-19,-16,-11, 7, 12, 15, 11, 11, 16, 16, 11, -6, -9,-11,-10, 18, 19, 12, 5, 18, 16, 5, -4, 6, 0,-10,-15, -9,-17,-23,-22, -10,-14, -1, 21,-11,-17, 0, 29,-11,-16, 1, 30,-10,-14, 0, 23, -16,-17,-12, -6,-19,-19,-14, -7, -3, -1, 1, 2, 27, 35, 29, 19, -37, -8, 23, 23,-42, -9, 28, 29,-43,-10, 26, 28,-38,-11, 19, 22, 32, 16,-16,-33, 39, 20,-18,-37, 38, 19,-19,-38, 32, 15,-17,-34, 24, 9, -6, -4, -1,-10, -6, 3, -8, -9, -1, 3, 3, 7, 2, -6, -1, -3, -1, 0, -1, 4, 2, -7, -3, 11, 3,-16, 1, 20, 9,-18, -3, -8, 6, 12, -5,-10, 7, 13, -6, -9, 5, 7, -5, -5, 2, -1, -8, 12, -3, -1,-10, 15, -3, 1,-11, 13, -4, 1,-11, 8, -3, 2, 9, 6, -5,-12, 3, 0, -8,-13, -4, -4, -1, -1, -4, 1, 15, 18, 9, 13, 14, 12, 4, 3, -1, -2, -2, -5, -8, -5, -7,-11, -9, -4, 7, -5, -7, -4, 14, -2, -7, -4, 17, 0, -8, -5, 15, 1, -7, -5, -10, -1, 6, 4,-15, -9, 2, 4, 2, -1, -3, 0, 25, 13, -8,-10, 7, 11, -3,-16, 7, 11, -3,-15, 6, 7, -2, -9, 4, 2, -3, -5, -7, -1, -1, 0, -9, -2, 2, 6,-12, -4, 6, 14,-13, -6, 8, 19, -18,-18,-11, -5, -3, 0, 3, 4, 6, 8, 6, 6, 6, 6, 6, 6, -5, 3, 13,-10, -6, 1, 15, -9, -6, -3, 15, -6, -6, -6, 10, -3, 9, 1, -9, -9, 11, 9, 6, 5, 0, 3, 8, 7,-15,-14, -6, -5, -11, -6, 11, 19, -2, -5, -9, -8, 6, 2, -9,-10, 6, 5, 4, 5, -7, -3, 8, 15, -1, 3, 10, 15, 5, 5, -1, -2, 4, -2,-21,-25, 6, -6, 
-6, 5, 8, -9, -7, 9, 8,-12, -7, 13, 4,-14, -7, 14, -4, -3, 1, 1, -3, -5, -2, -3, 7, 0, -2, -4, 20, 7, -4, -4, -3,-20, -6, 10, 6, 0, 0, 1, 5, 8, 5, -1, -3, 0, 0, -2, 13, 6, -1, 2, 5, 3, 2, 3, -3, 0, 3, 0,-16, -8, -2, -5, -2, -7, -6, 0, -3, -6, -3, 1, -5, -1, 2, -1, -1, 12, 16, 5, -7, 1, 9, 8,-10, -2, 5, 3, -6, 2, 7, 3, -4, 0, -1, -7, 3, 4, -9,-24, 0, 2, 6, 3, -1, -1, 4, 7, 5, 3, -1, -2, 3, 6, -9, 2, 1, 6,-13, 1, 1, 8,-10, 2, 1, 8, -7, 1, -3, -3, 2, 22, -2, -3, -5, 12, -2, -3,-10, 2, -3, -1, -4, 2, 11, 12, 8, 2, -5, -5, -5, -8, -6, -4, 0, -3, -2, -1, 3, 3, 12, -6, -2, -1, 12, -8, -2, -2, 9, -7, 0, -3, 4, -6, 2, -2, -19, 1, 12, -3, -4, 4, 5, -4, 6, 1, -2, -1, 4, -4, -2, 7, -3, -4, -7, -8, -4, -4, -2, 0, -1, 2, 14, 16, -4, -2, 4, 4, -1, 7, 2, -5, -2, 0, -1, 1, 4, -3, -1, 13, 6,-12,-14, 8, -1, 5, 4, -5, -2, 5, 3, -9, -2, 7, 4,-12, -1, 7, 4, -9, -6, -3, 1, 1, 11, 11, 0, -6, 6, 4, -2, -7,-12,-10, 3, 10, -2, -3, -3, -2, 6, 11, 14, 10, -9,-11,-10,-10, 2, 2, 3, 2, -7, -5, -7, -1, -1, 2, 0, 7, -1, 1, 0, 9, 3, 4, -5, -1, 10, -1,-15, -1, 4, 1, -5, 2, -3, 1, -1, 1, -3, 1, 4, 4, 2, -1, 4, 10, 6, 2, -1, 0, 2, 2, -7,-12, -4, 2, 0, -3, -1, -4, -1, -8, 3, -1, 2, -9, 4, 0, 5, -5, 2, 0, 8, 3, 3, 2, 1, 1, 4, -2, 0, 3, 2, -1, 4, 1, 0, 6, -1,-25, -1, -2, -2, -4, -3, 0, -1, -4, -1, -1, -4, 2, 0, -6, 2, 25, -11, -1, 5, 0, 7, 0, -2, 2, 10, -1, -3, 4, -5, -5, -2, -1, 0, 6, 3, -1, -2, -1, -1, 1, -1, -7,-12, -5, 8, 6, 2, 4, 2, 6, -1, -6, 9, 10, -1, -4, 1, 0, -4, 0, 3, -2, -9, -5, -4, 3, 4, 0, -4, 3, 3, 0,-11, 0, 3, 2,-11, 3, 7, 2, 2, -4, 7, 3, 1, -8, 7, 1, -1,-12, 4, 1, 3, -9, 2, 2, 2, -2, -2, 9,-17, -3, 3, 1, -4, 7, 1, -6, 5, 4, -1, 3, -1, 2, 0, -4, -7, 8, 12, -1, -2, 5, 4, -5, 3, -5, -8, -2, 0, 0, -5, -2, -2, -8, 3, 27, -1, -4, -3, 6, -3, 1, -2, -7, 4, 4, 1, -1, -7,-10, -7, -3, 10, 10, 5, 3, -2, -2, -4, -3, 0, 1, 5, 7, 4, -2,-16,-20, 0, 4, 7, 8, 2, 0, -2, -1, -2, 1, 3, 17, -3, 1, -2, -1, -1, -2, -1, -2, -1, -5, -1, 0, 5, -3, 1, 0, 6, -2, 0, 0, -1, -2, 0, -3,-11, 1, 8, -1, 3, 0, 0, 0, 0, 2, 4, 
1, 2, 0, 6, 1, -2,-18, -3, 2, -14, 0, 6, 1, -5, -2, -1, 1, -1, 1, 0, 1, 1, 7, 4, 0, -1, 0, 1, -4, 1, 8, 3, -4, -3, 4, 1, 3, -6, 1, -4, 1, 1,-12, 3, 3, -1,-10, 0, -1, 2, 0, 2, 1, 3, 2, 2, 4, 3, 0, 0, 3, 2, 0, -2, 1, 5, 2, -5, 0, 6, -1,-14, -1, -2, -6, -3, -3, 2, -1, 4, 5, 6, -1, -2, 0, 4, 4, -1, -5, -4, 1,-11, 0, -1, 2, -4, 1, 2, -3, 3, -1, 1, -2, 15, 0, 1, -1, 0, -2, 1, -4, -7, 1, -2, -6, -1, 21, -2, 2, -1, 1, 21, -1, -2, 0, -1, -3, 1, -2, -9, -2, 2, -1, 2, 1, -4, -1, 1, 8, 2, -6,-10, -1, 4, 0, -4, -3, 3, 3, 5, 0, -1, -1, 3, 2, 1, -2, -2, -2, 4, 3, 5, 2, -4,-17, 0, -2, 4, 3, -7, -4, 0, 3, 9, 9, 2, -1,-11, -6, 0, -1, 5, 1, 0, 1, 0, 17, 5,-11, 3, -2, -6, 0, 2, -2, -4, 1, -4, 1, 2, -1, -5, -1, -5, -3, -3, 5, -3, -2, 4, 16, 2, -5, -2, 5, -1, -1, 0, 0, -4, 1, -1, 2, 5, 11, -1, -1, -2, 1, -4, -2, -3, -1, -5, -1, 10, 0, 6, 1, 0, -3, 0, -4, 1, 0, -2, -4, 3, -1, 6, 9, 3, 0, -2, 1, -2, 0, -2, -3, -2, -2, 1, 0, 1, -6, 1, 0, 2, 1, -1, 3, -2, 1, 0, -1,-15, 0, -1, 5, 2, 6, 2, 0, 2, 2, 0,-12, -4, 6, 0, 1, 4, -1, 1, 2, 1, -4, 1, -2, -7, 0, 0, 0, 0, -1, -5, 2, 11, 3, 1, 3, 0, -6, 0, -3, -9, -4, 1, 3, -1, 0, 4, 1, -2, 0, 7, -3, -1, 6, 1, -2, 6, 2, 0, -1, 3, -2, -2, 4, 0, 2, -1, 2,-14, 2, 2, 2, 0, -1, -2, 3, -3,-14, 0, 2, 3, -3, 5, 1, 3, 2, 1, -3, 4,-14, 1, -2, 11, -1, 0, -1, 3, 0, -1, 1, 0, 2, -2, 3, -3, 2, -4, -1, -4, 3, -1, 2, 1, 3, -6, -2, 2, 7, -2, 1, 2, 0, -2, 0, 0, -1, 12, 5, -1, 2, -8, -1, 1, -7, 2, -2, -4, 2, 11, 0,-11, -2, 3, 1, -3, -1, 0, 3, 1, -1, 0, 3, 0, -2, 0, -6, -1, -3, 12, -7, -2, 0, 7, -2, 1, 1, 1, 2, 2, 2, -1, 2, 0, 2,-23, 0, 4, 0, 3, 2, 1, 3, -4, -5, -1, 5, -3, 5, 10, -1, 0, 0, 3, -4, 1, -1, 2, -5 }; /* 6x16-entry codebook for inter-coded 8x4 vectors */ DECLARE_ALIGNED(4, static const int8_t, svq1_inter_codebook_8x4)[3072] = { 9, 8, 4, 0, -3, -4, -4, -3, 9, 8, 4, -1, -4, -5, -5, -3, 8, 7, 3, -2, -5, -5, -5, -4, 6, 4, 1, -2, -4, -5, -4, -3, -12,-14,-11, -4, 1, 5, 6, 6, -8,-10, -7, -5, -2, 1, 1, 1, 5, 4, 3, 1, 0, 0, -1, -1, 13, 13, 9, 6, 3, 0, -1, -2, -4, -4, -3, 
-1, 1, 4, 8, 11, -5, -6, -4, -2, 0, 3, 8, 12, -7, -7, -6, -4, -2, 2, 7, 10, -7, -7, -5, -4, -2, 1, 5, 8, -3, -2, -1, 1, 3, 6, 7, 6, 2, 3, 5, 7, 8, 8, 6, 4, 4, 5, 4, 3, 1, -2, -6, -7, 1, 0, -2, -7,-10,-14,-17,-16, -5, -4, 1, 8, 9, 3, -3, -7, -7, -6, 1, 11, 12, 5, -3, -8, -8, -7, 0, 9, 11, 5, -3, -7, -8, -6, -1, 5, 8, 4, -2, -6, -4, -5, -7, -8, -9, -9, -8, -6, -4, -5, -6, -7, -7, -6, -4, -2, 0, 1, 2, 3, 5, 8, 10, 9, 1, 2, 3, 6, 9, 12, 14, 13, 5, 6, 6, 5, 4, 3, 2, 1, 5, 6, 7, 7, 6, 6, 6, 4, -1, 0, 1, 1, 3, 5, 5, 5,-13,-16,-17,-17,-14,-10, -6, -4, 9, 11, 13, 16, 15, 13, 12, 10, -4, -5, -6, -7, -7, -7, -6, -5, -6, -6, -7, -7, -7, -7, -6, -5, -2, -1, 0, 0, 0, 0, 0, -1, -11,-13,-15,-16,-16,-14,-12,-10, 2, 3, 4, 5, 4, 3, 3, 3, 6, 7, 8, 8, 8, 7, 6, 5, 3, 4, 3, 3, 3, 3, 3, 3, 3, 4, 4, 1, -2, -7,-13,-17, 5, 7, 7, 5, 1, -5,-13,-19, 6, 8, 9, 8, 5, -1, -9,-16, 6, 8, 10, 10, 7, 2, -4,-11, 18, 9, -1,-10,-13, -9, -4, 0, 22, 12, -1,-12,-15,-10, -4, 2, 23, 13, 0,-10,-13, -9, -3, 2, 20, 12, 2, -6, -9, -6, -2, 2, -6, -6, -6, -7, -7, -7, -7, -6, -6, -7, -8, -8, -9, -9, -9, -8, -3, -3, -3, -3, -3, -3, -3, -3, 12, 15, 18, 21, 21, 19, 17, 14, 14, 16, 18, 18, 18, 16, 15, 13, 5, 6, 6, 5, 5, 4, 4, 3, -6, -7, -9,-10,-10,-10, -9, -7,-10,-11,-13,-14,-14,-13,-12,-10, -27,-17, -4, 5, 9, 10, 10, 7,-32,-19, -3, 7, 11, 12, 11, 8, -30,-16, -2, 8, 12, 12, 10, 7,-23,-12, 0, 7, 10, 11, 9, 6, 16, 17, 16, 12, 6, -1, -8,-12, 17, 18, 15, 10, 1, -8,-15,-18, 15, 14, 10, 4, -5,-14,-20,-23, 10, 8, 4, -1, -9,-16,-21,-22, -10,-12,-12,-11, -5, 4, 14, 20,-11,-13,-15,-12, -4, 7, 19, 27, -11,-13,-14,-11, -3, 8, 21, 28,-10,-11,-12, -9, -2, 8, 18, 25, -1, -1, -1, 1, 4, 6, 6, 5, 0, 0, 0, 2, 4, 3, 1, -2, 0, 0, 2, 4, 4, -1, -7,-10, 0, 0, 3, 5, 3, -3,-11,-15, -14,-13, -8, -1, 3, 3, -1, -4, -5, -4, -1, 4, 8, 8, 3, 0, 3, 2, 2, 3, 4, 5, 3, 1, 5, 3, 0, -2, -2, -1, -1, -1, 9, 1, -6, -6, -5, -3, -2, -1, 12, 1, -6, -6, -4, -2, -1, 0, 14, 4, -4, -4, -2, -2, -1, -1, 14, 6, -1, -1, -1, -1, -1, -1, 4, 6, 8, 10, 11, 9, 7, 5, -1, -1, 
-1, 0, 0, -1, -1, -2, -2, -4, -4, -5, -5, -5, -5, -4, -2, -3, -3, -4, -4, -3, -2, -1, 2, 3, 4, 4, 3, 1, 0, 0, -1, 1, 4, 5, 6, 5, 4, 3, -8, -6, -2, 2, 3, 4, 4, 3,-14,-13, -9, -5, -2, -1, 0, 0, -3, -4, -5, -4, 0, 7, 12, 13, -3, -4, -5, -5, -2, 4, 9, 10, -2, -3, -4, -5, -4, -1, 3, 4, -1, -1, -2, -3, -3, -2, 0, 1, 9, 5, -2, -8,-11,-10, -7, -4, 12, 10, 6, 2, 0, -1, 0, 0, 2, 2, 3, 4, 3, 1, 1, 1, -9, -8, -4, 0, 1, 2, 1, 0, 6, 8, 8, 5, 1, -5,-11,-13, 0, 1, 2, 2, -1, -4, -8,-11, -3, -2, 1, 3, 3, 1, -1, -4, -2, -1, 2, 5, 6, 6, 4, 1, 3, 4, 5, 5, 4, 1, -3, -6, 5, 6, 4, 2, 2, 2, 0, -3, 6, 5, 0, -5, -5, -2, -1, -2, 7, 4, -3,-11,-12, -7, -3, -2, 1, 0, -1, -1, -1, 0, 0, 0, 2, 3, 4, 4, 5, 5, 4, 3, -7, -9, -9,-10,-10, -9, -7, -6, 3, 4, 5, 6, 5, 5, 5, 5, -7, -7, -7, -7, -6, -6, -5, -4, -5, -4, -3, -1, -1, -1, 0, 0, -3, -2, 1, 4, 5, 5, 5, 5, -2, -1, 3, 6, 9, 10, 10, 9, -14, 1, 10, 3, -2, 0, 1, 1,-16, 2, 13, 3, -3, -1, 1, 0, -15, 2, 12, 3, -4, -2, 1, 1,-10, 3, 10, 2, -3, -1, 1, 1, 0, 1, 4, 2, -5,-10, -3, 11, -1, 1, 4, 2, -6,-13, -2, 15, -1, 0, 3, 1, -6,-12, -1, 15, -1, 1, 2, 1, -4, -8, 0, 11, 10, 5, -2, -2, 2, 5, 1, -4, 7, 0, -8, -6, 1, 5, 2, -4, 2, -5,-12, -7, 2, 7, 4, -1, -1, -7,-10, -4, 4, 9, 7, 2, -5, -5, -4, -6, -6, -5, -5, -3, -1, -2, -2, -4, -5, -6, -5, -4, 6, 7, 7, 4, 0, -2, -3, -3, 13, 14, 13, 10, 5, 1, -1, -2, 1, 1, 2, 2, 2, 2, 2, 2, -5, -6, -8, -9, -9, -8, -7, -6, 7, 9, 10, 11, 11, 9, 7, 5, -1, -2, -3, -3, -4, -4, -4, -3, -1, -1, 0, 0, 0, 0, -1, -1, -3, -3, -4, -5, -4, -3, -3, -2, 2, 1, -1, -3, -3, -2, -1, 0, 12, 12, 8, 3, 1, 0, 0, 1, -6, -8, -8, -6, -2, 2, 6, 8, 1, 1, -1, -2, 0, 3, 5, 7, 3, 3, 1, -1, -1, 0, 0, 2, 0, 1, 0, -1, -1, -1, -2, -1, 1, 0, 0, 0, 0, 0, 2, 4, 2, 1, 3, 4, 3, 1, 0, 2, 2, 1, 0, 0, -1, -1, 0, 3, 5, 1, -6,-12,-13, -8, -1, 4, -2, 0, -1, -2, -1, 0, 2, 3, -6, -3, -2, 0, 1, 1, 1, 1, -9, -5, 0, 4, 5, 3, 1, 0, -8, -3, 3, 7, 8, 4, 1, 0, 1, 2, 2, 3, 3, 1, -1, -3, 4, 5, 5, 6, 6, 5, 2, 0, 0, 0, 0, 0, 1, 0, -2, -4, -3, -3, -4, -3, -3, -4, -7, -8, 14, 12, 6, -1, -3, 
-3, 0, 0, 7, 5, 1, -3, -5, -4, -2, -1, -2, -2, -2, -2, -2, -2, -1, -1, -6, -4, -1, 1, 1, 1, 0, -1, 2, 2, 1, -3, -6, -7, -6, -3, 1, 0, -1, -3, -2, 1, 4, 6, 0, 0, 1, 2, 4, 7, 8, 7, 0, 0, 0, 0, -1, -4, -7, -8, 0, 2, 1, -2, -3, -3, -2, -1, -1, 1, 0, -3, -5, -2, 0, 2, -2, -1, -2, -5, -4, 1, 6, 9, -3, -2, -3, -4, -2, 5, 11, 13, -4, -2, 2, 6, 4, -3,-10,-14, -2, -1, 1, 4, 4, 1, -1, -2, 0, 0, -1, -2, -2, 0, 4, 6, 2, 2, 0, -3, -3, 0, 5, 9, -4, -4, -2, 1, 6, 9, 3, -7, -2, -2, -2, -1, 4, 8, 0,-11, 1, 1, 0, 0, 2, 6, -1,-10, 2, 2, 1, 0, 2, 4, 0, -7, -1, -2, -3, -6, -7, -8, -8, -8, 2, 3, 3, 1, -1, -2, -3, -4, 5, 5, 5, 4, 3, 2, 0, -1, 3, 3, 3, 3, 2, 2, 1, 1, 3, 3, 2, -2, -3, 0, 7, 10, 1, 2, 2, -2, -5, -4, 0, 3, 0, 3, 4, 2, -3, -5, -6, -4, 0, 2, 4, 4, 1, -4, -7, -7, 2, 4, 5, 5, 5, 5, 6, 6, -4, -4, -3, -5, -5, -3, -3, -2, -3, -4, -4, -5, -4, -2, -2, -2, 1, 1, 0, 0, 2, 4, 5, 4, -2, 0, 3, 4, 4, 3, 2, 2, -9, -7, -4, 0, 3, 6, 6, 6, -5, -5, -3, -2, 0, 1, 3, 4, 5, 5, 2, -2, -4, -6, -5, -3, 1, -6, -4, 7, 5, -2, -2, 1, 5, -5, -4, 6, 4, -5, -4, 1, 5, -5, -4, 6, 4, -5, -3, 1, 1, -7, -3, 8, 7, -1, -3, 1, -8, -7, -4, 0, 2, 4, 5, 5, 5, 6, 5, 2, -1, -5, -7, -7, 5, 6, 4, 1, -3, -5, -6, -5, -7, -7, -5, -2, 1, 6, 9, 10, 6, 3, 0, 1, 3, 0, -8,-14, 3, 0, -1, 1, 4, 3, 0, -4, 1, 0, 0, 1, 2, 1, 1, 1, -1, -1, 1, 2, 1, -1, -1, 0, 1, 1, 1, 1, 0, -2, -3, 0, 1, 2, 1, 0, -2, -8, -9, -4, 1, 3, 3, 2, 1, -3, -3, 1, 0, 1, 1, 1, 1, 1, 4, 8, 2, 5, 9, 7, 2, -1, -1, 1, -4, -1, 1, 0, -3, -4, -1, 2, -3, 0, 3, 3, 0, -1, 0, 2, -4, -1, 1, 1, -2, -4, -5, -4, 1, -1, -2, -2, -1, 2, 4, 5, 2, 1, 1, 0, -1, -1, 0, 0, 2, 3, 4, 5, 4, 2, 1, 0, -9, -9, -6, -3, -1, -1, -1, -1, -6, -6, 4, 7, 0, -2, -1, -2, -1, -2, 5, 6, -1, -2, 0, -1, 4, -1, 1, 0, -4, -2, 0, -2, 7, 1, -1, -2, -3, 1, 3, 1, 4, 2, 1, 3, 3, 1, 1, 2, 2, -2, -4, 0, 3, 1, 0, 0, 1, -4, -8, -4, 1, 2, 1, 0, 2, -3, -9, -6, 0, 3, 3, 2, -1, -1, 0, -1, -1, 0, 1, 2, 3, 1, -4, -8, -7, -3, 1, 2, 2, -1, -3, -2, -1, 0, 1, 0, -1, 0, 5, 11, 9, 3, -1, -3, -1, -2, -2, -1, 1, 1, 1, 1, 0, -1, 
0, 3, 6, 6, 5, 5, 2, 1, -1, -1, -2, -5, -6, -4, 2, 2, 2, 1, -1, -4, -5, -5, -1, -3, -6, -7, -6, -4, -1, 1, 5, 5, 3, 4, 4, 3, 4, 5, -1, -2, -3, -2, -2, -2, 0, 1, 0, 0, 0, 0, 0, 1, 2, 3, -6, -6, -4, -1, 2, 2, 2, 2, -6, -7, -5, -2, 0, -1, -1, 0, 2, 2, 2, 4, 4, 3, 3, 4, 2, 1, 0, -1, 0, 0, 2, 4, 12, 5, -5, -8, -5, 0, 2, 2, 2, -3, -6, -3, 0, 0, -1, -2, -2, -3, -1, 3, 4, 1, -2, -3, 2, 2, 3, 4, 3, 1, -1, -1, 3, 2, 1, 0, 1, 4, 3, 0, 4, 3, 0, -5, -6, 0, 3, 3, 2, 3, 1, -7,-12, -6, 1, 3, 1, 3, 4, -1, -6, -4, 0, 1, -9, -4, 2, 6, 7, 4, 1, 0, -7, -1, 4, 6, 4, 0, -3, -3, -6, 0, 4, 4, 1, -2, -3, -2, -4, 1, 3, 2, 0, -2, -1, 0, 0, 5, 2, -5, -3, 3, 1, -4, -2, 4, 2, -6, -3, 6, 4, -3, -1, 5, 3, -5, -1, 7, 3, -4, -1, 2, 0, -6, -3, 5, 3, -3, -8, -3, 3, 5, 3, 1, -2, -2, 2, 4, 4, -2, -4, -3, 1, 3, 2, 1, -3, -5, -3, 3, 4, 3, -5, -6, -5, 3, 10, 8, -1, -5, 0, 3, 2, -4, -9, -7, 0, 6, -5, -1, 5, 7, 4, -1, -3, -3, -5, -5, -2, 3, 6, 5, -1, -4, 9, 6, 0, -4, -2, 1, 1, -1, -1, -1, -1, 1, 1, 0, -1, 0, -1, 0, 0, 0, 0, -1, -1, 0, 2, 1, -2, -1, 1, 1, 0, 0, 12, 8, 2, -1, -1, -4, -7, -7, 2, 1, 3, 6, 7, 4, 2, 0, 1, 0, -1, 0, -1, -4, -7, -8, 0, 0, -1, 0, 0, 0, -1, -3, 0, 0, 0, 0, 1, 1, 0, -2, -1, 0, 1, 1, 0, 0, -1, -2, 0, 0, -1, -3, -4, -3, -1, 1, -1, 0, 0, 0, 1, 4, 10, 12, -1, 0, -2, -2, -3, -3, -1, 1, -3, -1, -2, -4, 2, 9, 9, 7, -3, 0, -1, -3, 0, 2, -1, 1, -1, 1, -2, -3, 0, -1, -3, 0, 0, 0, -3, -2, 0, -1, -1, 1, -1, -2, -1, -1, -2, -1, -1, -2, 2, -1, -2, -1, 0, 1, 0, -2, 3, -1, -2, 2, 5, 3, -1, -3, 1, -5, -5, 1, 6, 6, 2, 0, 1, 2, 0, -1, 0, 1, 0, -2, -5, -3, -1, 0, 1, 2, 1, -2, -7, -5, -2, -2, -2, -2, 0, 1, -1, 0, 1, 1, 0, 3, 9, 12, 0, 6, 5, 1, -2, -3, 0, 3, 0, 6, 5, 1, 1, 1, 2, 3, -5, -2, -2, -3, 0, 0, 0, 0, -6, -3, -3, -2, 0, 0, -1, -2, 4, 4, 2, 1, 0, -1, -1, 0, -2, -2, 0, 1, 2, 1, 1, 0, 2, 2, 1, -1, -3, -5, -9,-10, 2, 1, -1, -1, 1, 4, 4, 1, 4, 0, -2, -2, -2, -2, -1, 0, 7, 1, -4, -3, -2, 0, 1, 1, 10, 5, -1, -2, 0, 1, 1, 0, 5, 1, -3, -4, -3, -1, -1, -2, 2, 1, -1, -3, -3, 1, 1, -1, -2, -1, 3, 0, -1, 1, 1, 
0, -3, 1, 7, 2, -3, -2, -1, 0, -2, 4, 8, -1, -8, -5, 0, 2, -4, -1, 1, 2, 1, -3, -4, -2, -5, -3, -2, 1, 4, 4, 4, 6, -3, -2, -4, -3, 0, 1, 1, 2, 2, 2, 2, 1, 2, 1, -1, -1, -4, -1, 0, -1, -3, -3, -1, -1, 1, 4, 4, 2, 0, -1, -2, -3, 4, 6, 5, 3, 2, 1, -2, -4, 0, 1, 1, 1, 1, -1, -4, -6, 1, 2, 2, -1, -6, -5, -1, 2, -3, -2, 1, 1, -4, -3, 2, 5, -2, -1, 2, 2, -3, -4, 0, 3, -2, -2, 2, 6, 5, 2, 1, 2, 2, -3, -3, 0, 0, 2, 3, 1, 3, -1, 1, 3, 1, 2, -1, -5, -5, -7, -4, -2, 1, 8, 8, 1, -1, 0, 2, 0, -3, 0, 1, -3, -2, -5, -5, -2, -3, -1, 0, -2, -1, -4, 0, 4, 0, 2, 4, 0, 0, 0, 8, 10, 2, 1, 3, -1, -4, -3, 2, 3, -3, -3, 1, -1, 1, -2, -4, 2, 7, 3, -2, -1, 6, 4, -2, -1, 2, 0, -1, 3, 1, 1, -2, -2, -2, -5, -3, 4, -6, -2, 1, 1, -1, -4, -2, 4, -2, -1, -2, -2, 0, 1, 0, -2, -1, 1, 0, -1, 0, 0, -1, -3, 0, 1, -2, -4, -3, -1, 0, 0, 6, 8, 5, 0, 0, 1, 2, 3, -2, -2, 2, 5, 2, 0, 0, 1, 2, -2, -2, -1, -1, 1, 2, 4, 2, -1, 0, 1, 0, 0, 0, 1, -8, -7, -1, 1, -1, -1, 1, 3, 0, 3, 6, 2, -2, 1, 2, 0,-10, -7, -1, 0, -3, -1, 2, 1, 0, 0, 2, 2, 1, 1, 1, -1, 3, 0, -2, -2, 0, 2, 1, 0, 8, 1, 0, 0, -2, -3, -1, 0, 2, -2, 2, 5, 1, -2, -1, 1, -3, -6, -3, -1, -3, -3, -1, 2, 2, 0, 1, 2, 2, 1, 0, 0, 1, -1, -1, -2, -1, 0, 1, 0, 15, 9, 2, -1, -2, -3, -3, -3, 0, -3, -2, 0, 0, -1, -1, -1, 1, 0, 1, 0, 0, -1, -1, -1, 0, 2, 2, -2, -3, -3, -7, -8, 0, 2, 2, 0, 1, 2, 1, 1, 1, 2, 2, 2, 3, 1, 0, 3, 1, 0, -1, -2, -1, -2, 0, 5, -11, -6, -1, 1, 2, 3, 1, -3, 1, 4, 3, -1, -2, 1, 2, -1, 2, 2, 1, -1, -2, 0, 1, -1, 0, 0, -1, -1, 0, 2, 3, 2, 1, 1, 2, 1, -1, 1, 0, -4, 0, 0, 0, -2, -2, 2, 4, -2, -2, -3, 0, 0, -1, 2, 1, -6, 0, 2, 5, 5, 3, 2, -1, -7, 4, 2, 0, 0, 3, 3, 1, -1, 0, -1, -1, 3, 6, 4, 1, -1, -2, -2, 0, 2, 2, 0, -2, -2, -1, 0, -1, -5, -7, -5, -1, 1, 5, -1, -2, 0, 2, 4, 2, -5, 0, -5, -2, 2, 1, 2, 0, -6, 6, 1, 0, 1, -2, -1, 4, 2, 2, -3, -3, 0, -1, -2, 0, 0, 1, -1, 0, 2, 0, 0, 6, 11, 2, -1, -1, 0, -3, -2, 3, 5, 0, -2, -1, 0, -1, 0, 0, -3, 1, -1, -1, -1, -2, -1, -3, -7, 1, 1, -2, -2, 1, 3, 1, -2, -1, 2, 0, -1, -1, 1, 0, 0, -4, 2, 3, -1, -2, -2, 0, 
1,-11, -2, 4, 5, 6, 2, -1, -2, -6, -2, 1, -1, -3, -4, 1, 9, -3, 0, 3, 3, 2, -3, -3, 3, 1, 1, 0, 0, 1, -1, -2, 3, 2, 0, -3, -3, 0, -1, -1, 3, 1, -1, -3, 1, 2, -6, -4, 6, 0, -2, -5, -2, 0, -3, -2, 3, 2, 2, 1, -2, -2, 1, 2, -1, -1, 1, 1, -2, -1, 6, 7, -1, 1, 0, -4, -2, 1, -2, -3, 1, -4, 0, -3, -2, 2, 0, -3, 0, -3, 4, 3, 1, 8, 7, 0, -1, -3, 4, 1, -4, 2, 3, -2, -3, -3, 6, 1, -4, 1, 1, -1, -1, -2, 4, -3, -3, 3, 0, -1, -1, 1, 2, -4, 2, 4, -3, -1, 2, 3, -1, -4, 5, 4, -6, -3, 2 }; /* 6x16-entry codebook for inter-coded 8x8 vectors */ DECLARE_ALIGNED(4, static const int8_t, svq1_inter_codebook_8x8)[6144] = { -4, -3, 4, 5, 2, 1, 1, 0, -5, -3, 5, 5, 2, 1, 0, 0, -6, -4, 5, 5, 2, 1, 0, 0, -7, -4, 4, 5, 2, 1, 0, 0, -8, -5, 3, 4, 2, 1, 0, 0, -8, -6, 3, 4, 1, 1, 1, 0, -8, -6, 2, 4, 2, 1, 1, 0, -8, -6, 2, 4, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -2, -2, -2, -2, -2, -2, -2, -3, -3, -3, -3, -3, -3, -3, -2, -3, -3, -3, -3, -3, -4, -3, -2, -2, -2, -2, -2, -3, -3, -2, 1, 1, 1, 1, 1, 0, -1, -1, 4, 5, 5, 5, 4, 3, 3, 2, 7, 7, 8, 8, 8, 7, 6, 5, 2, 1, 2, 4, 4, 0, -4, -6, 1, 1, 2, 5, 5, 1, -5, -7, 1, 2, 1, 4, 5, 1, -5, -8, 1, 1, 1, 5, 5, 0, -6, -8, 0, 1, 1, 5, 6, 1, -6, -9, 0, 0, 1, 4, 5, 0, -5, -8, 0, 0, 1, 4, 5, 0, -5, -7, 0, 0, 1, 4, 4, 1, -4, -7, 1, 2, 3, 0, -3, -4, -3, -1, 1, 3, 4, 0, -3, -4, -3, -1, 2, 4, 5, 1, -3, -4, -3, -2, 2, 5, 6, 1, -3, -5, -4, -2, 3, 6, 6, 1, -3, -5, -4, -2, 3, 6, 6, 1, -3, -5, -4, -2, 3, 6, 6, 1, -3, -5, -4, -2, 3, 5, 5, 1, -3, -4, -4, -2, 2, 2, 2, 2, 1, 0, 0, -1, 4, 4, 4, 3, 2, 1, 1, 0, 4, 5, 4, 4, 3, 3, 2, 1, 4, 4, 4, 4, 4, 3, 2, 2, 2, 3, 3, 3, 3, 3, 2, 1, -1, -1, -1, -1, 0, 0, 0, 0, -5, -6, -6, -5, -5, -4, -3, -3, -7, -9, -9, -8, -7, -6, -6, -5, 6, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 0, -1, -1, -1, -2, -2, -1, -1, -3, -5, -6, -6, -6, -6, -5, -4, -3, -5, -6, -7, -6, -6, -5, -4, -1, -2, -2, -2, -2, -2, -1, -1, 0, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, -2, -5, -4, 0, 2, 5, 2, 1, -2, -6, -5, 0, 3, 5, 2, 1, -2, -6, -6, -1, 3, 6, 
3, 2, -2, -7, -6, 0, 4, 7, 2, 1, -2, -7, -5, 0, 5, 7, 2, 1, -2, -6, -5, 0, 4, 7, 2, 1, -2, -6, -4, 0, 4, 6, 1, 1, -2, -5, -4, 0, 3, 6, -10, -9, -6, -4, -1, 2, 3, 2,-10, -9, -5, -3, 0, 4, 4, 3, -9, -7, -3, -1, 2, 5, 5, 3, -7, -5, -2, 0, 3, 5, 5, 3, -6, -3, 0, 1, 4, 6, 5, 3, -4, -2, 1, 2, 3, 5, 4, 2, -2, 0, 1, 2, 2, 4, 3, 1, -1, 1, 2, 2, 2, 3, 3, 1, -4, -5, -5, -6, -6, -6, -6, -5, -3, -3, -4, -4, -4, -4, -4, -4, 0, 0, 0, 0, -1, -1, -1, -1, 5, 5, 6, 5, 5, 4, 3, 2, 5, 6, 7, 7, 7, 6, 5, 4, 3, 3, 4, 4, 4, 4, 3, 2, 0, -1, 0, 0, -1, -1, 0, -1, -3, -3, -4, -4, -4, -4, -3, -3, 1, -2, -5, 1, 5, 4, 2, 0, 1, -3, -6, 1, 6, 5, 2, 0, 0, -4, -7, 0, 6, 6, 2, 1, -1, -5, -9, -1, 6, 6, 3, 1, -1, -6,-10, -2, 6, 6, 3, 1, -1, -6, -9, -2, 5, 6, 3, 1, -2, -6, -9, -2, 5, 5, 3, 1, -2, -6, -7, -2, 4, 4, 2, 1, -5, -7, -8, -9, -9, -8, -7, -6, -5, -6, -6, -7, -7, -6, -6, -5, -3, -3, -3, -4, -5, -5, -4, -4, -1, 0, 0, -1, -1, -1, -1, -1, 0, 1, 2, 2, 2, 2, 2, 1, 2, 3, 4, 5, 5, 5, 5, 4, 3, 4, 5, 6, 8, 8, 8, 7, 3, 4, 5, 6, 7, 7, 7, 6, 5, 6, 7, 8, 9, 10, 10, 9, 3, 4, 6, 7, 8, 9, 9, 8, 0, 1, 2, 3, 4, 5, 5, 5, -1, -2, -1, -1, 0, 1, 2, 2, -2, -3, -3, -3, -3, -2, -1, 0, -3, -4, -5, -5, -5, -5, -5, -4, -4, -5, -5, -6, -7, -7, -6, -5, -3, -4, -5, -6, -7, -7, -6, -6, 13, 7, 0, -3, -3, -4, -4, -5, 14, 7, 0, -3, -3, -4, -4, -4, 15, 8, -1, -4, -4, -4, -5, -4, 15, 8, -1, -4, -4, -5, -4, -3, 15, 7, -1, -4, -5, -5, -5, -4, 14, 7, -1, -4, -4, -4, -4, -3, 12, 6, -1, -4, -4, -4, -4, -3, 11, 5, -1, -4, -4, -4, -4, -3, -17, -4, 5, 4, 4, 4, 3, 3,-18, -5, 5, 4, 4, 4, 3, 3, -19, -5, 6, 4, 4, 4, 3, 2,-20, -5, 6, 4, 4, 4, 3, 3, -20, -4, 6, 4, 4, 5, 3, 3,-19, -5, 6, 4, 4, 5, 3, 3, -18, -4, 5, 4, 4, 4, 3, 2,-17, -5, 4, 3, 4, 4, 3, 3, -6, -6, -6, -4, -2, 1, 6, 11, -6, -7, -7, -4, -2, 2, 8, 13, -8, -8, -7, -4, -2, 3, 9, 14, -8, -8, -7, -5, -1, 4, 10, 16, -8, -8, -7, -5, -1, 4, 10, 17, -8, -8, -7, -4, 0, 5, 10, 16, -8, -8, -6, -3, 0, 4, 9, 15, -7, -7, -5, -3, 0, 4, 8, 12, 8, 7, 7, 5, 2, -2, -8,-14, 8, 8, 7, 5, 2, -2, -8,-15, 8, 
8, 7, 5, 1, -3, -9,-16, 8, 8, 7, 5, 1, -3,-10,-17, 8, 9, 8, 5, 1, -3,-10,-17, 8, 8, 7, 4, 1, -4,-10,-16, 7, 7, 7, 4, 1, -3, -9,-14, 6, 7, 6, 3, 0, -3, -9,-13, 5, 1, -4, -4, -3, -1, 0, 0, 7, 2, -3, -3, -2, -1, 1, 0, 7, 1, -3, -3, -1, 0, 1, 1, 6, 1, -3, -2, -1, 1, 1, 0, 6, 0, -4, -2, -1, 0, 1, 0, 5, 0, -4, -3, -1, 0, 0, -1, 5, 0, -3, -1, 0, 0, 0, -2, 4, 1, -2, -1, 0, 1, 0, -1, 2, 2, 1, 1, -2, -6, -8, -8, 1, 1, 1, 1, -2, -5, -8, -8, 1, 1, 1, 0, -1, -3, -5, -5, 0, 0, 0, 0, -1, -1, -1, -2, 0, -1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 2, 3, 2, 2, 1, 1, 1, 2, 3, 4, 3, 3, 3, 3, 3, 4, 4, 5, 4, -4, -4, -3, -2, 0, 0, 1, 1, -4, -4, -3, -2, -1, 0, 0, 1, -2, -2, -2, -1, -1, -1, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1, 2, 2, 2, 2, 2, 2, 1, 1, 3, 4, 4, 4, 4, 4, 4, 3, 1, 1, 1, 3, 3, 4, 3, 3, -5, -6, -5, -4, -3, -3, -2, -2, -4, -2, -1, -1, -1, -1, 0, 1, -4, -2, -1, -1, -1, -1, 0, 1, -3, -2, -1, -1, -1, 0, 1, 2, -4, -3, -2, -1, -1, 1, 3, 3, -4, -3, -3, -1, -1, 1, 4, 5, -4, -3, -2, -2, -1, 1, 4, 7, -2, -2, -1, -1, 0, 2, 6, 8, -1, 0, 0, 1, 1, 4, 7, 8, -3, -3, -3, -2, -2, -1, -1, 0, -1, -1, 0, 1, 2, 2, 3, 3, 0, 1, 2, 4, 5, 6, 6, 5, -1, 0, 2, 3, 5, 6, 5, 3, -1, -1, 0, 2, 3, 3, 2, 1, -2, -2, -1, 0, -1, -3, -4, -4, 0, 0, -1, -1, -2, -4, -8, -7, 1, 2, 1, 0, -1, -4, -6, -7, -2, 4, 1, -6, 0, 3, 0, 0, -2, 5, 1, -7, 0, 3, 0, 0, -3, 5, 1, -8, 0, 3, -1, -1, -2, 6, 1, -9, 0, 3, 0, -1, -2, 6, 2, -8, 0, 4, 0, -1, -3, 5, 1, -7, 1, 4, 0, 0, -2, 4, 1, -7, 0, 4, 1, 0, -1, 4, 1, -6, 0, 3, 1, 0, 0, 0, 0, 3, 4, 5, 4, 1, 1, 1, 1, 2, 3, 3, 2, 0, 2, 2, 1, 2, 2, 1, -1, -2, 4, 3, 1, 1, 0, -1, -3, -5, 5, 3, 1, -1, -2, -3, -4, -6, 5, 3, 0, -2, -3, -5, -6, -7, 4, 3, 0, -2, -3, -4, -5, -5, 4, 3, 0, -1, -2, -2, -3, -3, 0, 0, 0, 0, -1, -5, -2, 6, 0, 0, 0, 1, -1, -6, -2, 8, 0, 0, 0, 2, 0, -6, -3, 9, 0, -1, 0, 2, 0, -7, -2, 10, 0, -1, 0, 2, -1, -8, -3, 10, 0, -1, -1, 2, -1, -7, -3, 9, 0, -1, 0, 1, -1, -6, -3, 8, 0, 0, 0, 1, 0, -5, -2, 7, 2, 3, 3, 2, 1, 0, -1, -1, 3, 4, 3, 2, 1, 0, -1, -2, 3, 4, 4, 2, 1, -1, -2, -3, 2, 3, 3, 2, 0, -1, 
-2, -3, -1, 0, 1, 1, 0, -1, -2, -2, -5, -4, -3, -1, 0, 1, 1, 1, -8, -8, -5, -1, 1, 3, 4, 3,-10, -9, -5, 0, 3, 5, 6, 5, -5, -1, 4, 5, 3, 1, 0, 0, -6, -1, 4, 5, 2, 0, -1, -2, -6, -1, 5, 4, 2, -1, -2, -2, -7, -1, 4, 4, 1, -2, -3, -3, -6, -1, 5, 4, 1, -2, -3, -3, -5, 0, 4, 4, 1, -1, -2, -2, -4, 0, 5, 4, 1, -1, -1, -2, -3, 1, 4, 3, 1, -1, -1, -2, -2, -3, -2, 1, 4, 6, 5, 3, -3, -4, -4, 0, 3, 5, 4, 2, -3, -5, -5, -1, 2, 4, 3, 1, -4, -6, -4, -1, 2, 4, 2, -1, -2, -4, -3, 1, 2, 4, 2, -1, -2, -4, -2, 1, 3, 3, 1, -2, -2, -3, -2, 1, 3, 3, 1, -2, -2, -2, -1, 1, 3, 3, 0, -2, -4, -4, -3, -2, -1, 2, 5, 7, -4, -4, -3, -3, -2, 1, 5, 7, -2, -3, -2, -3, -3, -1, 3, 5, -1, -1, 0, -2, -3, -2, 2, 4, 1, 1, 1, -1, -4, -3, 1, 3, 4, 3, 2, -1, -4, -3, -1, 1, 6, 4, 3, 0, -3, -3, -2, 0, 6, 5, 3, 1, -2, -3, -2, -1, 12, 11, 8, 4, 0, -2, -2, -1, 10, 9, 6, 2, -1, -2, -1, 0, 4, 3, 2, 0, -1, -1, 0, 1, -1, -1, -1, -1, -2, 0, 1, 2, -3, -5, -4, -2, -2, 0, 2, 3, -5, -5, -4, -2, -1, 0, 1, 2, -5, -5, -4, -2, -1, 0, 1, 1, -4, -4, -3, -2, -2, -1, 0, 0, 3, 3, 2, -1, -3, -4, -3, -2, 3, 2, 0, -2, -4, -4, -3, -2, 2, 2, 1, -1, -3, -5, -4, -3, 3, 3, 3, 1, -2, -3, -3, -3, 4, 4, 4, 3, 0, -2, -2, -2, 5, 5, 5, 3, 0, -1, -2, -2, 5, 5, 4, 2, -1, -2, -3, -2, 3, 3, 3, 0, -2, -4, -4, -4, -1, -1, 4, -2, -2, 6, 2, -5, -1, 0, 4, -2, -3, 6, 2, -6, -1, 0, 4, -2, -3, 7, 3, -7, -1, -1, 4, -3, -4, 8, 3, -7, 0, -1, 4, -3, -4, 7, 3, -6, -1, -1, 4, -3, -4, 7, 3, -6, -1, -1, 3, -3, -4, 6, 3, -6, -1, 0, 3, -2, -3, 6, 3, -5, 1, -2, -7, 2, 5, -2, -1, 1, 1, -2, -8, 3, 6, -3, -1, 2, 2, -2, -9, 4, 7, -4, -2, 2, 3, -1, -9, 5, 7, -4, -1, 3, 3, -1, -9, 4, 7, -4, -2, 2, 3, -1, -7, 4, 6, -4, -2, 1, 2, 0, -6, 4, 6, -4, -1, 1, 2, 0, -5, 3, 4, -3, -1, 1, -2, 2, 2, 0, 0, -1, -3, -4, -2, 2, 2, 1, 1, 0, -2, -4, -2, 2, 2, 2, 2, 1, -1, -2, -3, 2, 3, 3, 4, 2, 0, -2, -3, 2, 3, 2, 4, 2, 0, -3, -4, 1, 2, 1, 2, 1, -1, -3, -5, 0, 1, 0, 1, 1, -2, -3, -4, 0, 0, 0, 1, 0, -2, -3, 0, 0, -1, -2, -2, 2, 7, 8, 0, 0, -1, -3, -2, 1, 6, 7, 0, 1, -1, -3, -3, 0, 4, 5, 0, 1, 
0, -1, -1, 0, 1, 3, 0, 2, 1, 1, 0, -1, 0, 1, -2, 0, 1, 2, 1, 0, -1, -1, -5, -2, 0, 1, 1, 0, -3, -3, -6, -4, -1, 1, 1, -1, -3, -4, -4, -2, 2, 5, 6, 4, 3, 2, -5, -3, 1, 4, 4, 2, 0, 0, -4, -2, 0, 2, 1, -1, -2, -2, -2, -1, 0, 1, 0, -2, -3, -2, -2, 0, 0, 0, -1, -1, -2, -1, -2, -1, -1, 0, 0, 0, 1, 2, -2, -2, -1, -1, 0, 1, 3, 4, -2, -3, -2, -1, 0, 2, 4, 5, 2, 1, -2, -2, -1, 0, 1, 0, 1, 0, -3, -3, -1, 0, 1, 0, 0, -1, -3, -3, -1, 1, 1, 1, 0, 0, -3, -1, 1, 2, 3, 3, 0, -1, -3, -1, 1, 3, 3, 3, -2, -2, -4, -2, 1, 3, 4, 4, -3, -3, -4, -2, 1, 3, 3, 4, -2, -3, -5, -2, 1, 2, 3, 3, 4, 5, 3, 4, 4, 4, 4, 5, 3, 3, 1, 0, 0, 0, 0, 1, 1, 1, -1, -2, -3, -4, -3, -2, 2, 2, 0, -2, -2, -4, -3, -2, 2, 3, 1, -1, -1, -3, -3, -2, 1, 2, 0, 0, -1, -2, -2, -1, 0, 1, 0, -1, -1, -3, -2, -1, 1, 1, 0, -1, -1, -2, -2, -2, -2, -1, -1, 0, 1, 2, 1, 0, 1, 2, 3, 5, 6, 5, 5, 3, 1, 2, 3, 4, 5, 5, 4, 3, -2, -2, -3, -3, -2, -1, 0, 0, -3, -3, -4, -5, -4, -3, -2, -1, -1, -1, -2, -2, -2, -1, 0, 0, 0, 1, 0, -1, -1, 0, 0, 1, -1, 0, -1, -2, -3, -2, -2, -1, 7, 7, 6, 5, 4, 2, -1, -2, 3, 3, 2, 2, 1, 0, -2, -3, 0, -1, -1, -1, 0, -1, -2, -2, -1, -3, -2, -1, 0, 0, 0, 1, 0, -2, -2, -1, -1, 1, 2, 2, 3, 1, -1, -1, -1, 1, 2, 2, 3, 1, -2, -3, -2, -1, 1, 2, 1, -2, -5, -6, -5, -3, -2, 0, 0, -1, -2, -3, -1, 0, -2, -2, 0, 0, -1, -1, 0, 1, -1, -2, 0, 0, -2, -1, 0, 0, 0, -2, -1, -2, -3, -3, -2, -1, -3, -3, -1, -2, -3, -3, -2, -2, -3, -4, 2, 2, 0, 0, 0, 0, -1, -2, 5, 5, 3, 2, 2, 2, 0, -1, 8, 8, 6, 5, 4, 4, 2, 1, -7, -8, -6, -3, -1, -1, -2, -1, -5, -5, -3, 0, 2, 1, 0, 0, -1, -1, 0, 3, 4, 3, 1, 1, 2, 1, 1, 3, 4, 3, 2, 2, 3, 2, 0, 2, 3, 2, 1, 2, 4, 2, -1, -1, 0, 1, 1, 1, 3, 2, -2, -3, -2, -1, 0, 1, 3, 1, -3, -4, -3, -2, 0, 1, -4, -2, -1, 2, 3, 3, 1, 0, -7, -5, -4, -2, 0, 0, -1, -2, -6, -5, -5, -4, -2, -2, -2, -3, -1, 0, -1, -1, 0, 0, 0, -1, 2, 3, 2, 2, 2, 2, 1, 0, 3, 5, 4, 3, 1, 0, 1, 0, 3, 4, 3, 2, 0, -1, -1, -1, 5, 5, 3, 1, 0, -1, -1, -1, 1, 1, 0, -1, -3, -5, -6, -4, 1, 1, 0, 0, 0, -3, -3, -1, 0, -1, -1, 0, 1, 0, 1, 3, -2, -2, -3, -1, 2, 
2, 4, 7, -2, -2, -2, 0, 2, 2, 3, 6, -1, 0, 0, 1, 1, 0, 0, 3, 0, 3, 3, 3, 1, -2, -3, -1, 1, 3, 4, 3, 0, -3, -5, -4, 0, 2, 0, -1, -3, -4, -2, -2, 1, 4, 2, 0, -2, -3, -2, -1, 3, 6, 3, 1, -2, -2, 0, -1, 4, 7, 4, 1, -2, -3, -1, 0, 3, 6, 3, 0, -3, -3, -1, 0, 1, 3, 0, -1, -3, -2, 1, 1, 0, 1, -1, -2, -3, -1, 2, 2, -2, -1, -3, -3, -3, -1, 1, 2, 3, 1, -1, 0, 1, 0, 0, 0, 2, -1, -2, -1, 1, 0, -1, -1, 1, -1, -2, 0, 1, 0, -2, -3, 0, -2, -1, 1, 3, 1, -3, -5, 0, -2, -1, 2, 5, 2, -3, -5, 0, -2, -1, 4, 6, 3, -2, -5, 0, -2, 0, 4, 7, 4, -2, -4, 0, -2, 0, 4, 6, 4, -2, -4, -2, -2, -3, -4, -3, -2, -1, 0, 1, 1, 0, -1, -1, -1, 0, 1, 3, 3, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 1, 0, 0, 1, 0, 0, 0, 0, -1, -1, -1, -1, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -2, -3, -2, -1, -1, 0, 3, 4, 4, 5, 5, 6, 6, 7, -1, -2, 7, -2, -4, -1, -1, 0, -1, -2, 9, -1, -4, -1, -1, 0, -1, -3, 10, -1, -4, -1, -1, 1, -1, -3, 10, -2, -3, -1, -1, 2, -1, -2, 10, -2, -4, -1, -1, 2, -1, -2, 9, -2, -4, -1, -1, 2, -1, -2, 8, -2, -4, 0, -1, 1, 0, -2, 7, -2, -3, -1, 0, 2, 3, -4, 1, 3, -3, -2, 1, 0, 3, -5, 1, 4, -3, -2, 1, 0, 3, -6, 2, 5, -3, -1, 3, 0, 3, -6, 2, 5, -3, -1, 2, 0, 3, -6, 1, 5, -4, -2, 3, 0, 3, -6, 1, 5, -3, -2, 2, 0, 2, -6, 1, 4, -3, -1, 1, 0, 2, -6, 1, 4, -2, -1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 2, 0, -1, 1, 1, 1, 0, 0, 2, 0, -1, 0, 0, 0, 0, 0, 2, 0, -1, 0, 0, 0, 0, -1, 0, 1, 0, 1, 0, 0, -1, -2, -1, 3, 1, 1, 0, 0, -2, -4, -3, 5, 3, 2, 1, 0, -3, -5, -4, 5, 4, 2, 0, -1, -4, -5, -5, 1, 0, -1, -2, -2, -3, -6, -9, 2, 0, -1, -1, 0, 0, -3, -6, 1, 0, 0, -1, 0, 0, -2, -5, 2, 1, 1, 1, 1, 2, -1, -3, 1, 1, 2, 1, 2, 2, 1, -1, 1, 1, 2, 1, 1, 1, 1, 1, 0, 0, 2, 1, 0, 0, 2, 2, 0, 1, 2, 2, 0, 0, 2, 2, -4, -3, 0, 1, 4, 6, 4, 3, -3, -2, 0, 0, 2, 4, 1, 0, -1, -1, 0, 0, 1, 1, -2, -3, 1, 1, 1, 0, 1, 1, -3, -5, 1, 1, 1, 0, 1, 1, -3, -5, -1, 0, 0, -1, 1, 1, -2, -4, -1, 0, 0, -1, 1, 2, 0, -2, -1, 0, 0, 0, 2, 3, 1, 0, -1, 0, 3, 4, 0, -4, -5, -5, 0, 0, 4, 5, 2, -2, -3, -2, 0, -1, 2, 4, 2, -1, -1, 0, 0, -2, -1, 1, 0, -2, 0, 1, 1, -2, -2, 0, 0, -1, -1, 
1, 1, -2, -3, 0, 1, 0, -1, 0, 1, -2, -2, 1, 3, 1, 0, 0, 1, -2, -1, 2, 4, 2, 0, 0, 1, 2, 3, 2, 0, 2, 2, 1, -1, 0, 1, 0, -3, 1, 1, 1, -1, 0, 0, -2, -4, 0, 2, 1, -1, 2, 2, -1, -5, 0, 2, 1, -1, 3, 4, -1, -5, 0, 2, 1, -2, 2, 4, 0, -4, -1, 0, 0, -4, 0, 2, 0, -4, -2, 0, 0, -5, -1, 2, 1, -2, 1, 3, 2, 1, 0, 1, 0, 1, 2, -1, -2, 2, 0, -1, -2, 1, 3, 0, -1, 3, 0, -2, -4, 0, 3, 1, 0, 5, 1, -3, -5, -2, 2, 1, 1, 6, 1, -2, -5, -2, 1, 0, 1, 5, 1, -1, -5, -2, 0, -1, 0, 3, 0, -2, -4, -2, 0, -1, 0, 1, -1, 0, -2, 0, 1, 0, 1, 1, 1, 2, 3, 2, 1, 1, 2, -1, -1, 0, 1, 1, 0, 1, 1, -4, -3, 0, 0, 1, 1, 1, 2, -4, -3, 0, 2, 2, 2, 3, 2, -5, -4, 0, 1, 1, 1, 1, 2, -5, -4, -1, -1, -2, -2, -1, 0, -3, -2, 0, 0, -2, -3, -2, -1, 2, 3, 4, 4, 2, 0, 0, 0, -4, -2, 0, 1, 0, 0, 0, 0, -3, -1, 1, 1, 0, 0, 0, 0, -2, 0, 2, 2, 0, 0, 0, 2, -1, 1, 2, 1, -1, 0, 3, 5, 0, 2, 1, -1, -2, 0, 5, 6, 0, 1, 0, -3, -3, 0, 4, 6, 1, 1, -2, -4, -4, -3, 1, 2, 1, 0, -2, -4, -5, -4, -2, 0, -1, -3, -3, -3, -3, -2, -1, -1, 3, 2, 1, 0, 0, 1, 1, 1, 5, 4, 3, 2, 1, 1, 2, 2, 2, 1, 0, -2, -2, -2, -1, -1, 0, 0, 0, -1, -2, -2, -2, -2, 0, 1, 3, 3, 2, 1, -1, -1, 0, 1, 3, 4, 3, 2, 1, -1, -4, -3, -1, 1, 0, -2, -3, -3, -3, -4, -7, -8, -7, -4, -1, 2, 0, -1, -3, -4, -4, -2, 0, 2, 1, 0, 0, -1, -3, -2, 0, 2, 2, 1, 1, 0, -1, -1, 0, 2, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 2, 3, 3, 2, 2, 0, 0, 1, 3, 4, 4, 3, 2, 3, 3, 3, 0, -1, 0, 1, 2, 1, 1, 1, -1, -2, -1, -1, 1, -2, -2, -1, -3, -3, -2, -2, 0, -4, -4, -2, -2, -2, -2, -3, 0, -4, -4, -1, 1, 1, 0, -1, 2, -3, -1, 2, 3, 4, 3, 3, 5, -2, 0, 2, 3, 3, 3, 3, 3, -2, -2, 0, 0, 0, 0, 0, 1, 0, 2, 1, -1, -3, -1, 3, -2, -1, 0, -1, -1, -3, 0, 4, -2, -2, -2, -2, -2, -2, 1, 5, -2, -3, -2, -3, -1, -2, 1, 4, -3, -2, 0, -1, 0, -1, 0, 3, -5, 1, 2, 1, 2, 0, 0, 2, -5, 2, 4, 2, 3, 1, 1, 3, -3, 1, 2, 1, 1, 0, 1, 4, -2, 4, -3, -4, -1, 3, 3, 1, 3, 4, -4, -4, -1, 3, 2, 0, 2, 4, -3, -4, 0, 2, 2, -1, 1, 4, -3, -2, 1, 2, 1, -2, 0, 2, -4, -2, 1, 2, 0, -3, 0, 2, -3, -2, 0, 1, 0, -2, 2, 3, -1, -1, 0, 0, 0, 0, 3, 2, -2, 
-2, -2, -1, -1, -1, 2, 2, 2, 3, 4, 3, 1, 0, -1, 1, 0, 1, 2, 1, -1, -2, -2, 2, 1, 2, 1, 1, 0, -1, -1, 4, 3, 4, 3, 2, 1, 1, 1, 3, 2, 2, 2, 1, 1, 1, 1, -1, -2, -1, 0, -1, -1, -1, -1, -3, -3, -2, -1, -2, -2, -2, -2, -4, -4, -3, -3, -4, -4, -3, -3, 2, 1, -1, -3, -4, -2, 3, 4, 2, 2, 1, -1, -3, -2, 1, 2, 1, 2, 3, 3, 0, -2, -1, -2, -1, 0, 2, 4, 2, 0, -1, -3, -2, -2, 0, 3, 3, 2, 0, -3, 0, -2, -3, -1, 1, 2, 2, -1, 3, -1, -4, -5, -3, 0, 2, 0, 6, 3, -2, -6, -5, 0, 3, 1, -2, 3, -2, 0, 3, -2, -2, 1, -3, 4, -3, 0, 3, -2, -1, 2, -3, 5, -3, 0, 4, -2, -1, 2, -2, 4, -4, -1, 3, -3, -2, 2, -3, 4, -3, 0, 3, -3, -1, 2, -2, 5, -2, 0, 3, -3, -1, 2, -2, 4, -3, 1, 3, -2, -1, 2, -2, 3, -2, 1, 3, -2, 0, 2, 1, 0, 0, -1, 1, 2, -4, -1, 2, 0, 0, -1, 1, 2, -4, -2, 1, 1, 1, -1, 2, 4, -2, 0, 0, -1, 1, -1, 2, 5, -1, 1, 0, -1, 0, -2, 1, 5, -1, 1, 0, -1, -1, -2, 0, 3, -3, -1, 1, 1, 0, -2, 0, 3, -3, -1, 1, 1, 0, -3, 0, 3, -2, 0, 1, 0, -1, 1, 1, 2, 4, 5, 1, 0, -1, 1, 1, 1, 5, 7, 0, 0, -2, -1, -1, 0, 3, 5, 0, -1, -2, -1, -1, -1, 2, 3, 0, -1, -3, -1, -1, -1, 1, 2, -1, -2, -4, -2, -2, -2, 0, 0, -1, -2, -2, -1, -2, -2, 0, 0, 0, -1, -1, 0, -1, -1, 0, 0, 3, 3, 0, -1, -1, 1, 4, 4, 2, 3, 0, -2, -2, 0, 1, 1, 2, 3, 1, -1, -1, 0, 1, 0, 1, 2, 0, -1, -1, -1, 0, -2, 0, 1, 0, -1, -2, -1, 0, -2, 0, 1, 0, -1, -2, -1, 1, 0, 1, 1, -1, -3, -4, -3, 1, 3, 1, 2, -1, -3, -5, -4, 1, 3, -3, -2, 0, 1, 1, 1, 0, -2, 0, 1, 1, 1, 0, 0, -1, -3, 1, 2, 1, 1, 0, -1, -1, -2, 0, -1, -3, -1, -1, -1, 0, -1, 0, -3, -6, -3, -2, -1, 1, 1, 2, -1, -4, -3, -2, 0, 2, 2, 5, 4, 1, 1, 0, 1, 3, 2, 5, 4, 2, 1, 0, -1, 0, 1, -2, 0, -2, -5, -6, -3, 0, 0, -2, 0, 1, 0, -1, 1, 2, 2, -2, 0, 1, 3, 2, 2, 2, 1, -2, 0, 2, 4, 3, 2, 1, 1, -2, 0, 2, 3, 2, 0, -1, 0, -3, -1, 1, 1, 0, -1, -1, 1, -4, -1, 1, 0, -1, -2, 0, 2, -4, -1, 0, -1, -1, -2, 1, 4, -3, 0, 0, -1, 1, 1, 1, 0, -3, 1, 0, -1, 0, 0, -1, -1, -1, 3, 3, 0, 1, 0, 0, 1, -3, 2, 2, -2, -1, 0, 0, 1, -5, 0, 0, -2, -1, 1, 0, 2, -7, -2, 1, 0, 1, 2, 2, 2, -5, 0, 3, 2, 3, 3, 2, 2, -3, 2, 4, 1, 0, 0, -2, -3, 5, 2, -2, -2, 
0, -1, -1, -1, 2, -1, -4, -3, -1, -2, -1, -1, 0, -2, -2, 1, 2, -1, 0, 1, -1, -2, -1, 3, 3, -1, 0, 2, 1, 0, 0, 3, 3, -2, -1, 2, 2, 1, 1, 3, 2, -2, -2, 0, 1, 0, -1, 1, 1, -3, -3, -2, 1, 0, 1, 2, 3, 0, 0, 0, -4, -5, -3, 0, 1, -1, -2, -1, -2, -3, -1, 1, 2, 0, 0, 0, 1, 1, 2, 1, 2, 1, 1, 1, 3, 4, 3, 1, 0, -2, -1, -1, 3, 3, 2, 0, -2, -3, -3, -2, 1, 1, 0, -1, -2, -4, -2, -2, 2, 1, 0, 0, 0, -1, 0, 1, 2, 1, 1, 1, 1, 1, 1, 3, 0, 0, 0, -1, -2, -1, 1, 0, -2, -1, -1, -2, -3, -2, 0, 0, -1, 0, 0, -1, -2, 0, 1, 1, 1, 1, 0, -1, -1, 1, 3, 1, 2, 2, 0, -2, -1, 2, 3, 0, 3, 1, -1, -1, 1, 4, 2, -2, 2, 0, -3, -1, 3, 5, 0, -5, 1, -1, -2, 0, 3, 3, -1, -6, -1, 0, 3, 4, 2, 0, 1, 2, -2, -1, 0, 1, -1, -2, 0, 1, -2, -3, -2, -3, -6, -7, -6, -3, 2, 2, 3, 1, -1, -2, -3, -2, 2, 2, 3, 1, 0, 0, 0, 0, 2, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 2, 1, 0, -1, 0, 0, 2, 2, 1, 1, 1, 3, 1, -1, -1, -1, 1, -2, -1, 0, 0, -2, -2, -1, 2, -2, -2, 1, 1, 1, 0, 1, 3, -2, -2, 0, -1, 0, -1, 0, 2, 0, 0, 1, 0, -1, -1, -2, 1, 3, 2, 2, 1, 0, -2, -2, 1, 5, 3, 3, 2, 1, 1, 1, 4, 0, -3, -4, -5, -4, -3, -1, 1, -6, -4, -1, 2, 2, 0, 0, -1, -4, -2, 1, 3, 3, 2, 2, 0, -3, -2, -1, 2, 3, 3, 2, 0, -3, -2, -2, 1, 2, 1, 1, -1, -2, -2, -2, 0, 2, 2, 1, -1, -1, -1, -1, 1, 2, 3, 2, 0, -1, -1, -2, 1, 2, 2, 2, -1, 0, -1, -2, 0, 2, 1, 0, -1, 6, 4, 2, 1, 0, 0, 0, 1, 4, 2, -1, -2, -2, -2, -1, -1, 2, 1, -1, -2, -2, -2, -2, -1, 2, 2, 0, -2, -2, -2, -1, 0, 0, 0, -1, -2, -2, -1, 0, 1, -3, -3, -2, -1, -1, -2, -1, 0, -3, -2, 2, 3, 2, 0, -1, -2, -2, 0, 4, 5, 5, 2, 0, -1, 5, 4, 2, 0, -1, -2, -1, -1, 4, 3, 2, 1, 0, -1, 0, -1, 1, 1, 0, 1, 1, 0, 1, -1, -2, -1, -1, 0, 0, -2, -2, -3, -1, 0, 0, 0, -1, -3, -3, -5, 0, 1, 1, -1, -1, -2, -2, -3, -1, -1, -1, -2, -1, 1, 3, 1, -1, -2, -2, -1, 2, 5, 6, 5, -3, -3, -2, 1, 1, -2, -1, -1, 1, 2, 3, 4, 1, -3, -1, -3, 3, 2, 0, 1, -1, -3, -1, -3, 1, 0, -1, 0, -1, -1, 1, 0, 1, 1, 0, 1, 2, 2, 5, 3, 1, 1, 1, 2, 2, 2, 3, 0, -3, -1, -2, -2, -3, -3, -1, -3, -1, 1, 1, 0, -1, -1, 0, -2, 2, 0, -2, -2, 2, 4, 1, -2, 1, 0, -2, -1, 3, 5, 
2, -1, -1, -2, -3, -2, 1, 3, 1, -2, -1, -2, -1, -1, 0, 2, 1, -1, 0, 0, 1, 1, 1, 2, 2, 0, 0, 1, 4, 4, 2, 2, 3, 1, -2, -1, 2, 1, -2, -3, -2, -3, -1, 0, 1, 0, -3, -4, -4, -5, 4, 0, -3, -4, -4, -4, -2, -1, 5, 0, -1, 0, -1, -3, -2, -1, 4, 0, 0, 1, 1, 0, 0, 0, 0, -3, -2, -1, 0, 0, 1, 0, 0, -2, 0, 0, 1, 1, 2, 1, 2, 0, 0, 0, 1, 1, 1, 0, 2, 0, -1, -1, 1, 1, 1, 0, 1, -1, -2, -2, 0, 2, 2, 2, -3, -5, -2, 0, -1, -3, -3, 0, 0, -2, 0, 2, 2, 0, 0, 3, 2, -1, -2, 0, 0, -1, -1, 2, 5, 2, -1, -1, -1, -1, -1, 2, 5, 2, 0, -1, -1, 0, -1, 2, 2, 1, 0, 0, 0, 1, 0, 2, -1, -1, 1, 1, 2, 2, 1, 2, -3, -2, 0, 0, 0, 0, -2, -1, 0, 3, 2, 0, -2, -3, -3, -3, 0, 3, 3, 1, 0, 0, 1, 2, -1, 0, -1, -2, -1, -1, 1, 3, -1, 0, -1, -2, -1, -1, 0, 2, -1, 0, -1, -2, 0, 0, -1, 2, -1, 0, -1, -2, -1, -1, -2, 1, 0, 1, 0, -3, -1, -1, -1, 2, 5, 5, 2, -1, -1, -1, 1, 3, 0, 0, 1, -1, -3, -2, 0, 2, 1, 1, 3, 0, -2, -2, 0, 1, 1, 1, 3, 1, 0, 0, -1, -1, 0, -1, 2, 1, 1, 0, -1, -3, -1, -2, 1, 1, 1, 0, -2, -4, -1, 0, 2, 1, 1, 0, -1, -3, 1, 1, 3, 2, 1, 0, -2, -3, 2, 2, 4, 2, 1, -1, -2, -4, 1, 2, 2, 2, 0, -2, 0, 2, -1, -1, -2, -3, -4, -5, -3, 1, 0, 1, 1, 0, -1, -1, -1, 1, 0, 1, 1, 1, 0, 0, 0, 2, 0, 1, 1, 2, 1, 1, 1, 2, -1, -1, 0, 2, 2, 2, 2, 3, -2, -4, -4, -1, -2, -2, -2, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, -1, -3, -2, 0, 2, 2, 1, 0, -1, -2, -3, 0, 1, 1, 2, 1, 0, -2, -3, -1, 0, 0, 1, -1, 0, -1, -2, 0, 0, -1, 0, -1, 1, 1, 0, 2, 2, 0, 0, 0, 2, 3, 1, 3, 5, 3, 2, -1, 1, 1, -2, 0, 3, 1, 1, -1, 0, 0, -4, -4, -1, -1, -1, -1, 1, 1, 0, 1, 2, 1, 2, -3, 0, 1, 0, 1, 1, 0, 2, -5, -3, -1, -1, 0, 1, 0, 1, -4, -3, -2, -3, -2, -1, -1, 0, 0, 0, -1, -2, -2, -2, -2, 0, 3, 4, 2, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, -1, 0, 0, 1, 2, 3, 4, 4, 3, 2, -1, 4, 7, 4, 0, 0, 0, 0, -1, 4, 6, 3, 0, 1, 1, 1, 0, 3, 4, 0, -1, 0, 0, 1, 0, 1, 1, -2, -1, 0, -1, -1, -1, 0, -1, -1, -1, 0, 0, 0, -1, -1, -1, 0, 0, 0, 0, 0, -1, -3, -3, 0, 1, -1, -2, -1, -3, -4, -4, -2, -1, -2, -2, -1, 2, 2, 1, 0, 1, 1, 0, -3, -2, -1, 0, 0, 1, 1, 0, -3, -2, -1, 0, 1, 2, 1, 1, -2, 1, 2, 2, 2, 3, 3, 2, -1, 
1, 2, 1, 0, 1, 1, 2, -1, 0, 1, -2, -4, -2, 0, 1, -1, 1, 1, -1, -3, -2, 0, -1, -3, 1, 2, 0, -1, 0, 1, -1, -4, -1, -1, -2, -2, 0, 3, 4, 3, 1, 1, -1, -3, -2, 0, 0, 0, 2, 2, 2, 2, 2, 1, -1, -1, 1, 1, 1, 3, 3, 0, -2, -2, 0, -1, -1, -1, 0, -2, -1, -1, -1, -3, -4, -3, -2, -2, 0, 2, -1, -1, 0, 1, 2, 2, 3, 5, -2, -1, -1, 0, 0, 0, 0, 1, -2, -3, 2, 0, 0, 1, 1, -1, -1, -4, 1, -2, -1, 2, 2, 0, 1, -4, 0, -2, -2, 1, 1, -1, 2, -3, 1, -1, -1, 1, 1, -1, 3, -2, 3, 1, 0, 1, 1, -1, 1, -3, 2, 1, 0, 1, 0, -1, -1, -5, 1, 0, -1, 0, 1, 1, 0, -3, 3, 3, 1, 2, 3, 3, 0, -1, -2, 1, 5, 5, 2, -1, 1, -1, -2, -1, 1, 1, -2, -5, 1, 1, -1, -2, -1, -1, -1, -3, 1, 1, -1, -1, -1, 2, 4, 3, -1, -1, -1, -1, -1, 0, 4, 3, -1, -1, 0, 1, -1, -3, -1, -1, 0, 0, 0, 2, 2, 0, 0, -1, 0, -2, -3, 0, 1, 1, 3, 2, 2, 3, 2, 1, 0, 0, -2, -2, 2, 3, 0, 1, 1, 3, 3, 2, 0, 0, -3, -1, -1, 2, 2, 3, -2, -2, -3, 1, 1, 2, 1, 1, -2, -1, -2, 2, 1, 1, -1, -2, 0, 1, 0, 2, 0, 0, -2, -2, 0, 1, 0, 2, 0, 0, -2, -2, -3, -2, -2, 0, -1, -2, -2, -3, 0, 1, -1, 3, -1, 1, 3, -1, 0, 1, -1, 3, -1, -1, 2, -3, 1, 1, -2, 3, -1, -3, 0, -3, 2, 2, -2, 3, 0, -2, 1, -2, 1, 1, -3, 3, -1, -2, 1, -3, 1, 1, -3, 3, 0, -1, 1, -2, 1, 2, -1, 4, 0, -1, 1, -2, 0, 1, -1, 3, -1, -3, 0, -3, -3, -3, -1, 1, 2, 1, -1, -2, -2, -2, 0, 2, 1, 0, -2, -2, -3, -2, 1, 2, 1, -1, -2, -1, -3, -2, 2, 4, 0, -2, -2, 1, -3, -1, 2, 4, 0, -2, -2, 2, -1, 1, 4, 3, -1, -3, -2, 2, 0, 2, 4, 2, -1, -2, -1, 2, 0, 1, 2, 0, -1, 0, 1, 3, 3, 0, -5, 1, 4, 0, 0, 1, 1, -2, -5, 2, 5, -1, -2, 1, -1, 0, 0, 3, 3, 1, 0, -1, -2, 3, 4, -2, -3, -1, 0, -2, -3, 3, 5, -3, -3, 0, 0, -2, -1, 3, 2, -2, -2, 2, 2, -1, 2, 0, 0, -1, 0, 0, 0, 0, 0, -3, -2, 1, 3, 0, -2, -2 }; /* list of codebooks for inter-coded vectors */ const int8_t* const ff_svq1_inter_codebooks[6] = { svq1_inter_codebook_4x2, svq1_inter_codebook_4x4, svq1_inter_codebook_8x4, svq1_inter_codebook_8x8, NULL, NULL, }; /* 6x16-entry codebook for intra-coded 4x2 vectors */ DECLARE_ALIGNED(4, static const int8_t, svq1_intra_codebook_4x2)[768] = { 12, 13, 13, 
11, -7,-10,-15,-17,-16,-15,-12,-10, 11, 15, 15, 12, 2, 17, 20, 15,-45,-24, 2, 13, 21, 20, -6,-36, 12, 16, -1,-27, -18,-21, 10, 45,-11,-20, -7, 21, 43, -8,-28, 0, 33,-16,-28, 3, -12,-18,-18, -6,-20,-10, 28, 55, -5,-18,-21,-18, 56, 30, -6,-20, -34, 27, 29,-22,-30, 29, 26,-25, 30, 34, 33, 26,-25,-31,-35,-33, -31,-35,-36,-32, 29, 36, 37, 31,-71,-12, 38, 34,-63, -1, 42, 33, 58, 37,-31,-60, 55, 34,-33,-61,-57,-57, 22, 93,-57,-58, 21, 93, 59, 69, 70, 62,-63,-68,-68,-60,-64,-71,-71,-64, 63, 73, 72, 62, -2, 0, 7, 15,-11,-10, -3, 5, -5, -8,-10,-10, 1, 9, 14, 9, 15, 8, -4,-11, 12, 2,-11,-12, -8, 0, 19, 28, 4, -1,-15,-26, -15, 27, 2,-14,-14, 22, 1, -9, -4, -6,-13,-10, -6,-14, 6, 47, -35,-20, 6, 23, 6, 9, 6, 4, -6, 2, 23,-22, -7, 4, 28,-21, 20,-22, -2, 6, 22,-28, -5, 8,-10,-18,-16,-12, 36, 19, 2, -1, -3, 0, 4, 8,-45,-10, 23, 23, 40, 15,-20,-35, -4, -1, 4, 1, 9, -5,-33, 24, 8, 3,-26, 19, -1, 4, 6, -3, 32, 25,-13,-49, 24, 24, 15, 7,-17,-27,-19, -7,-47, 0, 39, 24,-21, -6, 7, 4, -1, 0,-10,-13, 1, 1, 5, 16, 20, 5, -3, -9, -1, -4, -2, -6, -17, -7, 1, 4, 12, 7, 0, 0, 3, 0, 12, 11, -3, 1, 0,-23, 4, 17, -6, 0, 6, 3,-25, 0,-17, 10, 8, 5,-14, 4, 1, 4, 13, 10, 4, 2,-23, -9, 1, 2, 3, -3, 1, 7, 1,-23, -7, 20, -7,-18, 2, 12, -5, -4, 10, 9, 4, 10, 7,-24, 6, 3, 4,-10, 22,-14,-22, 6, 0, 5, 5, -1, -4, 3,-11, -4, -7, 31, 7,-14, -5,-16, -1, 42, -4, -2, -9, -5, 5, -8, -6, -3, 42, -4,-21, -5, -18, 12, 20,-12, 13,-13,-10, 7, -8, -9, -2,-18,-16, 6, 40, 8, 10, -1, 0, 4, -3, 4, -1,-13, -2, 6, 1,-15, 5, 3, 1, 2, -4, -2, 1, 3, 15, 0, -9, -4, -3, -4, -4, -4, -3, 5, 16, -3, 2, 13, 3, 4, -3, -8,-10, 0, -6, -2, -4, -1, -2, -3, -6, 23, 6, -6, 7, 1, 4,-18, 5, 1, -1, 1,-15, 14, -5, 6, -4, 4, 2, 2, 2, 6,-24, 2, 7, 3,-26, 0, 3, 3, 5, 7, 1, 6, 14, -2,-18, -3, 7, 5, -4, 2, -6, 3, 32, 1, -6, -6, -6,-12, 5,-36, 7, 6, 9, -1, 11, 0, 4, 4, 5, 3, 4, 15, 3,-38, 10, 23, -5,-42, 0, 4, 4, 4, 23, 17, -6,-13,-13,-37, 1, 29, 5,-14, -1, 1, 5, 0, 3, 1, 0, 4, -5, 2, 8, 0, 0,-10, 4, 7, -2, -3,-10, 3, 1, 1,-12, -1, 13, 3, 0, -1, 1, 
-3, 0, -1, 3, 1, -6, -9, 3, 9, -6, 1, -4, -6, 8, -1, 0, 8, -3, -3, 0, 18, -5, -1, -4, -1, -8, -2, 3, -4, 0, 17, -1, -5, 5, -2, 9,-10, 1, -5, 6, -5, 4, 2, 2, 3, 10,-14, -8, 1, -1, -2,-18, -1, -1, 20, 1, 2, -1, 1, -9, 1, -1, -9, 22, -4, 6, -4, 8, -3, -1, 7,-19, 5, -7, 31, -4, -4, -6, 0, -5, -5, -7, -8,-19, -4, 1, 1, 4, 32, 38, -1, -8, 4, -7, -8, -6,-12, -1, 0, -7, 1, -1, 9, -1, 0, 9, -1, -1, 0, 2, -6, 1, -3, -12, 0, 2, 1, 1, 1, 8, 0, 9, 1, 0, 2, -2, 1,-11, 0, 0, 8, 2,-10, -1, 2, -1, 0, -2, -4, 0, -5, -2, -1, -1, 14, -3, 7, -1, 5, 0,-10, 1, 1, -1, -5, 14, -1, -2, 1, -3, -2, -6, 0, 0, 6, 2, 3, -9, 4, 4, -5, -1, -1, -7, 3, 8, -1, 2, -4, -1,-11, 11, 2, 1, 0, -1, 2, 3, 9, 0, 2, 0,-15, 3, 5,-20, 3, 3, -1, 3, 3, 1, -1, 16, 1, 2,-29, 9, 2, -13, -6, -1, -3, 36, -1, -8, -3, 2, 5, 4, 2,-37, 9, 11, 3 }; /* 6x16-entry codebook for intra-coded 4x4 vectors */ DECLARE_ALIGNED(4, static const int8_t, svq1_intra_codebook_4x4)[1536] = { -11, -3, 3, 6,-10, -1, 5, 7, -9, -1, 6, 7, -9, -1, 4, 6, 5, 7, 0,-14, 6, 9, 2,-15, 6, 9, 2,-15, 4, 6, 0,-14, 16, 3, -5, -6, 16, 1, -8, -8, 14, -1, -9, -9, 12, 0, -8, -8, 8, 12, 16, 17, -2, 2, 6, 9,-10, -8, -4, 0,-15,-14,-11, -7, -7,-10, -2, 16, -7,-11, -3, 18, -7,-11, -1, 20, -6, -8, 1, 19, -9,-13,-16,-17, 2, -2, -7, -9, 11, 8, 4, -1, 16, 15, 11, 7, -22, -2, 13, 15,-24, -2, 14, 16,-25, -4, 13, 15,-25, -6, 10, 13, 26, 26, 22, 16, 17, 15, 9, 3, -2, -6,-11,-14,-20,-25,-28,-28, -27,-27,-25,-21,-16,-15,-11, -7, 3, 8, 12, 13, 23, 28, 31, 30, 20, 16, -7,-33, 22, 19, -6,-35, 22, 19, -6,-34, 20, 17, -6,-32, -20,-20, 2, 38,-21,-22, 2, 40,-21,-22, 2, 40,-20,-20, 3, 38, -47, -4, 24, 26,-50, -3, 26, 27,-50, -3, 26, 27,-47, -4, 24, 26, 45, 6,-23,-27, 48, 5,-25,-28, 48, 5,-26,-28, 44, 6,-24,-27, -30,-36,-10, 76,-31,-37,-11, 78,-31,-37,-11, 78,-31,-36,-10, 77, -53,-32, 35, 52,-54,-34, 36, 52,-54,-34, 36, 52,-53,-33, 34, 51, -93,-34, 62, 65,-93,-34, 62, 66,-93,-34, 62, 65,-93,-34, 60, 64, -7, 0, 2, 2, -8, -1, 3, 3, -8, 0, 4, 5, -6, 1, 5, 5, 3, 7, 11, 11, 2, 2, 3, 3, 1, 
-2, -6, -7, 1, -5,-11,-13, 3, -2, -4, -3, 7, 0, -5, -5, 12, 4, -5, -7, 14, 6, -4, -7, 18, 14, 3, -2, 6, 4, 0, -3, -8, -5, -2, 0,-16,-11, -2, 2, -8, -6, 7, 18, -7, -8, 2, 13, -4, -6, -2, 6, 0, -4, -3, 1, 1, -3,-13,-18, 0, -1, -5, -7, -1, 1, 6, 7, -2, 4, 15, 17, -15,-14, -7, -2, -6, -5, -1, 0, 6, 6, 3, 1, 15, 13, 6, 1, 2, -2,-11, 10, 2, -1,-12, 11, 3, -1,-12, 11, 2, -2,-11, 11, -9, 14, -1, -5, -9, 15, -2, -5, -8, 16, -2, -5, -7, 15, -1, -4, 2, 6, 8, 8, -2, 3, 9, 12,-11, -5, 4, 10,-19,-16, -8, 0, 14, 8, -7,-15, 12, 7, -7,-14, 8, 5, -4, -9, 5, 3, -1, -4, 12,-14, -2, 2, 13,-15, -1, 3, 14,-15, -1, 3, 13,-14, -1, 3, 0, 6, 10,-13, 0, 6, 10,-15, 0, 7, 9,-17, 1, 6, 8,-16, -8, -5, 15, -2, -8, -6, 17, -2, -8, -6, 16, -3, -8, -5, 15, -2, -9,-11,-11,-10, 9, 10, 9, 8, 8, 10, 10, 9, -8, -9, -8, -7, 9, 10, 9, 7, -8,-10,-10,-10, -7,-10,-11,-11, 11, 12, 11, 8, 0, 10, 7, 0, 0, 7, 0, -6, 0, 2, -5, -6, -2, -1, -4, -1, 5, 0, -6, -9, 2, 2, 2, 1, -2, 0, 5, 7, -6, -5, 1, 4, 3, -8, 2, -1, 4, -9, 3, 0, 5, -7, 3, 0, 7, -5, 3, 0, -5, -3, 2, 9, -6, -3, 1, 8, -6, -3, 1, 7, -5, -2, 0, 4, 13, 8, 3, 1, -3, -5, -4, -1, -8, -7, -3, 0, -1, 1, 3, 2, 3, 2, -5,-12, 4, 3, -2, -9, 3, 4, 1, -4, 3, 5, 4, -1, -9, -8, -4, 0, 8, 6, 2, 0, 10, 8, 3, 0, -6, -5, -3, -1, -3, -9,-12, -5, 0, -3, -5, 0, 2, 3, 2, 4, 5, 8, 7, 6, -1, -2, 5, 12, -1, -1, 5, 9, 2, 1, -1, -2, 2, -1,-11,-17, -7, 3, 3, -1, -9, 3, 4, -1,-10, 4, 6, -1, -9, 5, 7, 0, -18, -7, 2, 2, -8, 1, 5, 3, 3, 4, 1, 0, 9, 5, -2, -3, -2, 0, 6, 8, -4, -5, -5, -3, 1, -2, -6, -8, 10, 9, 3, -1, 0, -2, -2, 0, 0, -4, -5, 0, -2, -8, -4, 8, -5, -7, 6, 24, 9, 1, -7, 1, 9, 1, -8, 1, 8, 0,-10, 1, 8, -1,-11, -1, 8, 8, 6, 3, 5, 4, 3, 2, -2, -3, -1, 0,-10,-13, -8, -4, 0, 4, 2, -3, 0, 6, 3, -5, 3, 10, 2,-12, 5, 10, -4,-22, 0, -4, -1, 3, 1, -4, -1, 5, 1, -5, 0, 8, -1, -6, -2, 7, -1, -1, -2, -4, -1, -2, -4, -6, -1, -1, -1, -2, 1, 5, 10, 9, 10, 3, 0, -2, 6, -1, -2, -5, 3, -1, -2, -6, 2, 0, 0, -5, 6, 3, 0, 0, 6, 3, 1, 1, 4, -2, -2, 1, 0, -9, -9, -2, -11, -3, 1, 2, -6, 2, 4, 5, -3, 
2, 3, 4, -2, 1, 1, 2, -6, -4, -1, -2, 2, -1, -1, -2, 10, 2, -2, -2, 11, 2, -4, -1, 6, 0, -2, 2, 3, 3, 0, 0, -6, 3, 3, 0,-17, -1, 5, 0, -1, 4, 10, 11, -3, -2, 0, 1, -3, -4, -5, -3, -1, -2, -2, -1, 2, -3, -9,-12, 3, 3, 3, 2, 2, 2, 4, 4, 2, 1, -1, -2, -2, 9, 5,-10, -3, 5, 5, -5, -2, 1, 2, 0, -1, -2, -2, 1, -2, -3, 7, -2, -1, -3, 7, -3, -1, -2, 8, -4, -2, -2, 7, -3, 1, -8, -3, 12, 2, -2, -2, 4, 1, 3, 0, -5, -1, 5, 2, -7, -1, 3, 1, -5, -7, -2, 3, 1, -2, -7, -2, 2, 20, 3, -5, -1, 5, 0, -3, -2, -7, -7, 0, 6, -6, 0, 7, 6, 2, 6, 0, -7, -2, 6, -7, 1, -2, 7, -8, 3, -2, 7, -7, 3, -1, 7, -6, 2, -5, -2, 5, 7, 4, 1, -4, -8, 6, 3, -2, -5, -7, -5, 3, 7, -1, -1, 6, 5, 0, -1, 1, -4, 2, 1, 0, -7, 1, 0, 0, -4, -8, 0, 3, 1, -2, 1, -1, -1, 1, -1, -3, 1, 1, -2, 1, 9, 5, 2, -3, -4, -1, 0, -1, -3, -3, 1, 3, 1, -4, 0, 4, 2, 2, -2, -2, 12, 0, -2, -5, 3, -1, 0, -3, 1, -3, -1, -2, 1, 1, 5, 3, 0, -6, -4, -2, 1, 0, -2, -2, 2, 6, 1, -4, -1, -3, -5, -5, -1, 3, 5, 5, 4, 0, 3, 1, -1, -2, 1, -2, -3, 2, -4, -5, -3, 4, -2, -3, -2, 6, 0, -1, -1, 7, 1, 0, 0, -3, -2, -2, 0, -2, -3, -5, -1, -2, 2, 0, -1, -1, 11, 9, -1, 0, 1, -1,-10, -1, 1, 0, -6, 1, 0, 1, 4, 2, -5, -1, 13, -2, 4, 5, 0, -5, 1, 6, 3, -6, -2, 3, 2, -5, -2, 0, -2, -1, 1, 1, -2, -1, -2, 0, 2, 5, 5, 5, 7, 0, -4, -8, -7, 0, 2, -1, -5, -1, 2, 2, -3, 0, 5, 3, -5, 3, 8, 2,-12, 8, 4, 0, -2, 10, -1, -4, -1, 3, -6, -3, 0, -4, -5, 0, 0, 0,-10, -4, 2, -1, -6, 3, 5, -1, -3, 6, 4, 0, -2, 4, 2, 0, 8, 1, -1, 0, 11, 1, -3, -1, 6, -2, -4, -3, -2, -7, -4, 0, -1, -1, -1, 4, 5, 6, 5, -5, -9, -8, -5, 2, 2, 3, 2, 0, 2, 6, 1, 2, 0, 3, 0, 1, -2, -1, -2, 0, -1, -3, -6, 0, 0, 2, 0, 4, 0, 2, 1, 5, -2, 0, 0, -2, -9, -1, 2, 0, 1, 0,-10, -1, 1, 8, 0, -1, -2, 4, 0, 1, -1, 2, -1, -3, -2, 2, -1, -3, -1, 2, -3, 0, -1, 1, 0, 8, 1, -1, 3, 0, 1, 1, 2, 0, -4, -2, 0, -1, -5, 1, -1, -2, -1, 11, 2, 1, 5, -2, -2, 0, 2, -4, 0, -2, 1, -5, 1, 0, 5, 0, 1, -5, -3, 0, 6, -4, 2, 0, 0, -3, 5, 1, 0, -3, 3, 0, 0, 3, -2, -3, 1, 1, -4, 0, 8, -2, -3, -2, 3, 1, 2, -1, -1, 1, 1, 0, 2, 2, 0, 1, 6, 1, 
-1, 2, 1, 0, 3, 0,-19, 1, -3, -2, 2, 6, 5, -2, -7, -3, 1, 3, 1, -1, -1, 0, 2, -8, -1, -1, -4, 1, 1, -1, 2, 4, 3, 2, 3, -5, 1, 3, 0, 0, 2, -1, 1, -3, 0, 0, 5, -5, -2, 0, 8, -4, -4, -4, 6, 1, 2, 1, 2, 2, 2, -3, 2, 4, 0, -9, 0, 7, 0,-11, 1, 0, 0, 0, -2, 3, 3, -1, -6, 4, 3, -3,-10, -1, 2, 6, 2, 7, -2, -3, 5, -4, 0, 3, -1, -4, 2, 1, -7, 2, -1, -1, 3, 3, 2, 2, 2, -5, -7, -7, -5, 5, 6, 4, 2, -2, -1, 0, 1 }; /* 6x16-entry codebook for intra-coded 8x4 vectors */ DECLARE_ALIGNED(4, static const int8_t, svq1_intra_codebook_8x4)[3072] = { 5, 6, 6, 6, 7, 7, 8, 8, 0, 0, 0, 0, 0, 1, 2, 3, -3, -4, -4, -5, -5, -4, -3, -2, -4, -4, -4, -5, -4, -4, -3, -3, 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 4, 4, 5, 5, 5, -1, 0, 1, 1, 2, 3, 4, 4, -9,-10, -9, -9, -8, -7, -6, -5, -4, -4, -5, -6, -6, -7, -7, -7, 0, -1, -2, -2, -3, -3, -4, -4, 4, 4, 3, 3, 2, 1, 1, 0, 7, 7, 7, 6, 6, 5, 4, 4, 2, 4, 5, 6, 4, 1, -3, -6, 3, 4, 5, 5, 4, 0, -5, -8, 2, 3, 4, 4, 2, -2, -7,-10, 2, 2, 2, 1, 0, -4, -9,-12, -9, -7, -3, 1, 4, 4, 3, 3,-10, -7, -2, 3, 5, 5, 3, 3, -9, -6, -2, 3, 6, 5, 4, 3, -8, -6, -1, 3, 4, 4, 3, 2, -5, -5, -5, -5, -3, 1, 4, 7, -5, -5, -5, -4, -2, 1, 6, 8, -4, -5, -4, -3, -1, 3, 8, 10, -3, -4, -3, -2, 1, 5, 9, 11, -2, -2, -2, -2, -2, -2, -2, -2, -4, -5, -5, -5, -5, -5, -5, -4, -3, -4, -4, -4, -4, -4, -4, -3, 9, 10, 10, 11, 11, 11, 10, 10, 7, 4, 1, -2, -4, -6, -9,-10, 9, 7, 3, 0, -2, -4, -8, -9, 11, 8, 4, 2, 0, -3, -6, -8, 11, 9, 5, 3, 1, -2, -5, -7, -13,-13,-13,-12,-11,-10, -8, -8, 0, 1, 2, 3, 4, 4, 4, 3, 3, 4, 5, 6, 6, 6, 5, 4, 3, 4, 4, 4, 3, 3, 3, 2, 10, 10, 11, 10, 9, 9, 8, 7, 6, 6, 6, 6, 5, 4, 3, 2, 0, 0, 0, -1, -2, -3, -4, -4,-10,-10,-11,-12,-13,-14,-14,-14, 16, 16, 17, 16, 15, 13, 12, 11, -1, -2, -3, -4, -4, -4, -4, -3, -4, -5, -6, -6, -6, -6, -6, -6, -5, -6, -6, -6, -6, -6, -5, -5, -13,-13,-13,-12,-11,-10, -8, -6, -9, -8, -7, -6, -4, -2, 0, 1, -2, -1, 1, 3, 5, 7, 8, 9, 5, 7, 9, 11, 13, 14, 15, 15, 16, 14, 11, 7, 2, -3, -7, -9, 14, 12, 8, 3, -1, -6, -9,-11, 11, 9, 4, 0, -4, -8,-11,-13, 8, 5, 1, -3, 
-6,-10,-12,-14, -18,-15, -9, -3, 1, 6, 9, 11,-17,-13, -7, -1, 3, 7, 11, 12, -15,-11, -5, 1, 5, 9, 12, 13,-13, -9, -3, 2, 5, 9, 11, 13, 22, 21, 19, 15, 10, 3, -4, -9, 20, 18, 15, 9, 2, -5,-12,-17, 16, 13, 8, 1, -7,-14,-20,-24, 10, 6, -1, -8,-15,-21,-25,-27, -25,-23,-20,-14, -7, 1, 9, 14,-23,-21,-16, -9, 0, 9, 16, 21, -20,-16,-10, -1, 8, 16, 22, 25,-15,-11, -3, 6, 14, 20, 25, 27, -4, -2, 0, 1, 2, 2, 2, 2, -5, -2, 0, 2, 3, 3, 3, 3, -6, -4, -1, 1, 2, 3, 3, 3, -7, -5, -2, 0, 1, 1, 2, 2, 2, 1, 1, 1, 1, 0, -2, -3, 3, 3, 2, 1, 0, -1, -3, -4, 4, 3, 2, 1, 0, -2, -4, -6, 5, 4, 3, 1, -1, -3, -5, -6, 5, 6, 6, 4, 2, 0, -2, -3, 3, 4, 4, 4, 3, 1, 0, -1, -2, -2, -1, -1, -1, -1, -2, -2, -5, -4, -3, -2, -2, -2, -3, -3, -1, -1, -1, -1, -1, -1, -1, -1, -3, -4, -4, -4, -3, -3, -3, -3, -1, -1, -1, -1, -1, -1, -1, -2, 5, 6, 6, 6, 6, 5, 4, 3, 4, 4, 4, 4, 4, 5, 6, 7, 0, -1, -1, -1, -1, 0, 1, 2, -2, -3, -3, -3, -3, -2, -1, 0, -3, -3, -4, -4, -4, -3, -2, -1, 0, -2, -4, -4, -2, 0, 2, 3, 0, -2, -3, -3, -1, 2, 4, 5, -1, -2, -4, -3, 0, 3, 5, 6, -2, -3, -4, -3, -1, 2, 4, 5, 9, 4, 0, -3, -3, -1, 0, 1, 8, 4, -1, -4, -3, -1, 1, 2, 6, 2, -3, -5, -4, -2, 0, 1, 5, 1, -3, -4, -4, -2, 0, 1, 5, 3, 1, -1, -4, -8,-10,-10, 3, 3, 2, 1, 0, -2, -3, -4, 1, 1, 1, 2, 3, 2, 1, 0, -1, 0, 1, 2, 3, 4, 3, 2, 0, 1, 2, 2, 1, -1, -3, -3, 0, 1, 1, 1, -1, -2, -4, -3, -3, -3, -3, -3, -3, -3, -1, 2, -4, -4, -3, 0, 3, 7, 12, 14, -5, -5, -6, -6, -6, -6, -6, -5, 2, 2, 2, 1, 0, 0, 0, 0, 4, 4, 3, 2, 1, 0, 0, 0, 6, 6, 5, 4, 2, 2, 1, 1, -7, -7, -6, -3, 0, 4, 7, 8, -1, -2, -3, -3, -2, -1, 1, 2, 3, 3, 1, -1, -2, -2, -2, -1, 6, 6, 4, 2, 0, -2, -2, -2, -6, -5, -2, 2, 5, 9, 11, 12, -4, -4, -2, 0, 2, 4, 5, 6, -3, -2, -2, -2, -2, -1, 0, 1, -2, -2, -2, -3, -3, -3, -3, -2, -7, -3, 1, 3, 3, 0, -3, -5, -6, -2, 3, 5, 4, 1, -3, -5, -5, -1, 4, 6, 5, 2, -3, -4, -4, 0, 5, 7, 6, 3, -1, -3, 0, 0, 0, 0, 0, 0, 0, 0, -2, -2, -3, -3, -3, -3, -2, -1, 6, 7, 8, 9, 9, 8, 7, 6, -4, -4, -5, -5, -6, -6, -5, -4, -9, -8, -6, -4, 0, 3, 6, 6, -5, -4, -1, 3, 5, 6, 5, 
3, 1, 3, 6, 6, 4, 1, -2, -5, 6, 7, 5, 1, -3, -7,-10,-11, 10, 9, 5, 1, -3, -6, -6, -4, 5, 3, -1, -5, -6, -5, -2, 2, -2, -4, -6, -6, -4, 1, 6, 10, -6, -7, -7, -4, 1, 7, 11, 12, 6, 5, 3, 2, 0, 0, 0, 0, 2, 1, -1, -2, -3, -2, -1, -1, 0, -1, -2, -4, -4, -2, -1, 1, 0, 0, -1, -2, -1, 0, 2, 3, 0, -1, -2, -2, -2, -2, -1, -1, 5, 4, 2, 1, 0, 0, 0, 0, 6, 5, 3, 1, 0, 0, 0, 0, 2, 0, -2, -4, -4, -3, -2, -2, -7, -4, 0, 2, 2, 2, 2, 1, -7, -3, 0, 0, 0, 0, 0, 0, -4, -1, 1, 1, 0, 0, 0, 1, -1, 1, 2, 2, 2, 2, 3, 3, -2, 0, 2, 2, 1, 1, 1, 1, -1, 1, 2, 2, 1, 0, 0, -1, 0, 2, 4, 2, 0, -1, -2, -3, 1, 2, 3, 1, -2, -4, -6, -6, 1, 2, 2, 4, 5, 6, 4, 1, 0, -1, -1, -1, 0, 0, -2, -4, 0, 0, -1, -2, -2, -2, -4, -6, 2, 1, 0, 0, 1, 1, -1, -3, 1, 1, 1, 1, 1, 2, 3, 3, 0, 0, 1, 0, 1, 2, 4, 4, -1, -1, -1, -1, 0, 1, 2, 3, -4, -4, -5, -5, -5, -3, -1, 0, -6, -5, -5, -4, -3, -2, -1, -1, -1, 0, 0, 1, 1, 2, 3, 3, 0, 1, 1, 1, 2, 2, 3, 4, 0, 0, -1, -1, 0, 1, 2, 3, 0, 1, 1, 1, 0, 0, -1, -1, 1, 3, 3, 2, 1, -1, -2, -2, -2, 0, 2, 2, 2, 2, 1, 1, -9, -8, -4, -2, 1, 3, 3, 3, -1, -1, -1, -2, -3, -3, -3, -4, 0, 0, 0, -1, -2, -2, -3, -3, 2, 2, 2, 0, -1, -1, -1, -1, 5, 5, 4, 3, 2, 2, 2, 2, 6, 3, -1, -4, -3, -1, 1, 1, 2, -1, -3, -4, -1, 2, 2, 0, -1, -2, -2, 1, 4, 4, 1, -3, -2, -1, 1, 4, 6, 3, -3, -8, 3, 3, 2, 1, -1, -2, -2, -2, -4, -4, -2, -1, 1, 3, 4, 4, -4, -5, -5, -4, -2, 0, 2, 2, 7, 7, 4, 1, -1, -2, -3, -2, -1, 1, 3, 0, -4, -6, 0, 6, -2, 1, 4, 1, -4, -6, -1, 7, -3, 1, 4, 2, -3, -6, -1, 6, -2, 0, 3, 2, -2, -5, -1, 4, 1, -1, -2, 1, 4, 4, -1, -7, 1, -1, -4, -1, 5, 6, 0, -6, 3, 0, -4, -3, 3, 6, 2, -4, 3, 0, -5, -4, 1, 4, 1, -3, 2, 2, 3, 3, 3, 3, 2, 2, -4, -5, -6, -7, -7, -7, -7, -6, 1, 2, 3, 3, 3, 3, 2, 2, 0, 0, 1, 1, 1, 2, 2, 1, 3, -3, -3, 3, 4, -2, -2, 2, 3, -4, -4, 4, 4, -4, -4, 2, 4, -4, -4, 4, 4, -4, -3, 3, 3, -3, -4, 3, 3, -3, -3, 3, -2, -2, -2, -2, -2, -2, -1, -1, 6, 7, 8, 8, 8, 7, 6, 5, -5, -6, -7, -7, -8, -7, -6, -5, 1, 1, 2, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, -1, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0, -2, -3, -2, -2, -2, -3, -3, 
-3, 2, 3, 5, 6, 4, 2, 1, 0, 8, 6, 2, 0, 0, 0, 0, 0, 4, 1, 0, 0, 0, -1, -1, -1, 1, -1, 0, 0, 0, -1, -2, -3, -2, -2, -1, 0, 0, -2, -4, -5, 3, 1, -1, -2, -3, -4, -5, -5, 2, 1, 0, 0, 1, 1, 0, 0, 0, -1, -1, 0, 2, 2, 2, 2, -1, -2, -1, 1, 2, 2, 2, 2, 0, -1, -2, -1, -1, -1, -1, 0, -1, -2, -2, -1, -1, 0, 0, 1, 2, 1, 1, 2, 2, 1, 1, 0, 6, 5, 3, 1, 0, -2, -4, -4, -3, -2, -1, 0, 1, 1, 0, -1, 0, 1, 3, 4, 5, 5, 3, 1, -1, -1, -1, 0, 1, 0, -1, -2, -2, -2, -2, -1, 0, -1, -2, -3, 0, -1, -2, -2, -1, -1, 0, 2, 1, -1, -2, -1, -1, -1, 0, 2, 1, 0, -2, -2, -2, -2, 1, 5, 1, -1, -2, -2, -2, 0, 5, 10, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, 0, 0, 0, 1, 2, 1, 2, 2, 3, 4, 4, 6, 5, -3, -3, -3, -2, -2, -3, -3, -3, 1, -1, -2, -2, 0, 3, 5, 7, 2, 0, -2, -3, -2, 0, 2, 3, 3, 1, -2, -3, -3, -2, -1, -1, 3, 1, 0, -1, -1, -1, -1, -1, 1, 3, 5, 4, 2, -1, -3, -4, -3, -2, 1, 2, 1, 0, -1, -2, -5, -3, 0, 2, 2, 1, 0, 0, -3, -1, 1, 2, 2, 1, 0, 0, 0, -1, -1, -1, 1, 2, 3, 4, -3, -4, -4, -3, -1, 0, 0, 1, -2, -3, -2, -1, 1, 1, 1, 1, -2, -2, 0, 3, 4, 4, 3, 2, -4, -4, -3, -2, -1, 1, 2, 3, 0, 1, 1, 1, -1, -2, -3, -3, 3, 4, 5, 4, 2, -1, -3, -3, -2, -2, 0, 2, 2, 2, 1, 0, -4, 0, 5, 7, 4, -1, -4, -4, -1, 2, 4, 3, 0, -3, -3, -2, 2, 1, 0, -1, -2, -2, 0, 1, 0, 0, -1, -2, -2, -1, 1, 2, -4, -3, -2, -1, 0, 1, 2, 2, 10, 9, 5, 0, -3, -4, -3, -2, 1, -1, -2, -2, -1, 0, 0, 0, -2, -2, -1, 1, 1, 1, 0, -1, -5, -3, 0, 3, 4, 2, 0, -2, -2, -1, 0, 1, 1, 0, -1, -1, 3, 2, -1, -2, -2, -1, 1, 1, 7, 5, -1, -5, -6, -2, 2, 4, -2, 3, 3, -3, -4, 1, 2, -2, -3, 3, 4, -3, -4, 2, 3, -2, -3, 3, 4, -3, -4, 2, 3, -2, -4, 2, 4, -2, -3, 1, 2, -1, 4, 3, -1, -3, -3, -1, 1, 2, -4, -6, -4, 0, 4, 5, 4, 1, 0, 2, 5, 6, 2, -3, -5, -4, 1, 1, -1, -3, -5, -2, 2, 4, -1, 0, 1, 2, 2, 3, 3, 4, -1, 0, 1, 1, 0, -1, -1, -1, -1, 0, 1, 2, 2, 1, -1, -2, -3, -2, -1, 0, 0, -1, -2, -3, 1, 1, 1, 1, 0, 0, 1, 2, 1, 0, -1, 0, 0, 1, 1, 0, 1, -2, -4, -1, 1, 2, 1, 0, 1, -4, -7, -3, 1, 3, 2, 1, 1, 1, 1, 1, 1, 1, 0, -1, 1, 1, 1, 0, 1, 2, 2, 0, 1, 1, 0, 0, 0, 2, 0, -3, 3, 2, 0, -1, -1, -2, -6, 
-9, 0, 0, 0, 1, 0, 0, 1, 2, 1, 0, 0, 0, -1, -1, 0, 2, 0, 1, 1, 1, -1, -3, -2, 0, -7, -5, 1, 6, 6, 2, -1, -1, 3, 1, -1, -3, -4, -2, 1, 4, 2, 0, -2, -3, -4, -3, -1, 2, 2, 2, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, -1, 1, 1, -2, -5, -6, -4, -1, -1, 1, 4, 3, 2, 0, 1, 2, -1, 0, 2, 3, 1, 0, 0, 1, -1, 0, 1, 0, 0, -1, -1, 0, 0, 1, 2, 2, 0, -2, -1, 1, -2, -1, -1, -2, -1, 2, 6, 8, -1, -1, -2, -3, -2, 0, 1, 2, -1, 0, 0, -1, -1, 0, -1, -1, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, -1, -1, 1, -1, 0, 2, 2, -1, -3, -2, 3, 0, 2, 3, 0, -5, -7, -2, 4, -1, 0, 0, 0, -1, -2, -3, -3, -1, 0, -1, -2, -2, -2, -2, -2, 1, 1, 0, 0, 1, 2, 0, -1, 1, 2, 1, 2, 5, 6, 2, 0, -2, -4, -3, 0, 2, 2, 0, -3, 3, 1, 0, 1, 2, 1, -2, -3, 3, 1, 0, 0, 0, 0, 0, -1, 1, -1, -2, -2, -1, 1, 3, 3, 3, 2, 1, 2, 4, 3, 1, -2, -2, -4, -4, -3, -1, 0, -2, -3, 1, 0, -1, -1, 0, 1, 0, -1, 3, 2, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 2, 3, 3, 2, 2, 2, 1, 1, 0, -1, -2, -3, -5, -5, -5, -4, 1, 1, 0, -1, 0, 1, 3, 3, -9, -6, -2, 0, 1, 1, 2, 2, -6, -2, 1, 2, 1, 1, 0, 1, -2, 1, 2, 2, 1, 1, 1, 1, 0, 2, 2, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, -1, -3, -2, 0, -3, -3, -3, -2, -1, 3, 7, 9, 1, 2, 2, 2, 0, -2, -4, -3, 2, 0, -2, -1, 3, 4, -1, -6, 1, 0, -2, -3, -1, 3, 3, 0, 0, 3, 3, 0, -2, -1, 1, 1, -6, -1, 3, 2, -1, -2, 0, 1, 5, 3, 0, -2, -3, 0, 2, 1, 1, 1, 2, 2, 0, -2, -4, -7, -3, -2, 1, 2, 2, 1, -1, -4, 2, 2, 0, -2, -2, 0, 2, 2, 0, 0, -2, -3, -2, -1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, -2, -1, 0, 1, 0, 1, 2, 3, -4, -2, 0, 0, -1, 0, 2, 3, -2, -2, -2, -1, -1, 0, 2, 4, 0, 0, 0, 0, -1, -1, 0, 1, 0, -1, -1, -1, -1, -1, 0, 0, 6, 4, 2, 0, -1, -2, -1, -1, 0, 1, 1, 1, 1, -1, -5,-10, 1, 1, 1, 1, 1, 1, 0, -4, 1, 0, 1, 1, 1, 1, 1, -1, 2, 1, 1, 1, 0, 0, 0, 0, -3, 1, 4, 3, 3, 1, -1, 0, -4, 0, 1, 0, -1, 0, 0, 0, -5, 0, 2, 1, 1, 1, 0, -1, -1, 2, 1, -2, -2, -1, 0, -1, 2, 4, 5, 3, 0, -1, 1, 2, 0, 0, 1, 0, -2, -2, -1, -1, -2, -2, -2, -2, -3, -2, -1, 0, 0, 0, 1, 0, 0, 0, 1, 2, 0, -2, -2, -3, -1, 2, 2, -1, 1, 0, 0, 0, 1, 5, 3, -2, -1, -1, 0, -1, 0, 2, 
0, -5, -1, 0, 1, 0, 0, 2, 2, -2, 3, 1, -1, -1, 0, 1, 1, 2, 1, 0, 0, 1, 1, 1, 1, 1, -10, -8, -2, 1, 2, 1, 1, 1, -1, 1, 2, 1, 0, 0, 0, 0, -1, -1, 0, 1, 2, 2, 2, 1, -1, -1, -1, 0, -1, -3, -5, -4, 1, 1, 2, 1, 1, 0, 0, 2, -1, -2, -1, -1, -1, 0, 2, 4, -3, -7, -5, 0, 2, 0, 0, 0, 3, -1, -2, 1, 2, 1, 1, 2, 1, -2, -1, 1, 2, 1, 0, 1, 0, -1, 0, 3, 2, -1, -1, -1, 2, 1, 1, 0, 0, 0, 0, 0, -9, -7, -2, 3, 3, 2, 1, 1, 3, 2, 0, -2, -2, -1, 1, 1, 0, -1, 0, 0, 1, 1, 0, 0, -2, -1, 1, 1, 1, 0, 0, 0, 1, 2, 1, -2, -4, -3, 1, 2, 1, 2, 1, -2, -3, 0, 3, 1, -1, -1, 0, 0, 1, 3, 0, -4, 2, 0, -1, 1, 2, -2, -2, 3, 2, 0, -1, 2, 3, -2, -4, 1, 0, 1, 1, 1, 2, -2, -6, -2, -1, 0, 0, 0, 2, 0, -2, -1, -1, -1, 1, 2, 1, -2, -3, -2, 3, -1, -2, -1, -1, 0, 1, 2, 10, 4, 0, 0, -1, -2, -2, -1, 3, -1, -2, -1, 0, -1, -1, 0, -5, 2, 7, 1, -4, -2, 1, 0, -2, 2, 3, -1, -3, 0, 2, 0, 2, 1, 0, 0, 1, 1, -1, -2, 1, -2, -2, -1, -1, -2, 0, 0, 0, 3, -2, -7, -1, 3, 0, 0, 1, 3, -3, -5, 2, 3, -1, 0, 0, 2, -2, -2, 4, 2, -2, 0, -1, 1, -1, 0, 2, -1, -2, 1, 4, 0, -3, -4, -2, 1, 2, 1, 0, 0, 3, 5, 3, 1, -1, -2, 1, 1, 1, -1, -3, -1, 1, 1, 1, -1, -2, -2, 0, 0, -1, -2 }; /* 6x16-entry codebook for intra-coded 8x8 vectors */ DECLARE_ALIGNED(4, static const int8_t, svq1_intra_codebook_8x8)[6144] = { 4, 4, 3, 2, 2, 1, 0, -1, 4, 3, 3, 2, 1, 0, -1, -1, 3, 3, 2, 2, 1, 0, -1, -2, 3, 2, 2, 1, 0, -1, -2, -3, 2, 2, 1, 0, -1, -1, -2, -3, 2, 1, 0, 0, -1, -2, -3, -4, 1, 0, 0, -1, -2, -3, -4, -4, 0, 0, -1, -2, -2, -3, -4, -4, 2, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3, 1, 2, 2, 2, 2, 2, 2, 2, 0, 1, 1, 1, 1, 1, 1, 1, -1, 0, 0, 0, 0, 0, 1, 1, -2, -2, -1, -1, -1, -1, -1, -1, -3, -3, -3, -3, -3, -3, -2, -2, -5, -4, -4, -4, -4, -4, -4, -3, -4, -2, -1, 0, 1, 2, 2, 3, -4, -2, -1, 0, 1, 2, 3, 3, -4, -3, -1, 0, 1, 2, 3, 3, -4, -3, -1, 0, 1, 2, 3, 3, -5, -3, -1, 0, 1, 2, 3, 3, -5, -3, -1, 0, 1, 2, 3, 3, -5, -3, -1, 0, 1, 1, 2, 3, -5, -3, -2, -1, 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 2, 2, 2, 3, 3, 4, 4, 4, 0, 0, 0, 0, 1, 1, 1, 2, -2, -2, -2, -2, -1, -1, -1, 0, 
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -2, -2, -2, -2, 5, 3, 1, -1, -2, -3, -3, -3, 5, 3, 1, -1, -2, -3, -3, -3, 5, 3, 1, -1, -2, -3, -3, -3, 5, 3, 1, -1, -2, -3, -3, -3, 5, 4, 1, 0, -2, -3, -3, -3, 6, 4, 2, 0, -2, -2, -3, -3, 6, 4, 2, 0, -1, -2, -2, -3, 6, 4, 2, 1, -1, -2, -2, -2, -1, 1, 3, 3, 2, 0, -3, -6, -1, 1, 3, 4, 3, 0, -3, -6, -1, 1, 4, 4, 3, 1, -3, -6, -1, 1, 3, 4, 3, 1, -3, -6, -2, 1, 3, 4, 3, 1, -3, -6, -2, 1, 3, 4, 3, 1, -3, -7, -2, 1, 3, 3, 2, 0, -3, -7, -2, 0, 2, 3, 2, 0, -3, -6, 10, 9, 8, 6, 6, 5, 4, 4, 6, 5, 4, 3, 2, 2, 2, 1, 2, 1, 0, -1, -2, -2, -2, -1, -1, -2, -3, -4, -4, -4, -4, -3, -2, -3, -4, -4, -5, -4, -4, -3, -2, -2, -3, -3, -3, -3, -2, -2, -1, -1, -1, -1, -1, -1, -1, 0, 1, 1, 1, 1, 1, 1, 1, 2, -2, -1, 1, 2, 4, 5, 7, 8, -3, -2, 0, 1, 3, 5, 7, 8, -4, -3, -1, 0, 2, 4, 6, 7, -5, -4, -2, -1, 1, 3, 5, 7, -6, -5, -3, -2, 0, 2, 4, 6, -6, -5, -4, -2, -1, 1, 3, 5, -7, -6, -5, -3, -2, 0, 2, 3, -8, -7, -5, -4, -3, -1, 1, 2, 11, 9, 7, 5, 3, 1, -1, -1, 10, 8, 6, 3, 1, 0, -2, -2, 9, 7, 5, 2, 0, -2, -3, -4, 8, 6, 3, 1, -1, -3, -4, -4, 6, 4, 2, -1, -3, -4, -5, -5, 5, 3, 0, -2, -4, -5, -6, -6, 3, 1, -1, -3, -5, -6, -7, -7, 2, 0, -2, -4, -6, -6, -7, -7, 5, 6, 7, 7, 7, 8, 8, 8, 3, 4, 5, 5, 6, 6, 6, 6, 0, 2, 2, 3, 4, 4, 4, 5, -2, -1, 0, 1, 2, 2, 3, 3, -4, -3, -2, -1, 0, 1, 1, 2, -6, -5, -4, -3, -2, -2, -1, 0, -8, -7, -6, -6, -5, -4, -3, -3,-10, -9, -8, -8, -7, -6, -6, -5, 6, 5, 3, 1, -1, -3, -6, -8, 6, 5, 4, 2, -1, -3, -6, -8, 6, 5, 4, 2, 0, -3, -6, -8, 6, 5, 4, 2, 0, -3, -6, -8, 6, 6, 4, 2, 0, -3, -6, -8, 6, 5, 4, 2, 0, -3, -6, -8, 6, 5, 4, 2, 0, -3, -6, -8, 6, 5, 4, 2, -1, -3, -5, -8, 11, 10, 9, 8, 7, 6, 5, 4, 8, 8, 7, 6, 5, 4, 3, 2, 6, 5, 4, 4, 2, 2, 1, 0, 3, 3, 2, 1, 0, 0, -1, -2, 1, 1, 0, -1, -2, -2, -3, -3, -1, -1, -2, -3, -4, -4, -5, -5, -3, -4, -4, -5, -6, -6, -7, -7, -5, -5, -6, -7, -8, -8, -8, -8, -14,-13,-12,-11, -9, -7, -6, -4,-12,-11,-10, -9, -7, -5, -3, -1, -10, -9, -7, -6, 
-3, -2, 0, 2, -8, -6, -4, -2, 0, 2, 4, 5, -5, -3, 0, 2, 4, 5, 7, 8, -2, 0, 2, 4, 6, 8, 9, 10, 0, 3, 5, 7, 8, 10, 11, 12, 3, 5, 7, 8, 10, 11, 12, 12, -19,-19,-18,-18,-17,-16,-15,-14,-15,-15,-14,-13,-12,-11,-10, -9, -11,-10, -9, -8, -6, -5, -4, -3, -6, -5, -3, -2, -1, 0, 1, 2, -1, 0, 2, 3, 4, 5, 6, 6, 4, 6, 7, 8, 9, 10, 10, 10, 9, 10, 11, 12, 13, 14, 14, 14, 12, 14, 14, 15, 16, 16, 16, 16, 22, 21, 19, 17, 14, 11, 9, 5, 20, 19, 17, 14, 11, 8, 4, 1, 17, 15, 13, 10, 6, 3, 0, -4, 13, 11, 8, 5, 1, -2, -5, -9, 9, 6, 3, -1, -4, -7,-11,-13, 4, 0, -3, -6, -9,-12,-15,-17, -2, -5, -8,-11,-14,-16,-18,-20, -8,-10,-13,-16,-17,-19,-21,-22, 17, 18, 18, 18, 17, 16, 16, 14, 16, 16, 15, 15, 14, 13, 12, 11, 12, 12, 11, 10, 9, 8, 7, 5, 7, 6, 6, 4, 3, 2, 1, -1, 1, 0, -1, -2, -3, -4, -5, -6, -5, -6, -7, -8, -9,-10,-11,-12, -11,-12,-13,-14,-15,-16,-16,-17,-16,-17,-17,-18,-19,-20,-20,-20, 0, 0, 0, 0, -1, -1, -2, -3, 1, 0, 0, 0, 0, -1, -2, -3, 1, 1, 0, 0, -1, -1, -2, -2, 1, 1, 1, 0, 0, -1, -1, -2, 2, 1, 1, 1, 0, -1, -1, -2, 2, 2, 1, 1, 0, 0, -1, -2, 2, 2, 1, 1, 1, 0, -1, -1, 2, 2, 1, 1, 1, 0, 0, -2, 0, -1, -1, 0, 0, 1, 2, 3, 0, -1, -1, 0, 1, 1, 2, 2, -1, -1, -1, -1, 0, 1, 2, 2, -1, -1, -2, -1, 0, 1, 1, 2, -1, -2, -2, -1, 0, 0, 1, 2, -1, -2, -2, -2, -1, 0, 1, 2, -1, -1, -2, -1, 0, 0, 1, 2, -1, -1, -1, -1, 0, 1, 1, 2, 3, 2, 2, 2, 1, 1, 0, 0, 3, 2, 2, 2, 2, 1, 0, 0, 2, 2, 2, 1, 1, 1, 0, 0, 2, 2, 1, 1, 1, 0, 0, -1, 1, 1, 1, 0, 0, 0, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1, -2, -2, -2, -2, -2, -2, -2, -2, -2, -3, -3, -3, -2, -2, -2, -2, 5, 2, 0, 0, -1, 0, 0, 0, 4, 2, 0, -1, -1, -1, 0, -1, 4, 1, -1, -1, -2, -1, -1, -1, 4, 1, -1, -1, -2, -1, -1, -1, 4, 1, -1, -2, -2, -1, -1, -1, 4, 1, -1, -2, -2, -1, -1, -1, 4, 1, -1, -1, -1, -1, -1, -1, 4, 2, 0, -1, 0, 0, 0, -1, -2, -1, 0, 1, 1, 1, 1, 1, -3, -1, 0, 1, 1, 1, 1, 1, -3, -1, 0, 1, 1, 1, 1, 1, -3, -1, 0, 1, 1, 1, 1, 1, -3, -2, 0, 1, 2, 2, 1, 1, -4, -2, 0, 1, 2, 2, 2, 2, -5, -3, -1, 1, 1, 2, 1, 2, -5, -3, -2, 0, 1, 1, 1, 1, 3, 3, 1, 0, -2, -4, -4, -5, 3, 3, 
2, 0, -1, -2, -3, -4, 2, 2, 1, 1, 0, -1, -2, -2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, -2, -1, -1, 0, 0, 1, 2, 2, -3, -2, -2, -1, 0, 1, 2, 3, -3, -3, -2, -1, 0, 1, 2, 3, -3, -3, -3, -3, -3, -2, -2, -2, -3, -3, -2, -2, -2, -1, -1, -1, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1, 1, 1, 2, 2, 3, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3, -8, -7, -5, -3, -2, -1, 0, -1, -4, -3, -1, 0, 1, 2, 1, 1, -1, 1, 2, 3, 3, 2, 2, 1, 1, 2, 3, 3, 2, 2, 1, 0, 2, 3, 3, 2, 1, 0, 0, -1, 1, 2, 1, 0, -1, -1, -1, -1, 1, 1, 0, -1, -1, -2, -2, -1, 1, 1, 0, 0, -1, -1, 0, -1, -4, -3, -2, 0, 1, 2, 3, 3, -4, -3, -2, 0, 1, 2, 2, 2, -3, -3, -2, -1, 0, 1, 1, 1, -2, -2, -2, -1, -1, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, 2, 1, 1, 0, 0, -1, -1, -2, 3, 3, 3, 1, 0, -1, -2, -2, 5, 4, 4, 2, 1, 0, -1, -2, 0, 0, 0, 0, 1, 2, 3, 3, 0, -1, 0, 0, 1, 2, 3, 3, 0, -1, 0, 0, 1, 2, 3, 2, 0, 0, 0, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 1, 0, 0, -1, -2, 2, 1, 0, 0, -2, -3, -5, -6, 0, -1, -1, -3, -5, -6, -8, -9, -2, 0, 1, 2, 2, 1, -1, -4, -2, 0, 2, 2, 2, 1, -1, -4, -2, 0, 2, 2, 2, 1, -1, -3, -2, 0, 2, 2, 2, 1, -1, -3, -2, -1, 2, 2, 2, 1, -1, -3, -2, -1, 1, 2, 2, 1, -1, -3, -3, -1, 1, 2, 2, 1, -1, -3, -2, -1, 1, 2, 2, 1, -1, -3, -1, 1, 1, -1, -3, -3, 0, 4, -1, 1, 1, -1, -3, -3, 0, 4, -1, 1, 1, 0, -3, -3, 0, 4, -1, 1, 2, 0, -3, -3, 0, 5, 0, 1, 2, 0, -3, -4, 0, 4, 0, 1, 2, 0, -3, -4, 0, 5, 0, 1, 2, 0, -3, -3, 0, 4, 0, 1, 2, -1, -2, -2, 0, 4, 6, 6, 5, 6, 5, 5, 5, 5, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, 2, 2, 2, 2, 2, 2, 2, 0, 1, 1, 0, 0, 0, 0, 0, -1, -2, -2, -2, -2, -2, -2, -1, -3, -3, -3, -3, -3, -3, -3, -2, -3, -4, -4, -3, -3, -3, -2, -2, -2, -2, -2, -2, -1, -1, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 4, 1, -2, -3, -3, -1, 1, 3, 4, 1, -2, -4, -3, -1, 1, 3, 5, 1, -2, 
-4, -3, -1, 1, 4, 5, 1, -2, -3, -3, -1, 2, 4, 5, 1, -2, -3, -3, -1, 2, 4, 4, 0, -3, -4, -3, -1, 2, 4, 4, 0, -3, -3, -3, -1, 1, 3, 3, 0, -2, -3, -2, -1, 1, 3, -3, -4, -4, -4, -4, -4, -4, -4, -1, -1, -1, -1, -1, -1, -2, -2, 2, 1, 1, 2, 2, 1, 1, 1, 3, 3, 3, 4, 4, 3, 3, 3, 3, 3, 3, 4, 4, 4, 3, 3, 1, 2, 1, 2, 2, 2, 2, 2, -2, -2, -2, -1, -1, -1, 0, 0, -4, -4, -4, -4, -3, -3, -3, -3, -1, -2, -3, -3, -2, -2, -1, 0, 0, -1, -2, -2, -2, -1, 0, 1, 2, 1, -1, -1, -1, -1, 0, 1, 3, 1, 0, -1, -1, 0, 0, 1, 3, 2, 0, -1, 0, 0, 0, 1, 3, 1, 0, -1, 0, 0, 0, 1, 3, 1, 0, -1, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 2, 3, 4, 0, 0, -1, 0, 0, 0, 2, 3, 0, -1, -1, -1, -1, -1, 0, 1, 0, -1, -1, -1, -1, -1, -1, 0, 0, 0, -1, -1, -1, -2, -2, -1, 1, 0, 0, -1, -1, -2, -2, -1, 2, 2, 1, 0, -1, -1, -1, -1, 3, 3, 2, 1, 0, -1, -1, 0, 1, 0, 1, 0, 0, -1, -2, -1, 0, 0, 0, 0, -1, -1, -2, -1, 0, -1, 0, 0, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, -1, -1, -1, 0, 0, 0, 1, 1, -1, -1, -1, 0, 1, 1, 2, 3, -2, -2, -1, 0, 1, 2, 3, 4, -2, -2, -1, 0, 1, 2, 4, 5, -3, -1, 1, 0, 0, -1, 0, 1, -3, 0, 1, 0, -1, -1, 0, 2, -3, 0, 1, 0, -1, -1, 0, 2, -2, 1, 2, 0, -1, -1, 0, 2, -2, 1, 2, 0, -1, -1, 0, 2, -2, 1, 2, 0, -1, -1, 0, 2, -1, 2, 2, 0, -1, -1, 0, 2, -1, 1, 1, 0, -1, -1, -1, 1, -2, -2, -1, 1, 3, 4, 3, 1, -2, -2, -1, 0, 2, 3, 2, 0, -2, -2, -1, 0, 1, 2, 1, -1, -1, -1, -1, 0, 1, 2, 1, -1, -1, -1, -1, 0, 1, 1, 0, -2, 0, -1, -1, 0, 1, 1, 0, -1, 0, -1, -1, 0, 1, 1, 1, -1, 0, -1, -1, 0, 0, 1, 0, -1, -2, -1, 0, 1, 1, 1, 1, 1, -2, -1, 0, 0, 0, 0, 0, 0, -2, -1, -1, 0, -1, -1, -2, -2, -2, -1, -1, -1, -1, -2, -2, -3, -1, 0, 1, 1, 0, -1, -2, -2, 1, 2, 3, 3, 2, 1, 0, 0, 1, 2, 3, 3, 3, 2, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, -1, -1, -1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 0, 0, 1, 1, 0, 0, 0, -3, -2, -1, -1, -1, -1, 0, -1, -5, -5, -4, -3, -2, -2, -2, -1, 1, 1, 1, 1, 2, 1, 0, -1, 1, 1, 1, 2, 1, 1, 0, -1, 1, 1, 1, 1, 1, 1, 0, -2, 2, 1, 1, 1, 1, 1, 0, -2, 1, 
1, 0, 0, 0, 0, -1, -3, 1, 1, 0, 0, 0, -1, -2, -3, 1, 1, 0, 0, -1, -1, -2, -4, 1, 0, 0, -1, -2, -2, -3, -4, 8, 7, 5, 3, 2, 1, 1, 1, 2, 1, 0, 0, -1, -1, -2, -1, -1, -1, -1, -2, -2, -2, -2, -1, -1, -1, -1, -1, 0, -1, -1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, -1, -1, -2, -2, -1, -1, -1, -2, -2, -1, 9, 4, 0, -2, -2, -2, -1, -1, 7, 2, -1, -2, -2, -1, 0, 0, 4, 0, -2, -2, -1, 0, 1, 1, 1, -2, -2, -2, -1, 0, 1, 1, -1, -2, -2, -1, 0, 1, 1, 1, -1, -2, -1, 0, 1, 1, 1, 0, -1, -1, 0, 1, 1, 1, 0, -1, 0, -1, 0, 1, 0, 0, -1, -1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 2, 2, 2, 1, 0, 0, 0, 2, 2, 2, 2, 1, 0, -1, -1, 1, 1, 1, 0, -1, -2, -2, -2, 0, 0, 0, -1, -2, -3, -2, -2, -1, -1, -1, -2, -2, -2, -1, 0, -1, -1, -1, -1, 0, 0, 1, 2, -1, -1, -1, 0, 1, 2, 3, 4, -1, -1, 0, 0, -1, -2, -3, -3, -1, -1, 0, 0, 0, -1, -1, -1, -2, -2, -1, 0, 1, 1, 1, 1, -2, -2, -2, 0, 1, 2, 3, 3, -1, -1, -1, 0, 1, 3, 3, 3, 1, 0, 0, 0, 1, 1, 2, 2, 2, 2, 1, 0, 0, -1, -1, -1, 3, 2, 1, 0, -1, -2, -3, -3, -1, -1, -1, -2, -2, -3, -4, -5, 0, 0, 0, -1, -1, -3, -3, -4, 1, 1, 1, 0, 0, -1, -2, -3, 2, 2, 2, 1, 1, 0, -1, -1, 2, 2, 2, 2, 1, 1, 0, -1, 2, 2, 2, 2, 2, 1, 0, 0, 1, 1, 2, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, -1, -2, 2, 3, 1, -1, 1, 1, -1, -3, 2, 3, 0, -1, 1, 1, -1, -3, 2, 3, 0, -1, 1, 1, -1, -4, 2, 3, 0, -1, 1, 1, -2, -4, 1, 3, 0, -1, 1, 1, -2, -4, 1, 3, -1, -2, 1, 1, -2, -3, 1, 2, 0, -1, 1, 1, -2, -3, 1, 2, 0, -1, 1, 1, -1, -1, -1, -1, -2, -2, -2, -2, -2, 1, 1, 1, 1, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0, 1, 1, 1, 2, 2, 2, -2, -2, -1, -1, -1, 0, 0, 0, -3, -3, -3, -3, -3, -3, -3, -2, -1, -1, -1, -1, -2, -2, -2, -2, 4, 4, 4, 4, 4, 3, 3, 2, -3, -3, -2, -1, 0, 1, 2, 5, -3, -3, -3, -2, -1, 1, 3, 6, -3, -3, -2, -2, 0, 2, 3, 5, -3, -2, -2, -2, 0, 1, 3, 5, -2, -2, -2, -1, -1, 1, 3, 5, -2, -2, -1, -1, 0, 1, 2, 4, -1, -1, -1, -1, 0, 1, 1, 4, -1, -1, -1, -1, 0, 1, 2, 3, 0, -1, 0, 1, 1, 0, -1, -1, 0, 0, 0, 1, 2, 0, -1, -1, 1, 0, -1, 0, 1, 0, 0, 0, 1, -1, -2, -1, 0, 0, 0, 0, 1, -2, -3, -1, 0, 0, 0, 1, 1, -1, 
-3, -2, 0, 1, 1, 2, 1, -1, -2, -1, 0, 1, 1, 2, 2, 0, -1, 0, 1, 1, 2, 2, 1, 1, 1, 1, 0, 0, 1, 2, -1, 0, 0, -1, 0, 0, 0, 1, -3, -2, -1, -1, -1, 0, 1, 1, -4, -2, -1, 0, 0, 1, 1, 1, -3, -2, 0, 0, 1, 1, 1, 1, -3, -1, 0, 1, 1, 1, 0, 0, -1, 0, 1, 1, 1, 0, 0, -1, 0, 1, 2, 2, 1, 0, 0, -1, -4, -4, -4, -3, -2, -1, -1, -1, -2, -2, -2, -1, 0, 0, 0, 0, -1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 2, 2, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, -1, 0, 0, 1, 1, 1, 0, 0, 1, 2, 2, 2, 1, -1, -2, -4, 1, 1, 2, 2, 1, 0, -2, -4, 0, 1, 1, 1, 1, 0, -1, -3, -1, 0, 1, 1, 0, 0, -1, -2, -1, 0, 1, 1, 1, 0, 0, -1, -2, -1, 0, 0, 0, 0, 0, -1, -1, -1, 0, 1, 1, 0, 0, 0, -1, 0, 1, 1, 1, 1, 1, 0, 2, 2, 0, -1, -2, -1, -1, -2, 1, 1, -1, -2, -2, -1, -1, -2, 1, 1, -1, -2, -2, 0, 0, -1, 1, 1, 0, -2, -1, 1, 1, 0, 1, 1, 0, -1, -1, 1, 2, 1, 1, 1, 0, -1, -1, 1, 2, 1, 1, 1, 0, -1, -1, 1, 1, 1, 1, 1, 0, -1, 0, 1, 1, 1, 0, 0, -1, -2, -4, -4, -4, -4, 3, 3, 3, 2, 1, 0, 0, 0, 3, 3, 3, 3, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 1, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, 0, -1, -1, 0, -1, -1, 1, 2, -1, 1, 1, 0, 0, 0, 2, 3, -1, 1, 1, 0, -1, -1, 1, 3, -1, 1, 1, 0, -2, -2, 0, 1, -2, 1, 0, 0, -2, -2, 0, 1, -3, 0, 0, 0, 0, -1, 1, 1, -3, 0, 1, 1, 0, 1, 2, 1, -3, -1, 0, 1, 1, 1, 2, 1, -4, -4, -3, 0, 1, 1, 1, 0, 0, -4, -2, 0, 1, 1, 1, 0, -1, -3, -1, 1, 1, 1, 0, -1, -1, -1, 1, 1, 1, 1, 0, -1, 0, 1, 2, 2, 1, 0, -1, 0, 0, 2, 2, 1, 0, -1, -1, 0, 1, 2, 1, 0, -1, -2, -1, 0, 1, 2, 2, 0, -1, -2, -1, 1, 1, 1, 1, 0, 0, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1, 1, 0, 0, -1, -1, -1, -1, -1, 2, 1, 0, 0, -1, -1, -1, -1, 5, 3, 2, 1, 0, 0, 0, 0, 6, 5, 3, 2, 1, 0, 0, 0, 4, 4, 3, 1, 0, 0, 0, 1, 3, 3, 2, 1, 0, 0, 0, 1, 2, 2, 1, 0, -1, -1, 0, 1, 0, 0, 0, -1, -1, -1, 0, 1, 0, 0, -1, -1, -2, -1, 0, 2, 0, -1, -1, -2, -2, -2, 0, 1, 0, -1, -1, -2, -2, -2, -1, 0, 0, 0, -1, -2, -2, -2, -1, 0, 0, 0, -1, 
-1, -1, 0, 2, 3, 0, -1, -2, -2, -1, -1, 1, 2, 1, 0, -1, -1, -1, 0, 0, 0, 1, 1, 1, 0, 0, 0, -1, -1, 1, 2, 1, 0, 0, -1, -1, -1, -1, 0, 0, 0, -1, -1, -1, -1, -3, -2, -1, -1, 0, 1, 1, 2, -4, -3, -1, 1, 2, 3, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, -1, 0, 0, 0, 1, -1, -1, -2, -2, -2, -1, -1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 1, 1, 1, 1, 2, 2, 1, 1, -4, -3, -4, -4, -4, -4, -3, -3, -1, 0, 1, 2, 2, 3, 3, 3, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, -1, -2, -2, -3, -3, -2, 3, 2, 1, 0, -1, -2, -2, -2, 4, 3, 2, 1, 1, 0, 0, 0, 2, 2, 1, 1, 0, 1, 1, 1, 0, -1, -1, -1, -1, 0, 0, 1, -2, -2, -2, -2, -2, -1, 0, 0, 1, -1, 0, 2, 1, -2, -1, 1, 1, -1, 0, 2, 1, -2, -2, 1, 1, -1, 0, 3, 2, -2, -1, 1, 0, -2, 0, 3, 2, -2, -2, 1, 0, -2, 0, 3, 2, -2, -2, 1, 0, -2, 0, 3, 1, -2, -1, 1, 0, -2, 0, 2, 1, -2, -2, 1, 0, -1, 0, 2, 1, -2, -1, 1, 0, 1, 2, 2, 3, 3, 2, 2, 0, 1, 1, 2, 3, 3, 2, 1, 0, 0, 1, 2, 2, 2, 2, 1, -1, 0, 0, 1, 1, 1, 1, 1, -1, -1, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, -1, -2, -2, -2, -2, -2, -2, -2, -1, -2, -2, -2, -2, -2, -2, -2, -1, 0, 0, -1, -2, -1, 0, 3, 5, 0, 0, -1, -1, -1, 0, 2, 4, 1, 1, 0, 0, -1, -1, 1, 2, 1, 2, 1, 1, 0, -1, -1, 0, 0, 1, 2, 1, 0, -1, -2, -2, -1, 0, 1, 2, 1, 0, -3, -3, -2, -1, 1, 2, 2, 0, -2, -4, -2, -1, 0, 2, 2, 1, -1, -3, 0, 0, 0, 0, 0, 0, -1, -1, 0, 0, -1, 0, 0, 0, 0, 0, -1, -1, -1, -1, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, 0, -1, 0, 0, 0, 0, -1, -1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 3, 3, 3, 4, 3, 3, 3, 3, 5, 1, -2, -2, 0, 0, 0, -1, 4, -1, -3, -1, 0, 0, 0, -1, 3, -1, -1, 0, 1, 1, 0, -1, 2, 0, 0, 1, 1, 1, 0, -2, 1, 0, 0, 1, 1, 1, 0, -2, 0, -1, -1, -1, 0, 0, 0, -1, 0, -1, -1, -1, -1, 0, 0, -1, 2, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, -1, -1, 0, 0, 0, 0, 0, 2, 0, -1, -1, -1, -1, -1, 0, 3, 1, -1, -1, -2, -2, -2, -1, 4, 2, 1, 0, -1, -2, -2, -1, 2, 1, 0, 0, -1, -1, 0, 0, 0, -1, -1, -1, -1, 0, 1, 1, 0, 1, 2, 2, 2, 1, -1, -3, 0, 0, 1, 1, 1, 0, -1, -2, 0, 0, 0, 0, 0, 0, 
-1, -1, 0, 0, -1, 0, 0, 1, 1, 0, 0, 0, -1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 2, 1, -1, -3, 0, 0, 0, 1, 1, -1, -4, -5, -2, -2, -2, -1, 0, 2, 2, 2, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, -2, -3, 0, 0, 1, 1, 0, -1, -3, -4, -1, -1, 0, 1, 0, 0, -2, -3, -1, -1, 0, 1, 1, 1, 0, -1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 2, 1, 2, 0, 0, 0, 0, -1, 1, 0, 2, 0, -1, 1, 0, -1, 0, 0, 1, 0, 0, 2, 1, 0, 1, 0, 1, -1, 0, 2, 2, 0, 1, -1, 0, -1, -1, 2, 1, 1, 2, -2, -2, -3, -2, 0, 1, 1, 1, -2, -2, -3, -3, -1, -1, -1, 0, -3, -1, 0, 1, 2, 1, 1, 0, -3, -1, 0, 1, 2, 1, 1, 1, -2, 0, 0, 1, 1, 1, 1, 1, -1, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0, 0, 0, -1, -1, 0, -2, 0, 0, 0, 0, 0, -1, -1, -3, 0, 1, 1, 1, 1, 0, 1, -5, -2, 0, 1, 2, 2, 1, 2, -2, -1, -1, 0, 0, 1, 2, 3, 0, 0, 1, 1, 0, 0, 1, 2, 0, 0, 1, 0, -1, -1, 0, 1, -1, -1, -1, -1, -2, -2, -1, 0, -2, -2, -2, -2, -2, -1, 0, 1, 0, 0, 0, -1, 0, 1, 2, 2, 2, 1, 0, 0, 0, 1, 2, 2, 2, 1, 0, -1, -1, -1, 0, 0, 0, 1, 1, 1, 1, 1, -1, -4, -1, -1, 0, 1, 1, 1, 0, -3, -2, -1, 0, 0, 1, 2, 2, -2, -1, 0, 0, 0, 0, 2, 3, -1, -1, 0, 0, 0, 0, 1, 2, 0, 0, 0, -1, -2, -1, 1, 1, 0, 0, 0, -1, -2, -2, 0, 2, 1, 0, 0, -1, -2, -1, 1, 2, 2, 1, 0, 0, 0, -2, -3, -2, -3, 0, 0, 1, 0, -2, -2, -1, -1, 0, -1, 1, 1, -1, -1, 0, 0, 0, -1, 1, 1, -1, -1, 0, 0, 0, 1, 2, 1, -1, -1, 0, 1, 1, 2, 3, 2, 0, 0, 1, 2, -1, 0, 2, 1, 0, 0, 2, 3, -2, -1, 0, 0, -1, 0, 1, 2, 1, 1, 0, -1, -2, -2, -1, 1, 1, 1, 1, -1, -2, -2, 0, 2, 1, 1, 1, -1, -1, -1, 0, 2, 0, 0, 0, 0, 0, 0, 1, 2, -1, -1, -1, 0, 0, 0, 1, 2, -1, -2, -1, 1, 1, 1, 0, 0, -1, -2, -1, 1, 2, 2, 0, -1, -1, -2, -1, 2, 2, 2, 0, -1, -1, -1, -1, -2, -1, -1, 0, 1, 0, 0, -1, -1, -1, 0, 1, 2, 1, 0, 0, 0, 0, 1, 1, 2, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, -1, -1, -1, 1, 2, 1, 0, -1, -2, -2, -3, 2, 2, 1, 0, -2, -3, -4, -4, -4, -2, 1, 1, 1, 1, 0, 0, -2, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, -2, -2, -1, 0, 1, 2, 2, 1, -2, -2, -1, 1, 2, 1, 2, 1, -2, -2, -1, 1, 2, -1, 1, 1, -1, -1, -1, 0, 1, -2, 0, 1, 1, 
0, -1, -1, 0, -2, 0, 2, 2, 1, -1, -1, 0, 1, 1, 0, 0, 0, 1, 0, 0, -2, -3, -3, -2, -2, -1, 0, 0, -3, -4, -3, -2, -1, 0, 0, 0, -1, -1, 0, 1, 2, 3, 2, 1, 0, 1, 2, 3, 3, 3, 2, 1, 1, 1, 1, 2, 1, 0, 0, -1, 0, 0, 0, 0, -1, -1, -1, -1, 0, -1, -1, 0, 0, 0, 0, 0, 1, 1, 0, 0, -1, -1, 0, 2, 0, 0, 1, 0, -1, -1, 1, 1, -2, -1, 0, 1, 1, 1, 1, 1, -3, -3, 0, 2, 2, 1, 1, 0, -2, -2, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, -1, -1, 3, 1, -1, -3, -2, -1, 0, 1, 4, 2, -1, -3, -3, -1, 1, 2, 0, 0, 0, -1, -1, -1, -1, -1, 1, 2, 1, 0, 0, 0, -1, -1, 2, 3, 3, 2, 1, 0, -1, -1, 3, 4, 4, 2, 1, 0, -1, -2, 3, 3, 2, 1, 0, -1, -2, -2, 1, 1, 0, -1, -1, -2, -2, -3, 0, 0, 0, -1, -1, -2, -2, -2, -1, -1, -1, -1, -1, -2, -2, -1, 1, 2, 2, 2, 2, 1, 2, 2, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, -1, -2, 0, 0, 0, 0, 1, 0, -1, -4, 1, 0, 0, 0, 0, 0, -2, -5, 1, 0, 0, 0, 0, 0, -1, -4, 1, 0, -1, 0, 0, 0, -1, -3, 0, -1, -1, 0, 1, 1, 1, -1, -2, -1, 0, 0, -1, -1, -1, -2, -1, 0, 0, 0, -1, -1, -2, -2, 0, 1, 1, 0, -1, -1, -1, -2, 0, 1, 1, 0, 0, 0, -1, -1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 2, 2, 1, 1, 1, 0, 0, 1, 2, 2, 1, 1, 1, 0, -1, 0, 1, 1, 0, 4, 2, 1, 0, 0, 1, 1, 1, 4, 2, 1, 0, 0, 0, 0, 1, 3, 1, 0, 0, -1, -1, -1, 0, 1, 0, 0, -1, -1, -2, -1, 0, 0, 0, 0, 0, -1, -1, -1, 0, -1, -1, 0, 0, -1, -1, 0, 1, -2, -1, 0, -1, -1, 0, 0, 1, -2, -2, -1, -2, -1, 0, 0, 1, 0, 1, 1, 1, 2, 1, 0, -1, -1, -1, -1, 0, 0, -1, -2, -2, -1, 0, -1, 0, 0, -1, -2, -1, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 0, 2, 3, -1, 0, -1, -1, -1, -1, 0, 3, -1, 0, 0, -1, -1, -2, 0, 3, 0, 0, 0, 0, -1, -1, 1, 4, 2, 2, 0, 0, 0, 0, 0, 1, 1, 1, -1, -2, -1, -2, -1, 1, -1, -1, -2, -2, -2, -3, -2, 0, -1, 0, -1, -1, -1, -2, -1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 2, 2, 0, 0, 1, 0, 0, 1, 2, 2, 0, 0, 0, 0, -1, -1, 2, 2, 0, 0, 1, 0, -1, -1, -1, 0, 1, 1, 0, -1, -1, -1, 1, 2, 3, 2, 1, 0, 0, 0, 0, 1, 1, 1, 0, -1, 0, 0, -2, -2, -1, 0, 1, 0, 0, 0, -2, -2, -1, 2, 2, 2, 1, 0, -2, -1, 0, 1, 1, 0, 0, -1, -1, -1, 0, 0, -1, -2, -1, -2, 0, 1, 1, 1, 0, 0, 1, 1, -3, -3, -3, -2, -1, -1, -2, -2, -1, 
-1, 0, 1, 2, 1, 0, 0, 1, 1, 1, 2, 2, 1, 0, 0, 1, 1, 1, 1, 1, 0, -1, 1, 1, 0, -1, -1, 0, 0, -1, 1, 0, -1, -1, -1, 0, -1, -1, 1, 1, 0, -1, 0, 0, -1, 0, 2, 2, 0, -1, 0, 0, 0, 0, 2, 1, 0, -2, -1, 0, 1, 1, 0, 2, 0, -1, -1, 0, 1, 1, 0, 1, 0, -2, -1, 0, 1, 0, -1, 1, 0, -1, -1, 0, 1, 0, -1, 0, 1, 1, 0, 1, 1, 0, 0, -2, 1, 2, 1, 0, 0, 0, 1, -5, 0, 2, 1, 0, -1, 0, 1, -6, -1, 2, 1, 0, -1, 0, 0, 5, 3, 0, -1, -2, -1, -1, -1, 1, 1, 0, -1, -1, 0, -1, -1, -1, 0, 1, 1, 2, 2, 1, 0, -2, -1, 0, 1, 2, 1, 1, 1, -2, -1, -1, -1, 0, -1, 0, 1, 0, 1, 0, 0, -1, -1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, -3, -2, 0, 1, 1, 0, 0, -1, -1, 0, 1, 0, -1, 0, 2, 3, -1, 0, 0, -2, -4, -2, -1, 0, 0, 1, 1, 0, -2, -1, 0, -1, 1, 2, 3, 1, 0, 1, 1, 0, -1, 0, 1, 1, 1, 1, 1, 0, -2, -3, -2, 0, 0, 0, 1, 0, -1, -2, -2, 0, 1, 0, 0, -1, 3, 1, 0, 0, 1, 0, -1, -1, -2, -1, 0, 0, -1, -1, 0, 0, -1, 0, 0, 0, 0, 1, 1, 1, -1, -1, -1, 0, 1, 1, 1, 1, 0, -2, -3, -1, 1, 0, 0, 0, 1, -1, -3, -1, 1, 1, 0, -1, 3, 1, -1, 1, 2, 2, 0, -1, 3, 1, 0, 1, 2, 1, 1, 0, 0, -2, -2, -1, -1, 0, 0, 0, 1, 0, -1, -1, 1, 2, 1, 0, 0, -1, -2, -1, 1, 2, 2, 1, -1, -1, -1, 0, 0, 1, 2, 0, -2, 0, 0, 0, 0, 0, 1, -1, -1, 0, 1, 0, -1, -1, -1, -1, 0, 1, 1, 2, 0, -2, -1, 0, 1, 2, 2, 2, 1, -1, -1, 0, 0, 1, 1, 1, 0, -2, -2, -1, 0, 0, -1, -1, -1, -1, -2, -2, 0, 0, -1, 0, 1, 2, 2, 1, 0, 0, -1, -1, 0, 1, 2, 2, 1, 1, -1, -2, -1, -1, -1, -1, 2, 2, 1, 0, 0, -1, -2, -2, 1, 2, 2, 1, 0, 0, -2, -2, 0, 0, 0, 0, 1, 1, 0, -1, 0, -1, -1, -1, 2, 3, 2, 1, 0, -2, 1, 2, -1, 0, 0, 1, -1, -2, 2, 3, -1, 0, 0, 0, 0, -2, 2, 3, -1, -1, 0, 0, 0, -1, 3, 2, -2, 0, 1, 0, 0, -1, 3, 1, -2, 0, 1, 0, 0, -1, 2, 1, -1, 1, 0, -1, 0, 0, 1, -1, -2, 0, 0, -1, 1, 0, 0, -2, -2, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -2, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 2, 3, 1, 0, 0, -1, 0, 0, 1, 2, 0, -1, -1, -2, -1, 0, 1, 2, -2, -2, -2, -2, -1, 0, 1, 1, -1, -1, -1, -1, 0, 0, 0, -1, 2, 2, 2, 0, -1, -1, -2, -4, -1, -2, -1, -1, 0, 1, 2, 3, -1, -1, -1, -1, 0, 1, 2, 3, 1, 0, -1, 0, -1, 0, 1, 2, 1, 0, 0, 0, -1, 0, 2, 2, 1, 0, 
-1, -1, -2, 0, 1, 2, 0, -2, -2, -2, -3, -1, 0, 1, 0, -2, -2, -2, -2, -1, 1, 1, 0, 0, 0, 0, 0, 1, 2, 2
};

/* list of codebooks for intra-coded vectors */
/* Indexed by SVQ1 vector-size level; levels 4 and 5 carry no codebook (NULL)
 * and must not be dereferenced by the decoder. */
const int8_t* const ff_svq1_intra_codebooks[6] = {
    svq1_intra_codebook_4x2,    /* level 0: 4x2 vectors */
    svq1_intra_codebook_4x4,    /* level 1: 4x4 vectors */
    svq1_intra_codebook_8x4,    /* level 2: 8x4 vectors */
    svq1_intra_codebook_8x8,    /* level 3: 8x8 vectors */
    NULL, NULL,
};

#endif /* AVCODEC_SVQ1_CB_H */
123linslouis-android-video-cutter
jni/libavcodec/svq1_cb.h
C
asf20
99,155
/* * AAC decoder * Copyright (c) 2005-2006 Oded Shimon ( ods15 ods15 dyndns org ) * Copyright (c) 2006-2007 Maxim Gavrilov ( maxim.gavrilov gmail com ) * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * AAC decoder * @author Oded Shimon ( ods15 ods15 dyndns org ) * @author Maxim Gavrilov ( maxim.gavrilov gmail com ) */ /* * supported tools * * Support? 
Name * N (code in SoC repo) gain control * Y block switching * Y window shapes - standard * N window shapes - Low Delay * Y filterbank - standard * N (code in SoC repo) filterbank - Scalable Sample Rate * Y Temporal Noise Shaping * N (code in SoC repo) Long Term Prediction * Y intensity stereo * Y channel coupling * Y frequency domain prediction * Y Perceptual Noise Substitution * Y Mid/Side stereo * N Scalable Inverse AAC Quantization * N Frequency Selective Switch * N upsampling filter * Y quantization & coding - AAC * N quantization & coding - TwinVQ * N quantization & coding - BSAC * N AAC Error Resilience tools * N Error Resilience payload syntax * N Error Protection tool * N CELP * N Silence Compression * N HVXC * N HVXC 4kbits/s VR * N Structured Audio tools * N Structured Audio Sample Bank Format * N MIDI * N Harmonic and Individual Lines plus Noise * N Text-To-Speech Interface * Y Spectral Band Replication * Y (not in this code) Layer-1 * Y (not in this code) Layer-2 * Y (not in this code) Layer-3 * N SinuSoidal Coding (Transient, Sinusoid, Noise) * Y Parametric Stereo * N Direct Stream Transfer * * Note: - HE AAC v1 comprises LC AAC with Spectral Band Replication. * - HE AAC v2 comprises LC AAC with Spectral Band Replication and Parametric Stereo. 
*/ #include "avcodec.h" #include "internal.h" #include "get_bits.h" #include "dsputil.h" #include "fft.h" #include "lpc.h" #include "aac.h" #include "aactab.h" #include "aacdectab.h" #include "cbrt_tablegen.h" #include "sbr.h" #include "aacsbr.h" #include "mpeg4audio.h" #include "aac_parser.h" #include <assert.h> #include <errno.h> #include <math.h> #include <string.h> #if ARCH_ARM # include "arm/aac.h" #endif union float754 { float f; uint32_t i; }; static VLC vlc_scalefactors; static VLC vlc_spectral[11]; static const char overread_err[] = "Input buffer exhausted before END element found\n"; static ChannelElement *get_che(AACContext *ac, int type, int elem_id) { /* Some buggy encoders appear to set all elem_ids to zero and rely on channels always occurring in the same order. This is expressly forbidden by the spec but we will try to work around it. */ int err_printed = 0; while (ac->tags_seen_this_frame[type][elem_id] && elem_id < MAX_ELEM_ID) { if (ac->output_configured < OC_LOCKED && !err_printed) { av_log(ac->avctx, AV_LOG_WARNING, "Duplicate channel tag found, attempting to remap.\n"); err_printed = 1; } elem_id++; } if (elem_id == MAX_ELEM_ID) return NULL; ac->tags_seen_this_frame[type][elem_id] = 1; if (ac->tag_che_map[type][elem_id]) { return ac->tag_che_map[type][elem_id]; } if (ac->tags_mapped >= tags_per_config[ac->m4ac.chan_config]) { return NULL; } switch (ac->m4ac.chan_config) { case 7: if (ac->tags_mapped == 3 && type == TYPE_CPE) { ac->tags_mapped++; return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2]; } case 6: /* Some streams incorrectly code 5.1 audio as SCE[0] CPE[0] CPE[1] SCE[1] instead of SCE[0] CPE[0] CPE[1] LFE[0]. 
If we seem to have encountered such a stream, transfer the LFE[0] element to the SCE[1]'s mapping */ if (ac->tags_mapped == tags_per_config[ac->m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) { ac->tags_mapped++; return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0]; } case 5: if (ac->tags_mapped == 2 && type == TYPE_CPE) { ac->tags_mapped++; return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1]; } case 4: if (ac->tags_mapped == 2 && ac->m4ac.chan_config == 4 && type == TYPE_SCE) { ac->tags_mapped++; return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1]; } case 3: case 2: if (ac->tags_mapped == (ac->m4ac.chan_config != 2) && type == TYPE_CPE) { ac->tags_mapped++; return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][0]; } else if (ac->m4ac.chan_config == 2) { return NULL; } case 1: if (!ac->tags_mapped && type == TYPE_SCE) { ac->tags_mapped++; return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][0]; } default: return NULL; } } /** * Check for the channel element in the current channel position configuration. * If it exists, make sure the appropriate element is allocated and map the * channel order to match the internal FFmpeg channel layout. * * @param che_pos current channel position configuration * @param type channel element type * @param id channel element id * @param channels count of the number of channels in the configuration * * @return Returns error status. 
0 - OK, !0 - error */
static av_cold int che_configure(AACContext *ac,
                                 enum ChannelPosition che_pos[4][MAX_ELEM_ID],
                                 int type, int id, int *channels)
{
    if (che_pos[type][id]) {
        /* Element present in the configuration: allocate it lazily. */
        if (!ac->che[type][id] && !(ac->che[type][id] = av_mallocz(sizeof(ChannelElement))))
            return AVERROR(ENOMEM);
        ff_aac_sbr_ctx_init(&ac->che[type][id]->sbr);
        if (type != TYPE_CCE) {
            /* Coupling elements (CCE) contribute no direct output channel. */
            ac->output_data[(*channels)++] = ac->che[type][id]->ch[0].ret;
            if (type == TYPE_CPE ||
                (type == TYPE_SCE && ac->m4ac.ps == 1)) {
                /* Second output channel: the pair of a CPE, or the PS-decoded
                   stereo output of an SCE when Parametric Stereo is active. */
                ac->output_data[(*channels)++] = ac->che[type][id]->ch[1].ret;
            }
        }
    } else {
        /* Element absent from the configuration: release it if allocated. */
        if (ac->che[type][id])
            ff_aac_sbr_ctx_close(&ac->che[type][id]->sbr);
        av_freep(&ac->che[type][id]);
    }
    return 0;
}

/**
 * Configure output channel order based on the current program configuration element.
 *
 * @param che_pos current channel position configuration
 * @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
 *
 * @return Returns error status. 0 - OK, !0 - error
 */
static av_cold int output_configure(AACContext *ac,
                                    enum ChannelPosition che_pos[4][MAX_ELEM_ID],
                                    enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
                                    int channel_config, enum OCStatus oc_type)
{
    AVCodecContext *avctx = ac->avctx;
    int i, type, channels = 0, ret;

    /* Adopt the new position configuration (no-op when caller passes the
       current one back in). */
    if (new_che_pos != che_pos)
        memcpy(che_pos, new_che_pos, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));

    if (channel_config) {
        /* Default (table 1.17) configuration: allocate exactly the elements
           of the known layout and reset the tag map so get_che() remaps. */
        for (i = 0; i < tags_per_config[channel_config]; i++) {
            if ((ret = che_configure(ac, che_pos,
                                     aac_channel_layout_map[channel_config - 1][i][0],
                                     aac_channel_layout_map[channel_config - 1][i][1],
                                     &channels)))
                return ret;
        }

        memset(ac->tag_che_map, 0, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
        ac->tags_mapped = 0;

        avctx->channel_layout = aac_channel_layout[channel_config - 1];
    } else {
        /* Allocate or free elements depending on if they are in the
         * current program configuration.
         *
         * Set up default 1:1 output mapping.
         *
         * For a 5.1 stream the output order will be:
         *    [ Center ] [ Front Left ] [ Front Right ] [ LFE ] [ Surround Left ] [ Surround Right ]
         */
        for (i = 0; i < MAX_ELEM_ID; i++) {
            for (type = 0; type < 4; type++) {
                if ((ret = che_configure(ac, che_pos, type, i, &channels)))
                    return ret;
            }
        }

        /* 1:1 mapping: every allocated element maps to itself. */
        memcpy(ac->tag_che_map, ac->che, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
        ac->tags_mapped = 4 * MAX_ELEM_ID;

        avctx->channel_layout = 0;
    }

    avctx->channels = channels;

    ac->output_configured = oc_type;

    return 0;
}

/**
 * Decode an array of 4 bit element IDs, optionally interleaved with a stereo/mono switching bit.
 *
 * @param cpe_map Stereo (Channel Pair Element) map, NULL if stereo bit is not present.
 * @param sce_map mono (Single Channel Element) map
 * @param type speaker type/position for these channels
 */
static void decode_channel_map(enum ChannelPosition *cpe_map,
                               enum ChannelPosition *sce_map,
                               enum ChannelPosition type,
                               GetBitContext *gb, int n)
{
    while (n--) {
        enum ChannelPosition *map = cpe_map && get_bits1(gb) ? cpe_map : sce_map; // stereo or mono map
        map[get_bits(gb, 4)] = type;
    }
}

/**
 * Decode program configuration element; reference: table 4.2.
 *
 * @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
 *
 * @return Returns error status.
0 - OK, !0 - error */
static int decode_pce(AACContext *ac, enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
                      GetBitContext *gb)
{
    int num_front, num_side, num_back, num_lfe, num_assoc_data, num_cc, sampling_index;
    int comment_len;

    skip_bits(gb, 2);  // object_type

    sampling_index = get_bits(gb, 4);
    if (ac->m4ac.sampling_index != sampling_index)
        av_log(ac->avctx, AV_LOG_WARNING, "Sample rate index in program config element does not match the sample rate index configured by the container.\n");

    /* Element counts per speaker group (bit widths per table 4.2). */
    num_front       = get_bits(gb, 4);
    num_side        = get_bits(gb, 4);
    num_back        = get_bits(gb, 4);
    num_lfe         = get_bits(gb, 2);
    num_assoc_data  = get_bits(gb, 3);
    num_cc          = get_bits(gb, 4);

    if (get_bits1(gb))
        skip_bits(gb, 4); // mono_mixdown_tag
    if (get_bits1(gb))
        skip_bits(gb, 4); // stereo_mixdown_tag

    if (get_bits1(gb))
        skip_bits(gb, 3); // mixdown_coeff_index and pseudo_surround

    decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_FRONT, gb, num_front);
    decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_SIDE,  gb, num_side );
    decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_BACK,  gb, num_back );
    decode_channel_map(NULL,                  new_che_pos[TYPE_LFE], AAC_CHANNEL_LFE,   gb, num_lfe  );

    skip_bits_long(gb, 4 * num_assoc_data);  /* associated data elements are ignored */

    decode_channel_map(new_che_pos[TYPE_CCE], new_che_pos[TYPE_CCE], AAC_CHANNEL_CC,    gb, num_cc   );

    align_get_bits(gb);

    /* comment field, first byte is length */
    comment_len = get_bits(gb, 8) * 8;
    if (get_bits_left(gb) < comment_len) {
        av_log(ac->avctx, AV_LOG_ERROR, overread_err);
        return -1;
    }
    skip_bits_long(gb, comment_len);
    return 0;
}

/**
 * Set up channel positions based on a default channel configuration
 * as specified in table 1.17.
 *
 * @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
 *
 * @return Returns error status.
0 - OK, !0 - error */ static av_cold int set_default_channel_config(AACContext *ac, enum ChannelPosition new_che_pos[4][MAX_ELEM_ID], int channel_config) { if (channel_config < 1 || channel_config > 7) { av_log(ac->avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n", channel_config); return -1; } /* default channel configurations: * * 1ch : front center (mono) * 2ch : L + R (stereo) * 3ch : front center + L + R * 4ch : front center + L + R + back center * 5ch : front center + L + R + back stereo * 6ch : front center + L + R + back stereo + LFE * 7ch : front center + L + R + outer front left + outer front right + back stereo + LFE */ if (channel_config != 2) new_che_pos[TYPE_SCE][0] = AAC_CHANNEL_FRONT; // front center (or mono) if (channel_config > 1) new_che_pos[TYPE_CPE][0] = AAC_CHANNEL_FRONT; // L + R (or stereo) if (channel_config == 4) new_che_pos[TYPE_SCE][1] = AAC_CHANNEL_BACK; // back center if (channel_config > 4) new_che_pos[TYPE_CPE][(channel_config == 7) + 1] = AAC_CHANNEL_BACK; // back stereo if (channel_config > 5) new_che_pos[TYPE_LFE][0] = AAC_CHANNEL_LFE; // LFE if (channel_config == 7) new_che_pos[TYPE_CPE][1] = AAC_CHANNEL_FRONT; // outer front left + outer front right return 0; } /** * Decode GA "General Audio" specific configuration; reference: table 4.1. * * @return Returns error status. 
 0 - OK, !0 - error
 */
static int decode_ga_specific_config(AACContext *ac, GetBitContext *gb,
                                     int channel_config)
{
    enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
    int extension_flag, ret;

    if (get_bits1(gb)) { // frameLengthFlag
        av_log_missing_feature(ac->avctx, "960/120 MDCT window is", 1);
        return -1;
    }

    if (get_bits1(gb))     // dependsOnCoreCoder
        skip_bits(gb, 14); // coreCoderDelay
    extension_flag = get_bits1(gb);

    if (ac->m4ac.object_type == AOT_AAC_SCALABLE ||
        ac->m4ac.object_type == AOT_ER_AAC_SCALABLE)
        skip_bits(gb, 3);  // layerNr

    memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
    if (channel_config == 0) {
        // channel configuration 0: layout comes from an in-stream PCE
        skip_bits(gb, 4);  // element_instance_tag
        if ((ret = decode_pce(ac, new_che_pos, gb)))
            return ret;
    } else {
        if ((ret = set_default_channel_config(ac, new_che_pos, channel_config)))
            return ret;
    }
    if ((ret = output_configure(ac, ac->che_pos, new_che_pos, channel_config, OC_GLOBAL_HDR)))
        return ret;

    if (extension_flag) {
        switch (ac->m4ac.object_type) {
        case AOT_ER_BSAC:
            skip_bits(gb, 5);  // numOfSubFrame
            skip_bits(gb, 11); // layer_length
            break;
        case AOT_ER_AAC_LC:
        case AOT_ER_AAC_LTP:
        case AOT_ER_AAC_SCALABLE:
        case AOT_ER_AAC_LD:
            skip_bits(gb, 3);  /* aacSectionDataResilienceFlag
                                * aacScalefactorDataResilienceFlag
                                * aacSpectralDataResilienceFlag
                                */
            break;
        }
        skip_bits1(gb); // extensionFlag3 (TBD in version 3)
    }
    return 0;
}

/**
 * Decode audio specific configuration; reference: table 1.13.
 *
 * @param data       pointer to AVCodecContext extradata
 * @param data_size  size of AVCodecContext extradata
 *
 * @return Returns error status.
 0 - OK, !0 - error
 */
static int decode_audio_specific_config(AACContext *ac, void *data,
                                        int data_size)
{
    GetBitContext gb;
    int i;

    init_get_bits(&gb, data, data_size * 8);

    // parse the common MPEG-4 audio header; i is its size in bits
    if ((i = ff_mpeg4audio_get_config(&ac->m4ac, data, data_size)) < 0)
        return -1;
    if (ac->m4ac.sampling_index > 12) {
        av_log(ac->avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", ac->m4ac.sampling_index);
        return -1;
    }
    if (ac->m4ac.sbr == 1 && ac->m4ac.ps == -1)
        ac->m4ac.ps = 1; // explicit SBR with unspecified PS: assume PS present

    skip_bits_long(&gb, i); // skip the part ff_mpeg4audio_get_config already consumed

    switch (ac->m4ac.object_type) {
    case AOT_AAC_MAIN:
    case AOT_AAC_LC:
        if (decode_ga_specific_config(ac, &gb, ac->m4ac.chan_config))
            return -1;
        break;
    default:
        av_log(ac->avctx, AV_LOG_ERROR, "Audio object type %s%d is not supported.\n",
               ac->m4ac.sbr == 1? "SBR+" : "", ac->m4ac.object_type);
        return -1;
    }
    return 0;
}

/**
 * linear congruential pseudorandom number generator
 *
 * @param previous_val    pointer to the current state of the generator
 *
 * @return Returns a 32-bit pseudorandom integer
 */
static av_always_inline int lcg_random(int previous_val)
{
    return previous_val * 1664525 + 1013904223;
}

/** Reset one AAC-Main predictor to its initial state (unit variances). */
static av_always_inline void reset_predict_state(PredictorState *ps)
{
    ps->r0   = 0.0f;
    ps->r1   = 0.0f;
    ps->cor0 = 0.0f;
    ps->cor1 = 0.0f;
    ps->var0 = 1.0f;
    ps->var1 = 1.0f;
}

/** Reset all MAX_PREDICTORS predictor states. */
static void reset_all_predictors(PredictorState *ps)
{
    int i;
    for (i = 0; i < MAX_PREDICTORS; i++)
        reset_predict_state(&ps[i]);
}

/** Reset every 30th predictor starting at group_num - 1 (cyclic reset groups 1..30). */
static void reset_predictor_group(PredictorState *ps, int group_num)
{
    int i;
    for (i = group_num - 1; i < MAX_PREDICTORS; i += 30)
        reset_predict_state(&ps[i]);
}

#define AAC_INIT_VLC_STATIC(num, size) \
    INIT_VLC_STATIC(&vlc_spectral[num], 8, ff_aac_spectral_sizes[num], \
        ff_aac_spectral_bits[num], sizeof( ff_aac_spectral_bits[num][0]), sizeof( ff_aac_spectral_bits[num][0]), \
        ff_aac_spectral_codes[num], sizeof(ff_aac_spectral_codes[num][0]), sizeof(ff_aac_spectral_codes[num][0]), \
        size);

static av_cold int aac_decode_init(AVCodecContext *avctx)
{
    AACContext *ac = avctx->priv_data;

    ac->avctx = avctx;
    ac->m4ac.sample_rate = avctx->sample_rate;

    if (avctx->extradata_size > 0) {
        if (decode_audio_specific_config(ac, avctx->extradata, avctx->extradata_size))
            return -1;
    }

    avctx->sample_fmt = SAMPLE_FMT_S16;

    // static spectral-data VLC tables, one per codebook
    AAC_INIT_VLC_STATIC( 0, 304);
    AAC_INIT_VLC_STATIC( 1, 270);
    AAC_INIT_VLC_STATIC( 2, 550);
    AAC_INIT_VLC_STATIC( 3, 300);
    AAC_INIT_VLC_STATIC( 4, 328);
    AAC_INIT_VLC_STATIC( 5, 294);
    AAC_INIT_VLC_STATIC( 6, 306);
    AAC_INIT_VLC_STATIC( 7, 268);
    AAC_INIT_VLC_STATIC( 8, 510);
    AAC_INIT_VLC_STATIC( 9, 366);
    AAC_INIT_VLC_STATIC(10, 462);

    ff_aac_sbr_init();

    dsputil_init(&ac->dsp, avctx);

    ac->random_state = 0x1f2e3d4c; // seed for the perceptual-noise LCG

    // -1024 - Compensate wrong IMDCT method.
    // 32768 - Required to scale values to the correct range for the bias method
    //         for float to int16 conversion.

    if (ac->dsp.float_to_int16_interleave == ff_float_to_int16_interleave_c) {
        ac->add_bias  = 385.0f;
        ac->sf_scale  = 1. / (-1024. * 32768.);
        ac->sf_offset = 0;
    } else {
        ac->add_bias  = 0.0f;
        ac->sf_scale  = 1. / -1024.;
        ac->sf_offset = 60;
    }

    ff_aac_tableinit();

    INIT_VLC_STATIC(&vlc_scalefactors,7,FF_ARRAY_ELEMS(ff_aac_scalefactor_code),
                    ff_aac_scalefactor_bits, sizeof(ff_aac_scalefactor_bits[0]), sizeof(ff_aac_scalefactor_bits[0]),
                    ff_aac_scalefactor_code, sizeof(ff_aac_scalefactor_code[0]), sizeof(ff_aac_scalefactor_code[0]),
                    352);

    ff_mdct_init(&ac->mdct, 11, 1, 1.0);       // 2048-point long transform
    ff_mdct_init(&ac->mdct_small, 8, 1, 1.0);  // 256-point short transform
    // window initialization
    ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
    ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
    ff_init_ff_sine_windows(10);
    ff_init_ff_sine_windows( 7);

    cbrt_tableinit();

    return 0;
}

/**
 * Skip data_stream_element; reference: table 4.10.
 */
static int skip_data_stream_element(AACContext *ac, GetBitContext *gb)
{
    int byte_align = get_bits1(gb);
    int count = get_bits(gb, 8);
    if (count == 255)
        count += get_bits(gb, 8); // escape: lengths >= 255 use a second count byte
    if (byte_align)
        align_get_bits(gb);

    if (get_bits_left(gb) < 8 * count) {
        av_log(ac->avctx, AV_LOG_ERROR, overread_err);
        return -1;
    }
    skip_bits_long(gb, 8 * count);
    return 0;
}

/**
 * Decode AAC-Main prediction data; part of table 4.6.
 *
 * @return Returns error status. 0 - OK, !0 - error
 */
static int decode_prediction(AACContext *ac, IndividualChannelStream *ics,
                             GetBitContext *gb)
{
    int sfb;
    if (get_bits1(gb)) { // predictor_reset
        ics->predictor_reset_group = get_bits(gb, 5);
        if (ics->predictor_reset_group == 0 || ics->predictor_reset_group > 30) {
            av_log(ac->avctx, AV_LOG_ERROR, "Invalid Predictor Reset Group.\n");
            return -1;
        }
    }
    // one prediction_used flag per scalefactor band, capped by the profile limit
    for (sfb = 0; sfb < FFMIN(ics->max_sfb, ff_aac_pred_sfb_max[ac->m4ac.sampling_index]); sfb++) {
        ics->prediction_used[sfb] = get_bits1(gb);
    }
    return 0;
}

/**
 * Decode Individual Channel Stream info; reference: table 4.6.
 *
 * @param common_window   Channels have independent [0], or shared [1], Individual Channel Stream information.
 */
static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
                           GetBitContext *gb, int common_window)
{
    if (get_bits1(gb)) {
        av_log(ac->avctx, AV_LOG_ERROR, "Reserved bit set.\n");
        memset(ics, 0, sizeof(IndividualChannelStream)); // wipe state so a broken ICS is never reused
        return -1;
    }
    ics->window_sequence[1] = ics->window_sequence[0];
    ics->window_sequence[0] = get_bits(gb, 2);
    ics->use_kb_window[1]   = ics->use_kb_window[0];
    ics->use_kb_window[0]   = get_bits1(gb);
    ics->num_window_groups  = 1;
    ics->group_len[0]       = 1;
    if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
        int i;
        ics->max_sfb = get_bits(gb, 4);
        // 7 grouping bits: 1 extends the current group, 0 starts a new one
        for (i = 0; i < 7; i++) {
            if (get_bits1(gb)) {
                ics->group_len[ics->num_window_groups - 1]++;
            } else {
                ics->num_window_groups++;
                ics->group_len[ics->num_window_groups - 1] = 1;
            }
        }
        ics->num_windows       = 8;
        ics->swb_offset        = ff_swb_offset_128[ac->m4ac.sampling_index];
        ics->num_swb           = ff_aac_num_swb_128[ac->m4ac.sampling_index];
        ics->tns_max_bands     = ff_tns_max_bands_128[ac->m4ac.sampling_index];
        ics->predictor_present = 0;
    } else {
        ics->max_sfb               = get_bits(gb, 6);
        ics->num_windows           = 1;
        ics->swb_offset            = ff_swb_offset_1024[ac->m4ac.sampling_index];
        ics->num_swb               = ff_aac_num_swb_1024[ac->m4ac.sampling_index];
        ics->tns_max_bands         = ff_tns_max_bands_1024[ac->m4ac.sampling_index];
        ics->predictor_present     = get_bits1(gb);
        ics->predictor_reset_group = 0;
        if (ics->predictor_present) {
            if (ac->m4ac.object_type == AOT_AAC_MAIN) {
                if (decode_prediction(ac, ics, gb)) {
                    memset(ics, 0, sizeof(IndividualChannelStream));
                    return -1;
                }
            } else if (ac->m4ac.object_type == AOT_AAC_LC) {
                av_log(ac->avctx, AV_LOG_ERROR, "Prediction is not allowed in AAC-LC.\n");
                memset(ics, 0, sizeof(IndividualChannelStream));
                return -1;
            } else {
                av_log_missing_feature(ac->avctx, "Predictor bit set but LTP is", 1);
                memset(ics, 0, sizeof(IndividualChannelStream));
                return -1;
            }
        }
    }

    if (ics->max_sfb > ics->num_swb) {
        av_log(ac->avctx, AV_LOG_ERROR,
               "Number of scalefactor bands in group (%d) exceeds limit (%d).\n",
               ics->max_sfb, ics->num_swb);
memset(ics, 0, sizeof(IndividualChannelStream)); return -1; } return 0; } /** * Decode band types (section_data payload); reference: table 4.46. * * @param band_type array of the used band type * @param band_type_run_end array of the last scalefactor band of a band type run * * @return Returns error status. 0 - OK, !0 - error */ static int decode_band_types(AACContext *ac, enum BandType band_type[120], int band_type_run_end[120], GetBitContext *gb, IndividualChannelStream *ics) { int g, idx = 0; const int bits = (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) ? 3 : 5; for (g = 0; g < ics->num_window_groups; g++) { int k = 0; while (k < ics->max_sfb) { uint8_t sect_end = k; int sect_len_incr; int sect_band_type = get_bits(gb, 4); if (sect_band_type == 12) { av_log(ac->avctx, AV_LOG_ERROR, "invalid band type\n"); return -1; } while ((sect_len_incr = get_bits(gb, bits)) == (1 << bits) - 1) sect_end += sect_len_incr; sect_end += sect_len_incr; if (get_bits_left(gb) < 0) { av_log(ac->avctx, AV_LOG_ERROR, overread_err); return -1; } if (sect_end > ics->max_sfb) { av_log(ac->avctx, AV_LOG_ERROR, "Number of bands (%d) exceeds limit (%d).\n", sect_end, ics->max_sfb); return -1; } for (; k < sect_end; k++) { band_type [idx] = sect_band_type; band_type_run_end[idx++] = sect_end; } } } return 0; } /** * Decode scalefactors; reference: table 4.47. * * @param global_gain first scalefactor value as scalefactors are differentially coded * @param band_type array of the used band type * @param band_type_run_end array of the last scalefactor band of a band type run * @param sf array of scalefactors or intensity stereo positions * * @return Returns error status. 0 - OK, !0 - error */ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb, unsigned int global_gain, IndividualChannelStream *ics, enum BandType band_type[120], int band_type_run_end[120]) { const int sf_offset = ac->sf_offset + (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE ? 
 12 : 0);
    int g, i, idx = 0;
    int offset[3] = { global_gain, global_gain - 90, 100 }; // running values: [0] sf, [1] noise gain, [2] intensity position
    int noise_flag = 1; // first noise band uses a raw 9-bit delta instead of the VLC
    static const char *sf_str[3] = { "Global gain", "Noise gain", "Intensity stereo position" };
    for (g = 0; g < ics->num_window_groups; g++) {
        for (i = 0; i < ics->max_sfb;) {
            int run_end = band_type_run_end[idx];
            if (band_type[idx] == ZERO_BT) {
                for (; i < run_end; i++, idx++)
                    sf[idx] = 0.;
            } else if ((band_type[idx] == INTENSITY_BT) || (band_type[idx] == INTENSITY_BT2)) {
                for (; i < run_end; i++, idx++) {
                    offset[2] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
                    // int compared against 255U: negative values promote to a huge
                    // unsigned, so this rejects both > 255 and < 0 in one test
                    if (offset[2] > 255U) {
                        av_log(ac->avctx, AV_LOG_ERROR,
                               "%s (%d) out of range.\n", sf_str[2], offset[2]);
                        return -1;
                    }
                    sf[idx] = ff_aac_pow2sf_tab[-offset[2] + 300];
                }
            } else if (band_type[idx] == NOISE_BT) {
                for (; i < run_end; i++, idx++) {
                    if (noise_flag-- > 0)
                        offset[1] += get_bits(gb, 9) - 256;
                    else
                        offset[1] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
                    if (offset[1] > 255U) {
                        av_log(ac->avctx, AV_LOG_ERROR,
                               "%s (%d) out of range.\n", sf_str[1], offset[1]);
                        return -1;
                    }
                    sf[idx] = -ff_aac_pow2sf_tab[offset[1] + sf_offset + 100];
                }
            } else {
                for (; i < run_end; i++, idx++) {
                    offset[0] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
                    if (offset[0] > 255U) {
                        av_log(ac->avctx, AV_LOG_ERROR,
                               "%s (%d) out of range.\n", sf_str[0], offset[0]);
                        return -1;
                    }
                    sf[idx] = -ff_aac_pow2sf_tab[ offset[0] + sf_offset];
                }
            }
        }
    }
    return 0;
}

/**
 * Decode pulse data; reference: table 4.7.
 */
static int decode_pulses(Pulse *pulse, GetBitContext *gb,
                         const uint16_t *swb_offset, int num_swb)
{
    int i, pulse_swb;
    pulse->num_pulse = get_bits(gb, 2) + 1;
    pulse_swb        = get_bits(gb, 6);
    if (pulse_swb >= num_swb) // start band must exist
        return -1;
    pulse->pos[0]    = swb_offset[pulse_swb];
    pulse->pos[0]   += get_bits(gb, 5);
    if (pulse->pos[0] > 1023) // positions index a 1024-coefficient spectrum
        return -1;
    pulse->amp[0]    = get_bits(gb, 4);
    for (i = 1; i < pulse->num_pulse; i++) {
        // subsequent positions are offsets from the previous pulse
        pulse->pos[i] = get_bits(gb, 5) + pulse->pos[i - 1];
        if (pulse->pos[i] > 1023)
            return -1;
        pulse->amp[i] = get_bits(gb, 4);
    }
    return 0;
}

/**
 * Decode Temporal Noise Shaping data; reference: table 4.48.
 *
 * @return Returns error status. 0 - OK, !0 - error
 */
static int decode_tns(AACContext *ac, TemporalNoiseShaping *tns,
                      GetBitContext *gb, const IndividualChannelStream *ics)
{
    int w, filt, i, coef_len, coef_res, coef_compress;
    const int is8 = ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE;
    // order limit: 7 for short windows, else 20 (Main) / 12 (others)
    const int tns_max_order = is8 ? 7 : ac->m4ac.object_type == AOT_AAC_MAIN ? 20 : 12;
    for (w = 0; w < ics->num_windows; w++) {
        if ((tns->n_filt[w] = get_bits(gb, 2 - is8))) {
            coef_res = get_bits1(gb);

            for (filt = 0; filt < tns->n_filt[w]; filt++) {
                int tmp2_idx;
                tns->length[w][filt] = get_bits(gb, 6 - 2 * is8);

                if ((tns->order[w][filt] = get_bits(gb, 5 - 2 * is8)) > tns_max_order) {
                    av_log(ac->avctx, AV_LOG_ERROR, "TNS filter order %d is greater than maximum %d.\n",
                           tns->order[w][filt], tns_max_order);
                    tns->order[w][filt] = 0;
                    return -1;
                }
                if (tns->order[w][filt]) {
                    tns->direction[w][filt] = get_bits1(gb);
                    coef_compress = get_bits1(gb);
                    coef_len = coef_res + 3 - coef_compress;
                    tmp2_idx = 2 * coef_compress + coef_res; // selects the dequant table

                    for (i = 0; i < tns->order[w][filt]; i++)
                        tns->coef[w][filt][i] = tns_tmp2_map[tmp2_idx][get_bits(gb, coef_len)];
                }
            }
        }
    }
    return 0;
}

/**
 * Decode Mid/Side data; reference: table 4.54.
 *
 * @param ms_present   Indicates mid/side stereo presence.
 [0] mask is all 0s;
 *                     [1] mask is decoded from bitstream; [2] mask is all 1s;
 *                     [3] reserved for scalable AAC
 */
static void decode_mid_side_stereo(ChannelElement *cpe, GetBitContext *gb,
                                   int ms_present)
{
    int idx;
    if (ms_present == 1) {
        // one mask bit per (group, sfb) pair
        for (idx = 0; idx < cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb; idx++)
            cpe->ms_mask[idx] = get_bits1(gb);
    } else if (ms_present == 2) {
        memset(cpe->ms_mask, 1, cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb * sizeof(cpe->ms_mask[0]));
    }
    // ms_present == 0: mask left untouched (all zeros), nothing to read
}

/* Scalar fallbacks for the dequant helpers; arch-specific headers may
 * predefine optimized versions, hence the #ifndef guards.
 * idx packs 2 or 4 small codebook indices; sign packs sign bits. */
#ifndef VMUL2
static inline float *VMUL2(float *dst, const float *v, unsigned idx,
                           const float *scale)
{
    float s = *scale;
    *dst++ = v[idx    & 15] * s;
    *dst++ = v[idx>>4 & 15] * s;
    return dst;
}
#endif

#ifndef VMUL4
static inline float *VMUL4(float *dst, const float *v, unsigned idx,
                           const float *scale)
{
    float s = *scale;
    *dst++ = v[idx    & 3] * s;
    *dst++ = v[idx>>2 & 3] * s;
    *dst++ = v[idx>>4 & 3] * s;
    *dst++ = v[idx>>6 & 3] * s;
    return dst;
}
#endif

#ifndef VMUL2S
static inline float *VMUL2S(float *dst, const float *v, unsigned idx,
                            unsigned sign, const float *scale)
{
    union float754 s0, s1;

    s0.f = s1.f = *scale;
    // apply the packed sign bits by XORing the float sign bit
    s0.i ^= sign >> 1 << 31;
    s1.i ^= sign      << 31;

    *dst++ = v[idx    & 15] * s0.f;
    *dst++ = v[idx>>4 & 15] * s1.f;

    return dst;
}
#endif

#ifndef VMUL4S
static inline float *VMUL4S(float *dst, const float *v, unsigned idx,
                            unsigned sign, const float *scale)
{
    unsigned nz = idx >> 12; // per-lane "non-zero" flags: only those consume a sign bit
    union float754 s = { .f = *scale };
    union float754 t;

    t.i = s.i ^ (sign & 1<<31);
    *dst++ = v[idx    & 3] * t.f;

    sign <<= nz & 1;
    nz >>= 1;
    t.i = s.i ^ (sign & 1<<31);
    *dst++ = v[idx>>2 & 3] * t.f;

    sign <<= nz & 1;
    nz >>= 1;
    t.i = s.i ^ (sign & 1<<31);
    *dst++ = v[idx>>4 & 3] * t.f;

    sign <<= nz & 1;
    nz >>= 1;
    t.i = s.i ^ (sign & 1<<31);
    *dst++ = v[idx>>6 & 3] * t.f;

    return dst;
}
#endif

/**
 * Decode spectral data; reference: table 4.50.
 * Dequantize and scale spectral data; reference: 4.6.3.3.
 *
 * @param coef            array of dequantized, scaled spectral data
 * @param sf              array of scalefactors or intensity stereo positions
 * @param pulse_present   set if pulses are present
 * @param pulse           pointer to pulse data struct
 * @param band_type       array of the used band type
 *
 * @return Returns error status. 0 - OK, !0 - error
 */
static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
                                       GetBitContext *gb, const float sf[120],
                                       int pulse_present, const Pulse *pulse,
                                       const IndividualChannelStream *ics,
                                       enum BandType band_type[120])
{
    int i, k, g, idx = 0;
    const int c = 1024 / ics->num_windows; // coefficients per window
    const uint16_t *offsets = ics->swb_offset;
    float *coef_base = coef;
    int err_idx;

    // zero everything above the last coded scalefactor band in each window
    for (g = 0; g < ics->num_windows; g++)
        memset(coef + g * 128 + offsets[ics->max_sfb], 0, sizeof(float) * (c - offsets[ics->max_sfb]));

    for (g = 0; g < ics->num_window_groups; g++) {
        unsigned g_len = ics->group_len[g];

        for (i = 0; i < ics->max_sfb; i++, idx++) {
            const unsigned cbt_m1 = band_type[idx] - 1; // band type minus 1 (ZERO_BT wraps to huge unsigned)
            float *cfo = coef + offsets[i];
            int off_len = offsets[i + 1] - offsets[i];
            int group;

            if (cbt_m1 >= INTENSITY_BT2 - 1) {
                // ZERO_BT (wrapped) and intensity bands: no spectral data here
                for (group = 0; group < g_len; group++, cfo+=128) {
                    memset(cfo, 0, off_len * sizeof(float));
                }
            } else if (cbt_m1 == NOISE_BT - 1) {
                // perceptual noise substitution: fill with LCG noise, normalize to band energy
                for (group = 0; group < g_len; group++, cfo+=128) {
                    float scale;
                    float band_energy;

                    for (k = 0; k < off_len; k++) {
                        ac->random_state  = lcg_random(ac->random_state);
                        cfo[k] = ac->random_state;
                    }

                    band_energy = ac->dsp.scalarproduct_float(cfo, cfo, off_len);
                    scale = sf[idx] / sqrtf(band_energy);
                    ac->dsp.vector_fmul_scalar(cfo, cfo, scale, off_len);
                }
            } else {
                const float *vq = ff_aac_codebook_vector_vals[cbt_m1];
                const uint16_t *cb_vector_idx = ff_aac_codebook_vector_idx[cbt_m1];
                VLC_TYPE (*vlc_tab)[2] = vlc_spectral[cbt_m1].table;
                const int cb_size = ff_aac_spectral_sizes[cbt_m1];
                OPEN_READER(re, gb); // raw bitstream reader state for the inner loops

                switch (cbt_m1 >> 1) {
                case 0: // codebook 1: unsigned quads
                    for (group = 0; group < g_len; group++, cfo+=128) {
                        float *cf = cfo;
                        int len = off_len;

                        do {
                            int code;
                            unsigned cb_idx;
                            UPDATE_CACHE(re, gb);
                            GET_VLC(code, re, gb, vlc_tab, 8, 2);

                            if (code >= cb_size) {
                                err_idx = code;
                                goto err_cb_overflow;
                            }

                            cb_idx = cb_vector_idx[code];
                            cf = VMUL4(cf, vq, cb_idx, sf + idx);
                        } while (len -= 4);
                    }
                    break;

                case 1: // codebook 2: signed quads, sign bits follow the VLC code
                    for (group = 0; group < g_len; group++, cfo+=128) {
                        float *cf = cfo;
                        int len = off_len;

                        do {
                            int code;
                            unsigned nnz;
                            unsigned cb_idx;
                            uint32_t bits;

                            UPDATE_CACHE(re, gb);
                            GET_VLC(code, re, gb, vlc_tab, 8, 2);

                            if (code >= cb_size) {
                                err_idx = code;
                                goto err_cb_overflow;
                            }

#if MIN_CACHE_BITS < 20
                            UPDATE_CACHE(re, gb);
#endif
                            cb_idx = cb_vector_idx[code];
                            nnz = cb_idx >> 8 & 15; // number of non-zero values -> sign bits to read
                            bits = SHOW_UBITS(re, gb, nnz) << (32-nnz);
                            LAST_SKIP_BITS(re, gb, nnz);
                            cf = VMUL4S(cf, vq, cb_idx, bits, sf + idx);
                        } while (len -= 4);
                    }
                    break;

                case 2: // codebook 3: unsigned pairs
                    for (group = 0; group < g_len; group++, cfo+=128) {
                        float *cf = cfo;
                        int len = off_len;

                        do {
                            int code;
                            unsigned cb_idx;

                            UPDATE_CACHE(re, gb);
                            GET_VLC(code, re, gb, vlc_tab, 8, 2);

                            if (code >= cb_size) {
                                err_idx = code;
                                goto err_cb_overflow;
                            }

                            cb_idx = cb_vector_idx[code];
                            cf = VMUL2(cf, vq, cb_idx, sf + idx);
                        } while (len -= 2);
                    }
                    break;

                case 3: // codebooks 4-5: signed pairs
                case 4:
                    for (group = 0; group < g_len; group++, cfo+=128) {
                        float *cf = cfo;
                        int len = off_len;

                        do {
                            int code;
                            unsigned nnz;
                            unsigned cb_idx;
                            unsigned sign;

                            UPDATE_CACHE(re, gb);
                            GET_VLC(code, re, gb, vlc_tab, 8, 2);

                            if (code >= cb_size) {
                                err_idx = code;
                                goto err_cb_overflow;
                            }

                            cb_idx = cb_vector_idx[code];
                            nnz = cb_idx >> 8 & 15;
                            sign = SHOW_UBITS(re, gb, nnz) << (cb_idx >> 12);
                            LAST_SKIP_BITS(re, gb, nnz);
                            cf = VMUL2S(cf, vq, cb_idx, sign, sf + idx);
                        } while (len -= 2);
                    }
                    break;

                default: // escape codebook: values may carry out-of-band escape sequences
                    for (group = 0; group < g_len; group++, cfo+=128) {
                        float *cf = cfo;
                        uint32_t *icf = (uint32_t *) cf; // written as raw bits, scaled afterwards
                        int len = off_len;

                        do {
                            int code;
                            unsigned nzt, nnz;
                            unsigned cb_idx;
                            uint32_t bits;
                            int j;

                            UPDATE_CACHE(re, gb);
                            GET_VLC(code, re, gb, vlc_tab, 8, 2);

                            if (!code) {
                                *icf++ = 0;
                                *icf++ = 0;
                                continue;
                            }

                            if (code >= cb_size) {
                                err_idx = code;
                                goto err_cb_overflow;
                            }

                            cb_idx =
 cb_vector_idx[code];
                            nnz = cb_idx >> 12;
                            nzt = cb_idx >> 8;
                            bits = SHOW_UBITS(re, gb, nnz) << (32-nnz);
                            LAST_SKIP_BITS(re, gb, nnz);

                            for (j = 0; j < 2; j++) {
                                if (nzt & 1<<j) {
                                    uint32_t b;
                                    int n;
                                    /* The total length of escape_sequence must be < 22 bits according
                                       to the specification (i.e. max is 111111110xxxxxxxxxxxx). */
                                    UPDATE_CACHE(re, gb);
                                    b = GET_CACHE(re, gb);
                                    b = 31 - av_log2(~b); // count of leading 1s = escape prefix length

                                    if (b > 8) {
                                        av_log(ac->avctx, AV_LOG_ERROR, "error in spectral data, ESC overflow\n");
                                        return -1;
                                    }

#if MIN_CACHE_BITS < 21
                                    LAST_SKIP_BITS(re, gb, b + 1);
                                    UPDATE_CACHE(re, gb);
#else
                                    SKIP_BITS(re, gb, b + 1);
#endif
                                    b += 4;
                                    n = (1 << b) + SHOW_UBITS(re, gb, b);
                                    LAST_SKIP_BITS(re, gb, b);
                                    *icf++ = cbrt_tab[n] | (bits & 1<<31); // magnitude^(4/3) with sign bit
                                    bits <<= 1;
                                } else {
                                    unsigned v = ((const uint32_t*)vq)[cb_idx & 15];
                                    *icf++ = (bits & 1<<31) | v;
                                    bits <<= !!v; // zero values consume no sign bit
                                }
                                cb_idx >>= 4;
                            }
                        } while (len -= 2);

                        // apply the band scalefactor to the raw dequantized values
                        ac->dsp.vector_fmul_scalar(cfo, cfo, sf[idx], off_len);
                    }
                }

                CLOSE_READER(re, gb);
            }
        }
        coef += g_len << 7; // advance past this group's windows (group_len * 128)
    }

    if (pulse_present) {
        idx = 0;
        for (i = 0; i < pulse->num_pulse; i++) {
            float co = coef_base[ pulse->pos[i] ];
            // find the scalefactor band containing this pulse position
            while (offsets[idx + 1] <= pulse->pos[i])
                idx++;
            if (band_type[idx] != NOISE_BT && sf[idx]) {
                float ico = -pulse->amp[i];
                if (co) {
                    // undo the x^(4/3) dequant, add the pulse, re-apply
                    co /= sf[idx];
                    ico = co / sqrtf(sqrtf(fabsf(co))) + (co > 0 ? -ico : ico);
                }
                coef_base[ pulse->pos[i] ] = cbrtf(fabsf(ico)) * ico * sf[idx];
            }
        }
    }
    return 0;

err_cb_overflow:
    av_log(ac->avctx, AV_LOG_ERROR,
           "Read beyond end of ff_aac_codebook_vectors[%d][]. 
index %d >= %d\n", band_type[idx], err_idx, ff_aac_spectral_sizes[band_type[idx]]);
    return -1;
}

/* flt16_* helpers emulate a reduced-precision (16-bit mantissa) float,
 * as required by the AAC-Main predictor state update. */

static av_always_inline float flt16_round(float pf)
{
    union float754 tmp;
    tmp.f = pf;
    tmp.i = (tmp.i + 0x00008000U) & 0xFFFF0000U; // round low half, keep top 16 bits
    return tmp.f;
}

static av_always_inline float flt16_even(float pf)
{
    union float754 tmp;
    tmp.f = pf;
    // round-half-to-even on the 16-bit boundary
    tmp.i = (tmp.i + 0x00007FFFU + (tmp.i & 0x00010000U >> 16)) & 0xFFFF0000U;
    return tmp.f;
}

static av_always_inline float flt16_trunc(float pf)
{
    union float754 pun;
    pun.f = pf;
    pun.i &= 0xFFFF0000U; // truncate to top 16 bits
    return pun.f;
}

/**
 * Update one backward-adaptive lattice predictor and optionally add its
 * prediction to the coefficient; reference: 4.6.7 (AAC-Main prediction).
 */
static av_always_inline void predict(AACContext *ac, PredictorState *ps, float *coef,
                                     int output_enable)
{
    const float a     = 0.953125; // 61.0 / 64
    const float alpha = 0.90625;  // 29.0 / 32
    float e0, e1;
    float pv;
    float k1, k2;

    k1 = ps->var0 > 1 ? ps->cor0 * flt16_even(a / ps->var0) : 0;
    k2 = ps->var1 > 1 ? ps->cor1 * flt16_even(a / ps->var1) : 0;

    pv = flt16_round(k1 * ps->r0 + k2 * ps->r1);
    if (output_enable)
        *coef += pv * ac->sf_scale; // sf_scale converts to this decoder's internal range

    e0 = *coef / ac->sf_scale;
    e1 = e0 - k1 * ps->r0;

    ps->cor1 = flt16_trunc(alpha * ps->cor1 + ps->r1 * e1);
    ps->var1 = flt16_trunc(alpha * ps->var1 + 0.5 * (ps->r1 * ps->r1 + e1 * e1));
    ps->cor0 = flt16_trunc(alpha * ps->cor0 + ps->r0 * e0);
    ps->var0 = flt16_trunc(alpha * ps->var0 + 0.5 * (ps->r0 * ps->r0 + e0 * e0));

    ps->r1 = flt16_trunc(a * (ps->r0 - k1 * e0));
    ps->r0 = flt16_trunc(a * e0);
}

/**
 * Apply AAC-Main style frequency domain prediction.
 */
static void apply_prediction(AACContext *ac, SingleChannelElement *sce)
{
    int sfb, k;

    if (!sce->ics.predictor_initialized) {
        reset_all_predictors(sce->predictor_state);
        sce->ics.predictor_initialized = 1;
    }

    if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
        // long windows only: run every predictor; output is added only where signaled
        for (sfb = 0; sfb < ff_aac_pred_sfb_max[ac->m4ac.sampling_index]; sfb++) {
            for (k = sce->ics.swb_offset[sfb]; k < sce->ics.swb_offset[sfb + 1]; k++) {
                predict(ac, &sce->predictor_state[k], &sce->coeffs[k],
                        sce->ics.predictor_present && sce->ics.prediction_used[sfb]);
            }
        }
        if (sce->ics.predictor_reset_group)
            reset_predictor_group(sce->predictor_state, sce->ics.predictor_reset_group);
    } else
        reset_all_predictors(sce->predictor_state); // short windows reset all state
}

/**
 * Decode an individual_channel_stream payload; reference: table 4.44.
 *
 * @param common_window   Channels have independent [0], or shared [1], Individual Channel Stream information.
 * @param scale_flag      scalable [1] or non-scalable [0] AAC (Unused until scalable AAC is implemented.)
 *
 * @return Returns error status. 0 - OK, !0 - error
 */
static int decode_ics(AACContext *ac, SingleChannelElement *sce,
                      GetBitContext *gb, int common_window, int scale_flag)
{
    Pulse pulse;
    TemporalNoiseShaping    *tns = &sce->tns;
    IndividualChannelStream *ics = &sce->ics;
    float *out = sce->coeffs;
    int global_gain, pulse_present = 0;

    /* This assignment is to silence a GCC warning about the variable being used
     * uninitialized when in fact it always is.
 */
    pulse.num_pulse = 0;

    global_gain = get_bits(gb, 8);

    if (!common_window && !scale_flag) {
        if (decode_ics_info(ac, ics, gb, 0) < 0)
            return -1;
    }

    if (decode_band_types(ac, sce->band_type, sce->band_type_run_end, gb, ics) < 0)
        return -1;
    if (decode_scalefactors(ac, sce->sf, gb, global_gain, ics, sce->band_type, sce->band_type_run_end) < 0)
        return -1;

    pulse_present = 0;
    if (!scale_flag) {
        if ((pulse_present = get_bits1(gb))) {
            if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
                av_log(ac->avctx, AV_LOG_ERROR, "Pulse tool not allowed in eight short sequence.\n");
                return -1;
            }
            if (decode_pulses(&pulse, gb, ics->swb_offset, ics->num_swb)) {
                av_log(ac->avctx, AV_LOG_ERROR, "Pulse data corrupt or invalid.\n");
                return -1;
            }
        }
        if ((tns->present = get_bits1(gb)) && decode_tns(ac, tns, gb, ics))
            return -1;
        if (get_bits1(gb)) { // gain_control_data
            av_log_missing_feature(ac->avctx, "SSR", 1);
            return -1;
        }
    }

    if (decode_spectrum_and_dequant(ac, out, gb, sce->sf, pulse_present, &pulse, ics, sce->band_type) < 0)
        return -1;

    // for common_window CPEs, prediction runs in decode_cpe after M/S
    if (ac->m4ac.object_type == AOT_AAC_MAIN && !common_window)
        apply_prediction(ac, sce);

    return 0;
}

/**
 * Mid/Side stereo decoding; reference: 4.6.8.1.3.
 */
static void apply_mid_side_stereo(AACContext *ac, ChannelElement *cpe)
{
    const IndividualChannelStream *ics = &cpe->ch[0].ics;
    float *ch0 = cpe->ch[0].coeffs;
    float *ch1 = cpe->ch[1].coeffs;
    int g, i, group, idx = 0;
    const uint16_t *offsets = ics->swb_offset;
    for (g = 0; g < ics->num_window_groups; g++) {
        for (i = 0; i < ics->max_sfb; i++, idx++) {
            // M/S only applies to ordinary bands, not noise/intensity ones
            if (cpe->ms_mask[idx] &&
                cpe->ch[0].band_type[idx] < NOISE_BT && cpe->ch[1].band_type[idx] < NOISE_BT) {
                for (group = 0; group < ics->group_len[g]; group++) {
                    // in-place butterfly: ch0 = M+S, ch1 = M-S
                    ac->dsp.butterflies_float(ch0 + group * 128 + offsets[i],
                                              ch1 + group * 128 + offsets[i],
                                              offsets[i+1] - offsets[i]);
                }
            }
        }
        ch0 += ics->group_len[g] * 128;
        ch1 += ics->group_len[g] * 128;
    }
}

/**
 * intensity stereo decoding; reference: 4.6.8.2.3
 *
 * @param ms_present   Indicates mid/side stereo presence.
 [0] mask is all 0s;
 *                     [1] mask is decoded from bitstream; [2] mask is all 1s;
 *                     [3] reserved for scalable AAC
 */
static void apply_intensity_stereo(ChannelElement *cpe, int ms_present)
{
    const IndividualChannelStream *ics = &cpe->ch[1].ics;
    SingleChannelElement         *sce1 = &cpe->ch[1];
    float *coef0 = cpe->ch[0].coeffs, *coef1 = cpe->ch[1].coeffs;
    const uint16_t *offsets = ics->swb_offset;
    int g, group, i, k, idx = 0;
    int c;
    float scale;
    for (g = 0; g < ics->num_window_groups; g++) {
        for (i = 0; i < ics->max_sfb;) {
            if (sce1->band_type[idx] == INTENSITY_BT || sce1->band_type[idx] == INTENSITY_BT2) {
                const int bt_run_end = sce1->band_type_run_end[idx];
                for (; i < bt_run_end; i++, idx++) {
                    // direction: +1 for INTENSITY_BT (15), -1 for INTENSITY_BT2 (14)
                    c = -1 + 2 * (sce1->band_type[idx] - 14);
                    if (ms_present)
                        c *= 1 - 2 * cpe->ms_mask[idx]; // ms_used flips the intensity direction
                    scale = c * sce1->sf[idx];
                    for (group = 0; group < ics->group_len[g]; group++)
                        for (k = offsets[i]; k < offsets[i + 1]; k++)
                            coef1[group * 128 + k] = scale * coef0[group * 128 + k];
                }
            } else {
                // skip the whole run of a non-intensity band type
                int bt_run_end = sce1->band_type_run_end[idx];
                idx += bt_run_end - i;
                i    = bt_run_end;
            }
        }
        coef0 += ics->group_len[g] * 128;
        coef1 += ics->group_len[g] * 128;
    }
}

/**
 * Decode a channel_pair_element; reference: table 4.4.
 *
 * @param elem_id Identifies the instance of a syntax element.
 *
 * @return Returns error status.
 0 - OK, !0 - error
 */
static int decode_cpe(AACContext *ac, GetBitContext *gb, ChannelElement *cpe)
{
    int i, ret, common_window, ms_present = 0;

    common_window = get_bits1(gb);
    if (common_window) {
        if (decode_ics_info(ac, &cpe->ch[0].ics, gb, 1))
            return -1;
        // copy the shared ICS to channel 1, preserving its previous window shape history
        i = cpe->ch[1].ics.use_kb_window[0];
        cpe->ch[1].ics = cpe->ch[0].ics;
        cpe->ch[1].ics.use_kb_window[1] = i;
        ms_present = get_bits(gb, 2);
        if (ms_present == 3) {
            av_log(ac->avctx, AV_LOG_ERROR, "ms_present = 3 is reserved.\n");
            return -1;
        } else if (ms_present)
            decode_mid_side_stereo(cpe, gb, ms_present);
    }
    if ((ret = decode_ics(ac, &cpe->ch[0], gb, common_window, 0)))
        return ret;
    if ((ret = decode_ics(ac, &cpe->ch[1], gb, common_window, 0)))
        return ret;

    if (common_window) {
        if (ms_present)
            apply_mid_side_stereo(ac, cpe);
        // with a common window, prediction was deferred from decode_ics
        if (ac->m4ac.object_type == AOT_AAC_MAIN) {
            apply_prediction(ac, &cpe->ch[0]);
            apply_prediction(ac, &cpe->ch[1]);
        }
    }

    apply_intensity_stereo(cpe, ms_present);
    return 0;
}

/**
 * Decode coupling_channel_element; reference: table 4.8.
 *
 * @param elem_id Identifies the instance of a syntax element.
 *
 * @return Returns error status. 0 - OK, !0 - error
 */
static int decode_cce(AACContext *ac, GetBitContext *gb, ChannelElement *che)
{
    int num_gain = 0;
    int c, g, sfb, ret;
    int sign;
    float scale;
    SingleChannelElement *sce = &che->ch[0];
    ChannelCoupling     *coup = &che->coup;

    coup->coupling_point = 2 * get_bits1(gb); // 0 = before TNS, 2 = after IMDCT (refined below)
    coup->num_coupled = get_bits(gb, 3);
    for (c = 0; c <= coup->num_coupled; c++) {
        num_gain++;
        coup->type[c] = get_bits1(gb) ?
 TYPE_CPE : TYPE_SCE;
        coup->id_select[c] = get_bits(gb, 4);
        if (coup->type[c] == TYPE_CPE) {
            coup->ch_select[c] = get_bits(gb, 2);
            if (coup->ch_select[c] == 3)
                num_gain++; // both channels coupled -> one extra gain list
        } else
            coup->ch_select[c] = 2;
    }
    coup->coupling_point += get_bits1(gb) || (coup->coupling_point >> 1);

    sign  = get_bits(gb, 1);
    scale = pow(2., pow(2., (int)get_bits(gb, 2) - 3)); // gain_element_scale: 2^(2^(n-3))

    if ((ret = decode_ics(ac, sce, gb, 0, 0)))
        return ret;

    for (c = 0; c < num_gain; c++) {
        int idx  = 0;
        int cge  = 1;  // common gain element flag (first list always common)
        int gain = 0;
        float gain_cache = 1.;
        if (c) {
            cge = coup->coupling_point == AFTER_IMDCT ? 1 : get_bits1(gb);
            gain = cge ? get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60: 0;
            gain_cache = pow(scale, -gain);
        }
        if (coup->coupling_point == AFTER_IMDCT) {
            coup->gain[c][0] = gain_cache;
        } else {
            for (g = 0; g < sce->ics.num_window_groups; g++) {
                for (sfb = 0; sfb < sce->ics.max_sfb; sfb++, idx++) {
                    if (sce->band_type[idx] != ZERO_BT) {
                        if (!cge) {
                            // per-band differential gain; with sign, LSB carries the sign bit
                            int t = get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
                            if (t) {
                                int s = 1;
                                t = gain += t;
                                if (sign) {
                                    s  -= 2 * (t & 0x1);
                                    t >>= 1;
                                }
                                gain_cache = pow(scale, -t) * s;
                            }
                        }
                        coup->gain[c][idx] = gain_cache;
                    }
                }
            }
        }
    }
    return 0;
}

/**
 * Parse whether channels are to be excluded from Dynamic Range Compression; reference: table 4.53.
 *
 * @return Returns number of bytes consumed.
 */
static int decode_drc_channel_exclusions(DynamicRangeControl *che_drc,
                                         GetBitContext *gb)
{
    int i;
    int num_excl_chan = 0;

    // 7 flags per byte; a trailing bit says whether another byte follows
    do {
        for (i = 0; i < 7; i++)
            che_drc->exclude_mask[num_excl_chan++] = get_bits1(gb);
    } while (num_excl_chan < MAX_CHANNELS - 7 && get_bits1(gb));

    return num_excl_chan / 7;
}

/**
 * Decode dynamic range information; reference: table 4.52.
 *
 * @param cnt length of TYPE_FIL syntactic element in bytes
 *
 * @return Returns number of bytes consumed.
 */
static int decode_dynamic_range(DynamicRangeControl *che_drc,
                                GetBitContext *gb, int cnt)
{
    int n             = 1;
    int drc_num_bands = 1;
    int i;

    /* pce_tag_present?
 */
    if (get_bits1(gb)) {
        che_drc->pce_instance_tag  = get_bits(gb, 4);
        skip_bits(gb, 4); // tag_reserved_bits
        n++;
    }

    /* excluded_chns_present? */
    if (get_bits1(gb)) {
        n += decode_drc_channel_exclusions(che_drc, gb);
    }

    /* drc_bands_present? */
    if (get_bits1(gb)) {
        che_drc->band_incr            = get_bits(gb, 4);
        che_drc->interpolation_scheme = get_bits(gb, 4);
        n++;
        drc_num_bands += che_drc->band_incr;
        for (i = 0; i < drc_num_bands; i++) {
            che_drc->band_top[i] = get_bits(gb, 8);
            n++;
        }
    }

    /* prog_ref_level_present? */
    if (get_bits1(gb)) {
        che_drc->prog_ref_level = get_bits(gb, 7);
        skip_bits1(gb); // prog_ref_level_reserved_bits
        n++;
    }

    for (i = 0; i < drc_num_bands; i++) {
        che_drc->dyn_rng_sgn[i] = get_bits1(gb);
        che_drc->dyn_rng_ctl[i] = get_bits(gb, 7);
        n++;
    }

    return n;
}

/**
 * Decode extension data (incomplete); reference: table 4.51.
 *
 * @param cnt length of TYPE_FIL syntactic element in bytes
 *
 * @return Returns number of bytes consumed
 */
static int decode_extension_payload(AACContext *ac, GetBitContext *gb, int cnt,
                                    ChannelElement *che, enum RawDataBlockType elem_type)
{
    int crc_flag = 0;
    int res = cnt;
    switch (get_bits(gb, 4)) { // extension type
    case EXT_SBR_DATA_CRC:
        crc_flag++;
        /* fallthrough: CRC variant shares the SBR path */
    case EXT_SBR_DATA:
        if (!che) {
            av_log(ac->avctx, AV_LOG_ERROR, "SBR was found before the first channel element.\n");
            return res;
        } else if (!ac->m4ac.sbr) {
            av_log(ac->avctx, AV_LOG_ERROR, "SBR signaled to be not-present but was found in the bitstream.\n");
            skip_bits_long(gb, 8 * cnt - 4);
            return res;
        } else if (ac->m4ac.sbr == -1 && ac->output_configured == OC_LOCKED) {
            av_log(ac->avctx, AV_LOG_ERROR, "Implicit SBR was found with a first occurrence after the first frame.\n");
            skip_bits_long(gb, 8 * cnt - 4);
            return res;
        } else if (ac->m4ac.ps == -1 && ac->output_configured < OC_LOCKED && ac->avctx->channels == 1) {
            // implicit SBR+PS on mono: reconfigure output before decoding the extension
            ac->m4ac.sbr = 1;
            ac->m4ac.ps = 1;
            output_configure(ac, ac->che_pos, ac->che_pos, ac->m4ac.chan_config, ac->output_configured);
        } else {
            ac->m4ac.sbr = 1;
        }
        res =
ff_decode_sbr_extension(ac, &che->sbr, gb, crc_flag, cnt, elem_type); break; case EXT_DYNAMIC_RANGE: res = decode_dynamic_range(&ac->che_drc, gb, cnt); break; case EXT_FILL: case EXT_FILL_DATA: case EXT_DATA_ELEMENT: default: skip_bits_long(gb, 8 * cnt - 4); break; }; return res; } /** * Decode Temporal Noise Shaping filter coefficients and apply all-pole filters; reference: 4.6.9.3. * * @param decode 1 if tool is used normally, 0 if tool is used in LTP. * @param coef spectral coefficients */ static void apply_tns(float coef[1024], TemporalNoiseShaping *tns, IndividualChannelStream *ics, int decode) { const int mmm = FFMIN(ics->tns_max_bands, ics->max_sfb); int w, filt, m, i; int bottom, top, order, start, end, size, inc; float lpc[TNS_MAX_ORDER]; for (w = 0; w < ics->num_windows; w++) { bottom = ics->num_swb; for (filt = 0; filt < tns->n_filt[w]; filt++) { top = bottom; bottom = FFMAX(0, top - tns->length[w][filt]); order = tns->order[w][filt]; if (order == 0) continue; // tns_decode_coef compute_lpc_coefs(tns->coef[w][filt], order, lpc, 0, 0, 0); start = ics->swb_offset[FFMIN(bottom, mmm)]; end = ics->swb_offset[FFMIN( top, mmm)]; if ((size = end - start) <= 0) continue; if (tns->direction[w][filt]) { inc = -1; start = end - 1; } else { inc = 1; } start += w * 128; // ar filter for (m = 0; m < size; m++, start += inc) for (i = 1; i <= FFMIN(m, order); i++) coef[start] -= coef[start - i * inc] * lpc[i - 1]; } } } /** * Conduct IMDCT and windowing. */ static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce, float bias) { IndividualChannelStream *ics = &sce->ics; float *in = sce->coeffs; float *out = sce->ret; float *saved = sce->saved; const float *swindow = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128; const float *lwindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024; const float *swindow_prev = ics->use_kb_window[1] ? 
ff_aac_kbd_short_128 : ff_sine_128; float *buf = ac->buf_mdct; float *temp = ac->temp; int i; // imdct if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) { if (ics->window_sequence[1] == ONLY_LONG_SEQUENCE || ics->window_sequence[1] == LONG_STOP_SEQUENCE) av_log(ac->avctx, AV_LOG_WARNING, "Transition from an ONLY_LONG or LONG_STOP to an EIGHT_SHORT sequence detected. " "If you heard an audible artifact, please submit the sample to the FFmpeg developers.\n"); for (i = 0; i < 1024; i += 128) ff_imdct_half(&ac->mdct_small, buf + i, in + i); } else ff_imdct_half(&ac->mdct, buf, in); /* window overlapping * NOTE: To simplify the overlapping code, all 'meaningless' short to long * and long to short transitions are considered to be short to short * transitions. This leaves just two cases (long to long and short to short) * with a little special sauce for EIGHT_SHORT_SEQUENCE. */ if ((ics->window_sequence[1] == ONLY_LONG_SEQUENCE || ics->window_sequence[1] == LONG_STOP_SEQUENCE) && (ics->window_sequence[0] == ONLY_LONG_SEQUENCE || ics->window_sequence[0] == LONG_START_SEQUENCE)) { ac->dsp.vector_fmul_window( out, saved, buf, lwindow_prev, bias, 512); } else { for (i = 0; i < 448; i++) out[i] = saved[i] + bias; if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) { ac->dsp.vector_fmul_window(out + 448 + 0*128, saved + 448, buf + 0*128, swindow_prev, bias, 64); ac->dsp.vector_fmul_window(out + 448 + 1*128, buf + 0*128 + 64, buf + 1*128, swindow, bias, 64); ac->dsp.vector_fmul_window(out + 448 + 2*128, buf + 1*128 + 64, buf + 2*128, swindow, bias, 64); ac->dsp.vector_fmul_window(out + 448 + 3*128, buf + 2*128 + 64, buf + 3*128, swindow, bias, 64); ac->dsp.vector_fmul_window(temp, buf + 3*128 + 64, buf + 4*128, swindow, bias, 64); memcpy( out + 448 + 4*128, temp, 64 * sizeof(float)); } else { ac->dsp.vector_fmul_window(out + 448, saved + 448, buf, swindow_prev, bias, 64); for (i = 576; i < 1024; i++) out[i] = buf[i-512] + bias; } } // buffer update if 
(ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) { for (i = 0; i < 64; i++) saved[i] = temp[64 + i] - bias; ac->dsp.vector_fmul_window(saved + 64, buf + 4*128 + 64, buf + 5*128, swindow, 0, 64); ac->dsp.vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 0, 64); ac->dsp.vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 0, 64); memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(float)); } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) { memcpy( saved, buf + 512, 448 * sizeof(float)); memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(float)); } else { // LONG_STOP or ONLY_LONG memcpy( saved, buf + 512, 512 * sizeof(float)); } } /** * Apply dependent channel coupling (applied before IMDCT). * * @param index index into coupling gain array */ static void apply_dependent_coupling(AACContext *ac, SingleChannelElement *target, ChannelElement *cce, int index) { IndividualChannelStream *ics = &cce->ch[0].ics; const uint16_t *offsets = ics->swb_offset; float *dest = target->coeffs; const float *src = cce->ch[0].coeffs; int g, i, group, k, idx = 0; if (ac->m4ac.object_type == AOT_AAC_LTP) { av_log(ac->avctx, AV_LOG_ERROR, "Dependent coupling is not supported together with LTP\n"); return; } for (g = 0; g < ics->num_window_groups; g++) { for (i = 0; i < ics->max_sfb; i++, idx++) { if (cce->ch[0].band_type[idx] != ZERO_BT) { const float gain = cce->coup.gain[index][idx]; for (group = 0; group < ics->group_len[g]; group++) { for (k = offsets[i]; k < offsets[i + 1]; k++) { // XXX dsputil-ize dest[group * 128 + k] += gain * src[group * 128 + k]; } } } } dest += ics->group_len[g] * 128; src += ics->group_len[g] * 128; } } /** * Apply independent channel coupling (applied after IMDCT). 
* * @param index index into coupling gain array */ static void apply_independent_coupling(AACContext *ac, SingleChannelElement *target, ChannelElement *cce, int index) { int i; const float gain = cce->coup.gain[index][0]; const float bias = ac->add_bias; const float *src = cce->ch[0].ret; float *dest = target->ret; const int len = 1024 << (ac->m4ac.sbr == 1); for (i = 0; i < len; i++) dest[i] += gain * (src[i] - bias); } /** * channel coupling transformation interface * * @param index index into coupling gain array * @param apply_coupling_method pointer to (in)dependent coupling function */ static void apply_channel_coupling(AACContext *ac, ChannelElement *cc, enum RawDataBlockType type, int elem_id, enum CouplingPoint coupling_point, void (*apply_coupling_method)(AACContext *ac, SingleChannelElement *target, ChannelElement *cce, int index)) { int i, c; for (i = 0; i < MAX_ELEM_ID; i++) { ChannelElement *cce = ac->che[TYPE_CCE][i]; int index = 0; if (cce && cce->coup.coupling_point == coupling_point) { ChannelCoupling *coup = &cce->coup; for (c = 0; c <= coup->num_coupled; c++) { if (coup->type[c] == type && coup->id_select[c] == elem_id) { if (coup->ch_select[c] != 1) { apply_coupling_method(ac, &cc->ch[0], cce, index); if (coup->ch_select[c] != 0) index++; } if (coup->ch_select[c] != 2) apply_coupling_method(ac, &cc->ch[1], cce, index++); } else index += 1 + (coup->ch_select[c] == 3); } } } } /** * Convert spectral data to float samples, applying all supported tools as appropriate. */ static void spectral_to_sample(AACContext *ac) { int i, type; float imdct_bias = (ac->m4ac.sbr <= 0) ? 
ac->add_bias : 0.0f; for (type = 3; type >= 0; type--) { for (i = 0; i < MAX_ELEM_ID; i++) { ChannelElement *che = ac->che[type][i]; if (che) { if (type <= TYPE_CPE) apply_channel_coupling(ac, che, type, i, BEFORE_TNS, apply_dependent_coupling); if (che->ch[0].tns.present) apply_tns(che->ch[0].coeffs, &che->ch[0].tns, &che->ch[0].ics, 1); if (che->ch[1].tns.present) apply_tns(che->ch[1].coeffs, &che->ch[1].tns, &che->ch[1].ics, 1); if (type <= TYPE_CPE) apply_channel_coupling(ac, che, type, i, BETWEEN_TNS_AND_IMDCT, apply_dependent_coupling); if (type != TYPE_CCE || che->coup.coupling_point == AFTER_IMDCT) { imdct_and_windowing(ac, &che->ch[0], imdct_bias); if (type == TYPE_CPE) { imdct_and_windowing(ac, &che->ch[1], imdct_bias); } if (ac->m4ac.sbr > 0) { ff_sbr_apply(ac, &che->sbr, type, che->ch[0].ret, che->ch[1].ret); } } if (type <= TYPE_CCE) apply_channel_coupling(ac, che, type, i, AFTER_IMDCT, apply_independent_coupling); } } } } static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb) { int size; AACADTSHeaderInfo hdr_info; size = ff_aac_parse_header(gb, &hdr_info); if (size > 0) { if (ac->output_configured != OC_LOCKED && hdr_info.chan_config) { enum ChannelPosition new_che_pos[4][MAX_ELEM_ID]; memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0])); ac->m4ac.chan_config = hdr_info.chan_config; if (set_default_channel_config(ac, new_che_pos, hdr_info.chan_config)) return -7; if (output_configure(ac, ac->che_pos, new_che_pos, hdr_info.chan_config, OC_TRIAL_FRAME)) return -7; } else if (ac->output_configured != OC_LOCKED) { ac->output_configured = OC_NONE; } if (ac->output_configured != OC_LOCKED) { ac->m4ac.sbr = -1; ac->m4ac.ps = -1; } ac->m4ac.sample_rate = hdr_info.sample_rate; ac->m4ac.sampling_index = hdr_info.sampling_index; ac->m4ac.object_type = hdr_info.object_type; if (!ac->avctx->sample_rate) ac->avctx->sample_rate = hdr_info.sample_rate; if (hdr_info.num_aac_frames == 1) { if (!hdr_info.crc_absent) skip_bits(gb, 16); } 
else { av_log_missing_feature(ac->avctx, "More than one AAC RDB per ADTS frame is", 0); return -1; } } return size; } static int aac_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; AACContext *ac = avctx->priv_data; ChannelElement *che = NULL, *che_prev = NULL; GetBitContext gb; enum RawDataBlockType elem_type, elem_type_prev = TYPE_END; int err, elem_id, data_size_tmp; int buf_consumed; int samples = 0, multiplier; int buf_offset; init_get_bits(&gb, buf, buf_size * 8); if (show_bits(&gb, 12) == 0xfff) { if (parse_adts_frame_header(ac, &gb) < 0) { av_log(avctx, AV_LOG_ERROR, "Error decoding AAC frame header.\n"); return -1; } if (ac->m4ac.sampling_index > 12) { av_log(ac->avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", ac->m4ac.sampling_index); return -1; } } memset(ac->tags_seen_this_frame, 0, sizeof(ac->tags_seen_this_frame)); // parse while ((elem_type = get_bits(&gb, 3)) != TYPE_END) { elem_id = get_bits(&gb, 4); if (elem_type < TYPE_DSE) { if (!(che=get_che(ac, elem_type, elem_id))) { av_log(ac->avctx, AV_LOG_ERROR, "channel element %d.%d is not allocated\n", elem_type, elem_id); return -1; } samples = 1024; } switch (elem_type) { case TYPE_SCE: err = decode_ics(ac, &che->ch[0], &gb, 0, 0); break; case TYPE_CPE: err = decode_cpe(ac, &gb, che); break; case TYPE_CCE: err = decode_cce(ac, &gb, che); break; case TYPE_LFE: err = decode_ics(ac, &che->ch[0], &gb, 0, 0); break; case TYPE_DSE: err = skip_data_stream_element(ac, &gb); break; case TYPE_PCE: { enum ChannelPosition new_che_pos[4][MAX_ELEM_ID]; memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0])); if ((err = decode_pce(ac, new_che_pos, &gb))) break; if (ac->output_configured > OC_TRIAL_PCE) av_log(avctx, AV_LOG_ERROR, "Not evaluating a further program_config_element as this construct is dubious at best.\n"); else err = output_configure(ac, ac->che_pos, new_che_pos, 0, OC_TRIAL_PCE); 
break; } case TYPE_FIL: if (elem_id == 15) elem_id += get_bits(&gb, 8) - 1; if (get_bits_left(&gb) < 8 * elem_id) { av_log(avctx, AV_LOG_ERROR, overread_err); return -1; } while (elem_id > 0) elem_id -= decode_extension_payload(ac, &gb, elem_id, che_prev, elem_type_prev); err = 0; /* FIXME */ break; default: err = -1; /* should not happen, but keeps compiler happy */ break; } che_prev = che; elem_type_prev = elem_type; if (err) return err; if (get_bits_left(&gb) < 3) { av_log(avctx, AV_LOG_ERROR, overread_err); return -1; } } spectral_to_sample(ac); multiplier = (ac->m4ac.sbr == 1) ? ac->m4ac.ext_sample_rate > ac->m4ac.sample_rate : 0; samples <<= multiplier; if (ac->output_configured < OC_LOCKED) { avctx->sample_rate = ac->m4ac.sample_rate << multiplier; avctx->frame_size = samples; } data_size_tmp = samples * avctx->channels * sizeof(int16_t); if (*data_size < data_size_tmp) { av_log(avctx, AV_LOG_ERROR, "Output buffer too small (%d) or trying to output too many samples (%d) for this frame.\n", *data_size, data_size_tmp); return -1; } *data_size = data_size_tmp; if (samples) ac->dsp.float_to_int16_interleave(data, (const float **)ac->output_data, samples, avctx->channels); if (ac->output_configured) ac->output_configured = OC_LOCKED; buf_consumed = (get_bits_count(&gb) + 7) >> 3; for (buf_offset = buf_consumed; buf_offset < buf_size; buf_offset++) if (buf[buf_offset]) break; return buf_size > buf_offset ? 
buf_consumed : buf_size; } static av_cold int aac_decode_close(AVCodecContext *avctx) { AACContext *ac = avctx->priv_data; int i, type; for (i = 0; i < MAX_ELEM_ID; i++) { for (type = 0; type < 4; type++) { if (ac->che[type][i]) ff_aac_sbr_ctx_close(&ac->che[type][i]->sbr); av_freep(&ac->che[type][i]); } } ff_mdct_end(&ac->mdct); ff_mdct_end(&ac->mdct_small); return 0; } AVCodec aac_decoder = { "aac", AVMEDIA_TYPE_AUDIO, CODEC_ID_AAC, sizeof(AACContext), aac_decode_init, NULL, aac_decode_close, aac_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"), .sample_fmts = (const enum SampleFormat[]) { SAMPLE_FMT_S16,SAMPLE_FMT_NONE }, .channel_layouts = aac_channel_layout, };
123linslouis-android-video-cutter
jni/libavcodec/aacdec.c
C
asf20
76,773
/*
 * Dirac decoder support via Schroedinger libraries
 * Copyright (c) 2008 BBC, Anuradha Suraparaju <asuraparaju at gmail dot com >
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Dirac decoder support via libschroedinger-1.0 libraries. More details about
 * the Schroedinger project can be found at http://www.diracvideo.org/.
 * The library implements Dirac Specification Version 2.2.
 * (http://dirac.sourceforge.net/specification.html).
 */

#include "avcodec.h"
#include "libdirac_libschro.h"
#include "libschroedinger.h"

#undef NDEBUG
#include <assert.h>

#include <schroedinger/schro.h>
#include <schroedinger/schrodebug.h>
#include <schroedinger/schrovideoformat.h>

/** libschroedinger decoder private data */
typedef struct FfmpegSchroDecoderParams {
    /** Schroedinger video format */
    SchroVideoFormat *format;

    /** Schroedinger frame format */
    SchroFrameFormat frame_format;

    /** decoder handle */
    SchroDecoder* decoder;

    /** queue storing decoded frames */
    FfmpegDiracSchroQueue dec_frame_queue;

    /** end of sequence signalled */
    int eos_signalled;

    /** end of sequence pulled */
    int eos_pulled;

    /** decoded picture */
    AVPicture dec_pic;
} FfmpegSchroDecoderParams;

/** cursor over the raw input buffer while splitting it into parse units */
typedef struct FfmpegSchroParseUnitContext {
    const uint8_t *buf;
    int buf_size;
} FfmpegSchroParseUnitContext;

static void libschroedinger_decode_buffer_free(SchroBuffer *schro_buf,
                                               void *priv);

/** Initialize a parse-unit cursor over the given input buffer. */
static void FfmpegSchroParseContextInit(FfmpegSchroParseUnitContext *parse_ctx,
                                        const uint8_t *buf, int buf_size)
{
    parse_ctx->buf      = buf;
    parse_ctx->buf_size = buf_size;
}

/**
 * Extract the next Dirac parse unit from the input buffer and wrap it in a
 * SchroBuffer that owns a private copy of the data; advances the cursor.
 *
 * @return a new SchroBuffer, or NULL if no complete parse unit is available
 *         in the buffer (or if allocation failed)
 */
static SchroBuffer* FfmpegFindNextSchroParseUnit(FfmpegSchroParseUnitContext *parse_ctx)
{
    SchroBuffer *enc_buf = NULL;
    int next_pu_offset = 0;
    unsigned char *in_buf;

    /* A Dirac parse unit starts with the 'BBCD' parse-info prefix and is
     * at least 13 bytes long. */
    if (parse_ctx->buf_size < 13 ||
        parse_ctx->buf[0] != 'B' ||
        parse_ctx->buf[1] != 'B' ||
        parse_ctx->buf[2] != 'C' ||
        parse_ctx->buf[3] != 'D')
        return NULL;

    /* Big-endian "next parse unit offset" field of the parse-info header. */
    next_pu_offset = (parse_ctx->buf[5] << 24) +
                     (parse_ctx->buf[6] << 16) +
                     (parse_ctx->buf[7] <<  8) +
                      parse_ctx->buf[8];

    /* An end-of-sequence unit carries offset 0; it is exactly 13 bytes. */
    if (next_pu_offset == 0 &&
        SCHRO_PARSE_CODE_IS_END_OF_SEQUENCE(parse_ctx->buf[4]))
        next_pu_offset = 13;

    if (next_pu_offset <= 0 || parse_ctx->buf_size < next_pu_offset)
        return NULL;

    in_buf = av_malloc(next_pu_offset);
    /* FIX: the original memcpy'd into the allocation without checking it,
     * dereferencing NULL on out-of-memory. */
    if (!in_buf)
        return NULL;

    memcpy(in_buf, parse_ctx->buf, next_pu_offset);
    enc_buf       = schro_buffer_new_with_data(in_buf, next_pu_offset);
    enc_buf->free = libschroedinger_decode_buffer_free;
    enc_buf->priv = in_buf;

    parse_ctx->buf      += next_pu_offset;
    parse_ctx->buf_size -= next_pu_offset;

    return enc_buf;
}

/**
 * Returns FFmpeg chroma format.
 */
static enum PixelFormat GetFfmpegChromaFormat(SchroChromaFormat schro_pix_fmt)
{
    int num_formats = sizeof(ffmpeg_schro_pixel_format_map) /
                      sizeof(ffmpeg_schro_pixel_format_map[0]);
    int idx;

    for (idx = 0; idx < num_formats; ++idx)
        if (ffmpeg_schro_pixel_format_map[idx].schro_pix_fmt == schro_pix_fmt)
            return ffmpeg_schro_pixel_format_map[idx].ff_pix_fmt;
    return PIX_FMT_NONE;
}

static av_cold int libschroedinger_decode_init(AVCodecContext *avccontext)
{
    FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data;

    /* First of all, initialize our supporting libraries. */
    schro_init();

    schro_debug_set_level(avccontext->debug);
    p_schro_params->decoder = schro_decoder_new();
    /* FIX: validate the decoder handle *before* using it; the original
     * called schro_decoder_set_skip_ratio() first, which dereferences a
     * NULL handle when schro_decoder_new() fails. */
    if (!p_schro_params->decoder)
        return -1;
    schro_decoder_set_skip_ratio(p_schro_params->decoder, 1);

    /* Initialize the decoded frame queue. */
    ff_dirac_schro_queue_init(&p_schro_params->dec_frame_queue);
    return 0;
}

/** SchroBuffer free callback: releases the private copy of the parse unit. */
static void libschroedinger_decode_buffer_free(SchroBuffer *schro_buf,
                                               void *priv)
{
    av_freep(&priv);
}

/** Frame-queue destructor callback: drop one reference on a SchroFrame. */
static void libschroedinger_decode_frame_free(void *frame)
{
    schro_frame_unref(frame);
}

/** Propagate sequence parameters from the first access unit to FFmpeg. */
static void libschroedinger_handle_first_access_unit(AVCodecContext *avccontext)
{
    FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data;
    SchroDecoder *decoder = p_schro_params->decoder;

    p_schro_params->format = schro_decoder_get_video_format(decoder);

    /* Tell FFmpeg about sequence details. */
    if (avcodec_check_dimensions(avccontext, p_schro_params->format->width,
                                 p_schro_params->format->height) < 0) {
        av_log(avccontext, AV_LOG_ERROR, "invalid dimensions (%dx%d)\n",
               p_schro_params->format->width, p_schro_params->format->height);
        avccontext->height = avccontext->width = 0;
        return;
    }
    avccontext->height = p_schro_params->format->height;
    avccontext->width  = p_schro_params->format->width;
    avccontext->pix_fmt =
        GetFfmpegChromaFormat(p_schro_params->format->chroma_format);

    if (ff_get_schro_frame_format(p_schro_params->format->chroma_format,
                                  &p_schro_params->frame_format) == -1) {
        av_log(avccontext, AV_LOG_ERROR,
               "This codec currently only supports planar YUV 4:2:0, 4:2:2 "
               "and 4:4:4 formats.\n");
        return;
    }

    avccontext->time_base.den = p_schro_params->format->frame_rate_numerator;
    avccontext->time_base.num = p_schro_params->format->frame_rate_denominator;

    /* Allocate the reusable output picture once. */
    if (!p_schro_params->dec_pic.data[0])
        avpicture_alloc(&p_schro_params->dec_pic,
                        avccontext->pix_fmt,
                        avccontext->width,
                        avccontext->height);
}

static int libschroedinger_decode_frame(AVCodecContext *avccontext,
                                        void *data, int *data_size,
                                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data;
    SchroDecoder *decoder = p_schro_params->decoder;
    AVPicture *picture = data;
    SchroBuffer *enc_buf;
    SchroFrame* frame;
    int state;
    int go = 1;
    int outer = 1;
    FfmpegSchroParseUnitContext parse_ctx;

    *data_size = 0;

    FfmpegSchroParseContextInit(&parse_ctx, buf, buf_size);
    /* An empty packet signals end of stream; tell the decoder once. */
    if (!buf_size) {
        if (!p_schro_params->eos_signalled) {
            state = schro_decoder_push_end_of_stream(decoder);
            p_schro_params->eos_signalled = 1;
        }
    }

    /* Loop through all the individual parse units in the input buffer */
    do {
        if ((enc_buf = FfmpegFindNextSchroParseUnit(&parse_ctx))) {
            /* Push buffer into decoder. */
            if (SCHRO_PARSE_CODE_IS_PICTURE(enc_buf->data[4]) &&
                SCHRO_PARSE_CODE_NUM_REFS(enc_buf->data[4]) > 0)
                avccontext->has_b_frames = 1;
            state = schro_decoder_push(decoder, enc_buf);
            if (state == SCHRO_DECODER_FIRST_ACCESS_UNIT)
                libschroedinger_handle_first_access_unit(avccontext);
            go = 1;
        } else
            outer = 0;
        /* NOTE: the original cached p_schro_params->format in an unused
         * local here; the dead assignment has been removed. */

        while (go) {
            /* Parse data and process result. */
            state = schro_decoder_wait(decoder);
            switch (state) {
            case SCHRO_DECODER_FIRST_ACCESS_UNIT:
                libschroedinger_handle_first_access_unit(avccontext);
                break;

            case SCHRO_DECODER_NEED_BITS:
                /* Need more input data - stop iterating over what we have. */
                go = 0;
                break;

            case SCHRO_DECODER_NEED_FRAME:
                /* Decoder needs a frame - create one and push it in. */
                frame = ff_create_schro_frame(avccontext,
                                              p_schro_params->frame_format);
                schro_decoder_add_output_picture(decoder, frame);
                break;

            case SCHRO_DECODER_OK:
                /* Pull a frame out of the decoder. */
                frame = schro_decoder_pull(decoder);

                if (frame)
                    ff_dirac_schro_queue_push_back(&p_schro_params->dec_frame_queue,
                                                   frame);
                break;

            case SCHRO_DECODER_EOS:
                go = 0;
                p_schro_params->eos_pulled = 1;
                schro_decoder_reset(decoder);
                outer = 0;
                break;

            case SCHRO_DECODER_ERROR:
                return -1;
                break;
            }
        }
    } while (outer);

    /* Grab next frame to be returned from the top of the queue. */
    frame = ff_dirac_schro_queue_pop(&p_schro_params->dec_frame_queue);

    if (frame) {
        memcpy(p_schro_params->dec_pic.data[0],
               frame->components[0].data,
               frame->components[0].length);

        memcpy(p_schro_params->dec_pic.data[1],
               frame->components[1].data,
               frame->components[1].length);

        memcpy(p_schro_params->dec_pic.data[2],
               frame->components[2].data,
               frame->components[2].length);

        /* Fill picture with current buffer data from Schroedinger. */
        avpicture_fill(picture, p_schro_params->dec_pic.data[0],
                       avccontext->pix_fmt,
                       avccontext->width, avccontext->height);

        *data_size = sizeof(AVPicture);

        /* Now free the frame resources. */
        libschroedinger_decode_frame_free(frame);
    }
    return buf_size;
}

static av_cold int libschroedinger_decode_close(AVCodecContext *avccontext)
{
    FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data;

    /* Free the decoder. */
    schro_decoder_free(p_schro_params->decoder);
    av_freep(&p_schro_params->format);

    avpicture_free(&p_schro_params->dec_pic);

    /* Free data in the output frame queue. */
    ff_dirac_schro_queue_free(&p_schro_params->dec_frame_queue,
                              libschroedinger_decode_frame_free);

    return 0;
}

static void libschroedinger_flush(AVCodecContext *avccontext)
{
    /* Got a seek request. Free the decoded frames queue and then reset
     * the decoder */
    FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data;

    /* Free data in the output frame queue. */
    ff_dirac_schro_queue_free(&p_schro_params->dec_frame_queue,
                              libschroedinger_decode_frame_free);

    ff_dirac_schro_queue_init(&p_schro_params->dec_frame_queue);
    schro_decoder_reset(p_schro_params->decoder);
    p_schro_params->eos_pulled    = 0;
    p_schro_params->eos_signalled = 0;
}

AVCodec libschroedinger_decoder = {
    "libschroedinger",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_DIRAC,
    sizeof(FfmpegSchroDecoderParams),
    libschroedinger_decode_init,
    NULL,
    libschroedinger_decode_close,
    libschroedinger_decode_frame,
    CODEC_CAP_DELAY,
    .flush = libschroedinger_flush,
    .long_name = NULL_IF_CONFIG_SMALL("libschroedinger Dirac 2.2"),
};
123linslouis-android-video-cutter
jni/libavcodec/libschroedingerdec.c
C
asf20
11,988
/*
 * Video Acceleration API (shared data between FFmpeg and the video player)
 * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1
 *
 * Copyright (C) 2008-2009 Splitted-Desktop Systems
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_VAAPI_H
#define AVCODEC_VAAPI_H

#include <stdint.h>

/**
 * \defgroup VAAPI_Decoding VA API Decoding
 * \ingroup Decoder
 * @{
 */

/**
 * This structure is used to share data between the FFmpeg library and
 * the client video application.
 * This shall be zero-allocated and available as
 * AVCodecContext.hwaccel_context. All user members can be set once
 * during initialization or through each AVCodecContext.get_buffer()
 * function call. In any case, they must be valid prior to calling
 * decoding functions.
 */
struct vaapi_context {
    /**
     * Window system dependent data
     *
     * - encoding: unused
     * - decoding: Set by user
     */
    void *display;

    /**
     * Configuration ID
     *
     * - encoding: unused
     * - decoding: Set by user
     */
    uint32_t config_id;

    /**
     * Context ID (video decode pipeline)
     *
     * - encoding: unused
     * - decoding: Set by user
     */
    uint32_t context_id;

    /**
     * VAPictureParameterBuffer ID
     *
     * - encoding: unused
     * - decoding: Set by libavcodec
     */
    uint32_t pic_param_buf_id;

    /**
     * VAIQMatrixBuffer ID
     *
     * - encoding: unused
     * - decoding: Set by libavcodec
     */
    uint32_t iq_matrix_buf_id;

    /**
     * VABitPlaneBuffer ID (for VC-1 decoding)
     *
     * - encoding: unused
     * - decoding: Set by libavcodec
     */
    uint32_t bitplane_buf_id;

    /**
     * Slice parameter/data buffer IDs
     *
     * - encoding: unused
     * - decoding: Set by libavcodec
     */
    uint32_t *slice_buf_ids;

    /**
     * Number of effective slice buffer IDs to send to the HW
     *
     * - encoding: unused
     * - decoding: Set by libavcodec
     */
    unsigned int n_slice_buf_ids;

    /**
     * Size of pre-allocated slice_buf_ids
     *
     * - encoding: unused
     * - decoding: Set by libavcodec
     */
    unsigned int slice_buf_ids_alloc;

    /**
     * Pointer to VASliceParameterBuffers
     *
     * - encoding: unused
     * - decoding: Set by libavcodec
     */
    void *slice_params;

    /**
     * Size of a VASliceParameterBuffer element
     *
     * - encoding: unused
     * - decoding: Set by libavcodec
     */
    unsigned int slice_param_size;

    /**
     * Size of pre-allocated slice_params
     *
     * - encoding: unused
     * - decoding: Set by libavcodec
     */
    unsigned int slice_params_alloc;

    /**
     * Number of slices currently filled in
     *
     * - encoding: unused
     * - decoding: Set by libavcodec
     */
    unsigned int slice_count;

    /**
     * Pointer to slice data buffer base
     *
     * - encoding: unused
     * - decoding: Set by libavcodec
     */
    const uint8_t *slice_data;

    /**
     * Current size of slice data
     *
     * - encoding: unused
     * - decoding: Set by libavcodec
     */
    uint32_t slice_data_size;
};

/* @} */

#endif /* AVCODEC_VAAPI_H */
123linslouis-android-video-cutter
jni/libavcodec/vaapi.h
C
asf20
3,895
/*
 * AVCodecParser prototypes and definitions
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2003 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_PARSER_H
#define AVCODEC_PARSER_H

#include "avcodec.h"

/** Shared state used by parsers to reassemble frames from arbitrary input chunks. */
typedef struct ParseContext{
    uint8_t *buffer;            ///< accumulation buffer (presumably holds partial-frame data between calls; see ff_combine_frame — TODO confirm)
    int index;                  ///< current write position in buffer — assumption, verify against ff_combine_frame
    int last_index;
    unsigned int buffer_size;   ///< allocated size of buffer, in bytes — presumably
    uint32_t state;             ///< contains the last few bytes in MSB order
    int frame_start_found;      ///< nonzero once a frame start code has been seen — assumption based on name
    int overread;               ///< the number of bytes which where irreversibly read from the next frame
    int overread_index;         ///< the index into ParseContext.buffer of the overread bytes
    uint64_t state64;           ///< contains the last 8 bytes in MSB order
} ParseContext;

struct MpegEncContext;

/** Extended parse context used by some legacy parsers. */
typedef struct ParseContext1{
    ParseContext pc;
/* XXX/FIXME PC1 vs. PC */

    /* MPEG-2-specific */
    AVRational frame_rate;
    int progressive_sequence;
    int width, height;

    /* XXX: suppress that, needed by MPEG-4 */
    struct MpegEncContext *enc;
    int first_picture;
} ParseContext1;

/** Sentinel value meaning "no frame end found in the current input" — presumably the 'next' argument/return convention of ff_combine_frame; verify at call sites. */
#define END_NOT_FOUND (-100)

/* NOTE(review): combines buffered data with the new input into a complete
 * frame, updating *buf/*buf_size in place — inferred from the signature;
 * confirm against the implementation. */
int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size);
int ff_mpeg4video_split(AVCodecContext *avctx, const uint8_t *buf, int buf_size);
void ff_parse_close(AVCodecParserContext *s);
void ff_parse1_close(AVCodecParserContext *s);

/**
 * Fetches timestamps for a specific byte within the current access unit.
 * @param off byte position within the access unit
 * @param remove Found timestamps will be removed if set to 1, kept if set to 0.
 */
void ff_fetch_timestamp(AVCodecParserContext *s, int off, int remove);

#endif /* AVCODEC_PARSER_H */
123linslouis-android-video-cutter
jni/libavcodec/parser.h
C
asf20
2,439
/* * Real Audio 1.0 (14.4K) * * Copyright (c) 2008 Vitor Sessak * Copyright (c) 2003 Nick Kurshev * Based on public domain decoder at http://www.honeypot.net/audio * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/intmath.h" #include "avcodec.h" #include "get_bits.h" #include "ra144.h" #include "celp_filters.h" #define NBLOCKS 4 ///< number of subblocks within a block #define BLOCKSIZE 40 ///< subblock size in 16-bit words #define BUFFERSIZE 146 ///< the size of the adaptive codebook typedef struct { AVCodecContext *avctx; unsigned int old_energy; ///< previous frame energy unsigned int lpc_tables[2][10]; /** LPC coefficients: lpc_coef[0] is the coefficients of the current frame * and lpc_coef[1] of the previous one. */ unsigned int *lpc_coef[2]; unsigned int lpc_refl_rms[2]; /** The current subblock padded by the last 10 values of the previous one. */ int16_t curr_sblock[50]; /** Adaptive codebook, its size is two units bigger to avoid a * buffer overflow. 
*/ uint16_t adapt_cb[146+2]; } RA144Context; static av_cold int ra144_decode_init(AVCodecContext * avctx) { RA144Context *ractx = avctx->priv_data; ractx->avctx = avctx; ractx->lpc_coef[0] = ractx->lpc_tables[0]; ractx->lpc_coef[1] = ractx->lpc_tables[1]; avctx->sample_fmt = SAMPLE_FMT_S16; return 0; } /** * Evaluate sqrt(x << 24). x must fit in 20 bits. This value is evaluated in an * odd way to make the output identical to the binary decoder. */ static int t_sqrt(unsigned int x) { int s = 2; while (x > 0xfff) { s++; x >>= 2; } return ff_sqrt(x << 20) << s; } /** * Evaluate the LPC filter coefficients from the reflection coefficients. * Does the inverse of the eval_refl() function. */ static void eval_coefs(int *coefs, const int *refl) { int buffer[10]; int *b1 = buffer; int *b2 = coefs; int i, j; for (i=0; i < 10; i++) { b1[i] = refl[i] << 4; for (j=0; j < i; j++) b1[j] = ((refl[i] * b2[i-j-1]) >> 12) + b2[j]; FFSWAP(int *, b1, b2); } for (i=0; i < 10; i++) coefs[i] >>= 4; } /** * Copy the last offset values of *source to *target. If those values are not * enough to fill the target buffer, fill it with another copy of those values. 
*/ static void copy_and_dup(int16_t *target, const int16_t *source, int offset) { source += BUFFERSIZE - offset; memcpy(target, source, FFMIN(BLOCKSIZE, offset)*sizeof(*target)); if (offset < BLOCKSIZE) memcpy(target + offset, source, (BLOCKSIZE - offset)*sizeof(*target)); } /** inverse root mean square */ static int irms(const int16_t *data) { unsigned int i, sum = 0; for (i=0; i < BLOCKSIZE; i++) sum += data[i] * data[i]; if (sum == 0) return 0; /* OOPS - division by zero */ return 0x20000000 / (t_sqrt(sum) >> 8); } static void add_wav(int16_t *dest, int n, int skip_first, int *m, const int16_t *s1, const int8_t *s2, const int8_t *s3) { int i; int v[3]; v[0] = 0; for (i=!skip_first; i<3; i++) v[i] = (gain_val_tab[n][i] * m[i]) >> gain_exp_tab[n]; if (v[0]) { for (i=0; i < BLOCKSIZE; i++) dest[i] = (s1[i]*v[0] + s2[i]*v[1] + s3[i]*v[2]) >> 12; } else { for (i=0; i < BLOCKSIZE; i++) dest[i] = ( s2[i]*v[1] + s3[i]*v[2]) >> 12; } } static unsigned int rescale_rms(unsigned int rms, unsigned int energy) { return (rms * energy) >> 10; } static unsigned int rms(const int *data) { int i; unsigned int res = 0x10000; int b = 10; for (i=0; i < 10; i++) { res = (((0x1000000 - data[i]*data[i]) >> 12) * res) >> 12; if (res == 0) return 0; while (res <= 0x3fff) { b++; res <<= 2; } } return t_sqrt(res) >> b; } static void do_output_subblock(RA144Context *ractx, const uint16_t *lpc_coefs, int gval, GetBitContext *gb) { uint16_t buffer_a[40]; uint16_t *block; int cba_idx = get_bits(gb, 7); // index of the adaptive CB, 0 if none int gain = get_bits(gb, 8); int cb1_idx = get_bits(gb, 7); int cb2_idx = get_bits(gb, 7); int m[3]; if (cba_idx) { cba_idx += BLOCKSIZE/2 - 1; copy_and_dup(buffer_a, ractx->adapt_cb, cba_idx); m[0] = (irms(buffer_a) * gval) >> 12; } else { m[0] = 0; } m[1] = (cb1_base[cb1_idx] * gval) >> 8; m[2] = (cb2_base[cb2_idx] * gval) >> 8; memmove(ractx->adapt_cb, ractx->adapt_cb + BLOCKSIZE, (BUFFERSIZE - BLOCKSIZE) * sizeof(*ractx->adapt_cb)); block = 
ractx->adapt_cb + BUFFERSIZE - BLOCKSIZE; add_wav(block, gain, cba_idx, m, cba_idx? buffer_a: NULL, cb1_vects[cb1_idx], cb2_vects[cb2_idx]); memcpy(ractx->curr_sblock, ractx->curr_sblock + 40, 10*sizeof(*ractx->curr_sblock)); if (ff_celp_lp_synthesis_filter(ractx->curr_sblock + 10, lpc_coefs, block, BLOCKSIZE, 10, 1, 0xfff)) memset(ractx->curr_sblock, 0, 50*sizeof(*ractx->curr_sblock)); } static void int_to_int16(int16_t *out, const int *inp) { int i; for (i=0; i < 10; i++) *out++ = *inp++; } /** * Evaluate the reflection coefficients from the filter coefficients. * Does the inverse of the eval_coefs() function. * * @return 1 if one of the reflection coefficients is greater than * 4095, 0 if not. */ static int eval_refl(int *refl, const int16_t *coefs, AVCodecContext *avctx) { int b, i, j; int buffer1[10]; int buffer2[10]; int *bp1 = buffer1; int *bp2 = buffer2; for (i=0; i < 10; i++) buffer2[i] = coefs[i]; refl[9] = bp2[9]; if ((unsigned) bp2[9] + 0x1000 > 0x1fff) { av_log(avctx, AV_LOG_ERROR, "Overflow. Broken sample?\n"); return 1; } for (i=8; i >= 0; i--) { b = 0x1000-((bp2[i+1] * bp2[i+1]) >> 12); if (!b) b = -2; for (j=0; j <= i; j++) bp1[j] = ((bp2[j] - ((refl[i+1] * bp2[i-j]) >> 12)) * (0x1000000 / b)) >> 12; if ((unsigned) bp1[i] + 0x1000 > 0x1fff) return 1; refl[i] = bp1[i]; FFSWAP(int *, bp1, bp2); } return 0; } static int interp(RA144Context *ractx, int16_t *out, int a, int copyold, int energy) { int work[10]; int b = NBLOCKS - a; int i; // Interpolate block coefficients from the this frame's forth block and // last frame's forth block. for (i=0; i<10; i++) out[i] = (a * ractx->lpc_coef[0][i] + b * ractx->lpc_coef[1][i])>> 2; if (eval_refl(work, out, ractx->avctx)) { // The interpolated coefficients are unstable, copy either new or old // coefficients. 
int_to_int16(out, ractx->lpc_coef[copyold]); return rescale_rms(ractx->lpc_refl_rms[copyold], energy); } else { return rescale_rms(rms(work), energy); } } /** Uncompress one block (20 bytes -> 160*2 bytes). */ static int ra144_decode_frame(AVCodecContext * avctx, void *vdata, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; static const uint8_t sizes[10] = {6, 5, 5, 4, 4, 3, 3, 3, 3, 2}; unsigned int refl_rms[4]; // RMS of the reflection coefficients uint16_t block_coefs[4][10]; // LPC coefficients of each sub-block unsigned int lpc_refl[10]; // LPC reflection coefficients of the frame int i, j; int16_t *data = vdata; unsigned int energy; RA144Context *ractx = avctx->priv_data; GetBitContext gb; if (*data_size < 2*160) return -1; if(buf_size < 20) { av_log(avctx, AV_LOG_ERROR, "Frame too small (%d bytes). Truncated file?\n", buf_size); *data_size = 0; return buf_size; } init_get_bits(&gb, buf, 20 * 8); for (i=0; i<10; i++) lpc_refl[i] = lpc_refl_cb[i][get_bits(&gb, sizes[i])]; eval_coefs(ractx->lpc_coef[0], lpc_refl); ractx->lpc_refl_rms[0] = rms(lpc_refl); energy = energy_tab[get_bits(&gb, 5)]; refl_rms[0] = interp(ractx, block_coefs[0], 1, 1, ractx->old_energy); refl_rms[1] = interp(ractx, block_coefs[1], 2, energy <= ractx->old_energy, t_sqrt(energy*ractx->old_energy) >> 12); refl_rms[2] = interp(ractx, block_coefs[2], 3, 0, energy); refl_rms[3] = rescale_rms(ractx->lpc_refl_rms[0], energy); int_to_int16(block_coefs[3], ractx->lpc_coef[0]); for (i=0; i < 4; i++) { do_output_subblock(ractx, block_coefs[i], refl_rms[i], &gb); for (j=0; j < BLOCKSIZE; j++) *data++ = av_clip_int16(ractx->curr_sblock[j + 10] << 2); } ractx->old_energy = energy; ractx->lpc_refl_rms[1] = ractx->lpc_refl_rms[0]; FFSWAP(unsigned int *, ractx->lpc_coef[0], ractx->lpc_coef[1]); *data_size = 2*160; return 20; } AVCodec ra_144_decoder = { "real_144", AVMEDIA_TYPE_AUDIO, CODEC_ID_RA_144, sizeof(RA144Context), ra144_decode_init, NULL, NULL, 
ra144_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("RealAudio 1.0 (14.4K)"), };
123linslouis-android-video-cutter
jni/libavcodec/ra144.c
C
asf20
10,098
/** * @file * VP5 and VP6 compatible video decoder (common data) * * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_VP56DATA_H #define AVCODEC_VP56DATA_H #include "libavutil/common.h" typedef enum { VP56_FRAME_NONE =-1, VP56_FRAME_CURRENT = 0, VP56_FRAME_PREVIOUS = 1, VP56_FRAME_GOLDEN = 2, VP56_FRAME_GOLDEN2 = 3, VP56_FRAME_UNUSED = 4, VP56_FRAME_UNUSED2 = 5, } VP56Frame; typedef enum { VP56_MB_INTER_NOVEC_PF = 0, /**< Inter MB, no vector, from previous frame */ VP56_MB_INTRA = 1, /**< Intra MB */ VP56_MB_INTER_DELTA_PF = 2, /**< Inter MB, above/left vector + delta, from previous frame */ VP56_MB_INTER_V1_PF = 3, /**< Inter MB, first vector, from previous frame */ VP56_MB_INTER_V2_PF = 4, /**< Inter MB, second vector, from previous frame */ VP56_MB_INTER_NOVEC_GF = 5, /**< Inter MB, no vector, from golden frame */ VP56_MB_INTER_DELTA_GF = 6, /**< Inter MB, above/left vector + delta, from golden frame */ VP56_MB_INTER_4V = 7, /**< Inter MB, 4 vectors, from previous frame */ VP56_MB_INTER_V1_GF = 8, /**< Inter MB, first vector, from golden frame */ VP56_MB_INTER_V2_GF = 9, /**< Inter MB, second vector, from golden frame */ } VP56mb; typedef struct { int8_t val; int8_t prob_idx; } VP56Tree; extern const uint8_t 
vp56_b2p[]; extern const uint8_t vp56_b6to4[]; extern const uint8_t vp56_coeff_parse_table[6][11]; extern const uint8_t vp56_def_mb_types_stats[3][10][2]; extern const VP56Tree vp56_pva_tree[]; extern const VP56Tree vp56_pc_tree[]; extern const uint8_t vp56_coeff_bias[]; extern const uint8_t vp56_coeff_bit_length[]; static const VP56Frame vp56_reference_frame[] = { VP56_FRAME_PREVIOUS, /* VP56_MB_INTER_NOVEC_PF */ VP56_FRAME_CURRENT, /* VP56_MB_INTRA */ VP56_FRAME_PREVIOUS, /* VP56_MB_INTER_DELTA_PF */ VP56_FRAME_PREVIOUS, /* VP56_MB_INTER_V1_PF */ VP56_FRAME_PREVIOUS, /* VP56_MB_INTER_V2_PF */ VP56_FRAME_GOLDEN, /* VP56_MB_INTER_NOVEC_GF */ VP56_FRAME_GOLDEN, /* VP56_MB_INTER_DELTA_GF */ VP56_FRAME_PREVIOUS, /* VP56_MB_INTER_4V */ VP56_FRAME_GOLDEN, /* VP56_MB_INTER_V1_GF */ VP56_FRAME_GOLDEN, /* VP56_MB_INTER_V2_GF */ }; static const uint8_t vp56_ac_dequant[64] = { 94, 92, 90, 88, 86, 82, 78, 74, 70, 66, 62, 58, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 40, 39, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, }; static const uint8_t vp56_dc_dequant[64] = { 47, 47, 47, 47, 45, 43, 43, 43, 43, 43, 42, 41, 41, 40, 40, 40, 40, 35, 35, 35, 35, 33, 33, 33, 33, 32, 32, 32, 27, 27, 26, 26, 25, 25, 24, 24, 23, 23, 19, 19, 19, 19, 18, 18, 17, 16, 16, 16, 16, 16, 15, 11, 11, 11, 10, 10, 9, 8, 7, 5, 3, 3, 2, 2, }; static const uint8_t vp56_pre_def_mb_type_stats[16][3][10][2] = { { { { 9, 15 }, { 32, 25 }, { 7, 19 }, { 9, 21 }, { 1, 12 }, { 14, 12 }, { 3, 18 }, { 14, 23 }, { 3, 10 }, { 0, 4 }, }, { { 41, 22 }, { 1, 0 }, { 1, 31 }, { 0, 0 }, { 0, 0 }, { 0, 1 }, { 1, 7 }, { 0, 1 }, { 98, 25 }, { 4, 10 }, }, { { 2, 3 }, { 2, 3 }, { 0, 2 }, { 0, 2 }, { 0, 0 }, { 11, 4 }, { 1, 4 }, { 0, 2 }, { 3, 2 }, { 0, 4 }, }, }, { { { 48, 39 }, { 1, 2 }, { 11, 27 }, { 29, 44 }, { 7, 27 }, { 1, 4 }, { 0, 3 }, { 1, 6 }, { 1, 2 }, { 0, 0 }, }, { { 123, 37 }, { 6, 4 }, { 1, 27 }, { 0, 0 }, { 0, 0 
}, { 5, 8 }, { 1, 7 }, { 0, 1 }, { 12, 10 }, { 0, 2 }, }, { { 49, 46 }, { 3, 4 }, { 7, 31 }, { 42, 41 }, { 0, 0 }, { 2, 6 }, { 1, 7 }, { 1, 4 }, { 2, 4 }, { 0, 1 }, }, }, { { { 21, 32 }, { 1, 2 }, { 4, 10 }, { 32, 43 }, { 6, 23 }, { 2, 3 }, { 1, 19 }, { 1, 6 }, { 12, 21 }, { 0, 7 }, }, { { 26, 14 }, { 14, 12 }, { 0, 24 }, { 0, 0 }, { 0, 0 }, { 55, 17 }, { 1, 9 }, { 0, 36 }, { 5, 7 }, { 1, 3 }, }, { { 26, 25 }, { 1, 1 }, { 2, 10 }, { 67, 39 }, { 0, 0 }, { 1, 1 }, { 0, 14 }, { 0, 2 }, { 31, 26 }, { 1, 6 }, }, }, { { { 69, 83 }, { 0, 0 }, { 0, 2 }, { 10, 29 }, { 3, 12 }, { 0, 1 }, { 0, 3 }, { 0, 3 }, { 2, 2 }, { 0, 0 }, }, { { 209, 5 }, { 0, 0 }, { 0, 27 }, { 0, 0 }, { 0, 0 }, { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, }, { { 103, 46 }, { 1, 2 }, { 2, 10 }, { 33, 42 }, { 0, 0 }, { 1, 4 }, { 0, 3 }, { 0, 1 }, { 1, 3 }, { 0, 0 }, }, }, { { { 11, 20 }, { 1, 4 }, { 18, 36 }, { 43, 48 }, { 13, 35 }, { 0, 2 }, { 0, 5 }, { 3, 12 }, { 1, 2 }, { 0, 0 }, }, { { 2, 5 }, { 4, 5 }, { 0, 121 }, { 0, 0 }, { 0, 0 }, { 0, 3 }, { 2, 4 }, { 1, 4 }, { 2, 2 }, { 0, 1 }, }, { { 14, 31 }, { 9, 13 }, { 14, 54 }, { 22, 29 }, { 0, 0 }, { 2, 6 }, { 4, 18 }, { 6, 13 }, { 1, 5 }, { 0, 1 }, }, }, { { { 70, 44 }, { 0, 1 }, { 2, 10 }, { 37, 46 }, { 8, 26 }, { 0, 2 }, { 0, 2 }, { 0, 2 }, { 0, 1 }, { 0, 0 }, }, { { 175, 5 }, { 0, 1 }, { 0, 48 }, { 0, 0 }, { 0, 0 }, { 0, 2 }, { 0, 1 }, { 0, 2 }, { 0, 1 }, { 0, 0 }, }, { { 85, 39 }, { 0, 0 }, { 1, 9 }, { 69, 40 }, { 0, 0 }, { 0, 1 }, { 0, 3 }, { 0, 1 }, { 2, 3 }, { 0, 0 }, }, }, { { { 8, 15 }, { 0, 1 }, { 8, 21 }, { 74, 53 }, { 22, 42 }, { 0, 1 }, { 0, 2 }, { 0, 3 }, { 1, 2 }, { 0, 0 }, }, { { 83, 5 }, { 2, 3 }, { 0, 102 }, { 0, 0 }, { 0, 0 }, { 1, 3 }, { 0, 2 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, }, { { 31, 28 }, { 0, 0 }, { 3, 14 }, { 130, 34 }, { 0, 0 }, { 0, 1 }, { 0, 3 }, { 0, 1 }, { 3, 3 }, { 0, 1 }, }, }, { { { 141, 42 }, { 0, 0 }, { 1, 4 }, { 11, 24 }, { 1, 11 }, { 0, 1 }, { 0, 1 }, { 0, 2 }, { 0, 0 }, { 0, 0 }, }, { { 233, 6 }, { 0, 0 }, { 0, 
8 }, { 0, 0 }, { 0, 0 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 1 }, { 0, 0 }, }, { { 171, 25 }, { 0, 0 }, { 1, 5 }, { 25, 21 }, { 0, 0 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, }, }, { { { 8, 19 }, { 4, 10 }, { 24, 45 }, { 21, 37 }, { 9, 29 }, { 0, 3 }, { 1, 7 }, { 11, 25 }, { 0, 2 }, { 0, 1 }, }, { { 34, 16 }, { 112, 21 }, { 1, 28 }, { 0, 0 }, { 0, 0 }, { 6, 8 }, { 1, 7 }, { 0, 3 }, { 2, 5 }, { 0, 2 }, }, { { 17, 21 }, { 68, 29 }, { 6, 15 }, { 13, 22 }, { 0, 0 }, { 6, 12 }, { 3, 14 }, { 4, 10 }, { 1, 7 }, { 0, 3 }, }, }, { { { 46, 42 }, { 0, 1 }, { 2, 10 }, { 54, 51 }, { 10, 30 }, { 0, 2 }, { 0, 2 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, }, { { 159, 35 }, { 2, 2 }, { 0, 25 }, { 0, 0 }, { 0, 0 }, { 3, 6 }, { 0, 5 }, { 0, 1 }, { 4, 4 }, { 0, 1 }, }, { { 51, 39 }, { 0, 1 }, { 2, 12 }, { 91, 44 }, { 0, 0 }, { 0, 2 }, { 0, 3 }, { 0, 1 }, { 2, 3 }, { 0, 1 }, }, }, { { { 28, 32 }, { 0, 0 }, { 3, 10 }, { 75, 51 }, { 14, 33 }, { 0, 1 }, { 0, 2 }, { 0, 1 }, { 1, 2 }, { 0, 0 }, }, { { 75, 39 }, { 5, 7 }, { 2, 48 }, { 0, 0 }, { 0, 0 }, { 3, 11 }, { 2, 16 }, { 1, 4 }, { 7, 10 }, { 0, 2 }, }, { { 81, 25 }, { 0, 0 }, { 2, 9 }, { 106, 26 }, { 0, 0 }, { 0, 1 }, { 0, 1 }, { 0, 1 }, { 1, 1 }, { 0, 0 }, }, }, { { { 100, 46 }, { 0, 1 }, { 3, 9 }, { 21, 37 }, { 5, 20 }, { 0, 1 }, { 0, 2 }, { 1, 2 }, { 0, 1 }, { 0, 0 }, }, { { 212, 21 }, { 0, 1 }, { 0, 9 }, { 0, 0 }, { 0, 0 }, { 1, 2 }, { 0, 2 }, { 0, 0 }, { 2, 2 }, { 0, 0 }, }, { { 140, 37 }, { 0, 1 }, { 1, 8 }, { 24, 33 }, { 0, 0 }, { 1, 2 }, { 0, 2 }, { 0, 1 }, { 1, 2 }, { 0, 0 }, }, }, { { { 27, 29 }, { 0, 1 }, { 9, 25 }, { 53, 51 }, { 12, 34 }, { 0, 1 }, { 0, 3 }, { 1, 5 }, { 0, 2 }, { 0, 0 }, }, { { 4, 2 }, { 0, 0 }, { 0, 172 }, { 0, 0 }, { 0, 0 }, { 0, 1 }, { 0, 2 }, { 0, 0 }, { 2, 0 }, { 0, 0 }, }, { { 14, 23 }, { 1, 3 }, { 11, 53 }, { 90, 31 }, { 0, 0 }, { 0, 3 }, { 1, 5 }, { 2, 6 }, { 1, 2 }, { 0, 0 }, }, }, { { { 80, 38 }, { 0, 0 }, { 1, 4 }, { 69, 33 }, { 5, 16 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 1 }, { 0, 0 }, }, { { 187, 
22 }, { 1, 1 }, { 0, 17 }, { 0, 0 }, { 0, 0 }, { 3, 6 }, { 0, 4 }, { 0, 1 }, { 4, 4 }, { 0, 1 }, }, { { 123, 29 }, { 0, 0 }, { 1, 7 }, { 57, 30 }, { 0, 0 }, { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, }, }, { { { 16, 20 }, { 0, 0 }, { 2, 8 }, { 104, 49 }, { 15, 33 }, { 0, 1 }, { 0, 1 }, { 0, 1 }, { 1, 1 }, { 0, 0 }, }, { { 133, 6 }, { 1, 2 }, { 1, 70 }, { 0, 0 }, { 0, 0 }, { 0, 2 }, { 0, 4 }, { 0, 3 }, { 1, 1 }, { 0, 0 }, }, { { 13, 14 }, { 0, 0 }, { 4, 20 }, { 175, 20 }, { 0, 0 }, { 0, 1 }, { 0, 1 }, { 0, 1 }, { 1, 1 }, { 0, 0 }, }, }, { { { 194, 16 }, { 0, 0 }, { 1, 1 }, { 1, 9 }, { 1, 3 }, { 0, 0 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, }, { { 251, 1 }, { 0, 0 }, { 0, 2 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, }, { { 202, 23 }, { 0, 0 }, { 1, 3 }, { 2, 9 }, { 0, 0 }, { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, }, }, }; static const uint8_t vp56_filter_threshold[] = { 14, 14, 13, 13, 12, 12, 10, 10, 10, 10, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, }; static const uint8_t vp56_mb_type_model_model[] = { 171, 83, 199, 140, 125, 104, }; static const VP56Tree vp56_pmbtm_tree[] = { { 4, 0}, { 2, 1}, {-8}, {-4}, { 8, 2}, { 6, 3}, { 4, 4}, { 2, 5}, {-24}, {-20}, {-16}, {-12}, {-0}, }; static const VP56Tree vp56_pmbt_tree[] = { { 8, 1}, { 4, 2}, { 2, 4}, {-VP56_MB_INTER_NOVEC_PF}, {-VP56_MB_INTER_DELTA_PF}, { 2, 5}, {-VP56_MB_INTER_V1_PF}, {-VP56_MB_INTER_V2_PF}, { 4, 3}, { 2, 6}, {-VP56_MB_INTRA}, {-VP56_MB_INTER_4V}, { 4, 7}, { 2, 8}, {-VP56_MB_INTER_NOVEC_GF}, {-VP56_MB_INTER_DELTA_GF}, { 2, 9}, {-VP56_MB_INTER_V1_GF}, {-VP56_MB_INTER_V2_GF}, }; /* relative pos of surrounding blocks, from closest to farthest */ static const int8_t vp56_candidate_predictor_pos[12][2] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 1, -1 }, { 0, -2 }, { -2, 0 }, { -2, -1 }, { -1, -2 }, { 1, -2 }, { 2, -1 }, { -2, -2 }, { 2, -2 }, 
}; #endif /* AVCODEC_VP56DATA_H */
123linslouis-android-video-cutter
jni/libavcodec/vp56data.h
C
asf20
12,077
/* * jfdctint.c * * This file is part of the Independent JPEG Group's software. * * The authors make NO WARRANTY or representation, either express or implied, * with respect to this software, its quality, accuracy, merchantability, or * fitness for a particular purpose. This software is provided "AS IS", and * you, its user, assume the entire risk as to its quality and accuracy. * * This software is copyright (C) 1991-1996, Thomas G. Lane. * All Rights Reserved except as specified below. * * Permission is hereby granted to use, copy, modify, and distribute this * software (or portions thereof) for any purpose, without fee, subject to * these conditions: * (1) If any part of the source code for this software is distributed, then * this README file must be included, with this copyright and no-warranty * notice unaltered; and any additions, deletions, or changes to the original * files must be clearly indicated in accompanying documentation. * (2) If only executable code is distributed, then the accompanying * documentation must state that "this software is based in part on the work * of the Independent JPEG Group". * (3) Permission for use of this software is granted only if the user accepts * full responsibility for any undesirable consequences; the authors accept * NO LIABILITY for damages of any kind. * * These conditions apply to any software derived from or based on the IJG * code, not just to the unmodified library. If you use our work, you ought * to acknowledge us. * * Permission is NOT granted for the use of any IJG author's name or company * name in advertising or publicity relating to this software or products * derived from it. This software may be referred to only as "the Independent * JPEG Group's software". * * We specifically permit and encourage the use of this software as the basis * of commercial products, provided that all warranty or liability claims are * assumed by the product vendor. 
* * This file contains a slow-but-accurate integer implementation of the * forward DCT (Discrete Cosine Transform). * * A 2-D DCT can be done by 1-D DCT on each row followed by 1-D DCT * on each column. Direct algorithms are also available, but they are * much more complex and seem not to be any faster when reduced to code. * * This implementation is based on an algorithm described in * C. Loeffler, A. Ligtenberg and G. Moschytz, "Practical Fast 1-D DCT * Algorithms with 11 Multiplications", Proc. Int'l. Conf. on Acoustics, * Speech, and Signal Processing 1989 (ICASSP '89), pp. 988-991. * The primary algorithm described there uses 11 multiplies and 29 adds. * We use their alternate method with 12 multiplies and 32 adds. * The advantage of this method is that no data path contains more than one * multiplication; this allows a very simple and accurate implementation in * scaled fixed-point arithmetic, with a minimal number of shifts. */ /** * @file * Independent JPEG Group's slow & accurate dct. */ #include <stdlib.h> #include <stdio.h> #include "libavutil/common.h" #include "dsputil.h" #define DCTSIZE 8 #define BITS_IN_JSAMPLE 8 #define GLOBAL(x) x #define RIGHT_SHIFT(x, n) ((x) >> (n)) #define MULTIPLY16C16(var,const) ((var)*(const)) #if 1 //def USE_ACCURATE_ROUNDING #define DESCALE(x,n) RIGHT_SHIFT((x) + (1 << ((n) - 1)), n) #else #define DESCALE(x,n) RIGHT_SHIFT(x, n) #endif /* * This module is specialized to the case DCTSIZE = 8. */ #if DCTSIZE != 8 Sorry, this code only copes with 8x8 DCTs. /* deliberate syntax err */ #endif /* * The poop on this scaling stuff is as follows: * * Each 1-D DCT step produces outputs which are a factor of sqrt(N) * larger than the true DCT outputs. The final outputs are therefore * a factor of N larger than desired; since N=8 this can be cured by * a simple right shift at the end of the algorithm. 
The advantage of * this arrangement is that we save two multiplications per 1-D DCT, * because the y0 and y4 outputs need not be divided by sqrt(N). * In the IJG code, this factor of 8 is removed by the quantization step * (in jcdctmgr.c), NOT in this module. * * We have to do addition and subtraction of the integer inputs, which * is no problem, and multiplication by fractional constants, which is * a problem to do in integer arithmetic. We multiply all the constants * by CONST_SCALE and convert them to integer constants (thus retaining * CONST_BITS bits of precision in the constants). After doing a * multiplication we have to divide the product by CONST_SCALE, with proper * rounding, to produce the correct output. This division can be done * cheaply as a right shift of CONST_BITS bits. We postpone shifting * as long as possible so that partial sums can be added together with * full fractional precision. * * The outputs of the first pass are scaled up by PASS1_BITS bits so that * they are represented to better-than-integral precision. These outputs * require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word * with the recommended scaling. (For 12-bit sample data, the intermediate * array is int32_t anyway.) * * To avoid overflow of the 32-bit intermediate results in pass 2, we must * have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26. Error analysis * shows that the values given below are the most effective. */ #if BITS_IN_JSAMPLE == 8 #define CONST_BITS 13 #define PASS1_BITS 4 /* set this to 2 if 16x16 multiplies are faster */ #else #define CONST_BITS 13 #define PASS1_BITS 1 /* lose a little precision to avoid overflow */ #endif /* Some C compilers fail to reduce "FIX(constant)" at compile time, thus * causing a lot of useless floating-point operations at run time. * To get around this we use the following pre-calculated constants. * If you change CONST_BITS you may want to add appropriate values. 
* (With a reasonable C compiler, you can just rely on the FIX() macro...) */ #if CONST_BITS == 13 #define FIX_0_298631336 ((int32_t) 2446) /* FIX(0.298631336) */ #define FIX_0_390180644 ((int32_t) 3196) /* FIX(0.390180644) */ #define FIX_0_541196100 ((int32_t) 4433) /* FIX(0.541196100) */ #define FIX_0_765366865 ((int32_t) 6270) /* FIX(0.765366865) */ #define FIX_0_899976223 ((int32_t) 7373) /* FIX(0.899976223) */ #define FIX_1_175875602 ((int32_t) 9633) /* FIX(1.175875602) */ #define FIX_1_501321110 ((int32_t) 12299) /* FIX(1.501321110) */ #define FIX_1_847759065 ((int32_t) 15137) /* FIX(1.847759065) */ #define FIX_1_961570560 ((int32_t) 16069) /* FIX(1.961570560) */ #define FIX_2_053119869 ((int32_t) 16819) /* FIX(2.053119869) */ #define FIX_2_562915447 ((int32_t) 20995) /* FIX(2.562915447) */ #define FIX_3_072711026 ((int32_t) 25172) /* FIX(3.072711026) */ #else #define FIX_0_298631336 FIX(0.298631336) #define FIX_0_390180644 FIX(0.390180644) #define FIX_0_541196100 FIX(0.541196100) #define FIX_0_765366865 FIX(0.765366865) #define FIX_0_899976223 FIX(0.899976223) #define FIX_1_175875602 FIX(1.175875602) #define FIX_1_501321110 FIX(1.501321110) #define FIX_1_847759065 FIX(1.847759065) #define FIX_1_961570560 FIX(1.961570560) #define FIX_2_053119869 FIX(2.053119869) #define FIX_2_562915447 FIX(2.562915447) #define FIX_3_072711026 FIX(3.072711026) #endif /* Multiply an int32_t variable by an int32_t constant to yield an int32_t result. * For 8-bit samples with the recommended scaling, all the variable * and constant values involved are no more than 16 bits wide, so a * 16x16->32 bit multiply can be used instead of a full 32x32 multiply. * For 12-bit samples, a full 32-bit multiplication will be needed. 
*/ #if BITS_IN_JSAMPLE == 8 && CONST_BITS<=13 && PASS1_BITS<=2 #define MULTIPLY(var,const) MULTIPLY16C16(var,const) #else #define MULTIPLY(var,const) ((var) * (const)) #endif static av_always_inline void row_fdct(DCTELEM * data){ int_fast32_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; int_fast32_t tmp10, tmp11, tmp12, tmp13; int_fast32_t z1, z2, z3, z4, z5; DCTELEM *dataptr; int ctr; /* Pass 1: process rows. */ /* Note results are scaled up by sqrt(8) compared to a true DCT; */ /* furthermore, we scale the results by 2**PASS1_BITS. */ dataptr = data; for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { tmp0 = dataptr[0] + dataptr[7]; tmp7 = dataptr[0] - dataptr[7]; tmp1 = dataptr[1] + dataptr[6]; tmp6 = dataptr[1] - dataptr[6]; tmp2 = dataptr[2] + dataptr[5]; tmp5 = dataptr[2] - dataptr[5]; tmp3 = dataptr[3] + dataptr[4]; tmp4 = dataptr[3] - dataptr[4]; /* Even part per LL&M figure 1 --- note that published figure is faulty; * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". */ tmp10 = tmp0 + tmp3; tmp13 = tmp0 - tmp3; tmp11 = tmp1 + tmp2; tmp12 = tmp1 - tmp2; dataptr[0] = (DCTELEM) ((tmp10 + tmp11) << PASS1_BITS); dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS); z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), CONST_BITS-PASS1_BITS); dataptr[6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), CONST_BITS-PASS1_BITS); /* Odd part per figure 8 --- note paper omits factor of sqrt(2). * cK represents cos(K*pi/16). * i0..i3 in the paper are tmp4..tmp7 here. 
*/ z1 = tmp4 + tmp7; z2 = tmp5 + tmp6; z3 = tmp4 + tmp6; z4 = tmp5 + tmp7; z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */ tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */ tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */ z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */ z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */ z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */ z3 += z5; z4 += z5; dataptr[7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS); dataptr[5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS); dataptr[3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS); dataptr[1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS); dataptr += DCTSIZE; /* advance pointer to next row */ } } /* * Perform the forward DCT on one block of samples. */ GLOBAL(void) ff_jpeg_fdct_islow (DCTELEM * data) { int_fast32_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; int_fast32_t tmp10, tmp11, tmp12, tmp13; int_fast32_t z1, z2, z3, z4, z5; DCTELEM *dataptr; int ctr; row_fdct(data); /* Pass 2: process columns. * We remove the PASS1_BITS scaling, but leave the results scaled up * by an overall factor of 8. 
*/ dataptr = data; for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7]; tmp7 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7]; tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6]; tmp6 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6]; tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5]; tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5]; tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4]; tmp4 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4]; /* Even part per LL&M figure 1 --- note that published figure is faulty; * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". */ tmp10 = tmp0 + tmp3; tmp13 = tmp0 - tmp3; tmp11 = tmp1 + tmp2; tmp12 = tmp1 - tmp2; dataptr[DCTSIZE*0] = (DCTELEM) DESCALE(tmp10 + tmp11, PASS1_BITS); dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(tmp10 - tmp11, PASS1_BITS); z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), CONST_BITS+PASS1_BITS); dataptr[DCTSIZE*6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), CONST_BITS+PASS1_BITS); /* Odd part per figure 8 --- note paper omits factor of sqrt(2). * cK represents cos(K*pi/16). * i0..i3 in the paper are tmp4..tmp7 here. 
*/ z1 = tmp4 + tmp7; z2 = tmp5 + tmp6; z3 = tmp4 + tmp6; z4 = tmp5 + tmp7; z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */ tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */ tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */ z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */ z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */ z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */ z3 += z5; z4 += z5; dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS+PASS1_BITS); dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, CONST_BITS+PASS1_BITS); dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS+PASS1_BITS); dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, CONST_BITS+PASS1_BITS); dataptr++; /* advance pointer to next column */ } } /* * The secret of DCT2-4-8 is really simple -- you do the usual 1-DCT * on the rows and then, instead of doing even and odd, part on the colums * you do even part two times. */ GLOBAL(void) ff_fdct248_islow (DCTELEM * data) { int_fast32_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; int_fast32_t tmp10, tmp11, tmp12, tmp13; int_fast32_t z1; DCTELEM *dataptr; int ctr; row_fdct(data); /* Pass 2: process columns. * We remove the PASS1_BITS scaling, but leave the results scaled up * by an overall factor of 8. 
*/ dataptr = data; for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*1]; tmp1 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*3]; tmp2 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*5]; tmp3 = dataptr[DCTSIZE*6] + dataptr[DCTSIZE*7]; tmp4 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*1]; tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*3]; tmp6 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*5]; tmp7 = dataptr[DCTSIZE*6] - dataptr[DCTSIZE*7]; tmp10 = tmp0 + tmp3; tmp11 = tmp1 + tmp2; tmp12 = tmp1 - tmp2; tmp13 = tmp0 - tmp3; dataptr[DCTSIZE*0] = (DCTELEM) DESCALE(tmp10 + tmp11, PASS1_BITS); dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(tmp10 - tmp11, PASS1_BITS); z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), CONST_BITS+PASS1_BITS); dataptr[DCTSIZE*6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), CONST_BITS+PASS1_BITS); tmp10 = tmp4 + tmp7; tmp11 = tmp5 + tmp6; tmp12 = tmp5 - tmp6; tmp13 = tmp4 - tmp7; dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp10 + tmp11, PASS1_BITS); dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp10 - tmp11, PASS1_BITS); z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), CONST_BITS+PASS1_BITS); dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), CONST_BITS+PASS1_BITS); dataptr++; /* advance pointer to next column */ } }
123linslouis-android-video-cutter
jni/libavcodec/jfdctint.c
C
asf20
16,074
/* * TwinVQ decoder * Copyright (c) 2009 Vitor Sessak * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_TWINVQ_DATA_H #define AVCODEC_TWINVQ_DATA_H #include <stddef.h> #include <stdint.h> /* * The bark_tab_* tables are constructed so that * * /i-1 \ * |-- | * bark |\ bark_tab[j] | == i * |/ | * |-- | * \j=0 / * * * for some slightly nonconventional bark-scale function */ static const uint16_t bark_tab_l08_512[] = { 7, 8, 7, 8, 8, 8, 8, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 14, 15, 16, 18, 19, 21, 24, 27, 30, 35, 40, 46, 53 }; static const uint16_t bark_tab_l11_512[] = { 6, 6, 6, 6, 6, 6, 7, 6, 7, 7, 8, 8, 8, 9, 10, 10, 11, 13, 13, 15, 17, 18, 21, 25, 27, 33, 38, 45, 54, 66 }; static const uint16_t bark_tab_l16_1024[] = { 9, 9, 8, 9, 10, 9, 10, 10, 10, 12, 11, 13, 13, 14, 16, 17, 19, 20, 24, 26, 30, 35, 40, 48, 56, 68, 83, 102, 128, 165 }; static const uint16_t bark_tab_l22_1024[] = { 6, 7, 6, 6, 7, 7, 7, 7, 7, 8, 9, 8, 10, 10, 11, 12, 13, 15, 16, 18, 21, 24, 27, 33, 38, 46, 55, 68, 84, 107, 140, 191 }; static const uint16_t bark_tab_l22_512[] = { 3, 3, 3, 4, 3, 3, 4, 3, 4, 4, 4, 5, 4, 5, 6, 6, 7, 7, 8, 9, 10, 12, 14, 16, 20, 22, 28, 34, 42, 53, 71, 95 }; static const uint16_t bark_tab_l44_2048[] = { 5, 6, 5, 6, 5, 6, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 11, 13, 14, 
16, 17, 19, 22, 25, 29, 33, 39, 46, 54, 64, 79, 98, 123, 161, 220, 320, 512 }; static const uint16_t bark_tab_m08_256[] = { 6, 5, 6, 6, 6, 6, 7, 7, 8, 8, 9, 10, 11, 13, 15, 18, 20, 25, 31, 39 }; static const uint16_t bark_tab_m11_256[] = { 4, 5, 4, 5, 5, 5, 6, 5, 7, 7, 8, 9, 10, 12, 15, 17, 22, 28, 35, 47 }; static const uint16_t bark_tab_m16_512[] = { 7, 6, 7, 7, 7, 8, 9, 9, 10, 11, 14, 15, 18, 22, 27, 34, 44, 59, 81, 117 }; static const uint16_t bark_tab_m22_256[] = { 3, 2, 3, 2, 3, 3, 4, 3, 4, 5, 5, 7, 8, 9, 13, 16, 22, 30, 44, 70 }; static const uint16_t bark_tab_m22_512[] = { 5, 5, 5, 6, 5, 7, 6, 7, 9, 9, 11, 13, 15, 20, 24, 33, 43, 61, 88, 140 }; static const uint16_t bark_tab_m44_512[] = { 3, 2, 3, 3, 3, 4, 3, 5, 4, 6, 7, 8, 10, 14, 18, 25, 36, 55, 95, 208 }; static const uint16_t bark_tab_s08_64[] = { 3, 3, 3, 3, 4, 5, 6, 8, 12, 17 }; static const uint16_t bark_tab_s11_64[] = { 2, 3, 2, 3, 3, 4, 6, 8, 12, 21 }; static const uint16_t bark_tab_s16_128[] = { 3, 4, 4, 4, 5, 7, 10, 16, 26, 49 }; static const uint16_t bark_tab_s22_128[] = { 3, 2, 3, 4, 4, 6, 9, 14, 26, 57 }; static const uint16_t bark_tab_s44_128[] = { 1, 2, 1, 2, 3, 4, 6, 10, 23, 76 }; /** * TwinVQ codebooks. They are coded in a struct so we can use code such as * * float val = tab.fcb0808l[get_bits(gb, 12)]; * * without risking a segfault on malformed files. 
*/ static const struct { float lsp08[504]; int16_t fcb08l[640]; int16_t fcb08m[320]; int16_t fcb08s[320]; int16_t shape08[1280]; int16_t cb0808l0[1088]; int16_t cb0808l1[1088]; int16_t cb0808s0[1152]; int16_t cb0808s1[1152]; int16_t cb0808m0[1024]; int16_t cb0808m1[1024]; int16_t cb1108l0[1728]; int16_t cb1108l1[1728]; int16_t cb1108m0[1536]; int16_t cb1108m1[1536]; int16_t cb1108s0[1856]; int16_t cb1108s1[1856]; int16_t fcb11l[640]; int16_t fcb11m[320]; int16_t fcb11s[320]; int16_t shape11[1280]; float lsp11[1312]; int16_t cb1110l0[1280]; int16_t cb1110l1[1280]; int16_t cb1110m0[1152]; int16_t cb1110m1[1152]; int16_t cb1110s0[1344]; int16_t cb1110s1[1344]; int16_t fcb16l[640]; int16_t fcb16m[320]; int16_t fcb16s[320]; int16_t shape16[1920]; float lsp16[1400]; int16_t cb1616l0[1024]; int16_t cb1616l1[1024]; int16_t cb1616m0[960]; int16_t cb1616m1[960]; int16_t cb1616s0[1024]; int16_t cb1616s1[1024]; int16_t cb2220l0[1152]; int16_t cb2220l1[1152]; int16_t cb2220m0[1088]; int16_t cb2220m1[1088]; int16_t cb2220s0[1152]; int16_t cb2220s1[1152]; int16_t fcb22l_1[512]; int16_t fcb22m_1[640]; int16_t fcb22s_1[640]; int16_t shape22_1[1152]; float lsp22_1[1312]; int16_t cb2224l0[960]; int16_t cb2224l1[960]; int16_t cb2224m0[896]; int16_t cb2224m1[896]; int16_t cb2224s0[960]; int16_t cb2224s1[960]; int16_t fcb22l_2[512]; int16_t fcb22m_2[640]; int16_t fcb22s_2[640]; int16_t shape22_2[1152]; float lsp22_2[1312]; int16_t cb2232l0[768]; int16_t cb2232l1[768]; int16_t cb2232m0[704]; int16_t cb2232m1[704]; int16_t cb2232s0[704]; int16_t cb2232s1[704]; int16_t cb4440l0[1088]; int16_t cb4440l1[1088]; int16_t cb4440m0[1088]; int16_t cb4440m1[1088]; int16_t cb4440s0[1152]; int16_t cb4440s1[1152]; int16_t fcb44l[640]; int16_t fcb44m[640]; int16_t fcb44s[640]; int16_t shape44[1152]; float lsp44[1640]; int16_t cb4448l0[896]; int16_t cb4448l1[896]; int16_t cb4448m0[896]; int16_t cb4448m1[896]; int16_t cb4448s0[960]; int16_t cb4448s1[960]; } tab = { .cb0808l0 = { 96, -12592, -12443, 425, 
182, -456, -341, -843, 615, 689, 982, 1470, -518, 231, -538, 282, 409, -600, -303, -29, 51, -4, -115, 79, -27, 450, -937, -461, -554, -159, 426, 710, -29106, -2148, 99, 3426, 1838, 12427, 585, -2080, -2524, -474, 1572, 718, 578, -344, 188, 328, 12125, 112, 654, -1232, -1644, 288, 553, 1513, 966, 1012, 49, 631, -111, -238, -116, -182, -21, -46, 334, 11013, -454, -261, 12, 21, 52, -20440, -295, -502, -516, -329, -230, 465, 59, 270, 971, -127, 505, -194, 43, -30, 300, 38, 665, -613, 33, -172, -153, 323, -166, 54, 399, 109, 186, -1765, -222, 138, 16, 204, 30111, 208, -564, -612, 156, -146, -345, 321, -138, 202, -184, 93, 710, -15945, -13401, 234, -1113, 146, -9, 56, -628, -834, -1268, 872, 61, -1184, -126, -205, 145, -109, -8248, 113, -146, 1288, 9142, 857, -782, -686, -256, -650, 1061, -202, 12, -709, -88, 273, 497, 150, -59, -8807, 240, 532, 16, 1482, 11012, -444, 1918, -1786, 1934, 172, 598, -1324, 5638, -3166, 492, -545, -770, 1067, 0, -356, -421, 1684, 273, -502, 316, 1116, 807, -529, -831, -13379, -420, 236, 470, -2590, -193, -47, 580, -1613, 798, 27, -16, -12768, -893, 256, 0, 1659, 1463, 544, 196, -30444, 314, -421, 508, -276, -173, 414, -380, -371, -40, -121, 375, 432, -438, 1, -350, -280, 1198, -373, 452, 100, -68, 9053, 165, 770, 73, 291, 717, 515, 596, -323, -4, -2, 803, 738, 2605, 30, 73, 455, 11280, 1534, -283, 1502, -9126, -4760, -570, 483, -179, -8628, -1639, 322, -56, 6149, -3330, 114, 4598, -1976, -34, -56, 840, 753, 12292, -7100, -492, 320, -412, 908, 1186, 444, 6546, -788, 5394, 697, 13105, 194, -394, 294, 2639, 12, -1009, -1426, -36, 2106, -252, -31979, -66, 341, 996, 298, 105, 6, 10, 106, -498, -244, -105, -574, 16, -206, 24, -2067, -381, 10265, -103, -762, -785, -2036, -11927, 16, -710, -35, -270, -99, 4, 772, -272, -186, -328, -14936, -57, -1357, -175, -606, 220, 918, -11, 398, -189, -278, 138, 429, 509, -701, -43, -42, -630, -560, 11736, -528, 10286, -633, -870, 423, 550, -888, 297, -170, 258, 2234, 486, 292, -446, -11858, 10008, 52, 1203, 
-164, 810, -1527, -604, -883, -588, -96, 332, 148, -180, 223, 356, 285, 434, -57, -172, -520, -432, -72, 294, -93, -134, 316, 30647, -351, 278, 84, -439, 589, 105, 1001, 297, 660, 196, 171, 178, -90, -55, 1172, 21100, 227, -288, 372, 162, 458, -555, -1329, 380, 366, -104, 105, 674, -378, 1328, 283, -1928, 549, 762, 454, 55, 606, 12499, 24, 435, 23, 29, 6170, 1129, -95, 97, 569, 132, 491, 164, -288, -1011, -134, 1234, -427, -254, -524, 226, -14114, 328, -70, 1666, -189, -2352, 1097, 619, 632, -981, 745, 587, -27, -200, -871, 50, 470, -246, 2610, 581, 254, 9893, -586, 880, -11894, 386, 1135, 117, 1072, 116, -830, -160, -1002, -699, -66, -230, -260, 112, 106, 221, 297, -47, 7642, 170, -330, -599, -51, -476, 33, 475, 624, 6199, -350, -406, 184, 906, -528, 382, 401, 348, 26, -186, 33, -130, -62, -50, 1268, -132, -109, 1164, -354, 675, 3, -402, -244, 644, 648, -132, -4, 45, 20386, -136, 568, 126, 376, 14476, -376, 267, 13518, -260, 111, 1014, 758, 439, 551, -164, 207, 128, -416, 616, 690, -9460, -1856, 1123, 826, -265, -762, 1596, -632, 52, -622, -894, 367, -433, -100, 1873, 756, -17436, 168, -541, 550, 145, -5612, -1057, -1344, -656, -194, 216, -500, -245, 246, 64, 688, 727, 12538, -5492, 252, -908, -424, -532, -659, -277, -230, -736, -183, 35, -228, 200, -12, -248, -60, -493, 433, 446, 366, -644, 92, -324, 29, 833, -21542, -977, 94, 379, 49, -1058, 248, -178, 85, -961, -1198, -48, 467, -242, -10202, 1556, 11263, -716, 814, -1686, 3594, -27, 694, -802, 390, 4144, -663, 44, -546, 312, -28, -484, 981, -307, 496, 408, 203, 12543, 296, -1240, 159, 846, -957, -1493, -618, 1593, 11868, 2616, 1954, 412, -922, -1320, 3325, -254, -1892, 607, -2223, -8745, -1486, 17, 343, -50, -562, 22011, -350, -491, -70, -60, 617, 768, -346, 387, 660, 1409, 222, 616, 173, -1323, 4017, -207, -525, -13243, 11, 440, -614, -280, 549, -670, -79, 459, 560, -102, -214, -54, -1201, 230, -526, 857, 1044, -369, 2470, -11010, -12586, 243, -205, 838, -920, 348, -738, 1319, 86, -78, -428, -1909, -155, 2, 
508, 711, -292, 1699, 225, -101, -163, 540, 9692, 235, -183, -38, 198, -466, -204, -8957, -914, -299, 193, 10, 723, 643, -533, -1418, 323, 20, 334, -886, -331, 368, 130, -30233, -152, -14, 637, 132, -232, -149, -430, 64, -243, -376, 370, 388, 196, -1098, 117, -794, -16, -274, 348, 464, -28156, 184, 322, -101, 2, -27, -183, 610, 256, -160, -573, -226, 588, 1613, 1028, 9518, -2151, -1602, -528, -356, -116, -11511, 1828, -2206, -47, -757, -1479, -1429, -14717, 1686, 253, 802, 462, -37, -916, -289, -401, 13383, 353, -74, 114, -189, 636, 434, -639, 1013, 234, 11752, 219, 1464, -132, -12838, 125, -592, -40, -162, -1772, 506, 479, 422, 36, 15, -960, 799, 517, 1311, -409, 748, 729, 446, 11029, -13039, 1257, -651, -13, -742, 1416, -388, -274, -795, 163, -572, 74, 430, -90, -126, -74, -598, 140, 125, -20, -20332, 208, 37, 19, -174, -209, 305, 28, -402, 28, -315, -1, -134, 440, -832, 79, -635, -304, 8, -32768, 625, 470, -1224, -351, 546, -1171, -706, 652, 31, 7484, -448, 916, 1244, -379, -300, 68, 868, 607, 247, 70, -984, 14314, 21, -350, -82, 368, 456, -742, 472, 34, 782, -498, -879, 700, 417, 216, 415, -161, -181, -608, 1570, 862, -96, -114, 8095, -26, 168, -363, -804, -36, -770, 139, -171, 6645, -1425, 4826, -5288, 1358, -11747, -64, 650, -3206, -1692, 789, -2047, -279, 916, -1648, 1164, 2044, -144, -717, -392, -216, 372, 348, 1052, -175, 668, 308, -15, 29112, -406, -774, 365, -1006, -526, 1076, 59, -672, -87, -106, 174, 96, 615, 462, -43, -496, 112, 149, -56, -182, -268, -32768, -205, -676, 165, -1210, -325, 7964, -44, 546, -699, 285, -418, 355, 238, 550, 67, 425, 384, -950, -330, -208, -452, 212, 11610, -190, 37, -907, -11137, -982, 585, -783, -864, 164, -24, -514, -211, 2, -510, -580, 595, 128, 100, -229, -55, 290, -539, 40, -7786, -270, 295, -508, 562, -1196, 218, 33, 3788, -8954, -1082, 297, -906, -322, 123, 1162, -343, -11655, 88, -28, 1173, 9, -99, 36, -11987, 356, 12630, 767, -183, -983, -559, 186, 1148, 530, -440, 1230, -456, -133, -424, 35, -357, 418, 1457, -687, 
740, -242, 17855, -368, -1057, -262, -646, 406, -712, -1058, -84, 454 }, .cb0808l1 = { 982, -26, -721, 359, 509, 13290, 2391, 727, 325, 328, 269, -156, 346, -242, -31, -356, 741, 396, -98, 108, 35, -237, -29684, 196, -69, 462, -339, 24, -1221, 352, -658, 396, 243, -1658, -458, -1153, 5, -662, -47, 18, -572, -567, -2084, -980, -210, 150, -396, 14836, -210, 0, -162, -539, 588, -868, 248, -8576, 1020, 526, 1056, 262, -149, 818, -1353, -1120, 767, -738, -634, -14742, -105, 811, 1718, -116, -64, 307, 920, -1244, 2388, 10213, -4505, -250, 617, -1725, -645, 1258, 1146, -590, 707, -12, 372, 1794, 1012, -149, 404, -978, -306, 168, -1536, 89, 142, 938, -19891, 973, -481, -419, -904, -455, -1821, -1617, 654, -2022, 1906, -497, -11346, -330, -11679, -14, 1, 535, -377, 1057, -214, -213, 430, -13, -3379, -11250, 911, -716, -240, -10, 260, 132, -611, -64, -594, -8540, 837, -3717, -1154, 906, 10623, -502, -167, 67, 119, 13501, -1469, 213, -1048, -1403, 432, -1079, 45, -230, -730, -203, -595, -1150, -460, -97, 395, -304, 27816, -300, -16, 153, -671, 551, 436, -956, -182, 194, 113, -5504, 194, 263, -332, -517, -244, -396, 540, 56, -371, 446, 147, -66, 7, -306, 1440, -308, 327, 645, 597, -6642, 72, 392, -138, -50, -144, -262, 504, -230, 114, 2076, 8175, 1188, 290, -872, 202, 69, 82, -281, -126, -291, -158, -152, -45, 239, 153, -516, -422, -691, 801, 28, 496, -298, -11118, 10430, -227, -851, 214, -801, 538, 834, -137, 942, 573, 405, 1308, 2234, 300, 1269, 12361, -752, 2177, -743, 60, 464, 946, 302, -422, 116, -1200, -110, -843, 284, -578, 732, -308, 153, -64, 156, 225, -29232, -452, -466, -130, 888, 240, 305, -83, 236, 208, 417, 1530, 294, 594, 351, 508, 137, -7274, -184, 201, 44, -635, -891, -652, -596, 380, -652, -8670, -76, -3746, -732, 262, -1860, -1030, 1366, -279, 444, 911, 209, 330, 251, -208, -747, 65, -10154, -204, 12960, -325, 347, -465, -730, -727, 385, -89, -763, -427, 868, -39, -859, 34, -29, -388, -1324, -218, 2051, -1593, 5511, 10507, -8516, 2254, 5847, -1474, 1994, 
4704, -1876, 880, -3810, -489, -946, -1225, -1104, 125, 139, -668, 2232, -537, 179, -215, 63, 144, 72, 1198, 9750, 248, -709, 308, 10552, -434, -462, 13569, 1096, -491, -262, 804, -1599, 679, 569, 604, 1326, 213, -2026, 324, -2612, -373, -12818, -20, 38, -171, 316, 15516, 306, 763, 97, 91, -832, 23, -437, -390, 505, -1226, 2518, 106, -2065, 315, 86, 523, 172, -1012, -13851, 3358, 2610, -381, -194, 1200, -4106, -1298, -3637, -1534, 780, 1367, -544, -770, 1690, 1047, -54, 2136, 12502, 32, 6689, 706, -1172, 846, -4853, 2146, 2548, -39, -465, -596, 177, 213, 421, 28, -388, 11, 69, 31, -83, -28, -166, -150, -19836, -323, 3, 659, 783, 390, 139, -138, 31, -111, 453, -80, 432, -519, -259, 686, 11431, 163, -13179, 554, 40, -379, -120, -692, 340, 169, 120, -476, 643, 778, 501, -128, 543, 1275, -134, 20568, 201, 401, 512, -362, -210, -269, -812, 112, 75, 149, -547, -494, -418, -100, -13621, -1002, 1176, 1634, -395, -4289, -1531, -47, 850, -1102, 13558, -403, 683, -164, -2215, -1180, -1750, 344, 630, -968, 669, 540, 26, -594, 192, -17, -336, 19645, 1133, 18, -56, 418, -426, -1535, 409, 732, 186, 268, -20422, -22, 62, -621, 722, 440, 96, -307, -128, 480, 5, 87, 668, -361, -599, -22, 652, -176, -114, 214, -12233, -698, 232, 608, -126, -714, -488, -228, 929, -1582, -19845, 245, -460, 124, 57, 328, -436, -158, 236, -196, -534, 209, 69, 229, 210, -251, 1100, 583, 415, 210, 189, -219, 1242, 19482, -105, 190, -374, -43, -232, 253, 561, -297, -376, -1077, -308, 13486, -12462, 64, -190, -298, -643, 460, 232, -987, -478, 1596, 168, -722, 616, -873, -98, -948, 231, -1102, 11915, 746, -495, 1248, 1203, 11067, -32, 160, -94, -24, -153, -209, -1453, -1059, -313, -922, 1143, -538, -1348, -323, 679, -54, -232, -470, 2075, -19135, 628, -774, 35, 247, -86, 721, 512, 1305, 850, 9760, 248, -2404, -220, 6, -73, -1370, 567, 1432, -2529, -1508, 14358, -992, -1111, -940, -111, 968, -530, 576, 102, -1045, 453, 180, -94, -7936, -310, 512, 996, -32, -1062, -150, -26, -6687, -181, -336, -1510, 616, 70, 
-332, -175, 624, -546, 171, 364, 1011, 68, -284, -368, 711, 46, 73, -34, -419, 404, 28270, 283, -324, 335, -131, 316, 212, -27, -342, -1062, 470, 1269, 454, 286, -1928, -1674, -739, -389, 1073, -6172, -317, -586, -194, -182, -13034, -848, 4596, -659, 709, -630, -310, 400, 344, -276, 430, 876, -2047, -1012, -1672, -180, 64, 22005, -736, 829, 266, 182, 436, -112, -36, 131, 252, -63, 154, 368, 107, 93, -42, -32768, 0, 200, -230, 271, -1776, 4329, 986, -553, 481, 1888, -2770, 848, -6305, 264, 12244, 1610, -640, 1348, -2742, -2078, 907, -1115, 370, -16539, -1571, -176, 24, -515, 234, 954, 605, 613, -154, 463, 535, -160, 684, 470, 827, 10458, 150, -669, -6684, 339, -542, -730, -351, 984, 212, 116, -7, 62, 926, 2175, -185, -552, 489, -209, 5247, 38, 366, 53, 16, 263, -142, -535, -224, 338, -174, -125, 113, -12750, 400, -410, 281, -12, 744, -173, 486, -12159, -107, -183, -484, 2, 150, 1, -239, 7, -399, -608, -873, 698, -1623, 701, -773, 272, -832, -94, -921, 885, 13588, 178, 192, 148, 1346, 44, 59, -275, -14, -328, 212, 133, -223, 300, -394, -275, -43, -76, -47, 322, -208, 21713, 484, 329, 1860, 40, -916, 502, 130, 477, 1754, 503, 7984, -338, -323, -230, 354, 928, 430, -89, -94, 108, -543, 365, -130, 70, 902, -131, 58, 469, 580, -30949, 36, 232, -410, -451, 104, -8698, 113, -1682, -42, -279, -92, -280, -477, -386, -531, 832, 80, -15002, -56, 93, 164, -721, 8388, -412, -2396, 584, 1004, -310, -2229, -304, -383, 275, 1062, 1266, 297, -70, -909, 891, 131, -1046, 539, 32502, 1000, -21, -229, 138, 1528, -175, 546, 326, 168, -320, 716, -291, -298, -227, 1094, -59, -12561, 12943, 786, 600, -206, 889, -761, 54, 332, -1253, -597, 357, -1124, -50, -168, 1172, 2266, 75, -174, 583, 408, -157, 14666, 378, 302, -5, 48, 109, 28, -21, 1044, 529, -859, -1182, -202, 1984, 308, 402, 66, -1139, 2595, -380, 1119, 309, 482, -10705, 100, -4591, 11646, -1364, -365, 9521, -318, -23, 1076, -135, -2742, -833, 78, 910, 96, -20, -599, 46, 855, -1265, 4748, 2394, -250, -9096, -962, 191, -346, 348, 342, 
1909, 15330, 266, 540, 271, 2986, 1356, 1542, -1019, -895, 737, 281, 684, -538, 10414, -922, 287, 679, 204, -11142, -2321, -346, -1572, -250, -315, -604, 1336, 311, 1317, -1111, 409, -104, -221, -14125, -1511, -990, 705, -808, 587, 676, 348 }, .cb0808s0 = { -7488, -1327, -5244, -2049, -3736, -45, 446, 1558, -755, -6052, 6034, -4326, 740, -348, 12369, 2115, -662, -685, -6592, 10176, 8575, -1035, -2752, -4453, -283, 1547, 4776, -2932, 700, 3425, -3905, 1073, 2356, -7094, -1705, -435, 4840, -1944, 1188, 780, -3963, -6170, -1726, 4759, -4356, -2124, -1686, 321, -901, 1414, -923, -2678, -1198, -14777, -2038, -3528, 123, 11216, 1904, -1914, 7588, 2744, -4265, -4886, -3530, -1495, -1709, -5857, 3829, 2196, -4842, -817, -874, -5649, -2181, -3871, 3774, -1368, 322, -1126, -996, -3873, 13698, -9369, -848, 3797, -667, -1083, 2429, -3351, -1672, -3562, -1590, -3507, 552, 6610, -4137, -10061, -5452, -6142, -1454, 1726, -1298, -4479, 6126, 1626, -2791, 1584, 1300, 5726, 2584, 11109, 696, -3344, -2418, 9029, 4346, -3554, 1393, 144, 2051, 8916, 6174, 5170, 376, 9778, -2298, -4119, 3733, -35, -2673, 2222, 1383, 2046, 2859, -16131, 1637, -1195, -662, 2800, -2241, 3801, -5062, -978, 5670, -5449, -79, 3479, 606, 3766, -1325, -265, 907, -745, 1005, -14528, -4227, -3955, -7194, 3690, 2166, -2520, 11555, -511, 5900, -388, -3854, -3440, 2136, -868, -2986, 722, 1286, -4027, 10382, -1646, 5193, 2539, 1239, 7819, -67, 3382, -3297, -46, -3808, 830, 1313, -2188, -4346, 5922, -1057, -6294, 14317, 2001, 968, 4150, -4121, 1412, -302, -8401, -1388, 10649, -9513, 1042, 840, -4606, 2098, 1166, 1472, -802, -2810, 420, -561, -325, 2652, -2866, 1334, 4878, 958, 83, 456, 1203, -7594, 14590, -1210, 2202, -1954, -1938, -3413, -1096, 6036, -1675, -1320, -4485, -10665, 10026, -2484, -3273, 4753, -275, -3542, 924, 1262, 7348, -2959, -749, -408, 4594, 4876, -491, 3409, 4616, 110, 557, -1378, -1616, -4532, 1699, 1412, 579, -494, 716, 197, -23346, -2284, 156, 1096, -151, -1827, 688, -322, 2371, -7909, -1324, 
-1683, 7861, 7074, -451, 258, 9088, 1900, 8660, 840, 3491, -3275, 3029, -475, -2122, -5725, -8668, -6069, -3458, 4240, -3007, -5463, 9395, -2686, 4718, -717, 42, -1802, 3122, -3197, -5212, -1572, -243, -451, 8213, -2199, -3372, 4110, -8176, -10525, -5551, 4312, 682, 2069, 1985, -3713, -6780, 1193, 2831, -2228, 486, -3667, -789, -1691, 4567, 464, -2114, -2340, -1881, 1921, 1602, 18418, 1535, -567, 228, -9359, -6027, -267, 3628, 32767, 1423, -74, -2817, 2112, -128, -1516, -2446, 1673, 2812, -1582, 2125, 618, 2569, 2714, -1710, 340, 3255, 848, 3379, -2317, -2361, -1823, 412, -2496, -18164, -1224, 2552, -3040, 144, -597, 7716, 4916, -2867, -2172, 2120, -2776, 675, -11985, 1692, -1384, -3588, 4310, 1020, -4215, -251, -7090, -1916, 1914, -2804, 6189, -6732, -1370, -3704, 450, -2652, 6553, -38, 10348, 1244, -2246, -3729, -2158, -1340, 2357, 3118, 9378, -1727, 3150, -3867, 1277, -15, 769, -2352, -411, 1428, -14032, -1029, 2828, -1894, 6084, -36, 518, 13159, 1095, -1185, -3207, -555, -3256, -76, 3884, 3394, 1010, 1946, 160, -4863, 4714, -7087, -3985, 5602, 3350, 7822, -5729, -7701, 9296, 3067, 3582, 5256, 13629, -4012, -2206, -3867, -664, -104, 4397, -7862, 36, 955, -38, -973, 3458, 5004, 364, -9116, -2764, -2168, -1892, -7632, -4834, -5788, -3565, -1245, -4544, 6552, 4601, 2342, 6625, 1040, 2154, -6985, 5838, -1912, -3439, 1189, -2422, -555, 3286, -14872, -776, 1228, 2434, 120, 13673, 904, -1354, 645, -1550, -1377, -1888, 1416, -679, -1685, 1731, 2404, -5786, 3285, -193, -123, 1973, 3663, -1388, -14961, -3597, 5555, -1420, 284, 1527, -2575, 1941, 871, 3900, -2168, -12763, 2970, -408, -3131, -6426, 1892, 782, 6768, -284, 1034, 9785, 6029, -3873, -4102, -4349, 2548, -3686, -5622, 4769, -351, 8178, -7253, 3687, 624, -4386, 4028, -2780, -1938, -4061, -1872, -1264, 7300, 760, 8530, -821, -874, -14225, -1143, -5400, -850, -2537, 478, 1668, -1244, -362, 877, 3481, -1338, -5218, 2091, 3996, -577, 390, 8626, 820, 181, -988, 5604, 9694, 1112, -3064, -266, 1234, -486, 1264, -2173, 
-13671, 3729, -3212, 2548, 1745, -9363, 8065, 3713, -3343, -4847, 2808, -4716, -2175, 25, -5718, 4056, 1855, 4663, 2324, -1166, 543, 2, 3931, -3196, 2771, -920, -2907, -746, -1241, -306, 2793, -22, -2642, 3048, 3256, 1804, -1310, 17876, -1816, 56, -1694, -465, -534, -2274, 6139, -2247, -2515, -1077, 3305, 1519, 273, 1128, -1637, 2561, -1534, 874, -22808, -1119, -2551, -10344, -2229, -3510, 194, 2594, 1737, 4713, 13767, 3532, -311, 8097, -1012, -841, -4360, 793, -267, -206, 12905, -2683, -6424, 196, 7098, -1690, -690, 1236, -2882, -2668, -2020, 8291, -2714, -4607, -923, -2077, -2878, 1687, -10457, -1575, 2172, -3974, 5795, 1748, -1852, -5143, 4763, -5097, -2840, -1851, 2634, 5970, 180, -3326, -1655, 1226, 375, 5137, -2678, -5246, 4327, -3670, 9956, -1976, 2189, 2952, -6785, -697, 1129, -5768, -5819, 6532, 3650, -1711, 3857, 47, -9618, -1941, 2524, -1244, 7242, 11646, -64, 2304, 201, -3707, -700, 149, 2692, -805, 3978, 2738, -977, -1004, -5776, 12779, 7454, -353, -4731, -3866, 7076, 146, -3302, 3065, 1955, -343, -1459, -426, -5906, -1318, 500, -1014, -1002, -2090, -2924, -20521, 2610, 1581, 397, -3380, -2885, 510, -1147, 3398, 1914, 99, -119, 144, -3128, 2445, 1791, 397, 3734, -80, -3410, -3798, -1142, -1515, -2615, -1540, 5193, 2187, 940, 4969, -2334, -16589, 325, -2186, -4567, 5121, -894, -6848, -6002, 1832, -568, 8259, 833, 3420, -4459, -748, 3442, 4358, -3041, -10203, 9303, -1511, -4821, 1950, -966, 3573, 453, 705, 16238, -901, -163, -2866, -104, -1767, -1779, -1249, 3251, 1975, 1254, -838, -390, -3150, 1020, 2526, -2025, 662, -2817, -1338, -855, -3442, -21123, 241, -134, -952, -588, 2572, 2080, 8153, 114, 9732, -6774, -5266, -2462, 2286, -599, -426, 1396, -7051, -1228, 312, -4495, -2525, 4649, -1305, -1106, -2366, 2232, 4065, -18674, -1295, -3259, -1004, -5136, 206, 1177, -5130, 2394, 2518, -1381, 2564, -138, 4341, 16988, 2546, 6782, -3433, 850, -970, -255, 1308, 2228, 1704, -1283, 1452, -2608, 1487, 3106, -2267, -2998, -6814, 1654, 21195, 1555, 968, 154, 124, 
-1258, 714, -407, 44, 247, 992, 2228, 2824, 1435, -341, 1212, -1612, 6126, 1636, -8368, 578, -5418, 217, -191, 204, -7147, 5110, 3766, 5055, -5979, 6683, 368, -3597, -4595, 7630, -3611, -2384, 1369, 6995, -3299, -53, 2036, -4654, 4259, 9618, -1012, -2964, 4397, -2112, 11885, -1648, -942, -3474, -544, -1410, -1958, -1535, 2981, -1591, -16787, 335, 4609, -1990, 3821, -645, 1842, -64, -3485, 3202, -374, -58, -1410, 7304, -1958, -2142, -11412, -2533, 513, -6149, -6679, 2152, 3153, 5102, 2216, -1361, 2260, 4863, -7031, 1538, -5250, -2511, 96, 3339, -3447, -3708, 7168, -4168, 838, -3134, 3228, -1531, -5598, 14125, 208, -2150, 819, -1085, 12282, 6714, -2778, -9252, -5117, -6623, -1711, -4253, -6306, -1292, -1370, -1027, -908, -2863, -1832, 4645, -722, -284, -161, -5106, 7110, -12494, -1514, -5453, -3308, 3520, 1101, -1096, -2325, -746, -33, 2645, -4458, -797, -684, 1514, 1716, -6204, 6580, -3427, -650, -10493, 4868, 5833, -2385, -274, 1530, 3892, -1940, -1415, -2389, -11499, -2064, 937, -333, 1361, -1583, 5458, -2296, -3263, -8344, -4236, -6357, -2372, -3115, 1336, -2184, 194, -4262, -7838, 6946, 4535, 749, 7332, 67, -327, 273, 3211, -1825, -357, 7039, 3346, 6282, 488, -3940, 10196, 6463, 327, 4407, 909, 889, -4943, -622, -4049, 2532, 1870, 652, 1778, 663, 3063, -1012, -1390, 4162, 20486, -86, 3166, 325, -1912, 511, -634, 1262, -4719, -1490, 6767, -3314, -125, 4490, -10334, 5386, 9932, 781, 290, 2198, 1695, 3790, -1878, 7760, -300, 2021, 5508, 2200, 232, 7138, 1370, -3268, 3496, 13934, -1230, -2303, 958, 141, 3348, -2867, -987 }, .cb0808s1 = { 9313, 734, 6610, -3629, -12020, 5317, -244, -1858, 2, -1812, -6486, 892, 926, -236, 1016, -1249, -469, -238, -1908, -10594, -4704, -907, -7746, 3847, 564, -5956, 3395, 371, -5136, 4001, 1180, 769, -555, -1872, -2943, -1744, 8620, 1485, 9901, -1392, 3425, -7940, 151, 376, 1984, 3031, 3815, -974, 537, -7038, 1964, -5625, 4457, -10214, -1787, -2768, -8514, 176, -3692, 6441, 3148, 602, -2000, 13769, -2792, 1104, -2067, -6219, 1515, 
-288, 3240, -5490, 11589, 3742, -2343, -1752, 3701, 7525, -1676, 845, 6895, 2884, 3540, 2454, 1010, 2454, -5761, 2035, 3369, -9628, -862, -7060, 1802, 5676, 2396, 2757, 5891, -701, -11896, -4061, 7932, -272, 2562, 83, 560, -5180, -2223, -356, -3343, 2874, -1370, -7612, 1773, 2006, -4258, 5312, 342, 8196, 4939, 519, 3568, 4420, 2768, -11872, -3021, 1893, 1690, -5483, -8129, 7540, -116, -2064, -4473, 1141, 1930, 656, -7728, -2742, -3276, 2782, 2860, -6082, 5198, -4751, -486, -789, -16932, -566, 5116, 1196, 832, 4282, 78, 3088, 2768, 2125, 1027, 1712, 310, 808, -1595, -106, 3174, 4598, -2945, 1551, -7688, 620, -1640, 339, 4538, 3339, 532, -351, 260, 249, -2135, -543, -18362, -648, -3871, 5514, -1782, -11301, -374, -2078, 1610, 50, -4439, -2546, -3058, 839, -9221, 2618, 1790, 103, -1061, -363, 285, -3542, 503, -437, 30, 1382, 75, -2852, -1028, 3095, 4318, -2316, 739, 801, -22765, 2162, 913, 1698, 149, 2049, -313, -803, 3393, -1476, 4396, -4003, 854, -1344, 1062, 10009, 6332, -8522, -2616, -9904, -390, -3146, -2951, 4222, 5538, 495, 3776, -13684, 4687, -2187, -905, 4997, 6209, 4775, -1234, 1956, -4607, 3006, -370, -670, -12448, -5802, 8151, 140, 1485, -6340, 2139, 1231, 22, -212, 2090, -676, 2366, -701, -4113, 365, 2970, -577, 918, 7324, -709, 2035, 5162, 7232, -13287, -3259, -908, -1900, -4255, -2590, 318, 4891, 696, -40, -1647, 1572, -1221, 4896, 5241, 49, -2083, -5068, 7645, 8978, 1628, 2895, -4930, -8068, 2266, 2025, -1868, 3250, 2642, -785, -14571, 9979, 3481, -2246, 1154, 2646, 2616, -2033, -2936, -1300, 2490, 879, -1237, -1228, -724, -1780, 524, -6619, -3339, -2526, 3533, 844, 2946, 2208, -3522, -12411, -3062, 2380, 448, 604, -4708, 2403, 1914, -58, 149, -3704, -2019, 4246, -7020, -3197, -712, -2219, 10036, -2776, -3166, 2648, 2947, 3386, 6445, 1587, -268, -536, 1895, -9005, 10791, -982, 8215, 6414, 5166, 4751, 160, 3050, -865, 6216, -1187, -7077, 1640, 5078, 4354, 1762, -3869, 1174, -149, 1078, 1884, 5149, 15091, -432, -2441, -1102, -1194, 1078, -1535, 8289, 
-2702, 4007, 694, 72, 685, 2816, 13244, -422, -7094, 432, 2044, -12004, -276, 2174, -908, -4784, 5725, -250, 22, 5116, -2, 2686, 955, -8509, -7697, -3735, 672, -1202, 4299, 4284, 12352, -2362, 5757, 1317, 4293, 508, 3050, -524, 1097, 3346, -537, -2440, -1596, -5659, 4188, -625, 1659, 3061, 2791, 1712, -2991, 966, -16903, 610, -3314, 4160, -3750, 580, -3407, -340, -11829, -520, -1625, 2905, 674, -147, -5284, -4278, -5021, 4635, 6299, 2207, 2595, -7811, -68, 4107, 4314, -1540, -11044, -2214, -803, 232, -7602, -95, 1130, 4991, -361, 1675, 4487, 3607, -6192, -130, 137, -1440, 2826, 178, -13834, -984, 1149, 1230, 1587, 1571, 3286, 5293, -2259, 2021, -6211, -7608, -2710, 2502, 4315, -539, -8530, -746, -654, -4003, -5917, -3728, 4522, -10350, -1266, 210, 5078, -2988, -3866, 3919, 969, -1063, -6300, -4584, -2420, -2094, -884, 2338, -3150, 5461, -1145, -734, 1644, 2183, 19114, -1144, -2313, -404, 1236, 3583, 134, 1802, -4088, -2795, 681, 3738, 1831, 16571, 917, -2290, -3648, -1588, -158, -528, -792, 394, -7432, 2446, 402, -391, -73, -1398, 1286, -6503, 5216, 1094, -939, 1673, -2038, 15842, -1971, 4160, -1664, 7231, 642, 5770, 4209, -1839, 220, -266, 165, 2055, 5222, -3344, -6544, 5412, 1514, 586, 1591, -15256, -2715, 941, 1308, -9170, -2863, 4935, -2913, -1291, 2791, 7967, 14, -1101, 3774, 3580, 848, 1337, 1138, -2839, -3564, -5300, 12429, 14, 1466, -7114, 1198, -3474, -237, -2577, -1305, 445, 1069, -174, 1684, 3902, 229, 5842, -690, 978, -754, 1182, -859, 21078, -185, 710, 797, -2155, 367, -2168, 1002, 3805, -924, 468, -2322, -3590, 1608, 3387, 18, 1536, -858, 642, -7964, 17689, 9843, -4878, -3003, 7373, 5934, 4286, 10484, -63, -4629, 974, -2227, 2602, 3810, 1905, -1668, -2130, 2020, -2360, 2853, 612, 5070, -1248, -868, -497, 3478, -1937, -3006, -645, 3589, 3019, -3293, 16469, -5243, -2918, 1788, -2569, 3717, -9630, -1352, -3870, -416, -4190, -8863, -6888, -498, -814, -783, -4625, 5841, 1562, -2173, 481, 280, 816, 4742, -9962, 11799, -2029, -2460, 4972, -600, -1452, -1901, 
-2122, 3130, 2686, -819, -2366, 866, -2093, 1052, -58, 284, 3830, -4738, -4114, -1321, 1307, -2820, 4915, -11701, 522, -1982, 7024, 8403, 1762, -46, 532, 5097, 5013, -615, 3086, 2089, 6899, -1107, -4047, -2903, 5356, -4802, -965, 6706, 3895, 9022, 1388, 10971, 5927, -2954, -965, -3473, -5177, -2654, 3418, -5315, -16695, -6587, -416, 404, 1230, -2586, -3292, 1390, 14, -481, -4446, 1335, 109, 1060, 3958, 1275, -5655, 1253, -2411, 207, -12550, 6208, -2447, -3415, 2503, 848, 3094, 9336, 2647, 2455, 2238, 2356, -2132, 5347, 915, 2227, -103, 5832, -2504, 7562, 9568, -6100, 4091, 2668, -1722, 287, 6763, 4058, -387, -2060, 5522, 3184, 4766, -158, 650, -11284, 11841, 6230, -4232, 5308, 3174, 4926, -2970, -4761, -980, 117, 1944, -1974, -5484, 6534, -266, -7222, 924, -2654, -588, 9609, -2337, 1892, -2110, 5088, 1856, 7964, -4029, -940, 1429, 805, -4705, -1362, 892, -189, -8354, 3259, 194, 767, -2877, -4165, -990, 12185, -160, -2002, -1384, -5388, -1604, 226, -6353, -4157, 1773, 2360, -4356, -730, -5462, -4054, -15669, -1528, -394, 4101, -203, 2792, -787, 3391, -299, 6384, -1630, -7186, -12765, 4618, 934, -401, 2790, 2284, -4932, -1260, -6009, -2590, -2285, -1289, 3366, -4192, -4462, 32767, -3135, -1888, 67, -2874, 150, 4760, -1571, 584, -2187, 358, -1733, -1286, -4573, -2003, 1872, 940, -1942, -255, -8856, -1320, -3348, 4854, -509, 2836, -14, 2490, -1537, 882, 1188, -3132, -15209, -1633, -44, -2827, 368, -1099, -1073, -467, 6318, 5863, 2840, -5200, 569, -2984, 6587, 9596, -4924, 457, 4879, -4449, 3528, 1868, -3894, -3905, 15420, -2590, -599, -4975, 3892, -1454, -616, 1890, -2700, -3268, -1386, -1065, -3078, -2454, -1902, 4726, -34, -4218, 1619, -3074, 5540, -6392, -3570, 2687, -8742, 333, -106, 2326, -1737, -3775, 397, -3553, -6632, -6066, 9567, 2904, -889, 1136, 1295, 19390, -268, -3127, -180, 1696, -814, -775, -4914, -456, -758, -866, 1102, -3740, -374, 469, -6902, 1440, -10243, -6221, -4797, -3074, -1142, 297, 5069, -1547, 5474, 716, -454, 3806, 4100, 2901, -2169, -744, 
5032, -5586, -2986, 2286, 2414, 7860, -2672, -46, -10046, 5348, -1018, 1016, 9142, 4543, 5587, 2228, -2684, -4594, -2457, -1850, -3651, -1806, 4826, -11686, 1940, -3529, 1078, -5234, -2420, -83, -2322, -5134, -775, 677, -9257, -864, -915, 4494, 411, -4820, 5999, 4472, 5823, -4597, 3121, -1868, -1539, 2338, -4249, 1154, -13422, 791, -1235, -1240, 364, 177, -1508, -2527, -2949, -2062, 118, -3115, 293, -1927, 18644, -1100, 152, -2528, 1914, -1380, -1624, 302, -831, -920, 320, -879, -1252, 813, -11, 6960, -522, 3092, -119, 1486, 3068, 6690, -3079, 13305, 6342, 937, 1632, -1026, 1896, -2335, -3961, 5510, 2782, 187, -2448, -1251, 756, -15856, 3179, -1155, 808, -1748, -6593, 1494, -3122, -98, -3808, 491, 1752, 3188, 2158, -1924, 763, 1165, 148, -3161, -1284, 18082, -195, -1125, 845 }, .cb0808m0 = { -18656, -461, 236, -1122, -796, -101, 851, -3748, 1374, -8549, -3366, -1482, 1026, 2046, 4394, -521, 232, -486, -1656, 32767, 1954, -1183, -130, 392, 194, -868, 2883, -168, -1674, -910, -34, 819, -1105, 1628, -4871, -585, -1170, -572, 451, 3911, 10770, -35, -4126, 7124, 7110, -860, -3914, -3294, 272, -647, 220, 11965, -3378, 2726, 1990, 1624, -3689, 9884, 2394, 3096, -518, 5169, -4018, 3108, 168, 1256, -410, -3851, -11176, -10479, 2042, 1421, 1488, -992, -1562, -653, -1191, 2246, 467, 4732, 154, 729, 7244, -18, 1313, -51, -1824, 1218, 1473, -6763, -11270, -4295, 4118, 1043, -5782, 1370, 46, -11027, 4086, -1501, -11, -621, 464, 781, 13680, 257, 554, 3119, 750, -1857, 1046, -1252, -512, 739, 14811, 12642, 3841, 2824, 163, 1620, 39, 4766, 1411, -2197, 525, 658, 419, 5, 92, 1544, 290, -2038, 10603, -5764, -3335, -6629, -2579, 4020, -3107, 2779, 849, 5678, 260, 2804, 99, 1339, 544, 1438, -450, -598, 764, 1568, -1034, -4560, 2604, -18205, 1644, 1003, -675, 3217, -334, -832, -1452, 322, 608, 300, -4776, -812, -36, 627, 1654, -248, -838, 21571, -89, -1626, 530, -1151, 9440, 522, -6138, 2213, -10095, -562, 1000, 5037, -122, -3, 7064, 397, -2118, 362, 15791, -1047, -15010, -1527, -1356, 
-2805, -560, -3148, 266, -45, 1324, -3312, -1772, 2382, 189, 6537, 124, -1272, 156, 588, -2678, -3106, 2828, -3684, 689, 3884, 4650, 192, -323, -5426, -722, 11486, -607, 3591, 4299, 2117, 362, -9114, 11700, -3391, 2357, 7639, 2197, 4350, 2970, -2525, 169, -6112, 91, 1520, -19, 1558, -4588, -837, -8163, 897, -7992, 2080, -3102, 774, -10592, -314, -137, -524, 87, -799, -111, 74, 1312, 862, 266, 243, 199, -288, 1205, -829, 1650, 2880, -24776, 3867, -13101, 597, -9778, -2084, -3089, -1112, 548, -638, 3727, -446, 4877, 2099, 68, -2736, -4914, -7103, 263, -9228, -782, -2109, 1088, -1881, -1424, -30, -1353, 586, 4085, -3573, -11921, 2366, 516, -1028, 834, -234, 2150, -15893, 2305, -3619, -2567, -8366, 610, 2946, -2383, 2293, 946, -3550, -6770, -1481, -758, -864, -232, 2855, 40, -2330, 2069, -345, 1801, -589, -1241, 647, 6988, -2625, 14308, 2801, 759, -2740, -680, 964, 365, -506, 22268, 1766, -202, -2751, -293, 3754, 1280, -521, -3355, 4615, 594, -1783, -39, -46, 48, -2638, -551, 2548, -1880, 3730, -1726, 939, -345, -7, -1630, -23405, -1002, 5655, 2100, 440, 1682, 1020, -594, 344, 1511, -1286, 5518, 473, -11398, -4552, 720, 4701, 7726, 126, -1953, -484, -1648, -1766, 1589, 996, -688, -381, 1678, 1498, -528, -860, -667, -823, 32767, -463, -243, -1242, 1074, 2460, -1411, -459, -1533, 1462, -2603, -784, -391, 338, 3444, 2170, -924, 949, 1972, 1520, -3062, -671, 12908, 2636, 2805, 722, -12016, -26, 616, 1192, 1193, -1028, -128, -22850, 191, 408, -3105, -592, -440, 1264, -2580, 847, 850, 2300, -278, 126, 2214, -2693, -21, -194, -594, -533, 45, 570, 38, 636, 1276, 171, 29846, 648, 911, -358, 300, 602, 413, -10167, -54, -1353, 42, -1770, 491, -12154, -1808, 26, 425, 2009, 910, -8134, 362, 2001, -114, -2586, -1049, -249, -312, 160, 1677, 27043, -44, 160, 834, 243, -606, -272, -979, -1605, 105, -491, 754, -230, 2442, 24, -5139, -395, -3562, 14436, -1208, -3232, 2555, -12980, -906, 429, 217, -432, -1263, -244, -225, 912, -64, 780, 1101, 854, -240, 308, -28630, 518, 32, 976, -8642, 
-3041, 1801, -742, -1513, 128, -3189, 857, -14277, -1802, 1229, -68, -565, 65, 4094, 1614, -8254, -1153, -640, 16225, 3508, -1383, -3882, -347, 1346, 3845, 2665, 2340, -1862, -5318, 1402, -1352, -21682, -694, -1182, 286, -806, 2133, 1848, -532, -3750, 7564, 1054, 284, -3742, 2559, 2748, 3408, -1544, -342, -22578, 1225, 958, 2559, 267, 378, -3608, -1404, -1669, -13, 1135, 153, -625, 1436, 211, 556, 739, 1094, 10452, 850, 5128, 11469, 121, 4937, -3643, 1371, -373, -6686, 229, -3256, -75, 1304, -1023, -452, 288, 12709, 13572, -501, 1840, -1044, -2014, -4077, -2726, -1010, -3826, -629, -466, -923, -847, 5784, 898, -12036, 1253, -1741, 1546, -3710, 2782, -3430, -1810, 263, -8254, 3126, 55, -376, 202, 968, -1686, 944, -15300, -2664, 1393, 783, -11080, 1714, -1666, -1064, -4859, -2344, 334, 1313, -1209, 877, -1828, -2130, -3057, 340, 8030, -3222, 11622, -5620, 1469, 3340, 2862, -3945, -868, 351, -1314, 2277, -2346, 12384, 996, -2460, 1810, 703, -2158, 3168, -9887, 8754, 3503, -1414, 445, 850, -30, 2389, -617, 3271, -1606, -5633, 2993, 10009, 5704, -11589, 4278, 1304, -2418, 479, -16596, -12349, 2915, 327, 895, 1278, 1412, -310, -653, -1287, 880, -4294, 38, 2179, -2074, -1810, 198, -1544, -8008, 2456, -2821, -3223, -3713, 11763, -2081, -141, 4833, 1652, 3598, 551, -1655, -1154, -60, -302, 739, -1494, 2595, -1006, 2665, 10834, -11270, -2996, -636, -446, 1816, -1539, 4149, -184, -100, -55, 265, 2207, 639, -162, -2210, -626, 605, -21149, 2163, -970, -330, -4655, 3396, -3092, -544, -650, -304, 93, -1484, -888, -8982, 1871, 1701, -1423, 1671, -11, -1287, -14292, 592, 1040, -622, 13202, -660, -12745, -2836, -1832, 3481, 1546, 235, -646, 2132, -602, 2391, 1534, 3599, -4932, -296, -1855, -2075, -2646, -219, -10248, 1161, 5955, 6954, 9109, 3498, -5932, -1787, 373, 1234, 1244, -813, -76, 9083, -5120, -499, -1774, -2150, 10601, -170, 1160, 982, -597, 95, 151, -534, 6554, 840, -958, -720, 2066, -50, -2877, -74, -2068, -24760, -725, -357, 1273, 1941, 2525, 46, -819, -230, 1030, 2291, 
-287, 1092, -2315, 427, -19, 448, 1698, 9797, 10962, 3034, 2622, -2652, -1128, -194, -180, -1176, -1794, -22248, 244, -3, -1856, -1054, -2751, -459, -62, -433, -2274, -1790, -192, -720, -421, 55, -721, 1960, 1094, 2500, -2353, -480, -784, -1221, -505, 1738, -9960, -10772, -13657, 122, 387, -667, -454, 988, 30780, -757, -2319, 878, 962, 753, 1306, 716, -771, 539, -705, 508, 1915, 2114, 937, 447, 935, -1432, -1143, 4435, 11759, -2442, -53, -10601, 1979, 5419, -2296, -172, -5987, -1168, -2012, 2257, -1451, 97, -1253, 5548, 884, -14448, 3134, 2549, 172, 5404, 869, -83, 597, -12416, 762, -1035, -805, -1369, -804, 664, 9644, -4329, 1130, -1526, -2900, 628, 620, -6436, -2370, 2107, -11836, 37, 864, 2105, 314, 216, -529, 810, 3141, 3716, 7019, -2653, 1466, -14940, 13128, 1218, 2287, -145, -443, -923, 476, 2411, 5428, -611, 2212, 1450, -3042, -4750, 3562, 587, -15378, -15151, 600, 1029, -2353, -934, 1986, 1444, -2171, 1020, -700, -1508, 195, -2466, -798, 16460, -2164, 520, 2711, -13832, -2024, -871, -5268, 3556, 117, -416, -8, 2128, -1570, 2052, -3169 }, .cb0808m1 = { 16492, -295, 2556, 1303, -440, 7584, 3305, -3422, -1196, -1809, 2142, -1292, 1048, 314, 1945, 578, 1080, -255, 1109, 617, 1597, 198, -29081, -243, 54, -33, 76, -418, 1332, 475, 1495, 1554, -782, 308, -1286, 1044, 300, 1544, 646, 9441, 2577, -11140, 1421, 1107, -483, -590, 625, 8544, 446, -1814, 1714, 685, 9620, -4981, -3100, -724, 8439, -2333, 506, 3557, -1160, -2199, -659, 4107, 8620, -1406, -3745, 1729, 10756, 868, -82, 2584, -3140, 3632, 2617, 3880, -1175, -163, 1864, -980, 551, 201, -433, -1464, 708, 1926, -8471, 3870, -2376, 15567, 2112, 753, -2450, 72, 1131, 2932, -139, 6392, 1547, 3, 625, -823, -1750, 811, -977, -1389, 1300, 1184, 399, 4684, 196, 3679, -1672, -218, -11023, 98, 492, 4072, 1213, -2004, 3602, -1787, 1288, -9442, 4157, -4267, 3509, 5317, -574, -11094, 1078, 6240, 1593, -12773, 408, 3960, 1116, 1517, -816, -577, -696, 554, 1645, -936, 83, -20255, -754, 1460, 1110, 1412, -757, 377, 2373, 
-1608, -1414, -1028, -3152, 1534, -4145, 2274, -286, -7058, 2286, 4013, 2515, 2681, -5602, 0, -1740, 257, 756, 11496, 954, 4513, 3968, 4851, 278, -511, 829, 2853, -9743, -3723, -1550, -444, 4256, -679, -11411, -4290, -1470, -4191, -952, -239, -198, 1361, 9527, 1481, -981, 1403, 991, -255, 9326, 1832, -1936, -135, 1123, 2756, 1932, 2543, 795, 12612, 2429, -498, -13185, 3812, -1628, 196, 1822, 4333, 2760, -676, -2902, 1244, -1974, -7046, -12216, 1503, -2176, 1916, 365, 636, -11348, -5030, -3319, -3794, -1016, 1157, -4158, 3424, 344, 4494, 812, -3074, 4356, 293, -3463, 1232, 1746, 2696, -8269, -961, -4316, 130, -4278, -14007, 3025, -2703, 179, -10176, 1511, -1460, -1100, -1171, -1575, -2596, -2026, -11400, 2689, 1480, 743, -1669, 2728, 742, -60, 11452, 84, -662, 1424, -15103, -410, 2141, -1664, -1378, -122, 97, -358, -820, 382, -3865, 374, 1698, -21, -752, 595, -8771, -731, 9368, 1698, -2586, -6790, -2507, -1776, 4993, -3867, -2807, -190, 14465, -13938, 3095, -1198, 374, 1682, 1888, 286, -576, -2094, 454, -690, 1396, -1139, -422, 405, 238, 1718, 2048, 13448, -151, -247, 202, -900, -5630, 3121, -10988, -1615, 1955, -3901, 3360, 1429, 3928, 1951, -1099, -435, 1572, 1500, 19176, 731, -439, 3686, -3039, 244, -4270, -34, 1289, 296, -406, 2216, -1400, -1946, 264, 1536, 2992, 54, 892, -181, -1545, 278, 24923, 989, 1301, -1279, -188, -198, -661, 612, -1520, 2355, -12972, -694, -560, 1364, -2988, -6236, 2555, -6630, 1423, 440, -598, -1092, 304, -2529, -1698, -909, 2560, 844, 768, -2988, -661, 18432, 1158, -639, 5070, 11015, -14, 2313, 756, -1941, -10986, -490, -5235, 2646, 2406, 170, -546, 337, 6499, -4450, 5598, 299, -504, 14322, -972, 9356, -2056, 8812, -1599, -1931, 2084, 119, -983, -305, 1437, 403, 2651, -159, 229, 209, 1438, -1789, -1159, 1017, 416, 408, 454, 858, -652, -1554, 1198, 18278, 122, 433, -165, 162, -10532, 11563, 4754, -2022, 4246, -1396, -2417, -1796, -1496, -1279, 3877, -1217, -770, 983, -609, 1766, -184, -5664, 546, 7948, 1978, -250, 4350, 3498, 2797, 802, 
846, -12628, -1092, -240, 781, -11252, -955, 9944, -222, 1177, 1262, -534, 1790, -7396, 1452, 4251, 303, -3714, -2295, -290, -227, 672, 22690, -622, -466, 1599, -496, 326, 871, -1948, 148, 449, 214, -2175, 713, 394, 1921, -28716, -786, 1083, -641, 1232, -246, 1572, 1575, -879, -2962, -57, 369, 1633, -1457, 1194, -1222, 304, -955, 104, -1249, -935, 135, -758, 3483, -1190, 1457, 1130, -1284, -3709, 18042, 6, 25, 1233, -328, 347, -512, 2071, 328, -18037, 4582, 3841, -434, -745, 332, -576, 3006, 336, -11505, -646, 3509, -996, 1270, 2041, 1353, 1193, 2976, 11569, -3165, 1450, 4351, 2522, -10022, -6, 12602, 874, 518, 475, 1251, -3290, -2674, 4802, -11794, -946, -426, -2846, 1619, 1105, -1022, -1, 1759, 646, 10347, -2937, 13505, 1104, 614, 1149, -800, 2377, -115, 792, -948, -2431, -1779, -1142, 809, -3130, 447, -15516, 313, 11235, -1346, -2426, -2737, -1738, 2236, 1094, 802, 1323, 3612, -213, 1383, 2800, 10394, 1210, -2360, -10203, -1991, -102, -2669, 2303, -2184, 1830, -1158, -5633, -4083, -252, 311, 612, -331, -2786, -12421, 9994, -6006, -4996, -954, 1014, -1147, 860, 1252, 1114, -2069, 266, -230, -591, -4442, 230, 20603, 1386, 1130, -1468, -3600, 2168, 836, -1754, -511, -542, 216, 26, 3476, 1165, -4293, 3098, -245, -579, 1830, 2248, 5326, -18357, 397, 5466, 734, 3920, -3678, 319, -1062, -610, -7509, -1064, 1456, -5729, 1088, 9099, 2266, 241, 201, -10017, -1545, -2799, 1491, 27098, -60, -1736, 1387, 859, -1474, -79, -1122, -971, -1302, 906, -1133, -2659, -296, 1344, -2698, -448, -1476, -212, -1585, 1310, 14353, -2165, -2229, -656, 5219, -3266, -1850, 7942, 4997, -2295, 519, 608, -9498, -1700, 1770, -15308, 1286, -2914, 2252, -717, 2136, 2478, -3747, 2362, -5, -237, 2334, 701, -774, -672, -20, -599, 623, -700, -713, -979, -29926, -1090, 848, -141, 1273, -711, 1782, -221, -103, 170, -185, -1059, 3066, 1321, 1182, 3641, -217, 1959, 11806, 2390, -10312, -2575, 1612, 596, -352, 2197, -2041, 2385, -898, -9363, -1144, -10896, 20, -7842, -1047, 3687, 2147, 2584, -249, -72, 
32767, 1936, 446, -889, -845, -896, 1269, 448, 327, -3411, 4, 702, -1900, -646, 799, -770, 662, -911, -856, 287, 1667, -108, -64, 16, 1578, -2059, -27327, 112, -188, 2504, -692, 250, 360, 564, 868, 4147, 1340, 18080, -3584, 445, 364, -2623, -412, -2918, -116, 2611, -2396, -44, 10934, -1512, -1166, 239, 913, 190, -14681, -767, 2610, 2931, -2389, 3590, -1680, 6287, -531, -616, 1317, -1034, -900, 871, -329, 467, 1200, -1914, 1108, 3150, -6878, 544, -14411, 2807, 5427, 13361, 1448, -1753, 524, -5851, 1467, -1866, 6888, -8742, 1372, -1515, 4883, -2248, -1042, 4628, 10768, 149, -358, -1287, -1289, 32767, -1137, 941, -2112, 451, -1436, 174, 294, 475, -3667, 1610, 1641, -599, 626, 2058, 671, 1626, -985, -123, 2040, 421, 1797, 18448, 2538, -359, -5042, 3096, -1136, -320, 1823, 30, -12002, -1297, -850, -418, -1497, -1761, 5073, 10944, 212, -4713, -1614, -1752, -2135, 483, 1043, -1989, -293, 39, -1049, 67, -7482, 712, -5358, 896, 12460, -1744, -1793, 1538, 3577, -6, 418, -72, 1072, 1367, 1080, 3564, 1468, 482, -1298, -6442, -299, -12934, -757, -4199, 3842, -11331, -1216, -206, 1598, -1135, -3240, 3294, -286, -540, 777, 1188, -1189, 4516, 2638, 2071, 9702, -900, 1002, 18707, -705, -1856, 1185, -4832, -1694, -3502, -2324, -2826, -4600, 1996, -3110, 110, 117, 405, -16854, -510, -14725, 1699, 1922, -2117, -2718, 45, 1064, 507, -1781, 2106, -2310, 1239, 5860 }, .cb1108l0 = { 2354, 8016, 12528, -947, -348, 1760, 2054, -3960, -2125, -3578, 3932, 1647, -3316, 6053, 392, -3128, 3209, -2445, 463, -2835, -1555, 1259, 296, -1465, 1839, -4811, 420, -215, 469, -1013, -272, 185, -27061, -1154, 8, 298, 259, -953, -555, 472, 617, -1127, -673, 982, -398, -1681, 328, 882, 614, 800, 431, 84, 880, -240, 15758, -14324, 1301, 1578, 932, -694, -1456, 2435, -1651, 1464, 227, 1527, 527, -128, 698, 2405, -726, 1489, 1016, 1938, -1897, -1478, -238, 932, 2507, -519, -1147, 557, 2334, 700, -12914, 14861, 158, 255, 1195, -883, 3359, -1045, 2095, 520, 249, 926, 789, 1392, -185, -1654, 902, 9, -2166, -1916, 
543, -2126, 2842, -332, 1356, -344, 436, -404, -174, -489, 858, 258, 229, -45, 327, -316, -1176, -454, 115, -220, -458, -194, 271, -530, 1572, -574, -25068, 167, 601, -1027, -1705, -3144, -4231, -1636, -1012, -1002, -519, -825, -458, 945, 546, 193, -17909, -156, -1067, 826, 338, 1152, 562, -506, 848, 239, 188, 656, 97, -174, -59, 242, 1946, -67, 745, 2043, 424, -192, 574, -524, 1553, 566, 1480, -747, 487, -20623, 872, -1089, 1034, 1357, 919, 153, 154, 498, 54, 555, -989, 707, -85, -21, 700, -1424, 90, 655, -399, 123, -709, 117, 438, 330, -720, 190, 812, -138, 460, -32768, -162, -410, -327, -122, -1208, -554, -502, -178, -309, 373, 4295, -945, -5502, -2752, -6615, -1241, 1278, -1315, -7683, -986, -419, -50, 2384, -4640, -6246, -11804, -308, -446, 3486, -4824, 1736, -590, 960, 195, 593, 164, 3355, 1655, 1233, 66, -787, -347, -13751, 74, -1209, -812, -4098, -102, 910, -1659, -2036, -3147, -2075, -2605, -1240, 4499, 1727, -9484, 549, 728, 3411, 1958, -4439, -1064, 5690, -1600, -1984, 1695, -588, 4815, -138, -3380, -512, 1553, 1978, 4310, -730, -402, 828, 2124, 14216, -222, 2757, -8686, 523, 2516, 1017, 790, -136, -470, -252, -717, 808, -1113, 13766, -114, -1182, 3053, -5238, -2231, 1720, -511, -987, 1592, -1257, -2578, 1777, 1075, 2367, -227, 2330, -672, -2620, -1449, 2122, 362, 1249, 1338, -327, -21631, -1540, 24, -2356, -656, 1981, -92, -207, -2188, 34, -457, -1291, 1231, -460, -128, -396, 593, -671, -1513, 136, -335, 560, -1121, 490, 1008, 948, 8629, -3344, 467, 881, -5731, 1120, -300, -1432, 1227, 1558, 990, -1078, 214, -922, -81, 1120, -15586, -1176, 1203, -1911, 151, 1484, 1555, -421, -420, -428, -762, 292, -59, 1075, -649, -841, 494, 194, 27, -768, -356, 54, 163, -73, 293, -1717, -392, 750, 234, 751, -111, 26858, 911, -389, 451, 442, 260, -117, 270, 19, -2429, 618, -962, 378, 10, -1954, -1336, 525, -258, -693, -4155, 10265, 2924, -1361, 3197, 10199, 6870, -2608, -5792, 619, -1994, -2035, -701, 2598, 465, -575, 311, 175, 162, -1191, 162, -157, -147, 325, 551, 
536, -188, -290, -165, 343, 14, -268, -27113, 278, 127, -233, -68, 606, 125, 665, 438, -442, 2510, 800, -1991, -641, -386, -1574, 78, 946, 189, 106, -2249, -268, -1708, -1192, 986, 3076, 1807, 21, -5884, -964, 256, -15916, -1320, -2867, -3562, 491, 3502, -337, -1542, 496, -3182, 1676, -2371, -4264, -2053, 14342, -5674, 1744, 1813, -3731, -3761, 1350, 1783, -438, -920, 2366, 1438, -687, 512, -1934, 323, -3158, 1775, 1964, -6742, 10162, 7763, 1469, 1967, 851, 2742, 7413, -3338, 742, 1854, 310, -192, -936, -1770, -775, -976, -1532, -1436, -670, -4032, 1194, -1336, -4369, 332, 604, 962, -27563, -972, 842, -743, 275, 713, -251, -799, -1190, 372, -213, -423, 202, 1189, -31, 1084, -974, 756, -148, -1669, 640, -549, -339, -1506, -112, -598, -870, 410, -13307, 13141, -1911, 2308, -92, -776, 221, 1503, 1578, 803, -308, -1672, -404, -83, -3517, -1327, -606, -2426, -61, -513, 318, -1805, 2049, 1887, -777, 1268, -542, -116, 3550, -18840, -2986, -979, 2653, -2875, -922, -10520, 804, 107, 3234, -1270, -608, 1042, 3599, 965, -342, -2096, -267, 1704, -3939, 791, 2180, -985, 816, -716, -2661, 99, 1523, 11902, -1782, 775, -12517, 3244, -3762, 2046, -278, 1539, 2895, -2425, -10, 990, 1484, -1377, -3399, -984, 3171, 1513, 696, -785, 155, -1072, 414, 2016, -1932, -3124, -1126, 68, 3855, 1360, 4074, 17596, 1714, -596, 4000, 1656, 230, -258, 2266, 843, -1720, 4624, -714, 854, 696, 636, -1357, 350, -1256, -523, 168, -9933, -766, 198, 2680, 8060, 2168, -2789, -14255, 1444, -520, 169, 1032, 1478, 294, -644, -320, 856, 1282, -216, -1000, -925, 2, -890, 679, -629, 1152, -1329, -13941, -16385, -1050, -1022, 106, -1151, -41, -709, 1771, -882, -729, -1420, 1544, -120, 386, -838, -2744, 1559, 904, 273, -4221, -1065, -312, -1046, 234, 830, 387, 172, -956, -332, 360, 408, 125, 90, 348, 915, -264, 911, 263, 124, -620, -612, 220, 164, 202, 124, -30252, -159, 1006, -320, 283, -1641, -1312, -9057, 5525, 7520, -2884, -12194, 2771, -1164, 1842, 1261, -582, 766, 2498, 393, 953, -617, -756, -323, -1862, 
1195, -1326, -436, -965, 366, -6727, -1226, 9014, 400, -1258, -812, -279, -404, 1621, 86, 1622, -16, 96, -515, -257, -39, -134, 1843, -294, -491, -908, -120, -720, -1162, -1555, 405, -134, 528, 23596, -77, 183, -444, 2077, 955, 649, 2246, 3236, 735, -1202, 7954, 9440, 6134, -7267, 28, -3398, 500, 4965, -1230, 306, 357, 2942, -906, -4733, 903, -3945, 4447, 1046, -1125, 465, 1183, -12710, -1018, -11302, 5177, -219, -6232, 1552, 2061, -1372, -1290, -822, 295, 814, -3003, -527, -614, -856, 802, 167, 1178, -494, -1625, 754, -1550, 682, -1286, -480, -694, 86, -67, -1429, -1235, -559, -311, 322, -308, -56, 296, -158, -24, -748, -197, 26954, 1054, 209, -226, 165, 681, -131, 341, 341, 1510, 615, 907, -264, 1355, 388, 198, -5, 418, -783, 28539, 82, -559, -459, -344, 279, -114, 966, -529, -423, 286, -418, -766, 42, 186, 461, 418, -688, 2937, 2793, 146, 1709, -665, 2022, 293, -1522, -2740, -15926, -600, -1503, -1732, -2827, -1027, 1702, 252, -643, 470, -815, 858, -1954, 1190, 1847, -16, 266, 29, 486, 25985, 139, 220, 433, -330, 168, -362, -562, 180, 906, 386, -845, 664, 1064, -616, -1498, -335, -164, -930, -854, -869, -101, -204, 835, 117, -16034, -4478, 2634, 1629, -1873, -1156, -373, -526, 2537, 967, -2433, -857, 1264, -1670, 113, 845, 7654, -1343, 5245, -1605, 2236, -1190, -48, 3340, -1981, -1606, -1369, -227, -727, -570, 1136, 1868, 667, 92, -144, 531, 949, -1086, 530, 1764, 302, 190, -28036, 182, 825, 229, -656, 585, 444, 200, -1195, -1855, -387, -781, 1156, 692, -1164, -517, -464, -275, -328, 218, -970, 174, -384, -561, -38, -720, -140, 1021, -271, -57, 463, -25313, -342, -40, 26, 159, -854, 916, -1532, -1033, 265, 105, -719, -588, 96, -435, -296, -226, 224, 357, 30, 576, -66, -30037, -72, 374, 32, 256, 304, -852, -706, 248, -741, -379, 980, 629, 1344, 3858, -2211, -153, -3914, -3775, 1570, 718, -1042, -1338, -4409, 1338, 5118, 5186, 3619, 2142, 9081, -2784, 4169, 3598, 6621, 4562, -170, -614, 1196, -1174, 5024, 721, -71, 267, 4, 25598, -369, 356, 331, 1099, 377, -356, 
-938, 1161, -863, 1107, -132, 222, 148, 1410, 908, 60, 377, 1280, 468, 690, 454, 247, -4552, 6122, -267, 2973, -5932, -6424, -4983, -4193, -3386, 1691, 1349, 1419, -3730, 300, 12150, -2927, 1588, -34, -2435, -271, -961, -1744, 1881, -73, -453, -788, -798, 9166, 2744, 858, 342, 991, -287, 822, -37, 1156, -1493, 723, -14127, -1755, 2029, -933, -1276, 632, -5249, 464, -272, 1149, -290, 4693, -728, -1475, 841, 10, -283, 92, -268, -295, 358, 160, 405, 2, -381, 679, 716, -190, 128, 275, 255, 123, -412, -453, -273, 26, -174, -340, 644, -376, 27584, -25, 66, 3107, -1707, 911, 500, -1029, 1029, -1557, 9020, -398, -2512, -582, 1131, -16696, -429, -1284, -3, 2320, -532, -302, -174, -146, -413, 2152, 1009, 42, 402, -1471, 157, 5742, -782, -229, 2379, 646, 2842, -1776, -463, -2749, -3617, -1710, -12281, -566, 263, -3174, -2337, 9590, -1150, 2465, 4577, 2064, -648, -2175, -1877, -674, -634, -338, 343, -1492, 878, -530, 1072, 13670, -4542, 746, 9704, -4188, -7076, 1179, -740, -589, -876, 268, -1080, -986, -4584, 2692, 3032, -2067, 230, -3533, 944, -4950, -1908, 1452, -255, -698, 1460, -606, -250, -154, -22303, -945, 1626, -588, -482, -1549, -129, 978, -631, -722, 1094, 1771, -311, 532, -508, 696, -1128, 1270, 854, -84, 4290, 414, 3351, 1061, 931, -2936, -9606, -35, 2514, -1095, 567, -452, -8520, 4037, -431, 2744, -2276, 2647, -1188, -454, -400, -3698, -315, 11558, -667, 512, 162, -395, -13015, -11, -1944, -890, -14358, -3850, -4296, 1310, -580, -248, 1305, 402, -1049, 115, 2085, -1797, -1172, -321, -919, -313, -512, -131, 1619, 576, 499, -2024, 130, 14, -76, -6324, 495, 2445, -16757, -2348, -2706, -1906, 2377, -2252, -619, -2579, 643, -661, -1276, 935, 893, 992, -2204, -2451, -395, -508, 1163, -216, -13034, -718, -1018, -1675, -698, 710, -257, 2658, 1178, 2046, -2270, -2588, -14442, -1142, -1026, 2247, -536, 314, 123, -1175, -673, 1576, -2600, 5, 964, 619, -1714, -14811, -1502, -1646, -1151, -93, -11652, -222, 343, -2203, -351, -928, -289, 2679, 2101, 742, 747, -2245, 146, -1828, 
-2728, 1058, 1048, 3046, 242, 4432, 246, 350, -13419, 768, -805, 1819, 14332, 1316, -370, 391, -1421, -1426, -355, -812, -64, 196, -2917, 1097, -1550, -1246, 436, -62, -813, 1350, 555, -2236, -1589, 980, -1483, 10122, -2434, 7236, -15225, 1513, 2090, -1224, -83, -2821, 664, 658, -3242, -1031, 1509, 2667, -160, -1315, 1060, 891, 432, -1311, -2503, 1304, 1295, 1745, -722, -2496, -4409, -3360, -2776, -2793, -4921, 12616, -1031, -443, 1495, -2416, -4640, 4508, -2944, 2608, 1323, -394, -415, -2111, -2065, 1030, -3636, -1338, 2916, -3007, -3680, -3152, -115, 577, 2742, 785, -4429, -1945, -304, -4883, -133, -3136, -1927, -576, 618, 1780, 2568, -2102, -158, -3986, -1187, 280, 655, 162, -1352, -5730, 15372, -1314, 1553, 274, -2873, 4221, 4610, -4143, -13699, 2760, -1255, -238, 1487, 1583, 1422, -2272, 4734, -6368, 795, -406, 1498, 1588, -500, -2744, -875, 2080, 1901, 960, 344, 979, -258, 952, 2526, -11785, 893, 669, 1361, 518, -1368, 3854, 2539, 623, -1835, -4177, 2686, -2956, -2804, 1121, -8890, 1377, 1125, -3990, 140, 3594, 1757, 2271, 366, 1723, 2150, 13557, -1768, -1433, -6632, -578, 3266, 2509, 7142, 680, 1532, 1318, -1123, 5668, 1283, -412, -5404, 2893, -2647, -2695, -1412, 340, -650, 863, 1895, 2867, 384, 626, 856, 508, 1365, -295, 960, -26080, 234, -4, 239, -412, -6, -765, 736, -30, 136, 912, 538, -792, 413, 871, -437, 305, 30, -194, 1105, -1113, 3550, -4854, 449, -549, -7626, 3706, -3698, 1778, 1441, 2240, 73, 513, -3383, -2346, -1372, 3955, 2973, 1175, -6087, 5071, -2135, 8552, 4961, -1201, -1458, -2627, -730, 515, -756, 476, -1104, 2115, -1276, 498, 336, -451, 809, -1030, 556, -211, -70, -93, 89, -755, 296, 872, -282, 380, -298, 2774, 660, 1339, -545, 429 }, .cb1108l1 = { -13570, -9232, -673, 267, -819, 1633, -33, 623, -850, -4376, -1135, 999, -262, 1928, 695, -1751, -2793, 772, 5064, -1158, 280, -2144, 1313, 888, -2482, 469, 2996, -1406, 12525, -1200, -1202, 939, -3, 847, 818, -924, 135, -1308, -12000, -544, -592, -3914, 441, 3372, 3188, 1314, -1836, -706, -844, 
-1319, 1029, -1754, 172, 2468, -903, -889, -14602, -2054, 11694, -1980, -730, -1661, 214, 1243, -337, -646, -95, 1432, -854, -236, 88, -2, 514, -1643, -84, 3561, 302, 770, -1248, 480, 664, 738, 1728, -1783, -2227, -702, -3582, -16641, 1713, 1506, 660, -2471, 2061, -48, -3161, 1697, 900, -1477, 558, 287, -5515, 1023, -1972, 999, -1856, -3022, -228, 711, 1270, 2644, -648, 1064, 3899, -1205, -754, 1080, 1262, 18, 860, 2274, 655, 494, -221, -15647, 1334, -473, -1648, -341, 3541, 3109, -1671, 639, -2491, 185, 477, -388, 5198, -5680, 812, 700, 2180, -536, -19468, -2508, 2592, 2901, 32, -1165, 1500, -422, -790, -1914, 971, 111, 1226, -1302, 541, -3862, -832, 642, 305, -3870, 8921, 570, 180, 1734, -1572, -891, 17672, -756, 702, 2740, -647, 2122, 102, -1371, 461, 454, 204, -307, -1248, -2330, 1353, -1783, -1939, -601, 512, 2118, -2178, 254, 1190, -1252, 923, 1166, 360, 320, 320, 1210, -142, -416, 1260, -205, 1403, -1025, 19252, 328, 58, 21, -1044, 1786, 2153, 697, -436, -1617, -869, -493, -2419, -3102, 1995, 1519, -1799, -153, 2689, -665, -1371, -915, 18486, 941, -2612, -1057, 1076, -3351, -48, -1478, 575, 728, 130, -168, 40, 898, 2141, 1518, -965, -1910, 896, 838, 1220, 416, -1494, 1404, -126, 21472, 604, 1740, 102, -812, -796, -734, 1082, -507, -468, -1732, 1171, 252, 359, 436, -765, 791, 726, -810, 1838, -1798, 5662, -2362, 1275, -2829, -4041, 1398, 2681, 480, 13740, -752, 2252, 1306, -1026, 1834, 54, 9993, 559, 1370, 711, 1918, -1757, 646, 16, -3262, 2676, 1751, -2595, 4782, -1050, 2401, -15131, 1100, 386, 708, -359, 455, -25, -950, 241, -482, 268, 2327, -2766, -142, -1992, -566, -36, 990, -6302, 3245, -1394, -1579, 760, -757, -2115, -8542, -2945, -800, -4027, -3102, -1319, -1989, -1787, -426, 590, 1031, 467, 31, 2674, 1686, -14352, 1174, -1446, -813, -1267, 2919, 2052, -1574, -753, 3369, -1090, 3830, 2042, 11376, 1140, 895, 1130, -720, -1284, -2277, 49, -724, 397, 13201, -985, 1599, -365, 1517, -496, 978, 2152, 1391, 1777, 3032, -936, 280, 1719, -4551, 4874, -941, 
-160, 956, -676, -229, -548, 183, -16606, -855, -3433, 1248, -578, 2254, -532, 3081, -1406, -1859, -605, 1809, -1001, -114, -1222, 3890, -609, 3114, -2430, -2142, 440, 1780, 1606, -4211, 1047, -456, 8280, 9, 5866, -1718, -932, -13049, -562, 3097, -583, -21, -1972, 1254, -172, 527, 2282, 5064, -5391, 1074, 357, 1845, 24, -996, 100, -50, 1098, 2905, -417, -937, -439, 247, 18502, -2380, -2088, -402, -580, 83, -282, -70, 969, 540, -219, -1132, -1701, -195, -3030, -2748, -1974, -1304, -1909, 1080, 1042, 1124, -128, 5816, 2303, 2840, -2420, 35, 16550, 721, -2079, -1489, 1023, -654, 2025, 1479, -185, -2449, 500, 3034, 2663, 3911, 1203, 998, 594, -533, -163, -262, 739, 13, -426, 182, 394, 350, -30055, -371, 150, -430, 147, -1122, 43, -390, 298, 831, -194, 158, -114, -257, -1346, -585, 206, -456, 478, -502, -1710, -1719, -581, -536, 45, -861, 825, 1093, -255, -685, 38, -20, 419, -594, 10, -1408, -526, -19191, 196, -1496, 255, 1844, -8759, -3565, -1009, -926, -818, -1195, 236, 2898, -182, 14344, -1384, 1064, 1181, -1846, 543, -583, 170, -3305, -1187, -2406, -40, -1051, -1071, -28, 1482, -1060, -1057, 3028, -2023, 913, 1052, 980, -5158, 4642, -14067, 3920, 1450, -4497, -1591, 842, -2222, -392, -42, -3546, -258, -3566, 2595, 225, -2696, 4624, 2283, 1483, -1506, 2164, 151, 380, -3207, -1086, -10594, 2005, 2379, -2567, -925, -363, -1261, 13174, -73, 1168, 2215, -1721, 726, 525, 1048, 322, -827, 2117, 3890, 1346, -3512, 2243, 638, 2259, -1371, -2260, 10590, 851, -1247, -894, 1871, -882, -1955, 3822, -3654, -1730, 906, 2074, -548, 885, -2501, -1316, -3275, -10694, 2031, 1077, 3013, -1105, 2951, 1907, 1218, 194, 1860, -1662, 178, 915, 1092, 809, -451, -610, -728, 799, -129, -101, -905, -2, 2470, 1292, -137, 544, -18795, -1081, -300, -59, 282, -329, -544, -1324, 2155, 9326, 462, -388, -303, -2940, -608, -13652, 532, -1350, -1026, 1330, 5559, -333, 4961, 707, -1832, 1070, 2483, -2016, -315, 2197, 849, -348, 379, -2179, -15691, 903, 3192, 3888, 396, 4610, 3261, -2589, -4903, -643, 
3604, -1380, 1524, -2155, 469, -3528, -790, 429, -3862, 1797, -104, 2364, -1162, -1559, 1011, 1849, -235, -1952, -2088, 1436, 2502, -3862, -1704, -14859, -2863, 710, 624, 4373, -6302, -616, -807, -1577, -2492, -620, -917, 948, 4957, -848, -863, 514, -2210, 2162, -753, -15168, -2068, 12472, -2611, -723, 2797, -8573, -2270, 978, -2597, 2215, -684, 2535, 3114, -261, -178, 2385, -4869, 1161, -32, -1469, 2074, -1407, 3226, -992, 4546, -3158, 1044, 463, -5285, 4, -1396, -1395, 1770, -1767, -860, -6, -2242, -1548, -667, 587, -982, -2246, -1312, 1550, -542, 5302, -716, 135, -15895, 3382, -478, 1279, 615, 3365, 1620, -12613, -230, 3101, 3230, -1307, 2860, 628, 647, -3595, -214, -1631, 2783, 748, 1088, -57, -6014, 2496, 359, 719, 1476, -750, -1644, -2125, 3913, -3788, 565, -1118, -1411, 1377, -1020, -246, 18851, -1438, -1150, -1492, -681, -798, -776, 960, 911, -1449, 336, -1114, -2111, -877, -532, 668, 1018, 1098, 408, 2032, -607, -656, -5997, 3089, 2462, -18368, -1027, 78, -4066, 439, -845, 1476, 290, 490, -452, 1638, -3381, 80, 1699, 458, 260, 1215, -516, 1883, -62, 35, -2540, -1703, -1042, 1751, -422, 1222, 207, -104, 1112, 151, -473, -522, 26426, 562, 884, -2201, -281, 238, -839, 1037, -588, 81, -109, -2, -32, 75, 654, 489, 524, -388, -1408, -906, -1193, -936, -273, -40, -100, -662, -522, -145, 119, 614, -922, -25329, -180, -668, -574, 161, -448, 173, 750, -609, -812, -125, 814, 572, 2602, 20372, 244, 1820, 724, 515, 932, -1290, -712, -990, -305, -13, -763, -1157, 481, -764, 320, 624, -620, 642, -1494, -568, -601, -655, -790, -1348, 334, -1302, 382, 782, -1122, -641, -23549, 180, 463, -634, -666, 599, -356, -1071, 816, -576, 1208, 912, -377, 624, 1049, 42, -95, 370, 1932, -167, -275, 142, -159, -410, 595, -562, -632, 748, 1192, 614, -41, -18, -156, -61, 1280, -686, 363, 759, 756, -19362, -614, 2151, -1185, 169, 327, 1494, 782, -1313, -134, 841, 218, -76, -2980, 202, 80, 281, 89, -61, -1678, 59, -125, 195, 320, -1310, -56, 806, 47, -65, 249, 18432, -666, -506, -204, -194, 
-560, -416, -3641, 330, -268, 842, 10600, -176, 424, -1744, -3609, -1682, -844, -309, -538, 435, 14251, -1281, 373, 2748, -702, -1358, -766, 3480, -679, 4039, 529, -5698, -38, -813, 1203, 4734, 318, -1044, -5109, 2187, -3474, 415, 2436, -3021, -1628, -456, -1451, 3406, -1798, 1001, -8648, 468, 1188, 497, 4628, -948, -4073, -11894, -2750, -738, 1520, -4070, -810, -5755, -1370, 2978, 4460, 917, 1221, -324, -1166, 2339, -1221, -2048, 714, 6884, 3096, 6998, 13, -275, -3879, 790, 104, 1383, 2056, 1957, -9216, -430, -199, 261, 764, -109, -210, 795, 884, -334, 1546, -272, -35, 738, -268, -13, -448, 645, 97, 76, 1284, -343, -654, 112, 643, 22846, 634, -597, -621, -784, -380, 951, -452, -685, 140, 688, -770, 247, -679, -228, -26856, 311, -546, -444, 606, 69, -195, 18, -220, -334, -42, 543, -28, 492, 766, 208, -1206, -554, 213, -1112, -1675, -608, 382, 2011, 5077, -17442, 1367, -702, -856, -416, -1728, -1987, 2966, -1952, 38, 152, 712, 210, -589, 3029, -1189, -2016, -8071, 10746, -2143, -556, -1964, 162, -504, 995, 982, -2565, -634, -985, -1668, 444, -2098, -411, 488, 1397, -1134, 1888, -920, -279, 15057, -757, -1258, -3040, -890, -105, -670, -490, -238, -2419, -1302, 915, -784, -929, 1653, -89, 1076, 445, 2538, -1424, 19175, -91, 437, 752, 254, 935, 854, -1666, -86, -543, 1053, 664, -155, -485, -3994, -50, 50, -58, -2626, 1801, -314, -16052, -1831, 1009, 2344, -3030, -938, 1761, -1283, -150, -425, -6660, -900, 1374, 803, 549, -2683, 837, 483, -655, 4610, 1259, -45, 834, 1103, -3250, -3604, -2882, -2463, -5331, 11312, -1653, -3505, -1855, -4962, 8579, 2370, -2474, 501, -1282, 985, -924, 3452, 456, -242, 3878, -2095, 2994, 7076, -459, 2574, 16116, 8277, -88, 572, -38, 0, 1664, -553, 1820, -2096, 1076, 415, -420, 1900, -1696, -130, 298, -1555, 201, -404, -1831, -932, 844, 9606, -497, -16304, 3278, 918, -523, -1573, 2488, -813, 147, 1540, 3795, 1390, 1061, -78, -10, 574, 2620, -1143, -512, -582, -1496, 736, -4323, 786, -2873, -1342, 3932, 14508, 12635, -899, 1730, -673, 386, 
-676, 2787, -2780, -2960, 375, 475, -2188, 2250, 851, 788, 268, 1264, 2973, -94, 1062, 1006, -697, 669, -635, -986, -4848, -1486, -6, -3914, 6267, -1560, 8, -503, 5273, -3545, 69, 15146, 2263, -1490, -548, 1740, 1636, -892, -895, 769, -471, 226, 6497, -2466, -2037, -1068, 1075, -902, 13668, -1213, 12424, -3523, -124, -1090, 972, -1134, -494, -2568, 881, -3081, 369, -254, -618, -914, 443, -1254, 658, 1322, 546, -14, 778, -116, -378, -802, -268, 48, 1140, 25942, 503, -637, -871, 1050, 298, -187, 387, -406, 343, 212, 110, 723, 695, -47, -50, -568, -66, 347, -1588, 20, 701, -485, -98, -787, 4502, 1046, -1628, -2526, 185, 1016, -256, -700, -403, -154, 103, -752, -689, 2084, -1463, 2294, 360, 17590, -698, -1262, 788, 116, 755, 751, -440, -610, -469, 1235, -2314, 1240, -308, 553, 1065, 24442, -733, 667, 4, -484, 93, -263, -361, -278, -1524, 176, 1311, 1561, 435, -436, -1079, 260, -366, 472, -1049, 647, 158, 302, -931, -36, -990, 736, -444, 1077, -1560, -251, 148, 1000, 1096, -300, -224, -307, -17646, 39, 206, 74, 505, -3051, -1285, -793, -724, 718, 324, 803, 874, 6062, -2235, -3321, -550, 9264, 3483, -4172, -4024, -471, 858, 2682, -1078, -1922, 2088, 1135, -878, 545, 2205, 836, -1088, 547, 12461, 2222, -828, -3841, 4797, -2360, -2510, -4029, -2213, 13736, 1032, -958, 1895, 264, -1499, -2066, -241, 1324, -224, -792, 776, 2130, 2600, -2276, -4239, 3260, 1610, -1620, -1220, -2752, 979, -2028, 19626, -2146, 684, -729, -235, -289, 588, -600, 245, -879, -816, 413, -87, -1158, -246, 69, 970, -111, 500, 1097, 1087, -138, -1356, 30, -434, -452, -22802, 177, 492, 206, -257, -854, 1445, 37, 1384, 97, -258, 811, -222, 53, 548, 1744, 124, -1031, 1076, 186, 453, -173, 1180, -2235, 583, -392, -1542, -726, 2937, -3635, -856, 1446, 7796, -2779, -962, -2277, 1651, 1960, -1460, -1277, -9794, -288, 2459, 2350, -2521, 84, 578, 2286, 480, 1620, 6421, -200, 170, 1513, 198, -1001, -491, -1000, 161, -482, 607, 214, 743, -292, -394, -192, 92, 73, -415, -316, 593, -42, -346, 456, 44, 950, 129, 
-189, 806, -221 }, .cb1108s0 = { -32768, -828, 9569, 331, 6938, 3122, -1008, 2847, 646, -5690, 1712, -795, -4406, 1368, 307, -526, -2206, 26, -210, 1358, 746, 1920, 667, 3866, -413, -720, -4328, -2475, -1189, -863, -3809, -5052, -8567, 2859, 1915, 4895, 12440, -13002, 2757, -5969, 4054, 1100, -9430, 4930, 10266, -1522, 7092, -8778, -1968, 4325, 8440, 3888, -1966, -688, -2455, 2966, -2380, 1682, 4956, -2310, -3706, 404, 6774, 17562, -12437, -2667, 4864, -9411, -6436, -9316, -903, -5526, 3463, -1690, -5250, -12568, 2338, -1310, -3019, 776, -641, 3483, 54, -10732, -3878, -691, -17615, 4530, 10267, 7830, 8488, -12624, -4514, -17183, 7070, 3115, 4176, 383, -4558, 410, 6379, 6242, 4702, 4853, -217, 446, -3811, -2396, 244, -2120, 3275, 5122, 180, 4523, 8680, -1868, -6164, 2636, -5056, -4039, -11618, 4014, 11349, -2616, 8240, -5119, 1988, -2552, 6060, 3206, -662, 2686, 1116, -10447, -3004, 650, 7811, -12148, -327, 856, -916, -397, -600, 4621, 3011, 5539, 5417, -2374, 9667, -4714, 7821, -2819, 573, 4492, 1882, -26770, 1486, -6963, 1103, 2515, 8196, 1849, -7492, -5243, 2106, -5290, -11000, -1410, -3448, -8548, -4536, -7730, 3083, 6109, -14458, -8624, -381, 7840, 4694, -3906, 8223, 3315, 5849, 13112, -13132, 6081, 11801, -7624, -376, -6372, -6817, 6834, 1760, -1435, 1072, 3505, -1494, -709, 5786, 454, 1807, 2650, 7728, 1357, -1002, -5366, -2368, 2052, 333, 6312, -336, 8274, -1653, -4309, -6630, 2841, 2448, 8398, 5376, -7248, -1474, -1842, -4119, 838, 501, -4206, 4052, -1250, -20943, -3338, -592, -2973, 7057, -128, -3235, -4313, -2510, -11313, -4925, 3103, 1448, -5186, -1322, -16815, 1956, -7950, 2641, -2890, 4396, 2322, -1381, -1911, 448, 2543, 3535, 782, 3719, -624, 1610, -2843, 7583, 1794, 700, 3107, 4528, 5461, 2540, -1074, 5976, 741, 576, 4426, 4400, -4920, 5724, -3734, -1186, 10645, 1100, 10537, 2828, 11670, -8391, -32572, -9405, -6807, -875, 2277, 736, -4546, -18693, 1204, -1083, 3422, -3328, 6013, -2992, 5812, 2744, -11668, -2519, -2384, -3635, 6532, 6874, -2820, 
-5222, -12261, -14266, -6663, -1150, -2032, 2099, 4642, 1638, -4162, -644, 249, -3133, 11830, -10712, 12370, 4818, -1924, -5639, -6448, 2455, -4898, -613, 1760, 2393, 1414, 7039, -7018, 5901, -2900, 3786, -3230, -3718, 3514, -4040, -4676, 6367, -1449, -2758, -2888, 4066, -7140, 408, -7656, 3156, 19919, -1858, 6671, 352, -3355, 3074, 5524, -1429, 1954, -6664, -10082, 4405, -1598, -806, 1779, -6913, 7062, 5064, 6518, -1042, 3400, -5530, -1192, 590, -3298, -772, 571, -6239, 9810, -12380, 1302, 1344, -3430, 3830, 4106, 5792, -6196, 224, -2604, 3954, -12551, -5539, -8306, 1801, -4521, 3578, -4349, -5716, 4960, 3620, 1516, 5779, 5550, -3710, 3329, 10542, 4198, 5148, -3291, 196, 6232, 6943, -1303, -10306, 1862, 6547, -1544, -2996, 2868, -4389, -6894, 28557, -13130, 1397, -2331, -4076, 2870, 3592, 6613, 265, -4790, -3514, -3152, 8710, 230, 3142, -1264, 1822, -769, 6168, -1792, 2189, 2660, -2664, 3402, -533, -3100, -476, -1164, 6092, -2930, 3372, -5895, 8507, -918, -4716, -1582, 23959, 1506, 2360, -117, 2029, -452, -6575, 964, -13132, -2838, 3800, -3355, 3168, 5230, 11116, 826, -1711, -3546, 7398, -4092, -2884, 743, -1784, -3824, -3437, 1050, -3306, 928, -5109, -7999, 1581, 8609, -4662, -3594, -1618, 9929, -3982, -5591, -8789, -1444, -12011, 1304, 12668, -5138, 10837, -7951, -4089, 3921, -5375, -2486, -2590, 11398, -80, 7734, -4547, -11286, -7098, -7758, 5303, 7380, -11266, -11138, -8676, 30, 6328, 597, 7852, 3144, -3933, 15142, 3954, 12197, -507, -1667, 5517, -4187, 709, -1330, 2094, 4739, 1341, 8276, 8544, -10107, -10151, 3641, 771, 4798, 4839, -3254, -9246, -7304, 14850, -18155, 3068, 4993, -4930, 10985, 6270, 8528, 5904, -13010, -7824, 1300, -706, -156, -4228, 302, 9962, -3087, 4472, 4541, 13179, -6576, -2541, 8284, -51, 5366, -4369, 289, 3890, -3671, 1894, 21820, -3031, 5336, -8412, 2487, -1211, -6759, 1292, 3749, -8904, 638, 6863, 154, 1145, -684, 6648, -3874, 2005, 4670, 4408, 4191, 3984, 632, 2957, -1532, -3974, -2576, -1636, -3714, -136, -4946, 3900, 367, 27072, 
1864, 1426, -3321, 860, -1768, -2009, -3436, 2666, -9899, -1328, -2330, -3078, -3258, -4600, 5604, -5248, 1703, 4403, -4781, -8275, 6717, -3860, 10980, -10634, -8360, -2291, 20311, 7602, -4028, 483, -4886, 2677, -4921, 6065, 5393, -2145, 6201, -472, 1796, 2869, -3578, 3053, -2342, -3193, -2589, -3215, 1322, 536, -164, -314, 4800, -1903, -1338, -11833, -23399, 5562, 4440, -1864, 2520, -4251, -1464, 5053, -8553, -3852, -5932, -849, -7113, -3493, -5338, -1671, 1496, 4504, -1830, 5716, -210, 1397, -2060, 2242, -583, 2604, 5355, 13938, 13150, 1346, 2649, -1527, -4568, 8891, 7399, -6492, -10371, -4885, 13056, -8262, -1267, -2959, -868, 5941, 299, -601, 8834, 1436, 5404, 1914, -3775, 980, 8848, -2270, -1952, 6902, 8642, -25725, 9556, 14540, 1998, -13157, 308, -13844, -10126, -2147, 8296, 1772, 1094, -9712, -8560, -7552, 5527, -1446, -1097, -5798, -17270, 2860, -210, 2136, 175, 729, 11775, -5154, -4202, 13342, 3977, 14494, -5659, 9105, -11067, -3694, 4794, -593, 6817, 1875, -4975, 3663, 4141, -8317, -8932, 2127, -4176, 1136, -148, 7640, 8127, -744, 2354, 389, 1600, -6475, -4558, 10735, 11407, 3896, 13098, 1814, 5191, -3850, 2629, 18430, 8343, 4630, -4624, -702, -3834, -2276, -2894, -1556, 1437, 424, 5652, -6260, 2387, -5845, 7496, 10657, -2754, 4806, 1169, 1308, -4114, -5347, 15076, 5686, 7287, 3004, -6254, 5186, -14096, 10323, -1974, -9355, -5544, -986, -5998, 261, 4494, 2467, -1911, -603, -4548, -1344, 1995, -1603, 10464, 5222, 3714, -5342, -8039, 12530, -26465, -1813, 4044, 746, 8123, -12078, -4703, 2971, -4487, 2556, 3904, -2518, 1504, 5774, 5431, 1120, -934, -5202, -6826, -8774, 7156, -2392, 10643, -2918, -4298, 3361, -3758, -894, 5828, -203, -4905, 6480, 11771, -19830, -17545, -4920, -17263, 10066, 10125, -8980, -19719, 23554, 27907, 2607, -7014, 6128, -23759, -4802, -7099, 874, 13103, 21667, -8475, -12938, -13122, -3694, -18860, -3518, -3586, 12658, -793, 10661, 6925, -730, -11373, -7845, 94, -2627, -6044, -2213, -4381, -10198, -5816, -56, -4349, 3722, 3911, -1719, 
-2513, -13290, 3218, 105, 1876, -76, -1107, 2563, 4520, 10288, 5862, -7738, 6180, 9863, 1380, 6756, 2632, -18798, 9314, 7190, -7454, 432, -15141, 8462, 2128, -2386, -2710, 292, -751, -3125, 6147, 4941, 3146, 3046, 120, 321, -5884, 5105, -4300, 6264, -317, 1667, -694, 7950, 5639, -3284, 1089, -6456, -14694, -3527, -1104, 4313, -20858, 7920, -10782, -13536, 933, 4523, 2640, 2118, 97, -614, 9834, -9515, 232, 5086, -6720, -1529, 568, 3139, -3665, -8567, -13771, 6274, -4370, -5653, -8920, -7667, -9391, -6653, 12489, -3666, -5103, -12324, 4796, -540, 10396, 3668, -3467, 7124, -4398, 87, -12139, -204, 1213, -2190, 11948, -2641, -2434, -5647, 2819, 3148, 3558, -6455, 3705, 1644, -3090, -4225, -5998, 112, 17789, -7220, 2166, 4153, 4516, -1100, -1667, -1402, -8837, 6344, -1586, -3451, 2357, 616, -392, -8163, -11579, 6160, -2783, 7895, 11321, -11847, 8070, 5231, -6496, -3172, -3470, -2960, -11437, 465, -470, -2568, 11197, -9417, -4117, -1162, -1893, -2361, 551, 14478, 3510, -1372, 3117, -8236, -2904, 14556, 3191, 200, 2166, -13974, 2718, 3946, 2444, 1982, 5320, 2087, -2222, 1573, 742, -8828, -3917, -11080, -241, -8472, 6119, 290, -2364, -3163, 1923, -1964, -582, 2564, -5566, -6411, 2069, 7392, 9115, 25316, 1504, 2540, -814, -1746, 566, -1580, -2290, 170, 698, 105, 9567, -6714, -584, -4934, -379, -491, -978, 4580, 1180, -3355, 1882, -4343, 4817, 1503, 9968, -8878, -4908, 3419, -4818, -2254, 6694, -4368, -10849, -5093, 4510, -3129, 152, 1926, -4490, 1510, -17764, -6699, 962, 3474, 4981, 25, -7128, 1432, 5386, 3108, -4545, 1092, 1663, -1363, 3076, -8916, 6158, 244, -1181, -825, -933, -5570, 17221, -535, -2892, -5031, -1297, -3010, 5840, 678, 748, 3944, 1630, -3648, -5457, -2618, 876, 6655, -2834, 2597, -6667, 1330, -40, -4423, 6257, 743, 6083, -584, -3742, -1401, 1779, -5166, 4559, 5558, 8588, -6476, 7521, -1561, 4950, -778, 3564, 11403, -1010, -3151, -14151, -1020, 2595, -3278, 24555, -4859, -909, 2314, 1301, 2098, -5664, 3938, -4050, -203, 3368, -2580, 3061, -9266, -6263, 
-6748, 3890, 1950, -329, 1050, -1106, 588, 23705, -661, 6913, 722, -5820, 2147, 3789, -1689, 661, 5389, -8519, 1152, 3800, 7160, 5234, 1343, 3218, -2900, -391, -4258, 5084, -4783, 7262, -10013, -811, -5252, 6474, -17338, -2388, -2596, -8715, 5836, 9523, 639, 4652, 3071, 3114, -1648, 1563, -931, -10143, 4394, -2838, -11900, -1012, 841, -5812, -3048, -2715, -196, -5794, -20022, 1949, 3464, -770, 2200, -3564, 1975, -6242, -1937, 3954, 5678, -2744, 1888, -3825, 5770, 3869, 8315, -7386, 1318, 1302, -5534, -4554, 924, -3804, -4292, -22757, -7972, -7469, -3543, 7858, -10125, -2637, -4765, -10644, -5944, 1159, -3293, 4363, -1219, -12248, 5060, -7232, 6947, -1609, -3037, -5084, 6580, 15873, 5336, 7295, 2386, 2961, 4655, 9714, 5080, 11635, 1790, 2897, 687, -914, -692, -6653, -8562, -1412, 244, 4478, 1650, 7175, 1046, -6689, 3693, -3520, 6046, -1336, 1976, 16822, -1176, 792, -1733, 8286, -7359, -2402, -8536, 1392, -3271, 6580, -4939, 1562, 595, -4237, 4872, 4266, -1798, -6589, 7457, 4207, 9978, -3996, -2236, -3078, 1861, 10101, -2394, -3250, -7619, -7082, -14305, 5664, -1337, -11019, -3839, 10190, 7249, 3086, -1782, 24, -3566, 10769, -4102, -6408, -688, -8987, 3018, -5942, 7478, -368, -7931, -3018, 6766, -78, 5705, -3264, -1100, 4850, 4518, -28, -6276, 4905, 7094, -4394, -2846, -88, 434, 2039, 352, 9827, 12372, 1207, -8561, -4476, 1496, -4927, 2087, -6730, 1134, -81, 57, -8701, -2918, 3953, -2844, -1842, 4804, -5315, -401, 7060, -16397, -4802, -9849, 17542, -11715, -12432, -6676, 9323, -13189, -5761, 8054, -620, -7431, 3726, 17790, 7880, 251, 2983, 3736, 7118, 17197, 8613, 1445, -15290, -16184, 11084, -4971, -5922, -1893, 9067, 9321, -8139, 714, 182, -3138, 7258, -1874, -2781, 10800, 2915, 5316, -5206, -2581, 10219, -484, 862, 119, 6628, 1514, 3883, -880, 7586, -2573, 3279, 3801, 4492, -3850, 9416, -38, 7518, -574, 4052, -1136, -668, 9672, -9536, 2551, -4223, -1074, -3616, 8446, 158, 3262, 7965, 1311, -8634, -6786, 700, 4973, 917, -754, -1156, 6054, 2067, 10757, 421, 1030, 
11351, 2149, -4286, 12075, 4593, 1193, -5290, -8566, -2965, 6824, -6238, 2392, -3395, 5350, -2789, 7529, -1873, 3032, -1494, -2703, -18535, 1583, 9539, 2556, -4422, -6079, -2699, -7860, -4573, -8236, 4281, -1079, -17578, -2840, 7468, 4675, -5002, -1268, -1529, -8222, 8285, -766, -4314, 6048, 11507, 5046, -2444, 3186, 1732, 7872, 6598, 2828, -2920, 8278, 13263, -10204, 1334, -5552, 10532, 5412, 2554, -10076, 1128, -3959, -3210, 4091, 1824, 4984, 5558, -2204, 2080, -3802, 6614, -7380, 3612, -4624, 6366, -1795, 4038, 6227, -4312, -4910, -2127, 15077, 4144, -16885, 3757, 2303, -670, 5625, -2590, -2594, 2491, -3174, 4199, 1152, -1532, -7308, -8578, 6431, 2975, 6032, 3037, -7451, -2643, 5503, -7856, -2451, 5309, -3678, 8145, 1864, -8341, -15575, 7716, -10337, 8935, 12350, -10418, -4092, 734, 10400, 10934, 5724, 1778, 5836, -3203, -10700, 2766, 4178, -18135, -16589, -5465, -5005, 7239, 25480, 7310, -6408, 6142, -7748, -1423, -4318, -321, -2899, 3728, -3184, -3578, -11598, -1223, -8554, 656, -3945, -4084, -724, 301, 9539, 9695, -1799, -2602, -1379, -5282, -4709, 11858, 9562, -7508, 4886, 896, 5780, -160, -12724, -9598, 1220, -5411, -5072, -6476, -11763, -104, 9311, 5230, 591, 4342, 263, 13198, -17801, -1892, 2619, 18194, -2080, 16536, 18497, -25926, 25541, 66, -6648, 1627, 2794, -3790, 9424, 1387, 20702, 5260, 5211, 1702, 1019, -11143, -6501, -18711, 10869, -4204, 4994, 1722, 8569, 3670, 4386, -16874, 8876, -2297, -2743, -4562, -9207, 8033, -346, -3586, -9451, 3242, 1552, 4278, -6787, 7118, 3630, 4602, -7371, -12789, -10424, -14922, -3010, 1885, 4144, -4490, 4074, 7796, -1201, -7244, 2675, 1221, -7060, -12828, -3520, 1983, -4615, 8207, 1606, 517, 3646, -7252, 816, -3690, -674, 13100, -16254, 4727, -8184, -968, -5366, -2288, -20260, 1174, -19384, -4199, -5292, 582, -13118, 1836, 1698, -2034, -14601, 6642, -10530, 482, -851, 9968, 7050, -13366, -8354, 4740, -20050, -193, -1881, -1205, -4042, 7067, 12872, 5846, -4792, -1833, 2504, -3222, -1607, 2634, 4587, 6761, 1549, 1124, 
9427, 3978, -8305, 7524, 2507, -5744, 3238, 5238, -3664, 694, -28496, -1674 }, .cb1108s1 = { -10979, 8698, -630, 4660, 3060, -7292, 10140, 11942, 1448, -5820, -3144, 3100, 10575, 6888, 3505, 9996, 2787, -484, 8057, 1503, 6329, 3074, 3954, 9419, -736, 2333, -1858, 3264, -4026, 16130, -14501, -5284, -472, 850, -7258, 1542, 1473, -2348, -7055, -9574, -2275, -4383, 7542, -360, -2945, -3878, 28, 809, 600, 2246, 587, -1779, -3456, -737, 3242, -2523, -1862, 6127, 899, 1070, -15614, 10990, -3084, 9546, 7339, 8899, -1490, -10379, -9193, -3857, 8289, 7261, 12489, 7814, -6458, 1223, 15486, -10960, -1880, 4922, -7819, -527, -2370, 3687, 1358, 10367, -14266, -1496, 1060, -9325, -5582, -3947, -17536, 1470, 4878, 10793, 2904, -2566, -4995, 6549, 6141, 11048, 3177, -494, 9087, 797, -2575, -5616, 1197, 2966, -11287, 4658, -504, 4571, 1814, 18830, 26254, 2399, 8750, 2656, 8206, -12987, -9119, -1027, -457, 1228, 6137, 2322, 1732, -5694, -892, -249, -178, -7009, -4368, 402, -5564, -5183, 2470, -4745, 2788, -3255, -5181, -706, 40, -4915, 8926, -3633, -2455, 15054, 5376, -867, -7270, -979, 7053, -7433, 13749, 5039, -2234, 8474, 7031, -3917, 5127, -7602, 580, 12067, 2252, 149, 86, -582, -5729, 2193, 4178, -9195, -11824, 3897, 1298, -1044, 6450, 1885, -19562, 6205, -4610, -2544, 5192, -4885, 5021, -2373, -102, 7358, -2434, -3512, -4048, 3070, 45, -1344, 202, -2189, 448, 1172, 2939, -547, 1003, -6370, 3643, -1157, 3932, -6044, -12882, 1959, -1574, 2574, 14854, -16317, -6627, 505, 1102, -9361, -8087, 7525, -1466, 284, 3756, -383, 5147, 5060, -474, 531, -6144, -1872, -1206, 527, -4861, -12410, 7508, -7226, 5046, -12233, -4153, 4628, -14402, -5265, 534, 1528, -13408, -62, -18757, -1280, -9301, -10254, -8990, -6335, -7724, -3394, 1951, -13271, -1389, -5274, -4616, -9643, -10295, 1332, -5618, -10737, -7536, -9314, -7006, -760, 7694, 2955, -404, -2800, 15250, -3828, 5994, 5408, 8411, 16568, -7280, -6901, -222, -1554, -862, -1871, 939, -3678, -4348, -3200, 3220, 1614, 8598, 8162, 1749, -7378, 
-1658, 931, 3870, 9183, 1509, -5068, -17, 5733, -8121, 2769, -3195, -3296, 8940, 2828, -2470, -2448, 7413, -2851, -1058, -4505, -9653, -5074, 73, -3286, -4014, -1760, 2562, 13690, -3464, 5438, -3394, 16997, -2944, 291, 4224, 1175, -2237, -6894, -5479, -1291, 3390, 5455, 898, 3461, -7914, -4785, 1879, 1059, -3721, -5796, 5054, -3931, 6315, -2460, 1909, 573, -3373, 3052, -178, 986, 572, -5976, 5781, -4928, -10539, 580, -18727, 757, 1759, -4049, 2232, 1890, 4115, 699, -2934, 4926, 2391, 10848, 5103, 4340, -1518, 2288, 2283, 8886, -5131, -4429, -4384, -3265, 11933, 3993, 11474, 3721, 1532, 976, 6112, 1954, -2360, -1783, 2080, -6356, 2482, -4646, -1992, 1590, 1790, 3290, -2312, -564, 508, -1688, -7522, -9263, 3059, 1883, -3005, -1303, -9146, 10282, 1333, 4692, -2083, -15792, 2208, 1128, -11574, -7149, -1126, -4995, 18963, -6262, 5045, 2179, -822, -1249, 10092, -338, 5744, 1635, 2535, 6114, -1339, -8337, -4370, 4288, 2468, 3051, 12491, -9554, -4034, 522, -1085, 5852, -2759, 4918, -10717, -194, -11376, 3059, 12075, 1037, 5260, 816, 5918, -1987, 7924, -6022, -10374, 11607, 25035, -11598, 16894, 2458, -5461, -2039, 385, 6002, 7574, 1229, -834, -1032, -7453, 2694, -1447, 3632, 4215, 3541, 2936, -3294, 1001, -6451, -4595, -11682, 7880, 2261, 3786, -2849, 2276, -826, 3742, 7586, -334, 2837, -2331, -12849, 1170, -1150, -5253, -997, -8996, 8124, 2234, 904, -2294, 3144, 7352, -5452, 1536, -8800, 1886, -18282, -9787, -8066, -12066, 1536, 4460, -1345, 1418, 7471, 13451, -7299, 5507, 6795, -184, 8905, -2040, -4933, 4998, 7317, -6667, -5134, 9094, -8561, -2534, 3422, 2278, 3118, 205, 5811, 2247, 5946, 1078, -2105, -6946, 170, -1625, -4734, -1447, -4329, -4553, -2230, -8738, -15289, 7311, 6665, 5047, 1984, 11896, 13922, -10490, -9313, 1424, -2991, 1408, 335, 8914, 3773, 8814, 7917, -4560, -114, -624, 8984, -1598, -580, 3233, 590, -2172, -3162, -3985, 5394, 13842, -11625, 73, 12826, -1204, 5119, 10304, -10006, -2695, 1318, 156, 84, -760, -4638, -3804, 3041, -782, -2994, -3113, 637, 
-3256, -5831, 452, -1204, 1614, -11626, -4769, 10612, -8710, -20019, 10542, -4279, 6912, -1429, 3812, 2844, 3903, -11622, -8954, 180, 3898, 3858, 119, 1385, 4038, -5899, -969, -5454, 13305, -6748, 5934, 8027, -7348, -3797, -29781, -4956, 2037, -2331, -3292, 8254, 6597, 4446, -7848, 6250, 1400, -1182, -4966, -3490, -1410, -2286, 3334, 350, 9271, 2987, -934, -5702, -3881, -97, -671, 5108, -133, 1302, 11630, -8858, -3027, -42, 3682, -1507, 3992, 5641, 2778, -8698, -2509, -1360, 77, 2116, 98, 2853, -6334, 5915, -1214, -2721, 8921, 1380, -4158, -4315, -4740, -21049, 7044, 866, 2094, -9442, 9003, -5147, -4897, 3407, -11558, 4280, 4508, 6697, 1612, 1508, 8547, -14257, -151, -9530, -7250, 11321, -14430, -4944, -2488, 1349, -248, -1490, 1749, 3970, -5830, 20767, 4642, 3236, 36, -17079, -11099, 5996, -10759, -39, 7822, -7527, -1431, 179, -3841, 2298, 1407, -241, -2303, 9244, -3626, 6609, 1959, -518, 368, 1678, -5334, -5849, -4986, -2363, 607, 2809, -1006, -7695, 10022, 2216, -8992, 4282, 807, 14707, 9528, -11065, 3014, 3157, 5597, 1139, -1298, -3642, 7839, 860, -4336, 2624, -4171, 1791, -2825, 5362, -529, 1494, 337, -4487, -671, 5360, 3283, 4933, -14692, 4033, -4365, 2713, -6903, -1784, -10862, 6173, 5278, 14859, -852, 10020, 12304, 8898, -3089, 9183, 1841, 8276, 4929, -261, -1264, 615, 3615, 14535, 6557, 519, 4228, 7382, -1805, -4529, 4992, 4277, -342, -9610, -5193, -7022, -23264, 2402, -740, 2875, -5052, 1983, 4987, 3336, -3806, 1335, -2868, 846, 7652, 936, 3510, -4570, -3010, -8805, 6177, -4413, 5879, -15204, -1632, 13416, -4543, 3838, -9293, 1744, 920, 15544, 3820, -5852, 3935, 2357, -6486, 1932, 12044, -6374, -2545, -2389, 2755, -8073, -8203, 4659, 4286, 16128, -987, 434, -4495, -4428, -4816, -10329, -4529, -13408, -13283, -1136, 4002, -1271, 3547, -5274, -5577, 701, -365, -2764, 370, -369, 2611, -832, 3862, 4604, -7786, 11170, -1453, -1568, 10758, 168, 402, -1985, 1436, -8858, 10080, -8559, 3998, -4310, -13478, -3104, -11458, 506, -18194, -3724, -6768, 7960, -4213, 
1121, -1658, -1141, -1874, -383, -5090, 748, -1032, -1207, 1046, -1865, -2387, 2126, -3672, 6733, -2794, 3797, 15562, -11989, 170, 6129, 658, 929, 4800, -4296, -955, -2189, -188, 3180, -118, -766, -2182, -6928, -2254, 6615, -4422, 6324, -31, 3742, -5832, -5022, 4671, 1574, -6309, 288, -2768, -2492, -4818, -5192, -248, -3236, -429, 120, 1182, -10486, -2964, -3713, -5978, 11817, -20052, -6525, 2054, -879, -602, -2843, 7244, -1372, 417, -172, 3322, -6556, -7021, 5842, 7357, -2799, 3660, 7579, 4682, -2242, 73, -9247, 21061, -2060, -3614, 2486, 4793, -2959, -510, -74, -5982, 2274, -4147, 3260, 1994, -1678, -7494, -13624, 2560, -7375, -896, -4945, -2838, -11096, -1969, 5879, 444, -3220, 14630, 4915, -2376, -8475, 9854, 11380, 11060, 1534, -14413, 4366, -9544, -10646, -7654, -17916, 3481, -3240, 1776, -2436, -8403, 3679, -1914, 12537, -5540, -5294, 5995, 5968, -2609, -16882, 789, -9506, -10075, -12142, -7580, -7090, -2046, 11065, 7617, -3503, -2013, 3516, 6347, -195, -3119, 2444, 14, -4998, 767, 4976, -3974, 9038, 579, 1804, -8206, 32767, -5633, 1018, 13388, 996, -12737, -3179, -2058, 13663, 1274, -4475, 7386, -1698, 17927, -6118, 15942, -2922, -3434, 5903, 6333, -9149, 14140, -1488, 2999, 1151, 2361, -1935, -10243, -11566, -5319, 965, 5146, 3652, -441, -2173, -3484, 3685, -13595, -1703, -78, -1408, 18517, -3788, -3266, 3162, 996, 19950, -8560, 4989, 6593, -5329, 2950, -13896, -3524, 5590, 4055, 6084, 2493, 12659, -5786, 4858, 7252, -7111, -7318, 5411, 7393, -8714, -3454, -1562, 1919, -49, -680, 7285, -398, -2956, 7100, 5563, -538, 1719, -16, -3824, 437, -6842, 1504, 5694, 1214, 3209, -15562, -4365, 9329, -25577, 1425, -2598, -8389, -6891, -3275, 3304, -3993, -6391, -934, 7862, 4844, -134, 9890, -4646, 2468, -9901, -4111, -3080, -5056, 476, -13099, 1447, 205, -2424, 7098, -12075, -4646, -13725, 8367, -2910, -8461, 1387, 3553, -10228, -2771, 4698, -6483, 12234, -8086, 3329, 2374, 452, -1805, 5083, 2014, 164, 7143, 81, 6062, 2838, 5318, -4982, 1440, 2014, -3273, -6658, 
-798, -3204, 1398, -599, -5834, 2070, 4644, -17238, 390, -1684, -4932, 8961, -12217, -3079, 6574, 1387, -5991, -7803, 1285, 7439, -395, -3048, 2038, -847, -690, -5127, 2228, -4180, -3499, 530, -584, 9884, -323, 446, -15644, -9162, -1683, 3643, -3578, 2634, 496, 8097, 109, 1056, 1422, 5452, 6517, -449, -2389, 302, 6827, 1507, -3106, -7188, -4909, -441, 12955, -3933, -5322, 5155, -23171, -2780, -2655, -4048, 12844, -3709, 6555, -5700, 3780, -6566, -4415, 11091, 11291, 6443, 9146, -796, -1420, 5600, 12098, -5790, 6619, -10474, -12177, -5890, 21700, 11148, 3427, 3130, -5727, 14646, 13953, -2721, 1404, -3102, -4693, 4762, 1757, 2533, 3998, -530, -758, 5301, -1426, 8948, -720, 6877, -3863, 2396, 5266, -685, 890, -7188, 2742, -270, 8125, -804, 32292, 6964, 8599, -3466, -1080, -8423, 2070, -295, -157, -5432, 152, 2478, -3738, 1104, 1500, -5290, -2463, -6386, -2537, -2331, -3290, -2398, 159, 6588, -2547, -2424, -2184, 8316, 5670, -5608, -2600, 2659, 166, 14828, 2622, -10490, -16378, 64, 434, 4576, -3010, 2479, -6798, 3431, 360, -1067, 3421, 664, 4029, -4050, -240, 3875, 672, 3587, 501, 2494, -48, 9997, 3259, 8551, -7624, 17342, 10765, 4328, -3721, 1729, -2844, -6330, 5114, 15589, -261, -7554, 2708, 7260, 5852, -8736, 436, -6160, -588, -5919, 5752, 3127, -4558, 540, 74, -4048, 3735, 7873, -2869, -544, -111, 5182, 1032, 2315, -159, 5105, 4106, -494, 678, -4756, -3865, -7389, -2492, 7193, 5146, -7926, 12043, 11137, 1719, 2307, -5476, 12679, 7996, 726, 933, 3222, 7515, 678, -5858, -2716, 1503, -3014, -2125, 4982, -4984, 467, 986, 5450, -1472, 5314, -1285, 218, -3411, 4511, 8047, 4268, -8307, -10587, 17200, 3303, 7553, 5361, 1108, -7982, 8240, -5856, -3376, -3952, -2884, 4401, -7252, 4078, 7538, 3420, -13834, -1139, 10742, -2536, 636, 7758, 4282, -3505, 1190, -7382, -8164, 5306, -408, -5005, 2776, 7806, 4781, -7903, -2370, 13884, 542, 5643, 6948, 6471, 2699, 815, 4454, 1882, 2290, -3856, -3086, 8215, 3234, 4444, -1580, 2835, -3083, 6706, 7409, 4626, 2658, 2308, 7965, -1034, 
-2584, 344, 704, 12280, 10344, -8032, -4410, -6168, 6860, 7977, -5630, -6680, -5001, -6199, -10378, -1764, -3322, -4284, -1048, 2721, -11738, -11800, -7975, 2754, 3424, -7641, -2245, -4945, -194, -1948, -2850, 4111, -21846, -8750, 4306, 24494, 10428, 26998, 4976, -2701, -3283, -723, -1539, 6758, -9730, -3517, 6401, -4546, -410, -9900, -4947, 6996, 10983, 5110, 19948, -78, -1794, 11051, -14, 316, 6447, -20430, 9363, 9062, -2134, 13711, 6448, 6655, -5232, 4610, -10352, -3042, -8713, 5777, -2438, -2602, -7293, -755, 6736, 2960, -3676, -2882, -9806, 1342, 1242, 2122, 2749, 631, 6502, 2266, -12996, 13620, 19762, 8096, 702, -4394, -8668, -1460, -3228, -173, -6239, 4643, -1916, 4098, -2234, 1202, 1763, 6170, -6320, 12984, -5936, 8301, 6021, 2191, 466, -4044, -1913, -3458, 8197, -3249, -5935, 2383, -4241, 4977, -4415, 704, 3488, -8356, 10229, 562, 14, -4828, -3890, -7599, -4208, -3166, 1132, -16584, -506, 1397, 6266, 3307, 5782, 2349, 3257, -3017, 7814, 1216, 7440, -10096, 12698, 944, 1221, -1683, 152, 6020, -7910, 3897, -6954, -9439, -9838, -3860, -5383, -4228, -1980, -4045, 7442, -5504, 2145, 636, 2857, -4538, -820, 4275, -2104, 5076, 5191, -363, -23254, 1962, -66, 7550, 88, 8721, -1361, 7733, -2661, 5282, -5112, -24, -975, -3200, -2235, 5144, 213, -6340, -3974, 1266, -2383, 2432, -124, -233, -3504, 10604, 806, -918, 11601, 19332, 206, 7456, -8885, -9692, 3087, 3685, -2183, -7538, 11970, -5098, -7364, -1173, -3099, 6532, -6850, 4622, -828, 390, 467, -5364, 4442, -1878, 8949, -4340, -261, -2720, 6659, 16184, -6552, -3736, -15416, 15774, -306, -4240, -1807, -10304, 11073, 2743, 3974, -5557, -3499, 5315, -10742, -378, -4517, -5949, -7664, -2830, -6510, -6096, 2052, 3425, 1971, -3328, 5326, -1362, 1806, -14286, -12774, 6058, -3365, -735, -2586, -18658, 6664, 9502, -1590, 323, 6445, -17766, 14694, -9786, 3696, -4547, 1601, 3645, -584, 910, 2516, 8197, 3898, 4306, 631, -2020, 4309, -765, -6591, 2083, 8969, -1474, -27, 9130, -5808, 8492, -135, 2230, 2296, -4509, 4600, 4951, 
1930, -2564, -5889, -1338, -11737, 6387, -3649, -5447, -2462, -4751, -1012, 3523, -3504, -9510 }, .cb1108m0 = { -2417, 4623, 2916, -4257, 120, -10323, 1198, -10252, -117, 8767, 3160, 2323, 1162, -650, 2237, -4171, 2386, 432, 1627, -7255, 38, 124, -3658, -1558, -11711, 10, 8146, 1700, -1975, -16731, 2397, 1056, -2502, -2660, -2731, -2477, 1488, 1220, 4880, -1156, 1805, -3, -3009, -6233, -2216, 3440, -3082, 2124, 70, -2461, 1125, 1919, 11949, -2506, -622, 2209, -702, 2685, 9183, -510, -2806, -1129, -1823, -1746, -3600, 2298, -3360, 10793, -1714, 1662, -62, 395, 14142, -261, -144, -9896, 11481, -884, -2197, 352, -326, -453, -1984, 2027, -1466, 3290, 94, 3481, 2533, 4401, 5492, 3803, 247, -896, -1688, -3166, 1130, -1125, -1973, 322, 867, -1936, 714, -880, 8, 2313, 23418, -1682, -677, 384, -2140, -386, 920, -2523, -495, -1494, 3027, -707, 1172, -1403, 2177, -2137, -885, -1035, -1637, 375, 2452, -3709, -1171, 2069, 1095, -1937, -686, -956, 2034, 3410, -3075, -359, -598, -2084, 18550, 1781, -45, 1400, -1580, -13180, -609, -1376, -3145, -248, 5661, 6886, -3915, -194, 9876, 1065, 3879, -1726, -837, -660, -7467, -3055, 3516, 283, -1604, -625, 1165, 3023, -1531, -1825, 1430, -561, -881, 1346, -129, -1817, 851, -32768, -294, -188, -116, -646, 1176, 630, 903, 417, -2487, 352, -789, 571, -127, -2054, -2112, 418, 1631, 266, -270, 362, -2765, -1198, -182, 3586, -1272, 1470, -66, -18384, -1230, 823, 1171, 1350, 1101, 1410, -3730, 1535, -101, -3234, 2315, -34, -458, 1361, -16497, -990, 1438, 2542, -1193, 586, -1708, 2689, 2741, 6010, 4209, -5974, -628, 1556, 2238, 6134, -3040, -2937, 2188, -1660, 1137, 1316, -2650, 905, -502, -93, -1177, -31964, -1170, 1504, -1284, -104, 168, 55, 3478, -161, 2818, -484, -32, -1536, 1218, -854, -351, 4465, 16922, 681, 4198, 419, -414, 6824, -3906, 11598, 75, 4904, 1374, 64, -2692, -3759, 3065, -1397, -202, -347, -2466, 96, 1035, -765, -258, 3711, 1437, -18250, 566, 976, 2483, 4, -1096, 1906, 3745, -2621, -2756, 1864, -560, 98, 821, -4094, 5349, 1369, 
-5245, -2170, 2932, -1052, 3932, -413, -400, 31206, 1125, 1631, 43, -764, 1666, 780, 2036, -564, 64, 1311, -202, 843, -2030, 856, 1766, -3163, -1158, -626, 316, 127, 1783, 1918, 3384, -2887, -5885, 1763, 4910, -248, 17100, -3022, -1880, -2927, -1287, -3308, -1767, -2622, -1460, -250, 3597, -4526, 946, -1533, 1059, -8, -807, -1283, 1436, -11184, 643, 398, -1565, 1983, -60, -9862, 1219, 322, 3132, -2043, 1138, 6258, -3540, 790, -923, -4692, -1401, 2733, -8918, 4905, 6181, 192, 82, -1094, 4, -634, -1323, -2865, 1036, 1484, -2461, -937, 414, 221, 2179, -438, 1273, -2690, 18442, 2781, 1788, 2264, -1230, 4284, -4708, 1190, -4810, -975, 230, -3728, 2504, 3602, 3488, 88, 1322, 487, 2965, -3731, -2341, 5937, 8545, 1716, 7308, 9017, 6426, 727, 3992, -4584, 388, 3714, 1164, 18, 445, 1253, 398, -1989, -824, -430, 745, -5447, 2176, -1986, -3963, 2861, 194, 17739, 1891, -5368, 4172, 125, 530, -2766, 1179, 401, 1759, -1609, 31234, 910, 1100, 1036, -948, -1101, -614, 1768, -344, 840, -696, -842, 320, -1444, -2560, -3199, 58, -2172, 1375, -3002, -821, -863, -12096, -2484, -677, -2130, 4450, 3568, -3192, -1114, -3218, 3121, -503, 5570, -561, 3896, 10566, -3065, -2768, 1398, 1719, -2708, 1952, -142, 4777, -978, 2238, -5780, -430, 1228, -1298, -2923, 4353, -1621, -2368, -2908, -8012, 4398, -502, 518, -6964, 622, -377, 3758, 6598, 4438, 6849, -7696, 470, 3585, 466, -14664, 3438, 14706, -1944, -2544, -785, 3653, 1274, 443, -694, 1968, -3499, 2855, -3930, -1210, -528, 1931, 3849, -772, -2659, 4499, -3624, -540, -1645, -949, -382, 979, 595, 165, 429, -80, -20468, 1040, 544, 2545, -5010, -2122, -2840, -335, -405, 404, -50, -2996, 1226, 519, -1046, 3745, -2317, 6211, -14500, 9754, -5802, 5230, -3112, 1506, 3741, 664, -902, 197, 2476, -3618, 2040, -1066, 2338, -257, -2580, -293, 2740, -576, 2050, -865, -3666, -2090, -1831, -32056, 658, 1549, 1602, 1728, -534, 390, -1517, -627, -4025, -797, -2351, 2759, -102, 2574, -56, 796, -232, -886, 1639, -2773, 1007, 830, 5880, -2220, 762, -3834, -2865, 
-415, 584, -3498, -4546, -16108, 344, -4072, 551, -5435, 2007, -1418, 3838, -1662, 1981, 3545, 1424, 769, 2135, 1705, -15076, -636, 283, 3386, 97, -1048, -3933, 204, -8616, -556, -2936, 4241, 5100, 1777, 98, 719, 6202, -1496, 708, 2160, -2396, 4060, 1513, 2253, -46, -1823, -132, 709, -756, -944, 575, 1070, -1583, 587, -24575, -1989, 874, -568, 1040, 1116, -4002, 3196, 2826, 117, 1590, 2456, 938, 112, -938, -1268, 5056, -2851, 2995, 2559, -13121, -8374, 3593, -6684, 3663, 766, 747, 1016, -921, 2241, -1942, 4269, -3312, -1012, 2340, 2781, -3881, 2532, -1976, -1436, -3219, 420, 22088, -742, -640, 3270, 1446, 1935, 1279, 1913, 1377, -3297, -751, 4209, -1052, 2381, 2938, -1330, 2154, 2784, -2420, 1270, 2334, -526, 1480, -435, 2206, 252, -510, -1018, -1469, -1294, -950, 424, 1058, -2317, -846, -20737, -1877, 88, -431, -1268, 116, -378, -2326, 3115, -246, 30, -4725, 648, 2084, 14286, -817, 2496, -1947, -4869, -9703, 1505, -2476, -2108, 747, -449, 3002, -5464, -514, 1805, 2559, 2494, 12782, -1232, 12091, 2118, 3996, 2592, 1058, 510, -1384, -3050, 2533, -408, 5219, 3044, 3242, -185, 2654, -3723, 16, -1723, 1823, 6144, -4806, 182, 1772, 4841, 16390, -96, 2505, -7713, -5244, -3316, -6776, 1448, -1470, 4238, 294, 889, -2372, -6281, -2423, 5423, 2119, 2897, 1378, 817, -993, -1599, -14662, 3014, -3397, -6182, -245, 4897, 5116, 2285, -2863, 1174, 415, -6777, 3863, -6009, -4722, -119, 606, -2247, 4447, 1064, -1935, 2705, -2629, -1144, -11980, 3805, 882, 1634, 5446, -4300, 643, 3436, 7632, 592, 998, 674, -2647, 4644, -6854, 1368, -146, -3395, 10599, 1369, 3852, 1689, 2437, -3937, 3405, 2517, 1895, -14092, -1142, 2570, 10163, 1608, -2445, 850, -1678, 3112, -3465, 3138, 4413, -1973, -4151, 1163, 1822, -3819, -1568, -407, -2642, 424, 365, -3599, 164, -1448, 1062, 1536, 1590, -1982, 200, 18572, -230, -638, -1253, 1650, 2280, 4945, 4527, -2353, -4216, 3752, -3807, 3686, -4816, 2382, -14833, 1306, 17246, -739, 2012, 3521, 1473, -1436, 1514, -142, -461, 1038, 2462, 971, 1354, 1272, 1787, 
2420, -922, 3364, 2250, 497, 1349, 2795, -32768, 425, 1874, -72, 2461, 389, -306, -1180, -646, 251, 299, -2735, 577, 1055, 1826, 1620, -1214, 1422, -901, -1273, -2367, -1241, 366, 521, -433, 55, 4000, 3035, -1390, 2505, 1786, -15397, 413, -5916, -234, 3559, -6776, -5068, 2251, 36, -180, 596, 5744, -2450, -1276, -4786, -1872, 24, 252, 464, 2833, -136, -25600, -33, 873, 2646, 1471, -1336, -1330, -276, 1778, -242, -951, 1580, -79, -858, -927, -4310, -604, 7568, -1713, -948, 192, -260, -1334, -1116, -705, 638, 132, 1186, -952, 1157, 428, 2039, 1568, 1778, 22453, -2190, 2176, 1674, -3996, 1294, 1162, 274, 415, -2877, 464, 505, -1842, -1066, -2241, -761, -291, 8, -987, -104, 796, -32768, 1302, -809, 571, 1214, 455, 686, 656, -1752, 886, -790, 644, -1114, 2358, 11452, -4398, 1334, 13095, 3230, -1818, 4053, -1990, -1093, 878, 3796, 2712, -1523, -1229, 1077, 960, 1250, -75, -3233, -7734, 2783, 8430, -327, -1428, -1687, -4092, 269, 3161, -569, -1267, 1774, 2772, -2033, 171, -520, 1551, 3719, -3364, -220, 1904, -1282, -2008, -818, 4261, -886, -19201, -3454, -478, -2645, -2601, -2124, -3977, 2960, 1563, -432, -989, 2682, 1734, -9085, 4614, -4454, 2535, -7201, -220, -10022, -431, -7907, 889, -9658, 6653, 762, -1827, 5886, 862, -1836, -430, -16363, 5709, 851, 1814, 304, 5045, 1685, -1004, 5108, -5936, -3143, 940, 1832, -270, -674, 1441, -241, 3222, -551, -434, -69, -3584, 349, -1354, -12080, 3639, 5219, 7583, -1023, 2078, 3263, -5807, -873, 4085, -5153, -3623, -436, -4717, -1803, -6274, -2049, -247, 2516, 1922, 10204, 2194, -1574, -535, 656, 1638, -3091, 1156, 1377, -1220, 4956, -221, 4984, -1154, 4603, -1618, -5655, -2583, 13494, -2442, -3968, 3086, 1098, -1625, -13781, -12826, 2659, 3604, -702, -1900, -3508, -6283, 2320, 1979, -2823, -4890, -1728, 2, -4402, -437, 1932, -3272, 2853, -3018, 840, -632, -6691, -484, 9579, 1008, 11677, -2814, -2029, 8048, -1170, -7366, -2664, 3349, 1319, -1160, -1864, 606, 1568, 5428, -4763, -2470, 2145, 1798, -502, -1538, -3736, -1376, 1330, 
-3567, -78, 478, -743, 890, -800, -44, -1832, -1761, -1022, -996, -846, 1188, -1042, -3202, -2439, 1602, 3601, 564, 18338, -17, 1327, -387, -1998, -1260, 3352, 849, -4780, 1932, -56, 2625, 10753, -1676, -10536, 2980, 1542, 1177, -3113, -859, 522, 3092, 9588, 2882, -4540, -1406, -5183, 50, -4245, 3649, -420, -3612, -5290, -1919, 14559, -2605, 1169, -2009, 10760, -2372, 339, -2538, 4476, 3001, -4570, -3158, -3465, 2873, 650, -2099, 76, 1166, -1469, -2769, -391, 4215, -630, -1448, -1796, -1573, 5914, 807, -1580, 2072, 99, 580, -2999, 1079, -202, 17940, -1233, -4909, 1079, 390, -891, 1834, -2155, -2642, -1703, 1856, -14125, 2081, 3178, -2480, -4342, -11991, -2050, 1046, 2412, 436, 1046, -2291, -1718, -3087, 1710, -963, -1914, -3423, 6190, -1238, -4333, 115, -10550, -2742, -919, -4849, 1502, -3054, -3304, 2300, -1850, 2337, -6643, 1995, -1279, -238, 738, -124, 13593, 252, -1424, -165, 2786, -1717, -838, -11244, -10971, -902, -3330, -2580, -2735, -171, 4041, -2149, 2502, 6726, -738, -4235, 368, 6144, -1718, -8620, -1888, 112, -282, -19, 4126, 10797, 610, -3097, 7783, -2974, -2058, -3558, 470, -5914, 10322, -20, 85, -1652, 6111, -1398, 2613, 3733, -3716, 1930, -4325, -1199, -921, -446, 1095, 1006, 910, -2323, -351, 808, -32768, 274, 1346, 105, 2360, -1184, 2249, -970, 153, 3180, 1307, 2207, -962, 2209, -921, 1504, -117, -2111, -3734, 5738, 8014, 76, 1566, 3013, -462, -3600, 3939, 4862, 1038, 4312, -790, -426, -1656, 20, -10568, -6389, -6597, 4230, 2910, 2504, -2962, 256, 814, -488, 824, -355, 3574, -1890, -2657, -767, 2730, -1087, -2538, -3522, -4067, 6249, -3354, 13923, 4070, -11004, 4703, 909, -5968, -5483, -4242, -780, -2489 }, .cb1108m1 = { 752, -4098, 7726, 592, -9487, 2004, 318, -4322, 6989, -3350, -478, -4308, 2023, 753, -7081, -3934, -866, 6267, -5710, 2100, -8467, 100, -4654, -6773, 4271, 10728, 11618, 1128, 12733, 1471, -5518, -1162, -2159, -402, -632, -4720, -28, -1412, -1037, 897, -1242, -1735, -2632, -3460, 3389, -582, 206, 325, -2547, 46, 1340, -4424, 
-13408, -4918, -2832, 1454, 2127, 1276, 2292, -3973, -3230, -7810, 542, 4227, 2673, -8490, -902, 1361, -1398, -1986, -991, -680, 602, -2887, -557, 2656, 3214, 1794, 31241, 1462, -1457, -3750, -1923, -2381, 1313, -128, -172, -647, -574, 1045, 2438, 1662, 503, 288, 1535, -1016, 2487, -820, 4692, 2799, -31949, 166, -1655, -2192, -636, 1357, -2361, -459, -1752, 2782, -293, -144, 1900, 685, 1766, 1900, -347, -4488, 590, 915, 798, 1133, -4494, -1388, 75, 884, 13088, -2392, 679, -315, -7520, 1086, 3873, 3297, -812, -626, -9443, 2548, -6417, 1619, 7196, -57, 5, 3594, -1922, 184, 2784, -261, -3310, 2779, 174, 2814, -965, -2912, -1835, 425, -4285, 896, 2001, 3717, 775, -1192, 22365, -175, 1522, -711, -1135, 5123, -517, 870, 4323, 585, -437, 260, -1737, -1984, 2522, -2539, -973, -8812, -16173, 4678, -4107, 130, -7832, 1140, 2792, 3394, -692, -4105, -299, 1488, 1246, 604, 2796, -3767, 579, 188, -1544, 86, 424, 1204, 4441, -1000, 15227, 3459, -3444, -1631, -2177, 3497, 1684, 925, 2872, -3905, 5729, 647, 913, -758, -547, 566, 1787, 792, -1509, -1641, -926, -1515, -116, 1266, 481, -3944, 28526, -2279, 5577, 1026, 4082, -605, 696, 1094, -478, 5732, 7247, 1461, 1521, -234, -42, -878, 270, -554, 3702, -71, 1362, 7719, 305, -13654, -4985, -1072, -2044, 6851, 438, -8435, 923, -537, 1511, -1003, 2056, -2299, -15578, 503, 1944, 3188, 2318, 1761, 1290, -2322, -568, -1591, -2746, -1966, -9784, 1514, -5596, 4070, -181, -3006, -1903, -240, -1143, 393, -1530, -822, 520, 989, -1600, -3374, 946, 678, 86, -1957, 1947, 1188, 356, 719, -2874, -2245, -19010, 547, 9067, 439, -2384, 847, -3307, -116, -1114, -445, -3505, -967, -1252, 4880, 625, 1478, -2970, -2275, 1337, 422, 3870, -1906, -1033, 1724, -532, 1734, 1011, -21848, -477, -251, -615, 770, 7520, 1030, -4372, -446, -3156, -2314, 172, 901, 70, 1837, 1205, -1344, 2933, 1080, -1290, 1353, 10205, 1158, 11135, 560, -3480, -2376, 7539, -5418, -14092, 2138, -253, -9344, -1907, 2177, 687, 2772, -2730, -546, -4180, 2021, -577, 2530, -3822, -7080, 971, 
2083, -1220, 203, 3187, 3705, -752, -2591, -704, -17469, -1168, -214, 2518, 308, -585, 1117, -1893, 2488, 1856, -23, 2418, -2922, 1960, 235, -1629, -8277, 1088, 2032, 874, 2763, -1867, 60, 1684, 834, -2676, 1574, -3098, 3250, -3723, -126, 59, -787, 2710, 930, 1384, 475, -3915, -1162, 1640, -16818, 2356, -70, 761, 4151, -778, 523, -183, 19374, -4223, -1379, -1667, -1690, -512, 8742, -34, 3816, -678, 2749, 2418, -341, -1216, 4280, -2208, -264, -2884, 4679, -821, 1824, -6724, -1528, -12042, -9908, 935, 4338, -116, 612, 6, -161, 1935, 1600, -442, 4059, 2510, 2186, -7678, 3600, -2460, -1072, -122, -1817, -246, 2786, 9079, 525, -226, 2628, -2549, 1459, 4533, 1111, -17410, 4529, -2545, -3272, 403, -2758, -1876, 2734, 2136, -6171, -2055, 1163, -2820, 2992, 2978, 1458, 1572, 2508, 13576, -1545, 14861, -796, -6444, 4022, -4358, -529, 3439, -2630, -2457, 3030, -2972, -398, 471, 2547, 1127, 1344, 202, 420, -1858, -589, 594, 1478, 5590, 1682, -1560, -378, -2198, 400, 2231, 566, -80, -2042, -4557, -2309, 8743, -4258, 1291, 11770, 718, 2342, 2912, 5170, 2470, 6832, 833, 4990, 2009, -1258, -898, -1414, 1214, 670, -2104, -5068, 788, -18997, -743, -864, -356, 1592, -5786, 652, 4952, -2319, -1097, 2177, -1654, 2879, -1645, -172, -1581, -3062, -805, -1065, -2222, 20857, -1146, 864, 1690, -1794, 855, 307, 2320, 3618, 6184, -4129, 187, -2423, 4946, -3072, -213, -2621, -2026, -5793, -986, -1597, 2125, 1474, 1766, 360, -4652, -1030, 1546, -1085, -253, 1016, -96, -1608, -7017, -4855, 1295, -271, 3751, 341, 19804, -2006, 2322, -2298, 353, -2077, -764, 212, 150, -1140, 564, -614, 268, -2023, -332, -699, -937, 1684, -1617, -22863, 1202, -144, 62, 373, -598, 184, 987, 3721, -611, 86, 3676, 362, -652, -214, -311, -694, -1973, 2351, -733, -1601, -1189, 28227, -154, 10, -347, 3400, 1333, -1695, -773, 1362, -447, -2999, -626, -1776, 2474, 2195, -1041, -797, 1828, 62, 3397, -1779, -2924, 1740, -1694, 4083, 15100, 3871, -7821, -108, 292, 998, 3141, 5813, -918, -1290, -902, 895, -1336, -50, 2014, 
-2066, 2383, 68, 31769, -334, 1243, 1981, -715, 125, -380, -1272, 1068, -357, -1734, -1138, -630, 1042, 688, -438, -558, -2460, -2894, 4196, -1004, -2177, -2291, -4701, -13990, 747, -5558, -2754, 1950, -2780, 8414, -1286, -946, 220, -2507, -192, 3726, -1361, 1296, -2215, 872, 8270, -2797, -6732, 1256, -1957, -2916, 107, -14847, 1868, 4638, 1292, -1006, 5285, 2947, -5028, 942, 153, 420, -1152, -391, 3612, 4621, 172, 762, -876, -3561, -14406, -552, -2570, -4448, -15704, -806, -928, 3380, -686, -2604, -3895, -714, -626, -1763, 1144, 485, 34, -1922, 1528, -213, 5050, -804, 185, 96, 3320, -621, -329, -1444, 864, -1684, 16583, 1872, 3327, 2146, 1132, -8216, 73, 6524, 1623, -4147, -4985, 1450, -646, -7189, 4524, -1596, 2120, 3913, 680, 2094, 1660, 752, -1221, 2414, 3986, -10314, 2096, 129, -5458, 634, -5426, -594, -9731, 2083, -2284, -5085, -4777, -1323, -1740, 6157, -841, -126, 247, -1163, -7005, 3863, -764, -1552, 1356, 10788, -745, -12481, -73, 5234, -3220, 2979, 635, 3372, -540, -36, 2887, 5221, 931, -1724, -4824, 780, -49, 120, -739, 890, 714, -1438, -458, -1861, -16732, -1858, -13282, 2182, -6796, -3307, 556, -2968, 542, -2358, 1463, -3536, 1866, 2833, -1369, -1576, -2825, 3561, -1625, 1858, -1052, -1079, 1302, -2049, 19052, -1188, -4137, 1592, -4705, 1082, -1168, 2355, 649, -1900, -2582, 1000, -3065, -2399, 3625, 1062, 860, 2586, -2645, 14755, 3147, 5002, -6720, 1728, -2114, 5090, -2838, 3020, -5048, 4182, 2237, 706, -4945, -86, -1908, -1207, 135, 675, -200, -22134, 1492, 2490, -1324, -1135, -842, 1457, 185, 1342, 3516, -882, 1069, 1159, -52, 1844, -1186, 554, 3860, 1824, -2136, -881, -1281, -13259, -705, -90, 2150, 573, 2787, 1068, -1968, 121, 805, 4382, -1033, -9220, -744, -1446, 7180, 257, -5983, -1643, -6198, 1854, -3524, 1060, -118, 56, -843, 2832, -98, -3493, 368, 6, -1877, -3615, -1954, 17971, 962, 1532, -1754, 3776, 661, -2025, -60, -1013, -1222, -3062, -69, -4933, 3064, -1176, 213, 477, 1081, 1679, -2328, 1984, -21759, -881, -54, -1101, -1092, 598, 1648, 
-3384, -213, 379, -1318, -1972, 630, -536, -1970, -461, -356, -22416, -1855, -113, 876, -2809, -587, -2323, -56, 2177, -797, 1649, -4069, 1350, -2075, 101, -1384, 1703, 1085, 471, 8093, 1020, -4112, 970, 866, -1456, -341, 1418, -12938, 379, 9787, 1814, 2337, -1705, 9913, 1026, 1962, -744, -2900, -1690, 1534, -959, -629, 2330, 3735, 4742, -3139, -2135, 2298, -2765, -1389, -3634, 27139, 671, 2208, 494, 1015, -1197, -239, -321, -1145, -679, -637, -3116, 544, -952, 882, 396, 1087, -3163, -2684, 759, -725, -2186, -542, 2545, 3669, 24, 1689, 10473, 1836, -419, 322, 2475, 1908, -1346, 50, -6401, -3644, 552, 2348, 1327, 11853, 2467, 5493, 1544, 464, 1796, -2801, 8217, 1014, -2103, 3764, 8091, 170, -12422, 1708, -2438, -1873, 1970, 2160, -5027, -647, -118, 2830, 2379, -1091, -5723, 124, 3017, 417, 55, 1376, -1079, 7122, 3086, 17847, 2468, 3273, -599, 3302, -922, -2073, -1696, 805, 2022, -1899, 3188, 1425, -4364, -140, -3760, 437, 1393, -1298, 17166, -1283, -2904, -692, 518, -404, 944, -1990, -968, 1323, 2376, -11708, 2187, 3164, -559, 2212, 1598, -1741, 360, 633, 3075, -660, -1012, 778, 565, -2020, -123, 5, -2217, -2967, 374, 272, 336, -1725, -408, -2270, -2645, -1044, -517, 1911, -386, -4439, -7603, -1000, 7660, 589, 14931, 2901, 11998, -13102, -1919, 3904, 86, 1617, 7324, 3078, 1714, 4636, -2504, -194, -3274, -710, 33, -1965, -2298, 2513, 726, 75, 67, 884, 2104, 4110, 1936, 10387, 2722, -1970, -12496, 4799, 3086, -2938, 1719, -2138, -338, -1124, 971, -4200, 480, -3361, 6220, 5954, 1830, 1001, 2996, 4166, -2854, -437, -1430, 1072, -312, -12949, 3113, -2479, -2034, 6956, 2805, 2128, 856, -8803, -4709, -1274, -120, 1252, 3898, 6526, -3914, -2276, 2754, -2604, -3038, 4136, 2598, -2172, 4861, -2457, 2, -2693, -808, 3527, -1184, 392, -2202, 2406, 960, -1064, -2589, 1161, 2418, 728, -466, -4865, 211, 14720, -2093, -1977, 85, -12618, -2073, -3028, -1067, 1734, -2491, 9506, -422, -2718, -2966, 3883, -2852, 336, 1306, -2297, 2009, 2589, 3071, 192, -1239, -10553, 2, -1174, -3036, 
9939, -27, -1278, 1448, 18655, 761, 931, 445, -94, 206, 448, -1865, 232, -4353, 4596, -260, -976, 594, 648, 796, -1376, -1186, 3056, 3171, -5675, 6179, -1287, 16934, -1478, 1090, 577, 8075, 1119, 2943, -3208, 1852, 1986, 6003, 901, -962, -3196, -1907, 392, -2605, 2796, 4082, -456, -3109, -1219, 123, 2470, 174, -1254, -1350, -4919, 1271, 12302, -1154, -6317, -3346, -1315, -144, 1214, -49, 3491, -1029, -2043, -8373, 4197, 4971, 9808, 9732, 700, 2247, -2755, -2034, 3260, 839, -10554, 1661, 11484, -3180, -1909, 1089, -813, 3116, -2103, -3726, -4514, 663, 1152, 3902, 4862, 2739, -3828, 707, 2712, -8009, -832, -16492, -1472, -2422, -5593, 322, -1894, 2810, 109, -1788, 2050, 3539, -3112, -6178, 2487, 2102, -135, 3163, 2096, 4123, -310, -1090, -2, -2662, -17087, 1373, 1448, 162, 527, 655, -2248, -3530, 194, 1305, 7590, -5515, 1225, 1607, -3816, 2185, -2679, -4486, -582, 4981, -1675, 147, 14790, 119, 11771, -1228, 1012, -6133, -2247, -3913, 1348, -1846, -513, -6386, -749, 6726, 745, -809, -799, 3224, 43, -2230, 2598, 2994, -1590, -11198, -14476, -256, 695, 877, -3680, -2734, -1448, 1336, -1633, 3327, 3497, 2956, -782, 2958, -1866, 2876, 2003, -856, 1282, 5068, 391, -10539, 1703 }, .cb1110l0 = { -14944, -14950, -73, -1141, 1532, -575, -620, -816, 1185, -1597, -2651, 1426, -1458, 1317, -1320, -19, -209, -352, -163, 912, -85, -180, -546, -1121, -435, -345, 229, 364, -850, 632, -426, -359, -32768, 278, -1021, 310, -31, -355, -442, -234, 415, -202, -10393, 1645, -378, -2270, 837, -1857, 556, -935, -1344, 3016, 3452, 1597, 1378, 466, -13740, -878, 1475, 237, -1301, 9756, -592, 23, -192, 335, -58, 285, 376, 40, 24, 292, 426, -1962, -798, 745, 1379, -34, 397, -14748, -6285, 7343, -6374, 4442, -14800, 1878, -24, 1606, -728, -476, 1754, -1052, 911, 3139, -1444, -222, -1968, 1858, 1330, 244, 213, 935, -92, -348, 155, 418, 29128, 236, -190, -226, -309, -178, -690, 46, 716, -534, 147, -630, -75, -826, 37, 4745, -1056, 2400, 1398, 1494, 460, -221, 2908, -656, -15611, -2940, 2342, -98, 
581, -3144, -471, 3772, 2057, 1583, 13738, -139, 330, 1175, 429, 63, -14544, -374, 1439, -1226, -422, -690, 816, 1279, -592, 1642, 700, 1338, 0, -714, 46, 377, -188, -366, -197, -637, -622, -262, -69, -637, -1266, 257, 620, -1040, 324, -19064, -602, -463, -1329, 513, 2699, -421, -1918, 2250, -404, 403, -1514, 134, 147, 3, 426, 605, 276, 561, -26, -294, 630, -500, -480, -133, -712, -1144, 238, -633, 173, -29164, -1182, -274, -138, -271, -232, 30, 706, -168, -848, 704, -2132, -248, -108, 669, 1165, 234, 1243, -12201, 2208, -1971, -829, 10305, -3964, -1502, -409, -3918, 4520, -2259, -797, 2235, -5560, -1710, -2472, 280, -1747, -980, -4529, -5208, -1813, 330, 890, -6220, -710, -5583, -4704, -913, 2920, -12484, -4340, 334, -1303, 283, -740, -1261, 3556, 3210, -11640, -14438, -2557, -795, 747, 546, -2488, 1891, 485, 725, 338, 1579, 2092, 2354, 284, 2812, 490, 1442, 187, -2699, 1196, -1783, 1228, 2364, 13364, 258, 2102, -6163, -200, -5475, 2804, -576, 6878, -2852, 2246, 1186, 584, -136, 5258, 3825, 3045, -1661, -5246, 2548, -5054, -4383, -1542, 12912, -1580, 1268, -1415, -2012, 1021, -2106, 979, 2390, 3411, -1076, -439, 5416, 1333, 440, 3422, -13384, 2540, 2544, -3668, -2308, 1042, 589, 4166, 5090, 1539, -3447, 7003, -4396, 319, -590, 481, -471, 22260, -1936, -297, 1302, 1163, 937, -164, 847, 768, 827, -430, 792, 472, -1557, 712, -602, -1007, -278, -974, -3198, 10560, -2124, 335, -1206, 629, -13712, 12, -1673, -691, -666, -2890, 826, 1792, -1547, -2016, 807, 1810, 841, -814, 1214, 760, -1056, 404, -94, 144, 297, -584, 106, 116, -132, 236, -507, 86, 853, -670, 413, 32767, 730, 10835, -502, 1297, -3857, -1035, -1602, -164, -1721, 1468, 507, 1064, 1478, 4323, -760, -882, -4331, 2564, -10933, 3000, 2101, -2492, -72, 12636, 2743, -1113, -8334, 6720, 2348, 491, -23, -1065, 1506, 2090, -1731, -1997, 675, 425, 8165, 695, 2285, -433, 515, -465, -347, -1006, 357, -55, 57, 481, -31494, -816, 60, 76, -439, -328, -217, 265, 123, 839, 218, 1355, 243, -878, -12819, 5168, 318, 1376, 
-2931, 12689, -83, -220, 2848, -770, 150, 1631, 1955, 1552, -1371, -3053, 1752, -7250, -24, -514, -5568, -1529, -112, 419, -1136, -672, -1847, -1136, 90, 453, 4810, 13012, -2355, -2477, 1393, 451, 3390, 12, -2228, 1840, -2543, -2404, -2969, 186, -444, 204, -265, -11467, 2204, 1821, 3591, 67, 8821, 4015, -183, -5902, -1468, 11394, 3062, -128, -476, 2495, -2888, 13482, 686, -1320, 371, -884, 1829, -1810, 337, -1124, -1442, 432, 1950, -1203, 663, -10445, 2310, 766, 137, 4418, 2821, 135, 116, -12164, -3592, 686, 2310, 1229, 1930, -1756, -1309, 1439, -3741, -305, 1547, -9940, 3198, 1333, 2403, -2847, -3892, -259, -1766, 881, 14310, -1711, -840, 2259, 3027, -1527, 1156, 2904, -75, -728, 1536, -127, 152, -3240, -726, -11914, 1037, -851, -1893, -748, -3294, -1114, 6072, 103, -1539, 4573, -1637, 5242, 2705, -9890, 254, -1565, -407, 1818, -23004, 1110, 119, 256, -707, -451, -679, 374, -935, -669, 403, -10, -594, -525, 1403, -1016, -553, 595, -169, 2523, -82, 947, 11572, -1166, 11668, -4962, 842, -860, 89, -3308, -640, 558, -851, 622, -1002, -4933, 2762, 1991, -121, 1401, -111, -49, 868, 135, -1392, -279, -560, 412, -241, 1414, -802, -1256, -298, 447, 17738, -320, -1150, 1650, -398, 5626, 6076, -8919, 455, 12716, -2094, 157, 1361, -1515, 1494, -6210, -553, -1785, -424, -3049, -4066, -1188, -732, 1992, -1926, 1495, 1085, -22434, 1187, 391, -1512, 747, -313, -502, 1331, 456, -323, 246, -581, 56, 1448, 2071, 535, 782, 520, -136, -290, -12350, -11858, -456, 2340, -310, 22, 2210, -2531, -392, -898, 3919, 1354, -332, -4255, 169, 425, -476, 2577, -1172, 1984, 266, 514, -516, 2481, 81, 2103, -710, 273, 1405, -14811, 5858, 3621, -982, 345, 2044, 158, -2050, -602, 954, 342, 239, 157, -317, -35, -260, 307, -31972, 228, -77, 225, -154, 643, -883, -518, 32, 372, 208, -22488, -458, 530, 104, 254, -775, -1264, -571, 900, -263, -323, -296, 962, 520, 548, -2196, 42, 1408, -211, -16117, 2052, 12656, -822, 507, 321, -772, -786, -144, -3539, 892, -3430, 19, -1831, 1161, 1836, 988, -1134, -704, 
-2994, 692, 765, 457, 1624, 502, 13, 364, 337, 32108, 1517, -225, 189, 141, 985, -572, 262, -146, 31, 236, 269, -278, -1686, -13968, 1247, -1009, 1046, 13467, 1276, -268, 307, -1383, 1544, 136, 949, 70, 446, 1391, -2188, 745, -374, -14231, -712, -15202, -533, -108, -2244, -1232, 450, -895, 1086, -782, -1082, -718, -660, 796, -2095, 2722, -468, -1717, 147, -23566, 377, -220, -1731, -1416, 486, -241, 266, -802, -322, 1066, -544, -167, 520, -1297, -100, 622, 670, -188, 711, 32, 1155, 628, 350, -112, -154, -1048, -44, 36, -454, 304, 32767, 356, 462, -1194, 549, 138, 0, 1044, -119, 195, 1098, 521, 3294, -3776, -224, 4297, -1256, -303, 2107, 300, -13283, 2933, -3194, -1408, -4152, 4195, 287, -932, 1247, 13453, 277, 418, -598, 87, 1132, -80, -405, -13400, 656, -1310, -1447, -3974, 1719, 313, 500, 1078, -114, 1449, -293, -120, -4754, 5583, 235, -5140, -865, -484, 15572, 336, -1854, -154, -454, -1475, -726, -3718, -4048, 1575, 480, 1094, -2209, -3202, 420, -564, -48, 964, -2667, 2172, -1666, 112, -730, 203, 3618, -15857, -4853, 48, -1084, 1512, -937, 3353, -453, 223, 2267, 139, 190, 1959, -720, 4389, 681, 10383, -112, 12390, -882, 1695, 3539, -169, 3131, -122, 3627, 252, 185, -523, 112, -219, 214, -182, -102, 118, 230, -60, -801, -25, 42, -279, 262, -32358, 344, -542, 382, -223, -404, 1201, -2646, -163, -803, 3041, -1009, 3818, 756, 5834, 14249, -1828, 139, -218, -658, -1314, -4980, -3322, -1461, -1598, -91, 2464, -954, -5203, -791, 1339, -13598, 594, 702, -388, -1115, -2377, -370, -3658, -3322, 1871, 2513, 2910, 4095, -2195, 4291, 886, -567, 1182, -302, -672, -21, -268, -29244, -199, -1024, -1284, 485, 1432, -1086, 119, 1030, 418, -643, -1165, 1847, -30, -844, -909, -416, -604, -609, -289, -391, -238, -94, -391, -810, 413, 356, 954, -1935, 30996, 441, 138, 1381, 1130, -2313, 558, -203, -248, -951, 408, 1815, 256, -429, -892, -695, 1138, 439, -760, -63, 6498, 570, 15252, -3397, 170, 935, 338, 1, -528, 524, -541, -281, -3, 499, -333, 685, 436, 32176, 389, -153, 572, 256, 53, 
16, -902, 724, 2849, 2503, 80, 667, -1867, 742, 15205, -8715, -2588, -476, -450, -733, -891, 1178, -1751, -1630, -114, 144, -138, 10145, -188, -1608, -131, -247, -544, 9774, -610, -2868, -3472, 345, -9294, 3724, 2634, -5124, -392, 2551, -649, 782, -18, -160, -351, 12074, 13865, -1294, 1262, -3135, -2861, 18, 753, 167, 620, -2432, 1998, 740, 1902, 400, -206, 3518, -3563, -632, 72, -1810, 1520, -827, -572, 1604, -613, 3704, -736, 11100, 12702, -3189, -792, -3552, 1621, 1841, 1236, 1215, -457, 9542, 9278, 2633, -8801, 862, 1741, -4840, -2620, 616, 324, 2152, 3632, 880, -472, 1927, -3456, -2105, -965, 3426, -1893, 3095, -1152, -3542, 182, 998, -386, 1202, 481, -1951, -510, -931, 1688, 151, -13664, -3894, -973, -906, 1524, 9576, 2607, 12497, -819, -5214, 5936, -634, -610, -4148, -421, -486, -1864, -306, 2421, 724, -219, -1304, -2106, -504, 6762, 5266 }, .cb1110l1 = { -2972, -1201, -1388, -1762, 340, 21127, -999, 126, 111, -1224, -1738, 311, -712, -450, -114, -648, -752, -172, 67, 375, -967, -1032, -10763, -1885, -2223, -3258, 480, -228, -143, -1299, 13128, -3062, 1418, 6, -649, -1816, -288, 767, 345, 876, -491, 948, 540, -167, 1969, -1883, -455, 20584, -656, 114, 308, 279, 1105, -594, 1332, 255, -356, -186, -540, 1898, -873, -477, 1404, 30475, 370, -322, -337, -206, -440, -894, -54, -466, -640, -408, -256, -560, -1503, 626, -573, -1684, 419, 407, 2076, 5022, 3143, -1135, -12118, -12082, -1462, -2060, -5432, -1092, 1575, 1958, -968, 122, 958, -5312, 677, -1952, -12276, -1594, 1211, -1094, 1992, -11032, -2993, -834, -1297, -1139, 312, -1546, -4253, 1191, 21, 2771, 639, -2514, 6623, 746, 1830, 2967, 1688, -14893, 7988, 4099, -97, 1165, -2350, 65, -1308, 1834, -2084, 1683, 5118, -1633, -10, -5282, 403, -1489, -264, 398, -2420, 12854, -1498, -2642, -1486, 826, 699, -2213, -2296, 11849, 478, -2202, -561, -250, 199, -2433, -948, -402, 433, 403, 13031, -124, -180, 1499, -643, 527, 11368, 5833, 938, 3202, -452, 2875, -1163, -117, -2047, -1068, 211, 3122, -236, 13548, -702, 352, 
-312, -1901, -2145, 2334, -12100, -76, -419, 362, 3501, -220, -3086, 572, 1537, 3240, -1489, -1012, 640, -513, 930, 390, 31019, 724, -78, -706, 183, -157, -122, -847, -1156, 301, 508, -456, 321, 317, 1300, -512, -1743, 10190, -294, -116, 4183, 1374, 13360, -1339, 1832, 2547, -702, -2782, -1464, 1176, -1287, 2256, 2169, 836, 2096, -248, 1777, 11306, -211, 265, -3834, 336, 1936, -586, 633, 1037, -1915, 12862, 930, -273, 2333, -3239, 429, 374, 2518, -671, 570, -2208, 385, -284, -15613, -1752, 1341, -531, -744, -1111, 290, -2302, -1012, -2933, -366, -30, -4595, 1400, 560, 48, 15739, -945, 411, 1876, 2441, -2144, -1222, 12448, 54, -726, -2743, 2548, 2100, 1307, 408, -198, -1802, -63, -1919, 933, -329, -528, -15918, 1704, 3028, 217, 606, -2804, 2052, 9320, 592, 969, 6836, 647, -671, 584, -1, 3564, -2575, 436, -2195, 414, -201, 1099, -772, -220, -578, -467, 125, -934, 271, -21476, 288, 215, 216, 476, -560, 768, 1142, -169, -1112, -14096, -14436, 2769, -1464, -61, 1373, -3539, -1067, 1175, -1549, -861, -332, -1876, 3159, 340, 1711, -2453, 457, 2536, 1114, -2278, 2464, -3253, -466, 12291, 12484, -2868, -800, 1142, -4244, -178, 3781, 1542, -663, 1976, 3105, 145, -100, -1774, -1039, 1627, 15540, 4194, 5392, 741, 1816, -544, -9100, 4255, -1083, -1266, 2580, -4200, 1934, 1721, 129, 2276, -2704, -1341, -1310, -11926, -1478, 199, 755, 619, 4231, -478, -1627, -1242, 1842, 13170, -2416, 778, 192, 273, 782, 774, 2188, -838, 3139, -1532, -1639, -1073, -596, 770, -353, -53, 82, -322, -20584, -344, -443, 158, -144, -554, 50, 954, -145, -336, -2050, 596, -950, -2690, 13908, -13783, 4792, 879, 584, -2987, 967, 192, -585, -783, -1341, -3108, -1622, 2478, -1362, -1470, -1556, -430, -110, -736, -8097, 2073, 964, -417, 1669, -5425, -7846, 536, 12883, -1690, 1143, -242, -438, -2274, 57, 302, -574, 637, 2816, -1642, 2166, -172, 893, 421, -614, -565, -338, -526, -1085, -939, -1138, -991, 1919, 1720, -18845, -1950, -342, 1930, 321, 184, -956, -374, -462, -216, -6, 26, 386, -50, 603, -720, 634, 
-252, 261, -860, 218, 22846, 11544, -459, -946, 452, -102, -1203, -1802, -1105, -310, 787, -220, -1113, -2043, 650, 13767, -3638, -296, -902, -413, 252, -816, -172, -505, -1335, 890, 768, -523, 808, -331, 20000, -264, 1763, 133, -1, -464, 949, -954, -147, 1780, -190, 30, -7422, -4615, -1006, -470, -742, 500, 7509, 1500, 1550, -3614, 810, 2595, 1506, -12926, 3588, 402, -2547, 1505, 65, 4, 3382, -2201, -2441, -1521, -5450, -3820, 282, 5212, 1186, -1056, -2334, 988, 12987, 390, 4141, -2680, 1663, -8034, -1792, -225, -674, -7147, 13254, 1631, 10163, -3332, -7, -675, -735, 772, -2299, -326, 1641, -1174, -1911, 82, 776, 891, -445, 18590, 238, 1417, -2372, -9718, -2682, 600, -1401, 604, -1791, -22, 1546, -1764, 525, -1355, 348, 3260, 1115, 204, 524, 225, -12776, -679, -15595, -1188, 1078, 82, -859, 28, 819, -1220, 563, 2309, 331, -1158, -2010, -264, -383, 1732, -424, -2742, -775, -329, 132, 391, 1261, 1033, -9812, -11829, 2433, 2690, 606, -2724, 7216, -296, -1834, -1694, 456, -4732, -400, -3192, 1428, -316, -13674, -2702, 2320, -6548, -2025, 1222, 1749, 4005, 2924, -3539, -5104, -2333, -1438, 2598, 62, -757, 760, 343, 154, -31947, -534, 1296, 697, 88, 345, -577, -500, -174, -326, -198, 272, 157, -815, -636, -1163, -867, -273, 1054, 774, 1624, 989, 107, -1088, -673, 2143, -22962, -566, 151, 72, -27, 1034, -444, 501, 1905, -1455, 21, 289, -10670, -789, -2421, -2686, -327, 804, -3009, 907, 960, 1379, -43, -552, 2203, -1406, -911, -11094, -529, 4458, -4152, -70, 3162, -12546, 326, 874, 1426, 3019, 2315, 104, -12516, -1591, -2877, 772, 1982, 1160, -4491, 3417, -1524, -2139, 130, 930, 9359, -18308, -376, 4090, -468, 156, -216, 60, -643, -3440, 256, -835, -2389, 1660, -542, -1628, 4270, 3574, -3136, 433, 1069, 30024, 561, 268, 790, 294, 207, -1552, -736, -97, -215, -98, 690, 686, -202, -736, -453, 655, 511, -156, 1006, 361, 1424, -1254, -361, -1253, -1419, -290, 78, 555, 565, -488, -923, -18193, -630, -908, 188, 925, -1684, 241, -319, -14478, 17007, -1415, 274, 592, 1344, 1784, 
-731, 344, 992, 141, 290, 481, 628, 623, -1166, -2092, 140, -1056, 13736, 754, 1980, -238, 2132, -1372, -2216, -12057, -1662, 66, 1742, 2209, -962, -1574, -3044, 173, -3066, 183, -4476, -1016, 6160, 780, -1193, -3334, 179, -371, 244, 160, -686, 669, 330, 426, 65, 159, -664, -186, 479, -742, 54, 605, 32603, -941, 370, -91, 856, 825, 1042, 374, 651, 313, 734, -240, -49, -685, -1994, -604, -875, 44, -884, 886, 13012, -1506, -4317, -1926, 3050, -1027, -482, -40, 137, -2560, 1366, -11812, 2112, 2266, -2690, -1339, -700, -243, 2322, -1042, 4635, -3210, 4281, 47, 670, 9218, 1165, 814, -62, -2276, 12987, -714, 2481, 1355, 896, 2840, -1664, 2048, -345, 2285, 1754, -669, 2284, -288, -575, 944, -1528, 44, 1071, -706, -543, -1347, 880, 257, 1364, 1444, -17896, 99, 1539, 1813, -611, 355, -2290, 980, -787, 132, 300, 2353, 204, -798, -296, -594, 895, 842, 18755, 1129, 79, -189, 515, 882, -286, 109, 305, 374, 1323, 861, -18, -78, 294, -320, 674, 504, -159, -549, -95, -32403, -90, 658, 1082, 1611, -137, -74, 1160, -794, -55, 822, 2627, 1203, -3540, 9829, -7860, -9063, -4015, -894, -2218, 729, -879, -1869, -2446, 4050, -488, 13211, -290, -820, 371, 14196, 866, -891, 218, -1838, 2162, 1144, -186, 512, 1416, 546, 3298, -1253, 128, 1202, 557, -1967, 680, 545, -139, -3008, 18453, -3322, -137, 163, 1377, 1116, 2572, -1577, -1846, 651, -1319, 796, -862, 331, 4383, 2453, -1894, 3264, 14137, 842, -3087, 3740, -1100, -2400, -1364, 2406, 417, -2393, -868, -3158, -9712, 3480, -1403, 1896, 201, 1285, -593, -11718, 99, -539, -186, 45, -2266, -12228, -2658, 2802, -1198, 1022, -3840, 1401, -1918, 1655, 1725, 96, -205, -913, 1629, 568, -1285, 1264, -1160, 594, 223, -336, -1436, -472, -19792, 553, 1494, -195, 570, 282, -653, -54, -1115, 153, -484, 141, -188, -278, -173, 464, 13, -634, -42, 390, -464, -246, 622, 1229, -692, 29175, -574, 1150, -135, 2685, 2452, 63, -962, -918, -1657, -1978, -172, -677, -3414, 1345, -3964, 2875, -1412, -654, -3000, 10739, 11348, -2232, 516, 8303, -189, 2564, -150, 
-373, 903, -275, 2394, -1135, 508, 424, -1704, -2222, -3789, 1938, 216, -12702, 2488, -1364, -2175, 1114, -819, -2756, 1564, 952, 36, 609, -933, -1568, 110, 143, -1575, -4236, 528, 15042, -1920, 348, -2623, 5217, 1911, -1088, 259, -590, 364, 2081, -3585, 662, 249, -119, -111, 778, 2167, 11, 2500, 7182, 14452, 4388, 4121, 3623, 1598, 532, -507, 877, 3830, 372, -2184, -2810, 11748, -2095, -1079, -3070, -768, 2901, -3587, -2572, 10008, 563, -4588, 1026, 1117, 1879, -12004, -416, 317, 2032, 1800, 1058, -84, -296, -1748, 2588, -11019, -1627, -3264, 2480, 96, 2146, -2672, 2418 }, .cb1110s0 = { -32746, 360, -2774, -672, -1808, -14, -1037, -1327, 1409, -2215, 172, 1557, 945, 2031, -702, 1844, -1106, 472, 2603, -978, 2782, -5691, 1473, -5668, 7129, 6600, -2160, 108, -1844, 2062, -2395, -740, 1690, -45, -725, 77, 7236, -12903, -3356, -764, 1870, 720, -2201, 790, 9950, -3694, -5340, -4031, 4115, 6863, 2352, 1484, 3606, -4855, 714, 4104, 6240, 7261, -6855, 4919, -2847, 6701, 7469, -616, -11442, -1935, 9157, -4072, 133, -5976, 2455, -9360, -2898, -4353, -7721, -3098, -3505, 2568, -5432, -576, -10072, 250, 2173, -4196, -4322, 2688, 5220, -6026, -346, 11678, 2071, -7344, -2182, -530, -180, -2568, 1524, -1617, -8825, -4845, 2794, -2813, -2669, -2423, -2709, -8985, 2105, -4629, 708, 2040, -5680, -2470, -7277, 6841, 6523, 4196, -6788, -1982, 3844, -5000, 156, 1930, 1780, -3824, -286, 3908, 1703, 7304, 1145, 144, 1180, 7145, 3175, -13823, 6580, -3066, -6321, -9739, 4432, -1145, 2923, -2636, 3838, -7037, -3913, 1262, -1398, 363, -141, -886, -5667, -212, -2118, -2717, 2724, -18802, -2098, -155, -1399, 782, 797, 766, 2613, 5374, -3767, -1711, 624, 693, 2544, -6153, 7179, 6835, -762, 5061, 655, 2600, 9208, -7030, 7047, 1654, -3404, 176, -5486, 1374, -15378, -487, 7456, -1954, 2404, -2994, -1608, 2362, -498, -7952, -6143, -3996, 1596, -3013, 1181, -1534, -5265, 220, -2677, 1047, -4629, -15066, 3966, -446, -11713, -5694, -393, -250, -1336, -7394, 1508, 6239, 3788, 6273, 6215, 822, 2657, 
8057, 8391, -658, -2561, -11587, -2589, -6702, -9227, -1016, -2220, -9702, 5988, 1859, -6100, -4594, 221, 2529, 2217, 8273, 1804, -6128, -2859, -8259, -4707, -2494, 1913, -352, -4561, -289, -1801, -994, -4445, -1001, 5422, 10868, -7366, 1679, -5195, -6859, 2982, -406, 2400, 4520, -3611, -1892, 4900, -3504, 771, 2774, -772, -1929, -7354, 375, 628, 4522, 1069, -969, 8083, -155, 3178, -1138, 1752, -17288, 4390, -2483, -2071, -1353, -1155, -456, -2683, 6798, -1908, 1797, -6657, -2770, 5610, -14518, 5922, -3964, -938, -853, 1416, -1077, -4562, -160, 5820, -3031, 5091, 1987, -2746, -3779, 238, -264, -3074, -11718, 9370, 9806, -6302, 3979, -2938, 4034, 393, -1399, -4466, 2181, 756, 394, 2264, -3664, 78, 470, -3228, 3942, -1714, 708, 4988, 1938, -2722, 4555, -5054, -1026, 19312, 354, 107, -5357, -4364, 597, -2566, -2812, -2278, -446, 1384, -371, -2566, -388, -3964, -8989, 9136, 3389, 8440, -5570, -1262, -5874, 2056, -5973, -185, 4540, -4924, 154, -3653, -1113, -3048, 7099, -2734, 2940, -6704, 1543, -8120, 10134, -9485, -6645, 4816, -442, -32, -2430, 4932, -6129, -5050, 6120, -2147, -6910, -1342, 1075, -2458, 50, -4747, -3080, 1886, 1490, 18972, 48, 787, 2441, -405, 1668, -1399, 2202, 2175, -3592, 1548, -2728, -4864, 504, 383, 376, -1073, 2142, 504, -3114, 6378, -5516, 13462, 196, 1840, 7087, 792, -3583, 302, 1012, -5504, 270, 3354, -4486, -2312, -2522, -2872, -3899, -2261, 5211, 1417, -3075, -151, -985, -772, -1630, 164, 659, 1496, -349, -621, -32, -2982, -1720, -3475, -7370, -1541, 1122, 20474, 1726, 4474, -3228, 7024, 3265, 522, -2193, -2113, 5388, 1912, 5929, 11768, -1162, 2600, 4048, 652, 3360, -3215, 376, 10028, 6054, -3814, -1155, 93, 4512, -3581, -4037, 7484, -1481, 2797, 2635, -12275, -2780, -6235, 5739, 2687, 376, 5984, -2547, -8834, 4332, 2752, 1942, 1002, -3312, 5251, -86, -7794, 918, -2413, 3131, -3316, 2095, -4569, -15382, -5534, 1290, 5179, 2928, 3034, 2365, 270, -7476, -3024, 6910, 1355, -6262, -2040, 10490, 1432, 12284, 1125, -3160, 4518, 973, -2351, -1726, 
1967, 1488, 382, 3559, -3742, -2908, -944, -1662, 682, 902, -4360, 5026, -4252, -1212, -3269, -6024, -3788, 9128, -2638, -1625, 315, 3087, -3265, -10441, -7207, -4078, -3266, -7543, -5223, 5460, 2496, -9258, -227, 4048, 860, -520, 13616, -3458, 3837, 809, -104, -4062, -4846, -136, -1631, 13977, -1136, 3380, 1099, -4022, 1831, 3360, -9034, -52, -516, 10144, 5074, 4866, 8282, -972, 2496, 2336, 8766, 2881, 2417, -5588, 3064, 3934, -4202, 627, -986, 1750, 958, -2348, 5006, -2597, -90, 133, 23271, 2431, -3984, 1894, -2094, -1816, 5007, -3164, 2526, -1862, 2651, 1809, 7173, 3410, 154, 14930, 3032, -5314, 44, 8868, -543, -2158, 5341, 258, -8188, 3772, 2804, 7544, 8339, -3560, -63, -735, 1300, -4308, -1085, -4986, 1564, -6744, -2605, -310, 1275, 1166, -640, 4814, 4373, 3103, -1242, 6049, -4786, 597, 182, 2371, 6950, -2265, 389, -14669, -1942, -2733, -485, -865, -597, -1376, 1626, -3956, -1244, 1532, 3918, -3311, 1574, -88, -20573, -5471, -71, -1731, 1436, 2428, 3982, -4576, -914, 5460, -4973, 1650, -2364, -2486, 3212, 5424, -2501, 4595, -937, 728, -5140, -9948, 1437, 10560, -5704, -264, -2752, 949, 5229, -1445, 430, 827, 4103, -1999, -4625, -4171, -8769, -8927, 7161, 4539, 6968, 5975, -4626, -2793, 10080, -10386, -2479, 1724, 2992, 354, 3650, 3328, 4490, -1931, 7348, 7283, -3304, 4446, -1698, -1224, -3002, 4340, 1041, 607, -454, -4261, -18071, -1199, -3902, 570, 5808, 5582, 6710, 235, -205, -4288, 3472, -686, -103, -3658, -436, -9680, -190, 275, -919, 2522, -2087, 9096, 5060, -6450, 10282, 3344, -8167, -7688, 11881, 3101, -1280, -9942, -11741, 2213, 712, 3976, -4218, -5285, 2797, 2996, 4006, 2053, 2344, 6200, 141, 2616, -3981, 6970, -4194, -1621, -13724, 7772, 2800, 2220, 445, -266, 4030, 444, -228, 2642, 1617, -2511, 1699, 8740, 3438, -2063, -2093, 1806, 950, -7112, -1513, -2886, -8789, 870, 3456, -4126, -3330, 541, -10173, -1789, 3156, 4466, -5965, 479, 5177, -2806, 2506, -1646, -3609, 1617, -7373, -3146, -2389, 3601, 7850, 89, -3373, 4670, -4180, -3186, 3056, -1691, 
1314, 9234, -7799, 1323, -4360, -9866, -1930, 8091, -13452, 8503, 1980, 11247, 7688, -5953, -4165, -3192, 540, 1631, 131, 2250, 5330, -146, -8724, -3148, 2834, 1148, -3886, 374, -1836, -3898, 9649, 1119, 10221, 128, 8868, -7301, 2601, 1252, 2340, -3789, 4682, 181, 4434, -1740, 4368, 879, -620, 2046, 1842, 844, -925, -2506, -3344, -8820, -722, -451, 521, 903, -1286, -3059, -5308, -4759, -2706, -1429, 2762, 927, -1459, -7274, -12028, 8838, 3987, 2406, 8626, -3128, 6505, -4322, -197, -2464, 2738, -46, 161, 13919, 2252, 2059, 981, 204, 1161, 4910, 683, -4311, 2081, -1932, 1119, -6067, -5325, 8528, -4704, -5522, -6183, 5744, -3407, -2021, 2688, -3230, 2490, -976, -500, -7834, 2064, 3191, 4740, 3686, 1762, 2604, -2442, -5720, -7550, 457, -3478, -8097, -6510, -9105, 8031, -4895, 500, -2436, 1483, -4415, -2023, -3768, -2497, -1911, 789, 566, -969, -4204, 6128, -5076, 2664, -4222, 6755, 1774, 6881, 64, 1205, -9243, 4782, 4432, 5193, -2258, -4787, -7433, 1755, -794, 1297, -7535, 12773, 9124, 806, 2348, -8112, 7874, -4348, -1410, -350, -2528, 576, 661, 272, 4598, 691, 1913, -3349, -1881, -1854, -779, -821, 8444, 60, 2570, -1813, -1354, -4512, -5471, 4728, 3289, 2617, -9326, -6670, -859, -2713, -9839, 4676, -2657, 3106, -1393, 10278, -3069, -2253, 1015, 2246, -2227, 16, -388, 7962, 1493, -3122, -2707, 7982, -6106, -1462, -1665, -1302, 2347, 3640, -15122, -2211, 417, 6819, 959, -2876, -6868, 11060, -2329, -302, 1595, -4610, 9514, 12677, -4614, -2899, -141, -4857, 1447, 6400, -2894, 1696, -2888, 1889, 3489, 2775, -504, -6597, -5258, -7256, -379, -1249, -136, 3118, -3537, 3295, -3458, 2103, -399, 15281, -222, -1809, 172, 2257, 1947, 707, 3562, -5691, 3575, -2210, 5750, 815, 4059, -16, 1306, -13308, -1733, -1338, -3477, 5247, -1950, -5148, -678, 8074, 1740, 290, 2033, 4639, -4240, -536, -5214, -1366, 2491, 501, -59, -4480, 430, -285, -5947, -755, -14559, 5696, 6960, 4462, 2317, 6414, -13174, 4962, -899, 5924, 11100, 5303, -970, -2528, -6239, 2253, 2236, 553, 458, -2229, 8016, 
-7082, 2869, -4209, -4460, -6536, 3557, -1766, 7815, -655, -6029, -5250, -1627, 2646, -3466, -3584, 901, 10305, -895, -427, 949, -2776, 3436, 769, -4131, 9019, -4898, -3562, -7978, -359, 1358, -1528, -3095, 5840, -6214, 2591, -2086, 9480, 640, 2858, 216, -3625, 5740, -7008, -1097, -2091, -143, 4832, 6210, -1358, 3998, -714, 835, -4004, 3664, 1980, 1240, 2902, 510, -1565, 427, -2052, -4208, -1505, 1187, -1229, 3732, -932, -1014, 4784, 18474, -5111, 3047, -54, -1547, -3892, 8612, 274, 1446, -3548, -7689, -423, 1192, -4508, -10403, -8735, -446, 444, -6353, 4008, -1462, -8906, -1161, -2395, 2442, 2204, -5472, -17376, 2471, -689, 1394, -3657, -2119, -769, 2872, 1393, -2701, -3536, 3650, -378, 859, -3338, 1412, 3010, -3243, -335, -3619, -511, -1931, -7126, -5018, -9332, -4440, 1906, -2265, 1386, 8072, -6576, -1300, 5458, -4894, 630, -7146, 2263, 810, 2968, 1124, -2219, 2292, -3914, -1836, -6683, 1511, -2755, 1396, 2425, -23842, 2249, -53, -891, -1678, -1766, -1788, 502, -4210, 211, 10376, -5507, 837, -6196, 2132, -472, -10153, 7234, -1456, -148, 4886, 2427, 2371, 1234, -962, 6298, 1016, 1735, -566, -878, -8071 }, .cb1110s1 = { 2525, 12164, 4861, 9505, -7371, -414, 3002, 576, -347, -998, 2861, -804, 3034, 810, -788, -539, -2092, 4970, 1828, -2869, -2802, 6649, 3673, -193, -4034, 722, 1642, 3792, 8770, 10428, -3303, -3849, -4520, -234, -4190, -1219, -1300, -4128, 8384, -1150, 1578, -6174, -1072, -4871, -8180, -6698, 3806, -7386, -2545, 1052, -550, -1148, -1308, -8834, -2654, 1982, 8716, 6579, 1360, -2404, 1893, 2680, 3801, 11097, 1455, 2453, -7585, -7503, -12710, -420, 2023, -656, 1124, 2872, 9676, -4309, -202, 1458, -6526, -534, -1535, 924, 3068, -1142, 5073, 1284, -5632, 869, -1637, -2898, 4900, -10202, -10488, -1097, 1890, 11006, -44, 1368, -1979, 6507, 316, 961, 8, -4085, 2561, -2034, -1077, 2594, -465, -5134, -868, 54, -6694, 9608, -3516, 7165, 11011, 9542, 4780, -2800, -1130, -1714, -2684, -369, 4746, -2688, 4146, -7652, 984, -3263, -276, -9134, -2848, -3983, 9994, 
3608, 3234, -596, 263, 3102, -178, -2264, 3820, -4293, -5752, -3577, -3914, 1095, -1562, 22110, 4610, 69, -2999, 254, 2178, -2901, -1203, -1292, 2642, -3254, -1389, 2955, 1340, 542, 810, 1369, 3208, -795, -3272, -2717, -1129, 8781, -6854, -3028, -616, 729, 529, -6946, 1621, 9574, -14909, 5398, 854, -774, -9978, -5417, -2516, -4683, 5715, -66, 3336, -5040, 640, -7566, 3494, 7016, -2269, 1376, -13994, 6448, -3948, -1697, -3988, -6559, 2376, 4231, -3131, 2045, -2417, -5919, -7016, -1695, 9046, -7966, 5187, -2553, 1402, -2351, -220, 5931, -1823, -2270, 584, -3784, 2924, 6166, -3035, 2370, 4923, -1080, 682, -7899, -10827, -1824, -908, 1568, -3565, 4033, -4266, -1948, 923, 5488, -203, -2396, -907, 2783, -3278, 1415, 7710, -190, -5208, -2279, 1266, -1132, -3392, 10251, -1064, 11283, 2162, 2213, -5088, 4479, -4658, -1803, 1534, -4233, -4073, 6938, 3966, -4878, -332, 5961, 9217, 488, 6520, 4430, 7988, 9383, -2586, 1206, -6983, -873, 1251, 1849, 5945, -2144, -2032, -1852, 416, 3720, 2419, 8462, 3173, 11524, -2894, -5517, -211, 17830, 3170, 1098, -721, -2066, -1956, -3097, -1061, 2815, 447, 701, 449, -485, 2609, 1239, 2257, -1760, 3091, 7538, 3710, -2689, -3092, 6903, -2457, 3271, 6355, -1486, -828, 1994, -3575, 3949, 3185, 2606, -4912, -16039, -8833, 1831, 2580, 1993, -1117, -3408, -7590, -7278, -141, 2696, 805, 1896, 308, 378, 9308, 2894, -4324, 1042, 837, 4716, -4702, 2493, -5173, 8616, -468, -14829, 3759, 3251, -4237, -1340, 5224, 2099, -764, -8263, -1699, 76, -1464, 2115, -582, 3286, -3653, 1017, 1696, -1414, -668, -9748, -5730, 2413, -1270, -6070, 17002, 2164, -5440, 1801, -2123, 800, 2135, 4801, -887, -2141, -647, -4846, -463, -577, -1846, -555, -1929, 2046, 8272, -8399, 3886, -5950, -4202, -12600, -2805, 477, 65, 6140, 1089, -4737, 8967, 1952, -1968, -3660, 6641, 850, -3304, -1775, 4010, 10819, 14365, -696, -1331, -1724, -237, -3611, 244, 3005, 4349, -182, -4124, 2466, 2746, -61, 3391, -1392, 3788, 1582, 3723, 7140, -2207, -3678, -2675, -252, -7476, 9426, -6196, 3226, 
3554, -6326, -4284, 6346, -4432, 5199, -2633, -2499, 1200, -1140, -3910, 6624, 16732, 5946, -766, 2630, -1200, 1988, 5510, -1199, 4126, 1287, 454, -1795, 2664, 5001, 1058, 500, -437, 2992, -2012, -160, 796, -4846, -6572, -10088, 603, 483, -4510, -12799, 3502, -1784, 3510, -3956, 6038, 9044, -6029, 7170, -1608, 120, 914, -200, 3939, -6274, 3020, 6235, -2754, 5368, -1693, -6028, 386, -2006, 1898, -11704, -9973, -525, -2624, 1799, 4140, 3248, -57, -3731, 3764, 5582, -3830, -2484, -2066, 1517, -900, -8250, -8191, 2676, 1147, 6752, 6908, 1196, -2634, 3408, 2980, -1042, 3971, 632, -4946, -5690, 133, 2445, -446, -1294, -777, 3356, -5628, -6020, -8042, 5069, -1421, -2701, -15117, 3074, -912, -2574, 2643, 5252, -2118, 3849, -3793, -850, 4170, 6240, -697, 6976, -3752, 1155, 7769, -8912, -7728, 4224, -2362, -3760, 3688, 2402, -3411, -3165, -2550, -8, -209, -334, -837, 5688, 3425, -4564, 9999, -4780, 3093, 4346, -5556, 1636, 1755, -14696, 1810, 6547, -60, 4054, 10539, 6118, -4414, 1760, 3581, -841, 4471, -23, 180, 259, -4439, -13230, -1326, 1913, -621, -1641, -2882, -4934, 516, -3886, -4468, -110, -4526, -5157, 7550, -4449, 813, -4364, 1768, -8829, 2003, -1372, 1873, -209, 1539, 1076, -12408, -1464, -1878, 1563, 2020, 704, 1425, -275, -3718, 4618, -1120, -5057, -3590, 4022, -1977, 620, 143, -2507, 3697, -3263, 616, -3002, -3347, 21051, -4398, 364, -1924, 284, -2724, -2297, 4916, 2702, 4866, 4293, -2781, 1094, -1525, -562, 5487, -2098, 4658, 1362, -597, -3426, 3173, -5174, 3922, -3844, 1482, 4711, 5853, 1490, 5499, -17537, 956, 544, 268, -4782, -504, -4003, -911, 599, 1746, -7322, 1907, 1990, 16985, 3171, -2645, 1040, -7239, 5618, 304, 3606, -3377, 3630, 7319, 108, -496, 1026, 3062, -392, 2366, 1948, -530, 806, 2700, -2676, -2717, 5238, -16008, -823, -264, -1560, -1014, -760, -3684, -330, 5644, -1668, -10239, -2583, 7411, -593, 2193, -1479, -2892, 3834, -3625, -12234, -1103, 1868, -5121, 3879, 2748, 1936, 2026, 4572, -6037, 3310, -8678, 11724, 5290, -2316, 4131, 834, -3915, 
869, -1734, -5752, 1255, 9534, -3625, -115, -5912, -125, 2298, -1494, 5910, -496, -2719, 1320, 3175, -3012, -3906, 4602, -4760, -5918, -2568, 6632, -8802, -5876, 6358, 2349, 207, 5191, 8369, -5932, 2710, 7950, 3673, -2592, 1311, 8384, -4360, 8614, -5662, 1180, 2147, 1044, 1591, -5555, -1597, 4418, 38, -1579, 4675, -1725, -1693, -6470, 3066, -7601, -12822, 524, -2986, -3406, 8860, -1266, -930, 4316, 1171, -2908, 199, -1785, -2851, -3588, 3072, -3585, -2668, -1123, 1508, 460, 6780, -19480, 2854, -1574, 1004, 5074, 1907, -1988, 1177, 74, -1436, 2224, 1232, -3008, -3454, -862, 604, -653, 2778, 2349, 3242, 8426, -430, 3684, 4814, -1886, 5118, 1487, 442, -2322, -900, -2854, -234, -10350, -7922, -745, -1490, -5638, -6014, -4079, -2979, -351, 9493, -2274, -11362, -8166, -7364, 8261, 1554, -1722, 4651, -831, 2276, 1502, 2600, 1266, 4456, -4145, -3837, -3584, 4242, 4058, -2395, -6971, 4486, 3233, 6226, 1306, -11506, -6223, -5132, 1537, -4407, 1510, 5732, 2808, 5817, -4972, -2900, 897, -2441, -1819, 5651, -6988, -10063, -2288, -5820, -1250, 925, 3120, 6125, -9901, -137, 3684, -6601, 1077, 3272, 21, 3341, -838, -3643, -1727, -4417, 660, -6551, -184, -8125, -1780, 5232, 6077, -7968, 6423, 3823, 3026, 4555, 464, 3318, -5504, 837, -3571, 3853, -2277, -1864, -742, -5380, 6096, 6856, 1076, 877, -642, 1926, -4712, -14482, -3323, -2672, 7485, -2116, -3932, 2233, -3270, 326, 2221, 132, -1893, -748, 453, 3597, -2308, -4371, 5632, 3609, -1033, -444, -2591, 17359, -3120, -2604, 3157, -370, 9242, -1606, 2675, -853, 1475, -416, -3280, -1159, 191, -3670, 282, 4282, -957, -2978, 3564, 91, -20520, -3046, 1248, 1277, 3368, 1118, 311, -598, 1406, -2377, -1444, 1417, -3626, 167, -6440, 3341, 629, -2523, 4398, -1187, 4322, -383, 1934, -3298, -8530, 2195, 5220, 510, -1256, -6932, -1061, 5141, -16242, -1390, -546, -3760, -2029, -929, -6044, -3503, 312, 8478, 701, 8865, 4715, 1987, 1342, 1400, -71, -5229, -1547, -8827, 2349, 12836, -1479, 4621, 6003, -6749, -3184, -5667, -2930, -1074, 3204, 330, 
4692, 2872, -10808, 75, -1260, 18003, 4100, -1462, 1391, -1667, -2039, -687, -4806, 5913, 2682, 7730, 7034, 2703, 1666, 120, 1601, 2123, 1402, -4702, -11229, 7875, -5591, 4634, -2274, 3015, -597, -7520, -1095, -4814, -173, 5562, 1533, 2807, 8466, 5195, 7806, 2585, -2877, 6938, -3942, 402, -3825, 4162, 9149, -6423, 2447, 7041, 2932, -9813, 2124, -58, -3, -12856, -7973, 1484, 907, 180, 8042, -2124, 4356, -4117, 1126, -9706, -2101, 3957, -1877, 1139, 7148, 3707, -1341, 4509, -1220, 4570, -1650, -6504, 7036, -10268, -328, 4678, -12205, 5062, 6089, -496, -7740, 2207, 4489, -205, 1386, -2695, -1442, 4730, 892, 12061, 3818, -3305, 4431, 9300, 3470, 4608, 4315, 892, 866, -1714, 1529, 2569, -11398, -3068, -282, 1626, 587, -1568, -1630, -220, -2033, 7141, -2732, -3541, 3404, 15514, 1883, -2697, -926, 5972, 6485, -6794, 2111, 2490, 1201, 5467, -2352, 3264, -97, 2400, -728, -3364, 3417, 1481, 2862, 462, 2855, -5233, 5740, 7208, -10508, -3254, 1450, -1270, -293, 3400, -6978, 10035, -1213, 4308, 2641, 8579, 8518, -2919, -351, -459, -2069, -617, 638, -1347, 107, 6009, 2035, -280, 2009, 3280, -1236, -14960, -5177, -2440, 965, -2646, -2095, 5274, 1825, 3705, 3831, -446, -4018, 7178, -2415, 4344, 1850, -509, -500, 1056, -4374, 5709, 1336, 3352, 7915, -2302, 12209, -14362, 6429, 1423, 2912, 6474, -1599, 1116, 2280, -1738, 3108, -5792, -3554, 623, -1110, -6114, 4488, 8941, -3176, 13670, -3320, -327, -2657, -7349, 3782, -1481, 5737, -200, 2968, -9474, 5752, 5056, 4688, -5352, -432, -906, -3832, -8519, -7, 3667, 3583, 6250, 8724, 10737, 9371, 950, -1630, -10740, 5788, 4111, -2910, 437, -2482, 1910, 185, -2168, -3155, -3515, -1754, 4978, 4298, -6921, 476, -2778, 546 }, .cb1110m0 = { 3666, -1078, -175, 1370, 2491, -10050, -685, -7617, 4002, 11104, 903, 5948, 2821, 3050, -2465, 1151, -848, -2139, 12321, -1408, -1469, 2046, -2693, 2479, -3498, 3077, -3822, 1841, -2404, -11172, -407, -3062, -1725, -5475, 597, 1924, -197, 434, -1648, 2678, -2462, 1148, 599, 1284, -13171, -949, -6508, 754, 
7466, 5924, 1411, -536, 10825, 588, 297, -310, -593, -896, 784, -242, 716, 501, -52, 4043, -755, -690, 2630, 17762, -2159, 2126, 954, -1316, 11129, 1570, 387, -2639, 13953, -311, 5231, -2297, -3612, -678, -1117, 690, -279, 2403, -1541, 493, -1692, -2048, -771, -933, 423, 700, 840, 739, 1956, -944, 612, -2678, 101, 245, -786, 850, 269, 1355, 21773, 463, -2589, 596, -519, 788, -43, 1220, 10674, 4847, 1192, 335, 875, -106, 10644, 2600, 5391, -262, 2296, -5928, -1072, -122, 2504, 1313, 1117, -981, 350, 375, -810, 8, 1462, -2020, -2368, 8, 22663, 1537, 87, 908, 832, -4884, 312, 620, 1042, -4444, 660, 1582, -2710, -2954, 10012, -9580, 8102, 5696, -1371, -3035, -3347, 402, 218, 1096, -1924, 88, -2270, 4175, -1083, -497, -2437, -3332, -824, 212, -2362, 4600, -7800, -11501, 7795, 236, -1336, -12920, 705, 4532, -1488, 11746, -3213, -2650, 2524, -2638, -128, -328, 3402, 453, -242, -2500, 2224, 708, 450, -3014, -132, 1251, -131, -831, -710, -21985, 222, -2132, -3261, 490, -3020, -860, 2550, 892, -623, -3666, -664, -131, 2018, 2817, -12005, 496, -610, -7238, -3909, -2867, 6872, 1903, 848, 6644, 3812, -5686, -4055, -377, -2096, -10247, -1068, 1486, 415, -253, -2186, 1050, 771, -6856, 1044, 7466, 2953, -7514, 1601, 7015, -1778, -1622, -3364, -1755, 2835, 176, 2700, 991, 2560, -554, 4867, 1571, -5610, 2610, 12438, -3751, -9964, -2753, 4856, -2595, -5423, 10025, 812, 687, 2715, 4013, 3086, -12039, 328, -3992, 4044, -3920, -111, -553, -1720, 2454, 1706, -1365, 804, -32329, -471, 897, -4670, 780, -3680, -1409, -2630, 20, 184, -157, -290, 2794, -546, -160, 1564, 1146, 628, -4787, -239, 11233, -492, 1955, 608, 9273, -3220, 3830, 390, -5982, -3342, -3384, 2356, 1820, -3473, 979, -40, -20190, 47, -200, 5106, -381, 1824, -197, 2280, 2434, -2633, -1409, -1109, -1072, 857, 1554, 7459, 6, 12130, -1078, 1038, -300, -13748, 3201, -762, 2670, -1051, -445, 914, -172, -558, 2634, -1158, 3129, -74, -3415, 1086, -8892, 118, -647, 285, 186, 3022, -5077, 1342, 3453, -7991, -65, 4690, 944, 3717, 
-1909, -9783, -367, -1699, -772, -32768, 1286, -408, 340, -340, 430, 1274, 596, -109, -727, 276, -946, 139, 1804, -1050, -3562, -1392, -1179, 257, 1639, 25708, 2278, 2415, 2174, 153, 126, -60, 592, 994, -334, -268, 1826, -306, -2241, 2774, -3188, 758, -450, 8023, 542, 6819, -1712, 14195, -2198, 281, -12, -590, -1153, 4568, -3676, 1973, -5221, -1839, -603, 3324, 2492, -3070, -846, 123, -1184, 667, -10886, -65, -2615, 971, 10219, -1245, 7378, -2122, -2306, 571, -2298, 1958, -4356, -9210, 4321, 2805, 1888, 11129, 1282, -5819, -2528, -873, 1123, -5968, -2644, -5515, -2151, -944, -7712, -2007, -2260, -1920, 2100, -325, 153, 1050, 10, 1462, 650, -12559, 3530, 754, 4493, 1528, -6991, -4842, 1483, -2408, 2785, -1651, -830, 1433, -2464, 18899, -1891, -3137, 996, 2485, 3056, -1061, -4015, -2282, 1356, -2572, -490, 1209, 1137, 4, -636, -1282, 1001, -1190, -172, -14049, -4256, -1972, 2225, -4738, -1054, 5254, 8113, 4294, 36, 11765, -3993, -1084, 3864, -3016, -10356, 353, 2963, -1228, 536, 609, -343, 1246, 3617, -3667, 4794, -20360, 473, 725, -1246, -1649, 1900, -2589, -2869, -2550, -886, -1164, -1876, 307, 3784, -4782, -476, -700, 2118, -1860, 1533, -5013, 2356, 3305, 3338, -14312, -1278, -322, 1950, -954, -1990, 1438, 3358, 7479, 3046, -6677, -3078, 1717, 3113, -12484, -1302, -221, -510, 10423, -3497, 4170, -3606, 6983, -2902, 458, 667, 566, 2415, -403, -2898, -44, -1832, -110, 1799, 1172, 7, -1534, 90, 686, -26902, 1601, -822, 658, 182, -151, 345, 1488, 1416, -272, 1560, 9774, 2084, 16, -14344, 1428, 514, 2658, -1312, 2095, 454, -1783, -2056, 4529, 1154, -2239, 956, 668, -1396, -2898, 405, -12659, -12556, -650, -587, 3461, -2470, 0, -3156, 3186, -4104, 1729, 1438, -1842, -422, 4476, 1945, -932, -1439, -702, -1398, 3349, 1876, -999, -2086, -17879, -432, 4036, -2299, 1133, 88, -2221, -2730, -938, -998, -132, -426, 2084, 2060, -1134, -313, 402, -538, -2593, 2022, 725, 1566, -2070, 21622, 1767, -424, -32672, 205, -1239, -3253, 198, -1257, 2342, -1918, 1505, 452, 1348, -604, 978, 
1079, -4, 2476, -1247, -146, -861, -1928, -12222, -13042, -1384, -1971, -1428, 1224, -639, -83, 1034, 3488, -2310, -565, 74, -335, 2774, 602, 872, -2132, -147, 2160, 244, 162, 12600, 628, -10194, -1296, 1068, -1824, -4945, 3194, 2066, -895, -784, 2347, -1982, 73, 1030, 12589, -62, -2272, 3827, -1776, 2546, -1417, 3310, 4726, -3078, -548, -8522, 1632, -6667, 1008, 1128, 805, 954, 616, 499, -31526, -1327, 790, -190, 1058, -1157, 1432, -16, 411, -3180, 827, 327, 914, 1716, 1442, 1052, -1635, -1805, -4145, -13678, 3597, -2273, -5920, 3592, 1136, -211, 717, 3901, -5132, 3036, -601, 12976, 1633, 10316, -1674, -468, 905, 2331, 841, -247, -6053, -593, -3281, 4291, 5159, -1053, -1814, 2613, 2221, 1146, 871, -421, -542, 923, -3567, -1138, 10051, 10860, -6121, -661, -5677, -890, -266, 2100, 6223, -70, -2658, -78, 3424, 714, 2138, -1355, -981, 1990, 772, 938, 1311, -1963, 924, -22516, 260, -341, 1251, -1578, 23, 1375, 1068, 2688, -3965, 713, -5342, -257, 37, -6034, -276, 228, -1240, -7171, -3402, -14677, 1708, -317, -2880, 874, 1466, 524, 2091, 565, -4220, -265, 52, -3373, -220, -3175, 2646, 448, -1628, -1986, 2200, 3722, -15752, 7120, -2036, -2170, -627, -1079, -4060, 2257, -925, -3418, -13488, -1308, 3476, -783, -3924, -820, -860, 2418, 2982, -8753, 9001, 294, -11915, -969, 3329, -761, 1459, -5308, 1811, 379, 306, 632, -2732, 2512, 1188, -3470, -2167, -572, -2274, -1657, 24074, -159, -138, -1826, -2527, -3117, -906, -1770, -1182, 1240, -3064, 2313, -790, 336, -3843, -13384, -423, 13066, -14, -1908, -32, 2607, 487, -2426, 195, 135, 2742, 1540, -1034, 856, -2288, -287, -774, 497, 1760, 191, 178, 298, 38, -30898, 801, -1456, 2311, 1272, -1845, 334, -933, 183, -1614, 739, 1881, -13548, -13589, 1496, -2075, -1281, -1510, 108, 3683, -1120, 752, -980, -277, -1289, 2016, -290, 1838, -321, -139, -881, -12391, -14713, 1906, 990, -3202, 2320, 749, 1872, -2545, -1457, -1727, 734, -327, -316, 1062, -3149, -2959, 2210, 912, 952, 1926, -8918, 1098, 594, -1439, -1402, 11097, 3482, -472, 
219, -3845, -662, 9715, 3928, 1254, -2009, 12375, -1724, 13938, 1892, -1390, 686, 2174, 1010, -1297, -199, 1855, 463, 2601, 4408, 1978, 1679, -1614, -3, -11965, 16220, 828, 1497, -747, -484, 519, -1804, -3814, 3287, 2104, 1149, 478, -3918, 1504, 2376, -316, -520, -1449, -3918, 664, 2772, -16434, 334, -540, -778, -2812, -6026, -4392, -2446, 3479, 3742, -624, 3895, 1145, -344, 333, 11898, -2725, 12873, -1145, -1807, -279, -452, -1581, 548, -5180, -2012, 3411, 1188, -1407, -4016, -468, 1904, -1724, -11390, -30, 14402, 1610, -2138, 1249, 346, 6097, -1433, -655, -174, 3652, 4010, 954, -1458, -354, -1872, -2689, 880, -846, -1304, -1725, 1750, -1186, 1520, 499, -583, 18201, -1083, -3323, 3072, -5440, -182, 1065, -1112, -984, 2501, -529, 613, 2054, 460, -5245, 2827, -1445, -2403, -12898, 1504, -8428, -1035, -4620, 1704, -2586 }, .cb1110m1 = { 1442, 12425, -2072, 741, -3624, 12979, 2031, -364, 3750, -5082, -1968, 146, 670, -3988, -831, 3962, 397, 6213, -1178, 816, -88, -432, -9620, 11572, 194, 289, -1958, -2115, -871, 5372, -3145, 3612, 1644, 826, 525, -2545, -514, -537, 2485, -1014, 1276, 541, -936, -302, -1172, 183, 827, 23939, 1120, -346, -313, 2759, 3934, -3082, -2260, -906, -967, 1496, 102, -2782, 323, -1109, -37, 2554, -2920, 998, -930, -1952, -1138, 1842, -1593, 17345, -1214, -1065, 2182, -1169, 11745, 278, 8310, 1491, -564, 1169, 8406, 1359, -1249, -2094, -1365, 4069, 1828, 897, 1258, 1083, 4319, 610, 766, 2273, 4057, 621, 338, 1317, -20941, 548, -2012, 563, 1102, -27, 3007, 1129, -1068, 1282, -2939, 2983, 1958, 1800, 1912, 1728, -606, 1804, -4768, 5068, -1365, 4543, 399, -14152, -6206, 6187, -2205, 1174, -1892, -3284, -206, 2872, -2622, -43, 11268, -104, 292, -1836, -6276, 725, 2066, -604, 11382, -448, 742, 2854, -910, -838, -1802, 3678, -397, -530, -10647, 2356, 12161, 1506, 2649, -3335, 3128, 2169, 5942, 2152, 14124, 428, 187, 248, 1592, -44, -59, -2934, 1883, -923, 2673, -847, 150, -2142, -7620, 11078, -595, 6490, -13673, 948, 219, -1314, -3080, 1339, 11020, 
1362, 247, -1863, 1069, -3786, 1706, 1064, 320, 4535, 136, 3795, 1465, -1356, -449, 13, -421, 1769, 20470, 2181, -371, 2444, -744, 2263, -155, -688, -236, -4481, 1551, 2812, 2476, -1436, -470, -272, 2276, 594, -858, -978, 1122, 2468, -9350, -353, -1020, 494, 13167, 1770, 1734, -70, -4630, 12358, -818, -979, -3931, 1000, -4343, 2570, 5567, 3322, 2930, -236, -4796, 6987, -1658, 4291, 1118, 1710, -2050, -13566, -2, -23, 2104, 1101, -316, 1906, 1643, 340, 5940, 3180, -837, 1978, -10514, 1466, -6936, 3600, 1205, 957, -211, -8272, 1611, 5330, -5217, -2264, -5681, -3085, -9201, -62, 3366, 1370, -9494, 244, -5516, 1210, 2930, -432, -1265, 376, -1910, -1016, -845, 3228, 1094, -3168, 634, -265, -3426, 4367, -4004, -277, -15081, 3998, 9671, 3418, 691, 9124, -2723, 1939, 2311, 581, -4980, 3381, -1502, 878, -1037, 1496, 3002, 904, -5388, -3300, 263, 1277, -694, 766, 1781, 1134, 250, -32602, -285, 210, 2550, -383, 908, 302, 292, -352, 2615, -97, -1863, 1908, 2685, -502, -3767, 416, 990, -602, -1533, 43, 1288, 1326, 16638, 433, -1204, 1850, -1609, 1407, -7196, 2319, 5770, 1584, 1150, -634, -1686, 1359, -1396, 438, 246, 186, -11262, -1194, -3790, -3267, 2692, 755, 142, 16276, -2338, -1341, 10433, 38, -1510, -2520, -3205, 913, 3783, -1622, -4744, 1891, 2502, -8, -2962, 2091, 14986, 1270, 2931, 682, 1073, -10215, 1606, -1010, -822, 1168, -1403, 254, 1156, 3206, 3958, 1739, -402, -654, -4862, -1869, 2643, -2858, 658, -910, -2548, 5428, -1992, -208, 1950, -15526, 520, -4212, 3182, 4160, 1524, -2916, 586, 3213, 675, 185, -629, 669, -838, 502, -4065, 353, -4072, -1832, -2108, 5034, 2484, 15386, -2102, 4988, 70, 1011, 2568, 1360, -2821, 3352, -11074, -2686, 611, 460, 1811, 3093, 34, -9140, -1163, 26, -875, 2510, 1134, -1322, 2274, -960, -823, -510, 1092, 1490, 1466, -1978, 32767, -2379, -1019, -633, -1306, -242, 2050, 1336, -2668, -2195, -442, 8, 2292, 4344, -2439, -1472, 1035, -14443, -1820, 6309, -2096, 45, 3617, 1561, 1252, 2828, 10682, -894, 10841, 2373, -101, 913, 2160, 2653, 2960, 
-4433, 1193, 4892, -2123, -7911, 991, -2643, -1364, -3641, -9736, 444, 869, 2990, 926, -1220, -1676, 7492, 4376, -3742, -6964, 4531, 7522, -2686, 164, 1070, -7305, 1863, 542, 146, -800, 18492, -4849, -3876, 2162, 5111, 2606, 4243, -3035, -2990, -1710, -426, -5315, -2332, -1020, -268, -1242, -39, -1684, -32768, 1288, -726, -1768, 304, 702, -2969, -700, 586, 1541, -1099, -348, -2816, -2181, -1260, -1658, 2278, 323, -1548, 2513, 11816, -2416, -5837, -118, 6770, 3360, -4097, -264, -1270, 1064, -9862, -3669, -56, 603, -1475, 1464, -9553, 6, -3091, 5331, -396, 892, -2774, -4674, 3667, -9982, -5160, -1146, -4026, -2032, 2936, 1805, -1026, 1065, -420, -572, 1756, -479, -583, 30760, -732, 750, 270, -1541, 28, -1114, -96, -264, 1167, 548, 570, 84, -1981, -2110, -1136, 358, -6337, -257, -14658, 1144, -9032, 322, -3730, -3086, -1351, -3320, -4116, -396, -129, -3202, 1403, -347, 2400, -371, 532, 1555, -2760, 1078, 804, -1314, 21956, 2231, -2808, -1947, 838, 12428, -14514, -384, -1554, -675, -885, 1358, 1612, -3266, -98, 1876, -447, 2241, 3375, -1765, 2792, 674, -1513, -1132, -3696, 11368, -1916, -2778, -466, -377, 2090, 3897, 5422, -2550, 2360, 3279, 8657, 990, -2128, 2592, -970, -2397, -269, 22742, 694, 310, -2433, 920, -690, 1478, 1370, -450, 445, -1379, -1244, 2374, 1400, -1040, -5692, -1700, -1630, -4068, -1193, -719, -2953, -3562, 264, -13247, -4629, 5, 3245, -5724, 2449, 3190, -5375, -3560, -3834, 1271, 1568, -762, 2938, 782, -1390, 243, -466, 1376, 974, -1646, -1784, 249, -514, -13543, 1904, 10778, -772, -155, 7838, -30, 3634, -473, -9100, -112, -3990, -840, 1495, -2346, -326, 3655, 1292, -292, -10972, 3431, -262, 171, -9775, -985, 578, 312, -2553, 3375, -8316, 1410, -1326, 2459, -3116, 1079, 7194, 2720, 1998, 2742, 4672, -1589, -8932, -124, -652, -72, 2409, -926, -3661, -3762, 14832, -1350, -2234, 1258, -1604, 169, 103, 1263, -400, -765, 144, 824, 855, -13344, -1629, 1977, 2995, -1964, -650, -219, -11607, -6062, -792, -1243, -1438, 1757, 1436, -3739, 812, -856, -9603, 
-2428, -11372, 3273, -2318, -8263, 1551, -2054, -3646, 3149, 2255, 594, -412, -3030, 1558, 694, -1211, 618, 3256, 6526, -1572, -9054, 6655, -3208, 3616, 2162, 3137, 4254, 4610, -10040, 1188, 335, -615, 640, -1990, -314, 6014, -2392, -2174, 343, 6730, -1320, 183, -97, -3566, 2988, -13343, -1573, -9070, 428, 2839, 6728, -1109, -1113, -1102, 5012, 1308, -3943, 3207, 764, -2928, 1144, -3044, 4033, 1846, 6460, -4165, 8509, 9824, 15708, -642, 748, 124, -406, 13033, 807, -299, 1319, 1499, -1206, -1102, -3129, 3795, 47, -2483, -2470, 2287, 4028, 1656, -364, -1712, -1568, -3940, -2770, -13688, 796, 3380, 363, 1673, 1160, -3934, 2884, -5060, 832, 4799, 364, -3030, -10596, -1805, -3256, -2492, -1831, 1088, 11108, 3236, 5128, 3052, 4486, 84, 2078, 200, -4071, 1713, 1539, 24597, -1019, 32, -48, 82, 81, 1171, -1261, -1783, -1693, 2194, 1714, -225, -1989, 402, 2611, -708, -15901, 222, -507, 12855, 1162, -1536, -2884, 1911, -1256, -926, -1875, -1448, -2730, 3059, -1231, 1680, 1824, 1288, -215, -9, 40, -957, 27662, -1844, -1927, -846, -1144, -439, -3507, -2844, -1880, 637, 1042, 237, 1007, -387, -11913, -2584, -142, 624, -494, 1439, 2225, -13017, -1901, -1253, -1071, -7083, -2154, 814, 3867, 1130, -2611, -2260, 1548, -12389, -1018, 102, 1178, 1058, -14863, 2020, 4094, -1259, -861, -886, -3119, 2638, 1725, -1364, -2086, 183, 507, -978, -3086, -14966, 759, -1341, -70, 8538, 2974, -140, 2509, -4460, 2724, -1372, 491, -6138, -345, -2170, -1187, -330, -11090, 15657, -300, 2105, 496, -2093, -447, 2000, 3451, 1482, 758, 4142, 562, -4042, 1491, 3183, 1685, -2729, 1611, 11698, 14918, 25, 842, -2766, -667, -1564, -2619, 646, 1391, 862, -909, -2141, -589, 1468, -755, 1324, -765, 634, 195, -19622, -1006, -1161, 2434, -1808, 4168, 4108, -2580, -635, -2533, -2170, -3701, -1047, -363, 769, 5064, -8, -654, 2346, 752, 13736, -4056, 7, 5492, 7326, -4894, -3860, 3325, -3947, 4721, 5557, -3699, 194, -12957, 1052, -1317, -2642, -2931, 1050, -3951, 2392, -9683, 2519, 2880, -3700, -1820, 831, 4370, -1177 
}, .cb1616l0 = { -185, -20290, 476, -272, 31, -638, 806, -61, 220, 176, 178, -788, -441, -333, -360, -263, -116, -512, 9794, -727, 8904, 1192, -277, 756, -670, 795, -311, 240, -617, -675, -970, 756, 857, 529, -166, 674, 890, -522, 837, 79, -618, -1308, -13832, 744, 5422, 2688, 531, 398, 1500, -1965, -209, -346, 613, 2147, 10053, -1398, 189, -108, 471, -1202, 999, 178, 762, -601, 1116, 9468, -281, 763, -1204, -822, -20, -160, -806, 14720, -269, 143, -1362, -532, -788, -1532, -405, 85, -271, -4959, 276, -34, -28, -66, 112, -188, -582, -678, 128, 680, 982, 596, 12154, -10468, -167, -380, 734, -296, 282, -223, -86, -342, -812, 514, 387, -418, -364, -1216, 14, 373, 357, 10897, 11235, -714, 206, -618, -607, 596, 190, 726, 496, -300, 95, 1022, -153, 212, -540, 252, 281, 238, -234, 28, 24, 184, 32767, -627, 569, 323, 486, 544, -348, -589, -284, 238, 228, 475, 83, -7753, 182, 745, 400, -633, -207, 137, 382, 90, 78, 715, 448, 463, 937, 10203, -12047, -667, -370, -1516, -360, 94, 832, 1027, 1013, 92, -5446, 834, 302, 764, -94, -462, 8095, 1057, 308, -635, 308, -877, -946, -616, 51, 1090, -13351, 490, -819, 15182, -384, 411, -546, -242, 460, -323, 76, 277, 1582, 900, -1119, 345, 1316, 1138, 2020, 1612, -148, 812, 1241, -10350, -9495, -965, -69, 1967, -168, -128, 1042, 447, 491, -133, -5083, -450, -164, 50, 326, 269, -283, 226, -40, -334, -110, 60, -47, 169, 9166, 1188, -942, -14, 2112, -230, 634, -741, -214, -336, -606, 3102, 59, 216, 1805, -1176, 211, -8, 564, 156, -261, 300, 597, -21842, 66, -232, -506, -1126, 1057, 603, 1448, -391, 249, -9445, -10240, 694, 167, -1158, -645, -385, -209, 330, 519, -345, -600, 192, 78, -229, 208, -9053, -383, 10646, -264, 84, 295, -148, 87, 1292, 257, 1080, -564, -2395, -1200, -484, -48, -513, -383, -11, -516, -17356, -1172, -218, 124, -327, 31, 328, -80, 231, 58, -951, 560, -501, -392, 30528, -56, 382, -515, -50, -155, 338, 0, -414, -899, 95, 11, 378, -350, 459, 673, 76, 86, 379, 32222, 143, -48, 425, -394, -60, -348, 450, -489, 220, 56, 
1129, -125, 322, 168, -16, 322, -293, 294, -38, 328, 141, 692, -82, -160, -32768, -140, -1543, 1079, 1052, -924, -569, 168, -1782, 815, 706, -1318, -3436, 2860, 10922, 236, 10311, 882, -1911, 11, 1638, -189, 245, -858, 11060, -826, 696, 224, 1707, 1766, 472, 10832, -265, -161, 163, 478, -258, -284, -86, 496, 425, -71, -10344, -141, 425, -1457, 1145, -63, -713, -583, -327, 628, 368, -18, -1746, -525, -338, -110, -359, 92, -233, -21328, 460, -275, -98, -58, 51, 208, 56, -1145, -51, -242, 65, 76, 214, 141, 28, -86, 26, 925, 193, 9980, -326, 11342, 176, -534, -303, 130, -1575, 189, -496, -699, 381, 411, 644, 229, -147, 694, -1998, 523, -1576, 8028, -10385, -1924, 1174, 608, 2402, 575, -1753, 437, -816, 1267, 147, 1448, -614, 865, 1076, -156, 5000, 2020, 2021, 10283, -460, -2381, -3226, -3991, 4904, -284, 105, -268, 1049, 203, -646, 732, 6490, -128, 932, 10, -866, 74, -64, 834, 204, 159, -162, -170, -110, -28908, 52, -512, -72, 327, 615, 534, -484, 131, -262, 31, -407, 284, 33, 11118, -170, 318, 12848, -1126, -659, 500, 310, -403, -234, 237, -544, 1232, -243, -1178, -965, -117, 108, -1304, 11728, -2254, 1231, -1077, -136, -632, -103, -256, -1644, -300, 1680, -1175, -956, -43, 1718, 175, 144, 275, -802, -223, 1116, 321, -871, -1174, -1175, 1008, 255, 31172, 28, -621, -222, -12473, -10995, -712, 247, 1762, 418, -181, 90, 92, -406, -435, -105, -596, 2262, -116, -1574, -3402, 6796, 7944, 973, -2661, 2260, 621, -6984, 382, -1375, -2604, 1550, -1453, 1133, 966, 403, 284, -72, -36, 174, 457, -90, 38, -437, -476, -370, 469, 32767, -267, 350, 694, -169, -782, 2110, -620, -782, -669, -6478, 10550, -3294, 485, 177, 553, -3232, 1628, 2335, -870, -360, -1112, 2197, -474, -5113, 3346, 878, 566, -3823, -1175, 357, 10509, 1077, -514, 1012, 38, 59, 669, 654, 349, -1046, 355, 192, 57, 95, 11869, -702, 10201, 204, 45, -608, -444, 921, -1070, -316, 1286, -2566, 2026, -127, -79, -954, 93, -1288, -10024, 693, 8820, -366, -84, -6378, 1682, -627, 386, 254, 503, -152, -336, 38, -341, 373, -85, 
1088, -1707, 119, -242, 242, -326, -162, 109, 70, -114, -831, -279, -32768, 62, 58, 214, 136, 194, -103, -10047, -610, 91, -310, 12059, 346, -656, 986, 478, 364, 1777, -173, -663, -103, 1011, -373, 200, 1632, -13098, 3651, 418, 478, 68, -217, 169, 78, -1176, -1191, -1664, -328, 152, -1053, 547, 527, -10435, -176, 11131, -137, -36, 1062, 33, 71, -730, 2080, 2061, -372, -637, -84, 744, 109, -357, 550, 309, -239, -134, 135, -20461, -177, -690, -488, -36, -415, 275, 64, 378, 11250, -802, -569, -200, 1499, 13103, -1090, -175, 189, -162, -751, 1052, -949, -98, 1249, 479, -1304, 3293, 771, 1642, -381, 1423, 2258, 1184, 4806, -10950, -3873, 348, -815, -5315, -3306, -3307, 2337, 776, -125, -48, -435, -155, -30, 294, 116, 96, -47, 1022, -391, -183, 252, 826, -32, 293, -1369, 18310, -146, 239, -266, 34, -154, -704, -498, -135, 228, -563, -210, -158, -514, -201, -571, -341, -428, 74, -152, 297, -162, -644, -216, -252, -13810, 705, 464, 21097, 74, -169, 792, 12, 131, 320, -398, -446, 44, -362, 388, -22, -13, -209, 1205, 9341, 590, -683, -351, 177, -1618, 495, 14, -319, 755, 11352, 249, -989, 1574, -922, -364, 366, -10348, 337, -558, -124, 12056, 102, 802, -548, -254, 1532, 7, -282, -459, -839, 171, -4445, -1610, -1515, -37, 970, 306, -881, -238, -154, -58, 27, 435, 166, 571, 225, -844, -9967, -192, -874, -459, -1283, -1431, 1552, -38, -686, -207, 709, 11982, -383, 1922, -92, -60, 708, -900, 867, 39, 1470, 517, -182, -456, 90, 1026, -192, 9988, 942, 48, 789, 981, 74, -692, -1283, 1239, 1625, -1121, -286, -1115, 294, 13228, 980, 312, -745, 11711, 1055, 1052, -907, 201, 688, 364, 1171, 96, -591, -981, -246, 875, -352, 677, 881, -397, 12890, 10, 0, 412, 76, 464, 275, -721, -28, -197, 104, -238, -372, -272, 490, 1426, 963, -13232, -1190, 790, 161, -321, 1138, 646, 359, -183, -659, -129, 348, -22703, 1016, -147, 26, -80 }, .cb1616l1 = { 292, 310, -255, 305, 69, 25001, -16, -668, 210, 17, -12, 45, -758, -76, -544, -882, 61, 26, -1682, -8820, 154, -11775, 64, 472, -464, 245, 478, 
-1560, 869, 2192, 98, 645, -95, -9369, -594, -635, -11132, 900, 1606, -904, 841, 2570, -1464, 961, 1056, 669, 461, 3307, -157, -644, 121, -694, 170, 116, 393, 1507, -233, -654, -162, 108, 98, 17471, 347, -11344, -701, -284, -246, -337, -1903, 14, 9865, 453, -11318, -3662, 2373, 1106, -1424, -1709, -2743, -860, 11008, 1579, -38, -1381, 467, -487, -1306, 369, 426, -424, 128, 1078, 1085, 683, 12552, 792, -184, -278, 186, 2006, 363, 310, -75, 862, 377, 490, -256, -1568, -124, -10785, -1456, -524, -1259, 517, -1844, 914, 769, 945, 739, -1053, -691, 177, 96, -1070, -162, -707, -594, -9885, 103, 452, -734, -6774, -753, 192, 88, -292, 201, -532, 231, -281, -691, -1232, -1768, -753, 369, 1556, -139, 668, 941, 264, 10372, 9740, 976, 2519, -88, 941, 446, -130, 2131, -631, 325, 285, 176, -236, -634, -91, 112, 32767, -233, -726, 156, 881, -217, -497, -236, -1106, 283, 164, -328, -629, -27442, -17, 176, -338, -192, 538, -773, 634, -180, 872, -190, -530, 586, 6994, 3060, -336, 736, -1268, -1142, -69, -1359, -1047, -975, -86, 12489, 1162, -509, -478, 717, -514, -502, -1755, 11064, 7668, 340, 230, -127, 1490, -63, 680, -297, 125, 1700, 2505, 3, -2043, 255, 1547, 569, -2483, 733, -896, 881, 4780, 1544, -13442, 1328, 1937, -4448, -384, 749, 173, 7350, 156, -144, 52, -527, -34, -3, -173, 118, -528, -75, 39, 42, -874, -14636, 474, -413, -106, -115, -431, 54, 722, 156, -468, 369, 149, -68, -791, 1318, 2150, 69, 454, 19032, 3, 111, -40, 349, 88, 385, -54, -395, -224, -519, 0, -219, 179, -253, -11379, 11005, 1857, -126, -248, 304, -616, 351, 324, 500, 1494, -1390, 2349, -1257, -1114, -213, 8156, -2066, 9746, 763, -848, 349, -7, 723, -966, 469, 91, -252, 1336, 579, 1816, 1372, -941, 364, 276, -33, 7, -425, -433, -21, 546, -671, -31271, -926, 101, 147, 302, -552, 224, 568, -2386, 519, -458, 13171, -1464, 1161, 639, -10, -877, 331, 3372, -72, 5158, -706, 906, 2668, 1008, -2732, 3264, 105, 630, 673, -1948, -196, -13130, 1726, 737, 4829, 93, 654, 2175, 3858, -5, 245, -471, 369, 5435, 356, 
-12934, 61, 1984, 975, 706, -2454, -642, -93, -780, -443, -1487, -460, 1112, 385, 309, -10268, 197, -1692, -1870, 50, -1934, 5380, -1193, 775, -493, -992, -557, 2952, 408, 4616, -1341, 10774, 5305, 854, 3031, 67, 617, 2436, -2072, -1469, 804, -578, 243, 264, 9150, 200, 10753, -350, 182, -52, -406, 508, -761, -161, -1142, 25, 484, 127, 126, 477, -341, 110, 371, 32767, 1090, 678, 175, 146, 1020, -897, 878, -137, -507, -534, 658, 678, 505, -753, -207, 391, 60, -23279, -772, -1323, -1578, -3, 196, -749, 220, -482, -785, 456, 38, 1034, -579, -58, -1539, 421, -746, 238, 1531, 21290, 586, -441, -276, 1512, 553, -1407, -276, 60, -1068, 299, 650, -25, 12590, 2058, 925, -295, -1744, 5152, 4935, -419, 272, -383, -665, -194, -255, 574, -267, 541, 1031, -282, -648, 622, -1464, -28, -269, -533, -80, -476, 282, -336, 125, 104, 464, -8948, 849, 171, 1518, -296, 51, -27, 3097, -5103, -412, -494, -194, -713, -1277, 102, 1740, -445, 3432, 1180, 6404, -10908, -970, 31, 142, -242, -79, -78, -76, 124, 1031, 83, -55, 1522, -613, -32768, -394, 1306, 287, 701, -4725, -1085, 415, -122, -538, -675, 82, 116, -728, -99, 500, 659, -329, 292, -106, 9243, -340, -11933, -498, 341, -825, -401, -402, 142, -13, -309, -722, 141, 0, -681, 494, 671, -1210, 1466, -1335, 11743, -280, 1616, -11481, 52, 317, 902, -653, -967, -494, -162, -685, -438, 756, 81, -207, 577, -7476, -353, 918, -31, -107, 181, 523, -46, -752, 373, -908, -1808, -916, 632, 1508, -35, -6943, 64, 13072, -655, 163, 1221, -1655, -2568, -446, -401, 470, -622, -944, 3744, -458, 203, 125, 238, 5196, 21, 12193, 1095, 1091, -787, -1157, -980, -1154, 7707, -29, 106, 1226, 696, -974, -379, -537, 56, 95, -477, -528, -11245, -1014, 140, 380, 89, 540, 84, -619, -322, -572, -240, -26, 727, 310, 43, -790, -31, -24318, 110, 618, 44, -108, 89, -191, -33, -201, -490, 43, -136, 1366, -2, 162, -832, 469, -140, -278, 600, -15775, -1699, 184, 1825, 728, -1803, -876, 152, 60, -813, 3063, -929, 972, -282, 718, 8426, -888, 1383, -664, 571, 958, 982, 236, -548, 
66, 1898, -274, 10715, -1693, 79, -1254, 296, 609, 682, -1074, 272, 157, -18972, 377, -12, 438, 536, -672, 292, 719, -464, 1106, -296, -812, 6, -334, 67, 678, 382, 678, 301, -22165, 184, 80, -671, -86, 139, -298, 416, -610, 1057, 15, -230, 376, -768, 643, 58, 27, 178, -742, 60, -500, 485, -19923, -1016, 717, 1126, 287, 2171, -388, 1453, -21, -268, -1555, -263, 713, 1709, -1103, -10699, 1788, -8, -501, -892, 11476, -2006, 466, 8070, -286, 163, 35, 494, 76, 1428, -2249, 100, -1542, 319, -214, -701, 10130, -294, -11962, -656, 227, -512, -1014, 213, -600, -720, 63, -180, 1286, 1063, -9671, -1056, 1269, 1484, 20, 790, 29, -9906, -373, 608, 361, -659, 43, -1034, -96, -219, -65, 392, -19615, -464, 212, 820, -182, -227, 463, 301, 642, -219, 386, -1170, 108, -5583, 422, -507, 530, -1058, -131, 20, -14487, 101, -14, -415, 32, -1133, -917, 944, -832, 580, 2509, -959, 470, 1184, 432, -1238, 193, -13382, 2329, 1993, 1035, 80, 3139, -553, 1683, 390, 1480, 642, 564, -11173, 422, -984, -559, -686, 168, -777, -810, -1278, -427, -96, 1691, 29172, -435, -50, -968, 221, 685, 52, -373, 525, -563, 350, 528, 305, 705, 313, 612, -254, -220, -1638, -156, 24, 109, -893, -697, 245, 2579, 667, -142, 12315, -694, 3799, 5, -438, -473, -426, 59, -5381, -56, 200, -280, -276, 96, 435, 729, 336, 123, -714, -372, -12609, -12053, -238, 223, -242, 230, 663, -645, 98, 515, 3, 724, 510, -48, 1090, -173, -5024, 536, 635, -143, 702, 172, -196, 164, 190, -152, -180, 238, -142, -329, 191, -296, -416, 11775, -496, -95, 392, 994, -584, -925, -963, 286, -458, 3104, -1990, 968, -1430, 998, -407, 28485, 436, 42, 378, -210, 148, -149, -532, 94, -628, 186, -186, -274, 250, -316 }, .cb1616s0 = { 5604, 1491, -2064, 1321, -2846, -3007, -1899, -896, 556, 1969, -2225, 18515, 4156, 1333, 3489, -2168, 1897, -1440, -1514, -13837, 1017, 4797, 453, -2101, -6822, 923, 185, 754, -201, -4151, 126, -793, -437, 2474, 4286, -6405, 4007, -1644, -757, -13106, 2460, -1874, -1867, -1099, -5146, 2945, 2162, -4427, 1692, 763, 1756, 
-821, 66, -348, 2001, 702, 1046, -1365, -570, 1073, 32655, -9, 450, -761, 908, -200, -572, -1306, 2589, 2406, 1926, 1772, 11042, -1989, 3914, -1192, 1817, -11710, 2985, -2942, 15684, 1919, -667, -1267, 5212, 444, 864, -3844, 438, -2382, 974, 983, -887, -822, 185, 245, -3192, 1030, 1441, -28152, -2616, -380, 300, 1990, -94, -999, 285, 553, 2107, 960, -859, 1001, -1632, 2208, -1302, 1331, -3956, 10593, -1931, -4486, 9376, -6587, -463, -3605, 2460, 1306, 2, 1987, 1643, -552, 1327, 1124, -581, 1347, 650, -29514, 278, 1062, 1459, 951, 2416, 396, -594, 930, 434, 3308, -2816, 5466, 4831, -2869, -68, -894, 58, -13036, 210, -1940, -2524, 1139, 2044, -32, 969, 2187, 516, 581, 8185, 2080, 176, -708, 1529, 1132, -675, -1384, -10949, 1174, -5245, 580, 7490, 3258, 4314, 2706, -13676, -1735, 1937, 577, -108, 2676, 612, -966, -966, 3255, 1401, 1443, -1850, -252, 9270, 5037, -1492, -1957, -2134, 1198, 3470, 10482, -468, -671, -1655, -955, 3248, 3360, 448, -1854, -25145, -2771, -3318, 561, -672, 1791, 2194, -598, 1673, -420, 547, 122, -160, -172, 1686, -397, 1187, 11, -879, -58, 323, 180, -2588, -2139, -1794, -2924, 999, -26969, -1280, -1401, -770, 6159, -4449, -4174, 5270, -4813, 4139, -2023, 2694, 2884, 3418, -5948, 3118, -1176, 4691, 8566, -32768, -681, -553, -216, -216, -931, -507, 579, -932, -740, 349, 81, 2120, -1222, 564, -1576, 1241, 159, 2579, 3236, 19205, -744, -1727, -1803, 1247, -575, -261, 261, 540, -255, -60, -1428, -14184, -5194, 863, 997, 1043, -828, 466, -12553, 2106, 56, -566, 1142, 401, 1360, 2322, 629, 937, 2954, -10086, -12, 2554, -5760, 523, -15184, 636, 156, 165, -2638, 1134, 658, 4398, -1385, -1924, 1179, 3222, -908, -1153, 18082, 1011, 1948, -1007, 352, -172, -6446, -22, -228, -264, 73, 76, 2229, -1349, 6103, -11588, 576, 3374, -1616, 7904, 3146, 984, 1056, -1626, 3113, -3674, 203, -452, -938, 2074, 2409, -1228, -8186, -2766, 11098, 1598, -8658, -735, 556, 1610, -7419, -5267, 1158, 2841, 4497, 7551, -2066, 1105, 761, 2549, -1764, 2870, 3889, -1478, 1912, 
2504, -1417, 963, -14602, 579, 28, -2953, 1589, 3962, -1372, -3304, 566, 2687, 9700, -2464, -13110, 3005, -772, -3775, -138, -4244, 5031, 2523, -2883, 582, -446, -274, 3311, -157, -784, -948, -292, 3085, -781, 954, -2133, -6693, 13909, -2236, 416, -2589, -3194, 668, -1988, -2234, 2365, 1034, 1201, -100, 1688, 372, 156, -254, 931, 576, -4680, 566, -1823, 294, 1645, 27678, -1353, -1230, 1744, 570, 1679, 608, -35, -7150, -4383, -11992, -2910, -2096, 512, 1838, 3129, -410, -2306, -551, -3904, 4140, -12782, -1743, -106, -4190, -5554, 12975, -573, -3532, -4050, 15, 1307, 62, 1643, -1988, 5774, 2064, 4734, 1009, 2038, -2794, -2704, 2275, -279, -1588, -910, 31315, 1249, -1642, 78, 164, -260, -878, 698, 1189, 159, -6137, -1994, 775, 3484, 1635, 1121, 4391, -5883, -11300, 3722, -422, -2180, -3206, -3181, -1490, 291, 1326, 399, 1952, -8405, 2240, 175, 3541, 4258, 1518, -781, 1105, 498, -348, 771, 15918, 120, 379, -2036, -3723, 10948, -1827, 3220, 40, 210, -294, -813, -2349, -707, 967, 953, 2625, -13614, -1519, 9454, 11606, -903, 817, 6237, -8878, -160, -1768, 444, -2812, -1697, -1010, -964, 1846, 2997, 2633, -1924, 501, -1464, 2402, -986, -1143, 527, 1187, -929, 20923, -563, 785, -486, -940, 1625, -796, -697, 348, -428, 1451, 1087, -2252, -2481, 939, 890, -2508, -1357, -1868, 1395, -6386, -21986, 2574, -384, -324, 7752, 2996, -641, -7903, -5745, -4226, -4178, -4394, 9307, 3906, -227, -496, 4556, 1099, -838, -2546, 1190, 9937, 11057, 3846, -156, 433, -2873, -1769, 36, 3188, 4490, 4369, 4714, -4681, -2804, -1525, -947, -5064, -4180, -1348, -1404, -1097, -3922, -1088, -444, -13636, -1547, 1685, -1625, -8494, 2492, -72, 9893, 2470, 705, 105, 5609, -5403, 846, 90, -688, 1184, 6286, -253, -1610, 3348, -2082, 8838, -2453, -1315, -1235, -719, -4607, -2138, -5522, -10466, 1900, 1541, -2688, 729, 368, -8845, 1282, 438, -2532, -2328, 4833, -6145, 4037, 3584, 7965, -1495, 6999, -5037, -1364, 7095, 4253, 2711, -8336, 3946, -1347, 192, -820, -328, -1152, 1554, 869, 5053, 9707, -5888, 
-4294, -3858, -3344, 8344, -644, 1750, -1796, -149, -3706, -14823, 656, -1487, -2466, 640, -2286, -2902, 2906, 44, 211, -336, 29976, -298, 2092, -688, 1857, 1807, -1705, 3211, 425, -1046, 128, 1191, -1966, -726, -3040, -3632, 1212, 2986, 5266, 1086, 3624, 3068, 422, 989, 24479, 3791, -2229, -3713, -2379, -1370, -1799, 2742, -3259, -4973, -626, 2287, 5655, 663, -918, 13266, 7762, -1131, 2490, -3123, 2869, -846, -2828, 119, 14540, 4588, -2784, -3713, -2547, 3698, 3189, 3372, -5436, 856, 4382, 4124, 3406, -336, -911, -137, 4268, -4436, 1566, 1169, -3020, 13980, -162, -7226, -2550, -946, -2408, -1056, -587, -273, -932, -219, -8021, -1086, -2587, 3852, 1235, -22, 222, -1100, -1594, 137, -1985, 10225, 4998, -348, -450, 6651, -2217, -7705, 2508, 10061, -4512, -2262, 6156, 2962, 150, -2456, 1089, -927, -609, -3130, -1682, -1215, -9251, -130, -3776, -309, -13872, -276, -6922, -82, 2660, -1255, -6562, 2640, 2646, 422, -84, -6020, -11551, -1710, -3462, -2666, 12510, 3145, -218, 2956, 447, 30, 2268, -2410, -1400, 660, 431, 3068, 258, -2862, 3919, 2693, -744, 3070, -2179, -1192, -932, -2095, -279, 2045, -8205, 15263, -4415, 2116, 4047, 10308, 3110, 1368, -1547, 10919, 988, -81, -907, -1728, -1052, -3539, -4769, -2576, -1038, 9255, 152, 431, 2455, -1544, 1880, -312, 2724, -13336, -4197, -1199, 709, -695, -1687, 442, -2564, -1626, -1888, 1870, 3539, -2922, -3506, -7890, -5486, -1640, 2178, 2173, -3200, -4626, 1116, 13161, -5221, -852, -1047, -3328, -3975, -4441, 2870, -1458, 5664, -28, 3853, 1809, 2721, 658, -15262, 3611, 3223, 595, 44, -5327, -2486, -1806, 606, -2474, -1236, 983, 1741, -8390, 1948, 1875, -1806, -6294, -814, -747, 2209, -1332, 2058, -1326, 5808, 1113, -10765, -584, 4038, 1412, -3356, 24, -12826, -4322, -2287, -10793, 3008, -6903, -1273, 1590, -608, -514, -309, -144, -2024, 1822, 4375, 1122, -631, -76, -595, 192, -11323, 8168, 10180, -646, 2478, 4516, 1095, 94, 6, 1251, -658, 2620, 626, 3078, 727, 7769, 966, -3593, -6990, -2358, 1022, 1288, 2733, -259, -291, 2482, 
297, -1268, 10338, 739, -1862 }, .cb1616s1 = { -12873, -2429, 6659, 4401, -2250, 1684, 1508, 1780, -1081, -10, -6012, 895, -2373, -1263, 125, 1448, 4744, 1556, -7267, 2354, -11368, 1155, -7699, -1424, -914, -591, 2472, 538, 1431, 953, 5, -3066, -1063, 3, 406, 979, 922, -668, 1633, 2, 649, -139, 964, 860, -18807, 1944, 2183, -1358, 1395, -1167, 5369, -3525, 735, -2698, 10556, -1137, -3979, 1383, -1997, 5995, 6465, 2310, 1781, -311, 3376, 7199, -2745, -1656, -5702, 3180, 3017, -5673, -712, -8902, 2058, -570, 170, 2276, 3869, -9332, -7965, 1130, 2111, 5638, -1507, 2944, 1574, -919, -1459, -970, 11093, 544, -2952, -146, -4684, -303, -528, -1199, -890, -2720, -1665, -10952, 373, 1657, 1960, -1386, 299, -4356, -4527, 8948, 7378, 1580, 1301, -6057, 7650, -7399, 4646, -1768, 2756, -263, -286, -334, 1369, -786, -3760, 824, -13524, -5099, -1693, -347, -1821, 1992, 3462, 1421, 4900, -462, -13331, -1617, -2350, 4083, -8721, -5880, 4900, 2912, 235, 10369, -1340, 776, -2598, 14344, -3805, -568, -3788, 3591, -394, -1077, 3908, 6080, 1953, -1454, -1013, 507, 10097, 3396, -4662, -763, 2506, 1486, 3088, 580, -86, 1117, 1606, -3454, -10782, 4870, 6170, 4020, -5675, 6848, 439, -8765, 3877, 6250, 734, 3245, -874, -4312, -879, -4368, -1287, 3212, -2130, -1435, 1619, -280, -3082, -1070, -18921, 940, -2428, -1548, -1142, -271, 193, -240, -890, 918, -4350, -5042, -8994, 5060, -6495, 3455, -259, 892, -1290, 1348, -1049, -12681, -49, 18286, 75, 791, 1830, -4116, 10240, -12, -459, 2477, -2582, -3344, -1598, 982, -324, -48, -4229, -8476, 11120, 100, -6238, 1164, 2369, -2052, 247, 626, 2213, 2279, -2627, 289, -471, -1136, -1818, 15413, 579, 1034, -6835, -8645, -12667, 758, -932, -4398, 565, 458, -2024, -4050, -3100, 1897, 1324, 3191, 1876, 7660, 385, -1066, -1539, -1317, -2632, 766, 63, 389, -189, 1136, -653, 802, 755, 70, -29812, 640, -11953, 10901, 2078, -529, 10373, 2509, -2776, -104, -2232, 174, -837, 158, 1507, 1963, -273, 1534, 1084, 8469, 2568, 12662, -2276, 2808, 2052, -7430, 434, 3777, 
991, 664, 2724, 1631, -3632, 2099, -582, 4140, 757, 11248, 540, -1425, -10204, 1604, 600, -2034, -1060, 977, 1843, 3831, -933, -816, 2975, -6413, 1589, -915, -696, 2155, -556, -17893, 3348, -1239, 1014, -2539, 1588, -320, 2402, -1485, -8062, -1046, -1458, 200, 1323, 357, -3752, 2836, 5774, -11638, -913, -648, 1676, 246, -1277, -1065, 2334, 14911, 228, 880, -2172, 3072, -2520, 1445, 1442, 2568, -1254, 730, -1950, -192, 12003, -1587, 2558, 714, 33, 4324, -4642, -231, -279, -255, 17824, 1292, 3530, -766, -64, 245, 1677, 1716, 2507, -3594, -3532, 3000, 1996, -5342, -1868, -5642, -21, 1132, -1202, 1104, -6543, 1242, 457, -1711, -32768, 49, -458, 295, 858, 2043, 1268, -1257, -346, 793, 554, 1260, -1082, 985, -1453, 1704, 2431, -2858, 1466, -5424, -8870, 4714, -1539, 5767, 110, -2568, -1482, -348, -11580, -2838, 1213, -599, -1591, -3472, -6907, 6191, 3928, 4708, 1326, -1510, 6322, 3849, -4112, 7689, 5976, -3298, 372, -5450, -2208, 6564, -6915, 911, 4216, 1682, -739, -2146, 203, 350, -816, 351, -3386, -3016, -15045, -10824, -553, -4969, 138, 256, 1672, -1840, 2851, 15838, 2934, 1871, -600, -3293, -845, -2696, 1463, -1075, 720, -1177, -1538, 2415, 7315, -484, 1082, 962, 766, -845, -10687, -5932, -4410, 3840, 362, 194, -4576, 10209, -3548, -127, -1202, 246, -734, 770, 311, -3126, 772, -2422, -1141, -12330, 960, 1567, 2816, 80, -4414, -778, 665, 2308, -420, -180, -1242, -423, 12138, 113, -1477, 2899, 214, 348, -927, -764, 26, -1127, -2288, -32768, 1302, 394, 646, -453, -946, -838, 1649, -2292, 1182, -1558, -6413, -265, -1942, -3467, 1863, -3526, 3446, -863, 886, 202, -202, 15706, 2226, 1763, 894, 936, 16191, -693, 1682, 6678, 1742, 1365, 700, -1765, -803, 299, -2194, 1259, 689, 1670, -635, 28, 11890, -14, -878, -5439, 103, 11124, 528, 1179, -62, 868, -664, 749, -1128, 1429, -485, 1920, -866, 1176, 1051, 379, -29470, 2354, -252, -1648, -412, -804, 1339, -383, -812, 959, 893, -1741, 1462, -1868, 470, 2112, -1889, -2236, -1668, -755, -2562, 1354, 6183, -10964, 5651, -1062, 2550, 
-6225, -194, 1687, -782, 1568, -85, 10, -8, 1128, -521, -1090, -1933, -3441, -2698, 3049, -5822, 20847, 710, 789, -1872, 1082, -1242, 4152, 1624, 10795, -2149, -134, 1087, 900, -7943, 5178, -3429, -11622, -3617, -7444, -824, 3462, -579, -830, 1010, -3301, 12202, -5446, -1763, 340, -744, -509, 554, 1140, 12266, -1328, 4652, 992, -1931, -708, 1074, 2762, 2931, -414, -217, 10166, -4167, -903, 660, 1000, 27, -1037, -1532, 1308, 8655, 9087, -2998, 9928, -3722, -556, 4812, 3062, 600, 1281, 3879, 114, -5404, 1869, 2174, 2083, -11631, -301, -3609, 2443, 2300, 4863, -838, -29, 2166, 1319, 2110, 1387, -741, -1225, -1729, -13536, -7376, -1520, 619, -4919, 2517, -4338, -1650, 475, 456, 4372, 792, 3224, 1963, -547, -2071, 2142, -254, 1549, -6846, 2430, -96, 19844, 595, 1197, -1367, 2019, 2014, -1547, -3775, -1186, -9690, -394, -4106, -1728, -1036, 2945, 509, 14242, -1893, -2494, -3004, 458, -1753, 2628, 9790, 3450, -1652, -322, 8263, 3952, -2156, -2110, -442, 1256, 1561, -4913, -3452, 74, 3051, 8907, -3376, -96, 16654, 557, 520, -446, -2520, -1712, 2151, -2423, 3761, -3507, 487, 2103, 777, -416, 509, 468, 3629, -3155, 11460, 2106, -2191, -1014, 1154, 9317, 704, -282, 3098, 2722, 84, 150, -5922, 3063, 8373, -11896, -1157, -2286, -1781, 7331, 1331, -334, -974, -1653, 752, -1970, -89, -3470, 2418, -1334, 3615, 12770, -116, 1965, -1643, 1480, -2225, -10686, -1174, 530, -972, -933, 719, 722, 1530, -317, 105, -14155, 2569, 4506, -8502, -681, -1544, -542, -2814, -1161, -629, -1776, -3540, -1366, -3681, 1838, -1630, -703, 12613, -12335, -2020, 2173, 27, 315, 4766, 4590, -1603, -68, 1154, -2940, 1198, 7884, 2502, -586, 440, -5124, -2454, -2597, -826, 7401, 2803, 4552, -3212, 2966, -5567, 588, 2216, 7444, -2633, -5922, 434, 3423, 4084, 2296, 13258, 2070, -4624, -1226, 166, -367, -527, 1110, -1407, -150, 140, 584, -373, -2649, 862, 500, 3292, -3506, -679, -20109, 1775, -726, 3378, 754, -1962, -5764, -1338, -3628, -691, 4554, -1890, -6021, -6566, 2590, 262, 2509, 257, -4386, -2480, 6352, 
-2026, 1234, -399, 22808, -2221, -626, -714, -339, -1196, -455, -80, 713, 1662, 474, -2324, -527, 4101, -10526, -4617, 10492, -1143, 805, 1360, 3796, 942, 684, 2596, 1313, 1589, -570, 5476, -27, 9220, -1493, 2631, -6726, -2976, -14295, 137, -734, -2015, 658, 323, 83, 2539, -1230, 1714, -2080, 658, -18803, 2978, 996, -3374, -28, -1335, 150, 2154, 1069, -852, 293, 535, -1004, -993, -3692 }, .cb1616m0 = { -16476, -11442, -305, -196, -767, -2167, -1, 378, -2200, 22, 2405, 944, -1786, -806, 669, 952, 10435, -2752, -1625, 1060, -12314, 1283, 234, -2405, 627, 798, -1058, 311, -2794, -2715, 73, -214, 813, -2749, 10732, -445, -12147, -2507, -1972, 1652, -1920, 215, 298, 1106, 826, -7445, 69, -1679, -675, 1249, 1444, -1109, -48, -1452, -2368, 3034, -492, 13068, 311, -3446, 326, -1426, 2384, -2146, 2916, 8957, -196, 2212, 447, 1775, 2607, -11962, -278, 4335, -1743, 135, 212, -41, -92, 164, -11, -504, 828, -519, -834, 251, 1919, 762, 24917, -180, -132, -330, 138, 225, 1038, 3, 32138, -388, 208, -638, -1338, -165, 200, -230, 225, -777, -2270, 8198, 583, 3946, -1534, 1666, -1032, 11384, 202, 30, 2758, -505, -2815, 1265, 64, -17, -360, 636, 134, 502, 259, 872, -28148, -1046, -348, -86, -739, 55, 448, 168, -656, 1094, -1074, 4552, -834, 2296, 2356, -572, -1917, 10979, 3127, -52, -9969, -527, 1994, -1626, 1041, 3310, -2319, 2232, -11444, -2400, -1788, -1254, 5265, 3198, 7088, 4522, 1292, -191, -15886, 2787, 22, -1610, 184, 380, 521, 336, -158, 877, 468, 6515, -756, -5484, 100, -464, 9244, -2726, -1644, -2741, -5362, -1635, 894, -1849, 10118, -3264, -4472, 1255, -3571, -437, -1050, 1505, -11178, -193, -11513, -24, 719, 212, -1999, -725, 502, -1164, -1060, -618, -91, -738, 740, -2254, -4635, 2700, 95, -853, 1093, -11620, -968, 9492, -25, -664, 367, 1105, 5501, -112, 7516, 10286, 821, -484, 204, 1425, -3491, -1234, -4240, 3807, 2877, 1824, 423, 466, -428, -845, -86, 13276, 8436, -690, -688, 574, -2874, -552, 4540, 926, -5443, 629, -395, 2090, -17468, 335, 2419, 1275, -3750, -1589, 
470, -1735, 330, 2532, 1094, -6218, -884, -236, -9678, -9945, -447, 542, -728, -1922, 108, -2193, -946, 3270, 2121, 2624, 1010, -10742, -102, 2813, -13070, 1523, -1532, -1291, 420, -1999, 262, -1194, -4226, -1450, -275, 83, 1168, 1590, -1517, -426, -1424, 152, 676, 11463, 824, -2092, -1106, -11502, -2327, -278, 2597, -11, 793, -118, 393, 580, -499, -743, -77, -427, -408, -692, -29195, -247, -2014, -922, 97, 581, 5469, -1419, -698, 1490, -3814, -2818, -13816, 680, 3595, 1544, 2366, -3018, 2479, 323, 346, -260, 337, 2730, 12214, -1118, -11301, -3028, 212, -41, 1764, -580, 553, 5454, -8, -366, -1202, 901, -796, -8350, 4380, -1452, -300, 1152, 3058, -3476, -27, 13046, 34, -11438, -1321, -1528, 13237, 114, 2514, 976, -571, -1192, -2050, -1635, -964, 416, -23, -1083, -9, 32767, -548, 556, -1217, -56, 325, 1048, -145, 202, 1520, 44, 402, 400, -611, 8667, -1083, 1068, 1224, -12031, 2318, -1109, 1266, 1306, 4673, 285, -5603, 1555, -100, -1059, 403, -213, -680, -904, 11443, 581, 12160, -638, 309, -65, 933, -2280, 1958, 2642, 1808, 7945, -2088, 850, -428, 785, -989, 1234, -1413, 745, -10756, 1943, -184, 3252, -96, 932, -664, 13222, 11326, -1374, -327, 1901, 1069, -1540, 104, -139, -904, 106, 1664, 925, 46, 353, -835, -554, 1618, -956, -437, -727, -3403, 1038, 968, 436, 46, -4385, 340, -16903, -498, 47, -554, -399, -2418, -347, 358, 23280, 234, -172, -338, 1058, -2172, -1, 1710, -64, -583, -2224, -780, -637, 3500, 108, 1045, 828, -728, 9466, -2487, -12773, 1924, -1158, 208, 49, 136, 12055, 42, -1381, -375, -11534, -249, 1602, 996, 204, -710, 4761, -511, -15761, 166, -1184, -192, 50, -105, 890, -9566, 2062, -1536, 133, -185, -643, -172, -894, -355, -16, -1395, 542, 2160, -481, -1104, -793, 517, -20454, 698, -181, -135, -434, 1677, -181, -415, -738, -1574, 1664, -14058, 597, -12354, -460, -313, 1724, -686, 85, -1162, -648, 865, 165, -225, -1947, 2818, -778, -4010, 402, 686, 11170, -332, 10336, -757, 4794, 2204, -477, -292, -366, 8412, -2476, 494, 510, 10514, 769, 642, -441, 
1079, 6954, 4246, -2272, -290, -224, 1312, 398, 1536, -692, 330, 157, -946, -100, -1830, 214, -25652, 1382, -1836, -440, 110, -506, -438, -2370, 126, 562, -3515, 1014, 8526, -1641, -2493, 4411, -9210, 2110, 625, 114, 323, 2450, 2407, 682, 1999, -9424, 2480, 69, -2091, -11845, -3684, -429, -1622, -919, -518, 70, 1450, -3523, 5126, 5706, -1451, 2633, 820, -204, 11338, -8014, 753, -103, 290, -923, 1408, 298, -1962, -887, 9691, -1366, -11048, -55, -223, -1040, -163, 132, 676, -760, 4990, -310, -9286, -2427, 14442, -418, -802, -359, -323, 2877, -210, -1436, 1574, -1206, 265, -155, -225, -32768, 347, 222, -1165, 200, 924, 1135, -843, -66, -343, -334, -113, 209, 14, -203, 1214, -896, 910, -1496, 1831, -7833, -841, -10453, 1605, -8514, -477, -48, -241, -58, -32768, 213, 108, 450, 1155, -30, 89, 240, -768, 1332, 290, -1377, 951, 586, -8939, 1298, 496, 705, -1661, 1798, -1906, -2233, -1716, -986, -2204, -1149, 2686, 8578, 32767, 645, -661, -135, 770, -432, -550, -385, -272, 625, 1234, -729, 19, 1753, -284, -106, -655, 750, -442, 23143, -328, -520, -506, 790, -1048, -730, -471, -438, 483, -374, 939, -226, -397, -849, -12054, -772, -40, -11776, 232, -540, -2497, -679, 337, 1357, 458, -341, -7542, 1001, 492, -416, -1496, -8966, 9814, -1752, -674, 2526, -544, -2900, -1318, -1578, -238, 75, 11181, 1750, -3182, 564, -570, 528, 1004, 146, 1144, 7430, 158, 9524, -36, -340, -441, 596, -1659, 1420, -686, -36, -596, 2215, -1295, -19722, -2149, -1046, -2339, -1166, 3057, -370, -556, -33, -322, 260, -23, -106, -323, 147, -57, 179, 458, 684, -1283, 1251, 1231, -18548, -513, -480, -695, 593, 3072, 1960, 322, -702, -1043, -544, 6005, 1378, 100, -225, -848, -1294, -3346, 828, -2610, -3010, 9623, -1329, 1956, -1098, -3730, 1137, 12413, -1260, 2457, -10844, 6824, -4289, -653, -302, -4415, 650, -1684, 6129, -370, -652, -3245, -473, -150, -3018, 1864, -1258, 928, -2379, 14451, -119, 2282, -248, 3139, 6502, 4318, 2214, -1627, 126, -422, 326, -622, -302, 32252, -268, 456, -260, -260, -968, 391, 
-497, 152, 1764, -10580, -369, 277, 70, -13137, -1114, -1111, 464, 2266, -2968, 728, -1216, 1726, 1044, 344, -16436, 1558, 3178, -551, 604, 442, -891, 9570, 1596, -541, -2182, 730, -906, 242, 935 }, .cb1616m1 = { -116, -53, -24868, -544, -783, 97, -912, -1202, -622, -147, -215, -362, -16, -522, -1694, -358, -724, 2628, 439, -18106, -1566, 3048, 4133, -1238, -3233, -1130, -2884, -2762, 1031, -1037, 63, -2219, 10701, -1518, -10322, 1425, 792, -1820, 380, -777, -3017, 1531, -1052, -3491, 1085, -428, -765, -113, 42, -265, 365, 99, -859, 35, 610, 44, -495, 262, 1689, 2082, 21605, 412, -717, -1163, -3285, -5062, -1583, 599, -277, -62, 615, 6014, -1781, 465, -544, -14114, 11277, -20, 696, 663, 1156, -240, 631, -2802, 333, -2544, -1628, 775, -960, -553, 496, -378, 526, -421, -426, 290, 555, 403, 390, -31714, 25, 449, 654, -334, -1317, 165, 496, 1554, -88, -777, 626, -1511, -9020, -1725, 12705, -798, -1240, 195, 1932, -833, -939, 43, 182, 2547, 4879, 9234, 370, 2058, -7757, 544, 1106, -660, 546, 9983, 225, 124, 952, -2153, -1732, 2760, -1270, -176, 3334, -6735, -526, 10475, -627, 7835, -2263, 475, 731, 908, 8264, 1605, -192, 5026, 2414, 5223, 595, 1093, 2345, -796, 8663, 1028, 8188, -185, -1506, -3044, -100, -1818, -6369, -170, 1728, -9249, 886, -2111, -349, -1146, 2127, 11622, -8043, 2880, 2215, 1693, -2303, 1698, 1121, -3575, -927, -716, 1940, 2514, -11672, 1619, 916, -7, -585, 508, -1316, -972, 778, 9774, -2126, 2368, -56, 6716, 1169, -3656, -1330, 9530, -12158, 1188, -11426, -353, 945, -1941, 1750, 962, -1133, 1793, 2318, -2641, 1109, 933, 804, 505, 60, -1642, -2238, -2328, -1558, -1568, -27, 952, 4, 1376, -862, -18404, -1828, 4107, -454, 52, -1202, 1150, 686, -1950, -497, -10883, 400, -422, 1734, -54, 11165, -3309, 6402, -877, -19967, -400, 1642, 1305, -2432, -3115, 375, 3898, 1812, -5305, -946, 1717, -757, 3322, 126, 747, 1836, 9957, 1904, 658, 13043, -1779, 675, 716, -453, 670, -1572, 210, -1533, -133, 294, 546, -24084, -1036, -485, -117, -184, -624, 273, -901, 866, 
609, -1119, 28, 250, 13, 70, 1178, 882, -632, -21624, -1339, 926, -1814, -1279, 1868, -181, 383, -679, -1070, 5091, 1148, 1034, 2144, -2779, -3810, 4536, 1713, 1003, 13322, 2866, -3217, 2508, 4395, 480, 14, 167, 763, -34, 1034, -1342, -1349, -100, -225, 464, -914, -1403, -1851, 23767, 770, -457, -257, -1072, 1201, 583, -59, 2627, 1469, -7, -11642, 3352, -1003, -6, 11588, -311, -2435, -2180, -2352, 1952, 5532, 1945, 7281, 504, 11882, -603, 45, 42, -1396, -1115, -1041, -1061, 566, -2733, -765, 687, 118, -1174, -20412, -244, -986, -151, 2888, 1102, -1303, -135, 529, 1186, 13220, -183, 906, -4209, -4455, 2247, 246, -6474, 2794, 1450, -6495, -1819, 598, -438, 244, -1064, -673, -672, -1563, 543, -2278, -3087, -811, 1866, 80, -18987, -682, 569, -551, 514, 6876, 7582, 839, -4031, 823, 2342, -1300, 1180, 702, -10168, -1957, 485, -374, -151, -11066, -461, -12824, -1221, 1281, -718, 2012, 330, -289, -487, -207, -722, 394, 156, -2023, -11006, -373, -4, -238, 10581, 991, -1236, -814, 553, 1295, -2269, 2783, -1973, 681, 9759, 3674, -1680, -12118, -1340, -2372, -288, 2143, -328, 809, 312, 2038, 736, -10, 908, -11319, 74, 6362, -1122, 1546, -184, 1630, -1851, -2143, 1048, 8858, -462, -6458, -1540, 811, 130, -3542, -10424, 9353, 388, 1168, -1797, 1796, -4151, -2329, 1, -272, 846, -642, 248, -1144, -9863, -1684, -190, 7611, -4147, -692, 5354, -2363, 609, -4926, 3166, 2094, 857, -369, 118, 725, -899, -601, -6, 556, -32540, 950, -478, 757, 136, -560, -754, 562, -448, 223, -704, 616, 365, 22610, 1191, -1264, -94, 927, -294, -1270, -16, -2520, -2026, 420, -6621, -504, 9666, 452, -379, -1888, 536, 1161, -3021, 609, -4890, -231, 3926, -943, 32767, -615, 300, 870, -742, 429, -42, 155, 1060, -900, -347, 34, 491, -3625, -1529, 10175, 178, -7938, -406, 1628, -362, -7340, -433, 489, 568, 674, 536, -2886, -6118, -16, -531, 182, 1498, -4194, -306, 63, -1429, 1109, 631, -10386, -16, -5938, -52, 10638, -793, 35, -874, 1633, -252, 709, -286, -780, 17172, -32, 912, 137, -1684, 2781, -5637, 338, 
10961, 1401, 176, 1890, 563, -371, 578, -235, -882, -616, -591, -672, -821, 12194, 917, 778, -427, 358, -1411, 2032, -1372, 1891, -1784, 1830, -1808, -464, 13973, 2016, 8606, -914, -7329, -1853, -2627, 2219, 2628, 2161, 2185, 2414, 8857, -273, 1016, 2253, 1070, 907, 367, -430, 574, 1039, 93, 170, 255, -267, 550, -668, 287, 1827, 19833, 244, -3731, 4, 21365, -127, 356, 643, 2016, 3290, 1242, 46, -734, -2298, -316, -6618, -296, -1465, 657, -1451, 469, 212, 2823, -2803, -11862, 931, 44, 660, 1576, 1848, -10529, 2813, -1163, -260, -195, 16320, -3447, -262, -76, -439, -3487, 1292, 3330, -616, 1477, 1900, 8843, 81, -846, 6845, -95, -112, -231, 129, 6982, 165, -3115, 2456, 2032, 12201, 2747, 1691, -728, -1935, -239, 968, 15578, -2260, -1813, 440, 2188, -3845, 1278, -136, -1388, -7850, -462, 2921, -1740, -136, 164, 103, -206, 32767, -459, 1249, 736, -590, -797, 628, 612, 327, 396, 552, -128, -76, -258, -557, 429, 170, 532, -347, 169, -593, 28319, -633, -1339, -997, -258, 324, 628, 3254, -1118, 8902, -63, 4762, -2820, 2429, 820, -46, -5366, -2193, -9005, -1304, -597, -10143, 555, -3000, 294, -1577, -871, -140, 726, 3700, -2226, 903, -253, 10330, -2946, 656, 725, -668, -920, 1653, 1312, 1623, -1150, -11970, 2157, 4532, -340, -8648, -616, -1429, -980, -30, 647, -474, 442, 5098, 188, -1258, -8172, -10927, -4207, -112, 2501, -3241, -1949, 159, -525, 1090, 420, 10418, -11897, 1072, -78, -1028, 3367, -2647, 3421, 2021, 2358, -973, 272, 27911, 472, -402, 1397, -927, -1032, -1274, 848, 221, -2745, -710, -692, -409, 1922, 142, 594, 1053, -11350, -791, 3767, 1569, 541, 11921, 134, 368, -948, -2689, 896, -1193, 1190, 8514, 1436, 1017, 599, -3358, 4002, 12936, -675, 1044, -1210, 296, -1109, 1992, 1282, 774, -13102, -608, -11145, 134, 278, 944, -888, 350, -1574, 189, -2542, 3476, -3018, 3368, -9304, -1839, 533, -2, 1057, 686, -11819, -1146, -973, 1594, 3526, -2890, -1528, 3489, -475, -259, -9610, -475, -984, -3559, -742, 408, -130, -2291, 899, 12177, -1934, -162, -3238, 1610 }, 
.cb2220l0 = { -12528, 350, 1782, -474, 1439, -14269, -8, -1782, -753, -1720, 167, -440, -2706, 222, -1629, -288, 671, -111, 10270, 878, 152, 330, -1000, 639, -1280, 1111, -2072, 1439, -476, 553, -2974, -614, 1666, -466, 11811, -1393, 154, 624, -697, 176, 1108, 504, 250, 572, -6, 704, 16, 647, -1143, -1407, -411, 23745, 319, -189, -404, -641, -86, -707, -770, -302, 13, -398, 76, -681, -525, 354, -1225, -757, -23170, -484, -965, -430, 13477, 898, 505, -17, 13812, -890, 357, 662, 1000, -935, -60, 944, 400, -432, -221, 1047, 307, -180, 5260, 16509, 650, -269, 1563, -6002, -3082, 186, -3334, -5770, 1010, -394, 128, -699, 537, -27, 1014, -531, -50, -163, -1664, -1026, 732, -1296, 21856, 574, 416, -745, -443, -1382, 272, 791, 1308, -308, -1636, 168, -10922, 119, -1190, 1123, 1492, 1706, 1076, -2016, 3270, -994, 876, -2316, -2992, 12625, -412, -159, 5249, 1424, -304, 557, -431, -360, -340, -561, -292, 1748, -224, 1789, -352, 386, 136, 76, 1309, -270, -24204, -515, 1142, 2119, 1144, -173, 1008, -693, -430, -1052, 1890, -12483, -11416, 2918, 1591, -1202, -1782, -1335, 1354, 1703, -510, 4287, -854, 1153, 2018, -518, -960, 11825, 1295, -563, 11252, 190, 4078, 222, -3115, 3306, 747, 2638, 1015, -1674, 8032, -2386, 573, -349, -832, 96, 9564, 11708, -483, 1326, 1804, -2903, -2024, -234, 1009, 3229, -232, 803, 275, 444, -629, -192, 381, -1289, -109, -29019, 270, -420, -408, -466, 113, -537, -266, 296, 180, 506, 1015, -565, -517, 1494, -11053, 3968, -1735, 3474, -1991, -8326, 8075, 1740, -3995, -1287, -2558, 1030, 3742, -618, -2600, -1783, 2696, 1480, 1054, 341, 3762, 4225, -1742, -11582, 4348, -8756, 493, -404, 3840, -1049, -683, -962, 163, 10997, -97, -848, -4632, 2794, 2684, 2540, 739, 8534, 3688, -878, 3138, 2576, 6444, 3674, -2371, -218, 2864, 12270, 2866, 189, 4549, 4894, -6378, -1050, -3166, -5897, 2245, 2803, -70, -1909, 2783, 3951, 153, 11221, -658, 12780, -238, 3418, -2235, 754, 311, -739, -2414, 702, 1076, 303, -320, 47, -3288, -234, -1376, 3022, -103, -1780, 716, 11886, 
10942, -5402, -5431, 1196, -624, -885, -652, -3248, 74, -435, -686, 154, 8675, 3325, -1779, -341, 564, -901, 1335, -639, 3494, -1820, 290, -92, -3088, 4775, -2140, 2334, 710, 10536, -15042, 14823, -1082, -1045, 1008, 734, 241, -1048, -933, 245, 913, 114, 322, -1798, 246, 1067, 348, 408, -183, -728, -12915, 685, 1525, 1694, 183, -168, 12703, -1268, 1613, -2072, 1546, 743, 2356, 2135, -550, -153, 1327, 2, 12487, -3111, 2347, -1722, -300, -193, 2222, -1928, -658, -384, -5738, -1141, 3634, 10312, -69, -1549, 10879, 1795, -361, -1838, 143, 1202, 327, -15549, -1268, -194, 3284, -12, -344, -2042, 1663, 334, -798, -873, 1736, -324, 195, -417, -382, -22936, 812, -478, -962, -451, 730, 382, -135, 1311, -290, 122, 148, -775, -305, -32218, -84, 98, 374, 369, -44, 923, -432, 156, -1471, 236, -39, 143, -146, 835, 135, 229, -297, 1690, 6786, -12169, 815, -176, 1868, -9, -3052, 108, 114, 260, 11337, -2689, -132, 765, -239, 54, 691, -9737, -627, -474, 12212, 2222, -7595, -239, 1793, 2115, 563, -2390, -1991, 2906, 675, 923, 146, -3605, 981, -1725, 92, -562, -21192, 304, -450, -323, -889, -726, 688, -1186, 2590, 466, 326, -734, 308, -782, -3219, 963, 454, 1348, -513, 953, -1414, -320, 1012, -1148, 1185, -17356, -15, 1546, 1346, 2182, -2457, 1426, -1690, 155, 8793, 1394, 510, 2608, -203, 2697, 608, 2612, -13542, 177, 4642, -824, 1877, -1864, 1681, -1033, 1487, -749, 356, -11, -1, -366, -215, 1531, -38, -922, -378, -296, 1245, 19967, -2389, -459, -3729, -163, 6578, 354, -1471, 195, 353, 1831, -605, -2291, -359, 947, 8409, 3454, 12416, 2434, 3485, 40, 350, 1640, 738, -9827, 935, -171, -944, 1407, -399, 571, 2805, -13108, 784, 678, 2405, 328, -417, 1188, -1596, -649, -1358, -1130, 341, 202, -2459, 11307, -2250, -3518, -1812, 3338, -924, 10027, 3004, 703, -184, -666, 223, -1644, -7221, 3507, 10108, 1324, -412, -371, -92, 2496, 3182, 10, 10269, -998, -1010, 610, 3296, -1842, 407, 406, -1609, -181, 2202, -662, -1450, 1360, 1488, -212, 1501, -214, -555, 168, 275, 301, -950, 3272, -323, 
20632, -21, -1729, 11013, 2149, -9278, 6735, -593, -7374, -430, -2776, 2343, -1374, 519, -4876, 827, -2477, -1971, 1249, -23380, -1810, 199, -761, 2182, 1654, 447, -488, -1219, 364, -53, -382, -989, 154, -545, -872, 776, -211, 7706, -767, 8006, -138, 1989, -180, 306, 486, 1112, -648, -12, 1538, -300, 2458, -5833, -1181, -7680, -6700, -621, -308, -29995, 602, -24, 94, 752, 517, 86, -249, 1058, 704, -404, -387, 106, -632, -159, 1275, -197, -1263, -1210, -1689, -10488, 1950, -2037, 5974, -3960, 38, 1284, 2851, -2813, -1613, -1646, 10164, 138, -2956, 196, -118, -484, 860, 124, -262, 30, -1448, 128, 287, 327, 590, 27272, 391, -738, -1631, -481, -1511, 82, -574, -737, -614, -447, -80, 292, -19, 252, -2, -28117, 332, 141, 1485, -154, 1382, -1755, -422, -1692, -2144, 910, 1004, 1894, -1537, 897, -458, 19483, -1321, 2280, 622, 288, -2253, -1001, -976, -408, -394, 132, -250, -428, -22, 140, 287, -141, 30981, -293, 631, 729, -2, -231, -127, 377, -879, -294, -107, 253, -964, 1258, 570, 71, 9421, 8358, 9295, 8354, -546, 1153, -1807, 1577, 2911, -1808, 1808, -1631, -1348, -6977, -382, 1625, -2793, 10633, 1977, -1793, -12480, 1, 2010, 23, 423, 1102, -1920, -478, 1845, 1016, 465, 758, 800, -1540, 5448, -10472, -2749, -989, -6362, 9283, 373, -2560, -5478, -1618, 20, -564, -1074, 4075, -471, -515, 409, -2069, 359, -788, -11618, 2524, 917, 2757, 243, -3261, 6922, 6268, -3148, -2804, -3412, -4262, -1903, 1043, -12255, -162, 1598, 496, 454, 1401, -1635, -12711, -673, 3392, 1255, 1602, -1206, -297, -2066, 3009, 1149, 1285, -1307, 412, 27971, 183, 569, 1304, -706, 824, -635, -358, -340, -28, -1344, 955, 14, 676, -243, -20, -11947, 1350, 8122, 196, -10161, 4925, -3764, 1661, -401, 145, 253, 680, 718, -614, -613, 498, -293, -4257, -684, -14853, -10522, 698, 1537, -2016, 1162, -2684, -1578, 8, -238, -3214, -2749, -1577, -1187, 113, -1457, 1068, 590, 25, -644, 1000, 2430, -1612, 13246, -2684, -1642, -4648, 816, -1103, -7556, 5753, -3998, -1338, -776, -1958, -9652, -1288, -290, -4240, -2788, 
-8191, 1625, 2558, 1238, -1824, -39, -3129, -8916, -3302, -5632, -1768, 866, 708, 684, 3530, -8772, 1485, 677, -10398, 686, -852, -6974, 5286, -2658, 612, 1180, -3367, 4285, 5708, 1416, 166, 2787, -3697, 1431, 1648, 7942, -544, -1064, -514, -840, -870, 1246, -3582, -9310, -3802, 4025, -8251, 5978, 132, -619, 2792, 9786, 1244, 242, -1948, -4701, -5904, 951, 7486, -3494, 48, -4468, -2403, 6090, -2343, -4175, 1336, -2546, -281, -736, -1758, -1720, 11066, -918, -1354, 3885, -33, -4116, 1246, -218, -8082, -766, 7796, 1505, 1559, -964, 1741, -454, -1628, -762, 3034, -804, -888, -9682, 9603, -2556, 2874, -5456, 3066, -7747, 956, -660, -1538, -381, -760, 1747, 547, -517, -697, -1411, 410, -514, -3988, -219, 13358, -2393, -280, 11230, 2640, 795, 2534, -8094, -1838, 71, 16, -203, 4224, -96, -2829, 2010, 1961, -1312, -1266, 3952, 6894, 6996, -8062, 4708, 1193, -3439, 1549, 935, 170, 614, -868, 43, -246, -188, -940, 130, 126, -736, 697, -510, -56, 1596 }, .cb2220l1 = { -13582, 1049, 15596, -101, 707, 2677, 542, -522, -636, 194, -2361, -1252, 524, -32, 227, -419, -652, -601, 84, -10428, -1417, 13117, -573, 3774, -3632, 2025, -1237, -692, -1486, 192, 1221, 452, 436, -764, -2636, -153, -685, 118, -424, -635, -458, 209, -577, -12042, 4240, -10861, 49, -1534, -991, -2416, -280, 2095, -1841, 1278, -94, -423, -572, -949, 734, -1087, 12449, 6514, -4582, -7845, -3722, 1446, 2531, -1238, -2070, 1515, -1331, 2382, 1066, -1298, -1189, 6811, -1868, -1082, -1732, 356, -2622, 493, -3345, 1367, 1737, 4497, -14734, -1350, -354, -1340, 8478, -1152, 1832, 1793, 830, 974, -13918, 522, -1472, -2502, -2625, -157, -360, -17, -830, 673, 36, -1339, -14860, 522, -13377, 851, 937, -1103, -44, 408, -364, -953, -392, 1837, 2342, 1236, 111, -218, -919, 985, 10077, -1065, 1840, -124, 3780, -11015, 204, 437, -830, 6712, -1720, 288, -991, 1094, 5647, -1296, -2284, 1642, 1000, -35, -115, 208, -244, -1099, -832, -2092, 802, -163, 3343, -964, 314, 126, -1204, 754, -17838, -826, 4414, 8331, -770, 1246, -3500, 
1680, 833, -108, 494, -910, -6314, -2832, 2553, -6230, 1165, 3631, -1717, 2404, -32768, 520, -38, 1228, -708, 58, 260, 771, 588, -448, 389, 156, 606, -830, 400, -488, -188, 536, -1428, 11982, -156, -1407, 1796, 1036, 905, 1371, -1472, 325, 3098, -1436, 6449, 2105, -11183, 1632, -1848, 1019, 1247, 1308, -1351, -823, 1679, -651, 978, 296, 1088, 3965, -1414, -11838, 139, 8664, -3452, -1804, 3088, -2044, -221, -1347, 1232, -909, -1323, -1409, 1399, 2557, 14552, 1535, -5088, 1699, 1012, 3333, 3940, 2294, 1189, -2256, -484, -3307, -1333, 464, -305, -744, -24, -20464, 332, 2968, 308, -649, 292, -402, 1226, -2575, -1505, -100, 1413, 733, -1024, 616, -121, -322, 67, -161, -708, 251, 462, -26697, -1112, -1381, -324, -286, 1091, 662, 15830, 13124, -1049, -1816, -355, 1848, -801, -1710, 2513, 458, -798, 386, -726, -356, -1240, -1133, -388, 631, 91, 1867, 2511, -306, 3097, 14399, -571, 2191, -2916, 2850, 761, -2442, 698, -2193, -2739, -1914, -4077, -4631, 12702, 333, 1162, -6248, -12466, -310, -107, -2465, -163, 1970, -998, -1253, 2007, 79, -426, -276, 365, 568, -520, 23642, 276, 1059, 184, 1081, 650, 2286, -191, 883, -1946, 246, 64, -225, 800, 910, -136, 1187, 955, -15604, 12847, -747, 874, 506, -646, 1920, -449, -321, 1152, 1341, 1653, 341, -32, 907, 673, 1045, 1245, -499, -10331, 4683, -1121, -3164, 3382, 6397, -1341, -769, 1186, 229, -1354, -7370, 155, 1858, 5617, -3487, -247, -783, 724, 508, 14029, -528, 1853, 1572, 580, -708, 528, -1394, 8922, 2284, 550, 3084, -1726, -3235, -700, 7132, -3540, -200, 3288, -815, -2189, 1232, 2412, 2088, -1101, 12592, 806, 1508, 1741, 13, 1124, -3883, -687, -8180, -3094, -3346, 1781, 11836, -657, -3469, 1429, -1822, -3433, 87, 3871, 651, -965, -1757, 6778, 109, 112, -131, 710, 11943, -12107, -3460, -726, 1002, -3803, 580, 2756, -1293, 116, 457, 581, 3834, -1678, -977, 1242, -2040, 232, -10034, 1644, -2290, 1368, 172, -3012, 1423, -2620, 3608, -10831, -303, -1610, 3246, 562, 5212, 448, -877, 954, 688, -8981, 579, 717, 1315, -952, 6817, 662, 
3218, 7213, -2116, 10446, 1012, 2270, -858, 10, -1066, 10618, 6108, -547, 3221, -893, 3888, -1088, -10085, -247, 1064, -3500, 3123, -2480, -2128, 2788, -2253, -9756, -472, -166, -680, 727, 74, -14151, -189, -1734, 610, -1169, 845, 94, -786, 394, -581, 500, 1981, -10940, 354, 500, 399, -1952, -373, 2197, -4712, -2582, 2751, 654, 613, -1254, 1406, 2056, -12518, 1583, -582, 4834, -1541, 508, -20580, 270, 1214, 515, -1082, 5, 7, -533, -28, 1270, -1307, 497, -57, -331, 933, 92, -856, -10458, -4576, -9991, 2426, 6552, -3022, 279, -562, -192, 1878, -2237, 4978, -1753, 332, -1462, -853, 238, 478, 9746, -7385, -10290, -8278, 457, 3121, 841, 48, -3745, -1298, -637, -1820, -468, -248, 1400, 394, -125, -950, 11524, -1860, 426, -773, 12669, -1620, -158, 1625, 1045, 768, -66, -12, 1625, -770, 559, 54, 593, 14468, 14994, 490, 543, -811, 700, -277, 900, -178, -2000, 475, 241, 950, 106, -1260, 874, -862, 18907, -1947, -844, 205, 1253, -83, 1966, 2300, -2694, 852, 2450, 661, -334, -518, -1136, -2377, 325, 1152, 511, 881, -22205, 898, 574, -582, -265, -1362, -253, -40, -780, -1967, 469, 1484, -818, -926, 958, -415, -7934, -330, 330, 1439, 1643, 77, 1034, -156, -12094, 3782, -5725, -520, -598, 2345, 3506, 5333, -322, 99, -48, 1490, 20, 11393, 3468, -1144, 7013, -3728, 7145, 1432, 1810, 26, -912, -6530, -1079, 1771, 95, 4007, -11346, -43, 249, -14616, -249, 1, -725, 244, 1053, 1815, -626, 408, -344, 1972, 2222, 2288, -2324, -411, -3993, 494, -706, -5078, -11695, -3645, -2090, 2465, 5893, -5096, 6815, -537, 5003, 1258, 185, -1555, -875, -2047, -170, -433, -194, -1020, 349, -724, -31811, 197, 251, -418, -222, -618, 278, 554, 363, 183, -898, 14, 350, 745, -2054, -1623, 806, -770, -1246, 1594, -54, -18501, 1516, 840, -86, 484, 514, 1209, 978, 564, -537, 34, -431, 128, 938, -1807, 832, -90, -29509, -642, 1397, -52, 523, -393, 216, 908, 9, -63, 710, -949, 3, -184, 175, 613, -687, -408, 27, -855, 18258, 1282, -948, -219, 2374, 1668, -4567, 1063, -2045, 12026, 461, 3074, 1050, -1788, 169, 
-13442, 612, 19, -2019, 685, 452, -152, 299, 310, -2327, 348, -215, 1634, -201, 2162, -10300, 12452, -3733, -420, 2388, 518, -2308, -160, 1552, 3347, 1650, 3293, -1108, 2065, -12618, 20, -42, -643, 202, -1298, 251, 2489, 1322, 2362, 3698, -190, 592, -12484, -937, 2072, 1531, 302, -409, -899, -1016, -388, 1103, 30006, 789, -1609, -548, -1002, 1055, 605, -955, 1557, 452, -623, 810, 597, -696, 10628, -1174, 606, 2628, -553, -2297, 6668, -2600, 787, 3504, -3606, 4087, 1052, 6276, -7619, 337, 2565, -13, 1205, -124, 1222, -28082, -79, -553, 628, 542, 1315, -609, 322, -895, -377, -694, 610, 239, -152, -2901, 9890, 716, 1030, -3306, 988, -738, 562, -2209, -1676, 4507, 1165, -12924, 866, -154, 3664, -367, -2580, -7286, -572, 2167, 118, 508, -4429, -480, 842, 2489, -1636, -2042, 1125, 1847, 2586, -5639, 3361, -760, 11189, 623, -282, 1353, -279, 515, -816, 713, 322, 417, -2820, -1114, -1563, 401, -21604, -1300, -972, -2298, -483, 2176, -830, 2135, -4084, 1095, 1950, -1937, 539, -374, 3197, 682, 472, -1368, -8095, -12026, 4833, 5586, 467, 2400, 148, 381, -138, 954, -459, -724, 970, 156, -1955, -1363, 560, -761, -1708, -1599, -17408, -1064, -1372, -500, 1160, 735, 441, -773, -228, 420, -1128, 260, 930, 12879, -926, -231, 1355, -850, 559, 11377, -1729, 2478, 961, 336, 1056, 5081, 9788, -555, 4067, 8664, -2720, -1462, 3012, -7280, 965, 1462, -4703, 3649, 2084, -699, -262, 408, -188, 2193, -2216, -4509, -736, -1039, -4848, -8243, -7958, -172, -1318, 9566, 4665, 3363, -3672, 1581, -551, -2024, 1630, 1543, 90, -1728, -792, -1799, 2571, 80, -412, -301, -2870, 1796, -5327, 111, 17342, 592, -2108, 477, 1541, 1266, -1062, -215, -2210, 223, 1215, -197, 87, -18340, -67, 804, -398, -118, -3457, -741, -1935, -704, -274, 566, -872, -1821, 12874, 5057, 2069, 1742, -6205, -6115, -1614, -294, 187, -5210, 1734, -1466, -2162, -2266, -642, -148, 440, 2, 233, -319, -637, -734, -230, 301, 508, -433, 311, -313, -1206 }, .cb2220s0 = { -15119, 7508, 1337, 4182, -2914, -3733, 2686, -470, 2249, -3901, 
1444, 3805, 99, -1771, -354, -903, -2755, -709, -4980, 214, -2750, -652, -1042, 1434, -1090, -612, -2574, 1274, 1310, -760, 1420, -112, 2776, -4843, 15060, -4929, -3942, -5721, -1628, -1142, 3023, -1435, 1402, 1010, 623, -3527, 2624, 184, 988, 98, 340, 16676, -1262, -1162, 3183, -4816, -592, 1019, -1406, -2478, 2371, -1004, 3944, 803, 5665, -2261, 16427, 349, 3113, -916, 442, -1754, -3551, -1351, 1563, -1316, 532, 343, -392, 1509, -717, -122, 2462, -929, -185, -683, -18780, 2682, -123, 518, -379, -5160, 245, 1940, 13964, -12311, 590, -30, 159, -1558, -1940, 36, -1528, -515, -1178, 856, -395, 29, -5854, -12943, 13286, -2572, 1049, 768, 3292, -3921, -52, -462, 1968, 4933, 630, 930, 1026, 2606, 319, -277, 6333, -2119, -4700, 2164, 1583, 154, 2107, -1467, 339, 634, -17240, -595, -3525, -2690, -1788, -476, -41, 165, -1016, -1456, -348, 11289, -2920, -3804, 2357, 12012, 3848, 1796, 2164, -5555, 4527, -201, 965, -4893, 3419, 6441, 1691, -77, 348, -769, 27319, -345, -336, -541, -320, 972, 926, -1026, 1052, 702, 224, 76, 742, 220, 6292, 8625, -3742, 4139, -5989, -5615, -641, -231, -837, 6156, 4141, 3792, 4746, 9972, 1800, -397, -2237, -2218, -7595, -2761, -496, -1451, 1178, -970, -1226, 2527, -2105, 1778, 1446, 1986, 9970, -13107, -985, -1142, -1367, -329, -4498, 590, 36, 2073, -1069, 862, 133, 2516, -27, 4494, -11602, -1638, 2524, 1449, 5684, -611, -9452, -2618, 5006, 3481, -639, 379, -2333, -498, -713, 382, 784, 269, -5692, -350, 524, -18705, -1042, -1349, 1210, 1770, 3964, 4908, -1131, 17535, -788, -1896, 30, -2682, 1044, 1604, -3740, 18, 1771, 331, 4279, 2634, -368, -447, -6995, -1224, -688, -5368, -236, -8872, 2449, -12189, 4465, 1895, 2484, 1315, -5446, -457, -575, 101, 2356, -1585, 3204, -104, -7244, -1678, -801, -2620, -4603, -11876, -1787, 2962, -1796, -3385, -411, 5796, 2900, -562, 835, 293, 7127, 4939, 721, -2972, -482, 121, -2694, -2277, 412, 12770, -342, 718, 3306, 502, -7281, -307, 552, 7158, 3289, -5051, 5230, -1185, 3024, -942, -1347, -283, -13937, -208, 
2576, -906, 1848, 5692, -2434, 175, 7837, 1872, -4536, -3341, -957, 14787, -1598, 9058, 3776, 407, -1734, 1259, -3011, -131, -3589, -614, 272, -2968, -1611, 3645, -8126, 2120, 4868, -5462, -13235, -3452, -6077, 5064, -1593, -1395, -2427, -1139, -958, 1585, -1330, 2178, -778, 3545, 2836, 7712, 5993, -432, 3575, 929, -7951, 115, 2180, 3904, -193, 1556, -252, -913, 2574, 11948, -4525, 1391, -8513, 4540, -12815, -3379, -4676, 1838, -5676, 1321, -6168, 1397, 1020, 438, -141, 3424, 392, -512, -1614, -1396, -318, -2451, 1545, -7132, -1763, -424, 3575, -828, 19216, 1978, 1624, -1969, -1667, -772, -2031, -781, 1732, 244, -212, 416, 900, -8960, 1002, -1077, 4667, -3527, 1586, -13109, -2442, 3829, 4358, 1056, 2960, -1087, -662, 4775, -6316, 6157, -3736, -2040, -187, 904, 1254, -636, 2032, -734, -1271, -2691, 3376, 564, -7769, -5482, 840, 14171, -5828, -966, 1685, -10192, -388, -434, 3706, 594, 2188, 365, 209, 298, 1825, -236, 12762, 1644, 3199, -468, 12876, 130, -2169, -3406, -3571, -4655, 2339, 10757, 1292, 2920, 289, -314, -591, -1631, -1778, -1296, -254, 469, -9408, 1154, 334, -4, -1922, 2787, 317, 416, -1703, 14075, 1601, 638, -2260, -973, -824, 2816, -2954, 3282, -3716, -882, -3447, 3058, -6701, 1233, 177, 3579, 3508, -3539, -10511, 7507, 7608, -1928, 2482, -719, 2278, 5167, 9828, 10572, -3635, -2750, 3407, -116, 3343, -3432, -3375, 982, 903, -3239, -444, -1574, -333, 9613, -1914, -532, 1879, -78, -17944, -7029, 1586, -3122, 360, -401, 1219, -2086, 3066, 878, 5780, -948, 102, 1952, 418, -416, 1002, 1380, 1297, -92, -640, -555, -1159, -28517, -1757, -696, 124, -618, 1590, 300, -598, 924, -190, -1734, -4196, -5345, -14068, 5971, 8293, -3878, -1448, -1777, -174, 921, -1555, -866, 560, 232, -1914, -4002, -772, 1960, -4945, 3424, 6492, 3675, -800, 5346, 4404, -639, 10697, 1631, -1446, -4469, -7804, 3721, 4824, -620, 1099, -2956, 5175, -2453, -4894, 2562, -1842, 4940, 1391, 2818, 1095, -4285, 6469, -1966, -14564, -2232, 592, 5570, -2682, 2651, 4678, -7444, -2387, 6812, -12757, 
-5664, -42, 134, -2861, -1780, -158, 1410, -4990, 673, 2083, -2639, 3019, -2, 8305, -1981, -2114, -54, 2892, 1659, -14913, -74, -1092, -1187, 2465, -2218, 791, -608, 3077, 26, -1096, -1692, 3234, -7116, -1835, -5244, 398, 10137, 698, 2298, 498, 7060, 6430, 1393, 2540, 487, -1534, -1926, -5139, 3425, 4533, 5067, -535, -924, 938, -1799, 16120, 2037, -3727, -821, 2986, 2314, -223, 1358, 9, 2697, -1806, -940, -3630, -1843, -2776, -2246, 580, -1678, 2427, 2126, -1935, 2956, 849, 18234, 638, 342, 1036, 249, -24, 2713, -1973, -134, -4469, -2014, -6162, -19776, 703, -50, 2295, -2294, 1971, 1179, 1014, 2374, -1480, 1513, 630, 1542, 24716, 3534, 2926, 662, -2886, -521, -348, 402, 1112, -371, 1587, 1822, 1880, 1284, 302, 1873, 1284, -924, 6420, 4650, 7986, 427, 361, -8276, 304, -11911, -1305, -2018, 189, 258, 839, -942, 479, -3162, -1195, -1138, 1560, -1850, -5304, -10132, -10533, -1301, -3147, -680, 56, 4260, -6867, -1350, -1094, -1385, 1831, -2, -941, 3740, 7701, -855, 3304, 3444, -4467, 269, -4092, 588, 13957, -1566, -3561, 1936, 2816, 2982, 1804, 2710, 419, 685, 4468, 488, -9520, -2738, 3974, -9978, -1681, -2418, 2340, -717, -899, -2855, -10470, 1030, -2346, -5555, 2559, 2180, -5324, 1832, 10294, 342, 11318, -2376, -3904, -1524, -3806, 1078, -1896, 7199, -3522, 1364, 2291, -911, -156, -4327, -778, -30451, -577, -158, 560, 2749, 799, 2689, 337, -301, -1218, 1243, 687, -880, -419, 40, -280, 4, 1834, 9908, 1953, 408, 1080, 8777, 3861, 552, -6906, -3546, -6666, 35, -1903, 4788, 5080, 2865, -233, 1031, -4519, -13752, -2417, -1742, -7389, 3191, -626, -411, -7351, 3063, -1801, -4377, -2974, -124, 2778, 2733, 349, -1191, -6528, -1699, 6907, 239, -2765, -5706, 3627, 2096, -20, 2285, 7164, 3523, -11582, 3616, -614, 6266, -285, 3643, 1506, 3665, 1261, -2338, 418, -5062, 4893, 2945, 1923, -2990, -4531, -8858, 2769, -5029, 2202, 3337, 10703, 716, 5614, -14982, -2366, -5415, 25, -1665, 4353, 3060, -2159, 1005, -1587, -368, -949, -2788, 1063, 1307, -59, -46, -6337, 500, -1194, 2914, 
2372, -1393, -1914, 3820, -1160, -135, 3777, -14151, 5208, -2290, 5738, 1018, 385, 1883, -2626, -9289, 1082, 1558, -1756, 2720, -519, -13050, -3672, 1759, -13, 3471, 4071, -5977, 167, -4210, 2219, 1344, -2412, 4497, -6946, 660, 8774, -3141, 6080, -4478, 2520, -609, -3080, -741, 7864, 7428, -333, 1154, -1849, 1478, 460, -338, -6651, -2480, 1692, 2104, 1642, 2720, 1017, 2759, -1822, -2668, -2265, -1019, -8926, 1487, 733, -15128, 5543, -4214, -7044, 666, 7108, 2222, -2454, 4995, 5108, 1481, 2242, 5743, -487, 9669, 295, 3539, 4836, 487, -1541, 824, -5946, 6692, -368, -1390, -6103, 4545, 2671, -12272, 3160, 760, -2080, 3523, -2752, -2940, -718, 2202, -5523, 2346, -5580, -5007, 6212, -5406, -11348, 1272, 5389, 2331, 3691, -1184, -3585, -4500, -603, -38, -5285, -531, 4844, -3850, 3944, -6525, -5723, -2313, -985, 879, 578, -3217, -3600, -2814, 1432, 11568, -1461, -1761, -4110, -4104, -103, -1803, 5195, -1477, 1348, 107, 3902, 1215, 3522, -3404, 9098, -237, 68, 34, -2524, -12040, -6183, 2122, 470, -1257, 346, -232, -1725, 5913, -1525, -5873, 1846, -11368, 1043, -1027, 4201, -3864, -4294, 7756, 1847, -3688 }, .cb2220s1 = { 32767, -2256, 16, 2156, 267, 1128, 1394, -1936, -488, -405, -345, 1068, 578, 1504, -1192, -405, 292, 1149, 4243, 152, 1036, 1782, 2655, -23349, -1100, -1933, 354, 966, -1554, 1173, -1186, 495, 618, 1009, -2715, 461, 5974, 939, 3552, 1325, 3385, -956, 2177, 2101, -145, -1000, 2326, 2466, 2822, 15822, -581, -713, 4398, 828, -3249, -3942, 1990, -862, 2272, 348, -2972, 241, -2678, -1881, -22307, 417, -587, 312, 280, -2524, 2380, 299, 3931, 178, 2910, -2544, -356, -786, 546, -73, -862, 240, -1653, 1286, -3875, -2072, -1477, 16800, -1148, 2099, 3216, 5174, 2177, 3042, -796, 414, -506, 883, 1837, 1451, 2864, 850, 2395, -414, 3254, -1937, -16379, -3976, 2178, -1473, 4759, -832, 8890, 3324, -3053, -407, -1530, -431, -1220, 128, -3472, 980, 52, -14716, 1732, 1931, -6518, -1784, -11113, 4466, -24, -8559, 105, 5478, -4116, -2213, -3006, 1738, -4189, 3310, -753, 1869, 
580, -885, 3089, 8146, -4990, -1825, -524, 3620, -6920, 621, -1064, 4633, -1509, 80, -10949, -2752, 476, -3684, 3547, -1967, 3364, 2887, -729, 7921, -4216, -3681, -14417, -3978, 261, -1146, -1124, -901, 777, 783, -2, -989, -1582, 3988, 7785, -6371, -2258, 3344, 354, 13289, 3339, 316, -3186, -2088, -1951, 310, -545, -704, -40, 4416, -392, -1033, 5650, 99, -3008, -3716, 2448, -3758, 9463, -1793, -130, 1705, 6501, -2214, 2970, -10476, 564, -5952, -541, 2077, -90, 6588, -2858, -1733, -9247, -345, -3170, 4986, 3353, -4868, 8873, 113, -5223, 1562, -163, -2446, -4459, -8052, 1106, -10883, 1185, -1756, -152, 3109, 181, -1427, 8291, 11419, -6265, 2116, -469, 5150, 1355, 182, -740, 779, -7754, 1868, 144, 3936, -60, -784, -231, 879, 17032, -2273, 1886, -538, 1015, 1798, -633, 1090, 1910, 128, -6094, -1946, -1570, -727, -18457, 498, 784, -4419, 1656, -21, 154, 2430, 3815, -41, -2708, -1594, 228, -784, 7284, -452, -7634, -12868, 3564, 5473, -1244, 2231, 28, 4321, -1464, 1402, -1358, 2241, 656, -1128, 1160, -2352, 3641, -680, 1816, 6864, -42, 1269, -280, -1265, -2048, 238, -653, 13571, 3874, -269, 7977, 2238, -1246, -2066, 4741, 1706, 3498, 595, 2559, 55, 593, 1681, 1612, 43, -2756, 2702, 2439, -2471, -809, 1890, 17032, -787, -4280, 1167, -1926, -4973, -1181, -2764, -4151, 2962, 3444, 844, 2446, 14013, 3326, -1195, -1829, 1588, 1765, -3140, 8562, -14425, 4040, 2003, -738, -1032, -3314, -2236, 548, 768, -2348, 436, 1755, 31, -4616, 1259, 269, 1543, -1393, 5338, -16463, 2900, -2480, 1659, 217, -5864, 3878, 5268, 1244, -520, -1202, 1238, 182, -1049, -695, -320, -6832, -5904, 2914, -2616, 2586, -10958, -3258, -1846, -4633, 2371, 3251, -3583, 2631, -4162, 3035, 2718, 616, 2890, 206, 16128, 979, 3551, -6864, -3221, 5881, 3692, 1718, 234, -2844, 1668, 102, 2687, -838, 988, 1116, 533, 4026, -7235, 5972, -13781, -3394, -3518, -294, -6383, 1675, 4507, 5444, 385, -1931, 930, 699, 1639, 415, 6720, 7854, 1514, 3192, -2253, -14786, -1307, 871, 1329, 1881, 6628, 2851, -85, -2284, -4538, -837, 
-2232, 269, -2227, 13930, -2063, -7540, 8978, 1195, 2717, -1282, -972, 1305, 3864, 2412, 2308, -4824, -3282, -864, -489, -1458, 2192, 15903, 2460, 2792, -4137, 1034, -359, 5, 2297, -6, -3859, 478, -1535, 2080, -741, 2030, -603, -2640, -1902, -8208, 3818, -1273, -8138, 2015, 9169, -3440, -1779, 4076, -576, -93, -1718, 744, 2563, 6744, -3841, 1355, 1590, -4196, -13924, 356, 13381, 2552, -2862, 2790, -578, 3562, 2711, -686, -3783, -489, 1230, 896, 1208, -1101, -3482, -2478, 772, 1254, 320, -1825, -327, 1070, -1712, 295, -18141, -2618, 1537, -603, 3782, -1272, -1901, 414, 169, -6574, -6966, 2711, -3292, 13204, -1324, 3620, 4962, 2835, 4177, 4861, -2378, -5534, 3701, -4224, -631, -3199, -653, 4785, -1045, -2097, 580, 2190, -140, 48, 3075, -1346, -810, 2016, 566, -2543, 235, -5930, 1956, 481, 19003, -3938, 6489, 2697, 4796, 3435, 7102, 3062, 1460, -5814, 2723, 4181, -4979, -2534, -2058, -136, 3554, -2684, 15252, 4112, -3146, 2812, 7182, -2642, 5443, -1043, -803, 2786, -1622, 1988, -780, 1482, -13015, -1762, -1377, -4005, 161, -9568, 8166, 1832, 330, -6484, 945, -4388, 1090, -524, 1556, -582, 320, 770, -938, -8757, 977, 1084, -7062, 3552, 775, -4708, -2281, -552, -10027, 4263, 1197, -672, -93, 5716, -3825, -4526, 1781, 9799, 4450, 1981, -3149, -9664, 3119, 3794, -91, 6710, 840, -1098, 11310, -2933, 785, -2573, 748, 1803, -1401, -1547, -4118, 849, -580, -1404, 1536, -9382, -1610, 2335, 403, -2939, -3015, -3753, -7593, 1640, 3346, -2594, -8028, 5485, 2189, -3369, 2106, 5369, -2573, -515, 1459, 6996, 1344, -389, -7009, 10332, -840, -3869, 901, -6449, -2348, -2461, -4103, -810, -2060, 1040, 117, 32241, -231, 945, 999, -1183, 180, 1443, 188, 855, -1634, 774, -202, 99, 1714, 286, -849, 1968, -9743, -15458, -859, -3726, 2257, 355, -167, -1674, 1808, -488, 1118, -1416, -1685, 2928, 1471, -1145, -536, 2307, -972, -1191, 1625, -1436, 378, 20178, -638, 1826, 472, -300, -845, -1045, 1074, -1041, -510, -39, 516, 4548, 2741, -10197, -2336, 3828, 2093, -4148, -9138, 4239, 2520, -3536, 
-3807, 2998, -2226, -6898, 4838, 2552, -2024, -5579, 1370, 11706, -7626, 1566, 989, -4934, -1345, -5962, 4259, 1158, -3712, -2710, -1037, 105, -2733, 1068, 3682, 3904, 2044, 184, 537, -3438, -1376, 332, 17812, -3170, 2386, -2090, 3481, -1352, 431, -1016, -1062, -564, -1752, -2602, 1299, 6720, 789, 1275, -9801, 5320, 2327, -4048, 4443, -7820, 1112, 1232, -1139, -920, -744, -845, -3754, 5958, -5388, 3336, -3578, -4027, 688, -7043, -136, -163, -1395, 13400, 1729, -1862, 2612, 321, -3874, 947, -990, -3164, 11487, 46, -1978, -2139, 1222, 3897, -9664, 3692, 5431, -3364, -3706, 180, -4009, 2563, -313, 3228, -1631, -9763, -9184, -6058, -4594, 1040, -3323, 321, -3233, 5035, -1919, -5525, 1899, 1196, -1834, -391, 549, -2114, -1436, -2624, 2441, 618, -27606, -841, -936, 1067, 1157, 230, 784, -755, 1798, -219, -1026, -1119, 320, -2611, -1382, 8776, 1151, 3739, -607, 2997, -7704, -5870, 1800, 1357, 4973, -9674, -5182, -50, -886, 2056, -802, -1909, 574, -1716, -6388, -2882, -3526, -3188, -543, 244, 9648, 5129, -5069, 598, -9049, 1834, -3375, 1369, 1461, -1295, -380, -274, 7258, -9353, -2401, 11915, -5087, 1505, 4211, -719, -902, 1762, -168, 642, 699, -2067, -933, 1092, -958, 715, -1978, -1968, -1613, -1263, -777, 1170, -9652, -9570, 612, -3935, 237, 386, 4237, -1468, -10172, -4964, 2919, -6428, -7184, 119, 3610, 59, 3168, -5474, -853, -5735, -1765, 3063, -1352, 944, -1934, -3500, 9282, 5920, 784, 90, 275, 3211, 2418, -8570, -10498, -2026, -1020, -2989, 1511, -41, -11462, -1980, 5296, 2614, -21, 770, -156, -2817, -4748, -8672, 3447, -7231, 4598, -1347, -689, -3198, 434, 56, -2065, 1798, 13761, -533, -1280, -796, 2481, 56, 1377, -5473, 9116, -1185, -602, 2547, -3693, -8880, 2978, 9093, 1829, 4844, -649, 316, -162, 1520, -5814, 4860, 199, -1330, -5182, -6269, 2642, 1220, 2816, -4098, -3981, -13264, -398, 361, 2768, -4786, 1023, -97, 655, -397, 2403, -1576, -386, -1112, 792, -1195, -759, 742, 729, -2916, -1020, 21350, -26, -3577, 659, -1263, 1378, -4339, 1880, 4842, -669, -1203, 
5936, 816, -8356, 3660, 1673, -677, -2370, 1652, 8710, -1254, 6171, -6868, -891, -6752, -169, -5678, -7588, -3247, 2982, 5281, -4941, -359, -3354, 851, -1609, -11194, 610, 261, -1936, 2715, -3540, -2488, 2086, 6110, 914, -3224, 1777, -1558, 937, 3736, -3109, 1903, 4250, -4478, 2636, 2292, -1451, 10231, 7600 }, .cb2220m0 = { -26430, -533, 1599, 208, -293, 2303, 704, 1586, -1064, -1630, 690, 1697, 623, 1786, 332, 682, 199, 12695, 475, 1288, -2471, -797, -68, 9659, -816, -2465, 546, -1421, 1596, -926, 4471, 2360, 5551, -900, 297, 96, 400, 936, 1548, -1066, -1625, 652, 1416, -118, -525, 683, 1545, 1340, 20684, 936, -1033, -773, 8416, 954, -4822, 4223, -10815, -312, -896, 531, 3140, -1649, 508, 10294, -315, -2078, 584, 1523, 118, 997, -11837, -605, -262, -1732, -613, 12220, -2666, -1802, -507, -4410, -100, 2127, -114, -886, -2806, 500, 1034, -2811, 12642, 1015, -9193, -4201, 238, 1096, -1159, 1619, 2534, 1644, -3465, 4797, 639, 2583, -1316, -9884, 948, 1479, 1186, -1760, -343, -1286, -1653, 678, -7439, 4542, -6295, -1600, -6978, 48, 448, 369, 1597, -3696, -2121, 1002, 2428, -11368, 5385, 827, -10674, -2252, 2240, 1230, -3074, -1894, 296, -2216, 571, 114, -497, -1675, 1311, -2297, 1843, -350, -856, 2067, 1198, -588, 270, -470, 2640, 274, 19586, -762, -11471, -623, -506, 4236, -10981, -214, -1856, 409, -1276, -935, -1681, 5116, 774, 3008, 4388, -112, -9493, -1108, 1454, 1385, 1065, 519, -486, 308, -1141, 289, 1424, -3672, -15989, -3738, -1592, -258, 3304, 62, 1441, 45, -686, -1070, -1616, -701, 2313, 1918, 4843, 654, -16902, 263, 1837, -4062, 2727, -709, 1524, -1628, 2025, -281, 264, 1238, -1023, -11981, -2990, -1293, 801, -9606, -604, -210, 1248, 4014, 3652, -11286, -2094, -470, -1330, 14523, -2388, 1413, -3968, 641, 2936, 161, -1687, -1260, 1722, -1968, 364, -854, -14386, 10146, 792, 133, 1746, 261, 3345, -408, 2036, 272, 1412, 720, -3302, -1495, -4334, 2210, 799, 17546, 2600, 1314, 764, 1327, 3433, -377, 4296, 2402, -1074, 470, 7220, -2556, 3326, -4338, -2086, -1945, 
11865, 3525, 1513, 1520, -1814, -13020, -929, -2001, -1496, 580, -3293, -3146, -2185, 1442, 390, -2026, -2141, -192, -18700, -2039, -4330, 1691, -250, 1451, -2913, 2832, -3284, 2899, 1529, -888, 486, -2381, -1459, -2663, 530, -717, -248, -1714, 12662, 1820, -11488, -1044, 3035, 3872, -2430, 679, 1075, 475, -593, 930, -1751, 405, -2308, 2148, 510, -2798, 445, -240, -6865, 2106, -11323, 670, 4342, 154, -7748, -1805, 5381, -842, -697, -709, 688, -498, 5525, 15212, -2006, -4146, -2452, 2392, -3522, -2023, 1306, 5522, 916, -3616, -287, -653, 333, -330, 4, -24886, 635, 119, -1949, 899, -36, -37, 2658, -133, 2064, -534, -549, -1745, -70, 32767, 1089, -869, 150, -599, -1146, -574, -424, 377, -648, -303, 590, 453, 1910, -351, 553, 304, -752, -752, -502, -42, -31211, -634, 449, 638, 1086, -1406, 1220, 802, -924, -1874, -212, 86, -200, -1140, 618, -621, -605, -10976, 1699, -603, 2056, -4448, -1519, 2564, -743, 12304, 1482, 547, -1589, -817, -217, -1633, -1089, -2270, 181, -634, 3890, 734, -1319, -2035, 3304, 13144, -9076, -4067, 70, 1309, 1067, -354, 1529, 1379, -1002, -3324, -525, -817, -1438, 10834, 1036, 12441, 1242, 2461, 2858, 2257, 430, -1177, 1142, -870, 844, 1102, 1208, -1482, 830, 17622, -2753, 6, 174, 4385, -339, 2157, -155, -68, -190, -1181, 29, -2046, -2140, 27, 949, 1889, 446, -54, 16696, -49, -3304, -1929, 1833, 3735, -495, -1225, -11743, -2259, 891, -1954, 2848, -504, 1164, -2489, 861, 579, -11547, 976, -42, 1477, -2428, -1561, 112, 74, -2721, 12046, 632, 1283, -1900, 1990, -1193, 1606, -1370, -2812, -1309, -1419, -12526, 3391, -4213, -2710, 269, 90, 14575, -345, 820, 6118, 892, 6302, -2825, 332, -3071, 2279, 3756, 185, -3029, 2402, 245, 1010, -273, -32751, -140, -600, 482, 1516, -462, 1931, 1941, 272, -310, 544, -422, -815, -1116, 803, -617, -1640, -4336, -11735, 3656, -1176, 1170, -6209, 2139, -1571, 2067, 1011, 9842, 790, 1702, -191, 911, 2771, -253, 794, -3862, -1885, -494, 2070, -2682, 772, 763, 4304, -15657, -2194, -1998, -963, -5222, -175, 238, 32, 
10067, -692, 2824, -474, 3016, -11994, -51, 713, 2423, 2864, -338, 4838, -1095, 215, -13471, -2, 704, 752, -14654, 1396, 484, 564, -886, -775, -1099, 775, -1035, 1661, -1013, -1118, 449, 822, 14253, -13238, -1084, -1107, -1672, 996, 472, 2237, -440, 1186, 1200, -2112, -1388, -1093, -1902, 555, -328, -1493, -2034, 426, -2144, -388, -20028, 1285, 1122, 730, 1661, -1576, -2084, 2930, 337, -66, 1591, 8685, 2361, 146, 1370, 22, 1371, -105, -4190, 371, -13252, 328, 1301, -995, 3689, 6422, -79, -1407, -384, 828, 840, 854, 266, 1222, 796, -550, -729, -1213, -87, 524, 1070, 22334, -2333, 574, 680, -624, 463, 4047, -236, 114, 1020, -692, 1575, -320, -3229, 222, 520, 996, 2104, -5404, -18197, -1105, -184, -1057, 10712, -2509, -7140, -2307, 1333, 3041, 183, 1241, -7861, -3060, 1432, 9, -1431, -2605, 2663, 273, 250, 770, -740, 6699, -10929, -7227, 105, -2983, -1203, 1637, -6072, -6630, 933, -1526, 658, 2612, 5377, -91, -66, 4944, 3025, 2723, -869, 142, 10532, 9858, -207, 3072, -2610, 0, 81, 1078, 2136, -266, 223, 931, -385, 983, 1029, 108, 2290, -491, 26685, 565, -140, -662, 680, -2206, -803, -777, -250, -467, 98, 2944, -12296, -4190, -2254, -748, -2076, 4780, -510, -221, 1428, -6162, 2693, 6238, -4030, 266, 6540, 2502, 5147, -4649, 1804, -10514, -3413, 2503, 2143, -1924, -3811, 3674, 4341, -1054, -3130, -1260, -576, 887, 25908, -773, 1186, 548, -606, -744, -995, 1320, -507, 279, 1803, -2451, 880, -31, -5, 1615, 770, -11818, 1062, -1126, 472, -297, -12126, -1197, 1912, -962, 1241, 2348, 2332, -3047, 1561, 3844, 720, -387, 371, 2942, 1174, -2347, 1244, 10148, -1620, -11788, 1315, -31, -1867, 3450, -1589, 5180, 3184, -2614, -13, 130, 107, 297, 113, -1407, 29190, -544, -173, 990, 913, -1848, -990, 1230, 264, 1896, -6974, -102, -2232, 3826, -2269, -5027, 94, -12612, 436, -5979, 1757, 1757, -724, 2378, 2584, 728, -1022, -7274, 668, 744, -516, 420, -11866, 246, -1357, 2406, 3674, -2594, 1638, -3037, -2402, 1525, -7304, -1078, 1772, 9264, 12366, 202, 2, -728, 684, -437, 1446, -3546, 
828, -2106, -2736, 964, -180, 6524, 2250, 514, -782, 675, 1418, -11225, 2760, -3970, -545, 9128, -6601, -556, -1966, -4625, -149, -198, -3330, -1575, -6198, 656, 674, 367, 1809, 155, -5126, 6109, -572, 4927, 1448, -1855, 1636, 8648, 2010, 8973, 3087, 10172, 34, -1183, -12, -1057, 192, -2955, 1034, -374, 2500, 9318, -4090, -5220, -404, -1022, -1458, -1367, 765, -1193, 1542, 302, -1337, -34, 1449, 1434, 2210, 404, -3277, -8024, 1363, -7591, 9096, -9179, 1176, -7311, 544, -8942, -713, -56, 2623, -35, 1623, 2212, 1733, -712, -1327, -320, -1966, 11352, -1276, -3804, -550, 520, -4848, 550, 1488, 944, 10756, -782, 5643, -2647, -6513, -3500, -2877, 1880, -6634, 2349, 256, 440, 188, -8428, -4580, 2479, 4763, -1807, -513, -4292, -1729, -6878, 448, -6706, -1162, 4938, -721, 5465, 1409, -8759, -898, -4254, -5230, -3886, -7969, 1730, 3656, 1198, 3537, 33, 4091, -2088, -7646, 1160, 2922, 855, -1254, -2616, -770, -685, -100, -577, -4927, -792, -2107, 9613, 2563, 5096, 6143, -3404, -8630, 4164 }, .cb2220m1 = { 32524, -324, 411, -34, -697, 818, -71, 2326, -142, -989, -1512, 358, -260, 3791, -575, 93, 224, 208, -1101, 32767, 1147, -203, 2015, 461, 668, -296, -3340, -38, 720, -993, 1765, -1344, 1323, 648, -997, 729, 581, 349, 861, -2035, 1791, -2142, -822, -1425, 820, -6555, -811, -15708, -912, 4835, 1500, -604, 527, -937, -640, -1240, 4692, 1259, 174, -12040, 450, 8196, 2796, -5123, 1595, 538, -101, -218, 5581, 367, -2700, 277, 2111, 2718, 1458, 155, -100, 3284, -498, 9961, -1505, -10336, -1170, 5337, 1032, -14947, 1154, -578, -11773, -945, -660, 669, 2340, -1038, 1520, 713, 2663, 422, -1242, 1918, -234, -1793, -1580, -271, -5628, -2010, -12209, -1784, -4417, -2804, -3123, -4316, 126, 6353, -2391, -2088, 836, -2550, 521, -1258, 918, 4471, -528, 4243, -615, 3453, -6683, 1784, 790, 13200, 700, 322, -815, 6049, -290, 928, -1121, -1531, -878, -1150, 1404, 325, -530, -435, -254, -804, -2536, 589, 8439, -1087, -16248, -637, -1528, 305, -1577, 642, -22699, -139, 1319, 588, -3079, 800, 
-597, -1408, -1150, 3145, -868, 3244, -1004, 1004, -1459, -11618, -4557, -3643, -914, 4238, -626, 4025, 3227, 537, -4285, 2010, 747, 1595, 1599, 5994, -797, -911, 2854, -3426, -8488, -1899, -301, -2146, -111, -522, -1852, 3075, -3864, -1531, 654, 193, -11264, 5561, 304, 525, 346, -2761, -1124, 1134, 8354, -12460, -1023, -7634, -2750, -1518, 5001, 1480, -1039, -502, 1455, 586, 1012, -1270, 12435, 895, 1169, 466, -10696, -3861, 4381, 1790, 767, -1808, -537, -1057, -2374, -2058, 9992, -858, -1568, -678, -3812, -1520, 1521, 230, -1716, 13418, -1930, -979, 3272, 1116, -4555, -559, -320, 12080, 13696, -286, 652, 2420, 1725, -277, 213, -1046, 1642, -576, -1514, -973, -1501, 77, 537, -606, 1144, -680, -568, 1104, 2176, -969, 1657, -784, 1107, -1056, -59, -5607, 64, 11913, -178, 8703, 3744, 276, -50, -12807, 1122, -6138, 1901, -439, 733, 6829, 3001, -61, -1005, 3816, 3987, -3588, -778, 2257, 12101, 196, 13796, 355, 1407, 989, 101, 1041, 988, 1274, -1478, -1127, 1320, -442, 3452, -1717, 1244, -466, -868, -323, 502, 1243, -70, 897, 958, 2781, -2492, 788, 744, -12324, 1111, -11704, -452, -734, 19574, -45, -584, -2387, -830, 603, 380, 787, -2962, 2046, 2524, -2403, 699, -4144, 1587, 573, 588, 238, -88, 31, -278, -32768, -1173, -745, 667, -188, 1221, -369, -261, 322, -2054, 651, 100, -2092, 315, 1558, 596, -407, -146, -1234, -30970, -71, 633, 536, -1345, 1819, 655, 680, -1453, 492, -1265, -1292, 1780, -68, 1008, 215, -19980, -521, -3148, -256, 193, 916, 453, 86, 116, 108, 1518, -1420, -1501, 688, 669, 1196, -1579, -942, 868, 804, 110, 1126, 202, 1086, 23516, 1070, -1623, 747, -38, -116, 1176, 554, -2361, 1008, 1085, 1972, -1794, -96, 464, -20910, -1208, -3857, -466, -2173, 2461, 2364, -931, -684, 3056, -719, -936, 887, -3149, 1004, 7085, -2985, -9393, 5142, -9621, 150, 174, 572, -2232, -390, 1356, 160, -10796, 2256, 2238, 242, 1663, 485, 12378, 1236, 688, -2908, 1084, 1047, 4850, -72, -642, 1604, 152, -850, 670, 968, -3207, 1690, 105, -2516, 11539, 390, -1117, -588, -10771, 
2879, 4742, -8351, 1571, -850, -605, -1959, 395, 12324, 1750, 2290, -92, 774, -2897, 1025, -1841, 546, 3904, 3908, 11494, 9, 1340, -11976, -525, 1522, -43, -43, -1860, -6160, -199, 2479, 4593, -2876, -2985, 1044, -62, -812, 10424, -2489, -1098, 796, -1292, -2070, 1096, -1944, -2145, -4374, 1041, -1014, 9036, -2142, 328, -8232, 152, -13336, -2225, 13716, -367, -558, -1942, 161, -472, 2224, -748, 3550, -809, -493, 2121, 1234, 772, 5146, 2485, -2282, 7546, -1441, 1595, 9176, 6208, 1292, 1704, 3968, -1500, -1974, -3519, -2826, 149, -903, 504, -187, -940, 121, -215, -615, -257, -1954, 958, 2057, -191, 21258, -726, 2081, 1278, 1670, -854, 2730, -8132, -530, 1004, 2574, 1430, -2536, -10851, 1389, 155, -140, 2158, 2762, 3807, 3850, -3728, -954, -11366, 709, 14727, 514, 694, -87, 857, -249, -419, 617, -418, -1144, -32, -2182, -839, 1449, -1072, -785, -246, 13634, 12488, 358, -447, -2262, 926, 1023, -901, -345, 2260, -1530, -1466, -2973, -2170, 2090, 44, -23476, 603, -1740, -345, -438, -3004, 1322, -3088, 1274, 341, -348, -534, 1055, 3026, -932, 514, 8958, -15489, -374, 1077, 1166, 48, 1016, -918, -27, -410, -266, -1401, -3888, -2918, -2146, 2815, 1834, -875, 162, -678, 1876, -2033, 1999, -12854, -1563, 192, 414, 782, -3109, 1432, -4197, 2358, 8517, 784, 1256, -1362, 2938, -11355, -5184, -10314, -39, -2182, -1686, 241, -195, -232, -6169, 206, 181, -470, 1008, -599, -284, 733, -836, 648, -138, 2078, 313, 24432, 548, -441, 1446, -1628, -1218, -64, -716, -2456, 1987, -352, -1025, -1951, 1320, 350, 744, 2598, -984, -18328, 622, -4, -1572, 893, -3043, -4365, 127, -1, -226, -1696, 1332, -1360, 6756, 2596, 12059, 370, -3690, 497, 585, 1619, -778, 9174, -2046, 2214, 2004, 1133, 1069, 132, -250, -1555, -906, 561, -12904, -1039, -8006, 1876, 2300, -1116, 1895, 1782, 3734, -1108, 1338, -1409, -248, 16117, -1458, 156, -2626, 64, -1199, -3544, 4283, -3390, -404, 1426, -907, -2768, -780, -34, -18656, 2003, 515, 3171, -653, 762, -3352, -154, -1171, -452, -1590, -5936, 519, 1210, 502, -409, 
2262, 695, 1028, 8652, 2532, -2636, 3472, -1186, 1350, -651, -639, 8382, -3234, 630, -10323, -2285, -1916, 826, -1449, -738, -344, 1022, -3248, -20921, -200, 568, -84, 777, -1570, -2756, 2834, 26, 3878, -1709, 101, 1433, -2238, 305, 61, -1041, 2399, 628, -1509, -388, 946, 733, -1538, -650, 19935, 478, -10696, 850, -682, 447, 2311, 35, -1258, 2332, -11417, 1743, -834, 660, 3170, 2378, -2734, -762, -1151, -1802, -9324, 4625, 2304, -1186, 1180, 4894, 662, -7067, 869, 613, 1802, 4839, 3412, -5460, -862, -4202, 7876, -1057, 2872, -1336, 1731, -10788, 1088, 3433, 42, -939, 2479, 6425, 991, -1621, 3222, -2464, 2988, -29, 481, 11606, -2800, -8315, 7660, -3385, 1217, -728, -3670, 684, -2295, -724, -567, -2150, -106, -1920, -2143, 3465, 1968, -1089, -11953, -2704, 3049, -1351, 7225, 5727, -525, 2639, 1955, 2259, 6489, -1867, 1544, -3199, -4992, 2420, 4119, -2860, -9505, -2152, 10204, -1133, -1201, -1468, -2989, 4658, 578, 1115, 368, 1570, -776, -503, 1554, 1329, -696, -760, 575, -1527, -3865, 8372, -3378, -8137, -8392, -3471, -1854, -4852, 5270, -634, 608, 1289, -7660, 4983, -1266, -2070, -906, 3291, 2459, 4807, -4241, 5773, -2258, -4500, 2634, -13176, 6412, 282, -5849, 294, -626, 888, -1088, 656, 192, -630, -3405, -12469, 2882, 2184, 3920, 2715, -6852, -1111, 869, -161, 341, 1856, -9450, 2719, -579, -3840, -8763, 1153, -3532, -571, -766, 8301, 2936, -10501, -1073, 10068, -2930, 6308, -2747, 3093, -1710, -3865, -1464, -4447, 446, 898, 5386, -1074, -4651, 6205, 455, -1773, -1270, 6986, -2493, 4076, 10605, -2522, 977, 4098, 1153, -434, 4071, -2890, 2920, 9175, 2276, 4699, 642, -1067, -968, 508, -1752, 728, 3260, -500, 1414, 5554, 2761, 1973, -4704, 2127, 1397, -1070, -14536 }, .cb2224l0 = { -12451, 389, 917, 1238, -626, -904, -1877, 2328, -12808, -1345, 406, 80, 383, -3841, 1188, -907, 2369, -13409, 11191, -2547, -532, 762, -1627, 680, -2305, -811, -1118, 3232, 3413, -2010, -453, -6816, -4100, 1643, 11209, 933, -2272, 1440, -2465, -6862, 186, 1563, -8468, -1832, -1166, -596, 
-326, 105, -115, -352, -624, 31621, 129, -301, -615, -313, -176, 620, -5, -1354, -3563, 678, -301, 621, 904, -769, -1314, -956, -2294, -362, 381, -2398, 17085, 100, 3962, -830, 18705, 237, -1296, 3534, 1452, 259, 1690, -3106, -3624, -316, -16, 5900, 2195, -1008, 14335, 14173, -1637, 1130, 1110, 499, -1516, 500, -720, -494, -1010, -1264, -773, 1389, 212, 8036, 780, 608, -415, 931, -301, -2186, 2256, -706, 12972, -3461, -3695, 2073, -2768, -1525, -7539, -441, -753, 4558, -8171, -1751, -6885, 4077, 6714, 53, 1090, -3006, 3688, -1162, -59, 302, 928, -450, 238, 10809, 353, 698, -476, 172, -2198, -4377, -7518, 1605, 6348, 5147, -165, 165, -463, -93, 1251, 671, 587, -402, -227, -462, -27960, 215, -56, -958, -657, 508, 98, -2811, -1443, 3076, 6218, -9760, -10465, -770, 345, 3076, -116, -2884, 2215, -2652, 1306, 2638, -124, -317, 366, 1461, -295, 5073, 460, 1920, 12216, -7032, 6816, 3037, -2630, -1087, -1315, 123, -582, -2137, 5061, 291, 1740, -214, 1920, -3470, 10895, 9491, 3558, -1256, -448, -10304, -2391, 1890, 484, 11057, 6636, 422, 2316, -1663, -348, 633, 1200, 1788, -1124, -24435, 140, 869, 738, 223, -1429, 602, 433, -196, -1127, -1937, -879, -310, -564, 1022, -4380, 7247, -3938, 4461, 2219, -8465, 9266, -4564, -3169, -3463, -477, 749, 2460, -776, 294, -171, 1072, 1748, 1000, -208, 1908, -998, -1898, -10485, 2360, -11950, -2412, -2609, 3885, -2738, 1348, -559, -1342, 9366, 1560, -816, 1178, 342, -175, 1286, 3014, 10641, 246, 3128, 6618, -305, 10906, 6359, -4395, 1415, 196, 11136, 1772, -3047, 3313, -1231, -1974, -3021, -1480, -1345, -830, 1551, 2521, -506, 7821, 7715, 5078, 8215, 2102, 1552, 2247, 3766, -3158, -1811, 631, 3980, -397, 9030, -1267, -1974, 1539, -360, -315, 796, -4749, 2076, -1017, 717, 2290, 11212, 9365, 1626, 379, 2060, 1329, 4, -25, -1348, 566, -1266, 1670, 2166, 13123, 42, 2416, -2170, -6380, 172, 316, 40, 300, -487, 402, -220, 846, -894, -1413, -2227, 1962, 19478, -14756, 14377, -582, -770, -186, -1008, -1520, -722, -885, 2622, 311, -753, 480, 539, 
-1011, -1748, -832, -603, -2015, 869, -14860, -600, 2110, 484, -5874, 1532, 3290, -222, -4670, -33, -794, -2061, -1185, -96, 337, 515, -1887, 26, 20283, -455, -799, -62, -1083, 236, -1721, -569, -1259, 361, 1090, -226, 1480, 13367, -638, 940, 3736, 6419, -5995, 830, -6599, 4549, 1583, -9001, 1104, -1281, -1270, -94, 1104, -2076, 652, 2263, 1465, -25, 9046, -8139, -2646, -13200, -534, -15244, -1448, -1390, 452, 584, -314, -1192, 951, 885, 396, 776, 1303, 1298, -448, -32641, -234, -62, 31, -164, -1042, -82, -26, -272, -559, -164, 669, -500, 516, 1347, 9615, 1123, -1346, -1898, 8341, -10583, 2286, -5233, 1503, 454, -2024, 4248, -2298, -2117, 13390, -849, 2078, 1096, -651, -12232, -374, -812, -3729, -829, -144, 1213, -469, 1112, 1146, 816, 818, -912, -967, 907, 12, 2443, -759, -1833, -174, -838, 488, -1560, -18242, -558, 5510, -1316, 1758, 3957, -7130, -1394, 4962, 3870, -1907, -9247, 2217, -3880, -4413, 1893, -3085, -202, 599, 1307, 1574, -1070, -2593, -2722, 9506, -10170, 1105, 4879, 2208, 38, 5596, -5990, -3205, 35, 9405, -219, 618, 1308, 353, 3457, 1712, 717, -12937, 25, 2176, -2590, -1223, 528, 1318, 4588, 7678, 5743, -8430, -4487, 1364, 8082, -1727, -387, 469, 3172, 401, -2771, 694, 14554, -2278, 3640, -11084, 924, -593, -3841, -4338, 227, 750, 2974, -2834, -1765, 2133, -1181, 5149, 11758, 11949, 3538, 2442, 2801, 1457, -822, -3419, -2468, 191, -646, -975, -1271, 832, 3088, -495, -10022, 1817, 1319, -880, 1342, -1448, -3597, -3310, 8753, -161, -6550, 1422, -640, -508, 11542, -277, -165, 837, 7389, -942, 11009, -97, 1548, 1418, -445, 2105, -946, -8676, 5274, 8842, 576, -1392, -1737, -1276, 5491, 312, 3624, 2806, 2157, -537, 1656, 1982, -1300, -146, 463, 496, 16792, -140, -1755, -832, -2123, -399, 5811, -702, 2891, -3630, -1843, 346, 508, -364, -498, -558, 32048, -744, 90, -372, 430, 704, 871, 139, 772, 696, -108, -18, 310, -411, -798, 465, -165, -321, 745, -27861, -752, 499, -215, 172, 35, -196, -770, 274, -546, -96, -470, -8976, 9156, 581, 904, -4644, -7801, 
3525, -607, 6444, 4058, -696, -1107, -632, 1475, 196, -933, 883, 1101, 278, 433, 544, -497, 4, -1882, 1504, 594, -30386, 218, 211, 850, -989, 319, -867, -42, 754, 498, -70, -562, 660, -11561, 54, 803, 425, 966, -1017, -1224, -12630, 1834, -41, 98, -1083, 3508, 1750, -1751, 72, -503, -38, 22211, 252, 88, 221, 690, 82, -1340, 508, 638, 832, 482, 51, 7954, 2702, -1176, 8830, -311, 2536, -6072, -4147, 5234, 494, -157, -1289, -5678, -1617, 1508, -140, -55, 713, 440, -32583, 105, -394, -613, -972, 578, 1122, -32, 114, -228, 342, -1237, 1123, 1126, -188, -106, 11308, -3787, 563, 3423, -9926, 1623, -2551, -1448, -4125, 918, -1366, -476, -66, 4, 761, 164, -61, 20445, 238, 296, 492, -1126, -98, -1201, 14, -1840, -865, 1178, -869, 105, 907, 248, 1538, 2990, 11691, 7783, 1566, -6704, 2397, 594, -1825, -383, 4264, 1911, 468, 1018, -676, -2676, -7756, -2623, 10705, 2710, -8078, -5256, 1699, -2100, -355, -2086, 10828, 611, 18, -830, 978, -4181, 1324, -5262, -327, 1796, -9777, 1306, -1934, -8930, 9520, -2364, -3997, -10209, -6326, 1394, -1758, 868, 1192, -2916, -23, -1586, -296, 438, -279, -14171, -1554, -206, 2383, 506, 1181, 8298, -491, -2771, -4286, -7116, -1680, 506, 1729, -12965, -925, -985, 420, -1746, -267, -478, -11763, -1030, 187, -3878, 1516, 2472, -371, 29, 809, -1700, -152, 560, 1833, 14397, 968, -96, -3242, -2497, -76, 2096, 9593, -1200, 446, 1505, 8058, 1722, 501, 923, -1171, -9516, -2536, 7368, -2, -5304, -2440, -352, 510, 320, 301, 120, 687, -942, 137, 824, -316, 1312, 510, -1133, -27448, -404, 1041, 272 }, .cb2224l1 = { -14840, -1361, 12733, 798, -496, 1691, -1668, -1730, 928, -3233, 338, -578, 156, 784, -787, -242, -618, -853, -1282, -11766, 3970, 12178, -2034, 244, -3411, 300, 159, 3494, -3060, -1459, -2484, -10680, 752, 227, -1612, -922, -549, 158, 2260, -7640, -4479, -4075, -2412, -7707, 600, -12358, 93, -1666, -795, -13060, 61, 511, -2102, -2122, 364, -157, 2310, -1552, 1260, 158, 9503, 7050, 7, -5902, -7098, 444, 3736, -1836, 3109, -2328, 457, -871, -327, 
-780, 661, 8684, 2530, -268, 954, 1380, -1029, 418, -136, -3515, 1953, -1688, -8623, -3292, 7758, 2796, 11643, -931, -501, -873, -444, -1342, -13900, -246, -283, -1779, 998, -1318, 408, 1505, -462, 10667, -1813, 78, -16514, 360, -2029, 942, 1674, 171, 317, 244, 1183, 724, 760, 1634, 863, 793, 126, -326, 980, -629, 22219, -649, 1274, 717, 1355, -1853, -1792, -1017, -2104, -768, -1708, 2302, 2353, 11167, 10734, -3412, -2266, 75, -104, 425, -880, 2072, 2934, -930, 270, -2414, -925, 1023, -746, -236, -1620, 825, 1324, -101, -19348, -1291, 585, 2165, 2891, 3662, -577, 1800, 408, -1486, 107, 351, -319, 1104, 956, 403, 628, -277, -57, 938, -32768, -71, -441, -208, -32, 191, 314, -171, 613, 749, 844, -472, -444, 952, 42, -8026, 2720, 1911, -2780, 12311, -122, 3569, -91, 6048, -776, 1694, -63, -1272, 3581, 1622, 2538, 190, -13108, -820, -3056, 1189, -1428, -244, -752, -6187, -3473, -697, 1368, 1043, 7702, 352, -140, -12999, -80, 12672, -1473, 3113, 1505, 667, 2392, 1767, 537, 1949, 657, -130, 980, 1743, 8269, 2380, -2311, 197, -651, 2531, 553, -1117, -396, 472, 4565, -12672, 2322, -360, -12766, 2205, -2651, -10690, -218, 586, 5229, 34, 59, 1730, 1226, 2106, 4008, -1878, -9520, -1366, -1174, -290, -1037, 1642, 1234, 305, -1279, -642, 1126, -13199, -29, 642, 2928, 1936, -260, 588, 11690, 9282, -3362, 7732, 1073, 2738, 4688, -1507, -1461, -2271, -1131, 1969, -2152, 1637, -774, 66, -1190, -206, -491, -1080, 644, -378, 367, 17980, -1583, 2162, 918, -121, -432, 115, 5, 791, 1968, -2287, -1574, -9545, 11146, 3540, -4700, -515, -4548, 881, 591, 1044, -259, -978, 2, 232, 778, -198, -1161, -378, -83, 421, 282, 26564, -801, -1628, -1983, -301, 931, 886, 2196, 1453, 752, 2956, -3478, 490, -1420, 13303, 1293, -9466, 462, -12829, 11130, 8061, 593, 3697, -611, -534, -698, -1148, 1598, 293, -726, -698, 289, 180, 876, -369, -43, 234, -21629, -1448, -753, -480, 956, 994, 531, -916, 630, 720, -2300, -9544, -1418, 993, 2130, -2359, 2460, -339, -277, 1577, 12206, -3507, -1280, 1938, 871, -1850, 
-809, -3364, 6918, 1134, 5010, 8772, 2103, -9775, -1404, 5148, -1494, 1549, 1761, -812, 654, -611, 822, -229, -384, 10466, -337, 2207, 131, 2818, -2925, -3374, -8786, -8552, -2282, 88, -1058, 8571, 2900, -529, -1569, 1882, -981, 204, 2955, -4227, 4196, -3041, 10804, 1822, 82, 1936, 2380, 12992, -5659, -3449, 1329, -1668, 1291, -1726, 8328, 314, 2737, -677, 2384, -910, -878, 687, 640, -721, -912, -12772, -2079, -398, -1788, -2516, -8711, -1038, -985, -7151, -9057, 890, 459, -298, 918, -10061, 848, -716, 1822, 836, -9516, -985, -1379, -409, -2237, 1036, -1082, -1704, 1333, -1432, 11463, -2355, -5975, -1674, -640, -554, 8352, 2732, -5251, 4243, -354, 3662, -592, -9317, -1205, -1084, -995, 11288, -2098, -1620, 2367, -1286, -5312, -64, 540, -2327, -2703, -2013, -8649, -1306, -948, 1443, 664, 2400, 4706, 4061, 387, -20, 1859, 9283, -18175, 806, -1401, 1253, 596, 2176, -1682, 2209, 733, 1404, -6652, 2754, 950, 2346, 3629, -6875, 5069, -9302, 1472, 942, 1184, -10432, 960, 3987, 1985, 421, 300, -716, 938, 500, -160, 226, -87, -1648, -1857, -1977, -323, 2305, -13843, -4148, -2978, 5430, -3422, -1138, -2146, 1548, -1430, 734, -339, 8598, -4568, -496, 477, 4969, 2593, 2842, 8645, -2365, -7455, -2687, 249, 7516, -53, 219, 1139, -668, 566, -522, 1289, 33, -141, -920, 2526, -2797, 16456, -2000, -758, -194, 10984, 187, 1686, -4799, 9671, 1838, -1224, 1325, 656, -5434, 3207, 1813, 1833, 14375, 12259, -95, -536, -1746, -3568, -442, 964, -1472, 1345, 2692, -589, 520, 616, 357, 326, -1363, 28603, 700, 473, -908, -1129, 1046, 1106, -471, -472, -980, 29, 574, -350, -545, -585, -1936, 279, 882, -880, -52, -30552, 371, -154, -1275, -1914, 104, -110, 1122, -719, 729, -743, 360, 766, 198, -11674, 612, -10602, 1157, 186, -3132, 3070, 1535, 155, 774, -9432, 4966, -6717, 320, 5167, 112, 2727, 11228, 1368, 1864, 1197, -1519, 1504, 17863, 49, 2212, 611, -1788, 2932, 395, 32, -566, 2425, -9457, 673, 670, -247, 1617, -12578, 1408, 462, -14935, 1438, -808, -1850, -784, 1856, -1648, 767, -1452, 
-1652, -1621, 1016, 1428, -11203, 4217, -6410, 2570, -1016, -1720, -9036, -390, 62, -1245, 3027, -255, 1646, 1358, -907, -864, -118, 874, 268, 252, 104, -926, -552, -1206, 965, -208, -24472, 890, -1516, -630, -885, -804, -374, -22520, -1143, -777, 532, 185, 603, 1775, -1887, 413, -458, -1036, -211, 2693, 6976, -9498, 1437, 10163, 2450, -1574, 4941, 884, -470, -3366, 4664, 420, -568, 5703, 10, -1692, 143, 1592, -10966, 2891, -2961, 3938, 1990, 1726, -5247, 3326, -6575, 584, -277, -441, 1679, -520, 1339, 1077, -11462, -267, -351, 201, 10939, 4150, 3890, 1484, 2615, -676, -448, 2316, -1278, 9734, -3039, 2841, 964, -7557, 156, -7228, -120, 5533, -4322, 1796, 2555, -9912, -3038, 2236, 1190, 222, -1684, 3273, -1768, 6233, -6442, 8545, -49, -45, 2366, 293, 308, -689, 308, 368, -452, 1125, 2326, -2335, -17793, 2027, -779, 734, -2032, 1246, -2898, 4174, -74, -40, -3105, -2135, 996, -12714, 3614, 4936, -1928, 1528, -4158, -1791, -2318, 907, -326, 22513, -660, 1022, 434, -564, 28, -112, 252, 372, -842, -2, 648, 2323, -614, 23377, -263, 486, -408, -362, -821, -724, 972, 1248, 444, -1741, -420, -1371, 1088, -565, 22, -394, -64, -292, -103, -501, -30510, -294, -266, 433, -700, 742, -756, -407, -961, -148, -1416, -1041, -481, 121, 346, 10240, 12629, 1476, -2647, 1350, -2012, -262, -5621, 714, 4398, -2732, -10473, 9834, -5165, -991, -557, -2733, -3460, 5779, 659, 1472, 2029, -2339 }, .cb2224s0 = { -27522, 2628, -2486, 277, 874, -2351, 2725, 915, 994, -1209, -439, 2936, 46, 1014, -1816, -3561, -14386, 3113, -10400, -1025, 2114, 1328, -278, 1182, -1820, 3928, -1062, -282, -1327, -1468, 5975, 2342, -630, -4217, 10116, -1254, -2646, -5210, -9942, 1904, 21, 504, 2325, 1443, 6470, 2598, 8130, 810, 304, -1059, -645, 14634, -3198, 4277, -669, -7170, 1554, -2321, 2386, -1072, 2483, -4141, 2841, 3414, 8014, -3141, 10857, 6634, 3138, 3199, -320, 36, -1366, -4129, 3157, 2602, 4273, -2435, 2645, 2986, -3712, -3995, -5476, -4693, -1664, 6384, -11201, 1320, 2184, -5102, -2984, -1569, -2116, 
-1513, 14284, -11182, -2925, -731, -1321, -6363, 1483, 3463, 1292, -2065, -357, 9108, 6371, 3840, -6905, -8918, 2906, -1658, 757, 1998, -580, -708, 2198, 1867, 960, 4522, 1896, -1674, -4943, 2695, -2465, -2078, 9755, -4853, -2602, 3466, 3897, -3633, 4918, -2049, 3730, -1982, -10085, -3458, -1866, 32, -1706, 3648, -308, -942, -1630, 1730, 512, 14612, 3415, 974, 3079, 765, 897, -270, -1813, -1533, 1118, -2805, -2764, 1130, -1798, 4594, -3134, 964, -20082, 2574, 32450, -1379, 52, 358, -226, 1902, 257, -1071, -650, -399, -381, 2073, 2310, 2164, 8221, 1433, -629, 1440, 1120, -3362, -4642, 2000, 378, 1208, -2648, 4534, 3307, 13200, 2780, 3100, -3194, -10606, -11563, -4491, 2218, -4500, 622, 1313, 2682, 3003, -1387, -3886, -1567, -4864, 10899, -20606, -1606, -60, 602, 125, -730, -1112, 979, 325, -13, -185, 1241, -288, -552, 6042, -7049, -7359, -1456, 493, 11204, -65, -2170, -5248, 2248, -1046, 591, 2085, -2844, 244, -3454, 581, 1315, 3043, 304, -620, 405, -19944, 769, 1076, -1456, -694, 2560, -1046, 2514, 14552, 1586, -7027, -4710, 1366, 1552, 4354, 3296, 462, 600, 500, 3225, 5083, -792, 3199, -698, -3589, -2596, -3350, 2758, -3019, 5664, -9387, 4716, -3125, 3306, 6268, -592, -622, -4144, -6290, 4990, -748, 1854, -1042, -2996, -4279, 338, -1864, -8639, -11208, 932, -722, 1788, -1927, 450, 2191, 11828, -6400, 5364, -2236, 3212, 8340, -3229, -2846, -4676, -1825, 2628, -303, -589, 7728, -4216, -3866, -4400, -194, -11316, 5646, 3716, 4827, 232, -583, 308, -1833, 2153, -2508, -46, 857, -9587, 2768, 5136, 1462, 5142, 7990, -3424, 1067, 7462, 4944, 98, 1014, -4750, 13824, 1130, 2334, 9393, 2416, -4519, 27, 2000, 929, -204, 481, -2780, -3720, 1267, 269, -5383, -1999, 1249, -4238, -9351, -7440, -5964, 6154, -6827, 3112, -2613, -164, 1604, 1245, -50, 8619, -4044, 4652, 2846, 8359, 5345, -2902, 2295, 4801, -5016, -6270, 2893, 2732, -3510, -2613, 4548, -6376, 4510, 10566, 1859, 1038, -8381, 2782, -1622, 159, -1035, -3232, -3766, 1580, -720, -4476, -3863, -920, -2135, -458, 352, 
-2645, 3029, 301, -1145, -478, 3696, -11700, 9930, 6649, 7290, 2362, 17226, 3238, 1786, 662, 971, -736, -647, 1745, -506, -777, 1458, 2406, -1417, -7933, -846, -2654, 1104, 618, -2783, -10168, -3322, 9498, -939, -2342, -1876, -1914, 84, 3468, -6533, 7796, -3797, -1318, -2183, 1310, -895, 4943, 1062, -4468, 142, -244, 884, 613, -13963, -5853, -947, 18703, -964, 1090, 1070, 1388, -1572, -1110, 671, 1706, 620, -262, -2421, -2277, -5665, -5212, 4994, 2379, -593, 2048, 14489, 1165, -1775, -2093, 2466, 419, 404, 5429, 3089, -1350, 1975, 2281, 60, 599, -1600, 2286, 2358, 6698, -16423, 3760, 666, -1309, -1346, 2786, 2364, 1448, 1114, 17956, -5301, 2430, 1178, -164, 2195, 3927, -122, -737, 1468, 307, -1863, 1592, -7714, -2428, 958, 220, 59, 4124, -1945, 11151, 8604, -2077, -4787, -4578, 1096, 2685, 6478, 8314, -6221, -3842, 2173, -43, 104, -2510, 3109, -2324, -4238, -4709, -3233, 3228, 11454, 2428, 578, 780, -1096, 72, -22624, -1421, -4104, 226, 464, -1726, -1971, 2068, 1142, 1412, 1412, 798, -2605, -3451, -1104, -2224, -2250, -3470, -572, -1420, -1292, -58, -217, -21417, -172, -6368, 30, -2170, 95, 378, -2926, -2180, 2820, -683, 2018, -4313, -13469, 5396, 1808, -592, 4732, -6602, -5602, -983, -4130, -477, -1236, -2263, 3992, -12962, -1778, -2631, -2421, -746, 1964, 1754, -760, 2753, -116, -3860, 10246, -448, -1318, -100, -10372, 1420, -210, 2768, 48, -2373, 7721, -3217, -328, 1543, -2527, 3709, 4024, -916, -4588, -726, -4302, -982, -14714, 3615, -1190, 9051, 199, 2252, 1348, -4204, 693, 1241, -14160, -2460, -2017, 2997, 766, -360, -450, -2919, -7976, 3210, -179, 8935, 670, 1155, 6888, -2249, 2729, 1810, 6283, 684, -9717, -1763, -921, -4578, 3941, -6408, 1431, -2742, -91, -2094, -2118, -9752, 2801, -2497, 147, -5901, -5270, 13170, 2810, 1576, -3191, 10253, 4226, -1340, 2456, 1079, 12541, -5124, -8356, -1000, -558, 180, -2070, -1880, -5718, -687, 10549, 1066, 220, -4147, -695, 3648, -3460, -3143, -1623, 2150, -11222, -2566, -6395, 3552, -4176, -698, 1248, 112, -4628, -960, 
-724, 1191, 2084, 15207, -346, 371, 190, 5345, -4283, -7482, 1354, -4424, -3775, -4143, 1444, -14876, -589, 2498, 1305, -486, 1628, -867, 1584, 1094, -10, -1260, -1046, 2528, 27472, 910, -1069, 829, -117, -1097, 770, 252, -1412, 2353, 2200, -11, 624, 8459, 6320, -9465, 1225, 2532, 5415, 9252, -1441, -1378, 1081, -1997, -3904, -14740, -5220, 3627, 5725, 6180, -5336, 72, 4638, 915, -496, 628, 1880, -420, 2800, -7143, -7578, 3180, -4210, -1111, 2979, -442, -182, 2778, 2398, -13878, 2209, -282, -888, 180, 3584, -1005, 2, 999, -3074, 1205, -4605, 5250, 17255, 2839, 2718, -678, -2651, 160, 1596, 4685, 2324, 3100, 3744, -1954, -11674, 621, -678, -6242, -3449, -1890, 3134, -289, -7162, 2268, -8437, -624, 4999, -5946, 13013, 244, -200, -1494, -1108, 3768, 445, 2429, -1264, 786, -2993, 3482, 2448, -968, -1184, 213, -772, 4931, 42, -3850, 2020, -17970, 84, 3016, -602, 1805, 731, 3522, -2606, -637, 25535, 680, 1083, 4138, 1602, 190, -1854, -962, -379, -2499, 2453, -362, -4552, 4689, 2168, -5930, -10552, -5585, -4694, 2447, 2047, 5420, 3908, -1449, -90, -68, 496, -12713, -2127, 1406, -10766, 2438, 2278, 2962, -6411, -22, -1966, 2814, -1746, -383, -2381, -5981, 10920, -12354, -656, 2260, 5200, -1908, -2275, 4276, 1174, -932, -532, 2832, 601, 1551, -8434, -4170, -6411, 9099, -6886, 2243, 561, 2026, -3598, -1125, 646, -5188, 6017, -632, 772, -2919, -3776, -9938, 2461, -122, 128, -1416, -1533, 343, 1318, -13738, -1528, -6418, -1196, 832 }, .cb2224s1 = { 32767, -749, -1885, -806, 739, -1858, 3902, 1029, 332, -2122, 1240, 2705, 1362, 190, 1058, -1404, 1224, 1122, 1208, 190, 1984, -1355, 1694, -21000, -1012, 2418, -1269, -1154, 1113, 2291, -2317, 315, 12872, -2296, -1510, 1104, 11324, -1146, -1018, 1326, -902, 168, 647, -1828, -3838, -5682, 2732, -238, -134, 13450, 1570, 2424, 996, -3494, -3720, 4897, 5875, 149, -6367, 6659, -2329, 6916, 1134, 425, -19014, -479, -1900, 3470, -1777, -811, 1723, -46, -2103, -1298, 2929, -4279, -639, -2443, 7231, -1187, -2145, -777, -3287, 4895, 8878, 
-9318, 289, 4015, -3148, -598, 2226, 11700, 114, 3237, 9586, -4570, 2592, 3614, -2272, -2829, -3356, -1095, -5290, 4709, -1867, -1930, -20722, 937, 892, 1415, 1544, 2950, 5090, 937, -1411, 123, -31, -1568, 338, -938, 5465, 5796, 480, -2782, 3351, -2489, -383, 1529, -5686, 2446, -693, -12796, -599, 1894, -1576, -2244, -4686, 10165, -1085, 10050, 2681, 1138, 2544, -1809, -806, 5278, -8730, -3740, -2343, 971, -3254, -165, -212, -4164, 850, 233, -13694, 442, 1073, 3854, -12926, -2001, 3468, -765, 829, 2174, 1531, -6036, -10848, -11009, 803, 1713, 2884, 1992, 75, -2989, 268, 346, 1998, 4798, 8976, -4632, 1863, -4127, -612, 4790, 10946, -1296, 8009, -1351, 356, -1711, 313, 2301, 1318, 8050, 700, 1218, 2270, -2156, 67, 1537, 1941, 3442, 13321, 691, 2344, 2594, 1551, 3853, 7279, -10441, 1006, -11862, 5532, -611, -582, 2257, -2873, 3993, -5133, -2264, -2478, 1576, 1834, -4931, 10264, -1429, -10404, 393, -3715, -1470, -2003, 384, 4869, -6780, -1297, 1572, 1043, 6980, -4382, -3005, 3698, 4176, -1348, -4972, 1574, 9815, -5995, -979, 3609, 3702, -8503, 668, 3354, 2552, 9183, -1175, 1224, -2859, 11176, 6088, -1355, 84, 1271, -380, 5336, 299, -690, -365, -8047, -3679, -3204, 1334, -13451, -1392, 2200, -3646, -1046, -4292, 741, -1701, 1722, 2061, -1358, 7266, -6356, 963, 2190, -1349, -1882, -14128, -4662, 3552, 565, -1109, 5413, 1239, -2618, 794, -2064, 11805, 9004, -2134, 2804, 946, 80, -2387, -1205, 11, 1642, -1825, -2324, -5018, 4208, 5285, 661, 12430, 1907, 784, 10864, 340, 18, -138, 2885, -2247, 17, 334, -3172, 2977, 970, 536, -1540, -516, -488, -512, -1334, -1930, -2418, 1078, 24837, 12, 2060, -252, -2536, -2206, -3179, -6785, -8842, 8736, 1393, 119, 1652, 10126, 856, 855, -742, -289, -2208, 3831, 6909, -6556, 2472, -245, -1729, 1460, -3014, 59, -58, 132, 3903, -3762, -1419, 13273, 2708, -7752, 84, 3525, -1305, -334, -13421, 5931, -4845, -2697, 666, 558, -1102, 632, -2946, 4153, -4018, 4516, 4875, 4460, -1567, 2233, 386, -754, 1256, 2145, -1692, -13046, 1581, -518, 4397, 
1215, -723, 3413, -640, -5088, 1711, -714, 2536, 2433, -691, 10758, -8764, 5541, -2071, -1662, 12955, 12998, 1252, -94, 802, 2573, -2557, -66, -832, 106, -728, 1050, -811, -2684, 629, -16524, 1531, -1617, 1348, 204, 1722, 368, 554, -1752, 114, 1349, 1952, -1007, 2626, 2035, 8148, -2539, -4296, -4460, -8542, -3089, -1543, -857, -2617, -1765, 6642, 2167, -1531, -6881, 86, -414, -5896, -5152, 17445, 1129, -5006, 2936, -3432, -2226, 1176, 972, 1170, 530, 3390, 260, -2909, -3550, -5255, 1771, -382, -1690, 17070, 2688, 566, 2430, -1768, 3373, 1460, -3464, -629, 3119, 430, -3554, 8357, 7075, 293, 2955, -61, -6919, -4939, 3678, -6852, 652, 2206, 5918, -2768, -3022, 5721, -770, -1102, -1057, -2760, 3086, 5611, -160, 2714, -1042, 2569, -14248, 3846, 8212, 5392, 144, -11896, 618, 1212, 3283, -3777, -715, -3870, 2528, -2900, 1645, -1786, -1852, 2776, -1348, -586, 234, -4, -1666, 46, 2095, -1987, -18728, -2980, 2501, 4042, 79, -1849, -2013, 8047, -1898, -108, 340, -4760, 2134, 9000, 347, 10365, 4779, 6660, 1694, -3253, -2282, -1488, 10406, -8054, -3414, -2934, -1611, 3172, -2195, 4973, 1249, 2888, -4054, -5738, -2995, -2282, 1977, -353, -516, 5322, 3225, -4907, 1303, -4656, 9947, -236, 9382, 2332, 2076, 1470, 3173, 4712, 2645, 559, 4904, 1511, -1715, -4856, 5750, -1276, -306, -5980, 14393, 1443, 85, 156, 7718, 793, 4199, 2122, 1098, 128, -1996, -1397, -20, -534, -13296, -1518, -2970, -1001, -6474, -6146, 8337, 5476, 3058, -526, -1295, 1623, -8791, 1257, 2006, -5725, 3035, -2917, 1280, -8479, 5934, 9870, -13131, 14, 1088, -9, 1969, 366, -3214, 192, 2764, 1499, 346, -2031, -2900, -2529, 1072, 11717, 5206, -44, -2514, -8900, 2892, 2132, 3635, 3735, 2726, 1398, 6035, -2830, -4568, 424, -8696, 1368, -3860, 1823, -2620, 4546, -2210, 1660, -1672, -10524, -484, 950, 11, -4494, -6220, -5653, -13332, 2868, 460, -4120, -4030, -3277, 522, -3403, 1126, -170, -1892, -4366, 1304, 3477, -1507, 1111, -594, 1670, -8416, -1690, 2492, -7109, 2531, 4131, -8123, -4884, 16505, -240, -63, 32099, 974, 
-1360, -2395, -2005, -1156, -877, -416, -922, 1857, 766, 71, 1380, -259, -272, -1924, 2498, -3290, -16045, -2064, 2966, 2936, -1265, 2121, 488, 3781, 1484, -1193, 4776, -1001, -669, 1569, -379, -604, -5, -1943, 757, 359, -560, 118, 17941, 2323, 215, 7621, -3582, -8130, -698, 9893, -2752, -417, -1262, -1504, 3319, 1186, -2192, 3014, 781, -3602, -6190, -7725, 3169, 2038, 1175, 612, 2477, -4136, -12152, 4538, 567, -116, -3222, -470, -118, -9257, -635, 3078, -11596, 93, -4178, 4150, 5985, 4414, -2110, 542, -1125, -1242, -234, 807, -1385, -2448, 824, 109, -1826, 3032, 269, 14188, 3468, 908, -12, 2290, 5758, 1685, 680, 5963, -2763, -173, -34, 3135, 1230, 2226, 2471, -9546, 2266, -1583, 729, 3506, -10664, -652, 2212, -620, 2762, -751, -6337, -4339, 4131, -1234, 5423, -2279, -2884, -929, -12582, 416, 2046, -3854, 11130, -2738, -670, -202, 6216, -7266, 9726, 1308, -1761, 4696, -1061, -144, 482, -1586, 4377, -5016, -3894, 2296, 4340, -555, -3003, -2117, -962, 100, 4548, -1870, -13885, 1351, -3226, -8114, 377, -391, -1344, -2148, 4756, -3518, -14429, -670, -238, 400, 1234, 4389, 1181, 1046, 425, -32, 840, -29846, 1580, -992, 1844, 1961, -1305, 1055, 418, 52, -641, 2430, -1773, -5323, 3341, -5367, 14027, 3051, 3864, 404, 4186, -1875, -5822, -4321, 112, 395, -177, 1080, -3008, 520, 8, 226, 1430, -1635, 8, -2632, -3249, -3595, 622, 564, 8404, 14463, 160, -7828, -4113, -16547, 848, 6320, 2311, 4074, -2050, 668, 1463, -2322, 1790, 864, 317, -594 }, .cb2224m0 = { -17338, 5737, -912, 5906, -5315, 920, 2743, -2232, 1943, -753, 1696, -1818, -2272, -564, -1306, -527, -156, 9952, 36, 2524, 2053, 1841, -1670, 10622, 2532, -5616, -324, -1132, -1148, 1920, 10232, -75, -630, -10796, 1618, 1104, -2557, -603, 2115, 966, -3763, -3183, -851, 4502, -1565, 10062, 313, -709, 10707, 867, 3820, -2747, 3470, -1942, -486, 4092, -6289, -2363, 556, 3190, 5046, -1869, 2886, 10572, -948, -4191, 1544, -1727, 721, -3153, -712, 934, 1610, 1070, 1248, 10645, 2340, -11102, -2744, -353, -65, -4973, -1782, 
-1037, 1210, 1192, 1138, 1106, 9422, 652, -9595, -1663, 460, 9107, -2827, 775, 1131, 4732, 93, 476, 387, 32767, -161, 266, -406, 604, 675, 83, -589, -639, 220, -830, 2200, -142, -2000, -128, 902, 823, 287, 717, 1857, -1626, 208, 2784, -72, -19310, 6190, -2063, -9101, 3419, 1721, -2092, 332, -6533, -7594, 1138, 807, -2582, -668, 410, -497, 1526, 96, 944, 3319, 1294, -335, 1964, -380, -618, 3069, 101, 18964, -2298, -10304, -1190, -998, -1384, -11466, -256, -4475, 4027, -3532, 1828, -1311, -3417, -3925, -221, 27688, 2277, -1227, 1043, -399, -3327, 515, 1665, -616, 2724, -546, 4608, -576, -103, -9064, -1281, -563, -3588, 2174, -824, 3379, -2360, 354, 844, -7044, -2295, -2613, -11152, 1006, -1064, -17007, 1180, 387, -8448, 836, -578, 2621, -356, -1476, 2362, 822, 4547, 118, -11628, 352, 367, -958, -12423, -65, -1591, -2304, -2880, 1684, 1708, -1693, -781, -71, 10012, -534, -3672, 417, -2048, -1955, 10491, -1257, 861, -414, -4058, 3042, 1529, -5823, 6877, -3918, 993, 221, 2576, -7780, 170, -648, -139, -3410, 7974, -756, 2657, -596, 12527, -199, 13752, 2198, -938, -2265, 1736, 257, 1517, -676, -1165, -2874, -2433, 123, -829, 2605, -10270, -3158, 3624, 2072, 6960, 1490, 4634, 455, -8175, 1139, -4545, -1491, 3727, -8738, -1951, 593, 14, 2897, 2490, -2273, -1436, -10992, 3005, -4392, -3434, -4561, -1014, -9506, -1609, -1248, -1593, -190, -10472, 3264, -2274, 5097, -633, 473, 427, 725, 1577, 11032, 318, -12228, 78, -1116, 441, 1930, 4041, -648, -4324, -224, 2738, 8826, -40, 327, 1761, 2371, 171, 4039, -3411, -2495, 1150, -12181, -1704, 35, 528, 417, 626, 1866, -472, 466, 905, -854, -875, 1194, 24371, 488, 26, 695, 1777, 798, -169, -16, -1252, 395, 871, 1170, -635, -1637, 2094, -5427, -16393, -384, 3872, 33, -687, -1777, -4160, 3020, -1906, 3868, 699, -400, 6755, -3253, 12699, 1474, 7312, 991, -646, 26770, 2524, 2144, -500, 1096, -1869, 1036, -1707, 521, -2091, 1445, 2335, 107, 238, -227, -120, -32768, 591, -257, 867, -1231, 650, -465, 356, 431, 762, -516, -594, 512, 242, 
2298, 1012, -1538, -11748, 3551, -5608, -2174, -2428, 10557, 625, 1002, 27865, -589, -1527, -1552, 156, 1905, 1041, -4190, 2300, 1603, -980, -1764, 484, 1555, -2664, 381, 11676, -8848, -3060, 675, -646, 736, -1279, -1261, -1988, 543, -1880, 1917, -2165, 2846, 11863, 2076, 10381, -307, 4354, 73, -2788, -2464, 964, -218, 1552, 1846, 1470, 577, -594, 725, 30798, 43, 13, -1474, 260, 1218, 1433, -114, 1020, -648, -678, -1879, -65, 791, 366, 8547, 931, 1091, 1018, 16312, -1116, -777, -1098, 404, 180, -899, -2865, -10089, -751, 40, -2358, -2980, 3574, 7905, -190, 9207, -18, -18766, -270, -5300, -2023, 2422, -1189, 1267, -1085, -704, 6823, 2164, 2, 125, -2319, 411, 591, -488, -566, -3394, 304, -12375, -268, 11098, -150, -2392, -1255, 3172, 162, 1295, 5897, 7944, 6019, 3329, -2014, 2957, -4933, 4805, 2780, -5453, 2680, 3220, 2784, -549, -19908, -1222, 550, -3540, 1822, 4082, 2399, -6844, 2145, 938, -597, 122, -20, -14986, -1620, 1575, 561, 408, -6305, 760, 1634, 2652, -8301, -2988, 1864, 2524, 3228, 7466, -2620, 410, 1364, 1740, 2204, 1999, 1704, -2601, -351, -104, 10688, -7166, 134, -346, 11852, -13322, -3171, -1230, 1109, -2336, -962, -563, 1030, 2832, -969, -1997, 3233, -414, -8246, -2074, 2737, 3557, 1625, 1036, 845, 1848, 1710, -10388, -4586, 6915, 2734, -8693, -667, 1568, 1758, 2396, -3262, -2497, -1472, -11848, -689, 3379, 1692, 1449, 2844, 8524, -15598, 337, 590, 3303, -1594, -2548, 4529, 433, -1921, 920, 1061, -1693, 191, 44, 957, -2397, -1126, 41, 2164, -1587, 568, -17290, 4687, -1028, -403, 1169, -1282, -1602, 242, -1234, 1870, 1067, 2444, 1752, -2552, 8775, 1384, 5683, -4770, -12436, -680, -13344, -196, -276, -299, 734, 12378, 2364, 327, -1494, 560, -90, 3394, 496, 2357, 629, -17, 1040, -706, 589, 294, -1135, 25012, 444, 1206, -298, 1424, 1524, -2188, -64, -1101, -1998, 374, 1377, -1382, -11349, 1456, -171, -2369, 6966, -2808, -8987, 3390, -811, 671, 3032, -3396, -9815, 2246, 4418, -678, 1851, -1592, -11038, -1194, -3612, 2589, -250, -495, 1203, 1348, -805, 
1853, -345, -555, -8755, -9695, -3768, -1506, -8172, -322, -7163, -6319, 2052, 116, -4459, -2328, 4857, -2569, 1419, 959, 1138, 7034, 4836, 3449, 6826, 13411, -893, 981, -2060, -3710, 3177, -761, -1128, 4386, -127, 6698, 3426, -2922, -61, 408, 1426, -1238, 15468, 94, 373, 3597, -2432, -1989, -859, -8976, 2938, -777, 409, -206, -7758, 3384, 295, -466, 29, 7925, 2048, 930, 2296, -10030, 330, 7864, -1004, -385, 2130, 388, 3587, -4480, 1560, -12768, -2606, 8178, 771, -3519, -1590, -592, 2192, -1126, -77, -3947, 1868, -1304, 11107, 781, 6240, 4134, -3314, 407, -6125, 5168, -503, 2155, -990, 143, 219, -9950, -1186, -1446, 1930, -8963, -4084, -6141, -976, 153, -13665, 564, 13631, 138, -269, 379, 1333, -1710, -940, -511, 1214, -2190, 1347, -1397, -1321, 94, -1802, 6627, 1306, -12347, 2780, -1091, -4362, 5047, -446, -3472, 6064, 1075, 478, 769, 58, 802, 562, -1581, 28580, 194, 1338, 573, -555, 617, -409, -1249, -8, 1133, 952, -120, 2502, 5313, 969, -1664, 1769, -12199, 5551, -402, 4862, 3270 }, .cb2224m1 = { 32767, -54, 1385, -206, 19, 522, -1176, -667, -260, -1388, -1751, -2234, 228, -343, -893, -898, -1004, 2517, -232, 20996, 507, -1857, 2574, 840, -615, -1922, 660, 844, 52, 1272, 609, -692, 21805, 938, 678, -399, -22, -1839, -996, 1560, 218, 3973, -6547, -1151, -3914, -789, 938, -11509, -2282, -606, -327, 3088, 797, -1540, -7598, 1378, -100, 2108, -1907, -11671, 1538, 11136, 310, -2096, -3037, 3181, 1731, 2043, 3424, -1098, 2046, 545, -1778, 605, 932, 832, -2356, -1498, 1129, 11542, 119, -10994, -3720, 4316, 346, -9141, 3921, -918, -5476, 372, -318, 9254, -681, 4896, 1587, 1620, 1850, 4057, -1507, -362, -1074, -328, -1502, -3092, 2735, -378, -11572, -1292, -2575, -3397, -7566, -8977, 1670, 8659, -655, 884, 1815, -9348, 570, 394, 1670, 1942, -195, 386, 553, 8885, -9206, -624, -2312, 15852, 782, 562, -1497, 720, 1804, 1415, -3809, 3783, -1918, -3496, -637, 581, 1161, 961, -960, -930, -1673, 904, 11510, -2286, -9964, 2964, -5752, 2229, 786, -1479, -18882, 1517, 128, 3282, 
157, -2178, -564, -6029, 766, -4599, 3620, -4380, -20114, -677, 2134, -93, 1486, 648, -4790, 1862, -1476, -56, -3443, -2622, -2806, -1185, 122, 1801, -1547, 12241, -2785, 2386, 56, -4075, -10964, -832, -4744, -1350, 2849, -255, -1375, 163, 1306, 37, 2304, -1396, -11234, 9712, 1732, -2262, 3632, -431, -579, -4045, 806, -12168, -1309, 840, -1474, 918, -1240, -1601, 48, -4137, 6934, 3968, 7370, 4088, 8648, 2351, 1466, 615, -12314, -2347, 4382, 862, -4288, -3138, 1886, -4357, 375, 1949, 73, 287, 135, -60, -1498, -2427, 1263, 3322, -582, 17508, -1202, 1558, 3351, 484, -439, -571, -370, 11952, 11656, -1407, -1410, -2976, -459, 397, 1980, -1374, 1237, 5044, -2074, 405, -10650, -174, -12556, -1962, 4569, -1293, -200, 3106, 343, 748, 1918, 1084, -670, 3, -1070, -397, 3965, 9966, -609, 9691, -900, 137, 2305, -5944, -944, -1500, 638, -703, -582, 10098, -523, 776, 1266, 4860, 6213, 1181, -5634, 518, 9116, -4740, 10683, -547, -1295, -91, 104, -3115, -1724, -17, 1953, -745, 694, -474, 12248, -596, -674, 765, 674, 4494, 1205, 5883, -1638, -3996, -664, 8694, -5620, 3968, -717, -10425, -285, -12605, 368, -3904, 12363, -1288, 1242, -1497, -3117, 2396, -220, 1700, -2788, 250, 107, -150, 345, 681, -44, -2466, -389, 2098, 312, 54, 2734, -22225, -1232, -1778, 1063, -1586, -6658, 344, -2889, -4348, -3685, -2100, 12, -1755, -6401, -149, 8150, -10689, -748, 1443, -32768, 1698, 1461, 216, 1373, -2814, 1014, 1135, -227, -1309, -616, 1566, 395, -724, 852, 1579, -9647, -1214, 728, 329, 9244, 179, 7204, -836, -3954, 168, -5722, 152, -2886, 472, -651, 5114, 8734, -71, 11406, 1098, -1452, 1190, 598, -880, 14611, 12540, -1523, 1340, 1015, 1510, -208, 206, 1314, -1532, -246, -3210, -1637, -197, 197, -32768, 1448, -191, -1720, -217, 1021, 973, -2099, 56, 606, 39, -1569, -1205, -2375, -2156, 4798, 2504, -11914, 933, -6015, 2657, -2911, -5173, -1964, 1576, 5268, 1190, 675, 856, -1718, -4332, 166, 1556, 19005, 2040, 1198, -2170, 1824, -3409, 121, 830, -252, -525, 289, -1701, 292, 854, -1150, -1108, 
171, 511, 22114, 662, -1263, -540, -2306, -2332, 869, -5191, 186, 536, 410, -7576, 590, 13625, 3519, 3858, -2787, -376, -6506, 891, 5025, -2054, 8316, -2115, 7668, -5808, -2464, -2422, 1541, -3851, 1578, 420, -617, -6507, -858, 160, 3876, -2830, -5970, -3295, 9829, 1099, 1617, 3502, -3124, -4116, 138, 287, 914, -548, 1056, -1546, 1218, -227, 11632, -574, -996, -9894, 808, -5868, -1457, 8374, -2086, -280, 1038, 528, 1862, 284, 3926, -144, 7168, 1224, 11628, -221, 1018, 1683, 922, 561, 6910, 1895, 3044, 12613, -74, -1424, 1654, 8872, 2255, -990, -2039, 269, 9558, 10122, 958, 466, -1948, -1242, 1042, 886, -1143, -3444, -8720, 1918, -300, 19074, -1629, 991, 908, -896, 1207, 3602, -4802, -2912, 4100, 2936, -1344, 459, -6904, -714, 524, 171, -1430, 1454, -2725, 1130, -757, 2861, -11174, -2768, 5466, 3662, 110, -1999, 12376, -2173, -2508, -2838, -2025, -4378, 134, -9856, 1738, 1027, 1428, 38, -1560, 12824, 13932, 549, 586, 720, 923, -1040, -2827, -3272, 1902, -2113, 2624, 3296, -34, 12291, 1449, -12138, -796, 186, 2777, -1007, 3276, -587, -1917, -130, 2120, -564, -364, 1005, -615, 1504, -2412, 9219, -11412, -2490, 1262, -2720, 1608, -3276, 1294, 1882, -188, 7090, 6029, -4207, -2739, 72, -10035, -1672, 1509, -124, -1649, 420, -3623, -1069, -11225, -754, -388, 790, -3209, -330, -2632, -11920, 3178, -1788, 2585, 4146, 1944, -2757, -10616, 220, -14136, 2158, -274, 2010, -362, 1107, -348, -1990, 96, -985, 1599, 1566, 1393, 304, -1380, -924, -285, 620, -30, -902, 26210, 1485, 1042, -1160, 352, -177, 1245, 1879, -18, 727, -421, 223, -1298, 1066, 962, 1306, 3866, 870, -18780, -3873, 107, -1408, -1261, 808, -818, 1738, 1439, -2156, -1499, -2108, -4626, 4039, -964, 16682, -1169, 266, 9373, 1238, -2728, 2381, 12159, 2155, -472, -2293, -513, 3808, -690, -2190, -1139, -6, 1379, -22803, 1380, -612, 308, 1394, -902, -1454, -2620, -1080, -2864, -3301, 108, 218, 8718, -617, -1098, 1436, -2005, -3966, -2658, 6152, -874, -4636, 8705, -3382, -12072, 418, -1837, -12582, 270, -788, -1174, 
2156, 461, -297, 478, -632, -356, 5796, -12024, 416, 2602, 3544, -1240, -970, 4874, 7221, 704, 8940, 2316, 1174, 2537, 5380, -5, -1818, 3020, -4120, 7042, -9618, -1622, 3576, 2455, -298, 451, -5298, 7371, -1570, -12956, 9758, -216, 889, 5395, -2779, -4036, 1736, -1871, -2036, -1119, 1847, 912, 2292, 850, 220, 1300, 2228, 399, -2885, -2696, 2399, 3179, 6266, 1629, 13091, -232, -5322, 1397, -724, 1666, -2012, 3643, 1400, -2724, -18007, -506, -103, 1318, 2473, 965, -587, 1135, -904, -510, -10767, 1937, -585, -73, -1662, 3021, 340, -12475, -1618, -1, -1914 }, .cb2232l0 = { -9947, -673, 522, -36, 396, -433, 949, -442, -12495, -2186, 4280, -997, -1715, -7385, -379, 3498, -572, -9897, 6686, -4736, 577, 1866, 659, -123, -1682, 420, -866, 4458, 5821, 3155, 7929, -5562, -1798, 3086, 8556, -65, -8943, 2354, -4187, -3798, 627, -1859, -9760, -1811, -1724, -45, -1838, 1638, 499, 148, -335, 20916, -264, -556, -269, -1014, -1531, 711, -519, 462, -5117, 3944, -950, 8277, 878, -4803, -5003, -4402, -4722, 2988, -144, -6887, 10661, -909, 700, -2287, 12126, -101, -761, 1836, 827, -609, 538, 442, -4504, 1812, 3818, 7359, 96, -555, 1598, 10040, -554, 924, 3426, -1786, 2620, -2132, 867, -519, -2299, -672, -508, 201, -2457, 10872, -5003, 5422, -8890, -104, 2579, 940, 401, 871, 11167, 1216, 1054, -2876, -1523, -3950, -1229, -3410, -428, 3648, -9389, -3025, -1752, 7583, 3953, 1938, 3899, 1435, 8170, 1019, -2320, 1299, -1152, 226, 394, 11328, -1471, 604, -184, 567, -3704, -5723, -5938, 423, 9362, 4546, -3318, -3395, 5084, -4341, -1781, -2619, 1078, -365, 151, -413, -21591, -968, -202, -183, 849, -481, 407, -11, -2708, 2472, 2689, -9232, -9482, -1776, 645, -1510, -1410, -6115, -114, -2550, 1922, 1668, 288, -1302, 948, 1967, 52, 2393, -1975, 374, 17358, -1332, 5303, 3195, -2674, 4784, -1418, -1359, -57, -2126, 4618, 8890, 6455, 1181, 76, 374, 9585, 8762, 672, -642, 666, -6485, 1751, 3255, -934, 6196, 892, 171, -102, -44, -326, 1330, -320, -480, -842, -22376, -561, -141, 635, -6528, 5711, 2400, 
838, 2846, -3212, -5341, -5479, 4961, 2110, -7480, -4215, 7964, -1308, 1219, 1541, -4418, 6293, -4522, -4887, -5760, 2790, 1441, 6135, -1133, -1627, 1235, 914, 572, -1043, -1473, -519, -4618, -1228, -12212, 1101, -10794, -4292, -4355, 6431, -588, -992, 612, -1771, 6751, 4871, 581, 620, -352, 277, 727, 2226, 8552, -43, 2295, 9409, 1122, 7618, 1885, 1192, -1432, -1103, 8666, -2078, -403, -1787, 1572, -2200, -7705, -6743, -1277, -1228, 955, 7613, -1536, 8530, 5703, 5446, 4251, -853, 4910, 1578, 2832, 1274, -2610, 243, 2820, 951, 9240, 1617, 605, -6755, -2728, -5658, 3866, -157, 1215, -8470, -2038, -189, 10411, 7444, -376, 407, -1128, 770, -410, 503, 1707, 786, -529, 82, -27, 21512, -282, 81, -1129, -686, -555, 2674, -99, 1284, 2216, 1238, 404, -3398, 1010, 3966, -1134, -2682, 14222, -1581, 9779, -1114, 848, 1905, 2129, -3937, -4742, 1229, 8051, -4344, 3914, 4273, -659, 159, -1188, -1844, 912, -1256, -478, -16158, -2869, 959, -2096, -2166, 2360, 7861, -2718, -6358, 7653, 6639, -3239, -1690, -1242, 3439, 1254, -954, 604, 17512, -288, 2412, 211, -298, 2656, -5217, -1770, 892, 1979, -1482, 3498, -40, 10424, -1038, -1862, 9905, 298, 77, 2179, -4444, 2580, -2069, -6473, 61, 84, 1035, -645, -662, -824, -743, -104, -1962, -124, 4976, -5378, -1254, -7055, -3474, -10695, -1254, 2547, -694, 3194, -82, -2634, 230, 358, -12, 1594, -90, 598, -76, -21136, -1278, 846, -84, 259, -2536, -4442, 2337, -1606, -3264, -3126, -591, -1295, -2440, -2592, 10888, 5821, -862, -5070, 10402, -10633, 159, -2660, 894, -2112, -1774, 3732, -1020, 422, 9487, 1608, -992, 2046, 275, -10676, 2606, -999, 477, -1868, -1690, 4764, -6419, -7550, 8159, 529, 2308, -394, -2394, 2826, 6680, 496, 3628, -646, 3186, -657, -2260, -1416, -9202, 496, 6624, 2441, 1554, -2195, -8458, -3459, 466, 6706, 1056, -8777, 5436, -4000, -3130, 4794, -6127, 2008, 1602, 195, 558, -1362, -880, -2662, 9726, -9793, 2989, -3182, -2378, -1338, 1086, -4682, 372, -399, 11129, -601, -666, 5206, -1106, 362, 3155, 328, -9862, 719, 1602, 998, 
-2342, 857, 1510, 476, 7256, 4652, -5750, -4991, 4611, 8718, -4434, -4119, -351, -1606, -1033, -3717, 3585, 9381, -1594, 5052, -7414, -205, 2356, -5949, -8738, 1526, -1838, 4760, -5444, 623, 112, -2863, 5710, 4920, 9497, 3759, 10748, -201, 716, 747, -2559, -4077, -449, -741, -136, -1303, -572, 1886, -986, -10529, -51, 1360, 2418, 116, -1490, 1928, -9977, 4720, 227, -11212, 3730, -2996, 1300, 9935, 356, -4618, -384, 972, 3174, 3732, -803, 2666, 790, 2067, 2343, -1209, -10147, 21, 9066, -4564, 2508, -176, 264, 9834, 3360, 7278, 9386, -1274, 522, -50, 4150, -884, 592, -688, 309, 20750, 672, -1326, -346, 366, 2058, -607, 633, 620, -677, 330, 69, 432, 319, 436, -300, 21845, -318, -676, 320, -386, 889, -724, -1394, -2664, -431, 2046, -136, 5520, 6700, 1192, 5779, 9386, -3541, -5638, -16125, -259, 545, -267, 1972, -2366, -43, 615, 251, 1003, -980, 2262, -10519, 12166, 2007, -884, -1560, -1250, -491, -438, 820, 1212, 3512, 354, -1066, -46, 98, 315, 8532, 944, -1297, 8011, 1029, -383, -1606, -8381, -1650, 2852, -958, 1757, -4270, 2160, -9283, 2918, -3718, -224, 6154, -5671, 3764, -554, 5214, -2526, -31, 547, 6, 1633, -4562, 1424, 1177, -866, 1648, 653, 6056, -1845, -1271, -350, 596, -2286, 9893, -6594, 1099, 630, -537, 230, 972, -134, -491, 79, 306, -74, 253, 208, -1804, 73 }, .cb2232l1 = { -11397, 378, 12845, -1813, 342, 329, 1165, 550, 556, -115, 755, 117, 2511, 260, -1189, -1406, -1528, 1866, 3300, -9678, 1025, 10413, 199, 2878, -3572, 8701, -1895, -1189, -576, -3384, -162, -10866, 3253, -1267, 91, 2277, -86, -3394, 6576, -2475, -1136, -4295, -1610, -8064, 297, -8908, -4433, -2954, -82, -11426, -4610, 2063, 1522, -7972, -495, 1799, 2922, -5179, -865, 4739, 11072, 3927, -483, -11111, -2375, -1432, 1210, -1342, 2418, 1688, 852, -64, 133, -4582, -7136, 10558, -3417, -3162, 2033, 3149, -3050, 2532, 568, -2444, 4082, -2859, -10350, -4983, 6633, 230, 5954, -1140, -657, -998, 1156, 736, -8894, 64, 939, -260, -1704, -526, -1330, -869, -2427, 12377, 1296, -120, -10560, 1794, 
-9090, 1487, 7162, 519, -382, -3234, -66, 1294, 2363, 1482, 498, -4053, -752, -154, -587, -293, 16533, 65, -1211, 1666, 291, 2820, 2222, 2, 865, 344, -1206, -1214, -2162, 8842, 11063, -2093, 1896, -857, -144, 321, -9548, 4464, 5038, -282, 1160, -194, 823, 3479, -8234, 5834, -320, 7114, -184, -2663, -11670, 472, -2013, -1282, 4390, 4453, -2126, -2483, -900, -6262, -2237, -539, -1134, 164, 426, -8969, -1746, -1960, 8172, -2127, -19948, 657, -712, 344, -443, 458, -564, 56, 756, -157, 274, -1324, -3372, 2981, 635, -9454, -4916, 2884, 2316, 8200, -1452, 2135, 1785, -1054, 82, 5007, -4164, 642, 9241, 5091, 1002, 2467, -8409, -854, -861, -2317, 405, -1810, -793, -7907, 496, -1005, 3373, -1016, 9527, -542, 1672, -9105, 280, 11170, 273, 908, 89, -233, 10850, 870, 436, 1630, 3328, -499, 5091, 1224, 9135, -480, -1134, 2428, -2904, 5077, 2014, 2859, 4277, 7763, 8719, -11474, 1619, 1167, -3188, -1063, -433, -4291, 2646, 1024, 2008, 317, 746, 327, 6824, 1174, 8978, 5254, -8948, -136, -2602, -1442, -698, -950, 1800, 296, -1016, 1653, 3771, -9326, 4536, 7033, 4729, 6630, 1042, 167, 11485, 12338, -147, 2834, 611, 1844, -313, 486, -916, -887, -1423, 642, 242, 75, -1875, -645, -1239, -2118, 1458, -272, -1703, 949, 778, 21826, 214, -1320, 310, 2680, -1542, -2202, 1072, -132, -2067, -3593, -8293, -10331, 9030, 402, -2702, 2984, -12068, 3170, -1098, -1175, -1188, 2998, 1159, -1712, -904, 236, 42, 823, 548, -546, -1954, 15989, -2212, 1886, 2300, 2293, 2606, 2905, 2365, -1214, 1592, 1362, -2210, 1674, -1892, 15049, -1012, -2824, -792, -11447, 11144, 4853, -357, -1230, 748, 1212, -294, -424, -2720, 78, -2149, 966, 7794, 1645, 138, -709, 464, 3614, -10308, 310, -4726, -3694, 1088, -576, 690, 68, -145, -3282, -9280, -9537, -1274, 3202, 588, 1790, 1437, 3880, -1803, -1154, 15082, -2388, -1746, -885, 2267, 1813, 1688, -1039, 9775, 350, 3218, 10550, 1048, -3731, -3748, 3517, -910, -663, -413, -1045, -1236, -248, -132, -1196, 12, 15815, 653, 1429, -371, 4094, -3050, 567, -5524, -11128, -4261, 
1929, -1719, 8236, 686, 1309, -1057, -715, -2586, 1327, -38, -6180, 3499, -2080, 8980, -1890, 62, -1004, 3308, 5809, -5778, -3865, -610, 180, -519, 3129, 9000, 1607, 8484, -4056, 4741, -4491, -355, -1324, 1203, -1864, -811, -15995, -121, 1325, -817, -2170, -5753, 731, -1875, -2286, -9193, -307, 247, 2469, -1738, -12290, 31, 1028, 670, -66, -1856, 570, -3542, -3401, 144, -320, -524, 184, -928, -1606, 10978, -3114, -8861, 1467, 1156, 872, 8276, 5655, -695, 2788, 3733, 2155, -2044, -10260, 1683, 1859, -263, 17966, -19, 1621, 50, -261, -4143, 1245, -22, -1564, 809, 2462, -8005, 1247, -1471, -763, -1686, -698, 2868, 796, 4036, -3672, 11209, 1102, -9369, -1008, 1273, -906, -4458, 1642, -3254, 3563, -94, -6267, -604, 8687, 2388, 2214, 1759, -7788, 4296, -7467, 3547, 3248, 845, -7784, -2195, -42, 5327, -1002, -3915, -4581, -1215, -919, -3444, 5142, -1874, -3020, -2627, 1129, -4456, 1840, -11472, -914, -6366, 3495, -2775, 484, -5859, 980, -1967, 1350, 929, 6856, -3952, -3365, 1514, 7423, -675, -2260, 6027, -3072, -6388, -3716, -2398, 5564, 1447, -86, 1180, -1239, -1372, -435, -1314, -1978, 942, -2018, 1027, 704, 20417, -94, -1239, 252, -171, -1100, 1684, 1401, 623, -354, -2674, -5042, -734, -6631, 7587, 4901, -1596, 6806, 4230, -859, -867, 1266, -3869, -3972, 1548, -4889, 2811, 2263, 2468, 354, -6197, -1278, 1947, 5675, 10612, -1730, 2056, -70, 3034, -583, 879, -3719, -8623, -1241, 822, 5832, 163, -1075, -784, 398, 1170, -717, -374, 856, -21602, 89, -513, 260, 854, 1152, 762, -601, 523, -107, 1033, 1877, -1456, 226, -20758, 365, -8943, 1305, 193, 948, 295, 2696, -3165, -1982, -2439, 1067, -12266, -1018, 3400, -178, 1995, 11745, 1833, 9785, 1171, 582, -1844, 157, -1242, -4080, 864, -1771, -4257, 721, -4010, 7990, 142, 730, 1976, -6623, 4637, -7394, -1143, -835, 3341, 1732, -7266, -448, 5379, 290, 1855, 6977, 6637, -6561, -1370, -1767, -2769, -1189, 3872, -4895, -4679, 3906, -1664, 1514, 7908, -7960, -4147, -1235, -1706, 3314, 144, 1668, -9505, 2268, 4147, 2515, -1451, 6475, 
1675, 106, 981, 201, 309, 60, -133, -472, 561, -380, 1130, 91 }, .cb2232s0 = { -26218, 1606, -390, -696, 266, -947, 561, -1526, -8, 1080, -187, 5671, 2249, -30, -4129, -768, -10908, 3826, -10422, -144, -1259, -1372, -3553, 1287, -5151, 6442, -5101, 1386, 791, -1593, 12942, -764, 424, -6212, 9733, 702, -9721, 524, -4818, -1232, 6, -484, -818, 955, 6425, 3594, 5156, -286, 1514, 4466, -1756, 11321, -679, -1481, -477, -8015, -3059, 4476, 679, -1143, 2877, 2581, 3230, 239, 12018, -1597, 13431, 11852, 260, 3306, -714, 1299, -4375, -778, 170, -565, -3510, -6632, 3354, 5901, -1070, -5912, -3430, -4970, -4712, 2648, -9113, 1561, 1002, -5659, -3177, 638, 2289, -1050, 12310, -10364, 2830, -961, -194, -6442, 2206, 3454, -2087, 4327, 1080, 10257, 8107, 4904, -3141, -2339, 7568, -363, 3765, 7960, 7067, 1496, -3842, 1805, 2415, 913, -1641, -5411, -7583, 4597, -1324, 2882, 11310, -2570, -2877, 3544, 4642, -2249, 7110, -307, -3413, -2871, -8974, -6358, -5703, 4046, 83, 1887, -3476, -4346, -2995, -346, -46, 22143, -576, 2597, 696, 1520, 140, 2937, -3356, -988, 4090, -1246, -3347, 1387, 2264, 1282, 7040, -806, -12810, -1105, 32767, -4266, 108, -1998, -680, -1279, -467, -110, 462, 768, 1678, 1408, -1888, -1115, 9430, 5852, -3578, 5367, -1096, -4310, -9588, 4350, 6048, 2516, 3214, 4468, -276, 15175, -848, -2875, -314, -6002, -11743, 448, 9238, -3026, -3934, 2840, -2070, 1850, 444, 511, -542, -7382, 6002, -14447, -1498, 176, 812, -2632, -2291, -3312, 3953, 370, -2154, 1678, -1186, -6382, 1544, 3534, -3767, -7459, 7265, -3272, 10669, -1677, -7046, -1679, -132, 2108, -1948, -2938, -5393, -6222, -11293, 2066, 981, -731, 1869, -2211, 3558, -21513, -678, -493, -2087, 245, 635, -2011, -3316, 13445, 2089, -10186, 1114, -1241, 2121, -2305, 3316, -1282, 2733, 318, 3534, 4844, -1439, 8932, -7649, -556, -7519, -3442, 5068, -3546, 8586, -8425, 7146, -683, 665, 3052, -2581, 248, -8320, 2270, 7045, -800, 5890, 2187, -251, -2552, -3867, 3665, -1643, -11757, -5542, 1806, 3669, -508, -3436, 600, 10412, 
-5426, 1680, -4545, 11536, -1859, -5446, -4594, -4300, -1173, 282, 2100, -2556, 9486, -7325, -7252, -3155, -775, -13674, 4272, 3066, 9352, 1647, 1136, 794, -520, -654, 1539, -2244, 3155, -12039, 731, 3379, -1904, 6866, 9669, -2384, 2099, -2426, 1633, -3358, -5662, 2164, 10679, -5330, 7066, 5826, -187, -4840, -1174, -3694, 976, -2548, 2292, -3517, -1007, -4041, 684, -14986, -4789, -4135, -4376, -10678, -1482, -10466, 3575, -1960, 3185, 1198, -196, -892, -5424, 4802, 13608, -7772, 3952, 4404, -52, 1097, 3182, -2699, 900, -1258, -8055, -3102, 784, -2574, 1556, 1060, -5417, 537, 11991, -650, -487, -10250, 6766, -3716, 1062, 2525, 1039, -3002, 5742, -502, -4583, -144, -144, -5896, -978, 1786, -1420, 1944, -130, -5202, -1578, 7821, -11675, 9980, 5065, 5942, -362, 16344, -801, 1932, 1242, -10, 791, -170, -2468, -479, 3297, 4328, 11473, -1549, -12316, 209, 1739, 1875, 1305, -4310, -13049, -4913, 9208, -966, 2570, -138, 890, 1373, -1324, -965, 8563, -7560, 490, -1392, 5695, -5656, 5431, 3974, -1131, -1246, -1334, -3859, -3150, -12976, -6929, 665, 9393, 490, 2212, 18, 542, 229, -3925, 1836, 4223, 5268, 1200, 471, 20, -9914, -5774, 8362, 5929, -7087, 2005, 15624, 1626, 5419, -1492, -1536, -417, 1957, 2585, -404, -1125, 2296, 596, 836, -356, 745, -2810, 2879, -2354, -21682, -108, 726, -862, 593, -42, 266, -1356, -2119, 19613, 814, 2462, -2940, 222, 3595, 5634, -807, 1219, 2446, 5666, -1839, 7092, -10581, -5136, -2408, 5726, -1116, -2348, -6318, 8991, 6750, -5321, -7344, -2194, -5544, 1705, 12500, 9069, -1966, -4914, 2225, 3537, -1485, -5141, 434, -1620, -5383, -710, -5443, 3930, 7082, 667, -3289, -3202, -2097, 1970, -11647, -2927, -2098, -1345, 3449, -2075, 262, -756, 1829, -271, -1292, 1079, -5746, -344, 3660, -4456, 3593, -7652, -1367, -828, -2290, 1063, 4234, -17596, -911, -6068, 1040, -2956, 2704, 1763, 974, 3132, 697, 1267, 240, -5520, -12368, 10830, -633, -5939, 2307, 1868, -2216, -1261, 597, -6302, -5145, 3550, 7519, -6963, 3752, 876, -1912, 30, -9192, 1075, -4632, 
9108, 1139, 911, 9290, 1268, -1006, -1718, -6668, 3294, -1510, 6527, 456, -1400, 11424, -4168, -3940, 4738, -4863, 2990, 3202, -441, -4744, 4623, -4351, 3997, -11016, -737, 136, 7978, -3801, 4170, 3602, -2217, 849, -2552, -22232, 323, 193, -4, -1030, 590, 1625, 3208, -10595, 2624, -741, 13121, -1044, 1601, 5175, 2199, -3833, 1804, -2314, 793, -11486, -655, -3320, -2975, 4065, -3124, -706, -7264, -1038, -3082, -5503, -7147, 8367, 1205, -1092, -1694, -1078, 11584, 8, -1237, -2077, 9732, 4963, 2780, 674, 581, 8226, -1231, -9252, 644, -3284, -744 }, .cb2232s1 = { 32767, -45, 422, -1139, -1052, -2085, -695, -612, 1451, -288, 58, -878, 53, 2912, -1891, -7148, 1893, 3028, 165, 963, 2260, -7904, 5857, -18824, -2617, -1030, -872, 1500, 1118, -745, 143, -436, 1239, -3840, 1785, -2506, 20237, -1026, -1556, -1371, -660, -1185, 939, 1315, -3658, -5428, 587, -4105, 1596, 12612, 5781, 1172, -3490, -1182, -333, 6258, -2594, 2144, -4830, -190, 1972, 2687, 1327, -987, -15046, 4659, -71, 6890, 1588, -4787, 4318, -3704, 496, -5601, 1954, -1250, -3389, -5156, 9238, 2298, -4945, 183, -2036, 114, 12250, -2330, 71, -3395, -1402, 3668, 531, 10915, 1162, 7738, 9089, -1250, 1500, 6357, 1155, -5094, -2641, 1657, 470, 2022, 535, -306, -18031, -903, 2913, -5486, 1769, -1419, 9082, 2149, 3516, 6960, 833, 1123, 1266, 1672, -690, 9634, -2986, -4675, 1006, -2205, -2919, -3205, -2759, 107, -931, -9694, 2340, -862, -2782, -3636, -9414, 9564, 1057, 8664, 1326, 3928, 1452, -4692, -3437, 8610, -10466, -6638, -2879, 3408, 861, -3057, -823, 164, 3153, -3698, -15693, -886, 1456, 3278, -6160, -57, 1110, 22, -2985, 7299, -1082, -7921, -12212, 480, -7645, -211, 1586, 3874, 3242, -883, 6730, -1597, -506, 3744, 7552, -7607, -65, -1442, 266, 10012, 1594, -2628, 6988, -1049, -516, -691, 672, 4913, 1788, 14973, 342, 962, 7212, 1124, 500, 1135, -311, 3886, 12548, 5432, 6219, 3341, -122, 5636, 6871, -10831, 4010, -10084, 1456, 5216, -1013, 1102, 4164, -1490, -5186, -242, -4498, 3322, 3584, -2176, 5704, 515, -11556, 
1446, 4303, -3928, -4227, -7268, 6069, -11330, 822, -2054, -3035, -2516, -1816, -3796, 8408, 8849, -3030, -8201, 1149, 7388, 1036, 2586, 5618, -2274, -5037, -1497, 384, 1454, 8154, 1672, -2409, 3347, 13258, -979, 513, 7826, 2662, 1818, 5537, -1104, 2645, -10632, -8767, -5667, -1029, 806, -9040, -4684, 792, -5008, -5807, -3924, 964, 313, -2521, 1106, -5728, 13534, -8078, 4216, -1388, -2588, 986, -14536, -1410, 3461, 1360, -1348, -104, 1493, -2858, -2860, 2045, 18330, 4814, -3628, -705, -3228, -660, -2664, 2616, 4548, 3753, 1574, -1319, -1110, 556, 3304, -2803, 13052, 4592, 2922, 13667, -2322, -3056, -2717, 174, -4222, -4296, -7695, 1366, 1786, 1041, -110, -1997, 4102, -1855, -900, 203, 1311, 3412, 4107, 22609, -4112, 427, -2488, 257, -1267, -1277, -6430, -7193, 10667, 4495, -4317, 6846, 13213, 7335, -972, -3137, 335, -609, 3131, 2406, -3762, 2151, -5188, -7675, 2068, -2027, 3722, -773, -3276, 1539, -7886, 1005, 13693, 4601, -8386, -508, 5662, -4889, 93, -10603, 4051, -2, 1094, -4897, -2274, -2377, 2228, -5507, -464, -3455, -227, 9433, 8093, -2245, 3701, -1047, -6827, 2037, 1926, -3610, -15420, -581, -6127, -2075, 2501, -2216, 5385, -2297, -2660, 1563, 3244, 1418, -2012, 2964, 12235, -8595, 2728, -3541, -6511, 11038, 11326, -183, -1102, 1038, 1224, 20, 1441, -349, 1240, -7737, -930, 1411, 6945, 4130, -13544, -2625, 3550, 3149, -730, 7658, 3098, 673, -2259, 2556, 1543, 1478, -951, -8128, 4951, 11919, 4588, -8448, 784, -11498, -1908, 2578, 2936, -7496, -5834, 1987, 3407, -4133, -4924, -1348, -1300, 916, 899, 20257, 2027, 1450, 4388, -3748, 3846, 2187, -1158, 4720, -3613, 5312, 4055, 448, -6383, -8794, -2232, 1920, 834, 27087, -754, 90, 1410, -985, -1381, -61, 650, 1080, 7035, -2772, -1233, 13410, 4494, -472, -2896, -5083, -2217, -5778, 437, -6853, 4996, 3442, 6092, -6497, -3871, 4024, -898, -73, -3067, -2793, 5640, 6076, -2454, 3598, -277, 1672, -6858, 2419, 9753, 6292, 8835, -9909, -4724, -618, 7266, -416, 1965, -4968, 2421, 1155, 3815, -116, -3725, 7872, -4901, 2383, 
1612, -2186, -2302, 2791, -2226, 1144, -13379, -6602, -806, 7099, -2098, 4194, -2128, 8663, -4275, 452, -135, -6053, 1280, 12815, 3278, 8452, 4479, -1648, 1453, 1407, -966, -1016, 3173, -7333, -4552, -13176, -1744, 577, -1572, 611, 1202, -146, -5773, 3012, -3016, -1581, 3162, 3818, -1970, 6195, 1946, -9656, 2861, -7875, 3133, -7840, 10951, -1684, -306, -227, 9776, -82, 1736, 1180, 3457, -2874, 5365, -7428, 7604, 2623, -2998, -2270, 10410, 1252, -2725, -4433, 1758, -5225, 6522, 6698, 712, 4694, -2392, 240, 423, -3030, -12708, -3136, -5176, 480, -1624, -12900, 7537, 4371, 1186, -1828, -757, -1850, -974, -3755, 1415, -6302, 2642, -3823, -1570, -8090, 8251, 1945, -9213, 1147, 4128, -4301, 806, -1745, 704, -2496, 1375, -802, 9, -808, -252, -5453, 3857, 10353, -536, 1875, -2896, -10792, 3358, 1063, -890, 7200, 3660, 406, 2840, -6973, -4469, 4638, -8091, 2772, -8035, 1728, -5315, 7234, -2718, 3707, -1226, -11858, -2397, 772, 3285, -4089, -1400, 1113, -16680, 1885, -435, -959, 242, -817, 259, -2010, -1857, -557, -914 }, .cb2232m0 = { -13394, 14382, -488, -1088, -817, 100, 305, 2267, 2527, -1584, 995, -5781, -3585, -1826, 803, -4108, -3137, 12111, -211, 838, 4879, -1964, -1728, 13830, 2084, -11535, 664, -2499, -3421, -703, 4528, 968, 1008, -12010, 984, 2658, 422, 1412, 10772, 2216, -4291, 1329, -2324, 2392, -2029, 12322, 1053, 169, 12635, -902, -62, -670, -3007, -3322, -2948, 1817, -10688, -1264, 1949, 2734, 1072, -1429, 2085, 10312, -1685, -4433, -1287, -9620, -1132, 20, 614, 2470, 2821, 5934, 3526, 11292, 50, -12970, -11948, 1114, 1980, -945, -713, -5357, 3766, -447, 969, 2247, 11854, 2148, -12393, 1518, 610, 4527, 1164, 1347, -1422, 649, 653, 855, -24, 30152, 20, -564, -2825, -11, -1408, -80, 239, 305, -3163, -854, 568, -18, -4212, -403, 288, -3009, 229, -1152, 16390, -877, -458, 316, -128, -12165, 1088, -828, -10886, -269, 747, -1026, 1716, -10920, -9204, -3123, 958, -6128, -260, -851, -1524, -2386, -6472, 228, 2667, 3158, -140, 1719, 2330, -2730, 3080, 44, 15743, 
-2167, -11637, -607, 476, 408, -12505, -1862, -1892, -440, -3785, -2348, 1229, -338, -438, -797, 29933, -715, 366, -197, 576, -188, 484, 240, -4844, 1168, -1054, 863, 875, -295, -16091, -1972, 3976, 3833, 3056, -770, 1011, -3098, 165, 3973, -9367, -18, -299, -11254, 1005, 8644, -14788, -2268, 2644, -8410, 578, 2169, -766, 2764, 2378, 3282, -2710, 7612, -542, -12062, -2437, -414, -506, -10332, 2732, 839, -2593, 923, 1159, -1057, -7333, -86, 2832, 11328, -1616, -302, 4399, -547, 6107, 9983, -6901, -85, -544, -4916, 366, 4878, -8662, 7313, -3056, -1027, 2381, 8906, -7270, 509, -1124, -2512, -1636, 5830, -5868, 2369, -3236, 12557, -2713, 12793, -2957, 1688, -852, 4723, -122, -2336, -4698, -1306, -7399, -2090, -1953, -1505, 3335, -10906, -2598, 1322, 2556, 7210, -1553, 1262, 1878, -10719, 1746, -2736, 1448, 8734, -9602, 828, 1752, -1632, 8037, 2728, 562, -1879, -10572, -544, -2254, -1997, -6384, -87, -5878, -473, 498, -2960, -5698, -11500, 1815, 2050, 7388, 5230, 2782, 5602, 514, -306, 13022, 523, -10776, -846, 716, 270, 3350, -6021, 1420, -6175, -1978, 3967, 11612, 3320, -4100, -2468, 4595, -5338, 65, 3478, 19, 3501, -15896, -1335, -1861, -1944, 3935, 3630, 4627, -5892, -458, 383, -211, -594, -165, 24865, -656, 2300, -404, 257, 214, -643, 2298, -180, 35, 4974, 834, -998, -1738, 5449, -9222, -10858, 4188, 9147, -5639, -1691, 990, -1945, 3421, -3527, 9005, -2038, -2369, 5098, 967, 15840, 196, -3204, -1079, -776, 11806, -1352, -2053, 2011, 309, 642, 1541, -1466, -4465, 6679, 5756, 7474, -58, -1864, 5908, -1576, -30374, -904, -571, -1136, 425, -22, 666, 1150, -734, 82, 1254, -226, -437, -890, 1464, -3368, 987, -11885, 1127, -7224, 1872, -8198, 8775, -2695, 1457, 15102, -899, -1384, -1975, 1891, 3128, 1465, -1649, -1363, 1483, 303, -534, -7, -1710, -1377, 769, 12698, -8987, -802, 4636, 572, 2004, -3513, 442, -9863, 3215, -4550, 2138, -4346, 5682, 11053, 7220, 9842, 797, -1399, -2679, -5857, -2143, 3241, 2330, 248, -630, 1044, 639, -3432, 125, 22083, 1976, -1003, 813, 552, 
-571, 4358, -6200, 3635, 5439, -636, 233, -4856, -3519, -460, 6956, -4215, 1537, 7895, 13910, -7637, 1392, 1572, 648, 3690, -1988, -2463, -3302, 677, -924, -535, -6025, 4126, 7178, -4145, 4960, -1860, -10926, -1470, -9506, -226, -258, 32, -479, 2222, 94, 11358, 3984, -2732, 2111, -590, -2444, -344, -425, 598, -1382, -1213, -17632, 1566, 1387, -3521, -57, -1829, 4788, 574, -206, 7962, 9157, 6459, -1594, -1355, 4874, 1066, 2006, 1793, -7569, 2369, 1108, 1305, -3046, -14052, -1736, -4045, -4328, 7497, 3304, 1518, -5666, -529, 4256, 4667, 932, -1495, -10414, -414, -2110, -3150, -1250, -8799, -419, 5162, 8497, -8720, -746, -3015, 6403, 3855, 7350, 165, -59, -958, 5780, 6044, 1736, 3016, 31, 1012, 3422, 7598, -6837, -2092, 2262, 6171, -10362, -2108, -1352, 1798, -4872, -6369, 2507, 2640, 6074, 1835, 2948, 388, 398, -9295, -5384, -2088, 2096, 742, 3286, 441, 7135, -2112, -9958, -5612, 2479, 5427, -10114, -674, 308, -1037, 770, 514, 1868, -252, -11901, -984, 670, 2506, 5396, -1047, 9113, -10865, 4872, 1720, 2226, 947, -2336, 2649, 173, -1464, -2874, 4463, -1270, 3429, 6242, -5380, -7772, -4550, -8451, 2045, -2855, 5336, -15066, -418, -4886, -648, 3736, 945, -956, -4825, -116, 130, 1889, 9642, 3790, -4371, 7180, -1556, 6562, -2207, -7910, 506, -21594, 662, -338, -943, -1022, 6453, 301, -464, -457, 840, 3313, 10946, -294, 8156, 138, -1425, 2397, -792, 6468, 4615, -511, 15938, 494, 274, -5976, -660, 3894, -2140, 1424, 2003, 4101, 2823 }, .cb2232m1 = { 20456, -1952, -1581, 869, 628, 76, 1404, 4060, 508, -3177, -946, -2992, 2422, 1139, -1931, -240, 1011, 365, -1106, 20973, -1438, 372, 137, -1058, 1171, -1252, 2794, 1434, 1814, 482, 3948, -2704, 20422, -241, 441, -1121, 499, 1036, -918, 9708, -3166, -488, -10379, -3201, -5254, -1871, 5665, -12622, -7591, 127, 1469, -2267, 1813, -4197, 2065, -602, -395, -652, 333, -19114, -1092, 4310, 1590, 1688, -1453, 177, 4402, 1168, 5972, -1295, -3258, 1542, -4832, 3377, -5545, -3622, -4944, 2064, -2846, 8118, -845, -6778, -3640, 5729, -907, 
-11007, -5, 2634, -11118, 2108, 144, 13299, 251, 336, 563, -75, 3004, 169, -3892, 1477, 1066, -1571, -1113, -1088, 517, 465, -21841, -1541, -1094, 1841, -9213, -17478, 1662, 8, 1192, 1174, 1014, -5659, -695, 316, -5161, 1803, -1056, -2369, -2919, 2941, -9712, 1975, 426, 11214, 2288, 6186, -7348, -3062, 3341, 3252, -4102, -346, -876, -7088, -3330, 4507, 310, -1632, 299, -2636, -2740, -752, 10159, -7201, -9568, 3134, -4002, -41, 2479, -1816, -14099, 3575, 1161, 6427, -2466, 390, -1883, -6265, -1266, -263, 1474, -592, -21234, 94, 4187, 1, 3227, -3273, 1950, 406, 543, 1661, -2648, -9252, -2048, -5987, -722, 4932, -4410, 12504, -1572, 2244, 5610, 307, -9710, -3642, -6436, 4368, 2956, -2269, -6196, 4069, -766, -3695, 3416, -5786, -9668, 11677, 1208, -965, 1516, 1132, 1018, -6168, 1970, -10357, 1379, -725, -8789, 3730, -65, -4758, -1818, -1050, 9641, 4519, 2886, 2667, 6348, 2436, -438, 1978, -9374, -1286, 3893, -2073, -11199, -2081, 3345, -3444, -9480, 2410, 1986, -1869, 3252, 5949, -2119, -401, -214, 3416, -1067, 18510, -2986, 3510, 508, -357, -837, -1205, 2884, 11587, 11565, -555, -2664, -873, 3642, -2068, 1734, -4408, 330, -181, -1358, 1407, -9739, 809, -10203, -2066, 3440, -2063, 3238, 3734, 1671, 750, 6890, 4068, 1238, 493, -1330, 76, 8918, 10855, 85, 12236, 3570, -1074, 3008, -9424, -3186, 1271, -380, -157, -4974, 10575, -1378, -219, 1354, -1589, 10936, 2268, -3787, -1040, 7567, 924, 10490, -806, -1318, -1576, -209, 93, -3745, -3820, 439, -9828, -6265, -864, 31342, 35, 1332, 443, -590, 846, 104, 868, -863, 1526, -1088, 11494, -7055, 3564, 109, -3072, -2234, -4530, 1866, -3425, 9940, 3158, -1821, -680, -1124, 2884, 1191, -61, -9698, 7596, -558, -9019, -1181, 208, -1342, -68, -312, 294, -1468, 1410, 39, -21081, 724, -2137, 935, -8, -10297, 3509, -6510, -6558, -6906, -1905, 1915, 5920, -8983, 3416, 7300, -1372, -1422, 1822, -10433, -2530, 1669, 554, -3008, -3351, -922, 8279, -5184, 5520, 4785, 683, -506, -4558, 1938, 8442, -12639, -54, -2907, -820, 10004, 1780, 485, 
1401, -3786, 786, -5937, 2632, -1540, 972, -3342, 2294, 8076, -1006, 11731, -1825, 3036, 1085, 1160, -9680, 11111, 7838, -2504, -2112, 2376, 2534, 3624, 555, 3610, -520, -831, -15, -498, 167, 711, -22685, -999, -1466, -1643, -394, 5404, -4247, -2307, 4052, -1156, -1240, -490, -1598, -4365, -8382, 10493, 464, -16592, 3723, -7709, -821, -4218, -922, 398, 5635, 2184, 5090, -7144, 2420, 792, -324, -1278, 3172, 13101, 1608, -3996, -2219, 2995, -6924, 816, -2482, -406, 3458, 503, -8154, 3460, 2542, -3703, 8524, -61, -430, 23212, 1203, 2335, 5556, -476, 923, -565, 593, -1611, 1814, -1614, -7067, -1957, 10166, -4306, -421, -4026, 1854, -9881, 667, 7720, -2906, 7003, -1823, 6344, -8614, -2965, -2720, -62, -802, 1945, 4574, -4604, -8341, 518, -3543, 95, -4262, -5220, -133, 10270, 1999, 3234, 8900, -4866, -3708, -4465, 4542, 2545, 1770, 6995, 3559, 1133, -1152, 14680, 1002, 634, -12913, 1686, -1645, -1796, -50, 112, -1108, 1070, 686, 1068, 1555, 896, 3498, 10458, -32, 12017, -737, 650, -432, 404, 170, 10873, 1864, -1718, 11061, -1556, -3766, 225, 6999, 1730, -6919, -1895, -2919, 8250, 10050, -4631, -1488, -4801, -1504, -2736, -110, -3630, -2752, -11162, 1128, -2580, 11692, -678, 1338, 2175, -6030, 616, 1651, -7034, -3057, 2420, 1998, 4383, -1721, -10762, -428, 2902, -906, -4298, 2141, -1242, 5464, -607, 5389, -8946, -3890, 10884, 1544, 628, -1969, 13902, -1570, -1080, -689, -4676, -3642, 753, -11351, -110, -744, 4286, 1163, 3105, 9752, 11143, 4296, -1698, 1012, 2284, -989, -958, -9481, 738, 24, 426, 1638, 3898, 8885, 2938, -8826, 2982, -1679, 8466, -651, 5144, 2736, 751, -84, 7710, 3077, 2885, 146, -1102, -2569, -2039, 11059, -9950, -1048, -1031, -33, -5118, -1096, -1986, 2306, 2400, 9320, 6188, 500, 2090, 61, -11357, 118, 1505, 1032, -1920, -164, -9744, -4670, -11029, -102, -960, -1023, -2570, 4102, -3989, -11478, 772, -1515, -1102, -2194, 1722, -1195, -8144, 746, -9534, 3250 }, .cb4440l0 = { -14497, -1982, 631, -984, -2115, -3252, 2755, 2017, -2110, -8864, -792, -1291, 
-2761, -2365, 698, 1047, 972, -14703, 10590, -3945, 663, 972, 1204, -2801, 1295, -1296, 50, 1448, 888, -1879, 122, 78, -183, -588, 16202, -388, -2240, 1136, 1266, -6445, 2619, -1664, -6329, -2700, 1557, -497, 598, -110, 1298, -334, 191, 29897, 387, 419, 76, 152, 533, 78, 112, 101, 158, 136, -236, 88, 43, 107, 84, 21, -6385, -1711, 1757, 1411, 9152, -72, 1428, -1098, 10328, -506, -360, 285, -36, -2816, 819, 88, 176, -481, -172, 2067, 3268, 5479, 8605, 11272, -1880, 361, 1582, -4973, -1379, 3835, 74, -3, 493, -431, 1390, 101, -550, 59, 476, -469, -583, 568, 732, -1015, -1104, -698, 23922, 1130, -1268, 280, 204, -59, -9789, -317, 935, 2944, -10402, -2564, -4648, 1506, 3834, -1002, 2805, -158, -409, 814, -150, -97, -3573, -1550, 1356, 5350, -365, -2622, -3454, 310, 1194, 911, -10928, 937, 7980, -5286, -554, 1999, -1263, -562, 10, -321, 744, 44, 64, -274, -30136, 340, -1051, 756, -30, -6, -269, -273, 12, 95, 1565, -13194, -11810, -485, -1574, 414, -240, -452, 564, 740, -476, 959, 1079, -1568, -422, 37, -154, 10117, -68, 1412, 11862, -3420, 4169, 5178, 527, -1027, -1030, -1985, 448, -716, 1696, 1942, -254, 308, 1100, -790, 8102, 6630, 3653, -1018, -587, -6990, -19, 1671, 1425, 8089, 3708, -1182, 774, 659, 113, 437, 50, 835, -532, -11209, 1682, -7490, -2592, 1234, -4689, -7301, -143, 3361, 1121, 177, -473, 513, 136, 965, -4020, 4639, -1212, 1271, 2905, -6865, 10499, -3800, -3354, -5029, -3606, -950, 4490, 526, 1006, 2, 1760, 5819, -55, -1098, -1843, 348, -2062, -9196, 3712, -11466, -3218, -858, 2720, 589, 320, 861, 59, 5357, 564, -380, 538, -142, 490, 212, 1716, 670, 1904, -181, 2979, 943, 16916, 1271, 988, -802, -1490, 9154, 643, 1725, 1347, -2827, -4096, 485, -7091, -3180, -4747, -1604, 1576, -5724, 6104, -139, 1726, 11715, 360, 7519, 2513, 5192, -2208, -1993, 829, -387, -5724, 4418, 116, -2955, -226, 249, 377, 2149, -2929, 5021, -3064, 800, -1459, 11384, 8556, 1740, 368, -2839, -2049, 1438, -1357, 4084, 1896, -528, 1621, -1760, 13741, 302, -1018, -9774, -3521, 1302, 
1374, 1139, 918, -1724, -764, 858, 804, -1772, 372, -322, -526, 11924, -11944, 4012, 1749, 1737, -1545, 68, 889, 280, 690, -2200, 1068, -484, -171, 455, -44, -3178, 2243, -590, 749, -792, -19876, 198, 236, 2695, -3413, 652, 284, -820, -1134, -199, -112, -5650, -418, 1047, 1090, 2260, -3297, -2164, 13524, 1720, -326, 910, -1706, 3912, -1175, 1687, -2152, 50, 35, 1718, 721, 9316, -2256, -4330, 6961, 4432, -8043, 45, 1370, 3472, 2892, -3224, 1368, 1355, -562, -694, 746, 198, 1188, 2819, 3131, -2371, 6438, 847, 2111, -10187, -3451, -9826, -3502, 655, 649, 1460, 270, 118, 45, 192, 188, -1139, -258, 663, -84, -27519, -765, -905, 357, -4, 89, -372, -24, 178, 1127, 209, 1177, -2762, -587, 1488, 8989, 3217, -2550, 215, 9540, -7196, 1259, -3716, 2767, -261, -216, 872, -3008, -2076, 8682, 709, 3629, 87, -3114, -10624, 246, -1670, -1738, 1229, 7624, -1120, 784, 305, 233, -185, 280, -1466, -268, 198, 499, 308, -2187, -1149, -388, -38, -338, -1084, -19424, 40, 1958, -2240, -86, 264, -9876, -1287, 4086, 3742, 2502, -10078, 4574, -1493, 1078, 3218, 2410, -364, 1049, 2638, 35, -1295, 200, -2847, 10818, -12064, 2375, 348, -353, 2788, -821, -3196, -511, 146, 2015, 235, -1094, 2622, 2688, -79, 5176, -884, -11814, 794, 2696, -6704, 3452, 1295, 3872, 2924, 4498, -166, -598, -1213, 891, 5478, -266, -777, -5, -776, 1003, -1837, -156, 17910, 453, -297, -1545, 857, -288, -308, -6373, 2045, -1846, 3007, -2236, -1904, 815, -2889, 4200, 8320, 9872, -614, -834, 3856, 414, -234, 1559, -7451, 3641, -1230, 837, -127, 2652, 411, -532, -12548, -1692, 1034, -2418, -968, 558, -1564, -1952, 307, -1064, -6776, 1588, -2636, 949, 8272, -12, -3468, 3481, 6588, 2580, 7393, 272, 1528, 1818, -2206, -349, -396, -11704, 1487, 9753, -4665, -24, 2084, -780, 5036, -647, 3668, 561, 1099, -1094, 534, 1270, -99, 1006, -476, -528, 12481, 1589, 1593, -1682, 7022, 2664, 8702, -563, 1082, -206, 87, -1978, -144, 228, 1232, 889, 14340, 65, -1061, 10510, -95, 649, 53, -962, -383, 2479, 1322, -1798, 2840, -492, -419, 90, 
-680, 79, 1026, -20912, 1593, -742, 1086, 516, 699, 2393, -64, -2010, 46, -859, 111, -440, 14281, 272, 797, -10141, -3734, 3126, -3050, 1300, 73, -1754, -1278, 1890, -2710, 704, 1160, 1, 269, -24, -622, 124, 138, -522, -510, 95, -402, -27306, -470, -214, -159, 396, -201, -372, 122, 136, -1005, 744, 1949, -810, -2648, -726, -384, 955, 1232, 1354, -345, -19485, 1056, -193, 1257, -263, 398, -752, 602, 98, 793, 17, 20186, -189, -2615, -174, 166, 436, 411, -1046, 374, -471, -253, 233, 8352, 1342, -1279, 9305, 2190, -3239, -5262, -3454, 1844, 684, 303, -4434, -6041, -3495, -2482, 389, 353, 159, -14, -29179, -511, -158, 92, -401, -36, -297, 447, -605, 269, 85, 212, 8, -118, -130, 207, 13150, -8712, 2504, 1355, -3268, 1396, -4748, -2200, -1560, 228, -162, 1179, 3024, 742, -860, 69, 10, 30006, -538, -489, -125, -214, 364, -682, -283, 532, -134, 227, -448, -20, -266, 70, 2, 9310, 14858, 856, -493, -3357, 36, -248, 214, 281, -73, 3268, 745, -245, -1007, 146, 392, 36, 8042, 2953, -6603, -7697, 4425, -2498, 571, -2194, 3388, -794, -561, -2763, 1912, -3030, 225, 214, -27, 834, -10661, 437, -506, -535, 8397, 1332, -2406, -8868, -2972, 1385, 296, 865, 2318, 890, 244, -121, 226, 375, 896, -10381, -2266, -3404, 983, 1255, 259, 11427, 455, -3041, 307, -2446, 476, 723, 18, -10224, 510, 552, -654, -876, -465, 628, -12572, 786, -393, -4162, 938, -1327, -1695, -608, -1352, -131, -880, 830, 1016, 21875, -408, -1560, -500, -1682, 453, -930, 1316, -136, 434, -683, 412, 202, 233, 382, -2002, -9267, -1034, 8710, 434, -8121, 3035, -3121, 1792, 2712, -1537, -1082, 854, 1337, -1084, 91, -4485, 2545, -4412, -1930, -12234, -4802, 4641, 437, -928, 2163, -3154, 521, -665, -1200, 2654, 931, -388, -118, -1144, 133, 5089, -1194, -1528, -967, -795, 188, 1918, 897, -7046, -7617, 7118, 5755, -2724, -7894, -472, -360, -591, 990, -3032, 7742, 726, 5490, 9383, 479, -3032, -1904, 7158, 4706, 2442, -1576, -58, -156, -3977, -2696, 4195, -166, 3342, -1566, 3767, -4159, -5750, 5505, -7663, 4516, -4073, -2612, 
5136, -290, -666, 1282, 776, -566, -602, -310, 1003, -648, 2928, -3159, 427, -1168, -2702, -16990, -205, -343, -1196, -1980, 1653, -512, -1820, -418, -3368, 3522, -1966, 4964, -5728, -5185, -210, -1721, 10131, -7060, 3351, 334, -96, -3193, -1713, -614, -2633, 147, -1552, -2363, -3724, -1731, -7350, 5453, -2732, -2867, 12458, 416, 0, 4414, 833, 590, 1617, 405, 73, 868, 232, 195, 15, -196, -782, 749, -955, -84, 1176, -553 }, .cb4440l1 = { -12227, -3413, 12848, -1336, 20, 894, 254, -1001, -1381, -406, -1157, -458, 300, -395, 825, -34, 74, 382, -1018, -10266, 1338, 11091, 544, 797, -4304, 1389, -747, 1924, -257, 2615, -37, -4375, 782, 158, -378, 19, 652, -539, 1012, -4211, 1263, -925, 96, -9226, 5921, -8209, -71, -1838, -2201, -7441, -60, 393, -5626, -264, -1002, 85, 1989, -1616, -216, -914, 9907, 8044, -578, -7830, -1705, -3624, 2430, 59, 5813, 870, -317, -2545, -4020, -1330, 1215, 9352, 5425, 324, -4803, -681, -506, -4710, -6574, -4184, 65, 729, -1310, -1387, 1385, 2364, 1672, 2493, -438, -1367, -907, 38, -20220, -1644, 512, -413, 348, -112, -532, 785, 1332, 7140, -1916, -1766, -11570, 1811, -9167, -76, -1531, -175, -1739, -771, 2014, 519, 15, 576, 8736, -979, -28, 1830, 329, -302, 12206, -1501, 5195, -305, 1456, -581, 1488, 142, 235, -157, 192, 1540, -922, 11056, 11823, -2964, -1488, 1712, -2018, -880, -3282, -190, -198, 2436, -248, 222, 22, 863, 1504, 2078, -2047, 216, -1270, -732, -18252, 1186, 3178, -730, 432, 934, 1617, 873, -491, -70, -768, 679, 1398, 537, -364, 172, -541, -94, -24, -129, -26725, 201, -554, -357, -71, 60, 96, -1665, 1425, 1244, 332, -1068, 326, 834, -620, -1473, 1585, 1432, 928, 18782, -1388, 2897, 448, 40, 1323, 1433, 787, 215, 3297, 2586, -856, 451, -17700, 735, -43, 405, -1252, 744, 1012, 677, 312, 206, -279, -432, 6677, -87, -72, -10400, -106, 11224, 1152, -422, 2024, 704, 2462, -1197, 232, -119, 4, -879, 1600, -708, 3496, 279, -143, -1096, -555, 4594, 1486, 161, 942, 2018, 2474, -16010, -380, -193, -11415, 457, -276, -11220, -1604, -38, 
813, -4044, 1888, -4265, 1647, -882, 981, -734, -110, 140, -3050, 1248, -549, -1167, -967, 3586, 688, -1380, 424, -17959, 2022, 2274, -44, -1406, -432, 1335, -659, 9555, -3581, 11045, 1870, 806, 599, -2065, 156, -4420, 16, 2349, -609, -3058, -738, -60, -548, -119, -49, 26, 1528, -1842, 6306, 14078, -692, 5480, 321, 1996, 1376, -3086, 490, -54, 1151, 932, 445, -9887, 15808, 3085, 866, -2020, -1785, 2126, -920, 414, -290, 138, 244, 994, -702, 1410, 330, 202, 675, -389, -241, 31306, 380, 300, -53, 804, -109, 413, -44, 6, 14, 486, -293, -112, 26, 11632, -836, -3948, -518, -1364, 11360, 3558, -588, -2084, 490, 381, -955, 2207, -2953, 1115, -265, 2, 65, 464, -180, -111, -174, -152, -30508, 121, -207, -835, 1126, -185, 91, -96, 222, -99, -93, -10138, -430, -184, -372, -194, 953, -100, 382, -1422, 13931, -1835, -1657, 821, 408, 808, -601, -463, 7142, 5596, 3171, 2174, 2740, -11350, 1019, 1449, -386, 1642, 3703, 4271, 1664, 2232, -674, 983, 551, 8543, 154, -383, -2419, 1117, -520, -10966, -4406, -3742, -79, -909, 1813, 5043, 412, 1099, 1434, 173, 788, -92, -1004, 1288, -87, 931, 10241, -855, 6, -405, 2580, 11455, 1150, -1916, 3614, -262, 292, 897, 9673, -381, 1711, -2713, -1111, 282, -2180, -2282, -2266, -724, -849, -11787, 888, 3120, -1459, 495, -10812, -792, -274, -984, -12223, -737, -2394, -299, -578, -2758, 1521, 774, 1938, 857, -1935, 217, 654, 1452, -3695, 6734, 804, 134, 946, -2156, 9495, -600, -1962, -5252, -246, 1269, 8492, 1261, -2205, -106, -1314, 828, 1013, -12059, 663, 436, -2648, 9863, -630, -2961, 3004, 1015, -3153, -1475, -25, 399, -846, 430, -1237, -156, -187, 1115, -502, -363, 386, -2820, 942, -926, 727, 1130, -20388, -274, 1140, 198, 199, 2548, 442, 157, -1546, 3693, 892, 460, 6552, 4858, -2560, -8673, 1930, -8913, 3427, 686, 61, -8830, -358, 1338, -74, 1180, 2871, -3822, 104, 2414, -1742, 11425, -4522, 393, -3016, 972, 34, 117, -16113, -6900, -6964, 1726, -843, -242, -2141, 803, -1093, 442, 1776, 2429, -1000, 489, 393, 635, 389, 1126, 12285, -1648, 
-11396, -2885, -56, 3840, -174, 3177, -1708, 1189, 1914, 1514, -189, -88, 276, -240, -120, -2929, 9823, 678, 568, 26, 10080, -2575, -806, -64, 6406, -82, -1171, 2169, -1804, -667, -37, 54, 4208, 10829, 11920, -468, 1916, -809, -370, 144, 3616, -263, -4352, -124, 300, -246, -440, -115, 447, -407, 20869, -340, 54, -764, -807, -699, -283, 727, -922, 1098, 577, -6, -809, -50, -115, -75, 280, 156, 182, 225, -30432, -212, -417, -245, 177, 94, 4, -627, 167, 47, 152, 148, 1325, -2436, -10063, -696, -9966, 1032, -1024, -3702, 3933, 400, 333, 692, -3858, 2599, -1215, -389, 393, -666, 2135, 10280, -2443, 1972, 410, -392, 590, 12322, -523, 1141, 52, -1468, 819, -213, 162, 116, -614, -10630, -204, -1247, 535, 199, -6058, 2538, 1644, -11539, -1562, 1462, -1493, -218, -296, -605, 321, 607, -366, -257, -837, 4536, -11683, 1266, -3805, 4496, 2854, 8, -8848, 124, 656, -1041, 411, 144, 916, 445, -91, -260, -149, -882, -433, -121, 345, 68, 349, 821, 652, 251, -23053, 1015, 712, -73, 7038, -1520, 5810, -12604, 2841, 425, 265, 1546, 938, 1851, -1180, 2751, -498, 1289, -774, 327, 4047, -8132, -2622, 5449, 3221, -2990, 10107, 1880, 173, -4006, 399, -332, 642, 297, -4513, -1230, -330, -788, -21881, 903, -1308, 547, -522, 1885, -1730, -63, 973, 897, 670, -657, -232, 498, 92, -8, -11010, 1072, -368, -1864, 11505, 3497, 730, 2158, -1629, -1351, -1583, 2247, -1506, 2144, -902, 639, 175, -6006, -986, -4246, -1510, 1785, -9792, -495, 1995, -9189, -1414, -2550, 1578, 2390, -2989, 1673, -1980, 21, -4054, 8552, 1155, -301, 1204, 3776, 262, -1828, -1837, -1014, -9, 2711, 1467, 463, -11605, 1743, -956, -1213, -3892, 1534, -10298, 22, -902, -658, -1759, 2507, 1552, -12298, -1050, 623, -3221, 1522, -36, -446, -5925, 2144, -2844, 15080, -1984, 3631, 1931, 1894, 1193, -1694, -3172, -813, -1336, 534, -365, 833, -293, 21759, -1266, -1216, 996, -2, -393, -858, 759, 969, -230, -151, 977, -874, 119, -896, 262, -118, 89, 95, 94, -437, -30375, -462, 360, -588, -334, 86, -1027, -208, 536, -196, 367, -467, 119, 
-32, -2544, 6204, 8830, -264, -7847, 848, 2267, 3877, -6378, -2249, 1420, -1868, -3443, 3747, -590, 58, -274, -6065, -8472, 5906, 3109, 5834, 3905, 2086, 1300, 3828, -518, -528, -3672, 1794, 4353, 408, 566, -2577, -1137, 2749, -2662, -528, -7479, 5550, 2932, -336, 3681, -2034, 212, -8733, 1017, 2258, 8225, 387, 227, 877, 2752, -1375, 2636, 8131, 3850, -6870, -1158, -3736, -8478, 228, -5809, 97, -2555, -2956, -928, 678, 112, 1434, -1250, 1240, -412, -4267, -3811, 4322, -3430, 7705, 5456, -6876, -3452, 7329, 3142, 220, 662, 1531, -5492, -1388, 6842, -3631, 362, 5029, 8052, -2367, -5346, 5724, -358, 2469, 2196, -1426, -272, 534, -192, -531, -705, -70, -259, 93, 335, -94, -145, -17, 920, 1186, -818, -599, 343, -19859, 2968, 161, 128, -4282, 598, 152, 1210, -1317, -1545, -229, 181, -6488, 5699, 7270, 6271, 8809, 27, -4770, -804, -168, -247, -680, -129, -470, -152, 915, 176, -904, 622, 280, 2986, 1034, -1046, -482 }, .cb4440s0 = { -12085, 8192, -1802, 4587, 5947, -3183, -2629, 1837, 2434, 252, -612, -4697, -576, 150, -704, -640, 174, -126, -10309, 350, -3187, 4714, -2829, 12618, -2172, 3502, 465, -159, -601, 1306, 1174, -448, -292, -136, 242, 31, -9005, -6203, -10027, 25, -209, -20, -1292, -1252, 4304, 3681, 4462, -4401, 4412, 1240, -576, 3618, 595, -237, 2544, -6032, -1511, 1523, -3668, -3472, 5552, -4901, -272, 5963, 2740, -878, 13010, 191, -2017, 768, 455, -45, -6873, -3664, 2639, -961, 3068, -4242, 1327, 2362, -1909, -1114, 100, -5940, 220, 865, -12952, -76, -1279, -591, 1092, -3502, 88, -2118, 13053, -10141, -3024, -533, -1923, -4097, 135, 1672, -1661, 1646, -370, -361, 644, -197, -6796, -10948, 11692, -974, 488, 349, 3936, -1506, -149, 513, 1401, -1776, -391, -210, 57, -56, -344, 1018, 7989, -4957, 167, 987, -60, 62, 1622, 1207, -69, 338, -16133, -46, 1018, -1460, -821, -646, 1316, 126, -4631, -842, -1505, 15833, -6404, -4514, 2946, 2923, 1198, -3141, -3109, -1613, 1853, -906, -436, -1110, -282, -214, -3424, -3141, -3988, 284, 22262, 1269, 1787, -1116, -1429, 1017, 
371, -187, -825, 534, 350, 1088, 26, 176, 8914, 6662, 935, 2074, -7986, -4780, 2194, 1796, 697, -4040, 2486, 1700, 9150, -37, -1560, 2449, -162, 128, -7469, -2690, -281, -4698, 424, 535, 1416, 243, -575, -1160, 326, -2417, 808, -15816, 994, -302, 26, 894, -7376, 395, -586, 823, -1341, 972, 100, 241, 743, 470, 267, -550, 474, 182, 18252, 178, -182, -7, 3496, 2132, 863, -151, 741, -2158, -763, -652, -503, -434, -736, 770, -156, -19071, 443, -354, -243, 66, 4258, 6714, 3577, 17338, 556, -3570, 1269, -1406, 1668, -349, -90, 781, 82, 558, 936, -788, -1072, -21, -6472, -3022, -475, -6997, -2816, -3774, 1683, -13950, 3482, -1872, 2624, 1064, -318, 1300, -1214, 179, -11, -124, 4560, -2827, -6314, -5736, 1159, 1309, -5462, -11652, 4192, 151, -543, -3484, -2288, -119, 745, 1373, -121, -629, 5204, 7650, -2062, -3370, -2894, -338, -1361, 1080, -3674, 12852, -6119, -1578, -736, -241, -1564, -109, -441, 335, 416, 1678, 4802, -3239, 6182, 154, -3656, -1337, -17027, 1707, -381, -1704, -377, 1022, -592, 983, -321, 37, -1846, -4500, 2575, 14162, -560, 9385, 4179, -1340, -3466, 3235, 1727, 1545, -23, 636, 280, -39, 871, 173, -8915, -2427, 2146, -3698, -12153, -3773, -3873, 5042, 112, 788, -1139, 245, 546, 278, -8, -1005, 443, -76, -1256, 8255, 3841, 6116, 4226, 3705, -1278, -6470, 5220, 5892, -3468, 2736, 5427, -3336, -264, 1906, 294, -60, -7078, 7699, -9792, -7108, -2030, 1055, -6962, 702, -2074, -232, 127, -430, 658, -272, 757, 138, 159, -340, -4606, 1021, 146, -7690, 6001, 5660, 3363, -367, 13222, -441, 13, -874, 668, 2293, 875, 1238, 110, 778, 1434, -976, 2151, -8169, 1421, 2622, 206, -795, -816, -14443, -1583, 3356, 2971, -964, -321, -841, -404, 111, -5595, 4248, -3819, 214, -2520, -712, -1505, 849, 947, -876, 188, 3221, 863, 105, -17336, 1818, 14, 17, -6349, 379, 4746, -12405, -560, -3448, 3664, 8251, 845, 383, 1348, -739, -780, 1695, 4828, -123, -647, 823, 9940, -183, -1804, -7112, -161, 578, -619, 11534, 3214, 1586, 4784, -2540, 1188, -304, -485, -648, -824, -595, -8817, 
4138, 927, -3259, -198, 4022, 2213, -1627, 645, 14602, -1058, 1481, -1670, -113, 564, -710, -451, -360, -1261, 2504, 247, 5566, -7262, 1344, -5106, -1608, 1946, -4240, -7393, 10440, 3306, 1940, -999, 155, 832, 55, 10218, 11475, -3252, -8295, 1347, 2405, 3421, -2619, 2262, -2829, 754, -307, 548, -2040, -1130, 317, 170, 292, 248, 2601, -18930, -1942, 1417, 1678, 3310, -2578, -1969, 1550, 3010, 70, 8, 3064, -848, 504, -172, 180, 1787, -1133, 2427, 1002, -664, -40, 192, -23400, -1004, 513, -818, -382, 360, 360, 268, 98, -202, -192, -668, -12924, -11702, 7325, 797, 1937, 674, -2458, -541, -1497, -1673, -955, -356, -486, 182, 299, -46, 65, -4232, 1418, 6532, 2356, -4894, 4870, 3369, -4585, 8743, 1497, -1451, 862, -8612, -1718, 1716, -2389, 371, 592, 7397, -3188, -649, 126, -1300, 1374, -1292, 645, -1494, 2736, -1468, -1808, -17223, -352, 111, -222, -236, 171, -198, -7994, -3822, 5324, -16856, -517, 119, 314, -360, -515, 435, 520, -638, 1635, 420, 1191, 830, 710, 6897, 2925, 3091, 510, 3268, -1702, -16186, 718, -3127, -463, 763, -1035, 725, -122, 646, 172, -164, -277, 5853, -7074, -10, -1770, -2544, 5978, -874, -494, -232, 14465, 1815, -1902, 987, -1533, 1216, 741, 620, 161, 4414, 4184, -32, -2944, -4619, -462, 15701, -1026, -140, -2396, -1747, -538, -1024, 219, 854, -351, 860, -226, -4390, 732, -2003, -2430, -540, 592, 1622, 1180, 385, -2052, 4050, 17401, -650, -243, 1709, 1261, 95, -307, -5110, -666, -7094, -533, -1293, -17357, 2929, 2389, -119, -413, 317, -962, 709, -1552, 26, 175, 700, -570, 20120, 1107, 232, 169, -889, -533, -1276, 22, 959, 866, -954, -792, 873, -172, 1757, 195, 148, 423, 4490, 8782, 631, 682, 1832, -3728, -1742, -11130, -1201, 1776, 9268, -586, -1358, -646, 626, -866, 5, 263, 3950, -760, -2914, -12751, -12669, 1513, -4, 631, 1835, 312, -167, 1546, -532, 619, 1176, 1436, -116, 312, 7054, 3120, 4075, -1320, 715, -206, -1572, 1350, 17688, -1182, -1568, 680, 6, 207, 1010, 600, -766, 554, -1483, 644, -8810, 624, 148, -4015, -1536, -1863, 92, 730, -14806, 
386, -5174, -1420, -331, -254, -104, 275, -7268, 2563, 11983, -65, 8043, -1623, -2589, -2610, 1328, 3154, 1935, 3672, -1761, 4984, 661, 209, -1038, 122, -1019, -28948, 55, 358, -539, 488, 55, 618, 20, -314, 446, -1016, 618, -93, -94, -331, -36, 194, -1706, 6628, 396, -146, -765, 10500, 2619, -82, -10894, -3908, -888, -192, 620, 163, 78, 774, -293, -104, -4826, -14066, -1883, -3258, -4577, -1484, 5412, -4274, -4951, 3316, -907, 1948, -1187, -404, 3654, 400, -70, 459, -3224, -3194, 2338, 4390, -5, -3167, 3273, 116, -1026, -1668, 3767, 272, -16662, 137, -1634, -1007, 220, -310, 982, 8220, -16, -1251, -2644, -3344, 2236, -1573, 8174, 612, 1142, -10799, 393, -707, 4804, 397, 1232, -292, 5762, -15608, 2921, -6440, 3544, -2395, -504, 1890, 172, -1010, 178, 380, -1163, 404, -1230, 1034, -596, 105, -2038, 1991, 5613, -312, -4156, -10205, 3092, -4704, -6101, -1620, -1037, -1130, 1590, 8321, -797, 247, 954, -103, 3838, 2330, 10064, 3197, -8508, 1300, -1012, -6607, -3861, 5651, 31, -475, 1582, -1370, 1107, 2164, 743, -567, 4842, -2930, 3191, -190, -2230, -47, 254, 2147, 591, -512, 1312, 1159, 811, 1444, -1312, -257, 16016, 789, -2562, 3983, -373, -9255, 302, -3655, 5750, -3856, -6941, 3934, -2314, 5556, -4099, -265, -479, -4843, -130, 20, -4859, 3083, 6482, -3738, -3936, 590, -6368, -1784, 75, -3903, -6834, -4452, -871, 764, -1118, 8731, 38, -148, -3368, -6330, -370, 2234, 907, -2809, -1458, -2306, -402, 2679, -1222, 1138, 192, -1317, 1012, 15514, 624, 279, -4032, 2565, 6162, -938, 5760, 1685, 4350, 2939, -825, -331, 1840, -556, 427, -4642, -23, 8346, 7577, -467, 3848, 454, -3962, 373, -116, 2314, 4868, -208, -1367, -1803, 2681, 806, -4279, 3348, -528, 14027, -238, -457, -2764, 832, -4680, 4354, 1219, -801, 2414, -5204, -3768, -6524, 5163, -10909, 1656, 321, 3260, -1773, 214, -135, -4563, 5206, -4794, 1486, 406, -1026, 281, 1799, -218, 320, -908, 872, 1056, 2955, -208, -799, 15492, 334 }, .cb4440s1 = { 27498, -414, -266, 646, 229, 94, -15, 302, -489, -401, 125, 752, -476, 
-200, -976, 195, 4, -402, 2220, 1012, 1731, 2530, -652, -21380, -679, -867, -195, -114, 1326, 2531, -348, -185, -114, 178, -694, -298, 8752, 1735, 2640, -2374, 6191, 1516, 5771, 6705, -253, -8502, 986, 2134, -1854, 3490, -678, -48, 133, 844, -1635, 1630, 6056, -756, -1109, 1563, -1445, -139, 580, -1448, -18675, 846, -390, -259, 1548, -324, 281, 142, 1792, 1211, 1328, -4308, -1032, -5412, 4742, -201, -47, -297, -8403, 9715, 7268, -3756, 1573, 677, -88, -145, 4877, 12946, 3264, 1809, 7230, -2583, 1627, -1786, -7113, -1480, -2111, -508, 415, 1664, -483, -538, -249, 80, 7005, -2562, -887, 3801, 6411, 2222, 36, 875, -5089, 10897, 4014, 4948, -1580, 1425, -1814, -391, -96, 322, -6484, 1896, -7790, -950, -4235, -8362, 3118, 4843, 3754, 1070, -1648, 7692, -1675, 3405, 918, 2270, 573, 193, 6024, 8912, -4905, -1810, 985, 1877, 2158, -2150, -386, 3908, 2030, 419, -12599, -570, -150, 1580, 36, -152, 2, -538, -1565, 6809, -715, -6266, -12725, -6718, 810, -603, 1547, 1001, 2250, 810, 1773, -672, 327, 246, 6414, -7511, 916, -327, 830, 11862, 4373, 1003, 6370, -1730, -2127, 613, 1627, 626, 763, -864, 207, -233, 3738, -8644, -1634, -2050, 3906, -451, 12986, -4828, -2973, -4714, 545, 822, 735, -3539, -256, 65, -93, -94, 2923, 7075, -3763, 6172, -9544, -2675, -3833, 930, 418, -4496, 3790, 386, -7797, 234, -609, -259, 454, 330, 1546, -7634, -1966, 515, -2496, 374, 2633, -3014, 4126, 9920, -7103, 1441, -150, 7695, 670, -48, -41, -512, -6849, -1785, 3755, 1860, 2418, -2346, -1194, -1574, 15510, 444, -1515, 585, 742, -199, -1115, -122, -11, 140, -7763, 1438, -317, -444, -17149, -24, 2685, -856, -3166, 1109, 308, 233, 30, 63, 530, 645, 84, 133, 6139, -1183, -10673, -12790, -112, -1544, 4623, 576, -804, 1023, -1646, 1192, 269, 2681, 44, -909, -14, -414, 48, -4002, 4768, 3440, 3252, 1441, 101, 372, 3166, -1398, 325, 16184, -711, 486, 1328, 114, -450, -31, 1152, 2154, -69, -252, 32, 922, 219, -2055, 421, -1377, 1006, -614, 234, -40, -84, 204, 27171, 182, 1034, 1536, 834, -8038, 1243, -3074, 
-7829, 11165, -1854, -1173, -871, 4105, 3588, -3191, 188, -2102, 124, -166, 8070, -11066, 6632, 2739, -7787, 184, -5872, 1360, -1089, 1273, 84, -1683, -1584, 975, -206, 1160, 180, 12, -6121, 5436, -14726, 5949, -6756, 834, 1750, -3142, -878, 7, -220, -1933, -141, 160, 26, 756, -800, 6, -8104, -6989, 3353, -3518, 4510, -12430, 736, -2685, -1042, 32, 1184, -519, -312, -1073, -402, 71, -422, -35, 1791, 12735, -2281, 2623, -1502, -3878, 6727, 10541, -1110, 2308, 870, 1124, 874, -1406, 123, 254, 405, 328, 3828, -7541, 3096, -14145, -672, -1725, -423, -1918, 4164, -411, 3094, -568, 3575, -2895, -378, -3065, -232, 449, 8110, 2264, -1383, -557, -10683, -7628, 4155, 754, -134, 6759, 1051, -2054, -900, -948, 579, -1277, 151, 462, 11562, -310, -8260, 10238, -1309, -3052, 345, -689, -1133, -588, 548, 980, -1332, 881, 368, 776, -704, 422, 12433, 1314, -1487, -4753, 2679, 3092, -939, 136, -586, 3504, -1034, -6318, 3506, 420, 2326, 1034, -252, -398, -6232, 4488, -6166, -1754, 908, 4884, -5188, -2985, 10793, -116, 4674, 3980, -9, 805, 1568, -1620, -88, -146, 3027, -16154, 2899, 7839, 5912, -427, 270, -1467, -387, -351, 615, -322, -2, -1061, -654, 56, -438, 132, 2388, 460, 2172, 1874, -3028, 3302, 2035, -704, -1222, -19835, -472, -1858, -1686, -286, 5, -748, 491, -350, -4344, 103, 1473, 2440, 13575, -1350, 1456, 10377, 1962, 3036, -1238, 1580, 607, 1352, 997, 1212, -489, 251, 4075, -3457, 6186, 786, 300, -2532, -373, -2522, 3108, -294, 4938, -2980, 1509, 12450, -695, -1128, -96, 354, -3678, 8494, 2480, 2264, 5162, 11907, 4721, 1111, 752, 2999, 3924, -1429, 321, 276, 309, -603, 601, -62, 3337, -3570, 3273, 6618, -2001, 950, 532, 972, 1619, 956, 65, -609, -281, -14769, -438, 580, 230, -228, -10108, 12289, 8904, 872, -3296, 1535, -384, 477, -913, -777, 546, 445, -1004, -435, -716, -138, 572, 435, 4626, -864, -5716, -2810, 1291, -4796, -241, 2527, -2342, -1360, 4161, 1886, -128, -1521, 13726, 1818, -554, -157, -9665, 2607, -1013, 579, 1122, 1571, -2684, 11364, -6464, -184, -1542, 
-5670, -1091, -670, 1273, -1051, -7, -278, -2551, -548, -10673, -1434, -343, 317, -3108, -1615, -2239, -14132, 490, -454, 2467, 1990, 470, -1072, 440, 290, 3006, -4420, -2083, 3050, 2779, -2349, -590, -4941, 7464, -9000, -2686, -2045, -8712, -3281, -2476, 648, -148, 408, -1367, -1113, 27347, -1113, 739, 39, 1443, -208, -686, 986, 735, -702, 76, 665, -194, -165, 366, -606, -4908, -3932, -15941, -2810, 4572, 816, -2092, 4213, -2492, 4006, 926, 210, -1110, -1635, -270, -226, -362, -187, 1790, 3016, 2216, 3890, 2018, -1325, 19784, -771, 356, 2118, -98, -688, 1016, 978, 559, -39, 160, -310, 6622, -1754, -11104, 204, -2212, 2370, -11610, 1119, 3216, 3102, 524, 278, -829, 524, 28, 838, 374, -76, -4593, -2933, 10697, -6510, -4970, -2025, -9383, -3428, -4112, 2665, 1459, -1411, 421, 481, 842, -341, 147, -158, 4108, 45, 4935, -21, -7905, 2058, 1158, 15260, -567, -752, -992, -1094, -1059, 2370, 820, 655, -261, 280, -3969, 6342, 8521, 3114, 369, -12269, 1684, 4, 4686, 1985, -3668, -3040, 677, -254, 57, -161, -989, -379, 7075, -580, 2846, -3177, -2285, 958, -7096, -154, -515, -3345, 13487, 3548, -1804, 290, -430, 726, 399, 54, -2814, 10235, 1958, -3356, -1330, 536, 3218, -14194, 200, -796, -862, -1480, 1811, -346, 604, -391, -231, 513, -10495, -6029, -6492, -8746, -357, -221, -1890, -2669, 8, -1756, -5812, -1048, 2258, 223, -474, 1154, -226, 348, -1590, 2915, 158, -24059, 875, -846, 1150, -1000, -844, -116, -246, -219, -482, -367, 120, 517, -489, 442, 8148, 5040, 3770, -1006, -51, -3175, -10278, -4468, 1188, 1497, -6515, -5, -1628, -2387, -1297, -717, 1630, 232, -3608, -6688, 2444, -792, -246, 411, 1464, 3661, 3244, -1121, -1602, -15398, -443, 882, 1412, 926, 16, -73, 2693, 7168, -9399, 528, 7916, -9270, -1669, -2756, 1304, 3074, -1510, -2089, 1491, -1556, -422, -414, 132, -192, 5988, 4500, 7572, -10978, -4875, 3685, 1888, -660, -1750, -515, -2728, -3133, -2742, 666, -2861, 626, 256, 243, 4587, -3567, -288, 2314, 4765, -11036, 7322, 7581, 2651, 3264, -394, -246, -891, -1464, 
-1717, 123, -517, -486, -1019, 7215, 554, 722, -4253, 2393, 3053, 2881, 1538, -2104, 573, 321, 673, 3902, -2855, 944, -12816, 370, 3496, 952, -1435, 6379, 766, 2273, -729, 80, -2432, -1150, 2408, -895, 15497, -1231, -282, -3306, -435, -167, -3528, -5683, -6413, 2501, -4825, 124, 3128, -425, -2800, -986, -2283, -495, -3392, -1560, -2093, -11613, -37, 157, -438, -794, 1988, -45, 1508, 20, 98, -458, -245, 1130, 110, -525, -771, 1120, 710, -21758, 174, -210, -4839, -2468, -648, -4388, -11, 2990, -181, -4790, -4232, 3634, 6427, 2772, 166, -2996, -12005, 1630, -249, 179, 856, -1250, -4216, 1993, 5164, 4757, -5071, 4331, -3029, -1276, -11184, -2864, 1238, 6332, -2431, 1276, -338, -476, -5659, -2410, 2510, 1853, -4853, -3175, -1896, 10728, 3724, 960, 9963, 305, -938, -646, -2760, 1436, 113, -74, -3098, -4090, 2950, 2701, 992, 206, -1393, -2179, -10862, -2396, -1008, 2639, -1547, -416, 9264, 1824, -360, 401 }, .cb4440m0 = { -25793, -238, 1193, -2635, -238, 1315, -2277, 1588, -896, 512, -864, 611, -398, 1277, -212, -358, 202, 13250, 16, -860, 1618, -1024, 310, 11560, -746, -3876, 780, -4087, -475, 857, 1017, -1439, -890, 155, 8556, 362, -1158, 2116, -291, -66, -1272, 510, -1394, 2259, -4761, 808, -740, -937, 13993, 191, 273, -7670, 6776, 846, -1907, 955, -13206, -1956, 1697, 1670, -329, -244, 2395, 6119, -802, -1007, 649, -974, 170, -2136, -10780, 1020, 1270, 1954, 1118, 13348, 983, -1394, -594, -514, -586, 1026, -1821, 548, -298, 3342, 837, -1395, 13977, 1021, -7792, -2930, 1466, 5494, -843, 2432, 1378, -68, 174, 407, 76, -877, 691, -9445, 522, -3448, 2549, -412, -2358, 875, -5044, -952, -10113, 6574, -6347, -2760, -662, 29, -227, 4884, 1304, 411, -3320, 2434, 785, -14822, 4412, 2272, -6407, 2172, -613, -1665, 296, 742, 624, 135, 5316, -3191, -855, -2061, 485, -3188, 2998, 1382, 2516, -2438, -3506, -238, 737, -629, 1001, 773, 17540, 1478, -724, -764, -1231, -1254, -1582, -692, -351, -1551, -171, 183, 38, -668, 756, -770, 24344, -905, -7182, 502, -3766, -1690, 1588, 1522, 
1844, 1276, 1458, -777, 1731, 4856, -14860, -1097, 36, -1310, 846, -1500, 521, -3669, -252, 4480, -2602, -845, 597, -4512, 1062, -292, -18518, 1972, -334, -80, -1256, -366, 3640, -436, -12, -1670, -435, 1496, 1429, -11092, 1012, -936, -1224, -12240, -3048, 210, 1905, -1197, -357, -9759, -2632, -332, -3417, 15078, 1496, 2206, 1800, 205, 1384, 3546, -1853, 755, 1016, 726, 58, -150, -13053, 10375, -2589, -330, 1616, 3081, 2763, -2617, -1204, 324, -53, 2968, 1485, 214, 124, -334, -237, 16784, 2612, 1023, -4298, -2156, 4336, -4307, 4952, 1036, 81, -762, 3416, 714, -187, -4100, -757, 1124, 10224, 7059, 424, -316, 1281, -12262, 912, -1999, 2, -731, -184, 879, -934, -202, -391, -1046, -338, -101, -17511, -1712, -5580, -2327, -2478, 1770, -5825, 1499, 578, -130, 1424, -1818, 110, 542, 22, 988, -4227, 2836, -1447, 1170, 12335, 2179, -11216, -2500, 64, -912, -954, 654, -802, -455, -597, 234, -296, 811, 1083, 1848, 4148, 637, -6608, -2362, -3382, -664, -13088, 2839, 3090, 3294, -4554, 2518, -55, 837, 1392, 5905, 1287, -1484, 965, 16533, -3507, -1903, -1562, 2408, 5037, -4816, 1409, 361, -1890, 170, -610, -1755, -524, -867, -6238, -20117, -745, -956, -176, 2998, 130, -668, -843, -267, -364, -573, 495, 127, -66, 32767, 271, -408, 654, -123, 1831, 151, 996, 82, 628, -251, 144, 198, -88, 357, 37, 612, 184, 238, -584, -52, -30025, -415, 404, -566, 100, 659, -336, 877, 211, -730, -377, 184, -5256, -1484, -1191, -2108, 24, -7821, 209, -2856, -1844, 697, 5798, -1191, 427, 11858, 1000, -261, 184, -686, 1182, -3142, -3138, 139, 144, 117, 3658, -3566, -1562, 672, 2036, 15051, -5069, -551, 529, 1696, -214, -2678, -5966, -3707, 2847, -2554, -1760, -1196, 2088, 6372, 1778, 12935, 2189, 1992, 1761, 578, -542, -753, -1182, 4321, 1871, 309, 704, -1259, 884, 19136, -2665, 1096, 3048, -167, 872, -344, -1092, 464, 3255, -86, 1608, -1062, -1569, -1699, 4504, -274, 568, 1428, 20571, 1452, -894, -791, 459, -882, -1048, -2944, -11095, -783, -832, -2450, 650, 2784, 3156, 529, 457, 483, -12553, 655, 
686, -757, 929, 212, 1242, -201, -1627, 4826, -1895, 997, -3225, 84, 80, 287, -2136, 405, -188, -890, -18272, -511, -118, -3642, -1018, 420, 12650, -474, -540, 6978, 6977, 4418, 1162, -1332, -1112, -1765, 2640, 562, -1164, 1256, 595, 567, -483, -31511, -960, -816, 756, 1505, 12, -518, 234, 184, 679, 328, -600, -137, 267, -440, 2540, 593, 1023, -11756, 626, -2034, 5756, -9882, 3175, -1190, 1628, 3920, 3219, 1394, 834, -140, 4036, 4722, -455, 3105, -1355, -3106, 1000, 7806, -2227, 687, -1580, 3180, -12302, -1394, -425, 488, -187, -36, 219, 158, 12006, 1683, 2151, -2, -1110, -12250, -59, 672, 1844, 2084, -2101, 1652, -783, 634, -13257, -339, 3932, 2260, -12452, 152, 316, -688, 79, -912, -2081, 1384, 188, 1942, -706, 204, 700, 1776, 13901, -13666, -324, 472, 1055, -646, 82, -769, -877, -443, -227, -900, 636, -870, 470, -112, -598, -4402, -2726, 1775, -216, -43, -18675, -863, -4604, 3433, 674, -155, 208, 1546, 294, -157, -616, 11070, 1229, -528, 2124, 699, 3624, 54, -516, 194, -13556, 1902, -506, -1317, 1916, 471, -342, 836, 18, 906, 614, -8, -951, 1052, -97, 2212, -924, 310, 6, -733, 122, 23731, 468, 345, 1545, 1434, 611, 403, -3136, -2214, -54, 1023, -1390, -5243, -3744, -258, 6871, -1778, 673, -2362, -13007, -776, -974, -1077, 8386, -3978, -4325, 1236, 4011, 1161, -263, 1224, -12957, -100, 2801, 1458, -3081, 578, 17, 1037, -742, 5972, -632, 2904, -12721, -6733, -478, 182, -1973, -820, -6911, -4904, -942, -348, -353, -350, 7864, 34, 568, 1985, 956, 3310, 118, -2067, 12600, 9063, 1609, -1261, 296, -1248, -1656, -65, 1832, 1525, 1503, 5149, 4370, -1638, -3868, 320, 1527, -424, 17676, 1780, 1172, -1132, 1128, 1294, -322, -101, 462, -6668, -3024, 7573, -11088, 1581, 13, -1398, 550, 4376, 1623, 1727, 857, -5310, 2528, -529, -401, 539, 6508, 4246, 4105, -5363, 96, -13407, -694, 5061, 3445, -3283, -348, -1470, 1114, 602, -404, -129, 642, 1547, 23110, -2255, 1969, 333, 1297, 116, -1691, 364, -528, 758, -1239, -1826, -249, -395, 684, -856, -638, -10000, -2773, -6151, -1244, 
-3138, -9688, -1994, 7124, 1368, -1870, -312, 1863, -1006, 963, 789, 743, -4158, -760, 1384, -7525, -959, -262, 5752, 4005, -12037, -210, 886, -1961, 4895, -251, -158, 212, 677, 518, 342, -226, -360, 466, 17, 28392, -20, 246, -686, -258, 640, -378, -120, -443, 1078, -2612, 2084, -1706, 4334, -4675, -4634, 2336, -9998, 9975, -1285, 2778, 3292, -1717, 138, 2114, -1120, -180, -1146, 11988, 829, -2530, -8827, 6833, -1191, -1653, 2691, -4067, 1166, 1971, 303, -544, -1459, -261, 1065, 3410, 2050, 3163, -515, 5456, -4261, 5483, 1531, -2098, 2020, 3773, 588, 915, 158, -11876, 282, -1180, 265, 11036, -66, -1741, -1894, -4234, 3048, 218, -1030, 2240, -12666, -2290, -1673, -1911, 1480, 287, -81, 1182, 216, -10734, 2201, -58, -619, 8585, -574, -4576, 1852, -468, -6759, -7667, 167, 995, -1114, -1276, -2053, 2178, -8133, -1270, -7822, -10582, 5380, 3037, 1071, 827, 4972, 1024, -129, -180, -3002, -846, -736, 9587, 1890, 10287, -1954, 1042, 1558, -950, 2406, -1852, 2275, 6694, -703, -910, 3854, 812, 521, -1075, -761, 5357, -3911, 3892, 7944, 4580, 5031, 1088, 7116, -1746, -5223, 2607, 3227, 2296, 5603, 211, -731, 6450, -3312, -12378, -326, 4245, 4168, -799, -3563, -505, 725, -5297, 2196, 2221, -16, -3472, 315, 626, -6131, 71, 920, -4383, -1340, -2675, -664, 7412, -1240, -1361, 997, -3817, -2377, -11717, 1661, 22, 540, -5261, -950, 7472, 3148, 7647, -4400, 4558, -4412, -869, -1528, -2618, 8311, 2110, 534, -460, -223, -162, -828, 274, 1844, 1861, -1583, 6899, 5222, -1772, -2880, -6400, 4703, 2606, -3990, -1224, -4160, 9032, -299 }, .cb4440m1 = { 32767, 383, 857, -1579, -423, 1164, -1606, 1218, -410, 777, -292, 122, 282, -74, -1394, 259, -734, 102, -82, 32616, 427, -545, -146, -141, 340, 506, -808, 171, -778, 900, -204, -277, -228, -426, 566, -481, -1138, -907, 112, 2722, 871, 115, -7202, 1953, -826, -1812, -396, -14722, -840, 155, 1114, 5624, 1112, -147, -6383, 926, 1505, 360, 937, -13391, 969, 7062, 2218, -3531, 471, 458, 191, -465, 8664, -1168, 546, 2109, -944, -74, 1644, -81, 
-760, -1920, 2659, 13330, 1511, -1148, 1346, 796, -20, -15616, 1246, -1190, -10882, -774, -70, 3643, -896, 1830, -192, 1018, 1085, -95, -309, 659, 91, 727, -4486, 486, -2078, 1235, -14415, -4053, -1619, -2589, -582, -4650, 4076, -762, -1111, 277, 1448, -742, -314, -979, 1889, 2679, -1972, 2480, 302, 2869, -9183, -445, -1817, 12894, 106, 187, -1406, -615, -1174, 746, -371, 382, 350, -1811, -527, 36, 500, -835, -106, 1134, -2207, 1021, 348, 908, -21780, 448, 688, -60, -1790, 1901, -22990, 1467, 596, -912, -3190, 1484, 269, -409, -474, -1670, 1328, 152, -402, 359, -734, -13208, 62, -4197, -6242, 5195, -2841, 5030, 2794, 1264, -1130, 3821, 961, 729, 1075, 49, -148, 7267, 2596, -5093, -8284, -6875, -3059, 3909, -4635, 1402, -6334, -342, -3083, -861, 490, 1257, -630, 128, 2240, 832, 1060, -1802, -1652, 128, 7816, -14391, -6722, -3328, -2586, 3044, 1088, 1577, 852, -142, -176, 1371, 1236, 976, 12165, -1596, -199, -504, -11020, -582, 972, -1468, -2402, -666, -3327, -2148, 1078, -194, 9675, -2102, -1236, -70, -942, 291, 1364, 1403, -3362, 12963, -375, -1728, 1615, -2354, 633, -506, -194, 13037, 14172, 534, -1026, -425, 2488, -180, -678, -436, 272, 1507, -334, 840, -1000, -1068, 1029, -306, 24, -4435, -5994, -1307, 4251, 3968, 2527, -981, -2626, -4400, -242, -1823, -679, 12831, -22, 51, -381, 2422, -2376, -8156, -1477, -6974, 1102, -373, 467, 11314, -554, -432, 824, 7277, 393, -178, 179, -653, 11848, -1593, 14143, -731, -1036, -2322, 261, -1992, -1152, -1430, -1354, -51, -285, -1637, 144, -59, -2182, 5731, 538, -880, 397, 3010, 707, -1822, -1006, 4686, -5096, 4246, -3096, -3997, -254, -11025, 394, -345, 18780, -686, -517, -3422, 104, -2173, 2439, -5400, -10, 1084, 1821, -602, 1431, 405, 2143, 499, 405, 351, -62, -47, 1954, -29915, 440, 1054, 559, -1210, 442, 928, -1, 59, 279, -112, -110, -440, -396, 805, 311, 858, -431, -1070, -30192, 135, 1246, -345, 790, 498, 319, -302, -469, -10, 512, -829, -526, -2052, 2456, 134, -19375, -1210, -1292, 640, 3232, 2580, 973, -2412, 271, 
-282, 632, -523, -847, -138, -990, 2501, 536, -166, 2100, -357, 122, 466, -4, 2034, 20083, 1578, 444, -344, -689, 5733, -456, -503, -592, -1350, -1038, 932, -1916, 1098, -990, -22687, 1544, -442, -396, -570, -683, -616, -1431, 118, 4113, -312, 2300, 2093, -2344, -2955, 6343, 4306, -10078, 6286, -5794, -806, 664, -217, 548, 5072, 4626, -1643, -11619, 779, 1956, -2960, 614, 2087, 9104, -2418, 775, -4447, 768, 1599, -1084, 999, 1652, 1090, 630, -1197, -3495, -912, -9817, 648, 3278, 1828, 13605, 2757, -831, -1191, -1846, -1441, -278, -8530, -455, -495, 323, -911, 2500, 14100, 3635, 1016, -936, 5265, -3092, 2125, -121, -64, -656, -337, 9438, -7600, 1403, -11917, 2180, 2612, 1664, 1091, -318, -3300, -427, 282, 1979, 894, -703, 514, 160, 1697, 6508, 828, 187, -34, -1094, -2861, 240, -5013, 6004, -4796, -991, 158, 11437, -1730, 354, 1195, 3790, -10432, -3584, 13872, 336, 2043, 221, 604, 2930, 1080, -1417, 1878, -878, -459, -419, 364, -1037, 7764, 3100, 48, 11057, 1936, 2229, 9150, -472, 1178, -129, 2876, -249, -258, -1181, -329, -581, -1140, -1967, 347, -539, -394, 775, -1151, -31, 1052, -1900, -213, -1552, 22484, 164, -113, 135, -1294, 550, 7738, -7223, -739, 1362, 5518, 193, -2170, -11861, -1357, 351, 2215, 165, 16, -606, 727, -158, -772, -13420, -1248, 12422, -812, 1768, -442, 1269, -1076, 899, 124, -249, -1110, 653, -3064, -1632, 839, -230, 512, 642, 13230, 13285, -552, -1113, -595, 864, 537, -1012, -539, -615, -491, 1014, 800, -10, 534, -1227, -25011, 1239, -26, 3834, 104, 762, 1259, 2112, -300, -920, -812, 612, -1061, -378, -246, -7, 11042, -18492, -1411, -77, 407, -556, 218, 1751, 1069, -294, 1789, 904, 285, -76, 300, -160, -128, -3398, -2001, 1689, 4946, -2750, 1427, -12632, -1873, -1802, -1115, -2777, -4436, 2937, -6408, -467, 487, 1043, 3914, -81, 1540, -11718, 1368, -12656, -583, 1009, -416, 249, 1874, 1157, 994, -858, -154, 294, 333, -26, 73, -1576, -20, -560, -1068, 1325, -588, 26161, 1580, -411, -587, -1083, -79, 762, 292, -622, 788, 284, 2014, 78, 554, -516, 
1340, 835, 300, -24827, 558, -705, -22, 139, -159, -246, -585, 4318, 234, 1308, -198, -3370, 5724, 2381, 13843, 4, 569, 8002, 1188, -63, -1698, 4624, -405, -218, 4238, -888, -1180, 3750, -4848, -9497, 293, -1087, -13274, -33, -2870, 457, -618, 338, -34, 286, 345, -5321, 904, -5656, -2082, 12644, -7423, 532, 958, -1997, -1483, -2982, 3115, -1851, -2025, 1853, -918, -903, 1554, 540, -16549, 1441, 2939, -1272, 3106, 2374, 3906, -697, 1144, 750, -379, -6502, 980, 386, 36, 1109, 1195, 6272, 4264, 1501, 5369, -1560, 3535, 1084, 739, -1031, -4400, 8452, -430, -1787, -7669, -231, -115, 4324, -1820, -2098, -786, 7478, -2709, -14255, 5771, 115, -1700, -111, -1482, -1369, -112, 122, -472, 233, 2427, 1816, 180, -481, 928, 82, 84, -700, -448, -946, 1968, 1644, 168, -167, 16164, 155, -10316, 941, -584, 488, 96, 5205, 491, -1844, -13055, 1266, -352, -836, 558, 1546, -1720, 313, 2033, 597, -14351, 4426, 3281, -559, 2614, 3248, -2265, -10312, -1614, -288, 480, 1419, -546, -485, 835, 960, 462, 923, 6518, 834, -711, -12639, 8811, -207, 1806, 337, -1240, -4796, 2383, 277, 1141, 969, 59, 197, 1365, -614, -9144, 4824, -436, 4191, -2588, 4509, 391, -5055, -3231, 6978, -6388, 51, 105, -863, 1050, 13103, 12769, -420, -1562, -123, 2702, 292, 1061, 123, 405, 1917, -275, 493, -95, -195, 130, -2613, 9010, 196, -1382, 5903, 7281, 1585, 2557, -876, 3166, 6910, 590, -3060, -559, 4722, 393, 613, -392, -3022, 9892, 1808, 923, 8123, 9873, -1665, 2349, 2894, 591, 2000, -3734, -917, 220, 408, 296, -656, 2608, -1700, 400, -10734, 5434, 6504, -1399, 2175, -1203, -6358, -1221, -5062, 45, 970, -500, -1322, 1176, 5882, -11687, 6324, -2183, 2327, 922, -5628, -3507, 2406, 874, 1399, 4518, -343, 857, -224, 802, -725, -8561, 4432, 1974, 1825, -2168, -451, -3408, 6587, 7589, 3361, -4711, -1474, 3151, 1950, 1022, 1466, 9192, 4666, -822, 1024, 2342, -2220, 1169, 10460, 2993, -988, -4407, -6727, 902, 1659, 80, 106, 400, 34, 1746, -6982, 10484, 6333, -845, -3333, 1764, 217, -4730, -3306, -3664, -2830, 2254, -927, 
-55, 587, 1812, 281, 4375, -3614, -1349, 1802, -6184, -2648, -4189, -9381, -3243, -4147, 384, 2241, 5524, -478, -1534 }, .cb4448l0 = { -15402, -5156, -1798, -144, -4711, -4700, 2819, -389, 148, -2600, 1706, -1906, -578, 495, 24, 829, -383, -12581, 11667, -1039, 1395, 2670, -288, 23, 628, -248, -512, 79, -326, -5428, -2830, -2476, -1253, -915, 12042, -674, -110, 2950, 3885, -5799, 983, 616, -652, -60, -372, 22, -141, -167, 98, 125, -100, 27211, 133, -127, -271, -272, -176, 1268, 173, -422, 2431, -3998, -2797, 2328, 182, 6526, 3318, -6282, -10580, 3966, 8504, 527, 9507, 6203, 990, -989, 6030, -136, 647, -1100, -324, -2618, -2499, 500, -132, -842, 1237, 3599, 2285, 2906, 10766, 11284, -2794, 242, 184, -1934, 55, -839, -1181, 406, 855, 902, 10490, -327, -1561, 5742, 428, 2218, 1523, 5229, 9130, -760, 108, -140, 22229, 1132, 411, 720, 414, -356, -745, -1276, -899, -562, 369, 5, -7770, 4101, 3626, 126, -13, -4356, 728, -3197, 1930, -1470, -6936, -410, 6720, 1897, -530, -4267, -2181, -876, -472, -2540, -10234, 4008, 10217, -2561, -2021, 716, -1378, -325, 427, -245, 314, -48, -118, -150, -30295, -368, 256, 369, -656, -78, -246, -140, -1250, -635, 1332, -13604, -10383, -1375, 353, 2417, 2140, -349, 1460, -51, -309, 523, 509, 2352, 1208, -377, -2023, 9708, 397, 1216, 10610, -4416, 5520, 3902, -2119, -480, -420, 1170, 36, -3304, 1550, -266, 1682, -808, 2420, 2700, 16239, 3910, 572, -375, 85, -9775, -120, 2214, 2779, 11510, 2628, -416, -1740, -1305, 1226, 78, 78, 635, 422, -13892, 1302, -4117, -1218, 2681, -8436, -1723, 2290, 2815, 1172, -181, -675, -475, -763, 2394, -3639, 7903, -659, 2323, 4837, -6758, 9460, -1480, -2403, -2783, 1496, 806, -458, -246, 12, -254, 121, 1477, -633, -513, 791, 208, -390, -177, -1292, -20471, -4401, -2678, 9026, 128, -265, 822, 260, 11202, 3132, -1879, -3891, 1884, -842, -107, 7516, 1208, -1552, -995, 1203, 2150, 11044, 1285, 2282, 80, 1348, 5342, 2089, 924, 1472, -1454, -8259, -226, -10259, -2335, -2442, 224, 3257, -1528, 6685, 1630, 1969, 48, 
4802, 6051, 987, 8662, -2368, -4984, -1974, -4049, -5320, 5003, 299, -400, 727, 208, -187, 2838, -4547, 9682, -2238, 1065, -3206, 10091, 4915, 2945, -1635, -198, 1074, -698, -716, -96, 1390, -2644, 1006, -4154, 10587, 1132, 2912, -7399, -8350, 785, 156, -290, -142, -374, -2161, 1066, 1358, -1798, 3050, -19, 452, 10470, -10948, 4190, -984, -2089, -728, 1503, 4273, 812, 4950, -3750, 844, -1231, -1582, -2517, 2385, -10537, 5807, -4621, 332, -357, -12484, 1676, 160, 10762, -1225, -1374, 14, -1389, -2900, -467, -1260, 459, -861, 102, 1715, 4295, -7324, -7400, 10435, 287, 1866, 765, 1730, 3430, -744, -2, -1773, -96, 2001, 2165, 118, 9296, -4640, -4612, 7134, 5128, -7967, 404, -433, -433, 2222, -8050, 2023, 2766, -260, -2440, 1607, 2442, 7763, -486, 3766, 2355, 7515, 230, 1248, -8873, -8224, -9135, -1402, -1812, 1223, 152, -2316, -739, -405, -784, -598, 625, 503, -175, -573, -31693, 502, -478, -554, -934, 387, -80, -484, -701, -34, -51, -494, -1461, 1005, 2920, 11532, 2667, -1674, -832, 8680, -5767, 786, -1558, -2062, 1009, -392, 2099, -7277, -2587, 6302, 3070, 4496, -1713, -4042, -8109, 1642, -1894, 3450, 840, 3632, 160, 578, 149, 767, 754, 208, -870, -672, 252, -30, -213, -482, 50, -578, -2, -148, 246, -31918, -568, 130, 472, 761, -27, -51, 454, 144, 124, 5844, -8354, 9562, -3755, -262, 3286, 1120, 983, -628, -734, -1732, -1424, 353, -403, 15877, -13552, -335, 337, 519, 140, 297, 150, 725, -780, 876, -116, -91, -128, 275, 2499, 9313, -768, -10469, 1148, 2172, -6417, 3292, -2187, -1108, 3055, 1105, 625, 794, 68, 337, 1384, -106, -516, 574, 868, 849, -997, 81, 25796, 28, 206, -3556, -351, 1058, 1126, -7826, 5310, -4102, 5352, -6835, -4032, 1487, 230, 5617, 937, 10484, -71, 2653, 1203, -1, 667, -1489, -10136, 7782, -763, 792, 1434, -170, 367, 96, -21992, -252, 756, 145, -1476, 1408, 1523, -819, -576, -476, -1068, -241, -39, 1547, 9553, -622, -1799, 1861, 6115, -864, 10690, -586, 470, 200, 1162, 586, 44, -11650, 3453, 8734, -2754, -178, 236, -2650, 2654, 2699, 1180, 5325, 
-458, -40, -218, -6, 126, 6794, 506, 860, 11863, 652, 1665, -4213, 4863, 1424, 5712, -663, -688, -10, -1421, -676, -1325, -378, -311, -490, 19501, 1242, 268, 4581, 1587, -1153, 848, -1378, -1159, 505, 63, 704, 1942, 2204, -2106, 44, 479, -1098, 333, -21595, -617, -6444, 3547, 1282, -1784, 4664, -1330, 2607, 1241, -3579, 247, -875, 11359, -3013, -136, -12813, -14400, 1857, -998, 1342, 1187, -338, 1263, 575, 1226, -995, 596, 446, 293, 767, -356, 70, 786, 466, 202, 149, 849, -28991, 652, 124, -209, -124, -406, -5463, -1413, -1300, -5339, -1761, 4770, 2680, -10542, 3486, 5601, 2932, 1581, 489, 521, -16583, 1, -1529, 5942, 1234, 4714, -1647, 1150, 2802, 642, 586, 3836, 240, 307, -490, 67, 771, 816, -906, 1554, 1090, -2353, -629, 11291, 2941, -2982, 9473, 1434, -4351, -8017, -5173, 8071, 1931, 1281, -4055, -3224, -1918, -271, -204, 670, 3491, 107, -31624, 227, 75, -91, 108, 171, -53, -201, 373, 63, 118, 126, -104, 127, -88, 1810, 11688, -10240, 550, 3692, -4978, -1619, 40, 911, -1080, 580, -767, 333, 192, 403, 308, -904, 142, 31169, 503, -1101, -146, -144, 35, 181, -355, 54, 590, 499, 95, -1767, 444, -49, 2160, 7176, 12032, 6478, -741, -5576, -644, -101, -1251, -1268, 2365, 10029, 537, -1476, 307, 2108, -2478, -944, 10725, 349, -4242, -135, 7577, -4492, 1492, -2512, 7736, -5118, -6756, -2436, -1890, -2390, 1620, 914, 1658, 47, -11692, -134, -1740, -196, 9521, -136, -1376, -8682, -1136, 1096, 903, -1148, -334, -228, -4, -675, -199, 1914, 2827, -11098, -2129, -2559, -978, 175, 1832, 10075, -2358, -1888 }, .cb4448l1 = { -11514, -2858, 12392, -305, -206, 929, 473, -3120, -2766, -1068, -1237, 420, -718, -21, -336, -45, -478, -1517, 1830, -12644, 259, 11978, 257, 1494, -1759, 247, -733, 112, -2242, 290, 234, -10260, 1781, -1806, -4104, 1747, 38, -692, 4971, -9113, -1925, -1580, -615, -9608, 3779, -11158, 469, -4736, 299, -2815, 2108, 1910, -2356, 66, 523, -440, 2298, -4219, -2512, -1110, 11192, 5932, -2629, -7985, -992, 775, -1134, 3287, 900, -681, -39, -1206, -1708, -6800, 
-361, 11024, 8496, -198, -3855, 1486, -2547, 1773, 50, -276, -286, 785, -7884, 438, 4590, 2794, 5333, 5476, 2108, 660, 3610, 2308, -8538, 224, -132, 134, 731, 988, -1368, 3894, 4318, 9911, -104, 320, -9506, 1721, -5690, 1712, -8747, -1876, -5122, -1304, -162, 752, 3646, 1621, 11089, 1117, -1971, 1058, 3070, 180, 23112, 175, 483, -1028, -538, 497, 1053, 61, 788, -455, 22, -55, -32, -326, 15956, -2045, 788, 9784, -1170, -819, -3677, 647, -484, 578, -160, 286, -421, 289, 8140, 3838, -578, -1866, -2074, 667, -11951, 1684, 3439, 1280, 158, -1784, 1276, 638, 562, 2045, -220, 852, -594, -2109, -2665, 2748, 38, 91, 1377, -624, -18586, -498, -882, 36, 536, -99, 62, -5275, 3051, 231, -6343, -1751, 1206, -1646, -1347, -13590, 1431, -271, -442, 21934, -143, -1824, -378, -463, 816, 379, 336, -291, -652, 275, -758, 257, -14866, -1304, 7260, -3373, 1249, -1992, 2734, -2565, -3064, -416, 2424, 279, 10518, 206, -681, -14338, 666, 1843, -648, 526, 1982, 366, 684, 1019, 192, 8, -482, -4785, 2134, -1722, 10674, -1613, 33, 1148, -1566, 10226, 3397, 667, -1100, -738, 2420, -14282, 451, 90, -10346, 2673, 1175, -3639, 266, -566, 0, 1672, 1082, 298, 359, -497, 1784, -570, -2538, 2522, -3825, 6265, 99, -7927, 3160, 11079, 131, -2080, 92, -29951, 268, -293, 240, 254, -182, -145, 303, 12, 86, 596, 246, 136, 1020, -1521, -1134, -10125, -5691, 6028, -3703, -4295, -3718, -5719, -564, 660, -321, -1073, 83, -3068, 6167, 12788, -762, 8057, -1215, 2379, 2142, -3625, -503, -1418, -304, -649, -501, -12558, 12787, 3737, 1465, -3692, -1321, 1106, -1136, -651, -50, 1608, 59, -583, 82, 331, 443, 782, 93, 285, 310, 29149, -698, -52, -909, -238, -222, -114, 4, 650, -200, 235, 2541, 598, 378, 11000, 3101, -8228, 1690, -4313, 6996, -11, -2620, -1458, -1428, 579, -304, 20, -372, 897, 602, 432, -138, 690, 593, -1485, 136, 191, -32147, 260, 199, 412, -168, -41, -384, -362, -14, 242, 366, -318, -304, 1544, 458, -7790, 3332, -5117, -1937, 868, 12622, 906, 1941, 4763, 1698, 351, -234, -973, 9166, 6726, 2686, 248, 
3597, -9812, -400, 4155, 2852, -415, 2218, 876, 1423, 3852, 2965, -410, 1820, 8268, -1296, 686, 114, 3087, 3007, -9402, -5751, -3459, -6674, 418, 4137, 4778, 56, -1399, -1698, -2590, 8343, -2130, 2535, 6148, -134, -2393, 11551, -338, 735, 630, -658, 13358, 949, -1136, -217, -985, 182, -1014, 1459, 221, 7713, -1386, -1427, 1326, 555, 66, 2694, -1535, -268, -13596, 658, 305, 858, 548, -12748, -582, -1055, -659, -12155, 940, -2164, -2518, -126, -132, -842, 641, -483, -446, -5184, -186, -511, 1169, -6092, 6161, 3082, -664, -2037, 847, 11032, -1306, -1673, -1219, -36, 1862, 10053, 780, -282, -837, -263, 509, -588, -12646, -769, -2164, -2219, 524, -3433, -6437, 3890, -623, -7509, 241, 4042, 264, -1394, 3646, -6925, -5184, 1218, -1476, -2240, 1882, 182, -3450, -497, -148, 160, -1579, -19545, -80, 886, 913, 708, 728, 393, -603, -778, 3414, -778, -1495, 1205, 2342, 232, -3634, -76, -16792, -684, 1322, 192, -13248, -658, 7650, 4731, -169, 5148, -1413, 3026, 2480, -2190, 1004, -2082, 237, 171, -717, -766, -525, -11802, -3776, -9914, 1374, -3250, 415, -2787, -175, -1081, 792, 980, 11464, 834, 714, -993, 150, 77, 2306, 11249, -3058, -3418, -1758, -239, -119, -1408, 6083, -4276, 1827, 1660, 2287, -2997, -576, 400, 2062, -3174, -6215, 10026, -1082, 41, 249, 10026, -6199, -301, 280, 10120, 2249, 527, -564, 1002, 622, 3341, 408, 2870, 12902, 13307, 689, 336, -819, -43, 832, -1242, 657, -106, 42, 1123, 149, -2072, 78, -303, 329, 21745, -2172, -1204, 448, 1437, -560, -376, 311, -73, 153, -785, -368, 54, -445, -92, 120, -59, -377, 402, 567, -25820, 1284, 1288, 200, -865, -1286, -41, -1862, 402, 179, -2338, -3876, 4992, -1824, -10092, -3407, -8516, -3556, 130, -5695, 5846, 2333, 2995, 2110, -6946, 5049, -2377, 1655, -859, -4737, 1648, 7031, -7344, 4992, 1760, -711, 3134, 14363, -907, 171, -1971, -3062, -1079, 600, 603, -224, -440, -11328, -291, -663, 1878, -715, -2724, 284, -456, -10970, -3225, -2240, 252, -977, -360, 729, -572, 3981, 1615, -52, -5372, 6095, -9888, 6873, -3830, 4916, 
1834, -1581, -11268, -2316, -398, 1361, 6151, 2736, -1968, 4624, -180, -260, -1221, -5633, -1300, -1081, -1433, -509, 366, -388, 1660, 340, -18997, 694, -1184, -813, 1324, 1261, 735, -186, 5258, -583, -221, 1707, 149, 1022, -835, 1089, 2939, 2025, 421, 411, 3609, -13797, 464, 9214, 2462, -6257, 6032, 1911, 1282, -9673, 974, -703, -128, 950, 369, 1160, -674, -312, -13858, 1078, -7606, 8, 2786, 367, -6441, -824, -195, 714, 484, 108, 475, 289, -1012, -1591, -10880, -324, -647, -2199, 10378, 5781, 995, -416, 871, -1240, -380, 70, -1893, 7632, 1727, -908, -672, -10901, -962, -7322, 794, 1748, -5568, 1215, 5845, -9575, -2413, -2159, 3077, 1359, -416, 6277, -85, 1352, -3498, 6130, 1125, -236, 1950, 8481, 716, -560, -1311, -228, 250, -440, -5320, -1941, -9710, 4637, 1420, -102, -8222, 616, -2254, -528, 196, -1315, -749, -97, -285, -15880, 1105, 630, 368, -809, 29, -1688, -2314, 745, -1627, 19840, -2380, 4108, 1670, 2763, 275, 530, 492, -589 }, .cb4448s0 = { -10720, 9997, -1313, 8849, 5152, -226, -2908, 303, -842, -870, 165, -1372, -105, -154, 170, 2424, -2476, -2126, -8329, 349, -4509, 5128, -92, 9086, -7263, 416, -124, 341, -88, 239, 5172, 553, 1526, 1728, 1955, -1489, -6595, -11237, -10224, -394, -927, -932, 674, 743, 317, 4628, 8453, -3768, 2545, 3506, -2406, 9108, 5643, 4660, 4116, -2452, -1391, -154, -74, 180, 5270, -7922, 46, 11046, 6076, 735, 7922, -196, -1080, 1445, -687, -2403, -784, 742, -269, -498, 6010, -4045, 1053, 891, -1538, 613, -84, -4254, -957, 4682, -14004, 2050, -647, -718, 557, -2720, 2437, -7675, 11874, -9284, -734, 775, -2231, 105, 366, 1360, -98, -126, 508, 2647, 729, 762, -8806, -10413, 9008, -2093, 1107, 201, 1421, 1181, -259, 1420, 828, 327, -1956, -573, -874, 354, 2662, -1437, 10864, -9240, -7648, 1670, 1598, 173, 438, -373, -566, 246, -11999, 1817, -611, 1, -1652, 1876, 1354, 1270, -789, 300, -321, 11577, -516, 329, 5723, 4732, 1717, -6224, -5356, -6292, -370, -3644, -922, -50, -14, -581, -1554, -1675, -20, -965, 28479, 658, -498, -488, 504, 
-601, 437, -585, -245, -196, 186, 281, -174, 159, 7469, 5890, -5112, 4918, -9023, -360, 40, -2975, 4784, -437, 1609, 1032, 2759, -297, 106, 5176, -4315, 568, -9536, -1297, -6783, -10965, 1285, 264, 330, -508, -522, 624, 662, 539, 7248, -13780, 40, 2140, -2188, 1925, -8972, 1147, -1340, 870, 779, -4, -101, -374, 781, 5733, -5712, -5777, 2080, 875, 13450, -1551, -3229, -1818, -114, 1265, 501, 636, -576, -623, -1269, 3006, 1023, 862, 1359, 1950, 588, -22648, 218, -438, 1547, -408, -844, -263, -106, 14754, -689, -9466, -978, -21, 1412, 43, 2012, 352, 908, 277, -960, -747, -230, -1557, -7132, -5707, 79, -2474, 2177, -5349, 2510, -12720, 2833, -2152, -1693, 458, 197, -643, 735, -2728, -893, 2758, 5196, -3566, -4294, -4914, -1222, 188, -8884, -6234, 2391, -1518, 663, 572, -1465, 1147, 8486, 2037, 2516, 941, 6092, 11602, -2559, -1702, -1848, -924, -210, -108, -1052, 8360, -7567, -4588, -169, 3464, -9206, 1842, -4329, -2499, -341, 592, 918, -102, 340, 214, 1037, -324, -16289, 10308, -47, -29, 1340, -603, -2763, -548, 392, 1489, -149, -769, -67, 13270, -2233, 8257, 1582, 1034, -4270, 916, 4486, 1191, -102, 159, 109, -536, -664, -987, -8041, -1759, 4264, -5600, -13815, -1158, 1712, 2516, -634, 504, 515, 732, -46, -685, -481, 1685, -1782, 262, -3600, 14721, 6334, 7941, 101, 914, -2141, -2, 182, 829, -215, -122, 6325, -3752, -2812, 1618, 3512, -1591, -4276, 6994, -10349, -5675, -1501, -1766, -1949, 436, 82, -5596, 2592, -1086, -2804, 2540, 458, -550, -1834, -2401, -7563, 2340, 1678, -7666, 4538, 27, 6337, 3642, 17068, 5310, 1115, 1579, -142, -397, -670, 2010, 863, -504, 845, 848, 770, -8821, 1963, 2782, 162, 1130, 2597, -13699, -3996, 800, 2499, -1045, -1512, -186, -59, -119, -5048, 6800, -8766, 784, -7091, -1002, 335, 1993, -1045, 601, 1804, 166, 1343, 110, -224, 2247, -344, -5, -4292, 5846, 8591, -11846, -1303, -1027, 1759, -168, -194, -1281, 489, 378, -5069, -3321, 11238, -375, -806, 3962, 9660, -2960, -664, -1067, -627, 271, 1205, 1160, 261, 3725, 7877, -679, 22, 598, 
-1086, -420, 2168, -46, -15552, 420, 1220, 1332, -58, -156, 7777, -4657, 352, 15316, -4760, -2140, -2577, -1321, 2037, -371, -1254, -912, -1177, -1367, -103, 4572, -9482, -1599, 294, 403, -272, -2331, -4365, 13467, 4585, -2554, -1743, 545, 162, -369, 6074, 11273, -8856, -8175, 2543, -7, 314, -2033, 2704, -1755, -1431, -791, -276, 1085, 236, 6553, 1872, 387, 1056, -31, -20610, -609, 608, 1007, 1604, -1501, -68, -527, 204, 252, 2533, -721, 1468, 444, -72, 61, -209, 512, -216, 42, 385, -490, -104, -29030, -166, -4883, -2754, 788, -430, -867, 565, -1155, 562, 1076, 1757, -2990, -14971, 8392, 902, 550, 102, -6579, -6939, -319, 172, -863, 979, 2178, 630, 160, 952, 946, -3955, 1515, 352, 2557, -5339, 6166, 4588, -2040, 4031, -535, -2504, 2782, -12136, 1338, -2758, 458, -671, 155, 6998, -2598, -931, -396, -922, 2060, 447, -42, -649, -532, -552, -1945, -16548, 815, -408, 3469, -4118, 875, -1017, -11150, -511, 3846, -11349, -1928, -781, 2765, -681, -713, 655, -218, -8032, -465, 295, 1591, -383, -1889, 1627, 108, 1149, 2513, 388, -5702, -15693, 24, 470, -4322, 3721, 1584, 1808, 350, -1765, -620, -2953, 4354, 8512, -12533, -86, -2490, -192, -507, 2024, 3942, -801, 13444, 738, -2086, 162, 2013, 837, 56, -384, 3164, 5052, 1158, -403, -6913, -4290, -2068, 16622, -2738, 856, -2884, -2432, -410, -1179, -456, 504, -1359, 436, 352, -6351, 327, -2196, -1502, 302, 338, -839, 235, -520, 1283, 2710, 18814, 2256, -2, 400, 1300, -1185, 1024, -3744, -3542, -4350, -763, 1902, -14737, 5437, 48, -1589, -280, -67, 232, 2276, 1413, 3284, -308, 1013, 610, 22787, -685, 724, 12, -359, -1651, -1060, 569, 248, 3836, 605, -413, 3380, -1360, -1120, -2933, -2368, -977, 10135, 12356, 3739, -1571, -418, 580, -2662, -11460, -6128, 2867, 11468, 825, -3201, -501, -138, -755, -554, 168, 757, -564, 428, -12118, -15179, -1978, 432, -597, 1528, 3038, -568, 1349, -3377, 914, 498, 928, -91, -5, 9192, 3000, 2542, -1411, 626, 2705, -763, 3247, 13736, 3034, 2170, -67, -852, -378, 1264, -2771, -2415, -4236, 126, 
-1984, -13336, -1088, -416, -1979, -520, 2506, -1505, 294, -2398, 218, -8740, -3873, 2069, -1374, 86, -998, -3851, 1070, 13357, 955, 3085, -536, 166, 926, 299, 6532, 1324, -502, -1658, 1829, -1263, 445, -1902, 1452, -2747, -16422, 1875, 1773, 452, 288, 5992, 1626, 3659, -917, 2255, -1508, 356, 547, 158, 9, -117, -1665, -595, 14392, -1013, 49, -4060, 12064, 3666, -2903, -9145, -396, -4341, -953, 2758, -178, -204, -462, 98, 222, -3622, -12200, -4484, -94, -8642, -5694, 4034, -720, -1695, 751, -1668, -266, -343, 296, -112, -900, -3750, -360, 1002, -7402, 7758, 7370, 3332, -7517, -769, -1272, 412, -1451, -89, -227, -11332, -472, -1108, -394, -339, -1981, -3494, 12110, -564, -5958, -690, -1066, -130, 762, -50, -1456, -1521, -8428, 994, -867, 2650, -2335, 354, -2253, 4612, -12364, -2626, 1853, 577, -103 }, .cb4448s1 = { 25901, -239, 648, 167, -284, 198, -340, -1112, -55, -242, -214, 528, 112, -259, -284, -250, 23, 475, 780, -558, 111, 148, -2411, -19826, -1158, 2799, -964, 44, -1204, 1187, -4036, 1872, 3541, 768, 159, 1979, 3382, -113, 804, -1021, 3708, -2577, 9697, 11527, -326, -7058, 4306, 1260, 3782, 3370, 1595, 705, 2268, 2182, 1509, 1131, 9877, -7260, -258, 49, 1686, -1472, -2556, -1973, -22425, 338, 486, 963, 1069, -34, -1027, -90, -881, -473, 554, -6326, -873, -9744, 10157, -1079, 584, -1047, -1954, 6204, 2416, -899, 1452, 938, -439, -664, 4231, 9370, 7800, 170, 9448, -4756, 1967, 686, -1186, 636, -1719, -1244, -540, -728, 306, -1778, -7980, -3418, 8318, -1828, 1556, 3487, 10195, 3741, -510, 2077, -1496, 1241, 384, 477, -1051, 7922, -4077, -2513, 849, -693, -9170, 4264, -7940, -1703, 460, -2986, 586, 13, 377, 781, -7047, 6852, -1350, 7537, -493, -1919, 379, 3108, 4293, 8467, -3875, 63, 44, 493, 1496, -1577, -5676, 3318, 6628, 5177, -11082, 1146, 3251, -1159, -461, 442, 1250, 212, 176, 3586, 137, -9153, -13772, -8211, 393, 1170, 1717, -671, 298, -233, 883, -1533, 401, 254, 7700, -4827, 794, 377, -376, 12240, 7298, 2445, 1168, -562, 1528, 563, 421, -606, 0, 5792, 
-1069, 824, 3728, -2729, 1005, -730, 4318, 644, 17336, -1588, 2100, -365, 509, -415, 3684, -9128, -1096, -4278, 1549, -1247, 5519, 11075, -2216, 6004, -3683, 409, -730, -414, -263, -6623, 8194, 489, -9085, 334, -1104, -814, 1412, 1522, -1657, -7029, -4142, -1274, -520, -40, 650, -1886, 9701, 11456, -7567, 1176, 3268, 3016, 1109, -117, -858, -155, -1249, -230, -216, 3945, 9142, -2297, 134, -2563, 15131, 857, -1597, -618, 150, -590, -166, -357, 388, -69, -8767, 2914, 1087, 4673, -14373, 600, 382, -1893, 844, -242, 544, -106, 568, -1141, 371, 2663, -1860, -725, 8066, -1353, -8743, -10433, -1796, 427, -73, 178, 96, 980, -478, 978, 1767, 6034, 633, 966, 677, -65, -884, 417, 461, 62, -868, 93, -100, 519, 16304, 2646, -1260, 12271, -140, 142, 11138, -892, -2114, -629, 172, 744, -2056, -960, 61, -980, 2082, -439, -3126, -2564, 1174, -78, 254, -178, 1599, -436, 19023, 5335, -1686, -782, 520, -8727, 256, -3588, -5694, 12323, -2091, 1511, -656, 3872, 2370, -770, 282, 455, -573, -39, 7845, -12566, 12690, -156, -442, -227, 575, -274, -1717, 120, -40, 1866, 635, 161, 270, 1039, 3256, -673, -3343, 4292, -14247, 7142, -4821, -591, -418, 376, 21, 572, 551, 70, -5536, 79, 2540, -505, -283, -350, -1279, -1630, 2234, -604, 5246, -17580, -3022, -1052, -307, 6626, 2794, 1702, 1875, -1876, 1011, -320, 1268, -282, 1072, 14370, -8206, 1218, 630, 173, 7486, 15176, -6146, 4903, -636, -1341, 1360, -1541, -1012, -778, 84, 426, -124, -746, -252, -11085, 1783, -2833, 809, -744, 2194, 3328, 7029, -5097, 4934, -3025, -641, 303, -328, 258, 8674, 53, -3395, 975, -9944, -8550, 3376, -714, 1078, 1186, 598, 808, -166, -752, 484, -5088, 1484, -1278, 11394, -1876, -8236, 5159, -1830, -1520, 2761, 592, -204, -1360, 454, 230, -5038, -1582, -5617, 1346, -2045, 2306, 17764, 494, 572, -1930, 339, 550, 784, 151, -753, 4708, -3058, -8267, 3281, -1054, 870, -1201, -2005, -920, -10115, 5395, -6423, -798, 367, -221, -5296, -2808, 8313, -5077, 1655, -200, 114, 46, 350, -2374, 868, -327, 377, -9570, 1231, 9258, 
8752, 3074, -4411, -308, 2315, 6824, -3303, -896, -1186, 579, -2561, 2280, 586, -798, 4747, -3487, 1306, -1241, -487, -90, -52, 3231, -555, -17702, -2681, 1649, -17, -278, -647, -4225, 2740, -1248, -3826, 1356, 3572, -1010, 16160, -422, 304, 3970, 1124, -317, -554, 673, -1191, 3180, -4429, 1581, 1543, -2097, 4208, -9363, 10146, 1896, 2904, -4112, -1428, -207, 459, -35, 5395, -8960, 3141, 11004, 308, 3687, 1540, -2156, -592, 1640, 1003, -280, 797, 204, 6910, -824, 4724, 4729, 5553, -3165, 483, -12, 33, -588, -379, 402, 3543, -9646, 74, 9603, -465, 2872, -2367, -885, 2894, -133, 2758, -721, 3473, -13322, 1506, -1344, 512, 1066, -8300, 11391, 11976, -1201, 13, -612, 165, -1823, 154, -123, 1234, -423, -367, -58, 384, 2687, 2536, 826, 6223, 1750, -8589, 1126, 9772, -6646, 2043, 1826, -1037, -2018, 692, -818, -3431, -467, 11006, 3407, 880, -2047, -10303, 6168, 1428, -307, -18, 661, -252, 754, 1207, -2797, -3057, -6235, 99, -931, 1618, 692, 2790, -294, -1200, -5768, -11691, -5305, -100, 390, -783, -11660, -4675, -13570, 2764, 1414, -786, 385, 163, 718, 794, 1118, 827, -634, -75, 6224, 3965, -2092, -1120, -6395, 5474, -12986, -3985, 635, -544, -1877, -191, 0, 121, 379, -3059, 132, 26320, -721, 1262, -706, 421, -85, -38, 665, 590, -208, -196, 168, 10, 1271, -218, -365, -5843, -5897, -12346, -3026, 5916, -115, -2671, -1022, -203, 962, 995, -850, 527, -516, -1641, 452, 68, 1204, 740, 385, 38, 752, 150, -3088, 20608, -54, -39, 6109, 3224, -92, -315, 4407, -306, 1317, -395, -1617, 9104, -3493, -10724, -3059, 283, 81, -9791, -3210, 7307, 4459, -639, -61, 1152, -184, 2290, 398, -2902, -2776, -1624, 1153, 242, -8865, -3617, 309, -11933, -3847, -5750, 3235, -153, -315, 382, 209, -923, 2072, 458, 164, 3631, 3121, 3220, -828, -8644, 2215, 3873, 12445, 533, -631, -53, -136, -728, -240, 420, 2870, -4981, 906, -3272, 4735, 3613, 2412, -3951, -10587, 7389, 564, 3266, -1348, 524, 1570, 6611, 3354, -1042, 1862, 1860, -1187, 5761, -1722, 8231, -7428, -5662, 1239, -2887, -218, 810, -1063, 
15078, 686, -2374, -293, -2031, -245, 4441, 5045, 1100, 6722, 1787, -587, -380, 132, 5124, -12478, 95, -1230, 1464, -1871, 929, 1430, 2666, -3768, 2784, -3697, -8238, -247, 603, -8406, 1330, 1033, -743, -2546, 2739, 856, -12698, -4970, 2290, -1104, 34, -1048, -80, 634, -695, -84, 2374, -24793, -1064, -1080, -254, -812, 252, -1582, -401, 765, 847, 340, 479, -3163, 150, -187, 8432, 2607, 2075, 1384, 423, -7361, -10262, -2254, 54, 1065, 40, 857, 2014, -5076, 198, 657, 482, -422, -2185, -850, -318, 164, -684, 2698, -1008, 17493, -64, -6788, -5966, -14352, -2349, 2492, 266, 1077, 1935, -99, 4270, 2319, -2391, 779, 187, -70 }, .cb4448m0 = { -20455, 663, -3140, 2540, -2110, -406, 1078, 1968, -741, -2458, 490, -496, 338, 581, 1079, -616, 154, 10097, 231, -228, 477, 20, 1372, 11492, -1112, -3148, 547, 248, -676, 8197, 5902, -1299, 519, -2808, 11529, -76, 1239, -1032, -542, 353, -1071, 278, 274, 2781, -7741, 3260, 2711, 175, 12340, 1110, -2348, -5303, 1440, 581, -70, 262, -9902, -2375, 530, 1433, 1624, -1475, -947, 13450, 1318, -1696, 207, 198, 1162, -944, -9329, -1046, 195, -106, 682, 14624, -854, -2410, 1054, 242, -348, 581, 463, 716, 760, 2714, 1356, -1359, 13089, 2565, -10523, 1934, 637, 1218, 1160, 830, 905, 272, 408, -581, -1426, 613, 2586, -8186, 3748, -6663, 4372, -114, -4644, 2998, -9440, 685, -8741, 3363, -5623, -4229, -7058, -1201, -822, 1806, 8671, -856, -612, 1165, -426, 317, 6867, -80, -7084, 1143, -1862, 2742, 669, 550, 22, 173, 4301, -10406, 1042, -346, -1334, -2897, 647, 744, 14, -1338, -1648, -1235, 3550, -455, 2125, 1188, 17136, 1188, -6782, -849, 298, -1054, -9254, 409, -1736, 1410, -7254, -1889, 457, -740, 22, 262, 32242, 1657, -2308, 2688, -607, 609, 4, 150, -264, 192, -140, 246, -393, -76, -15050, 390, 969, 457, 1436, -649, 460, -12150, 1359, 1014, -2103, -576, 55, -590, 113, -1410, -23431, 182, -2386, -1568, 904, -218, -281, -188, -178, 63, 211, 549, 687, -12069, -88, -654, -1070, -13155, -124, -697, 438, 3174, 1700, 270, 234, -289, -625, 15749, 
-2340, 8466, 397, -4460, -1030, 3206, 1081, -1317, -1030, -72, 487, -1477, -8782, 6984, -1221, 2395, 3198, 2995, 5862, -1195, -6075, -1020, -934, 868, -470, -1024, 1202, -998, -1306, 22118, 344, 540, -3137, -547, 2440, -28, 222, 372, -424, -199, 1068, -917, -105, -4278, 52, -299, 6933, 11715, -520, -2853, 58, -8575, 416, -1272, 1128, -32, -1140, -1873, -495, 235, 2079, -314, -1328, -2615, -20194, 848, -1553, 387, -6091, 906, -10180, 8634, -506, 4078, 318, -2657, 1612, -126, -1424, -4, -1745, -343, 302, 2439, 12190, 941, -12534, -4756, -176, -90, -1295, 1041, 1875, -450, 89, 212, 2098, 1708, 1876, 4065, 1682, 1972, -4916, -951, -10683, 1443, -10978, 772, -1013, -235, 59, 213, -230, 142, -576, 506, 101, 44, -137, 26238, -47, -322, -289, 281, 2614, -4538, 634, 1116, 1191, 2985, -759, -5527, 550, 2107, -6018, -11013, -425, -221, 901, 217, 546, 213, 2026, 695, 1074, -2132, -173, -1664, -783, 25065, -326, 86, -632, 1398, 4708, -2911, 2376, 135, -1471, -904, -2338, 987, 3216, -4564, 314, 15692, -214, 1238, 230, -181, -30537, -294, 155, -607, 218, -309, -180, -246, -102, -988, -644, 111, -10517, -1604, -1180, -2748, 1191, -12959, -2, -1004, 28, -196, 1974, -790, 809, 8802, -1204, 332, 180, -3857, 1025, -5998, -9578, 94, -1069, -2398, 185, 643, -1479, 322, 2544, 12584, -8308, -3856, 1286, 1600, -2539, -2752, -2520, -367, -942, 417, -309, -2162, 2044, 10886, 1764, 11028, 3810, 2955, -1028, -1017, -1752, -487, -605, 48, 2312, -368, -1758, -252, 371, 19882, -1994, 1675, 5494, -660, -1669, 256, -54, -941, 4318, -306, 2143, 273, -3367, -3088, 6509, -1884, -5400, -576, 11394, 875, 455, 271, -218, 1401, -44, -5336, -12170, 4664, -589, -3562, -1934, 5842, 1357, 3232, 1449, -402, -11228, -96, -1509, 2073, -1751, 776, -439, 775, -3302, 13521, -325, -118, -172, 411, -396, 6154, -2455, -52, -4616, 783, -12488, -2085, 5817, -1278, 635, -1713, 2888, -830, 649, 7482, 10134, 9147, 3784, 1046, -1934, -2580, 102, -679, -124, 68, 657, 417, -175, -32768, -80, 375, -941, 224, 271, -232, 1519, 
-99, -680, 67, 66, -618, 252, 1907, 5121, 2456, -2117, -9388, -1441, 636, 7868, -8340, 1939, 1340, 1511, 711, 6530, -1748, -183, 90, 2561, 5860, -364, 5117, -4101, -4028, -944, 10526, -1028, 1047, 707, 12116, -12596, -4006, 922, -1047, 348, -971, -272, -2388, 435, 246, -1055, 148, -1852, -12418, -2531, 3524, 4103, -344, 1667, 2818, -4576, -273, -8337, 183, 497, -144, -9845, -292, -503, -1212, 4316, -1434, -11058, -3043, -5817, -981, 813, 0, -718, -467, 10285, -19005, -82, 776, 1192, 1030, 1560, 1080, -144, 729, 606, -225, -389, -187, 552, -930, -444, -5959, -1960, -1315, 2650, -1282, -18790, 1772, 263, 1410, 812, -458, -476, 744, 2595, -426, -19, 9119, 4529, -1502, 4673, 3675, 7430, 1084, -6966, -518, -13552, 1054, 2474, -9499, 1041, 5114, 442, 2927, 511, -1492, 217, -726, 398, -522, 35, 119, -332, 106, 816, 437, -1223, 27612, 521, -29, -462, 367, -966, 476, -2559, -3485, -160, 1487, -272, -586, -6014, -232, 3679, -1864, 1244, 575, -14591, -483, -1428, 20, 7874, -2948, -5965, 2383, 3270, 490, 2750, -547, -9658, -1473, 943, 285, -2388, -772, -1582, 3181, 3419, 2628, -197, 3376, -13282, -7684, 3383, 70, -1174, -70, -6703, -7305, -553, 3588, -826, -12, 7350, -3604, 345, 1098, 3856, 918, 2038, -39, 11514, 15798, 1327, 1158, 436, -918, 71, 953, 975, 1147, 174, 411, 1467, 83, -4536, -1511, 5350, -3314, 13999, 18, 4107, 1901, 834, 2614, 2356, -369, 943, -341, -460, 4380, -10014, 3308, -3541, -3225, -621, 8449, -1383, 4481, -1399, -3646, -936, 923, 221, 346, 7828, 2406, 3021, -4993, 3012, -10903, -1925, 8153, 382, -1453, 1238, 601, 1195, -2245, -2792, -4118, 473, 4898, 12961, -6094, 5905, 1368, -2754, -303, 768, -31, -1275, 1400, 596, -1326, 619, -1744, 1145, -3977, 639, -10785, -1693, -11192, -541, -434, -11384, -1017, 14361, 1398, 521, -3239, 1851, -491, 237, -1024, 1002, -3002, -303, -33, -6532, 601, -3726, 7832, 6090, -10107, 957, -1149, 689, 1327, -51, 1945, 990, -106, 595, 234, 518, 1060, 77, 837, 28880, -91, -395, -275, -265, -279, -217, -300, 240, -1055, -406, 
4314, -2139, 6349, -2227, -5996, 963, -10386, 4629, -560, 1080, 134 }, .cb4448m1 = { 31577, -1322, 1533, -2224, 253, -1485, -92, 294, 183, -580, 420, 172, -794, -206, -342, -338, 53, -85, -920, 29517, 1073, -972, -1839, 1004, 290, 46, 460, -71, -988, 1731, -362, -2070, 3848, -2, -3842, 734, -1221, -8012, 1104, 6782, 9673, 1082, -8561, -860, -2135, -1557, -1613, -13999, 1664, 2268, -1570, -732, 1010, -402, -1139, -428, 400, 1123, -2108, -11776, -345, 10608, 1245, -3142, -3244, -1132, 1700, -308, 1573, 543, 678, 5160, -3062, 433, 2703, -852, -4903, -1880, 1706, 13995, 2465, -4844, -904, -148, 350, -11168, 1406, 312, -11900, 397, 769, 5558, -1354, 187, -30, 231, -1020, 202, 884, -198, -3151, -830, -8490, -670, -2767, 1517, -12957, -3861, -2794, -1854, -180, 135, 7140, 4103, -4427, 450, 494, -1033, -1110, -2857, 11056, -711, -800, 3628, -180, -852, -10300, -2120, -450, 14464, -511, 303, -1464, -542, -89, -204, 500, -400, -318, 569, 216, 428, 350, 1973, -137, -885, -1794, -974, 3977, 3382, -18624, -420, -1947, 165, -449, 1395, -17313, -286, 2054, -447, -2740, -1881, -550, -2166, 1360, -6021, -94, 148, 676, -1619, -1737, -11977, -169, -1664, -7709, 6202, -5954, 1681, 715, -263, 56, 369, 589, 564, 1989, 1617, -1648, 9205, 1343, -11508, -7379, -3791, -3136, 1049, -844, 24, -6714, -1736, -5734, -2907, 5016, 2167, -5722, -1210, 6232, 428, 2467, -3334, -1477, -711, 6728, -10274, -4930, -6224, -349, -710, 1598, -713, -1708, -497, -254, 567, -884, 131, 11520, -908, -1425, -1862, -13449, -1590, -669, 657, 505, 236, -4, 21, 846, 100, 8248, -1847, -131, -186, 181, -806, 3293, -1072, -1208, 14492, 1555, 1527, 544, -120, -258, 6, -2401, 12455, 10880, 1091, -2350, -939, -1252, -564, 150, -114, 1419, 737, -1732, -440, -2303, -226, 536, -2492, -1085, -10117, -11013, 3786, 5275, -10, 2479, 143, -1647, -7945, 884, -1618, 2056, 12890, -424, 5986, -1471, -666, -570, -1466, -499, 64, 566, -1738, -639, 11380, -612, 1879, 1550, 12469, -299, -1501, 2634, 1036, 3020, -13, 14974, -2066, -5786, 
-2667, 5487, -6768, 468, -385, 778, -805, -536, -304, 718, 386, 285, 7546, 643, 1462, 913, 4707, 941, -3338, -194, 6669, -4493, 8869, -837, 400, -877, -11113, 326, -2318, 13683, -1304, -1966, -933, 312, 128, 470, -296, -322, 340, -1126, 1811, 1999, 2885, 3201, 331, -2494, 3999, 660, -80, -2063, -16771, -1337, 426, 4884, -6026, -40, 2093, 342, -176, 83, 134, 796, -425, -8934, 2100, 8550, 160, -221, -252, -32714, 1306, 1332, -609, -109, 547, 848, 518, -40, 303, -246, -451, -2177, -716, -750, 1, -21232, 1287, -1303, 2051, 1659, 1501, -369, -1415, 274, 308, 260, 371, -1409, -662, -7347, 7161, 3656, -1104, 8862, -5671, 1370, 1122, 16, 1132, 17593, 6778, -993, 613, -665, 3004, 3288, -1625, -1823, -1003, 740, -1002, -888, -677, -1065, -25294, 997, -160, -180, -811, 188, -333, -2483, -696, 1309, 120, 456, -116, -2020, -896, 7216, 6328, -9170, 8407, -2986, -1684, 680, 1752, -684, 613, 337, -629, -11750, -493, -324, -907, -391, 1053, 14125, 142, 420, -1917, -378, -1428, -90, -497, 1116, -464, 2170, 805, -1572, -904, -9020, -534, 6450, -490, 10750, 279, 765, 961, -3985, -2702, 2423, -4981, -1222, 1654, -1089, -2157, 1940, 14331, -895, 1726, 1555, 122, -3552, 1274, -598, -910, 3056, -1704, 6430, -10626, 1014, -8773, 1009, 1936, -360, -468, -1029, -8841, -625, 2212, 2234, 2720, 1190, -64, -2078, 4688, 8690, 5150, -450, 744, -796, -5661, -332, -7938, 2670, -4054, 1377, -1594, 11554, -4702, -3631, 745, 742, -90, -1311, 12528, -4664, 834, -853, 1542, 8560, 2209, 4091, 2876, 2117, -678, 1684, 785, 304, 7980, 2126, -302, 8239, -2105, 1584, 11894, -1055, -1391, 596, 2343, 86, 388, -1348, -1007, 1428, 413, -9231, -10312, -7346, -1108, 1385, -1255, -3954, 738, -1258, 410, 226, 15115, -1059, -4117, -50, -504, -1726, 1425, -9974, -346, 688, 464, 244, -586, -8880, 845, -659, 932, -1309, 290, -29, -417, -2184, 1011, -9622, 1443, 9009, 1945, 2698, -708, 10572, 2410, 1200, 4492, -2569, 1444, 2735, -8604, 2274, -4057, 478, -199, 1285, 12695, 12321, -2933, -1708, 1198, 675, -492, -560, -52, 
-1261, 85, -480, -96, 696, -764, -1402, -31368, -580, -675, -1678, -58, 600, -522, -292, 647, -36, 154, -1148, 437, 1561, 588, 603, 7629, -16973, 29, -828, -589, -919, -1372, -470, -445, 428, 528, 5828, -353, -32, -1781, -702, -690, -7196, -3253, 1942, 4600, -12102, -674, -10480, -2336, 711, -2174, -7474, -1436, -451, -7133, 856, -2652, 1892, 3464, -546, 676, -13296, -516, -13618, -997, 938, 1686, 1006, 1358, -1371, 922, 534, -170, 126, 255, -835, 50, 945, -1066, -1676, 3, 1038, -437, 26030, 418, 27, -1092, -493, -428, -606, -1097, -628, 298, 295, -806, 183, 146, 1352, -84, -722, 833, -25667, 3176, 1001, -322, -2339, 15, -475, -1257, 2116, 876, 637, -529, -1108, 302, -2452, 19734, 58, 851, 9845, 1142, 2168, 706, 11070, 1556, 544, 3002, 2238, -3974, 2738, -48, -8324, -2186, -355, -14933, 2192, -2481, 2700, 473, -486, 761, -208, 76, -78, 102, -4896, 1378, 12377, -8269, 28, 1092, -5071, -1500, -1190, -804, 1085, -766, 493, 22, -1041, 9136, -1234, -12247, 967, 2672, -883, 4582, 4871, 1891, -532, 329, 226, 446, -6710, 312, -914, 1416, -1852, 3052, 6512, 8971, 5544, 6519, -579, 1021, -241, 911, 782, -3456, 10158, -1865, 3941, -12300, 8, 472, 882, -1580, -1799, -1025, -631, -127, -15316, 8047, -200, -1860, 582, -4363, -1274, 1085, -48, 2383, 638, 480, 369, -838, -1341, 414, -114, 2757, 1222, -2194, -3394, 6469, 2418, 738, -1656, 15594, -1090, 202, 727, -769, 484, 2462, 4875, 1656, -3835, -16877, 5276, 239, 982, -1872, -130, 901, 1352, -155, 4939, -8317, 9000, 2503, 485, 1184, -548, -1356, -7482, -188, -1587, 496 }, .fcb08l = { -2539, -3275, -2699, -3345, -2843, 5501, 426, 7127, -149, 3111, -2991, -2297, -2345, 2702, -969, -946, 2837, 1114, 1800, 1271, 12249, -2282, -2309, 1566, -2889, -3020, -2083, 3586, 8919, 2651, 4111, -1842, -1588, -1428, 3251, -102, 156, -320, 722, 1711, 20565, -3068, -2211, -3164, -3410, -3396, -2882, -2002, 1730, 4077, -2696, -1694, -2839, 2948, -2739, -2380, -2252, -1311, -269, 1900, -2796, -444, -2996, -2525, 5194, 1459, 5042, -1089, 914, 4116, 
7644, -3137, -3156, 4028, -3435, -3240, -2585, 5542, 5119, 9885, -2995, -3153, -3449, -3101, -3551, -3469, -2196, -1271, 3869, 5413, -2800, -1990, 3371, -2286, -1022, 3190, -550, 1723, 968, 1916, -2749, -1530, -2211, -2987, -3357, -3262, -1042, 10277, 107, 2662, 9819, -2753, 4269, -3277, 3125, -3131, -2974, -3251, 6466, 9484, -2034, -2707, -2424, -3170, -2619, -2278, -143, -1641, 11856, 5975, -1282, -2629, -2396, -2364, -2012, -1085, -2576, -2422, -2206, 13731, -2261, 2751, -1768, 2482, -1065, -347, -137, 31, 619, 385, -2257, -2215, -1698, -2686, 4468, -2563, -1071, -1359, 7757, 3732, -2856, 9018, -2046, -1494, -2234, -2209, -67, 1340, 2433, 2965, -2722, -2151, -2966, -2780, -2732, -1509, -2085, -1532, 6934, -1248, -1936, -2203, -787, -1781, -895, -1990, 4693, -1818, -1569, 1954, -2283, -2403, 10514, -3105, -1074, -2838, -1, 1192, 1113, 3309, -2249, -2451, -1660, 2535, -1439, 3582, -1093, -594, 1956, 758, 5349, -2524, -2320, -1903, -2055, 5075, -941, -721, -536, 2197, -2309, -3027, -1460, -2911, 11344, -2474, -1601, -1749, 3260, 2547, 3819, -1247, -1449, 2835, -1118, -652, -516, -379, 531, 440, -569, -2606, -2545, -2447, -1685, 8678, -1868, -2003, -992, 5888, 8591, -1848, -2010, -2196, -2049, -658, 3473, 214, 905, 317, -2050, -1083, -2593, 8754, -2234, -2449, -1688, 2194, 2244, 2502, -1659, -2748, 4584, -3011, 3702, -2307, -1887, -1960, -1068, 2889, -3022, -2989, -2295, -2794, 3071, -1588, -43, 2627, 1278, 2031, -2145, -2551, -2333, -3205, -3237, -2760, 9082, -454, 4339, 1776, -2738, 4785, -2176, -1896, 2148, 1350, 768, 249, 1001, 1499, 797, -2182, -1443, -229, -32, 827, 401, 270, 581, 380, -2370, -2376, -2679, -3099, -1742, -1149, 4666, -693, 1109, 7547, -2496, -3063, -2818, -2621, -2016, 5722, 4932, 1217, 2161, 2449, -2207, -2954, 3769, -2824, -1809, -2946, -1693, -377, 1565, 4100, -2947, 3063, -3062, -2919, -3093, -2520, -1712, 2383, 1305, 1867, 10145, -2912, -3307, 7519, -3502, -1063, -2782, 8595, -750, -1503, -3141, -2486, 2923, -2574, -1826, -1244, 3537, 
2494, 2583, 1560, -2722, 3284, 2245, -1258, -658, -394, 483, 719, 1121, 1073, -2949, -1013, -3048, 597, -3103, -2510, -1970, 7207, 8635, 1917, -1772, -483, -2318, -1860, -2500, 2981, -1651, 550, 696, 615, -2121, -2055, -1619, -2126, 3108, 3417, -485, -47, 848, 1608, -2636, -1707, 3142, 3798, 479, -1112, 597, -323, 1555, 1531, -2930, 2106, -2398, -2314, -1835, 0, 2920, 896, 2356, 1259, -2911, -3184, 593, -3570, -3389, -3263, 7340, 7640, 6874, 6549, -1912, -1334, -1749, -568, -1718, -405, -1375, 3456, -1024, -1903, 9384, -2721, -2485, -2377, -3026, -899, -3133, -3032, -2452, 7715, 2492, -2450, -1721, -2138, -1497, -55, 760, 2382, 1183, 1105, -2782, 389, -1528, -927, 664, -531, 1405, 363, 582, -292, -1678, -2718, -2763, -3140, -2799, -2178, -2715, -2592, -972, -1226, 3278, -1173, 2916, -1548, -446, -1241, -209, 379, 689, 538, 3110, 2857, -1735, -1244, -589, -413, 65, 471, 522, 323, -2043, -212, 1309, -471, -564, -16, 378, -320, -437, 228, -2194, -2637, -2513, -2670, -1863, -954, -2082, -2398, -2270, 5563, -2959, -2444, -2794, -1736, -1631, -1324, 1482, -481, 2317, 1470, -2871, -2007, 702, -1980, -491, -146, -695, -145, 2817, 1268, -3395, -3456, -3069, -3433, -2874, -205, 806, 3038, 3806, 2623, -2954, -1861, -712, 1017, -326, 44, -93, 910, 775, 346, -2625, -2570, -2974, -2344, -2712, -1930, -2213, 3521, -1341, 4327, -141, 835, -1119, -1336, -1092, -1891, -860, -727, 315, 2562, 4119, -2638, -2584, -1951, -2710, -2499, -1561, -952, 2821, 2505, -2388, -1855, -2926, 1742, -2563, -2655, -1802, 3082, 3063, 2456, -3304, -2670, -2147, -1504, -309, 1421, 1661, 1546, 560, 615, -2590, -1593, -1523, 2025, 3167, -841, -356, -648, 309, 1165 }, .fcb08m = { -2962, -2140, -2166, -1454, -1638, -1100, -835, 686, 978, 550, -1630, -1021, -1424, -1867, -1118, -474, 66, 6104, 904, 603, -829, -475, -1368, -1199, 7255, -890, -465, 114, 118, 224, -2453, -1279, 8192, -1289, -452, -47, 180, 324, 627, 209, -2770, 11214, -857, -1720, -895, -531, -291, -264, 232, -402, -2699, -2561, -2433, -2093, 
-1315, 86, 2666, 1663, 1351, 2349, -2788, 4576, 3680, -1365, -995, -513, 46, 44, 522, 142, -2739, -1654, -1950, 4573, -659, -536, 285, 72, 875, 627, 3142, 105, -941, 1245, -489, -495, -229, 44, -236, -1083, -2336, -1193, -1620, -1859, -1339, -655, 205, 1032, 5581, 1195, -2635, -1740, 2656, 1976, -52, 784, -96, -165, 419, -486, 8850, -624, -792, -1531, -765, -674, -730, -829, -150, -27, 2255, -1177, 2727, -1430, 737, -902, -780, -729, 169, 278, 3729, 3763, -32, -1581, -563, -573, 77, -372, -64, -477, -2500, 526, -1682, 1464, -830, -124, -548, 561, 202, 1115, -1682, -1552, -2014, -2127, -1374, -749, -720, 64, 2097, 6944, -2771, 4929, -1680, -2212, -1430, -801, 114, 891, 1176, 855, 3571, -2187, -1566, -1694, 84, -46, 932, 786, 765, 856, -1038, -498, -117, -1582, -1379, -1162, 6293, -367, 594, 132, -2487, 2119, -2153, -1749, 833, 1089, 507, 133, 337, 423, -2777, 2507, 277, -1455, -1019, 1811, 639, -595, 136, -1050, -2941, 4474, -176, 1095, 1113, -479, 182, -295, -229, -605, -2035, -1649, -1171, 51, 0, 125, 2844, -310, -82, -640, -2251, -2138, -2270, -1567, 2260, 92, 368, 95, 1433, 1346, 820, -2339, -1822, -895, -69, 158, 190, 911, 1008, 764, 684, -1756, -1013, -1625, -1610, 6062, -499, -1036, -139, 1129, 488, 524, -665, -870, -347, -76, 123, 91, -12, 14, -2867, -2019, 2858, -1903, -1165, 309, 287, 1250, 767, 776, -2784, -2446, -1157, 460, 2589, 437, -285, 711, -299, 402, -2683, -2271, -1714, -1535, -547, 4118, 510, 1158, 700, 631, -2084, -1236, 509, -1009, -510, -193, -1075, -793, 727, 2150, -2722, 968, 1077, -1579, -1410, -894, 401, 1043, 427, 182 }, .fcb08s = { -2368, -2340, -1735, -1897, -1493, 984, 3062, 2826, 1049, 164, 1181, -1990, -1833, -1720, -1360, 24, 1485, 1923, 460, 511, 69, 78, -353, -3, 3761, -480, -1538, -1063, 540, -64, -1546, -988, 1514, -1167, -1354, -563, 1435, 880, 1123, 182, -2243, -2109, -2378, -2201, -1491, -836, -124, 605, 6159, 3636, -2770, -2959, -2956, -3019, -2154, -648, 1805, 4698, 2929, 2078, -975, -360, -895, -623, -593, -879, -345, 
4333, 492, -56, -2102, -781, -476, 1268, 606, -670, 1686, -105, 370, 461, -221, -868, -1381, 297, 128, -578, -809, -938, 3896, 490, 4032, 2675, -684, -1108, -1235, -915, -874, -919, -802, -1040, -1324, -16, 2156, 1943, -652, -666, -47, -1499, 168, -210, 4213, -1895, -1734, -1767, -1412, -867, -71, 329, 855, 1294, -1849, 4393, -1312, -1597, -564, 434, -454, 269, 892, -31, -1170, 67, 370, -1144, -320, 3706, -811, -190, -123, -166, -659, -1033, -789, -902, -347, -280, -108, -313, 452, 3701, -1505, -2610, -2758, -2550, -2034, -1361, -676, 713, 2263, 8286, -2241, -2508, -2540, -1721, 182, 1947, 306, 1773, 1220, 2909, -60, 73, -235, -1631, -1302, -692, 4171, -830, 49, -188, -471, -2208, -2265, -1518, -196, 2995, 2571, -579, -68, 805, -1294, 1274, 4294, -1356, -702, -532, -465, -123, -400, -719, 336, 3093, 1634, -906, -71, -502, -938, -982, -742, -1187, -1757, 2890, -1591, 1303, 216, -311, -404, -29, 501, -543, -1466, 1587, 309, -578, -173, 34, 1116, 1286, -1184, -1174, -175, -732, -619, 3508, -80, 191, -1059, -174, -429, -470, 10000, -933, -1511, -1601, -1571, -1445, -1065, -1407, -1053, -932, 1183, 7875, -460, -1609, -1618, -1398, -1154, -1227, -1012, -1450, 20, 28, -235, -110, 203, 105, 252, -154, -51, -58, 2940, -490, 17, -51, 131, -106, -526, -566, -822, -1177, -1335, 2749, 608, -1575, -1322, -1351, 111, 641, 1441, -9, 733, -207, -273, -665, -630, -588, -78, 254, 304, 762, -2661, -2677, -1238, -82, 2569, 3001, 932, -1032, 211, -324, 40, 1395, -836, -1119, -635, -1425, -1514, -1135, 1509, 2963 }, .fcb11l = { -3004, -2927, -2672, -2356, -735, 179, 950, 1734, 1101, 1641, -1610, -1161, -1606, -179, -1634, 3383, -610, 240, 73, 1128, 818, -1052, -1641, 724, -1938, -1741, -1211, 3967, 1988, 1445, 3010, 2203, -1685, -1698, -1838, -759, -144, 515, 999, 1215, 3239, -1912, -2048, -1739, -1488, -148, 1590, 1370, 1066, 1270, -2721, -1637, 99, -1964, 224, -946, -1437, -954, 755, 1420, -2800, -2211, -2304, -2048, 4853, -714, -383, 2159, 1823, 2328, -1619, -1584, -1839, 5462, -1703, 
-802, -227, 485, 1017, 1695, -2459, 2399, -1820, 2254, -1373, -767, 53, 705, 1074, 1293, -1582, -2486, -2208, -2341, -2264, -2132, -1578, -1043, 322, 7685, -2198, -1768, -2106, 16, -2207, -1495, -1106, -961, -482, 1642, 6785, -1540, -1540, -1449, -1177, -854, -307, 853, 1279, 1449, 3253, -1427, 2314, -1473, -985, -1025, -321, 923, 1140, 1166, -2704, 2664, -2444, -2717, 481, 3083, -1449, 1225, 3168, 2389, -2124, -1981, -1342, -1939, -1904, 4736, -885, -826, 3866, 2046, -290, -567, -1986, -1880, 1966, -465, 1638, 683, 1005, 1099, -2842, -2537, -2559, -2427, -1243, 4039, 1371, 3897, 2529, 2400, -2586, -1328, 785, -1697, 1733, 2382, -442, 190, 901, 1281, -2669, 2198, -1502, -1404, 2593, -694, -186, 466, 1065, 1199, -1905, -1389, 6171, -1817, -513, -989, -356, 246, 1619, 1883, 36, -2178, -1602, 608, -1523, 23, 1265, 578, 953, 1038, -483, -2278, -2138, -1740, 584, 244, -54, -192, 915, 1097, -213, -1569, 1861, -1401, 3686, -1625, -1234, -614, 860, 1311, -1397, 2315, 1896, -1608, -1326, -1487, -99, 2241, 697, 1156, 1711, -2099, -1507, -135, 1422, -695, -57, 1390, 823, 937, -122, 479, 47, -2144, -1514, 955, -1317, -726, 480, 1153, -2959, -2558, -2573, -1355, -1879, -1446, 6435, 677, 3124, 3134, 1850, 1834, -1396, -1417, 1290, -896, -561, 1428, 1007, 1105, -2101, -2044, 1779, -1913, -1868, 1410, 916, 1232, 1112, 1335, -2663, -104, -513, -96, -470, 480, 1516, -150, 298, 714, -2558, 3076, 468, -745, -945, -443, -849, -989, 341, 1102, 433, 588, -1772, 462, -527, 670, -128, -108, 583, 701, -2281, -2149, -2398, -2749, -2557, -1691, -1095, 1336, 9088, 3844, -1799, -1861, -1908, -2242, -2184, 2313, 3779, -809, 519, 2229, -1914, -1673, 1764, -634, -1955, -1721, 405, -499, 243, 1632, -2377, 7289, -1659, -1752, -1341, -948, -323, 841, 1703, 1774, -2029, 2384, -1877, -1918, -1729, 1483, 483, 1916, 576, 1258, -2310, -1796, 2208, -1579, 57, -1735, -1161, 5177, 1674, 2468, -1907, -1499, 1868, 2275, -620, -356, -228, 489, 1064, 849, -683, -1204, -1761, -2211, -606, -764, -1056, 3888, 253, 
1518, -2555, -2075, 119, -1567, 971, -1178, 2683, 1476, 978, 1419, -2947, -2418, -2164, 1178, 1582, 1470, 896, 645, 1671, 1462, -2234, -1363, -1184, 1408, 1042, -1091, -208, -49, 527, 917, 1266, -1444, -2174, -2447, -2300, -1732, 3076, 5631, 248, 2195, -2477, -1724, -2434, -2477, -2524, -1828, 2331, 845, 1423, 1767, -2393, -1946, -857, -462, 344, 17, -896, 2391, 892, 882, -828, -280, -752, -1136, -1563, -1040, 1222, -1173, 1763, 1179, -1448, 1946, -1815, -1588, -1638, -1282, 3302, 132, 509, 1408, -2760, -2338, -1935, 1353, -1531, -1074, 1156, 3086, 1374, 1667, 2302, -1623, -1897, -1991, -494, 2603, -754, 524, 1265, 1304, 3062, -1359, -1365, 1987, -1334, -916, -146, -40, 635, 1033, 1724, -1057, 49, -1159, -774, 106, 1053, -153, 134, 691, -119, -1226, 332, -363, -197, -69, -133, 573, 190, 216, -2236, -294, 1288, -2110, -1537, -1005, -1175, 56, 4227, 1623, -2440, -1894, -1623, -2377, 2287, -1220, -1506, 177, 5689, 2849, -2857, -2166, -2546, 2174, -2414, -2343, 559, -1020, 4650, 3514, -2875, 1309, -2557, -2534, -2235, -1901, 1559, 4412, 2301, 2204, -2969, -2018, -2399, -2834, -2431, 1316, -1474, 1269, 2533, 3485, -2892, -2387, -2716, -2317, -2031, -1992, -1311, 8071, 3933, 3807, -2139, 1909, -2200, -2344, -2060, -1638, -1154, -210, 2781, 2139, 1119, -1828, -2069, -2306, -1975, -1165, -444, 789, 2409, 1551, -2929, -103, -1920, -2010, -904, 694, -188, 4, 1051, 1190, -2649, -2454, -2205, -1651, -1856, -1552, -1165, 352, 3351, 1266, -1719, 57, -1828, -420, -938, -1251, -461, 1294, 1158, 893 }, .fcb11m = { -2704, -2459, -2349, -1535, 2807, 365, 1064, 892, 830, 1222, -2190, -1542, -2285, 6443, -1607, -1362, -605, 637, 883, 877, -2378, 2292, 3106, -1057, 1776, -1094, -859, 249, 199, 256, -1537, 2098, -1126, 2243, -1186, -193, -211, 211, 502, 308, 3369, 3197, -1271, -1370, -355, -423, -537, 468, -237, -99, -1439, -1748, -2185, -1972, -1357, -814, -470, 815, 1306, 6390, 1983, -1169, -1749, -29, -1368, 5929, -1539, -900, 576, 701, 1708, -1608, -1148, 3522, -822, -120, -461, 
-158, -43, 39, -2543, 8872, -1347, -1580, 222, -488, -162, 295, 382, 291, 11143, -1223, -1270, -1399, -392, -563, -500, -604, -544, -135, -1787, -1313, -1490, -1395, -1100, -1278, -818, 6172, 768, 1597, -623, -681, -1128, -1575, 7257, -665, -1021, -439, 932, 703, -1496, -2168, -1945, -1454, -808, -1261, -354, 875, 6706, 1956, -1773, -1503, -1536, -1162, -1386, -1885, -1607, -318, -72, -7, -1932, -1349, 6150, -1852, -345, -18, -81, 223, 339, 425, 362, -1623, -1432, -1973, -1042, -1373, 7830, 38, -116, 1000, 421, -2375, -1808, -1832, -1046, 2077, 955, 1576, 581, 824, -2021, -1582, -1402, -1420, 69, 3549, -513, 192, 262, 483, -2503, 4173, -11, -1532, -893, 282, 187, 320, 176, 259, -2308, 2342, -2385, -2147, -784, -375, 413, 833, 889, 1297, 1415, -1085, -1009, -1501, -1246, -1298, 1553, 1384, 332, 662, 2226, -2399, -1752, -857, 1899, 131, 501, 209, 217, 346, 4294, -1811, -1694, -1080, -752, -263, -228, 249, 628, 971, 2508, -1031, 2871, -1054, 42, -202, -738, -170, -239, -290, -2751, -2379, -2379, -1999, -1448, -380, 1594, 1279, 1399, 1633, -2376, -1839, 1367, 1685, 356, -126, -50, 143, 31, 33, 314, 160, -663, -687, 25, 388, -267, -188, -188, -129, -2614, 1063, -1835, -285, 2549, 205, -30, 370, 319, 297, -87, -2208, -1164, -839, 894, -266, -410, 375, 1263, 924, -2606, -2325, -1854, 1792, 407, 328, -110, 575, 1090, 971, -2517, -1583, 1355, -1892, -490, -203, 846, 724, 597, 779, -1650, -1281, -1294, 549, -146, -548, 2947, -28, 265, 339 }, .fcb11s = { -1536, -2360, -2378, -2138, -1380, -346, 1575, 2779, 3247, 1689, -340, -1788, -1839, 103, 31, 853, -653, 3159, 365, 154, 404, -835, -716, -35, 4309, -155, -1214, -1180, -750, -522, -753, 350, -1660, -1603, -1159, -582, -489, 1067, 2615, 1747, -1755, -2351, -2314, -1453, 922, 3458, 867, 439, 493, 1212, -1584, -1655, 1300, 1783, 1641, 1442, 816, -1283, -1456, -1417, 4998, 1923, -200, -1086, -1060, -1016, -1074, -1217, -1285, -1245, 633, 390, -1443, -1099, -507, 3041, 343, -163, -745, -667, 2333, -2144, -2460, -2247, -2063, 
-1736, -742, 418, 3124, 3504, 227, -735, 799, -1326, -20, -543, 1900, 237, -671, -545, -1727, 121, -1750, 3700, -485, -553, -77, -212, 942, 62, 1647, -688, -1506, -1429, -619, -839, 172, 3209, -500, -371, -1680, -1408, -1122, -563, 3627, -115, 510, 534, -65, 199, 800, 5040, 631, -744, -612, -1023, -1099, -1319, -1520, -1460, -1120, -274, -1220, 349, 1848, -620, -1411, -616, 1771, 1024, -1223, -2195, -2345, -2144, -1517, -1055, -385, 557, 1482, 6797, -2274, 818, -460, -707, -274, 646, 654, 731, 268, 347, 4583, -1289, -1452, -1193, -1072, -681, -178, -131, -108, 547, -1521, -781, -1298, 239, -486, -445, 3453, -226, 90, 653, -1237, 624, 4692, -482, -798, -799, -766, -645, -890, -915, 3748, -909, -1012, 85, 963, 375, -100, -1010, -1269, -1508, 2106, -1194, 2632, 595, -826, -221, -411, -1104, -1365, -1050, -2112, -863, 1943, -727, -1079, -733, 78, 1990, 363, 953, 1325, 459, -891, 3364, -410, -362, -547, -994, -1371, -1258, 12270, -43, -1668, -1868, -2004, -2133, -1863, -1949, -1805, -1288, -1640, 3783, -1414, -578, -505, -464, -158, 252, 71, 76, 22, -20, -72, -13, -19, -95, -14, 2, 23, -5, 1289, 630, 291, -707, -794, -857, -715, -122, 551, 219, -2358, -1905, -1397, 277, 572, 343, 789, 526, 1629, 991, -980, 222, 740, 1199, 19, 1200, -864, -467, -656, -138, 820, -2005, -924, 154, 195, 393, 267, -183, 1024, 100, 1243, -872, -705, -781, -422, -377, -910, -637, 89, 2849 }, .fcb16l = { -2676, -2246, -3119, -2904, -2707, -1946, 7718, 2292, 2451, 4206, -1214, -362, 1116, -860, 30, -993, -888, -1046, -3732, -2268, -2541, 6060, -2220, -1597, -1650, -1320, 88, 1229, 2118, 2348, 1430, -1865, -2190, -2122, -1844, -2069, -1746, 15, -1746, 1321, -2671, -2993, -3247, -2811, -2141, -1360, 1886, 270, -381, 5676, -2070, -444, -674, -1082, -1144, -346, -823, 4630, -224, 1940, -2441, -2072, -2194, -295, 2175, 1209, -734, 168, 923, 1359, -2667, 389, -2585, -2279, -2195, -1141, -1016, -218, 109, 1926, 5184, -2226, -1888, -1273, -1044, 25, 461, 886, 1125, 1249, -2215, -2381, 3109, -1963, 3015, 
-2027, -790, 1192, 1646, 2188, -2906, -2598, 484, -2372, -1372, -1082, 1718, 664, 1391, 2396, -2518, 1937, -2362, -2510, -1504, 2947, 446, 684, 1947, 2059, -3263, -3001, -3240, -3034, -2598, 3367, 4407, 2327, 2450, 2994, -2379, -1875, -1862, 6387, -1956, -1417, -525, 1098, 1836, 2932, 1408, -1130, -1417, 1693, -262, -645, -515, 443, 735, 619, -2834, -2246, -2646, -2521, -811, 6608, -421, 1572, 2015, 3234, -2086, -1435, 89, 1648, 838, -986, -1159, -1208, -32, 1354, -2135, -2159, 7796, -2424, -949, -2040, -1179, 228, 1187, 3008, -2963, -2500, -2074, -2025, -1439, 1692, -378, -596, -62, 2419, -3522, -3132, -2899, -3290, -2929, 2844, 49, 4307, 2754, 3897, -2960, 1305, -1858, -831, -1379, -773, 3257, 979, 975, 1513, -2849, -1610, 2483, 456, -1395, -634, 847, 1320, 1116, 1175, 2497, -1554, 2176, -1697, -997, -799, -120, 339, 996, 1379, 11359, -1557, -2219, -2237, -1792, -2084, -1009, 781, 3341, 939, 1954, -1860, -2347, -2117, -2000, -1394, 3825, 106, 2595, 2162, -2938, -2488, -2112, 772, -1059, 1822, 159, 1017, 2452, 1506, 1313, -2615, -2479, -2941, -2220, -2510, -726, 4703, 1778, 3375, -3133, -2664, -2821, -2771, 1559, -1000, -434, 1874, 4130, 2987, -2998, -2692, -2326, 1580, -2231, -1347, 4166, 2021, 1177, 2531, -2880, -2337, -2589, 1505, -2843, -2468, -339, -1059, 3212, 4264, -3112, -2885, -2889, 975, -2522, -2278, 721, 5057, 3989, 3373, -3098, -2947, -1128, -2251, 1935, 2981, 3007, 975, 1983, 2048, -2861, -2302, -2431, -1460, -1492, -1524, -944, 1556, 1778, 1549, -2658, -2259, 2768, -2460, -1447, 2957, 759, 324, 2533, 2477, -2935, -1687, -2554, -2647, -1431, 118, -365, 10280, 1526, 3447, -2570, 2268, -2351, -2115, 2588, -9, -834, 1115, 1878, 2365, 79, 1132, -1619, -1406, -1568, -1766, -224, 825, 2113, 1382, -548, -2669, -1797, -2691, -2139, -2495, -210, 1276, 13623, 2315, 1965, -1713, -1610, -2187, 2534, -1495, -1301, 622, 563, 2154, 2743, 3230, -1784, -1774, -792, -493, -131, 156, 944, 1211, -1886, 357, -1018, 225, -285, 1025, -134, 218, 290, 153, 5869, -2407, 
-2856, -3051, -2540, -3238, -2260, -370, -451, 6314, -500, -2554, -2110, -879, -323, -537, 570, 1228, 1556, 1342, -2486, 3366, 1838, -937, -959, -683, 63, 937, 652, 1212, -2164, -1448, 166, -799, -550, -1317, 481, 299, 5494, 1360, -3147, -2574, -989, 1550, 1952, -1502, -96, 3517, 1304, 2311, -2931, -2146, -2174, -2052, 579, 680, 896, 2697, 703, 1365, 4130, -2367, -2627, -3125, -934, -3093, -2155, -955, 6025, 5024, -3121, -3064, -2883, -2458, 1723, -842, 3032, 4391, 2327, 2837, -2536, -2208, -1610, -2189, 6509, -1424, -1116, 1427, 2830, 3370, 1084, -1562, -1655, -1628, -491, 2260, -321, 421, 774, 1237, -3267, 977, -3170, -3144, -2698, -1324, 1424, 3034, 3323, 3347, -3021, -3061, 2027, -2345, 852, -2832, -1714, 5926, 4517, 3839, -1490, -2416, -1726, -1268, -1458, -2137, -1715, -580, 1403, 13408, -3005, -2706, -3063, -2745, -2777, -2136, 2786, 202, 5141, 3407, -3104, -3001, -3176, -3388, -3507, -2863, -2097, 2325, 2618, 6146, -1997, -3152, -1036, -2694, -2587, -2986, -2750, -2219, -1607, 5944, -2893, -2633, -2229, -2811, -2482, -2115, -2219, -1180, 5246, 3252, -3111, -2052, -2693, -2934, -1805, 2583, 353, 1262, 8588, 3900, -2468, -2726, -1861, -2352, -2237, -2750, -2345, -1936, 9793, 8392, -3490, -3124, -3596, -3630, -3154, -2390, 743, 6652, 6366, 6143, -2852, -3547, -3124, -2718, -1094, -494, 49, -1053, -3005, 32767, -1721, -1229, -1715, -1590, 1587, -1233, 3384, -252, 312, 1120, -3287, -2926, -3048, -2828, -2502, -1185, 2028, 3778, 487, 2083 }, .fcb16m = { 616, -1065, -1622, -1949, -1283, -863, 6819, 517, 1135, 1282, 2631, -1447, -1477, -1004, 286, 1358, -135, -340, 147, -130, 5435, -1609, -1916, -1758, -1066, -1126, 478, 995, 1098, 1437, -1737, -1339, -1864, -2009, -1038, -1004, -573, 810, 5974, 2840, 349, -1559, -1496, -1151, -307, -82, 681, 827, 550, 776, 1930, 166, -1100, -1489, -1185, -1182, -1210, -326, 858, 1688, -2561, 3514, -736, 1555, -59, -906, -123, 87, 102, 274, 1902, -459, 3008, -984, -707, -334, -571, -317, -190, -371, -2862, 607, 1346, -1517, -1220, 
-617, 2494, 697, 190, 64, 3264, 3926, -1249, -1542, -933, -302, -246, -248, 69, -283, -1766, -750, -1898, -1259, 6841, -1546, -785, -64, 1208, 1294, -1522, -1742, -1873, -1898, -1455, 7128, -752, 1718, 1398, 1123, -2742, 4733, -1552, -2483, -2210, -495, 355, 864, 830, 759, -2721, -2115, -1891, -1696, -1137, -1559, -1265, -658, -591, 850, -699, 1262, -551, -1055, 877, 96, -388, -192, -479, -1091, -2763, -1379, 3290, 2331, -874, -307, -386, 615, 366, 133, -2671, 5181, 4339, -894, -871, -634, -165, 409, 91, -291, -2649, -411, 8039, -1947, -1156, 57, 351, 1014, 472, -198, -1816, -590, 2887, -1702, -1113, 3414, -556, 117, 483, -377, -1707, -1146, -1155, 2518, 2014, -382, 3, -6, 206, -98, 10770, 274, -1415, -1670, -1020, -1036, -786, -782, -463, -552, -2500, 10460, -1624, -1787, -707, -1327, -59, 375, 91, 22, -2776, -2343, -2104, 825, -759, -823, 482, 1149, 1265, 570, -1676, -1826, -1848, 6125, -1391, -820, -449, 844, 586, 535, -2873, -2475, -2607, -2611, -1830, -487, 1643, 1680, 2088, 2570, -2357, -993, 3189, -1473, 3506, -1203, -793, 662, 464, 98, -2507, 1617, -1793, -1935, -1307, -169, 9, 885, 728, 1178, -2010, -1346, -1375, -187, -548, 2753, -464, -105, 799, 511, -2170, -2428, -2177, -1497, 2072, 828, 441, 1020, 873, 1000, -1297, -1531, -1863, -1967, -1516, -1088, -758, -230, 1561, 6655, -2173, -1787, -1548, -1763, -1366, -24, -645, 6836, 1480, 1923, -2728, -1859, 1798, -2010, -1585, -677, -371, 1405, 1254, 1278 }, .fcb16s = { -2250, -2771, -2879, -2775, -2240, -1363, -272, 1233, 6172, 5074, -2882, -2419, -2054, -2420, -1252, 347, 1325, 1799, 1723, 4361, 774, 2066, 1874, 280, -707, -605, -581, -662, -1104, -2038, 7111, -137, -883, -1079, -1001, -54, -847, -1013, -1045, -832, 4696, 3781, -624, -1485, -1360, -1359, -1307, -1219, -866, -945, 5419, -1512, -2307, -2134, -2056, -1724, -1653, -630, 157, 3399, -727, -860, -1381, -380, -716, -1335, 3819, 78, -2, 277, -3185, -3118, -2715, -3110, -1500, 1626, 3352, 3075, 1956, -539, 16640, -1204, -2281, -2307, -2272, -2349, 
-2009, -2184, -2777, -2375, -1015, 6208, -402, -1331, -1182, -763, -730, -81, -591, -1184, -1927, 543, 4464, -1095, -131, -542, -129, 486, -366, -1097, -1594, -554, -15, -337, 3152, -723, 71, -40, 385, -309, -769, 290, -853, -1058, -1196, -1557, -595, 3695, 1129, 438, 1729, -1309, -971, -871, 90, 1418, 1261, -23, -1382, -223, -1551, -713, -1044, 4495, -160, -867, -1242, 1188, 159, 120, -1657, -951, 1536, -159, -1310, 1101, -404, 155, 1717, -24, -1607, 2347, 2056, -1943, -1313, -1297, -81, 34, 1441, 354, -2110, -1873, -516, 1102, 2174, 2131, 0, -946, -729, 61, 107, -14, -108, -50, 42, -164, -177, -92, -29, 162, 1349, -2380, -2099, -1692, -980, -49, -94, 331, 1317, 3819, -482, -782, -775, -909, -640, -1099, -615, -225, 1556, 2973, -630, 70, -186, -1599, -1076, 4440, -890, 78, -76, -517, -855, -1886, -1521, -1206, -1152, -900, 753, 1338, 1758, 2431, -2433, -1569, -1294, -583, 552, 2040, -154, 250, 513, 2333, -820, -1987, -2291, -2238, -1880, -1651, -1120, -262, 2013, 9756, -2803, -2574, -2634, -2789, 356, -1838, 325, 4584, 3584, 2486, -1524, 1874, -337, -1800, -1659, 406, 2450, 1252, -245, -1030, 1985, -397, -1565, -51, 148, 2039, -1212, -729, -700, -11, 904, 649, 531, -2287, -1640, 766, -725, 171, -1596, 1387, 3189, -672, -459, -794, -422, -714, -195, -231, 185, 99, -952, -2248, -2170, -1190, -457, 1458, 34, 1179, 2427, 1683, -1658, 3749, -1816, -2000, 2823, -1243, -1415, 713, 875, 75 }, .fcb22l_1 = { 2198, -2215, -2251, -1966, -1540, -467, 403, 1647, -2867, -2589, -34, -2314, -602, 2371, 2614, 2218, -2494, 3659, 2708, -1076, -914, 233, 1149, 1425, 319, -979, 1023, -682, 110, 239, 427, 703, -2979, -2513, -2649, -2265, 7420, 526, 2174, 2932, -2868, -2056, -2232, 1651, -1325, -856, -218, 2091, 458, 1508, -1208, -845, 244, -441, 558, 752, -700, -1370, -395, 980, -321, -232, -241, 293, 10391, -1792, -1948, -1518, -1049, 43, 1524, 2033, -2434, 303, 1730, -1205, -1432, -1183, -694, 1185, -2531, -2656, -2751, -1756, -1321, -1100, 287, 8605, -2868, -2554, 721, -2065, -1671, 
-771, 675, 2223, -2690, -2501, -2313, 1829, 3189, 45, 1825, 2024, -3153, -2824, -2729, -2308, 1686, -370, 482, 2606, -2972, -2324, 2492, -1762, -1662, 28, 4976, 3214, -2769, -316, -1146, -1954, 86, -60, -370, 1144, 5519, -1785, -1538, -1044, -580, -89, 704, 1151, -2586, -1094, 7473, -1220, -1076, -50, 1029, 1850, 3546, 3279, -1806, -1191, -528, 682, 1160, 1341, -2852, 1541, -2358, -1841, -2317, -1351, 993, 2417, -2675, 2482, -2061, -2089, 3681, 626, 1619, 1818, -2916, 2821, -2482, -2166, -1084, 1137, 5537, 2864, -2499, -1782, 2156, 2558, -1117, 127, 1147, 1556, -2572, 3865, -2008, -1805, -679, 119, 35, 1319, -2704, -1872, -1756, 6843, -911, 322, 1641, 2461, -2652, -1957, 1972, -1582, 3082, 84, 1086, 1487, -2983, -2325, -2780, -2532, -1858, -279, 10092, 4519, -2364, 2718, -1907, 2678, -1005, 246, 1499, 1679, -2570, 8779, -2004, -1627, -844, 89, 1712, 2145, 3316, -1763, -1642, 2819, -599, 9, 906, 1401, -2289, -2224, 2462, -1580, -843, 2501, -24, 1310, 3091, -1745, 2398, -1264, -731, 113, 831, 1328, -2803, -2380, -2808, -2379, -2290, -1376, -234, 2242, 3537, -2137, -2050, -1260, 2881, 177, 1158, 1424, -3303, -3123, -3130, -2861, -2075, 2528, -43, 3890, -3106, -2672, -2554, 1833, -826, 55, 4910, 3324, 3993, -2176, -2446, -1848, -786, 3346, 1590, 2034, -2725, -265, 303, 1076, -1985, 3661, 1556, 1983, -3182, -2712, -2988, -2841, -1332, 4816, 6422, 4184, -2230, -1248, -2176, -1806, -1617, -878, 3764, 1309, -2280, 509, -211, 426, 773, 99, 513, 628, 167, 196, -2256, -1802, -1157, 724, 1405, 1383, 2384, -409, -672, -453, -205, -89, -12, 240, 114, -2220, -807, -1302, -1612, -405, 1134, 1381, 699, -1816, -2151, -1883, 2975, 928, 1527, 1565, 775, -2141, -1981, -1532, -591, 3338, 683, 1763, 466, -2028, -2086, 1448, -622, 589, 1294, 1150, 145, -2382, -1093, -367, 986, 323, 404, 931, -371, -2868, -2737, -2103, 129, 771, 1498, 1974, -1481, -1060, -2398, -1125, 285, 2777, 2975, 1431, -2720, 1748, -2375, -1847, -912, 3829, 808, 2034, -2492, -2447, -1248, -991, 1449, 1304, 867, 1171, 
-2999, -2556, -2763, -2298, 3359, 4277, 1991, 2850, -2692, -2640, -2593, 1813, -458, 3068, 1012, 2049, -3258, -2820, -2845, -2395, 2787, -45, 5457, 3568, -2491, -2114, -1884, 6, -332, -232, 1680, 1139, 2032, -2383, -2183, -1725, -914, 192, 4175, 2059, -2922, -2972, -2920, -2210, -1143, 1850, 2468, 1871, -3138, -99, -2651, -2510, -129, 631, 1677, 1925, -3302, -3124, -3214, -3143, -2616, -761, 3978, 4234, -1698, -824, -1975, -742, 2449, -610, 21, 998, -3047, -2697, -2747, -1919, -1545, 7534, 1243, 3548, -1863, -1257, 339, -1027, 122, -613, 1989, 953, -2232, -1759, -1751, -969, -1591, 1917, -325, 889 }, .fcb22m_1 = { 13531, -1278, -2217, -1956, -1360, -892, -650, -866, -255, 192, -1139, -1242, -2101, -1682, -1601, 2950, 2340, 121, 662, 446, -2636, 1711, 615, -1864, -1297, -1098, -296, 1070, 1284, 891, 7332, -2292, -2334, -1889, -1170, 1884, -570, 52, 1146, 944, -2083, -2192, -2420, -2165, -1542, -1474, -278, 4147, 1506, 1666, 1014, -1657, -2225, -2261, 8568, -1445, -523, -115, 999, 602, -2762, -2261, 271, -1797, -1633, -790, 391, 907, 1302, 1076, -1907, -2219, -2443, -1963, -1495, -1294, 4722, 935, 1691, 1370, -2355, -1585, -2510, -2297, 2690, -1491, -647, 360, 1460, 1479, -2041, 368, 10454, -1277, -716, -172, -538, -287, 169, -232, 960, -1087, -2459, -2196, -1189, -1967, -1586, -783, 5275, 2811, -1523, -1733, -2373, -1946, -1586, -1280, -442, -205, 2330, 6319, -2483, -2115, -2645, -2016, -1464, 89, 529, 1338, 5291, 3186, 5770, -2311, -2696, -2420, -619, -2322, 8434, -129, 1661, 1232, -1377, -1277, -1193, 406, -1332, -1246, -999, -497, 1024, 1500, -2791, -1417, -2173, 2419, -1492, -734, 2795, 559, 750, 519, -2714, -509, 4622, 3679, -294, 73, -805, 602, -99, 94, -2658, -1984, 6907, -1780, -1244, 272, 874, 140, 1326, 693, -2679, -2274, -2551, 13351, -2619, 4570, -1739, 2309, 1280, 1235, -1011, -2084, -1968, -1404, 2568, 3147, -336, 270, 499, 506, -1567, -2240, -2685, -1951, -2254, 2783, -1411, 8878, 2321, 1691, -2567, -2450, -2572, -2286, -2038, -1803, -1316, -315, 464, 
1223, -1988, -927, -2035, 2165, 3663, -919, -328, 229, -2, 217, -2773, -2160, -2637, -2183, 5081, -1434, 1526, 2830, 1698, 1153, -2810, -1132, 5408, -1992, 4267, -1357, 809, 563, 9, -64, -2949, 7061, 4604, -1424, -1839, -610, -251, 370, 901, 147, -2264, 3135, 3241, -1102, -397, -1292, 39, 17, 380, 383, -1483, -1458, 820, 2135, -646, -479, 173, 23, -274, -442, -978, -1216, -1928, 7260, -1249, -956, -24, 250, 438, 128, 4080, 152, 2677, -587, -667, -672, -662, -492, -722, -688, -1907, -787, 3101, -1404, -1234, -508, 3817, 424, 657, -86, -2179, -599, 2141, -1446, -1847, 4341, -801, -26, -57, 216, -1625, -802, 1752, -1301, 2617, -1545, -513, -401, 234, 658, 1299, -1279, 874, -1408, -1135, -40, -423, 394, 660, 684, 3341, -937, -1842, -1177, 1945, -621, 19, -93, 141, -59, -2626, 3368, -1588, -1959, -1506, 3729, -347, 218, 497, 585, -2495, -2452, -2118, 578, -225, 378, 40, 1080, 908, 761, -2070, -1607, 2534, -1535, 1493, 2664, 215, 634, 317, -233, 4188, -1446, -2129, -1812, -1428, -1579, -1038, 97, 989, 2038, 3671, -2707, -2608, -2198, -1119, 1601, 1042, 1325, 1230, 1149, -2566, -1054, 3659, -2173, -1772, -713, -1080, -101, 987, 805, -1555, -749, -1510, 3443, -1402, 4172, -696, 437, 276, 219, -2735, -2453, -2082, 3898, -867, -582, -726, 1134, 1227, 1121, 2333, -963, -1474, 2386, -959, -327, -138, 4, 268, 479, -2889, -2896, -2701, -1975, -593, 1212, 1511, 1087, 1482, 1612, -1703, 4874, 46, -1364, -1342, -544, -879, -455, -488, -396, -2616, 849, -2424, -1976, -1491, -739, 325, 1284, 1831, 1223, -48, -1457, -2123, -1318, 1617, -1064, 2484, -467, 533, 707, 351, 422, -525, -657, 202, -476, 133, -679, -945, -832, 1906, -2981, -2605, -1911, -2541, 11553, -1585, 1555, 2196, 1616, -2669, -2345, -2423, -1848, -1756, 4918, -711, 1186, 1873, 1399, -672, -1401, -1524, -1138, -674, 1285, 195, 884, -377, -1067, -2125, 377, -1747, -1604, 837, -334, -115, -59, 160, 483, -2220, 12861, -1633, -1616, -926, -1203, -113, -90, 378, 148, 5740, 88, -2246, -1598, -1546, -1694, 2790, -72, 590, 28, 
-2608, 4312, -1068, 3091, -632, -651, 366, 63, 744, 375, 1746, 2753, -2075, -1621, -1033, -471, 972, 199, 575, 655, -2148, 2407, -2180, -1764, -1030, -1089, 4083, -80, 417, 384, 1196, -2284, -2549, -1771, -773, 213, 1188, 788, 1343, 1358, -2584, 7723, -2171, -2301, -1497, -438, 1001, 110, 671, 939, 6435, 5777, -1765, -1287, -1181, -1014, 87, -919, -422, -444, -1930, 4906, -1660, -1558, 3617, -1177, 261, 9, 261, -47, -2539, 2749, -2476, -2298, -1047, -1319, -341, -604, 2111, 2779, -2935, 5011, -1860, -2363, -1686, -1033, 800, 1774, 1700, 1478 }, .fcb22s_1 = { 11523, -796, -1488, -1897, -1888, -1691, -1767, -1794, -1622, -1210, -2284, -2777, -2382, -1371, -238, 2997, 3182, 588, 1129, 704, 248, 1703, -264, -1306, -1147, -560, -1513, -956, 1667, 1340, 5220, -2276, -2215, -2049, -1479, -1294, -774, 66, 1270, 2075, -1435, -1981, -2322, -1896, -1321, -462, 138, 5022, 2549, 1683, -100, -1744, -1528, -423, 6093, -61, -288, -623, -650, -828, -1521, 134, 1240, -1399, -1450, 612, -969, 2585, 945, -312, -1138, -2488, -2513, -1988, -1607, -773, 3384, 1192, 2651, 2580, -984, -2015, -1465, -1576, 2273, -1221, 91, 2615, 840, 1299, -1069, -2151, -1899, -735, 440, 888, -241, 502, 953, 3613, 1806, -1855, -2303, -1758, -1318, -1484, -10, 597, 3723, 1992, -488, -2063, -2284, -2172, -1905, -1547, -937, -18, 3276, 7184, -1942, -2302, -2399, -1972, -1378, -635, 302, 1081, 5454, 3358, -447, -807, 205, -1805, -1546, -446, 6364, -916, 151, -377, -582, -856, -204, -731, -884, -674, -257, -67, 1564, 2486, 1003, -1508, -1692, 1515, -889, -622, 2366, 9, -17, -245, 3733, -1057, -284, 3197, -31, -440, -1115, -1609, -1834, -1930, 230, 262, 7344, -39, -1746, -562, -1554, -1838, -1648, -1310, 2157, 80, -102, 238, -823, -622, -720, -115, -274, 16, -1562, -1785, -1535, -334, 2604, 3388, -410, -103, -348, -142, 1676, -441, -2267, -1988, -1421, -680, 1302, 2682, 383, -10, 1487, -1086, -251, -1134, 141, -84, -1003, -898, 95, 2304, 802, -1549, -1562, 2650, 2180, 64, -512, -832, -705, -429, 1826, -2283, 
-1976, -1277, 2699, 504, 249, -9, 178, -33, -1357, -1138, 3005, 293, 229, 1633, -197, -540, -1245, -1617, -1269, 6639, 2437, -647, -1501, -1097, -1051, -1150, -1183, -1461, 71, 1529, 2847, 1149, -705, -1345, -1605, -629, -617, -60, -2081, -1435, 938, 844, -1055, -841, 1179, 392, 1112, 946, -1252, -1728, -266, 7063, -1335, -920, -1048, 206, 48, -619, 4764, 274, 2394, -799, -798, -1003, -1278, -1800, -1626, -1415, -498, 1439, 1643, -1978, -1258, -1136, 1285, -9, 596, 141, -2211, 908, 802, -470, -1125, 3216, -234, -412, 3, -980, 15, -1047, 1530, 660, 1986, -480, -499, -550, -733, -531, 1326, -1607, 787, -1136, -1002, -65, 358, 743, 253, -294, 3498, -1033, -1270, -790, 537, 1788, 309, -72, -1241, -1999, 609, 2981, -1025, -1642, -958, 3845, -1221, -962, -965, -1612, -1993, -33, -1136, 1086, -46, 1178, -229, 139, 644, 718, -1696, 2411, 1019, -1056, 52, 224, -487, -395, -40, 125, 3001, -1955, -1950, -784, -1111, 897, -514, 159, 785, 1095, 2944, -2554, -2407, -1975, -632, 1030, 1712, 366, 463, 125, -2354, -796, 5663, -1055, -1151, -870, 348, -676, 1447, 215, -1005, -1531, -910, 2249, -438, 2889, 107, -404, -271, -534, -1022, -2117, -1738, 2261, -257, -788, 32, 1747, 1196, 910, 33, 1, -23, 28, -25, 19, 13, -29, -23, -48, -907, -2113, -1978, -1426, -535, 1589, 1908, 2724, 1646, -897, 758, 2326, 674, -1449, 111, 220, 475, -162, -1465, -2036, -528, 1308, -2087, -2031, -1308, 183, 35, 1097, 1008, 1864, -2116, -2303, -1928, -261, 2342, -292, 1480, 268, 1582, 1079, -1183, -1154, -777, 309, 1218, 683, 1314, 1677, -758, -1745, 1422, -1331, -1638, -1100, -303, 5003, -57, -379, -511, -756, -727, -2315, -1860, -1775, -676, 3854, -67, -52, 2018, 1532, -160, -197, -75, -1934, -1134, 2025, 1810, -491, 83, 646, 390, -297, -441, -342, -479, -486, -296, -30, 443, 1151, 3508, 6119, -493, -1427, -1393, -1273, -1280, -1687, -1683, -1511, 5109, -1008, -1137, -638, -649, -342, -590, -478, -577, -349, -579, 2548, -463, 2107, -568, -678, -788, -454, -608, -452, 1934, 1485, -1746, -1007, -1174, 
-573, 239, -119, 679, -76, -1687, 1956, -898, -477, 456, -156, 1460, 13, 92, -987, 554, -2772, -2578, -1694, -235, 753, 1527, 1106, 1539, 1342, -1305, 6560, -1526, -1765, -793, -600, 248, -542, -63, -421, 4828, 1288, -1580, -1826, -1163, -1014, -221, -818, -109, -61, 1265, 1939, -1265, -414, 1912, -190, -1157, -675, -756, -935, 2529, 136, -1709, -1727, -1819, -1504, -1232, -959, 1128, 4142, -1945, 2958, -900, -1432, -1720, -1380, 381, 1473, 1235, 1062 }, .fcb22l_2 = { 2441, -2086, -2129, -2146, -1839, -1035, 295, 2465, -2785, -2597, -81, -2162, -991, 3060, 3056, 2985, -2415, 4009, 3058, -1165, -1281, -322, 629, 2232, 481, -2255, 1165, -1455, -621, -29, 923, 1371, -2822, -2421, -2596, -1908, 6338, 279, 1845, 3532, -2955, -2571, -2554, 744, -1785, -909, 775, 3156, 738, 1760, -458, -590, -73, 22, -91, 326, -1098, -1511, -1000, 1741, -1024, -562, -399, 736, 9669, -2109, -1872, -1539, -1208, -265, 994, 2364, -2121, -98, 1523, -1427, -1450, -1157, -294, 1375, -3007, -2669, -2847, -1777, -1196, -1257, 1065, 9128, -2948, -2509, 470, -2521, -1947, -728, 503, 3810, -2538, -2469, -2217, 1957, 2580, -229, 1212, 2263, -3174, -2660, -2792, -2692, 1226, -512, 555, 3960, -2979, -2426, 1978, -2182, -1868, -455, 4681, 4580, -2514, -1642, -1029, -1712, 416, -838, -362, 1208, 5211, -2128, -1867, -1337, -549, -70, 828, 1508, -2272, -1611, 7307, -1612, -1244, -461, 749, 2510, 3669, 3236, -1845, -1333, -866, 268, 850, 1686, -2805, 1079, -2258, -2075, -2017, -1115, 214, 2735, -2719, 2676, -2154, -1976, 2884, 393, 1247, 2382, -3043, 2188, -2703, -2353, -1861, -208, 4419, 4511, -2187, -1630, 2246, 2331, -1105, -198, 818, 1721, -2180, 3571, -1841, -1738, -1020, 14, 407, 1028, -2536, -2171, -2115, 6630, -968, -306, 1438, 3574, -2411, -1857, 1911, -1546, 2709, 57, 910, 1727, -3159, -2565, -2675, -2746, -2017, -534, 8461, 6103, -2299, 2912, -1851, 2660, -1479, -97, 1148, 2204, -2510, 8781, -2194, -1790, -1114, -110, 1140, 2885, 3261, -1921, -1633, 2766, -788, -403, 610, 1651, -2515, -2021, 
2415, -1606, -1149, 2479, 297, 1693, 3823, -1538, 2514, -1261, -904, -236, 550, 1581, -2903, -2440, -2922, -2749, -2480, -1849, -423, 3613, 3420, -1876, -1929, -1537, 2955, 58, 1014, 1950, -3295, -3009, -3161, -2926, -2353, 2355, 351, 5502, -3140, -2745, -2781, 1247, -1037, 538, 4939, 4382, 3584, -2284, -2321, -1844, -743, 3156, 1546, 2358, -562, -101, -497, -1196, -1023, 1972, 1255, 1374, -3146, -2824, -3057, -2757, -1736, 3746, 5609, 5118, -2155, -1665, -1701, -1780, -1975, -1127, 3185, 2036, -2540, 324, -481, 311, 624, 719, 543, 1030, 550, 513, -2430, -1817, -1129, 62, 1526, 1809, 2172, -1314, -1035, -586, -292, 233, 209, 543, -252, -2372, -1961, -1629, -1306, -408, 451, 1339, 792, -2619, -2316, -1624, 1941, 678, 977, 1710, 428, -2499, -2369, -2101, -1448, 2988, 874, 2497, 451, -2263, -2204, 1403, -631, 694, 1424, 1658, -243, -2104, -378, 355, 1446, 373, 377, 973, -756, -2802, -2508, -2081, 177, 352, 2428, 2359, -1533, -2710, -2544, -1102, 419, 3132, 1222, 1942, -2756, 1844, -2429, -1854, -1283, 3960, 1633, 2917, -2858, -2784, -2106, -1025, 1588, 905, 1092, 1657, -3028, -2715, -2782, -2218, 2852, 4006, 2534, 3726, -2783, -2355, -2146, 2113, -1201, 3361, 1178, 2670, -3199, -2796, -2682, -2489, 1905, -471, 5097, 4436, -2197, -1078, -2327, 420, -637, 10, 1647, 1362, 1815, -2519, -2363, -2174, -1454, -31, 4125, 3446, -3054, -2953, -2738, -2328, -1636, 1086, 2238, 2132, -3089, -432, -2674, -2515, -168, 745, 2236, 2305, -3214, -2953, -3159, -3086, -2748, -1200, 3346, 5127, -1150, -501, -2109, -1662, 2301, -401, 651, 1320, -3072, -2608, -2833, -2249, -1387, 7704, 1811, 4960, -2474, -2589, 83, -499, -785, 194, 1312, 1442, -2716, -1663, -2088, -1812, -1396, 1862, -369, 1397 }, .fcb22m_2 = { 8809, -2291, -2452, -1982, -1356, -423, 419, 588, 897, 1086, 79, -2155, -1957, 367, 1080, 233, 718, 441, 515, 642, 730, 2454, 774, -2299, -1526, -784, -359, 96, 385, 482, 4905, -2501, -2431, -2047, -1139, 131, 743, 999, 1243, 1294, -2154, -433, -2461, -2201, -1552, -163, -200, 4009, 
1731, 1652, -2381, 5295, -1457, -895, 3480, -1230, -94, 471, 554, 669, -2458, -1271, 278, -2238, -1852, -813, 888, 1032, 801, 1008, -258, -538, -1744, -2087, -1651, -1239, 2222, -4, 783, 882, 478, 782, -1335, -1453, 1728, -627, -387, -205, 221, 193, -2282, -518, 7464, -1808, -1134, -199, 340, 321, 410, 617, 2278, -436, -2082, -1958, -1493, -885, 628, 794, 855, 989, 232, -1115, -2617, -2152, -1290, -1299, -458, 222, 3936, 3349, -2240, -2787, -2689, -2255, -1241, 816, 2307, 1566, 1685, 1723, 2960, -2134, -2532, -1798, -1128, -1073, 5380, 1013, 1525, 1415, -1976, 456, -538, -1433, -1347, 22, -496, 284, 387, 465, -2214, -1863, -2261, 1049, -1487, -1222, 1610, 621, 1000, 1116, -2393, -731, 4075, 2375, -1178, -908, -383, 327, 543, 572, -2071, -2039, 3310, -1903, -1502, -72, 123, 693, 721, 918, -1866, -1251, -1065, 5630, -1574, -541, 1, 1014, 813, 887, -2145, -2421, -2176, -1756, 1856, 408, -1, 759, 1109, 1276, 3053, -2705, -2467, -2068, -1160, 1405, 459, 1167, 1219, 1318, -2198, -2037, -2005, -2204, -2039, -1473, -1529, 264, 1333, 1822, -2121, -1434, -472, 1901, 2448, -589, -424, 248, 376, 602, -1571, -1032, -1243, -1619, 5682, -1162, 362, 570, 865, 852, -1875, -805, 4258, -1569, 2992, -1175, 51, 164, 314, 648, -2083, 5574, 2553, -1866, -1156, -642, -198, 330, 446, 602, -2365, 1601, 2873, -2043, -1510, -1142, -20, 588, 535, 676, -2207, -1637, 626, 745, -1548, -590, 745, 540, 505, 618, 749, -1389, 857, 1387, -398, -606, -75, -86, 11, 78, 3322, -1347, 1978, -1431, -745, -280, -42, 135, 350, 376, -508, -1349, 2961, -1184, -647, -1257, 3009, -374, 523, 616, -1848, -41, 2652, -1609, -1603, 3284, -24, 502, 122, 448, -2337, -1029, 734, -1533, 1523, -1312, -754, 335, 510, 774, 769, -117, 139, -1254, -1468, -965, -375, 2, 227, 518, 3187, -1524, -776, -1253, 2977, -530, 319, -61, 244, 413, -2290, 3085, -1763, -1480, -1374, 3272, -87, 323, 421, 652, -2317, -2182, -1604, -1, -801, 1320, -156, 907, 799, 918, -1494, -2205, 1137, 69, 1249, 3437, 925, 29, 419, 448, 3574, -1564, -1713, 
2374, -941, -252, 123, 263, 366, 539, 1059, -1856, -1753, 766, -1704, 106, 262, 596, 684, 820, -2503, -1878, 1835, -594, -1024, -2105, -1567, 488, 794, 883, -1626, -613, -1410, 2846, -1413, 3557, -348, 460, 332, 577, -907, 700, -1680, 1130, -1637, -793, -160, -38, 473, 630, 1487, 1872, -1526, 1379, -806, 121, -383, 149, 259, 413, 759, -2817, -2758, -2290, -1348, 460, 1782, 1536, 1513, 1503, -2265, 3193, 117, -1704, -1367, -487, 125, 365, 594, 651, -2287, 1272, -2537, -2038, -1515, -578, 2994, 582, 941, 1058, -1556, -1583, -720, -1584, 956, -1032, 1861, 146, 402, 429, -2184, 1667, 1241, 289, 52, -232, -265, 210, 248, 331, 1133, -1813, -1869, -1429, -1484, 5620, -400, 1316, 1146, 1150, -1975, -818, -1921, -2054, -1768, 2953, -544, 426, 856, 1107, 493, -2019, 176, -1915, -1040, 717, -91, 728, 647, 776, -2360, 739, -2136, 30, 636, -447, -116, 498, 531, 775, -2250, 8607, -2075, -1928, -1072, -450, 38, 439, 558, 778, 4484, 1056, -1830, -1716, -988, -412, 260, 56, 425, 579, -2243, 4094, -1267, 2172, -990, -562, 97, 304, 533, 609, 790, 780, -2029, -1947, -1327, 1224, 255, 344, 516, 660, -591, 1702, -118, -1402, 396, -1387, 2268, -247, 177, 355, 1393, -2318, -1975, -1563, 863, -939, -365, 411, 800, 1019, -2370, 4656, -2301, -2111, -1679, -698, 458, 788, 1004, 1138, 2285, 4924, -1940, -1955, -1159, -436, 237, 5, 300, 364, -2492, 2165, -2021, -2072, 1504, -612, -93, 249, 676, 799, -2411, 1952, -1752, -2418, -2285, -1323, -621, 837, 1043, 1266, 76, 3160, -2176, -2176, -1717, -1105, 1045, 410, 728, 940 }, .fcb22s_2 = { 6946, -1850, -1986, -1590, -1276, -1063, -1026, -1017, -805, -346, 9, -2911, -2843, -1899, -198, 2193, 3325, 1315, 37, -528, -371, 599, -751, -2157, -1912, -855, 988, 1222, 1085, 953, 3212, -2793, -2564, -1707, -657, 683, 1109, 683, 647, 446, -1906, -2315, -2569, -2428, -1698, -600, 1100, 3790, 3368, 2172, 2017, -895, -1354, -734, 2552, -403, -68, -402, -752, -932, -1205, -1937, 572, -1434, -500, -579, 291, 1723, 1312, 1695, -238, -1715, -2029, -1525, -816, -363, 
2816, 167, 2196, 1793, 897, -1081, -262, -1338, 1052, -1231, -94, 1296, 503, 184, 588, -2057, -911, -1933, -1769, 167, 1013, 1774, 1414, 1289, 2406, -1906, -2055, -1952, -1726, -1618, -451, 575, 3021, 2569, -776, -1649, -2111, -1930, -1499, -1349, -595, 329, 3090, 5458, -1954, -1309, -1554, -1159, -1132, 329, 714, 760, 2529, 2417, 1046, -1025, -1114, -1325, -154, -1501, 4160, -696, 230, 398, -2010, 385, -1344, 36, -1269, -987, 1009, 1453, 1163, 1591, 916, -1534, -508, 221, -1596, -1130, 1394, 539, 676, 676, 1263, 2029, 284, 1592, 161, -124, -572, -1362, -1946, -2148, -1488, -222, 4967, -1202, -939, -375, -80, -593, -445, -418, -781, -1560, 31, 4757, -1417, -954, -402, 193, -316, -278, -926, -895, -1024, -436, 2673, 1991, 254, 28, -861, -1291, 1475, -2708, -2689, -2118, -703, 290, 1841, 2048, 1213, 594, 132, -2598, -2427, -988, -1111, -158, 478, 2118, 2571, 830, -1430, -678, -773, 1340, 2473, -798, -751, 215, 274, -65, 335, -1947, -1796, -1436, 3862, -611, 105, -31, 775, 669, 1439, -1266, 1670, -739, -1259, -572, -17, -107, 176, 130, 3899, 3478, -548, -1429, -1176, -1104, -1147, -1503, -1277, -1068, -258, 2645, 1753, -333, -827, -1306, -827, -502, -306, -119, -1602, -1644, 1922, 1127, -628, -1073, 348, 195, 616, 685, 1750, -898, -1852, 1813, -700, 254, 598, -234, -433, -1035, 2502, 94, 467, -1672, -905, 776, 679, -11, -1071, -1845, -1083, -320, 690, 110, -708, -1077, 2514, 70, -412, -300, -371, -717, 1700, -1625, -1346, 1954, 14, -64, -121, 181, -673, -909, 2274, -1389, 2058, -1503, 306, -187, -209, -69, 1523, -632, -695, -1283, -988, -569, -798, -521, 398, 2834, 1953, -2215, -1626, 106, 6, -498, -57, 173, 731, 1002, -1706, 1701, -328, -1745, -1398, 2176, -19, 311, 492, 667, -1073, -1803, -1684, 703, -1316, 1803, 659, 913, 906, 1033, -1982, -102, 945, -1620, 718, 555, 613, 38, 394, 421, 2738, -1159, -2248, -1852, -1568, 33, 363, 1490, 935, 561, 1464, -2466, -1209, -1204, -692, 2009, 129, 354, 372, 380, -2053, 1122, 2272, -824, -1355, -926, -122, 567, 526, 923, 
-1320, 59, -226, 1674, -1512, 1498, -631, 221, 26, -247, -40, -1615, -1597, 2111, 34, -813, 200, 219, 758, 1000, 306, 394, -430, -117, -409, -81, -207, 16, 36, 176, -1737, -2898, -3005, -2214, -568, 2140, 4132, 2592, 504, -521, -1509, 3610, 1070, -1890, -1319, -11, 174, -148, -212, -347, -464, -1068, -2568, -2532, -1973, -519, 2104, 3713, 1882, -145, -1319, -2375, -1862, -843, 2061, -266, 1465, 866, 912, 1183, -1784, 2072, 205, -375, 1112, -374, -534, -430, -162, -204, 375, 82, -823, -1148, -752, 4681, -339, -247, -790, -1088, -494, -2302, -2310, -1603, 46, 3367, -50, 393, 1383, 1457, -1377, -2005, 643, 326, 312, 1189, -225, 563, 261, -70, -667, -1191, -2255, -470, 1000, 142, -525, 2285, 756, 2061, -953, 5888, -1339, -1534, -1252, -16, -116, -305, -375, -596, 3611, -889, -511, 43, -809, -659, -737, -510, -258, -108, -1515, 2806, -1555, 1025, -932, -601, 146, 164, 207, 71, 1606, 93, -2420, -2311, -1641, -244, 1785, 804, 1040, 427, -1510, 38, -2490, -1987, 44, 699, 1407, 988, 1061, 411, 1162, -1382, -2669, -1635, -905, 1503, 674, 1357, 869, 244, 411, 2612, -1792, -2147, -1693, 1434, 281, 38, 228, 424, 2291, 1354, -2128, -1377, -1014, -609, 131, -151, 418, 602, 111, 2200, -1547, -1153, 1435, -1282, 6, -111, -1, 92, 238, 613, -2271, -1181, -1455, -919, -182, 1066, 1932, 1679, -1715, 2825, -1764, -1759, -741, -829, 501, 746, 1056, 1416 }, .fcb44l = { 4868, -1851, -2031, -2019, -1751, -552, 756, 929, 1389, 1590, -2090, -1202, -1317, 516, -1798, -1020, -694, 4322, 1388, 1904, -2605, -1239, 1005, -757, -1248, -358, 699, -201, 409, 1093, -2901, -2254, -2605, -2595, -2104, -1681, 6854, 2692, 3155, 3446, -2535, -1421, -1745, 898, -2046, -1457, -1044, -269, 1748, 1873, -2268, -1098, 407, -1865, -2103, 1510, -1217, -399, 1718, 2017, 3638, -1685, -1547, -1480, 1637, -744, 580, 586, 1313, 1409, 617, -2020, -1919, -2179, 932, -937, 559, 1795, 1528, 1596, -2867, -2553, -2507, -2653, -2365, -1985, -170, 8679, 4271, 4273, 2263, -1835, -1934, -1719, -1778, 2357, 125, 1319, 1543, 1765, 
3689, -1215, 2369, -1533, -1611, -771, -123, 1005, 1297, 1465, -2491, 2631, -1636, -1655, 1244, -1178, 386, 961, 1300, 1553, -2357, -2404, -2305, -2177, -1714, -383, -98, 258, 3902, 2475, -2923, -2580, -2685, -2803, -2678, -2428, -1247, 450, 8174, 5035, -2302, -1629, -1495, 1832, 1616, -577, 639, 872, 1122, 1437, 785, -1947, -1976, 823, -1909, -1005, 430, 1244, 1713, 1664, -2537, 8025, -1705, -2005, -2030, -1155, 64, 1106, 1975, 2277, -2410, -2639, -2292, -1858, 162, 744, 555, 1559, 1719, 1806, -2282, -1982, -1914, 1415, -1785, 2197, 254, 763, 1338, 1741, -2509, -1991, -2328, -1853, -2299, 5145, -34, 1495, 2913, 3018, -2009, -1736, 2411, -1595, 1877, -1316, 693, 1042, 1565, 1744, -2657, -2161, -2222, -2135, 4454, -1784, 1331, 3208, 2852, 2955, 3738, -1338, -1425, 2090, -1601, -279, -2, 712, 1220, 1436, -2385, -1265, 7093, -1561, -1742, -1003, 283, 1009, 1843, 2055, -2251, -2175, 2310, -1321, -1976, 1874, 164, 2781, 2721, 2487, 2519, -1101, -1539, -1575, -1487, -724, -25, 355, 643, 1011, -2296, -1799, -1895, -1700, 2743, -924, -254, 32, 1504, 1910, -2811, 898, -2363, -2518, -2408, -1737, -936, 221, 2588, 2527, -2535, -2360, -2477, -1861, -1882, 1833, 3587, 1307, 2141, 2274, -433, -1994, -1692, -1318, -1398, -350, 1518, 1923, 835, 1262, -2246, 3383, 2458, -1464, -1874, -983, -157, 531, 1490, 1729, 9543, -1713, -2011, -2015, -1870, -969, -34, 1160, 1724, 1919, -2530, 140, -1923, -1730, -1720, -605, 629, 1577, 974, 1373, -2268, -1582, -933, 1124, -1624, -514, 4156, -118, 1515, 1907, -2267, -574, -1311, -954, -47, -1259, 15, 364, 854, 1009, -2221, 629, 994, -1646, -1324, -1509, 2359, 3453, 1393, 1912, 3586, -2286, -2537, -2560, -2415, -1748, -368, 3093, 2881, 2611, -2556, 2792, -1558, 1117, -1681, -65, -36, 516, 1233, 1514, 531, -1814, 998, -1795, -1693, -871, 725, 868, 1504, 1465, 907, 300, -2060, -2366, -2392, -1881, -596, 1754, 2169, 2104, -2755, 2709, -2298, -2627, -2423, -1875, -733, 3886, 2648, 2821, 623, -541, -163, -319, 85, 84, 15, 716, 511, 572, 3948, 2773, 
-1504, -1746, -1832, -934, -78, 988, 1277, 1518, -2678, 2216, -2162, -2331, -2076, -968, 3445, 1070, 2077, 2206, -2892, -2425, -2674, -2905, -2844, -2584, -1381, 3269, 2696, 3281, -2090, -369, -1515, -1367, -200, 2089, 739, 700, 866, 1169, -2276, -1057, 2851, 2589, -1686, -515, -65, 579, 1278, 1593, -2837, -2458, -2565, -2783, -2843, -2468, -1704, -1531, 1475, 4153, -2209, -1857, -1873, -2177, 758, -1531, 3207, 1163, 1506, 1851, -2383, -1683, -1839, 5772, -1815, -465, 361, 1086, 1912, 2140, -2629, -1688, 1608, -2190, -2419, -2064, -1253, 1397, 2099, 2306, 513, 1664, -1683, -1629, -1682, -109, 269, 695, 1072, 1317, 208, -1602, -1918, -1038, -813, 312, 24, 26, 761, 990, -2288, -2225, -1948, -1932, -1832, -949, -450, 920, 805, 1468, -2897, -2633, -2557, 464, -2174, -1157, 1170, 2230, 2550, 2522, -2643, -1928, -2255, -2578, 82, -2206, -63, 2663, 2007, 2292, 226, -2541, -2687, -2753, -2229, -1556, 785, 3837, 2331, 2492, -2496, -1740, -2465, -2295, -2151, 1142, 363, 3967, 1943, 2432, -2619, -2400, 520, -2274, -1900, -1486, 2135, 1407, 2300, 2288, -2811, -3066, -3128, -3098, -2529, -1475, 2172, 3413, 3613, 3571, 564, -2347, -2257, -2377, -1944, -1771, -582, 509, 1683, 1975, -285, -2136, -2529, -2464, -2117, 278, 1094, 1042, 2192, 1976, 1781, -1874, -2042, -2103, -1744, -1044, 3373, 1252, 1861, 1873, -2688, -1849, -2462, -2494, -2105, -1903, 2221, 250, 1653, 2233 }, .fcb44m = { 13151, -1763, -2583, -2518, -2181, -1036, -537, -112, 214, 590, -608, -2270, -2228, -1301, -1018, 3687, -471, -282, 909, 1665, -2426, 1713, -808, -1240, -1366, -976, -140, 1730, 683, 191, 7253, -2076, -2733, -2698, -2253, -1116, 376, 687, 1314, 1532, -820, -1471, -2092, -2047, -1796, -1347, -732, 6348, 2529, 1441, -1460, -1845, -1046, -1643, 8086, -928, 90, 660, 428, -188, 604, -2022, 556, -1680, -1641, -902, 834, 941, 1480, 1906, -2439, -2573, -3091, -2853, -2419, -1606, 2312, 2624, 2659, 2659, -1286, -2273, -2400, -1826, 2443, -1391, 685, 1822, 1810, 1625, -1993, -731, 9737, -1476, -1183, -1244, 
34, -85, 0, 201, 4171, -2430, -2869, -2866, -2488, -1154, -1253, 282, 2715, 3643, -2130, -2522, -3259, -3051, -2977, -2204, -1264, 1103, 7113, 7948, -1271, -1694, -2011, -1294, -1607, 247, -303, 715, 4276, 1908, -2337, -2111, -2232, -2123, -1648, -1302, 7686, 1213, 982, 984, -2594, -2127, -1981, -2104, -2405, -1966, -936, -95, 326, 672, -2263, -893, -1367, 1288, -1321, -1351, 2503, 747, 390, -247, -2220, -860, 3641, 3766, -1724, -1487, -531, 239, 134, -82, -2563, -1537, 3883, -1911, -2109, -1713, 1056, 726, 977, 1091, -1874, -1366, -1628, 11069, -1653, -696, 118, -78, 337, 29, 2449, -1438, -601, -1533, -816, 1262, 540, 79, -460, -1403, 3204, -1918, -1892, -1911, -1468, -976, -42, 2785, 1088, 564, -236, -2267, -2324, -2130, -1880, -427, -258, -543, 903, 5142, -1791, -1611, -1073, 2911, 2993, -1295, -400, 229, 192, -85, 4461, -1711, -1431, -1640, 3525, -1398, -101, -219, 327, 415, -669, -1520, 6595, -1291, 5123, 155, -480, -518, -552, -890, -2609, 7074, 3220, -1054, -1852, -1165, -25, 89, -361, -140, -1610, 2214, 2903, -1737, -1704, -1178, -708, -171, 177, 674, -1075, -890, 82, 463, -1432, -1048, -703, -759, -247, 344, 2448, -656, -1135, 4366, -583, -705, 40, -314, -676, -1271, 4389, -952, 3249, -1606, -1524, -1172, -490, 97, 128, -91, -66, -1293, 1696, -1114, -1455, -519, 2620, 479, -257, -1512, -2037, -1281, 1752, -1285, -1812, 2789, -52, 676, 409, 296, -1977, -1043, 270, -1615, 2131, -1051, -161, -498, 767, 1673, 1044, 27, -1107, -1730, -1856, -1264, -275, -167, 893, 443, 3850, 97, -1244, -1691, -1566, -1088, -1062, -837, -159, 1830, -1424, 2494, -1878, -1532, -1991, 2919, 62, 399, 524, 381, -1340, -2415, -2028, 218, -1342, 410, 815, 533, 948, 1998, -1213, -1847, 3691, -2123, 1822, -1548, 537, 987, 356, 123, 3876, -2476, -2021, -2195, -1562, -737, 2250, 709, 797, 1102, 2065, -2258, -2394, -1816, -1536, 1059, 4653, 1457, 456, -27, -2226, -736, 765, -1879, -2188, -1793, -928, 892, 1793, 2257, -1182, -1646, -1789, 6105, -1936, 4316, -307, -143, 223, 236, -2213, 
-1862, -1823, 3326, -1810, -1384, -453, 1007, 1331, 1405, 4135, -2298, -1657, 1981, -1702, -853, -318, 298, 760, 1025, -2537, -2782, -2985, -2687, -2839, 4493, -448, 4249, 3048, 2678, 1045, 3227, -690, -1390, -976, -652, 587, 194, -749, -1358, -730, 250, -2404, -2548, -2157, -1027, 32, 2091, 1059, 1360, 262, -2135, -2061, -1777, -1614, -246, 2004, 2605, 1516, -948, -1060, -1076, -1643, -748, 144, 1595, 1730, 531, -1086, -2182, -483, -2191, -2411, -1983, -2345, 10051, -841, 1456, 924, 207, 4652, -1831, -2026, -1710, -2235, 4036, -755, -70, 533, 887, -1899, -2326, -2129, -2115, -1606, 1443, 2557, 941, 618, 527, 949, -1547, -2067, -1785, 455, -60, 79, 202, 912, 954, -2527, 14551, -1893, -2315, -2609, -1844, 497, 287, -197, 626, 6839, -804, -1299, -1259, -1109, -97, 976, 144, -343, -1375, -2334, 3740, -1049, 2980, -1739, -474, 223, 137, 155, -171, 2962, 1814, -2378, -2643, -2249, -1109, 858, 643, 1630, 1399, -2098, 974, -1718, -2193, -2146, -1488, 3353, -147, 1187, 1266, 1559, -2532, -2941, -2759, -2101, -1098, 1562, 1049, 2045, 2159, -2298, 7439, -2129, -2361, -2318, -1552, -422, 482, 985, 1111, 6050, 5657, -1698, -2267, -2127, -1135, -140, -286, -352, -124, -1230, 3492, -1370, -1221, 2958, -1239, -472, -722, -169, -89, -2310, 2988, -2367, -2421, -2589, -2034, -662, 421, 1863, 2736, -2612, 5429, -2104, -2257, -2440, -1817, 4819, 883, 622, 636 }, .fcb44s = { 11239, -328, -2011, -1713, -1662, -1290, -1225, -1520, -1541, -912, 400, -1103, -2698, -162, 263, -964, 668, 405, 732, 2493, -2491, 1000, -2910, -793, -1351, -515, 1051, 2002, 1757, 2150, -2010, -2021, -2254, -1896, -1953, 664, 7067, 2632, 531, -1367, -2228, 2113, -2019, 2309, -1458, -426, 1242, 338, 205, -222, -1317, -1806, -2477, -2427, -2477, -1852, -1472, -911, 2261, 10280, -2369, 382, 3180, -1210, -1601, -748, -732, 504, 1440, 1142, -13, 610, -2457, -739, -1318, -1013, -52, -470, 627, 4734, 1248, 2947, -631, 1560, 2096, -833, -1173, -1475, -2060, -2189, 967, -1451, -1544, -758, -538, -31, 1395, 3550, -3, 
-1999, -1975, -1734, -2680, -2512, -2037, -1306, -252, 1288, 6012, 4834, -1087, 3259, 3115, -1369, -1136, -948, -264, -582, -677, -643, -2500, 1284, -317, -1872, -1150, -1150, 310, 832, 1597, 2842, 6295, 3806, -671, -1536, -1460, -1256, -1223, -1504, -1672, -1471, 1358, 1004, -1893, 1114, -1643, -103, -513, 189, 303, 140, -1618, -648, -720, 7274, 573, -180, -731, -1226, -1564, -1742, 151, 2103, -1562, -974, 94, 546, 3536, -205, -1657, -2534, -2187, 2840, -1248, 451, 2615, 171, 479, -305, -1299, -1708, -2144, -1593, -1289, 2766, 2287, -400, 188, -51, 141, 105, -2128, 4976, -1690, -1216, -1175, 297, 1454, 449, -478, -970, -1914, -1459, 3036, 2668, -950, -634, -507, -374, 4, 34, -1664, 2901, 847, 2817, -1154, -1651, -1262, -1160, -624, 629, 1578, 765, -2002, -2121, -1527, 1938, -272, 113, 287, 955, -1473, 60, 8047, 137, -534, -841, -1077, -1504, -1788, -1758, -1871, 119, 931, 1775, -704, 2792, 354, -501, -1370, -2038, -1031, -1631, -1914, -879, 377, 7589, 173, -196, -491, -1658, 3790, -773, -1731, 3028, 49, -1013, -563, -1232, -953, -730, -2568, -1926, -679, -267, -324, -962, 51, 461, 2728, 3631, 3533, -1690, -2846, -2370, -1945, -917, -551, 276, 2634, 3558, -2592, -1750, -2422, -1586, -1204, -1001, 4603, 1802, 2673, 1685, 2710, -853, -2321, -1919, -1603, -868, 3706, 290, 570, 338, -2245, -1704, -1915, 545, -787, 1635, 1725, 526, 666, 1604, 642, -1154, 3231, -1232, -1772, -623, 217, 27, 3, 641, -2411, 1924, -967, -1583, -1499, 2316, 1354, -115, 333, 559, -1721, 2475, -1942, -2114, -1196, -571, 1769, 2350, 1315, -607, 4510, -1414, -2228, -1312, 1439, 469, -248, -399, -270, -721, -1517, -1247, -771, -36, 6488, 942, -279, -572, -1041, -1908, -2388, -2281, -2595, -2275, -1529, 51, 471, 4435, 3002, 2738, 4049, 1562, -2706, -1672, -1649, -1204, -518, -280, 774, 1344, 6, -1950, -1521, -1768, -972, 1420, 3011, -191, 644, 1478, 3220, -313, 3030, -153, -841, -739, -378, -1013, -1410, -1815, -2104, -1033, -2097, -1992, -943, 2391, 424, 369, 1601, 3331, 1494, -2060, -2027, 1579, 
-1407, 1120, -280, -197, 761, 1048, -710, 4094, -1533, -1984, -1620, -1132, -515, -485, 971, 2644, 3979, -661, -1891, -1120, -897, 2484, 1623, 21, -1534, -2438, 3201, -1510, 858, -1459, -711, -1332, -833, -240, 763, 1096, -1435, -29, 3174, -1773, -19, 708, 1680, 403, -910, -2224, -2670, -619, 1320, -751, -1323, -1022, 2875, 1080, 985, 1, 191, 7823, -475, -604, -1126, -967, -1139, -1600, -1767, -1161, -1342, -1960, -2112, -1793, -1596, 3103, 535, 2001, 3235, 151, -2266, -807, -1977, -1661, -1255, 2328, 2632, 3189, 621, -1130, -2183, -1127, 2391, -884, 2173, -690, -354, -516, 352, 954, 1847, -74, -1260, -1839, 2557, -1221, 228, -630, -162, 386, 1462, -1889, -2596, -2216, -1869, -518, 1281, 2329, 2653, 1117, -1535, -1038, -1752, -1862, -1635, -1067, 994, 5212, 2719, -264, -2021, 1824, -2110, -619, 1538, -397, -332, -153, 860, 1281, 6568, -1790, -2459, -1707, -1708, -799, 294, 89, 475, 992, -1668, -1819, -2010, -1623, 2079, 3255, -388, 591, 1477, 581, -1544, 476, -1825, -959, -1296, -1037, -453, 1146, 4693, 839, 2027, 3021, -1731, -1746, -1964, -1115, 1197, 102, 164, -162, -2301, -1281, -2022, 3983, -1122, -281, 85, 352, 1042, 1599, 6463, -93, -2010, -1988, -2282, -2189, -1915, -1721, 17, 4694, 424, -998, -111, -1995, -1246, -1176, 78, -116, 1951, 3059, -1974, -1783, -2243, -1238, 3935, -928, -15, 1265, 1536, 1907 }, .shape08 = { 5279, 1101, 12974, 5624, 2029, 3853, 5918, 1516, -2905, -224, -92, -819, 803, 1091, 3091, -3355, 152, -1214, -7317, -738, -8973, 546, 12035, -937, 2216, 2113, 1214, -6577, 2006, -1661, -673, -5880, 496, 454, 3400, 676, -322, 11388, 634, -1169, 12556, -5804, -7724, 588, -6801, 1080, 354, -1681, -942, 1926, -487, -580, 156, 79, 15253, 667, 1155, 655, -719, 1999, -785, 214, 2822, 1020, -1967, 73, -387, -137, -15225, -1552, -357, 2830, 2140, 3070, -2552, 2410, 1230, 4131, 999, 248, 531, -909, 3948, 12858, -8056, 2205, -2837, -171, -1633, -129, -93, 1852, -1920, 157, 9647, -84, -150, -1365, -1522, -13197, 6168, -3195, 5890, -1724, -6407, -1340, 
-7435, -621, -5732, -2895, 145, 3974, 728, 9840, -494, 7357, -394, -13614, -256, -1930, 468, -266, 8001, -153, -365, 7652, 135, 1400, -3869, 1091, -4935, -2884, 1259, 6819, 1025, -6667, 1079, -9794, 6827, -4166, 1108, 1149, 18861, 593, -177, -1067, -644, -2164, 4727, 85, -101, -10805, -247, 8918, 2261, 5475, 756, 3018, -6535, 1941, 359, -4229, 1206, 958, -878, 554, -18780, 2289, 4906, -7412, -7685, 7932, 965, 2460, 4423, -563, -3668, -3482, 3307, -1737, 971, -7480, 10742, 1978, 2365, 20, -3625, 466, 2056, -6602, 9396, 3145, 3162, 1857, -630, -6905, 1660, -3024, -2159, 1109, 1282, 2767, 210, -2203, 3099, -7889, 1805, -13115, 988, -6235, 1566, -1399, -9612, 1821, -519, -57, 3428, -14024, 1141, -2542, -9396, -17, 440, -8591, 2271, -7811, 1891, -935, -4330, -1303, 362, 426, 319, 1176, 3176, 2202, -14308, -619, -2942, -2271, -531, -652, 345, 17681, 1453, -1561, 341, -2077, 933, 433, 1529, 463, -1095, 4912, -840, 16266, 973, 1732, -718, 6702, -3659, 4037, -704, -2707, 1423, 1291, 2300, 149, -933, -1338, 2019, 6173, 481, 14937, -364, 3896, -443, 992, -896, 378, -226, -1505, 268, -428, -2622, -289, -2069, 10472, -3880, -5330, 385, 3053, -4642, 1525, -1557, 716, 2504, 848, -450, -2018, -458, -705, -7120, -543, -2138, 2548, -351, 737, 12906, -1012, 63, 15357, 332, -837, -225, -1299, 2843, 1334, -669, 2083, -707, 1171, 8219, 2190, 10567, 1370, -1376, -2919, 2108, 10098, -388, 4442, 164, 490, 7580, 26, -1848, -2919, 640, 4758, -108, 8194, -1325, -2314, 447, 5178, -1095, 9902, -693, -3624, -223, 690, 10495, 776, -919, -1621, 2046, 469, 1454, 3681, -1090, -1776, 1457, 212, 2054, -994, 698, -496, 22347, -623, 254, 960, -4073, 531, -2572, -14393, -1022, 258, -3667, 994, 15242, 5078, -3618, 1925, -1229, -1754, 1715, 4358, 1286, -2360, -4590, 1824, 7864, 1423, -2146, -2763, -10635, 474, -829, 1159, -157, -54, -158, -29, 202, -383, 285, -2, 862, -364, 415, -123, -145, -9733, 1167, 10199, -1408, -2992, 2131, -412, 4743, 2992, 3555, -617, 9606, -2831, 2357, 5300, 625, -678, -500, -128, 
-56, -6327, -1122, -2567, 1904, -1804, 709, 3194, -148, -1371, -6534, -1748, -1490, 14159, 1466, 1395, 1101, -2725, 503, 68, -1486, 0, 211, -1218, -3, 20920, 1709, -208, -839, 4574, -6084, -6557, -103, -984, -375, 8409, 1715, -2170, -5003, -3296, 13482, 1211, -4159, 3496, 1040, 6925, 213, -1398, 441, -1231, -814, 842, 1574, 1145, 1359, 437, -1777, 20566, 259, -4573, -1412, -158, 10144, 1269, 1405, -12631, -1104, -615, -15892, 355, -3795, -1158, 3241, 252, 232, -179, -617, -2038, 285, -1014, -1248, 1835, -1558, 1266, -10207, 629, -312, 11376, 154, -288, 5915, -353, 60, 2695, -853, -103, 15659, 2403, -1184, 3, 9236, -10953, 4434, 829, 2563, -164, -848, -646, 7247, 895, 1726, -752, -979, 1053, -971, 318, 2180, 927, 804, -262, 446, 3261, -4926, -4523, 1247, 2039, 12770, -1191, -1310, -5574, 4763, 657, -4139, 10821, -805, -1109, -3189, -1721, 167, -10022, -1877, 2123, 328, -7048, -2130, 2431, 1522, 3209, -8448, 1810, -5412, 9815, -3677, 6575, -6237, -929, -434, -2375, -13586, 3497, -1140, 1227, -6354, -507, 329, -1690, 1079, -880, -3743, -4021, -4645, -6053, 958, 4594, -1122, -11628, 1537, -3418, -1242, 133, -9335, 1611, -432, 10733, -885, -468, -13466, 690, 214, 8968, 3441, 5451, -219, 5492, -377, 409, 3812, 2450, 508, 6542, 3824, -3705, -514, -8262, 1537, 7969, 946, -2869, 8762, 417, 5094, 2104, 6694, -342, 1259, -4779, -1445, -1519, 333, 4385, 652, -386, -580, -1892, -873, 1862, 2704, 13837, -5415, -1975, 5881, 7150, 8272, -6412, 704, 1854, 257, -3746, -9789, -9634, -924, 1393, -3237, 259, -56, 4390, 4902, 1172, 5114, -2616, -4409, -1180, 4691, 7400, -625, 8873, 6846, -1224, -213, -5296, -3504, -147, 17828, -1347, 3251, 1702, 1440, -2364, -491, -227, 1765, -446, -9746, -2019, 11287, -195, -9559, -312, 888, 5789, -1753, -11069, 2537, -265, -1762, -779, -8501, -308, -89, 1973, 3640, 17344, 1326, -689, -398, -3820, 2167, 229, -636, 2142, -6587, -751, 13243, 465, -5946, -202, -968, -1060, -240, -10626, 3405, 1302, -1263, 972, 11351, 100, 2266, -930, -2108, 5350, -3186, 
11130, 2073, -5616, 650, 2000, 1048, 5628, -531, 674, 8453, 1030, 1152, 12095, 352, 409, -1029, -1236, -190, -5724, -589, 3550, 1958, -14081, -339, 1672, -1659, 4518, -75, -638, 5501, 277, -578, -2185, 157, 2066, 8634, -2403, 1617, -12487, -1881, 8273, 179, -2152, -1294, -512, -415, 456, -141, -125, -405, 132, 49, -1978, -19085, -451, -1480, 324, -5397, 235, -1217, 346, -1258, 3540, 10075, 10291, 5060, -2057, 6156, -992, 9344, -3718, 4296, 895, -8464, 341, 1426, 648, 1494, 2895, -3760, 10139, 15531, -984, -1550, -1319, -1542, -119, -517, -185, -3368, -9279, -3455, -4257, 1092, -10120, 5072, 3099, 986, -2562, -12068, 1932, 6489, 950, -2417, 1362, -567, 591, -715, -515, 3506, -726, 6319, 214, -364, 3611, 1895, -2005, -273, 1513, 2379, 475, -4855, -527, -11493, 27, 4343, -2394, -639, -744, -2601, 10917, 1910, 2449, 1238, -2175, 5322, -4054, -40, 4274, 684, 8152, 966, 10882, -13, 4253, -287, -3192, 548, 2020, 189, -6894, 797, 2160, 579, 4084, 1767, -4011, -640, 7697, 791, 945, 1230, 6491, 1508, -3762, -433, 11340, -129, -1131, -5121, 3148, 1544, -7648, 1866, 9660, 2365, -2110, 782, -82, 3666, -701, 303, 298, -1934, -125, -1427, -17589, -1188, 175, -7046, -488, 1121, -6594, 489, -1551, 14349, 1499, -544, 17132, 198, 2516, 2479, -978, -214, -3399, -1223, 2094, 130, -1020, 1049, -710, 12801, -498, 297, -1365, -187, -3169, -123, 9019, 958, 221, 14234, -590, 961, 3092, 8, 255, -4586, 1789, 2522, -12577, -91, -822, -805, -714, 5298, 1299, 3306, -1288, 13176, 235, 1754, -67, 1912, -604, 3240, -2048, -200, 772, -173, -996, 1368, 2380, 294, 763, 19665, -196, 528, 182, -2394, 923, 749, -13578, 855, 589, -9553, 0, 5737, 10399, 9147, -1655, -3735, 1246, -2429, -1147, -2199, -2953, 614, -1404, -449, -8524, -2271, 5001, -9517, 2940, -204, 3625, -258, 32, 1521, -299, -1786, -2836, 1523, 2427, -835, 3139, -197, 3351, -279, -14766, -1267, 5169, -1039, -10967, 58, 641, -767, -1193, -591, -716, -834, 8109, -915, -711, -10427, -1680, -638, 2643, -850, -258, 10452, 362, -5394, -349, 
-14727, -655, 1040, 1722, -10265, 551, -283, 9888, 408, -400, 5980, 1878, 781, -923, -667, -789, -348, 624, -260, 14515, -804, 1721, -2, 5356, 1802, 1218, 498, 1871, -988, 16295, 4163, -2342, -4290, 3121, 3269, 112, -3492, 1124, -1496, 1863, -1426, -1090, 1598, -197, 1160, -1660, -1094, 477, -4104, -396, 1605, 26134, 746, -12876, 2320, -1690, 8626, 39, 1341, -1254, -1890, 2555, -13996, -1218, 3827, 1216, -909, -180, 1720, -87, -143, 989, 340, -1426, -4029, 3141, -9424, 466, -8227, 422, -7379, 2038, 401, 98, 3602, -1223, -946, 2469, 1159, 727, -268, 467, 203, -11079, 3850, -3469, -1965, -1857, -1415, -2477, 3173, 7352, 9483, -5541, 6212, 1886, -3868, 2728, 577, -5057, 321, 972, -77, 47, 227, -38, -1037, -222, -347, -341, 1179, -948, 592, -7485, 2218, -5955, 2698, 11798, 197, 6260, 1711, 998, 8, -6223, -1184, 1145, -1781, 1376, 1394, 388, -689, 2279, 6511, 2542, -4903, 3917, -790, 535, -1903, -4448, 4216, -22, -6715, 5204, 4807, 3193, -1064, 5403, 4503, -2434, -4296, 1383, -1514, -4103, 747, 3928, 2987, 9513, 2492, -8691, -993, -2667, -40, -170, -3116, 611, 2367, 16297, -1256, -1404, -3462, 466, -524, 5464, 491, 706, -7491, 2027, 373, -4086, 1620, -7789, 704, 5002, 1706, 8325, -851, -9883, -3072, 4475, 2696, -8549 }, .shape11 = { 44, -10592, -832, -413, 612, 530, 379, 753, 1442, -3006, -858, -1077, -12018, -196, -771, -1142, -628, -2938, -439, -3323, 20, 12513, -2462, -1270, -57, -8417, -690, 790, 276, 2349, -341, -1644, 230, -2176, -202, -14725, 170, 1725, 3030, 683, -231, 641, -242, -3252, 110, -1440, 2886, -1467, -1155, 14395, 297, 52, 240, 3938, 9880, -7555, -1214, 3351, 129, -1269, -168, 669, 13765, -1289, -465, 10017, -632, -328, -276, -33, 31, 18883, -148, -131, 525, 1669, 2288, -203, 868, -660, 248, -409, -91, 295, -9174, -1484, 929, 2824, 1097, -3205, -113, 2712, -1544, 527, 1419, -963, -388, 691, -16791, -84, 72, -3802, -357, 1633, -15182, 62, -6024, -742, -5396, 4470, -198, 1, 1428, -1691, 18715, 1402, -2539, -375, -8455, -901, -147, -3274, 9359, -277, 
-8941, 714, 2834, 2924, -6326, 907, -123, 10487, -484, -4772, 877, 9840, -505, -7562, 301, 671, 116, -371, 3740, 359, 385, -5145, -908, 156, 9639, 3782, -9688, -4214, -945, -7685, 334, 2185, -1342, 388, -1741, 278, -231, -912, 905, -1039, 598, 2049, 662, -198, 22378, 166, 116, -1699, 335, -8380, 1279, 1536, 14955, 1254, 190, -2519, -608, 364, -561, 5748, -1178, -923, 3183, -59, 13880, -2530, 241, -564, -319, -7510, -9, -124, -20346, 305, -25, -400, 222, -16943, -488, 802, -1685, 3323, -6198, 1000, -903, -846, -387, 462, 847, 526, 10024, 2020, 2090, -9563, 1416, 169, -12182, -428, 10388, 869, 1068, 2201, -1041, -3180, 152, -646, 4, 4017, -1069, 307, 5283, 3021, -13662, -493, 9, 542, 152, -2617, -3870, -514, 13497, 1180, -603, 1255, 2396, 7418, 8902, -11165, -2626, -5719, 1764, 858, 1105, 1476, -1764, 1969, 977, -1738, -928, -13940, 1444, -4157, 836, -12243, -369, -256, -15681, 5320, -5170, -509, 353, -1581, -1455, 965, 716, 209, -883, -317, -1961, 9128, -8197, 2173, -2434, -1126, 4066, 1025, -16663, -7013, -147, 1617, -745, -3205, 1496, 1822, -1199, -2999, 117, 619, -20002, -232, 142, 3207, 561, -292, -1635, 1035, 37, 2712, -243, -8269, 305, -2601, 495, 14516, 831, 260, -54, 4217, 675, -1632, 4962, 793, 1066, 133, -344, -12428, 95, 6164, -1298, -1860, 3622, -467, -867, -1178, 11053, 118, -36, -6997, -763, 16019, 16, 2459, 306, -820, -1135, 847, -709, 928, -164, -293, -5736, 543, -11548, 5389, -2012, 300, -228, -1043, 5107, -558, 1187, -140, -13034, -1571, 740, -4967, -432, -6289, -1778, 3449, -337, -12607, 344, -3790, -1598, -274, -346, -1494, -108, 325, -1215, 819, 404, -568, -286, -21364, 15495, -2297, 606, 117, 10, -193, -972, -292, -573, -1155, -1289, -1025, 472, 1154, 843, 187, 586, 20569, -5, -236, -1181, -1092, 700, 891, -603, -601, 21648, -449, -193, -1103, -298, 2084, -251, 449, -1414, 17168, -391, 104, -5465, 401, 8839, 781, 1741, 201, -369, 466, 12358, -636, -945, 3928, -605, -17445, 5020, -1289, 977, -6202, 1783, -507, -76, 267, -31, -2731, -1560, -1225, 
1348, 11176, 1669, 754, 1671, -4038, 151, -371, 7283, 243, 1387, 126, 1007, 1292, -15, 696, 282, -2623, 1065, -1026, 191, -632, -132, -12957, -32, -1697, -422, -240, 1352, 10252, 1067, 8296, -1244, -9, -301, -3014, -249, -372, 10731, 535, 2147, -8959, 346, -408, -8329, -1905, -48, -8176, 2782, 412, 1425, -946, -748, 1095, -1370, 9086, -99, -143, 68, -544, 264, 494, -377, 13, -618, 237, 193, 3549, 317, -168, -7148, 2351, -244, -13240, -3355, -2322, -533, 9554, 6906, 124, -694, -901, -2762, 207, -915, -2520, -143, 8544, -678, -2788, 12926, 791, 1296, 4861, -1470, 889, 3675, 806, 290, -11146, 422, 9217, -31, 1608, 140, 3939, -6903, -276, -704, 2353, -344, -1038, -230, -177, 670, -617, -129, -857, -8231, 638, -411, -252, -15709, -1218, 210, 288, 542, 533, -9087, -10493, -624, 1175, 611, -230, 746, 1455, -590, 830, 1756, -15800, 823, -1077, 788, 1071, 468, -1654, 660, 983, -9697, -1300, 662, 2053, -281, 12949, 389, -915, 197, -1742, -4587, 1746, 707, 1625, 9021, 2204, 759, 1303, -428, -220, 41, -5499, -16080, -193, 443, 443, -78, 889, -561, 5629, -1073, 7019, 222, 1661, 1190, 1108, 94, 5624, -3796, 407, -706, -122, 744, 363, 1648, -10896, 595, 953, 85, -267, 195, 851, 17173, -636, 243, 907, 2029, -700, 351, 1495, -157, -575, -11664, 1252, 8341, -616, 3708, 5693, -6, -1753, 1072, 863, -823, -4278, -12043, 750, 597, 3145, 38, -8140, 3136, 290, 7, 11084, -876, 1842, 175, 3458, 460, 1615, 11698, -827, 16, -12482, 428, 411, 2625, -1352, 142, 529, 229, -48, -965, -145, -592, 655, 499, 22095, 22141, 37, -1875, 701, 45, 724, 1111, 1631, 262, -252, -9092, 5325, 408, -637, -612, 647, 1268, 834, -510, 603, 199, 816, -9904, 9533, -1580, 2669, 1824, -2092, -701, -271, 7489, 46, -3295, -844, -304, -226, -260, -692, -5, -527, 37, -49, -1542, -69, -1087, 20519, 367, 1, 3487, 2535, -5110, 642, 1223, -2130, -2894, 1752, -1618, 9732, -1633, 6904, 137, 654, -358, 355, -21, -277, -68, -188, 132, 530, 372, -315, -11498, 221, 815, 2480, -1398, -123, 353, 3114, -12025, -1212, -1111, 916, 6452, 
-1880, 1867, 307, -66, 1857, 138, -980, -3088, -174, -41, -393, -656, 847, 15824, -379, 358, 672, -389, 920, -21145, -393, 350, -574, 1005, -2083, 26, 79, -203, -7967, -3302, -5805, 772, -302, 2104, -1240, 13710, 6816, 2282, -3709, -1512, -81, -2216, -3005, 444, -795, 751, 2163, 20751, 780, 542, -480, 624, -425, 769, 2474, -5903, 399, 10564, -112, 69, -1409, 1885, 2339, 67, -620, 196, -2432, 6046, -1673, 6512, 809, 7904, -516, 4278, 223, 359, 16512, 1224, -480, -505, -735, -502, -593, -4565, 1914, 122, -531, 1442, 464, 69, 292, 410, -581, -19848, 1059, 132, 1392, 5917, 705, -7706, 2496, -1487, -791, 11939, 185, -265, -2412, 630, -8028, 1434, 10315, -1541, -3756, -2403, -1918, 1050, 8057, 234, 13546, -92, -2172, -671, 11631, 103, 116, -171, -4604, -267, -602, 15, 454, 6859, -2151, -8707, -1664, 61, 2518, -969, 903, 1209, -1435, 13531, 590, 236, -821, 598, 1186, -7690, 134, -1005, -18177, -148, 519, 900, 951, 406, -3584, 47, 9439, 1418, -797, -3353, -703, -1798, -1244, 291, -2784, 14612, 2029, -161, 1040, -4130, 3064, 1721, -2898, 269, 3367, 1379, 14359, -690, -655, 2010, -4935, -681, -2606, 11651, 748, 101, 13593, 629, 28, -540, -854, 1405, 558, -8785, -1016, -13043, 121, -556, 4959, 1694, -720, -138, -3897, 182, 1938, 844, 919, -683, 12042, -1101, -155, -1375, -1509, 11, 220, 821, 21721, -367, -634, -1468, -174, 1002, -1203, 318, 11672, -2114, 2472, -1701, 5932, -661, 1094, 2500, -5609, 254, 437, -911, -1611, -8005, 217, -1139, 1321, -10713, -2183, 1163, -890, -622, 12820, 1021, -13578, 1040, 3216, 592, 686, 737, -2881, -1693, 3995, -455, 4666, -4124, -9316, 2061, 10645, 271, 264, -6829, 641, 2061, -6683, -512, -747, -9131, 2445, 343, -9944, -2888, 607, -10855, 871, 418, 504, 936, 1079, 273, 400, -17752, -391, -1543, -6193, 1482, 737, 2096, -982, 167, 972, 336, 1063, -1272, -1602, -1907, 9, -191, -15207, -119, 4047, 1479, -1405, 526, -18462, -627, -1996, -1022, -1544, 312, 7972, -227, 797, -5204, -2160, 391, -423, 257, 3836, 442, -1931, 22, 143, -203, 362, -73, 
15679, -289, -1445, 577, 858, 11408, -1970, -1022, 1550, 882, -3699, -2697, 3978, 600, 86, 3858, 8683, -7681, -4856, 4051, -1321, -587, 46, -499, -354, -655, -15717, 67, 490, -2670, 474, -1374, 5601, 60, -17615, -808, 87, 367, 579, 1057, 1020, -394, 1181, -189, -10846, 763, 2635, 282, -3279, -866, -15257, -449, 112, -15577, 227, 269, 13964, -1273, 1513, -1487, 195, 319, 2527, -286, -5883, -5360, -959, 2791, -3335, -945, -1985, -903, -11418, 8525, 669, 6106, 153, -1169, -1198, -553, 7037, 528, -4237, 717, -214, 1824, 10108, 961, 9077, 1899, 10407, -207, -29, 355, -6794, 111, -13627, 1361, -3577, 291, 4534, 2209, -1579, 109, 523, 456, 10990, 31, -448, 385, 1481, 2, 15266, 798, 5759, 860, -16424, -1315, 1631, -456, -977, -180, -2593, 1191, 5959, -32, 8112, -506, -7766, -1871, -15310, 662, 196, -20401, 925, 446, -2035, -620, -686, -249, -2517, 423, 703, 633, 828, -182, -37, -406, -149, 821, -22255, 652, 522 }, .shape16 = { -786, 193, -15441, 200, 1050, -16545, -41, 329, -869, -170, -858, 2725, 217, 447, 2107, -23, -387, -10280, -383, -320, 387, 16012, -79, -967, 3528, -2123, -537, -636, -1761, 949, 100, -17, -446, 261, 22527, 331, 26, -87, -206, -2292, -1178, -164, 598, 147, 889, -14487, -2823, -1280, -1892, 33, -1763, 993, 4807, -953, 2181, -588, 59, -296, 218, 291, -104, 495, -1092, 2232, -14904, -983, -2919, 795, -17207, -2045, 2988, 597, -10312, -718, -2196, -5822, 847, 1304, -757, -4714, -148, 831, -734, 806, 4348, -308, 244, 566, 2706, 604, -748, -864, -568, -219, -128, -688, -218, 110, -29289, 482, 76, -1447, -142, -417, -253, 8124, -19775, 990, 4546, -1012, -8082, 133, -1612, -2243, -3788, 1568, -2892, 852, -1642, -3479, -23, 1300, -564, -1037, 249, -14533, -43, 321, -680, 10, -417, 23426, 397, -108, 1843, 180, 11976, -9613, 353, 3768, 130, -1035, 4340, 218, 596, -224, -779, -1680, 1326, 152, -971, -9725, -355, 5328, -459, 16242, -438, 926, 6210, 1912, 769, 2621, -148, -1008, 517, 341, -3594, -965, 11383, -874, -16949, 1167, -3371, -1655, 586, -132, 3990, -770, 
211, 246, 514, -166, -734, 30408, -258, -521, -20, 339, 499, -2572, 2110, 272, 1357, 123, 2841, -320, -31, -444, -501, 215, -42, 595, 108, 484, -223, 937, 475, -72, -319, 75, -205, -978, -9155, 145, 2020, -3, 2438, 4046, -1281, -875, 1532, -598, 12288, 369, -2046, 343, -778, 1769, -2589, -641, 17437, 1793, -592, -1954, -1607, 6184, 3440, -512, -2710, -1330, -127, 8765, 83, -243, -315, 709, 256, 1176, -1198, -463, 970, -302, -568, -997, -1022, 159, 11008, 27, 13074, 1523, -3239, 2330, -4808, 6115, -9933, 1449, 2153, -3111, 1780, -731, 121, -881, -14289, -265, 566, -611, -253, -2965, 250, -105, -66, 2570, -1922, 2712, 1907, -2025, -454, 173, 1463, -29, -31955, -113, -1751, -3353, 254, 1001, 6781, -29, -639, -1289, 288, 498, -21505, 48, 109, -2151, -223, 1360, -3430, 658, -4185, -1706, 1244, 1899, 124, 12, -35, 289, 382, 433, 261, -131, 54, -646, -280, 86, 180, 153, -169, -20242, -95, 734, -524, 77, 102, 8468, -421, 29, -3, 51, 1526, -600, -264, 355, 1949, -985, -291, -86, 10212, -789, -393, -182, -51, 946, -16716, -954, 1179, -2745, -509, -4774, -587, -608, 7657, -509, -388, 987, 109, -218, -17579, -524, -467, -1643, -444, 1430, 2541, -124, 1785, 27, 7905, -73, -3135, -1241, -254, -2114, 1175, 780, -50, 4055, 535, 438, 32, -113, -260, 81, 1102, -59, 29188, -48, 212, -29, -344, 559, 856, -483, 608, -40, -1498, 112, 10374, 1198, -434, 4053, 1286, 236, 1823, 16046, 592, 1583, 78, -5243, 1311, 456, -1342, -546, -353, 13289, -333, -529, -20859, 183, -167, -1368, -338, -690, 4248, -205, -666, -634, -1653, 1174, 234, -18622, 891, 284, -2632, -1516, 289, 11242, 727, 133, 284, -323, -1370, 908, -13169, -412, 1155, 410, 610, -3072, -8220, -637, 242, -647, -2072, 16041, 2292, -8009, 351, -3137, -3075, -1051, 4569, 125, 23, 1281, 2487, 520, -209, -688, 205, -1248, 246, -601, 533, -12209, -2298, 826, -2762, 45, 15123, 721, 1128, 798, -676, 349, -153, 263, 89, -854, -24, -350, -227, 157, 587, -240, -185, 663, -32328, -148, -204, -2396, -597, -344, 8104, -280, -375, 264, 648, 741, 
-290, -321, 263, -569, -381, 167, 1757, -29636, 30, 393, 398, 590, -242, 81, 1601, 3683, 787, -336, 675, -1080, -713, 261, 18420, 1760, 609, -4610, -551, 2790, 19807, 1347, -125, -9412, -261, 548, 1056, 179, -917, -181, 12637, -267, 621, -11908, 1366, 76, 5875, -742, 394, 155, -370, 2481, 46, -15392, -344, -9750, -1353, -2242, -1685, -1286, 2320, -2176, -1729, 705, -1582, 1590, 1603, 21129, -3555, 2192, -883, 3438, 233, 1965, -537, 399, -4818, -4085, 559, -292, 1290, -2700, 10, -301, -1865, 226, 52, -1346, 306, 316, -12281, -525, 285, 9631, -2, -849, 1620, 128, 176, -1021, -473, 7929, -133, 2459, -33, -1517, -22047, -2300, 98, -3513, 334, 4617, -193, -1309, -1279, 738, -443, 95, 406, 660, -705, -54, -39, 26396, -766, 249, -2423, 7759, -689, -3909, -17404, 65, 1849, 945, 15907, 1386, -433, -831, -6349, -3919, 1870, 8096, 311, 15043, 1709, -315, 1288, 7522, -215, -5072, 1246, -1486, 3762, 4526, 1517, -1936, -543, -263, 771, -10215, -425, -5098, 59, -266, -1012, -380, -2131, 630, 405, 665, -4550, 1403, 8, -46, -879, 398, -532, -185, -286, 921, -65, 378, 669, 174, -15280, 91, -776, 8480, 2463, 184, 2065, -666, -561, 4122, 594, 732, 4007, -852, -71, 194, -126, 1765, -1570, 968, -257, -288, 950, 27482, -333, 370, -1429, 285, 558, 11245, -135, 565, 1296, -261, -62, 600, 1455, 1457, 820, 357, -1203, 169, 16611, -893, 359, 231, 418, -547, -95, 3866, -511, -6344, -205, 923, -239, -16205, -1619, 217, -3362, -6342, -1551, 649, -492, 264, -55, 170, 16992, -91, 306, 43, -2770, 582, -1740, 77, -882, 268, -515, -45, -6093, 24, -5596, 9034, 284, 3211, 846, 1158, -1118, -604, -514, 1402, -493, -938, -3892, 242, 643, 1421, -434, -406, -102, -88, -11733, 161, 518, 978, 1508, 248, -1036, 1407, -396, 293, 1154, -1435, 495, 8243, 20, -845, -5373, 659, 2366, 29148, 145, 603, 4088, -251, -2841, -2526, 20682, -1357, -2454, 660, -125, 347, 11772, -113, -357, -2181, -1234, 1908, -432, 16555, -248, 822, 15516, -158, -653, 1573, 93, -2730, -1111, 958, -1550, -1153, 17, 610, 781, -372, -1640, 
144, -135, -1171, 22140, -427, -26, 690, -800, -1497, -300, 5438, 390, 11304, 9253, 1098, 5564, -9, 3856, 965, 2016, -12797, 1687, 915, 3687, 539, 2496, 702, -1324, -71, -12955, 7456, 4626, -848, -1815, 831, 2151, 7921, -3000, 123, 1189, -1489, 222, 4973, 1936, 54, -10527, -1238, -1157, 628, 14112, -2164, 1478, -985, -4102, 635, 225, -311, -609, -1015, 301, 507, -85, 443, 186, -552, -711, -16988, -1327, 220, 565, -1673, -543, 18633, 331, 127, -342, 22, -77, -360, -439, -501, -1848, -1147, -483, 1133, -351, 41, 908, 502, -658, 474, -430, -11348, -1, -531, 451, 709, 227, -978, 348, -265, 269, -376, 2511, -188, -111, -387, 809, 1009, 1570, -755, -11463, 667, -895, 446, 276, 145, -513, -117, -462, -340, 1457, -963, 191, -788, -150, -979, -507, -27540, 122, 368, -73, 10051, -465, 642, 507, -6828, 241, -5025, 1598, -1174, 2373, -2272, -1910, -108, 15, 166, 2, 10518, 933, -12716, 510, 778, -424, 414, 4899, 759, 862, -438, -886, 457, 304, 23639, 136, -203, 478, -565, 244, -541, 2419, -773, 1107, -217, 1579, -1037, 476, -97, 995, 17973, 161, 16466, -178, -718, -1606, 947, 1991, 2266, 1249, 2708, -611, 1424, -142, -53, 36, 509, 26159, -144, 357, -37, -234, 587, 311, -509, -1639, -332, -1618, -382, 302, -8657, -68, -30, 545, -12834, 158, 158, 135, 621, -354, -871, 451, 1220, -31, 2, -13414, 60, 3, -380, 541, -44, 552, -366, 155, -462, 61, -232, -15426, 317, 688, 1121, 2933, 7151, -168, -9167, -2521, 745, 2792, -10448, 569, -3823, 630, -4626, -95, -416, 828, 259, 72, 171, 635, -250, -128, -426, -153, 260, -771, 314, 235, 26, 32281, -343, 751, -1443, 324, -684, 1900, -1334, 2022, 30, 1073, -2406, 2080, -485, -320, 15328, -860, -529, -16444, -219, 1736, -149, -160, -828, 1089, 413, 241, 3720, -90, 146, 1109, 243, -321, -256, -68, 88, -50, 571, 1179, -25030, 104, 929, 35, 529, 117, -13724, 734, -1344, 456, 5586, 1566, -12573, -840, -1617, -2494, 1791, 1901, 3066, -2159, -414, -3856, -9894, -1608, -657, 15355, -773, -9217, -658, -972, 4730, -2986, -3478, -757, -1416, -3702, 18089, 
629, 7061, 124, 5843, 158, 19017, -2204, -6976, 1629, -5657, 1101, -1859, -1425, -548, -1132, -5043, 1074, -592, -196, 1902, 22705, -1228, 214, -685, -2036, -2368, -315, -914, 533, 218, 1091, -627, 2031, 13922, 104, -450, 4494, -498, -361, 24734, 623, 1029, 2437, -1123, -5092, -6551, 438, 16562, 375, -13102, -193, -2004, 3556, 179, 1832, 2086, 798, -534, -195, -7105, 796, 3969, -12269, 1570, 4273, -2692, 1240, -2901, -2045, -2453, 372, 613, -548, -245, 687, 258, -8964, -1500, -1519, -993, 17571, -357, 916, -1202, 1752, 2081, -536, -3185, -1062, 19335, 721, -9958, 1052, -872, 248, -3133, 456, 1641, 149, -11, 2955, 310, -3178, -18823, 497, -971, -6587, -1380, 351, 106, -43, 607, -4754, 213, 1030, 5377, -804, -2557, 850, 1081, -706, 1325, -14922, -794, -14060, -1953, 891, -3296, 329, -510, -1126, 1113, 1753, -411, 1769, 429, -185, -1020, 194, -106, 11470, -591, -272, 422, 337, 524, -150, 822, 51, -120, 7193, 802, 640, -140, -42, 28125, -1020, 285, -465, 3195, 69, 482, -953, 262, -7672, -373, 5158, 5625, -3003, 550, 5371, 5619, -2200, 5392, -804, 135, 1300, -3610, -23, -433, 13503, 224, 911, -14421, -502, -2151, -1667, -1933, 2888, -277, 547, -989, 3115, -32, -680, -164, 804, 412, 62, -154, -190, 156, -10938, -360, -88, 843, 328, -773, -267, -12668, 856, 1496, -243, -586, 736, -2175, -677, -3069, 7480, -1764, -4024, -2569, 1805, 194, -6814, -1135, -237, 2682, -156, -890, 1285, 368, 1802, -683, -163, 1191, -13063, -496, -335, 17482, 746, 818, 48, 21419, -598, -1753, -1169, -2135, 40, -9114, 592, -3912, 1980, -264, -304, 8138, -185, 286, -3024, 48, -1630, 909, 661, -662, 18085, 240, -201, 69, 192, 305, -22167, 692, -1135, -996, 398, -74, 18553, -958, 1223, -5578, 508, -352, 1234, -450, 497, 780, 79, 51, -221, 255, -26, 13352, -170, 231, 590, 169, -733, -812, -65, -219, -20939, 200, 35, -177, -454, 632, -267, -407, -120, 623, -176, -664, 715, -23, 318, 148, 1125, 16, 709, -21687, -230, -413, 1398, -1235, -283, 1615, 175, -299, 349, 400, -112, 21762, -665, 364, 1089, 1303, 
-54, 523, -381, -1312, 48, -886, -1260, 408, 415, -8349, 7115, 180, -774, 3508, -971, -255, -195, 81, -2674, -977, -355, -1500, 178, -2081, -4432, -1014, 340, 5818, 138, -106, 16917, 1203, 349, 3271, 961, 363, 6008, -6043, 3736, -730, -4201, -514, -6131, -68, -14935, -1781, -3898, -40, -18944, -461, -1694, -1269, -755, -81, 2369, 484, 531, 14114, 85, 32, -10142, -142, 600, -2374, 375, 675, -2663, 155, -947, 6427, 11476, 1253, 5049, 1063, 2003, -1608, 2463, -2168, -1128, 1079, 383, -996, 368, 1208, -3554, -959, 4596, -1209, -4154, 1270, 9365, -2775, -1751, 998, -20023, -347, 1505, 218, -142, 342, -128, -523, -159, 75, -467, 257, -133, -142, 712, -621, 428, -29584, 13, 402, -455, 119, -483, 1121, -461, 960, 807, -46, 297, 14856, 221, -356, 221, 15037, -4744, -2555, 447, -1418, 1464, 1391, -1404, -5812, 512, -2321, 9882, 242, -2298, -137, -849, -3182, 9394, 1412, 1052, 1369, -904, -494, -231, 1113, 1087, -13317, 768, -1178, -3011, 24, 229, 164, -10170, 328, 308, -591, 213, -543, -82, -790, -875, 794, -558, -7651, -573, 1266, -2084, 2275, -187, 97, 384, -11830, -185, -472, 1365, 11636, -1405, 360, -487, -440, -1820, -349, -293, 285, 25, -139, -415, -540, -108, 1136, -673, 230, 19202, -545, -542, 919, 1221, -518, 196, -21900, 795, 115, -16, 459, 3339, -347, -346, -186, -695, -267, -714, 185, 266, -1218, 120, -249, 233, -110, -30412, 285, 219, 2256, 536, -442, 673, -1487, -477, -60, -1806, 183, -7195, -577, 2230, -7594, -3230, 65, 22963, 111, 390, 7134, -3716, -5123, -475, -32, -98, -466, -118, -43, 74, -1071, -902, 1714, 4004, 26, 97, 1680, 423, 252, 9667, 550, 354, -222, 19, -224, -807, 365, 593, 363, -851, -28, 553, 238, -481, 769, 279, 18367, -462, 286, 4825, -141, 500, 20383, 1618, -31, -514, -2484, -327, -8506, -705, -872, 530, -9997, -36, -431, 2824, 3185, 1712, -318, 9513, -10065, 614, -503, 389, 12830, -113, -15, -1007, -523, -1293, -2102, -543, -1157, -583, 1228, 262, -674, -1847, -242, 299, -12025, 547, -591, -9173, 275, 412, 2493, 997, 1229, 1982, 27554, 245, 
106, -1320, -153, -423, -955, -449, 392, 824, 796, -1181, 1640, -884, -70, 8789, 10021, -1806, 1019, 90, 1494, 2071, -911, -1159, 212, 2207, -994, -2500, -497, 92, -11544, -398, -774, 1474, 32, -671, -171, -1250, -249, 1161, -654, -205, -36, 1733, 763 }, .shape22_1 = { 987, -6, -621, -220, -2438, -387, -535, -23, -934, -68, -4985, 575, 483, 7243, -1075, 917, 1739, -1832, -580, 1564, 131, -180, -1271, 3672, 161, 1040, 1737, 2719, 1101, -185, -1410, 221, -422, -8675, -753, -401, -5388, 13, 762, 1378, 1113, 1768, -177, 3397, 2162, 267, 2261, -156, 1708, -848, -79, -1819, -3159, -5548, -745, 7208, -1039, 7555, -134, 2661, -2112, 2270, -1991, 441, -6248, 246, 166, 2092, -1402, -242, -13600, -539, 391, 2395, 11001, -981, 10906, -403, 823, 1647, -294, 93, 504, -5448, 1213, -1849, -3077, 790, -841, 12812, -11266, -1882, -805, -274, 1968, -49, 1189, -80, -281, -40, 409, 2423, 581, -1362, 207, -869, -589, 3294, -318, -4592, -476, 1014, -135, -17999, -194, 807, -2946, -222, 44, -514, -4407, -1201, 1155, -235, 98, 4432, -342, 2386, 1402, -956, 3357, 1959, 4790, -139, -3494, -4280, -589, -8422, 363, -746, 640, -360, -1007, -1100, -7989, -12630, 1006, -1608, -864, -226, -915, -2032, 1274, 596, 1864, 1067, 1597, 460, -2003, -5560, -8020, 2354, 379, -3151, 44, 7024, -698, -2901, 4976, 927, 1223, -93, 172, 189, 6639, -6082, -726, -524, -3068, -3802, 16, -1039, -105, 2333, -350, -306, -379, -832, 1282, 56, 3529, 562, -603, 5954, 294, -1265, 8045, -3990, -169, -123, -3267, 572, -879, 1562, -1185, 799, -9589, 407, -590, 65, -2848, 433, -5547, -19, 7180, -7904, -392, 323, -448, -4481, -3773, -5286, 1957, 226, -2040, 3292, 2987, -1704, 2835, -149, 1435, 823, 1775, -2769, 146, 234, -131, -15, 268, 37, 139, 22, -196, 91, -3503, -5421, 24, -280, 58, 370, 655, 1412, 113, 306, 16404, -234, 315, -957, 72, -1129, 1993, -18719, -1415, 1349, 2340, 541, 313, -1360, 31, 1441, -78, -9905, -393, 367, -712, -2009, 372, -297, -123, 303, -458, -323, 46, 8701, -1301, -8768, -43, 1818, 212, -543, -5077, 
-8037, -2536, 702, 792, -381, -272, 1941, 6320, -1871, -13938, -262, -2063, 108, -861, 485, -440, 768, 5665, -302, 305, -13784, 2889, -127, -94, 145, 1308, 7911, -8376, -643, -596, 1357, -943, 1329, -84, -62, 1651, 391, -2295, -5456, -357, -4611, 1361, 3961, -295, 642, -698, 8614, 1613, -526, -120, -205, 17, -20171, 1252, -261, 535, -1244, 92, -315, 878, 380, 157, 3217, -493, -773, 513, -510, 11304, -899, -27, 398, -6386, 659, -1001, -2737, -13295, 1219, -1014, -193, 445, -2393, 344, -25, -599, -2848, 884, 94, -11, -564, -36, 9939, -3530, 462, -942, 10089, 824, 2994, -293, 71, 10167, -457, 711, -964, -2128, 2530, 160, -2558, 2451, 1654, -3828, 1560, 879, -1023, -8354, 851, -77, -112, 19572, 2010, -1077, -1329, -1282, 1277, 252, -5622, 4617, 58, -2315, -459, -1249, 92, 708, -737, -3323, 182, 1557, -657, 546, -447, 19117, 1645, -336, -26, -2041, 5926, 4746, -1866, 3922, 2798, 5320, 7, 470, 842, 229, -567, 742, -3306, 659, -871, -226, -2593, -1003, -1373, 595, -768, 20658, 944, 1228, 279, -1531, -618, 361, -4019, -343, -351, 7143, 293, 92, -2713, -269, -30, -332, 4093, 216, 239, -563, 1943, -944, -2268, 70, -209, 440, 1493, -446, 491, -362, 25, -331, 433, -1585, 173, 1126, -3614, -234, -2649, 1181, -641, -160, 3727, -841, -2134, -1396, -5758, -14, 364, -4651, 1151, 194, -5234, 5878, -1348, -1388, -233, 3810, -860, 9479, -24, -6616, 1387, -455, 447, -224, -2997, 12, 3502, -73, 470, -9170, 1677, -740, -592, -1638, 675, -93, -17842, 1750, -847, 993, -2393, -49, -2029, 1940, 588, 475, -3467, 55, 5087, 2989, 380, 915, -2782, 2418, 11303, 1098, 1009, 1372, -5780, -303, 1451, 972, -7433, -571, 1661, 64, 10265, 1541, -50, -964, -738, -253, -3105, -695, -546, -775, -18971, -3094, -2379, 738, 1625, 623, 1073, 782, 723, -3417, -578, -189, 4108, 1115, -1222, -9102, -4736, 347, 946, 322, -3699, 193, -15139, 367, 969, -788, -694, -620, -26, -16, 4, -478, 20792, -1175, -231, 2566, -1270, 162, 181, -1451, -5370, -2429, -8910, -3794, -5807, -1655, 248, 4432, 1393, -2451, -2706, -744, 
687, 842, -1281, 2960, -2348, 153, -1671, -1433, -1250, -1096, 2501, -5393, 4266, -1098, 880, -1215, 817, -443, 10053, 705, -689, -2679, -1205, -3302, -809, -918, -1005, 124, -329, 108, -52, -5305, -419, 128, -8137, 1427, 387, -235, -2582, 190, -173, -1031, 2672, -985, 3309, -5927, 7327, -8463, -2, 6035, 743, 552, -14, -580, -68, -11886, 476, 61, 1172, -529, -988, 871, -776, -332, 20870, 384, 7795, -10830, 723, 1690, -519, 962, 663, 1300, -465, 47, -3578, 56, -8131, 2041, -8524, -1303, 6349, 1903, -6726, 1156, -224, 1286, -2355, -3415, 985, -502, -2474, 49, -2789, -3616, -1707, 3363, -140, 1702, -1919, -11518, -404, 62, -6933, -1187, 10830, 132, 284, -639, 1349, 2367, -311, -626, 745, 5660, -152, -121, -5236, -481, 5889, -1263, -8443, -33, 936, 423, -117, 111, -1055, -103, -321, 1286, -611, 777, 827, 422, -162, -6767, -241, 289, -441, -1344, 2706, -1260, -4649, -847, -16107, -263, -1826, -521, -760, 942, 309, -2692, -4835, -853, -806, -276, -322, 5647, 1219, -433, -346, -1171, -1028, 191, -406, 444, 33, 272, 3502, 475, -2178, 1915, -290, -1037, 833, -695, -121, 415, 556, 1025, -2268, 334, 2847, -1768, -389, -14034, -3878, 836, 4605, -1985, -359, 1478, -149, 823, -926, -828, 135, 469, -645, -328, -94, -178, 2820, 781, -2361, -5778, 1312, 3918, -1, -3654, -942, -2495, 615, 210, -17006, -396, -445, 382, 563, -1738, 95, -9107, 4869, 348, 527, 5688, -145, -1195, -2367, -749, -187, 6697, 27, 347, 12571, -64, -427, 3765, 824, -1216, -1126, 5997, 586, 110, -294, -240, 1646, -186, 1360, 413, -6459, -1535, -3208, -520, -621, 8613, 1098, -19, -199, -11446, -657, -353, 906, 678, -19375, -126, 1688, 644, 1231, -2151, -742, 320, -68, -12426, -2750, 1483, -1603, -2639, 3028, 2662, -140, 5405, -917, -407, 207, 9392, -569, 931, -124, -82, 6370, 477, -12264, 1093, 3427, -732, -50, 232, -67, 609, 1615, -463, 583, 1808, 1499, -509, -24431, 231, -72, -192, -333, -7554, -342, -9036, -304, 136, -15450, 1333, -1147, -1488, -1440, 75, 63, 747, 297, -251, 30, -301, -1810, -86, 544, -10446, 
1300, 10468, 218, -2471, 1982, 423, 3046, -1112, -657, -104, 10671, -46, -10953, -6205, -1275, 1972, 937, -75, -330, -529, -2581, 1510, -1881, -1372, -1725, 14541, -560, -884, 946, -307, -5031, 7798, -190, 720, 1525, 29, 868, 1238, 372, -462, 2467, -2661, 2721, -1514, 723, -2782, -494, 240, -7147, 587, 751, 1613, 11054, 1074, 275, 972, -970, 27, -75, 24, -9, 163, 88, 21, 87, -78, -743, -128, -2336, -235, -743, -3918, -333, 1088, -195, -166, 782, -119, -3263, 604, 2155, -258, -1282, -129, 43, -5124, -472, 685, -14243, -1294, -99, -1922, -284, -422, -1112, -3194, -1977, 1448, -419, -7172, 20, -70, 2102, 0, 278, 1882, -10005, 1612, 6020, 71, -141, 1027, -43, -864, -448, -21257, -336, -2090, 5207, 674, 722, 1030, 1367, 1963, 6057, 984, -1087, -3690, 47, -61, 104, -81, 895, 22, 728, -191, 3219, 5228, -27, -802, 1438, -9026, -1352, -581, 912, -664, -23, -522, -912, 178, -603, 571, 574, 406, 564, 175, -405, -2965, -1072, 1749, -957, -402, 9431, 1649, -409, 291, 5765, 808, 6754, 727, -37, -254, 1530, 213, 3253, 357, 371, 45, -1276, -12432, 2799, -1924, -176, -1107, -183, 198, 3662, 20, -1166, 2507, -3484 }, .shape22_2 = { 1688, -307, -590, 971, -3616, -1632, -218, 1861, -1479, -367, -6584, 487, -951, 10808, -232, 444, 89, -1216, -1577, 1283, 249, -3, -3646, 2205, -1116, 2630, 2110, 3193, 270, -189, 78, -826, 1010, -10520, -370, 1234, -5604, -262, 1277, 1440, 2225, 2466, 305, 2469, -740, 120, 3184, 2125, 1185, -3230, 1597, -1670, -8283, -9857, -129, 8932, -1355, 8755, 707, -256, -135, 423, 1543, 1782, -4875, 403, 373, 1570, -183, 782, -9617, -2539, 1090, 523, 6929, -1226, 10329, -278, -999, -260, -1810, 666, -463, -6100, 2040, 256, 532, -1475, 383, 13137, -10953, -2226, -1243, 1584, -2348, -809, 3602, -816, 194, 480, 84, 2297, 344, -5181, -6243, -2616, 2093, 7112, -2373, -1346, 291, -372, -863, -16911, -1878, 378, -826, 579, 737, -468, -2288, 264, 634, 108, -254, 4717, -1286, 2885, 986, -4944, -98, 2007, 991, -2252, -2887, -6141, -605, -10474, 896, 6, 235, -407, -70, 478, 
-8392, -10870, 575, -672, 103, 320, -179, -229, 445, -380, 1124, 3271, -1327, -275, -239, -10381, -9102, 1361, 96, -1255, -277, 9316, -415, -2258, 8992, -117, 1625, -704, -980, 752, 9133, -8792, -423, -272, -865, -2285, 443, -2014, -2592, 3180, 1198, 2570, 3360, -7090, 3311, 697, 2229, 46, -472, 6984, -140, -780, 10391, -1078, 48, -564, -5073, 1576, -826, -483, 952, 1099, -11536, -652, 375, 440, -7319, 2646, -2089, 2804, 3795, -6704, 251, 811, -1224, -1976, -4943, -6671, 780, -2856, -7907, 2447, 3755, -135, 1127, 328, 553, 3450, 351, -5054, -5, 1077, 109, -254, -391, -511, 404, -61, 510, 395, -6044, -7454, 364, -575, 65, -410, -1921, -248, 128, 311, 17131, -2135, -563, -884, 2356, -3951, -1176, -16695, -1534, 1977, 626, 2478, 1554, -1070, 38, -551, 370, -11053, -331, 1062, -1385, -1681, 1028, 3350, 239, -76, -156, 49, 397, 7060, -2834, -6527, 22, 1920, -951, 356, -7674, -8903, -120, 317, -303, 160, 530, 4611, 1083, 514, -12207, -283, 1413, -848, -645, -432, 0, -192, 4780, -3485, -1192, -10574, 1274, -3057, 475, -188, 183, 7865, -11214, -268, 491, 1422, -28, 149, 515, -1651, 670, -450, -958, -4288, 567, -182, 668, 4069, -213, -1176, 148, 8854, -151, 474, 599, 1297, 237, -19186, 2993, -482, -591, -1322, 25, -628, -828, -203, -1500, 5519, -84, 723, -1137, 1217, 13045, -707, -372, -200, -4142, -790, 188, -6760, -8288, 766, 366, 444, -517, -2679, -1470, -61, 161, -3734, 3053, 2012, 439, 627, 524, 5538, 549, -473, -2244, 8399, -6395, 5811, 851, 58, 11376, -6, -337, -689, -1510, -690, -388, -3587, 2665, 3371, -1850, -953, -513, 581, -10296, 548, 1092, 565, 18045, 215, -1486, -1270, 450, -880, 407, -6547, 8393, 206, -515, -1565, -219, -1872, 1479, 382, -569, -5002, -1247, -45, -740, -1791, 17177, -1210, 761, 132, -1627, 4970, 5563, 722, 5614, 430, 5659, 139, 1193, 1513, 1144, -1319, 561, -5145, 1010, 199, 656, -3958, 3544, -1758, 810, -1578, 15976, -139, -1035, -416, -543, -418, 2824, -6541, 94, -673, 11741, 426, -15, -5280, 780, 1795, -4616, 8192, -297, -206, 883, 2369, 
-395, -4266, -3120, -199, 985, 1240, 352, 232, -170, 176, 413, -495, -1399, 754, 618, -6103, -179, -2546, 965, -1362, -806, 838, -3912, -1346, -3135, -937, 219, 307, -3509, 1210, 2381, -7923, 6358, -885, 2902, 284, 2560, 1789, 6878, 6, -4418, -2206, -1091, 1840, -118, -2659, 1008, 2192, 1651, 1363, -7772, 1252, -1200, 133, -757, 501, -98, -17197, 98, -543, 1743, 621, -809, -1950, -793, 1168, -743, -7124, 166, 7875, -4466, 356, -1430, -467, 8589, 9931, 520, -866, 1945, -599, -434, 113, 589, -3456, 597, 6076, 1114, 9660, 1532, 2073, -138, -721, -1030, -1309, 625, -4040, 1211, -18836, -3963, -4468, 197, 600, -1004, -816, -560, -476, -2160, -2, 26, 8162, 1057, -178, -11739, -1882, 1000, -227, 109, -1852, -1163, -17143, 140, -718, -1150, 33, 1397, -45, -205, 153, -1494, 20509, -51, -904, -599, 1915, 884, 504, -1819, -4487, 1252, -1259, -2200, -5601, -448, -686, 5778, 873, -4282, -533, 295, -450, 1422, 2393, 3267, -3911, 249, -3605, -3190, -1096, -2422, 274, -1918, 4070, -206, -432, 1919, -645, -275, 12954, 311, 1479, -2664, -852, -4809, 1102, -375, 20, 1659, -1179, 1199, 44, -5590, -1112, -566, -11369, -125, -871, 158, 1208, 265, -519, -405, 2439, -1129, 1827, -9461, 8548, -1606, 380, 4924, 662, 1314, -391, -2024, 827, -13381, -198, -142, 1600, 3329, 125, -672, -220, 557, 18642, 60, 7296, -10472, -712, 1188, 808, 64, 479, 555, 264, 394, -611, -810, -7943, -235, -6889, -1575, 1320, -381, -7414, 1740, -744, 369, -626, -6899, -2144, -593, 668, -351, -3756, -5143, -1814, 806, -475, 588, -507, -9088, -629, 154, -6945, -1105, 10658, -435, 384, -757, 1183, 3806, -747, -378, 535, 10224, 626, -866, -1931, -1484, 5818, -750, -9628, -250, 589, -653, -198, 104, -934, 1207, 46, 960, -1032, 4236, 4471, -2896, 1551, -7714, -1921, 746, -671, 5114, 5482, -522, -3344, -1905, -9220, -663, -1355, -611, 65, 1368, 628, -1276, -6780, -2623, -661, -117, -437, 5507, 3205, 928, 537, -9487, 80, -102, -538, -277, 863, -1421, 6054, 1227, 696, 3582, -508, -1757, 145, -1705, -1201, 4157, -3314, 2291, 
-834, 821, 552, -724, 513, -9730, -8944, 1913, 501, -216, 716, 2766, -823, 2535, 314, 1774, -3372, 235, 244, -1216, -710, 689, 6736, -52, 218, -8382, -444, 920, 569, -4890, -2050, -612, 1708, -481, -15500, -2878, -691, 538, -125, -81, -862, -10094, 12050, -1392, -326, 133, 61, -50, 715, -6662, -673, 10745, -596, 44, 3906, 247, -745, 4950, -210, 497, -1875, 8197, 2141, 1454, -23, -1480, 2184, -804, 5515, -1311, -8893, -2880, -3606, -282, -116, 8084, 618, -403, 1106, -14405, 1159, 229, 742, -184, -19445, -329, -747, -1240, 1487, -1670, -839, -77, -882, -10986, -2851, -24, -747, -3615, 1939, 1389, 132, 5367, 1355, 408, -1272, 11388, 153, 2708, -1503, 169, 7357, 51, -13586, -404, -304, 626, 163, -1814, -515, 445, 589, -1194, 770, 555, 246, -165, -21192, 184, -265, -1116, -485, -8107, -1992, -10805, -880, -1455, -15154, 2312, -1712, -11, -1899, -400, -2, 314, -318, -280, -658, -1066, -2584, 1027, 801, -11960, 1519, 8873, 465, -3229, 1801, -348, 749, 7, 1079, -2051, 11521, -831, -13425, -6315, -1135, 1088, 1056, -46, -1006, 374, -5065, 1163, -402, -50, -1459, 9586, 514, -1439, -638, -155, -5289, 8043, -612, 739, 1084, -60, 891, 786, -6, -1078, 2097, -5333, 3497, 23, -913, 1303, 957, -35, -6418, -146, -971, 2738, 9695, -1722, -2002, 905, -1749, -917, 122, 379, -325, -455, 230, 825, -137, -335, -96, -160, 390, 731, -2621, -5889, -3949, 5138, 839, -1190, -66, 961, -4600, 2345, 1607, -2448, -6653, -592, -106, -7619, -794, -1186, -12587, -11, -2224, -225, -2903, 534, 1355, -7002, 314, 494, 1950, -8545, -2531, -2438, -77, 886, -1851, 944, -10156, 3003, 1846, 1919, 2019, 471, 451, -436, -1012, -20121, 275, 98, 1776, 578, 96, -16, 1156, 3689, 7, -207, 920, 105, -58, -175, 163, 697, -407, -61, 1261, 1297, 5061, 5326, -1126, 516, 1208, -11108, 441, 7, -899, -19, -368, 438, -1911, 602, 716, 313, 853, 1448, -817, -1453, 1384, -4371, 1043, 1884, 1619, 2196, 10075, -1548, -1201, -796, 5228, 2657, 8244, -605, 422, -693, 3171, 657, 5438, -171, 633, 1579, -1718, -12265, 1083, -976, -293, 
-3802, -306, -668, 7818, -1340, -402, 2231, -4472 }, .shape44 = { -40, -282, 1366, -1173, -3484, 355, -1078, 3800, 4386, -35, -4192, 523, 1291, 678, 156, 2272, -1043, 1075, -1849, -314, -522, 392, 2098, -79, 473, -275, 2, 6398, 451, 94, 173, -431, 1115, -10788, 35, 1823, -3380, -97, -98, -350, -23, -1264, -308, 8948, -695, -79, 3520, 308, 340, -362, -547, 1207, -1182, -10392, -148, 3580, 481, -425, 862, 4894, 736, -152, -626, 23, -5853, 39, -143, 418, -103, -1457, -12826, -122, 283, -225, 10561, -153, 8872, -806, -51, 93, 420, -209, 345, -7661, -732, -48, 479, -225, 276, 13385, -12578, -1440, -265, -274, 1105, -3376, -691, -579, -972, 300, 349, 362, 722, -472, 185, 814, 14, 4746, 761, -336, 1691, 888, -1669, -18717, 827, -2605, 921, 155, 68, 112, -3032, -287, 414, -86, -62, -213, -106, 807, -619, 598, -178, 3104, -481, -1553, 1250, -8363, -686, -9608, 116, -47, 321, -89, 939, -35, -7995, -10159, -526, 145, 363, 2170, 1077, -1223, -738, 120, -408, -390, -80, -404, -1607, -10187, -6432, 961, 94, -1459, 489, 6641, 372, 1007, 5958, -834, 222, 51, 282, -1005, 4473, -8841, -73, -477, -557, 121, -165, -1195, 438, 139, -190, -4205, -4278, -4617, -7592, 40, -422, -459, 594, 7331, 164, 297, 2631, -9075, -78, 372, -6213, -1053, 182, -71, -386, -604, -11720, 552, -617, 413, 1292, 4, -485, 1162, 6051, -5168, -181, 1024, -630, -275, -4067, -8627, 1386, 970, -423, 2973, 2360, 363, -274, 410, 48, 768, 2958, -427, 86, 64, -128, -273, -182, -292, 868, 463, 73, -116, -6509, -5295, -37, 691, 344, -120, 168, 419, 494, -1175, 18896, -135, -376, -218, -453, -916, -1040, -22179, -846, -1005, 264, 159, 597, -952, -825, 393, -328, -14694, 371, -263, 740, 38, -1001, 1289, -668, 187, -155, 143, 683, 7133, -563, -8383, -291, 176, 75, 613, -6965, -11480, 324, -490, 586, 416, 762, 5777, 64, -47, -4124, -1196, -113, 701, -211, 2335, 130, 684, 7278, -158, -213, 297, 10845, -1439, -465, 17, -792, 6499, -10187, -444, -1416, 482, 636, 1472, 752, 157, -334, -3230, -19, -6747, 660, -3082, 4057, 6801, 
-19, 635, 19, 9807, 526, 126, 444, -190, -418, -26754, -202, 243, 597, 10, 345, 814, -330, 160, 344, 3986, 470, 459, 2387, -549, 11889, -1837, -30, 2608, 615, 2301, -771, -1589, -6935, 1321, 4287, 295, -558, -1503, -611, 2104, 411, -218, 1145, -426, 58, -102, 13, 7499, 476, -4032, -2237, -2658, -1943, 5268, 1039, 389, 7091, -22, 156, -186, 2432, -878, 305, -1726, 3209, 361, -1030, 505, 618, -262, -1877, 268, 757, 24, 24306, 102, 973, 142, -953, -1199, 116, -255, 5370, -347, -365, 937, -6939, -1189, -760, 531, -1759, -705, -557, -620, 1151, 250, 21629, -1532, -128, 1421, -211, 592, 5126, 197, -716, 1113, 5844, -266, -12, -813, 85, 994, -2106, -3915, 1402, 533, 521, -883, 87, -386, -2, -4350, 19790, -180, -363, 60, 101, -1717, 119, -381, 100, -565, 3264, 3052, 200, -7319, 26, 347, -482, 10609, -766, 526, -623, 3495, 339, -4406, -59, -213, 686, -603, 133, 99, 48, 1716, -1214, 1397, -2396, -384, -901, -3750, -660, -4314, 313, 192, 292, 259, -644, 176, 2099, 7961, -29, 642, -2970, 1792, -61, -4348, 578, 1867, -1868, 32, 5262, 137, 6109, 443, -176, 351, 400, 1874, -175, -4065, 697, 292, -744, 121, -5134, 6996, -198, 628, 1073, -599, -116, -17900, 647, -1049, -663, 1427, -94, 721, 311, 337, 1376, -2784, 3947, 1342, 1577, -406, -260, -10228, 109, 2358, 2437, 346, 1261, -308, -2094, 1682, 144, -675, 183, 428, -950, 1249, -1546, 33, -254, 681, -1264, -964, -310, 838, 100, -21952, -1484, -1564, 339, 298, 67, -338, 89, 709, 53, 258, -359, 2803, 1553, -312, -7993, -1627, 1189, 476, -123, 336, -767, -18522, 589, 942, -645, -381, -1913, -582, 55, -876, -509, 25143, 690, -787, -1136, 114, 162, 342, -231, -8742, 99, -646, -474, -1384, -110, -98, 8634, -14, -9676, -312, 358, 496, -676, -97, 1904, -2124, -66, -1868, 502, -513, -3244, 2079, -1476, 5440, -40, -381, 500, -238, -471, 12160, 248, -1005, -2886, 173, -3369, -355, -256, -117, -474, -1282, -355, 130, -4833, 31, -232, -12931, -826, 322, 839, 1537, 73, 226, -1888, -483, -2848, -190, 1271, 3597, -4514, -38, 6093, 347, -68, -415, 
-105, -1664, -11461, -110, -399, 389, -511, 935, -424, -1708, -1026, 23239, 298, 7363, -9206, -566, 259, -412, -1213, 335, 614, 928, 972, -1919, -407, 509, 303, -13762, -524, 10360, 1318, -2758, 2350, -106, -119, -68, -6155, -255, -448, -34, -64, -4382, 47, 635, -339, 406, -447, -445, -10592, 233, 160, -5515, -1333, 6755, -952, 172, -1260, -294, 3480, -352, -231, 415, 482, -498, -191, -2034, 7934, 7997, -688, -9503, 376, -228, -500, 222, -1021, -407, 261, 179, 622, 1217, -443, -763, -508, -719, -4509, 91, 449, -283, 91, -39, 961, -10148, 1596, -9161, -327, 221, -470, 676, 12, 1416, 984, -10988, -5500, -189, -727, 226, 4691, 688, 759, 930, -6444, -114, -539, -526, -21, -1218, 650, 6088, 419, 6185, -1200, 84, -1232, -34, -107, 60, 2248, 450, 1187, 1264, -181, 857, 2235, -2859, -13483, -192, -586, -207, -5569, 503, 3376, 1243, -700, 2119, -2186, -296, 896, 299, 177, 184, 1375, 2498, 161, 579, -3683, 443, -21, -186, -3474, 238, 274, 277, -325, -8325, -223, 125, 191, 333, -345, -1391, -7372, 11389, -1055, 4066, -1098, 87, -203, 443, 363, -959, 15395, 4016, -254, 1611, -168, -1070, 2709, -768, 506, -1245, 5821, 2499, 1564, 27, 85, 1989, -1092, 150, -972, 660, -33, 687, 545, -1564, 720, -196, -52, -1751, -25380, -1246, -615, 391, -512, -23289, 460, 360, -85, -723, -250, -163, -48, -921, -3988, 425, -1268, -1695, 3233, -1093, -1166, 198, 7602, 21, 354, 733, 12213, -347, 532, -427, 22, 2218, -578, -3382, -474, -625, 78, -4546, 863, -53, -357, -1529, 1014, 710, 1356, -430, -1633, -24823, 95, 26, 590, -591, -7833, -1355, -9771, -502, -907, -15433, 957, 463, 35, -496, 294, 2129, 1274, -160, -83, 531, -767, 285, 232, 5983, -6122, 1620, 4112, -239, -1733, -46, -1321, 467, 613, -3747, -2284, 13991, 373, -17357, -219, -80, -210, 1462, 37, -1692, 548, -5845, 420, 54, -350, -285, 1981, 262, -874, 2844, -435, -6305, 6449, 72, 631, -94, 96, -442, 1137, 89, 364, 3392, -3512, -387, 1055, 318, -1111, -6971, 344, -9105, -96, -9362, 190, -225, 370, 161, -73, -1830, 174, 48, -518, -3486, 
137, -235, 810, 23, 80, -642, -35, -316, -269, -373, -2413, -933, 2525, 267, -508, -200, 422, -3470, -1273, 640, -1956, 139, 394, -1043, -11008, -158, -1089, -2023, 202, -979, -744, -159, -392, -37, -1679, 2183, 1365, -2883, -4752, -2255, 109, 1660, -613, -511, 1284, -7331, 947, 7009, -2072, -321, -936, -551, -875, 160, -27027, 654, 265, 164, 376, 726, -149, 2813, -94, 5728, 702, -1118, -2555, 217, -186, -107, 146, -83, -62, -196, 708, 146, 3729, -416, 212, -163, -7861, 347, 83, -1079, -994, 271, -1054, -1647, 139, -20, 354, 1298, -3420, 1130, 161, 475, -3913, 468, 23, 285, -1699, 8234, -947, 222, 260, 4276, -341, 6387, 21, 490, -1908, -1654, -60, 2471, 733, -135, 109, -1136, -14756, 4922, 1165, 149, -3976, -66, -594, 6181, -110, 292, 1129, -591 }, .lsp08 = { 0.2702, 0.5096, 0.6437, 0.7672, 0.9639, 1.0696, 1.2625, 1.5789, 1.9285, 2.2383, 2.5129, 2.8470, 0.1740, 0.3677, 0.6082, 0.8387, 1.1084, 1.3721, 1.6362, 1.8733, 2.0640, 2.3442, 2.6087, 2.8548, 0.1536, 0.3279, 0.5143, 0.6859, 0.9763, 1.2744, 1.5605, 1.8566, 2.1007, 2.3450, 2.6075, 2.8850, 0.2075, 0.4533, 0.7709, 1.0377, 1.2953, 1.5132, 1.7826, 2.0351, 2.2590, 2.4996, 2.6795, 2.8748, 0.1393, 0.2453, 0.3754, 0.5453, 0.8148, 1.1289, 1.4389, 1.7592, 2.0353, 2.3215, 2.5934, 2.8588, 0.1250, 0.3627, 0.7613, 1.1380, 1.4163, 1.5565, 1.6920, 1.8130, 1.8678, 2.0427, 2.4318, 2.8544, 0.2256, 0.4223, 0.6452, 0.8599, 1.0673, 1.3118, 1.5486, 1.8366, 2.0759, 2.3026, 2.5284, 2.8030, 0.2304, 0.4404, 0.6891, 0.8964, 1.1510, 1.4202, 1.6483, 1.8580, 2.1181, 2.3686, 2.6078, 2.9128, 0.2230, 0.3816, 0.5520, 0.6062, 0.7909, 1.0988, 1.4330, 1.7846, 2.0713, 2.3457, 2.6048, 2.8708, 0.2447, 0.5800, 0.8249, 0.9905, 1.1721, 1.3990, 1.6694, 1.9064, 2.1307, 2.4255, 2.6815, 2.9117, 0.1974, 0.3812, 0.5802, 0.7759, 0.9280, 1.1547, 1.4170, 1.6369, 1.8890, 2.2587, 2.5626, 2.8239, 0.1209, 0.2510, 0.4841, 0.8048, 1.1197, 1.3563, 1.6073, 1.8926, 2.1350, 2.3669, 2.6291, 2.8985, 0.2352, 0.4347, 0.6582, 0.8178, 0.9548, 1.1654, 1.4942, 1.8812, 2.1703, 
2.3779, 2.6412, 2.8871, 0.2091, 0.4084, 0.6730, 0.9151, 1.1259, 1.3262, 1.5937, 1.8129, 2.0237, 2.3317, 2.5778, 2.8620, 0.1167, 0.2406, 0.4520, 0.7298, 0.9848, 1.2448, 1.5137, 1.7874, 2.0280, 2.3020, 2.5914, 2.8794, 0.3003, 0.4966, 0.6520, 0.8505, 1.1600, 1.3981, 1.5805, 1.8346, 2.0757, 2.3102, 2.5760, 2.8499, 0.2451, 0.4163, 0.5960, 0.7805, 0.9507, 1.2438, 1.5587, 1.8581, 2.0735, 2.3198, 2.5704, 2.8220, 0.3112, 0.5517, 0.7032, 0.8528, 1.1489, 1.4257, 1.6848, 1.9388, 2.1577, 2.4265, 2.6678, 2.9051, 0.2249, 0.3897, 0.5559, 0.7473, 1.0158, 1.3581, 1.6914, 1.9930, 2.1843, 2.3534, 2.5512, 2.8065, 0.2600, 0.4574, 0.7349, 0.9691, 1.1696, 1.3848, 1.6335, 1.9021, 2.1174, 2.3481, 2.5902, 2.8390, 0.2246, 0.3372, 0.4560, 0.5249, 0.7056, 1.0273, 1.3810, 1.7132, 1.9819, 2.2574, 2.5410, 2.8491, 0.1419, 0.4834, 0.8835, 1.1453, 1.2839, 1.4224, 1.5593, 1.7877, 2.1285, 2.4070, 2.6043, 2.8511, 0.1886, 0.3677, 0.5617, 0.8099, 1.1277, 1.3841, 1.5804, 1.8136, 2.0307, 2.2805, 2.5399, 2.8322, 0.2351, 0.4151, 0.6675, 0.8713, 1.0464, 1.3292, 1.6586, 1.9281, 2.1355, 2.3495, 2.6222, 2.8782, 0.2700, 0.4489, 0.6206, 0.7121, 0.7737, 0.9848, 1.3658, 1.7433, 2.0139, 2.2243, 2.4806, 2.8175, 0.2479, 0.4425, 0.6490, 0.8745, 1.1161, 1.3849, 1.6773, 1.9566, 2.1491, 2.3624, 2.5685, 2.8114, 0.2035, 0.3701, 0.5567, 0.7953, 1.0082, 1.2758, 1.5373, 1.7822, 2.0175, 2.2601, 2.4759, 2.7771, 0.1856, 0.3461, 0.5998, 0.9041, 1.2383, 1.4612, 1.6667, 1.9305, 2.1617, 2.4107, 2.6477, 2.8656, 0.2107, 0.3715, 0.5289, 0.6651, 0.8420, 1.1168, 1.4401, 1.7230, 1.9901, 2.2687, 2.5452, 2.8655, 0.1218, 0.2999, 0.6348, 0.9482, 1.2745, 1.5876, 1.9129, 2.2348, 2.4020, 2.4922, 2.6351, 2.8357, 0.1617, 0.3483, 0.5869, 0.8163, 1.0366, 1.2344, 1.4609, 1.7029, 1.9476, 2.2337, 2.5258, 2.8442, 0.2505, 0.4894, 0.7510, 0.9152, 1.0845, 1.3657, 1.6528, 1.8346, 2.0160, 2.2811, 2.5338, 2.8136, 0.0947, 0.1158, 0.0578, -0.0337, -0.0066, 0.0104, -0.0447, -0.0505, -0.0778, -0.0293, 0.0251, -0.0143, 0.0349, -0.0227, -0.0909, 0.0523, 0.0325, 
-0.0410, -0.1045, -0.0899, -0.0009, 0.0075, -0.0575, -0.0855, -0.0129, 0.0575, 0.0597, 0.0391, 0.0371, -0.0184, -0.0083, 0.0287, 0.0143, 0.0167, 0.0120, -0.0168, 0.0452, 0.0223, -0.0352, 0.0119, -0.0496, -0.0965, -0.0661, -0.0072, 0.1099, 0.0843, -0.0087, -0.0478, -0.0128, -0.0120, -0.0004, 0.0731, 0.1047, 0.0630, 0.0196, -0.0103, -0.0399, -0.0986, -0.0912, -0.0390, -0.0247, -0.0694, -0.0749, -0.0066, 0.0223, 0.0634, 0.0343, -0.0134, 0.0727, 0.0241, 0.0066, 0.0437, 0.0610, 0.0364, 0.0248, -0.0358, -0.0686, -0.0104, 0.0426, 0.0088, -0.0137, -0.0165, 0.0671, 0.0815, -0.0863, -0.0644, -0.0088, 0.0023, 0.0482, 0.1174, 0.1270, 0.0594, 0.0165, 0.0949, 0.1098, 0.0137, 0.4951, 0.4999, 0.4958, 0.4907, 0.4984, 0.4965, 0.4958, 0.4996, 0.4987, 0.4958, 0.4986, 0.4977, 0.2841, 0.2186, 0.1474, 0.1687, 0.2217, 0.2632, 0.2706, 0.2624, 0.2162, 0.2453, 0.2460, 0.2531 }, .lsp11 = { 0.1103, 0.3862, 0.6863, 0.8447, 0.9231, 1.0261, 1.1248, 1.4057, 1.6621, 1.8010, 1.8692, 2.0704, 2.3490, 2.6060, 2.7539, 2.8977, 0.1273, 0.2407, 0.3812, 0.6004, 0.7767, 0.9383, 1.1344, 1.3351, 1.5233, 1.7262, 1.9466, 2.1739, 2.3495, 2.5162, 2.7164, 2.9202, 0.2010, 0.3330, 0.4488, 0.6465, 0.8046, 0.9889, 1.1479, 1.2964, 1.4770, 1.6606, 1.8789, 2.1155, 2.3287, 2.5199, 2.7101, 2.9119, 0.1168, 0.2197, 0.3279, 0.4691, 0.6268, 0.8251, 1.0533, 1.2714, 1.4712, 1.6762, 1.8831, 2.1114, 2.3230, 2.5297, 2.7365, 2.9270, 0.1405, 0.3109, 0.4986, 0.6891, 0.8634, 1.0583, 1.2594, 1.4349, 1.6232, 1.8116, 1.9905, 2.1935, 2.3799, 2.5656, 2.7661, 2.9486, 0.1703, 0.3057, 0.4403, 0.5225, 0.5969, 0.8110, 1.0729, 1.3215, 1.5407, 1.7381, 1.9477, 2.1680, 2.3586, 2.5612, 2.7630, 2.9410, 0.1128, 0.2628, 0.4523, 0.6495, 0.8176, 0.9816, 1.1746, 1.3710, 1.5568, 1.7518, 1.9497, 2.1452, 2.3346, 2.5389, 2.7362, 2.9264, 0.1809, 0.3287, 0.5205, 0.7264, 0.9298, 1.1217, 1.2970, 1.4894, 1.6874, 1.8493, 2.0576, 2.2382, 2.4097, 2.6041, 2.7796, 2.9389, 0.2502, 0.4709, 0.6892, 0.8346, 0.9209, 1.0455, 1.2399, 1.4616, 1.6463, 1.8380, 2.0475, 2.2397, 
2.4665, 2.6550, 2.7701, 2.8895, 0.1040, 0.2340, 0.3964, 0.5740, 0.7764, 0.9941, 1.2000, 1.4014, 1.6024, 1.7974, 1.9939, 2.1959, 2.3783, 2.5663, 2.7613, 2.9484, 0.1912, 0.3393, 0.4743, 0.6313, 0.8014, 0.9879, 1.1855, 1.3922, 1.5678, 1.7289, 1.9271, 2.1165, 2.3089, 2.5414, 2.7448, 2.9269, 0.0965, 0.2025, 0.3398, 0.4990, 0.6934, 0.9386, 1.1730, 1.3766, 1.5783, 1.7783, 1.9790, 2.1831, 2.3670, 2.5578, 2.7641, 2.9516, 0.2126, 0.3652, 0.5545, 0.7170, 0.8674, 1.0640, 1.2558, 1.4061, 1.5904, 1.8095, 1.9760, 2.1505, 2.3549, 2.5575, 2.7023, 2.8877, 0.1827, 0.3426, 0.4894, 0.6488, 0.7960, 0.9535, 1.1217, 1.2798, 1.4566, 1.6453, 1.8044, 2.0042, 2.2379, 2.4611, 2.6697, 2.8966, 0.2034, 0.3822, 0.5231, 0.6960, 0.9200, 1.0394, 1.1616, 1.3772, 1.5493, 1.7330, 1.9646, 2.1233, 2.3334, 2.5361, 2.7087, 2.9470, 0.1050, 0.2060, 0.3705, 0.5998, 0.8337, 1.0577, 1.2559, 1.4327, 1.6334, 1.8165, 1.9853, 2.2058, 2.4063, 2.5818, 2.7625, 2.9458, 0.1419, 0.4053, 0.6660, 0.8911, 1.0405, 1.1547, 1.2506, 1.3926, 1.5669, 1.7527, 1.9694, 2.2054, 2.3889, 2.5743, 2.7586, 2.9174, 0.1514, 0.2825, 0.4309, 0.5772, 0.7470, 0.9703, 1.1462, 1.3316, 1.5321, 1.7259, 1.9282, 2.1266, 2.3106, 2.5064, 2.7067, 2.9094, 0.1693, 0.3156, 0.4878, 0.6635, 0.8206, 0.9569, 1.1154, 1.3064, 1.5109, 1.7184, 1.9179, 2.1036, 2.2763, 2.4820, 2.6949, 2.9105, 0.1432, 0.2718, 0.4241, 0.5564, 0.6939, 0.9011, 1.1582, 1.3948, 1.6181, 1.8024, 1.9814, 2.1740, 2.3459, 2.5456, 2.7491, 2.9307, 0.2294, 0.3857, 0.5590, 0.7434, 0.9189, 1.0941, 1.2740, 1.4456, 1.6178, 1.7994, 1.9689, 2.1644, 2.3525, 2.5385, 2.7468, 2.9405, 0.1667, 0.3109, 0.4612, 0.6032, 0.7375, 0.8866, 1.0840, 1.3053, 1.4982, 1.7044, 1.9146, 2.1117, 2.2942, 2.4983, 2.7084, 2.9132, 0.1810, 0.3205, 0.4696, 0.6231, 0.7641, 0.9959, 1.2427, 1.4361, 1.5889, 1.7544, 1.9083, 2.0733, 2.2457, 2.4461, 2.6793, 2.9098, 0.1164, 0.3753, 0.6068, 0.7503, 1.0100, 1.2131, 1.3793, 1.5302, 1.6300, 1.7950, 1.9057, 2.1031, 2.3830, 2.5745, 2.6949, 2.8779, 0.1571, 0.4378, 0.6735, 0.8312, 0.8944, 0.9818, 
1.1622, 1.4094, 1.6423, 1.8066, 1.9258, 2.1838, 2.4363, 2.6279, 2.7358, 2.8790, 0.1398, 0.2686, 0.4248, 0.6156, 0.7870, 1.0035, 1.2012, 1.3689, 1.5363, 1.7398, 1.9604, 2.1619, 2.3345, 2.5097, 2.7271, 2.9368, 0.1913, 0.3338, 0.4987, 0.6446, 0.7852, 1.0163, 1.1886, 1.3610, 1.5379, 1.7230, 1.8880, 2.0862, 2.2960, 2.4928, 2.7122, 2.9151, 0.0908, 0.1752, 0.2899, 0.5365, 0.7761, 1.0100, 1.2124, 1.4060, 1.6019, 1.8010, 1.9774, 2.1905, 2.3733, 2.5623, 2.7660, 2.9565, 0.1773, 0.3179, 0.4925, 0.6864, 0.8452, 0.9897, 1.1860, 1.3722, 1.5515, 1.7658, 1.9802, 2.1819, 2.3620, 2.5442, 2.7250, 2.9220, 0.1286, 0.2341, 0.3689, 0.5364, 0.7176, 0.9350, 1.1083, 1.2943, 1.4974, 1.7059, 1.9047, 2.1145, 2.3242, 2.5361, 2.7453, 2.9329, 0.2273, 0.3834, 0.5565, 0.7192, 0.8431, 0.9962, 1.1763, 1.3571, 1.5774, 1.7419, 1.9202, 2.1131, 2.2919, 2.4898, 2.6895, 2.9180, 0.1775, 0.3058, 0.4274, 0.6023, 0.8151, 1.0734, 1.3211, 1.5178, 1.6706, 1.8154, 1.9686, 2.1537, 2.3461, 2.5276, 2.7181, 2.9121, 0.1653, 0.4304, 0.6361, 0.7824, 0.9183, 1.0452, 1.2071, 1.4077, 1.6206, 1.8299, 2.0089, 2.1948, 2.3900, 2.5982, 2.7844, 2.9487, 0.1492, 0.2609, 0.3820, 0.5485, 0.7243, 0.9319, 1.1538, 1.3579, 1.5266, 1.7002, 1.8873, 2.1016, 2.3175, 2.5221, 2.7241, 2.9243, 0.2074, 0.3781, 0.5209, 0.6869, 0.8577, 0.9875, 1.1849, 1.3568, 1.4907, 1.7335, 1.8902, 2.1224, 2.3099, 2.4918, 2.7023, 2.8765, 0.1359, 0.2254, 0.3286, 0.4432, 0.6586, 0.8964, 1.1125, 1.3523, 1.5626, 1.7579, 1.9846, 2.1905, 2.3548, 2.5542, 2.7663, 2.9346, 0.1430, 0.2966, 0.4685, 0.6493, 0.8315, 1.0304, 1.2220, 1.4082, 1.5995, 1.7888, 1.9774, 2.1737, 2.3607, 2.5577, 2.7558, 2.9405, 0.1477, 0.2694, 0.4056, 0.5626, 0.7051, 0.8647, 1.0491, 1.2488, 1.4814, 1.7072, 1.9150, 2.1147, 2.3038, 2.5144, 2.7184, 2.9202, 0.1690, 0.3033, 0.4580, 0.6686, 0.8536, 1.0293, 1.2124, 1.3998, 1.5718, 1.7607, 1.9580, 2.1245, 2.2971, 2.4762, 2.6896, 2.9177, 0.1092, 0.2779, 0.4853, 0.6880, 0.9011, 1.0953, 1.2752, 1.4618, 1.6623, 1.8484, 2.0264, 2.2152, 2.4017, 2.5835, 2.7671, 2.9436, 
0.1497, 0.3637, 0.6014, 0.8032, 0.9963, 1.1835, 1.3741, 1.5698, 1.7382, 1.9094, 2.0710, 2.2392, 2.4082, 2.5926, 2.7762, 2.9536, 0.1434, 0.2492, 0.3966, 0.5934, 0.8033, 1.0657, 1.2796, 1.4276, 1.5745, 1.7833, 1.9288, 2.1247, 2.3543, 2.5412, 2.7049, 2.8872, 0.1612, 0.2926, 0.4574, 0.6387, 0.8265, 1.0180, 1.1808, 1.3526, 1.5564, 1.7536, 1.9187, 2.1192, 2.3149, 2.5006, 2.7101, 2.9217, 0.0828, 0.1863, 0.3235, 0.5050, 0.7250, 0.9867, 1.2093, 1.3941, 1.5980, 1.7932, 1.9809, 2.1894, 2.3918, 2.5773, 2.7540, 2.9329, 0.2001, 0.3655, 0.5290, 0.6761, 0.8027, 0.9972, 1.2090, 1.4255, 1.6085, 1.7825, 1.9804, 2.1681, 2.3457, 2.5325, 2.7319, 2.9196, 0.1505, 0.2767, 0.4254, 0.6054, 0.7821, 0.9567, 1.1294, 1.3080, 1.4984, 1.6954, 1.8666, 2.0736, 2.2875, 2.4969, 2.7072, 2.9163, 0.1589, 0.4151, 0.5749, 0.6651, 0.8061, 1.0470, 1.2616, 1.3690, 1.4985, 1.7808, 1.9825, 2.1068, 2.2751, 2.5448, 2.7133, 2.8689, 0.0916, 0.1846, 0.3788, 0.6329, 0.8774, 1.0687, 1.2653, 1.4561, 1.6573, 1.8449, 2.0402, 2.2254, 2.3968, 2.5861, 2.7792, 2.9508, 0.2282, 0.4159, 0.5834, 0.6899, 0.8108, 1.0321, 1.2795, 1.5262, 1.6936, 1.8469, 2.0922, 2.2607, 2.3795, 2.5301, 2.7386, 2.9530, 0.1651, 0.3004, 0.4555, 0.6179, 0.7891, 0.9584, 1.1372, 1.3707, 1.5951, 1.7880, 1.9434, 2.1465, 2.3311, 2.5081, 2.6977, 2.8970, 0.1279, 0.3828, 0.6330, 0.8323, 0.9652, 1.1175, 1.2319, 1.3511, 1.5115, 1.6392, 1.7835, 1.9558, 2.2008, 2.4635, 2.6910, 2.9058, 0.1193, 0.2185, 0.3521, 0.5311, 0.7378, 0.9239, 1.1105, 1.3217, 1.5362, 1.7504, 1.9536, 2.1627, 2.3560, 2.5506, 2.7548, 2.9453, 0.1806, 0.3432, 0.4981, 0.6948, 0.8928, 1.0527, 1.2467, 1.4140, 1.6326, 1.7950, 1.9935, 2.1969, 2.3512, 2.5682, 2.7445, 2.9277, 0.1846, 0.3112, 0.4568, 0.5891, 0.7317, 0.8493, 1.0204, 1.2022, 1.3688, 1.6020, 1.8428, 2.0710, 2.2725, 2.4879, 2.7057, 2.9160, 0.0880, 0.2514, 0.5332, 0.7272, 0.8906, 1.1354, 1.3199, 1.4941, 1.6010, 1.7151, 1.8712, 2.0643, 2.2755, 2.5375, 2.7054, 2.8891, 0.1382, 0.2833, 0.4658, 0.6897, 0.9071, 1.0716, 1.2469, 1.4143, 1.5910, 1.7947, 
1.9805, 2.1581, 2.3338, 2.5215, 2.7292, 2.9211, 0.1061, 0.3494, 0.6327, 0.8570, 0.9748, 1.0560, 1.1529, 1.3250, 1.6032, 1.8340, 1.9711, 2.1157, 2.3011, 2.5464, 2.8078, 2.9803, 0.1603, 0.2839, 0.4307, 0.5980, 0.7980, 1.0399, 1.1971, 1.3524, 1.5715, 1.7838, 1.9468, 2.1498, 2.3627, 2.5514, 2.7327, 2.9148, 0.1691, 0.3117, 0.4796, 0.6895, 0.8732, 1.0164, 1.1916, 1.3707, 1.5384, 1.7202, 1.8857, 2.0672, 2.2487, 2.4593, 2.6789, 2.8940, 0.0965, 0.1702, 0.3191, 0.5721, 0.8100, 1.0241, 1.2272, 1.4196, 1.6093, 1.8057, 1.9884, 2.2037, 2.3925, 2.5805, 2.7578, 2.9366, 0.1950, 0.3519, 0.5272, 0.6973, 0.8732, 1.0656, 1.2112, 1.3959, 1.6116, 1.7821, 1.9445, 2.1592, 2.3348, 2.5142, 2.7440, 2.9297, 0.1388, 0.2557, 0.4120, 0.5727, 0.7354, 0.9196, 1.0985, 1.2805, 1.4643, 1.6535, 1.8340, 2.0546, 2.2758, 2.4778, 2.6921, 2.9122, 0.1823, 0.3336, 0.4957, 0.6771, 0.8563, 1.0137, 1.2299, 1.3849, 1.5718, 1.7667, 1.9193, 2.1326, 2.3135, 2.5268, 2.7133, 2.8998, 0.0790, 0.1901, 0.4083, 0.6456, 0.8463, 1.0285, 1.2297, 1.4181, 1.6159, 1.8056, 1.9971, 2.1912, 2.3816, 2.5746, 2.7692, 2.9497, 0.0049, 0.0116, 0.0045, 0.0039, -0.0010, -0.0122, -0.0205, -0.0034, -0.0140, -0.0041, 0.0191, -0.0322, 0.0002, -0.0124, -0.0269, 0.0059, 0.0586, 0.0339, -0.0389, -0.0319, -0.0079, -0.0205, -0.0363, -0.0211, 0.0241, 0.0595, 0.0469, 0.0283, 0.0176, -0.0183, -0.0173, -0.0004, 0.0024, 0.0145, 0.0534, 0.0197, -0.0065, -0.0067, 0.0133, 0.0358, -0.0104, -0.0386, -0.0109, -0.0078, 0.0275, 0.0565, 0.0251, -0.0027, -0.0053, 0.0171, 0.0088, 0.0495, 0.0141, 0.0039, -0.0445, -0.0426, -0.0184, -0.0280, -0.0223, 0.0039, -0.0171, -0.0606, -0.0786, -0.0430, 0.0544, 0.0595, 0.0320, -0.0012, 0.0108, 0.0185, 0.0066, 0.0408, 0.0552, -0.0073, -0.0247, -0.0480, -0.0288, 0.0186, 0.0212, -0.0013, 0.0403, 0.0598, 0.0690, 0.0516, -0.0298, -0.0177, 0.0278, 0.0168, -0.0106, 0.0251, 0.0386, 0.0331, -0.0052, 0.0133, 0.0291, -0.0158, -0.0329, -0.0367, 0.0287, 0.0462, -0.0176, 0.0049, 0.0242, -0.0034, 0.0135, 0.0086, -0.0149, 0.0241, 0.0504, 
0.0246, -0.0273, -0.0369, -0.0108, -0.0449, -0.0625, -0.0414, -0.0292, -0.0571, -0.0440, -0.0088, 0.0098, 0.0009, -0.0004, 0.0007, -0.0314, -0.0208, -0.0138, -0.0277, -0.0044, 0.0522, 0.0315, -0.0270, -0.0277, -0.0256, -0.0103, -0.0201, -0.0287, -0.0279, -0.0182, 0.0472, 0.0613, 0.0450, 0.0413, 0.0333, 0.0444, 0.0223, 0.0061, 0.0316, 0.0321, 0.0501, 0.0460, 0.0250, 0.0227, 0.0235, 0.0099, 0.0185, -0.0347, -0.0684, -0.0189, 0.0242, -0.0190, -0.0273, -0.0012, -0.0253, 0.0293, -0.0231, -0.0219, -0.0010, 0.0153, 0.0128, -0.0166, -0.0435, -0.0417, -0.0121, -0.0351, -0.0390, 0.0077, -0.0278, -0.0355, 0.0092, -0.0063, 0.0005, 0.0216, 0.0461, 0.0538, 0.0451, 0.0298, -0.0130, 0.0058, 0.0206, 0.0471, 0.0499, 0.0280, 0.0086, -0.0007, -0.0317, 0.0259, 0.0176, 0.0043, 0.0212, 0.0138, 0.0106, 0.0220, -0.0025, 0.0050, 0.0122, -0.0051, -0.0086, -0.0472, -0.0005, 0.0193, 0.0032, 0.0246, 0.0222, 0.0090, -0.0320, -0.0713, -0.0526, -0.0151, -0.0440, -0.0648, -0.0466, -0.0092, 0.0115, -0.0129, 0.0053, -0.0344, -0.0385, 0.0392, 0.0599, 0.0414, 0.0165, -0.0098, -0.0320, -0.0261, -0.0055, -0.0139, -0.0110, 0.0084, 0.0172, -0.0492, -0.0537, -0.0320, -0.0036, 0.0265, 0.0385, 0.0064, -0.0280, -0.0230, 0.0134, 0.0241, 0.0106, 0.0387, 0.0105, 0.0068, 0.0260, 0.4940, 0.4911, 0.4849, 0.4820, 0.4837, 0.4839, 0.4824, 0.4799, 0.4812, 0.4782, 0.4788, 0.4711, 0.4706, 0.4671, 0.4601, 0.4578, 0.2954, 0.2121, 0.1859, 0.1958, 0.1474, 0.1086, 0.1351, 0.1362, 0.1486, 0.1342, 0.1215, 0.1423, 0.1634, 0.1588, 0.1539, 0.1857 }, .lsp16 = { 0.1813, 0.3911, 0.6301, 0.8012, 1.0057, 1.2041, 1.4271, 1.6943, 1.9402, 2.1733, 2.3521, 2.4989, 2.5839, 2.6846, 2.7634, 2.8950, 0.1311, 0.3183, 0.4659, 0.5601, 0.6658, 0.7828, 1.0065, 1.2717, 1.5185, 1.7339, 1.9530, 2.2189, 2.3739, 2.4991, 2.6984, 2.9256, 0.1627, 0.4519, 0.6323, 0.7012, 0.7848, 0.9801, 1.1810, 1.3222, 1.5413, 1.8129, 1.9338, 2.0809, 2.3180, 2.5189, 2.7066, 2.9514, 0.1475, 0.2447, 0.4240, 0.5669, 0.7872, 0.9838, 1.1823, 1.3814, 1.5358, 1.6820, 1.8794, 2.1419, 
2.4132, 2.6112, 2.7911, 2.9511, 0.1224, 0.2876, 0.5013, 0.6985, 0.8902, 1.0901, 1.2835, 1.4768, 1.6596, 1.8538, 2.0467, 2.2304, 2.4124, 2.5942, 2.7729, 2.9531, 0.1741, 0.3034, 0.4677, 0.5879, 0.7258, 0.9648, 1.1417, 1.3220, 1.5081, 1.7151, 1.9212, 2.1286, 2.3208, 2.4938, 2.6765, 2.8891, 0.1657, 0.3174, 0.4907, 0.6559, 0.8295, 1.0254, 1.2071, 1.3880, 1.5737, 1.7845, 1.9027, 2.1139, 2.3323, 2.5157, 2.7323, 2.9015, 0.1592, 0.2758, 0.4417, 0.6315, 0.8257, 0.9873, 1.1277, 1.2830, 1.4337, 1.6315, 1.8899, 2.1356, 2.3572, 2.5632, 2.7468, 2.9420, 0.1524, 0.4325, 0.5931, 0.7036, 0.7696, 0.8923, 1.1739, 1.4773, 1.6609, 1.7911, 1.9666, 2.1972, 2.3754, 2.5045, 2.6613, 2.8882, 0.2130, 0.3013, 0.3721, 0.4257, 0.5079, 0.7015, 0.9815, 1.2554, 1.4648, 1.6966, 1.9138, 2.1075, 2.3318, 2.5292, 2.7453, 2.9347, 0.1142, 0.3748, 0.6205, 0.7642, 0.8121, 0.9022, 0.9843, 1.1558, 1.4467, 1.7422, 1.9574, 2.1302, 2.3812, 2.5898, 2.7720, 2.9583, 0.1255, 0.2339, 0.3570, 0.5323, 0.7458, 1.0003, 1.1729, 1.3567, 1.5217, 1.6977, 1.8924, 2.0942, 2.3145, 2.5408, 2.7553, 2.9337, 0.1316, 0.2289, 0.4327, 0.6663, 0.8509, 0.9994, 1.1697, 1.3804, 1.5609, 1.6903, 1.8572, 2.1019, 2.3687, 2.5789, 2.7715, 2.9472, 0.1502, 0.2546, 0.3883, 0.5333, 0.6976, 0.9163, 1.1071, 1.3364, 1.5420, 1.7525, 1.8948, 2.0839, 2.2819, 2.4651, 2.6875, 2.8987, 0.1593, 0.3014, 0.4573, 0.6354, 0.8157, 0.9805, 1.1783, 1.3747, 1.5678, 1.7326, 1.9286, 2.1340, 2.3253, 2.5280, 2.7180, 2.9298, 0.1811, 0.3167, 0.4655, 0.6507, 0.8198, 1.0075, 1.1892, 1.3743, 1.5227, 1.7090, 1.8849, 2.0743, 2.2750, 2.4830, 2.6896, 2.8953, 0.1846, 0.3577, 0.5315, 0.7290, 0.9176, 1.1016, 1.2654, 1.4525, 1.6315, 1.8268, 2.0238, 2.1934, 2.3868, 2.5753, 2.7682, 2.9469, 0.0876, 0.1439, 0.2048, 0.3654, 0.6281, 0.8853, 1.0907, 1.2992, 1.5227, 1.7373, 1.9395, 2.1419, 2.3488, 2.5486, 2.7466, 2.9348, 0.1391, 0.4170, 0.6561, 0.7953, 0.8734, 0.9986, 1.1870, 1.4520, 1.6042, 1.7910, 2.0135, 2.1870, 2.3358, 2.5066, 2.7409, 2.9955, 0.0804, 0.1355, 0.2599, 0.4998, 0.7408, 0.9474, 
1.1276, 1.3428, 1.5556, 1.7712, 1.9699, 2.1535, 2.3605, 2.5548, 2.7489, 2.9325, 0.1304, 0.3087, 0.4979, 0.6584, 0.8414, 1.0329, 1.2244, 1.4189, 1.6118, 1.8200, 1.9985, 2.1893, 2.3915, 2.5794, 2.7647, 2.9344, 0.1895, 0.2849, 0.3705, 0.4126, 0.6265, 0.9207, 1.1774, 1.3762, 1.5757, 1.7728, 1.9568, 2.1662, 2.3615, 2.5575, 2.7561, 2.9416, 0.1800, 0.3078, 0.4805, 0.6796, 0.8503, 1.0046, 1.1703, 1.3269, 1.4862, 1.6502, 1.8454, 2.0873, 2.3175, 2.5356, 2.7516, 2.9469, 0.1950, 0.3233, 0.4568, 0.5940, 0.7589, 0.9978, 1.1701, 1.3383, 1.5017, 1.6565, 1.8243, 2.0605, 2.2938, 2.5147, 2.7419, 2.9396, 0.2531, 0.4391, 0.5790, 0.7170, 0.8998, 1.1430, 1.3577, 1.5326, 1.6328, 1.7627, 1.9726, 2.1762, 2.3563, 2.5478, 2.7385, 2.9067, 0.1805, 0.2788, 0.3591, 0.3881, 0.5441, 0.8055, 1.0766, 1.3165, 1.5316, 1.7508, 1.9477, 2.1374, 2.3438, 2.5484, 2.7501, 2.9410, 0.2044, 0.3671, 0.5396, 0.7042, 0.8582, 0.9831, 1.1261, 1.3194, 1.4769, 1.6979, 1.8717, 2.0463, 2.2620, 2.4739, 2.7054, 2.9208, 0.1048, 0.2175, 0.4206, 0.5923, 0.7483, 0.9400, 1.1356, 1.3799, 1.5958, 1.7320, 1.8984, 2.1296, 2.3594, 2.5492, 2.7387, 2.9305, 0.0842, 0.1729, 0.3951, 0.6447, 0.8688, 1.0605, 1.2472, 1.4330, 1.6232, 1.8144, 2.0216, 2.1915, 2.3878, 2.5763, 2.7685, 2.9464, 0.1461, 0.2593, 0.4105, 0.5677, 0.7328, 0.8919, 1.0484, 1.2302, 1.4386, 1.6635, 1.8873, 2.1024, 2.3116, 2.5268, 2.7273, 2.9269, 0.1503, 0.3108, 0.4756, 0.6731, 0.8600, 1.0233, 1.2115, 1.3971, 1.5915, 1.7892, 1.9517, 2.1603, 2.3487, 2.5460, 2.7308, 2.8998, 0.2163, 0.3669, 0.5125, 0.6709, 0.8143, 0.9930, 1.2095, 1.4205, 1.6176, 1.7112, 1.8398, 2.0896, 2.3513, 2.5290, 2.6667, 2.8960, 0.2133, 0.4382, 0.6287, 0.8702, 1.1088, 1.3749, 1.6062, 1.7446, 1.8333, 1.9122, 1.9614, 2.0669, 2.1789, 2.3449, 2.6038, 2.8849, 0.1598, 0.2719, 0.3877, 0.4815, 0.5926, 0.7795, 1.0449, 1.3045, 1.5210, 1.7391, 1.9462, 2.1397, 2.3553, 2.5458, 2.7540, 2.9392, 0.2918, 0.5607, 0.6801, 0.7404, 0.8285, 0.9431, 1.1579, 1.4080, 1.6332, 1.8472, 1.9738, 2.0771, 2.2890, 2.5178, 2.7445, 2.9830, 
0.1664, 0.2842, 0.3965, 0.5463, 0.8162, 1.0346, 1.1849, 1.3446, 1.5122, 1.7563, 1.9960, 2.2002, 2.3796, 2.5689, 2.7712, 2.9550, 0.0911, 0.2397, 0.5052, 0.7868, 1.0299, 1.1311, 1.2244, 1.3333, 1.4395, 1.6790, 1.9369, 2.1717, 2.3689, 2.5538, 2.7340, 2.9326, 0.1647, 0.2931, 0.3836, 0.4978, 0.6255, 0.9243, 1.1339, 1.3001, 1.5269, 1.8010, 1.9715, 2.1419, 2.3784, 2.5503, 2.6719, 2.8745, 0.2440, 0.3802, 0.4756, 0.6613, 0.8627, 1.0292, 1.2291, 1.4060, 1.5198, 1.7354, 1.9044, 2.1010, 2.3147, 2.4996, 2.7171, 2.9041, 0.1590, 0.2876, 0.4572, 0.5996, 0.7713, 0.9490, 1.1205, 1.2815, 1.4516, 1.6385, 1.8179, 2.0457, 2.2759, 2.4785, 2.6861, 2.9080, 0.2297, 0.4309, 0.5712, 0.6717, 0.8138, 1.0463, 1.2492, 1.4560, 1.6796, 1.8458, 1.9642, 2.1452, 2.3636, 2.5395, 2.7456, 2.9495, 0.2975, 0.4678, 0.4996, 0.5809, 0.6279, 0.6884, 0.8606, 1.1386, 1.4412, 1.6876, 1.8760, 2.0932, 2.3178, 2.5166, 2.7345, 2.9280, 0.1278, 0.3737, 0.6004, 0.7069, 0.8147, 1.0180, 1.2581, 1.3812, 1.4855, 1.7268, 1.9970, 2.1258, 2.2936, 2.5702, 2.7563, 2.8983, 0.1314, 0.2508, 0.3999, 0.5680, 0.7424, 0.9367, 1.1286, 1.3175, 1.5336, 1.7404, 1.9317, 2.1404, 2.3514, 2.5562, 2.7510, 2.9402, 0.1043, 0.2367, 0.4293, 0.6376, 0.8160, 0.9836, 1.1779, 1.3850, 1.5835, 1.7875, 1.9765, 2.1593, 2.3654, 2.5577, 2.7465, 2.9398, 0.1529, 0.2515, 0.3454, 0.4374, 0.7011, 0.9015, 1.0744, 1.3532, 1.5699, 1.7545, 2.0021, 2.1259, 2.2278, 2.4546, 2.7264, 2.9425, 0.1429, 0.2808, 0.4395, 0.6334, 0.8069, 0.9705, 1.1520, 1.3250, 1.5109, 1.7285, 1.9356, 2.1469, 2.3479, 2.5554, 2.7512, 2.9348, 0.1625, 0.3022, 0.4756, 0.6315, 0.8032, 0.9924, 1.1596, 1.3204, 1.4994, 1.6929, 1.8955, 2.1090, 2.3025, 2.5018, 2.6908, 2.8980, 0.1692, 0.3427, 0.5228, 0.7756, 0.9688, 1.0950, 1.3056, 1.4360, 1.5675, 1.8049, 1.9376, 2.1151, 2.3407, 2.5012, 2.7192, 2.9258, 0.0474, 0.1251, 0.1939, 0.3841, 0.6501, 0.9231, 1.1153, 1.3240, 1.5478, 1.7599, 1.9651, 2.1510, 2.3645, 2.5552, 2.7542, 2.9393, 0.2196, 0.4656, 0.7492, 0.9922, 1.1678, 1.2489, 1.3112, 1.3657, 1.4223, 1.5302, 
1.7212, 1.9996, 2.2523, 2.4844, 2.7036, 2.9145, 0.1128, 0.2368, 0.3704, 0.5476, 0.7723, 0.9968, 1.1930, 1.3992, 1.6013, 1.7957, 1.9888, 2.1857, 2.3825, 2.5705, 2.7616, 2.9434, 0.1341, 0.2768, 0.4510, 0.6359, 0.8332, 1.0335, 1.2004, 1.3952, 1.5762, 1.7681, 1.9815, 2.1735, 2.3657, 2.5552, 2.7514, 2.9498, 0.1247, 0.2559, 0.3516, 0.4726, 0.6861, 0.9483, 1.1852, 1.3858, 1.5851, 1.7815, 1.9778, 2.1737, 2.3729, 2.5664, 2.7620, 2.9429, 0.1988, 0.3320, 0.4777, 0.6737, 0.8425, 1.0265, 1.1694, 1.3655, 1.5463, 1.7135, 1.9385, 2.1650, 2.3529, 2.5367, 2.7545, 2.9585, 0.1376, 0.2620, 0.4273, 0.6169, 0.7755, 0.9441, 1.1169, 1.3157, 1.5179, 1.7020, 1.8931, 2.1059, 2.3112, 2.5136, 2.7169, 2.9198, 0.2112, 0.4385, 0.6091, 0.7618, 0.9553, 1.1543, 1.3445, 1.5396, 1.7153, 1.9192, 2.1263, 2.3593, 2.5958, 2.8171, 2.9394, 3.0409, 0.1347, 0.2099, 0.2646, 0.3453, 0.5266, 0.7869, 1.0513, 1.2795, 1.4880, 1.7181, 1.9294, 2.1332, 2.3362, 2.5442, 2.7433, 2.9362, 0.3141, 0.5935, 0.7517, 0.8313, 0.8568, 0.9570, 1.0250, 1.1275, 1.3422, 1.6303, 1.8577, 2.0705, 2.2957, 2.5095, 2.7244, 2.9262, 0.0962, 0.2116, 0.3961, 0.5641, 0.7122, 0.8883, 1.1023, 1.3481, 1.5623, 1.7554, 1.9618, 2.1675, 2.3706, 2.5556, 2.7430, 2.9337, 0.0898, 0.1510, 0.3060, 0.5820, 0.8221, 1.0388, 1.2261, 1.4289, 1.6054, 1.8103, 1.9941, 2.1844, 2.3742, 2.5711, 2.7632, 2.9474, 0.1326, 0.2316, 0.3761, 0.5177, 0.6782, 0.8761, 1.0952, 1.3175, 1.5078, 1.7034, 1.9051, 2.1245, 2.3424, 2.5484, 2.7444, 2.9389, 0.1740, 0.3293, 0.5174, 0.6824, 0.8394, 1.0372, 1.2046, 1.3723, 1.5656, 1.7444, 1.9442, 2.1386, 2.3139, 2.4960, 2.7071, 2.9297, 0.2304, 0.3775, 0.4865, 0.6182, 0.7842, 0.9208, 1.1151, 1.2843, 1.4641, 1.6988, 1.9209, 2.1260, 2.3099, 2.5229, 2.7414, 2.9276, 0.0094, 0.0261, -0.0037, 0.0041, -0.0092, -0.0044, -0.0232, -0.0073, -0.0047, -0.0021, 0.0250, -0.0580, -0.0140, -0.0342, -0.0586, 0.0020, 0.0449, 0.0155, -0.0523, -0.0279, 0.0299, -0.0183, -0.0736, -0.0639, -0.0017, 0.0336, 0.0209, 0.0046, 0.0077, -0.0148, -0.0114, -0.0120, 0.0115, 
-0.0050, 0.0445, 0.0048, 0.0188, -0.0137, -0.0080, 0.0239, -0.0184, -0.0524, -0.0195, -0.0126, 0.0284, 0.0632, 0.0141, -0.0093, -0.0096, 0.0196, 0.0230, 0.0379, 0.0308, 0.0237, -0.0224, -0.0600, -0.0755, -0.1074, -0.0988, -0.0606, -0.1038, -0.1552, -0.1480, -0.0672, 0.0504, 0.0676, 0.0336, -0.0042, 0.0729, 0.1013, 0.0868, 0.0846, 0.0954, 0.0515, -0.0066, -0.0851, -0.0485, 0.0294, 0.0395, 0.0087, 0.0078, 0.0446, 0.0881, 0.0672, -0.0384, -0.0025, 0.0415, 0.0353, 0.0080, 0.0052, 0.0190, 0.0182, 0.0069, 0.0168, 0.0374, 0.0037, -0.0292, -0.0429, 0.0302, 0.0681, -0.0233, -0.0238, -0.0003, -0.0043, 0.0054, -0.0029, -0.0149, 0.0642, 0.0622, 0.0341, -0.0232, -0.0461, -0.0082, -0.0469, -0.0618, -0.0326, -0.0452, -0.0649, -0.0597, -0.0398, -0.0318, -0.0116, 0.0011, 0.0009, -0.0384, -0.0384, -0.0156, -0.0260, -0.0007, 0.0473, 0.0111, -0.0358, -0.0484, -0.0204, -0.0029, -0.0090, -0.0285, -0.0495, -0.0376, 0.0917, 0.1192, 0.1026, 0.0745, 0.0397, 0.0463, 0.0253, 0.0025, 0.0465, 0.0100, 0.0488, 0.0416, 0.0223, 0.0263, 0.0072, -0.0053, 0.0595, 0.0060, -0.0518, -0.0316, -0.0043, -0.0133, -0.0233, -0.0075, -0.0251, 0.0277, -0.0067, -0.0136, -0.0004, 0.0235, 0.0112, -0.0182, -0.0324, -0.0210, -0.0035, -0.0395, -0.0384, 0.0005, -0.0150, -0.0356, 0.0127, -0.0033, -0.0034, 0.0205, 0.0747, 0.1138, 0.1015, 0.0995, -0.0161, -0.0045, 0.0129, 0.0472, 0.0575, 0.0222, 0.0091, 0.0037, -0.0471, 0.0371, 0.0132, 0.0208, 0.0247, 0.0117, 0.0164, 0.0225, 0.0124, -0.0023, 0.0088, -0.0046, 0.0047, -0.0393, 0.0018, 0.0148, 0.0020, 0.0044, 0.0165, 0.0229, -0.0208, -0.0477, -0.0310, -0.0164, -0.0390, -0.0764, -0.0525, -0.0094, 0.0075, -0.0102, -0.0045, -0.0504, -0.0709, 0.0822, 0.0710, 0.0426, 0.0014, -0.0371, -0.0400, -0.0157, -0.0155, -0.0173, -0.0138, -0.0015, 0.0134, -0.0418, -0.0682, -0.0256, 0.0050, 0.0360, 0.0354, 0.0074, -0.0396, -0.0235, 0.0284, 0.0494, 0.0153, 0.0448, 0.0025, -0.0061, 0.0252, 0.1000, 0.2260, 0.2158, 0.2116, 0.2198, 0.2055, 0.2110, 0.1873, 0.1907, 0.2071, 0.2164, 0.2009, 0.2059, 
0.2124, 0.2141, 0.2093, 0.0875, 0.0981, 0.1177, 0.1071, 0.1033, 0.1248, 0.1048, 0.1238, 0.1166, 0.1008, 0.1062, 0.0992, 0.0994, 0.1067, 0.0999, 0.1187, 0.0750, 0.0794, 0.0828, 0.0854, 0.0859, 0.0801, 0.0891, 0.0933, 0.0969, 0.0920, 0.0915, 0.0862, 0.0868, 0.0891, 0.0842, 0.0824, 0.0625, 0.0930, 0.0815, 0.0853, 0.0898, 0.0828, 0.0822, 0.0910, 0.0873, 0.0906, 0.0856, 0.0840, 0.0774, 0.0785, 0.0684, 0.0711, 0.3319, 0.4219, 0.4588, 0.4090, 0.4092, 0.4014, 0.3548, 0.3353, 0.3708, 0.3352, 0.3720, 0.3538, 0.4084, 0.4289, 0.4060, 0.4210, 0.0588, 0.0209, -0.0082, -0.0115, -0.0343, -0.0621, -0.0541, -0.0346, -0.0346, -0.0366, -0.0220, -0.0265, -0.0102, 0.0374, 0.0306, 0.0404, 0.0306, 0.0090, -0.0054, 0.0333, 0.0047, 0.0238, 0.0141, 0.0165, 0.0306, 0.0420, 0.0159, 0.0124, 0.0414, 0.0158, -0.0237, 0.0141, 0.0765, 0.0057, -0.0260, -0.0426, -0.0395, -0.0126, -0.0579, -0.0417 }, .lsp22_1 = { 0.0664, 0.1875, 0.4300, 0.6730, 0.8793, 1.0640, 1.2563, 1.4433, 1.6394, 1.8176, 2.0029, 2.1921, 2.3796, 2.5671, 2.7595, 2.9536, 0.2128, 0.4052, 0.5311, 0.6404, 0.7875, 0.8775, 1.0974, 1.3261, 1.5563, 1.6790, 1.8339, 2.1195, 2.3226, 2.4609, 2.6440, 2.8947, 0.2024, 0.3362, 0.4834, 0.6784, 0.9088, 1.0850, 1.2188, 1.4054, 1.6102, 1.7767, 1.9679, 2.1436, 2.3445, 2.5467, 2.7429, 2.9320, 0.1181, 0.2279, 0.4413, 0.6114, 0.7710, 0.9427, 1.1142, 1.2707, 1.4892, 1.7416, 1.9526, 2.1466, 2.3629, 2.5445, 2.7293, 2.9205, 0.1155, 0.2720, 0.4886, 0.6812, 0.8594, 1.0422, 1.2315, 1.4116, 1.6137, 1.8020, 1.9758, 2.1743, 2.3602, 2.5568, 2.7472, 2.9374, 0.1110, 0.3312, 0.4735, 0.5612, 0.7129, 0.8146, 1.0233, 1.3155, 1.5765, 1.7746, 1.9574, 2.1416, 2.3220, 2.5384, 2.7334, 2.9318, 0.1656, 0.3350, 0.4215, 0.5609, 0.6759, 0.8503, 1.1405, 1.4094, 1.6057, 1.6860, 1.7639, 2.0031, 2.2680, 2.5076, 2.7263, 2.9368, 0.1466, 0.3638, 0.4587, 0.5674, 0.7381, 0.8669, 0.9619, 1.1658, 1.4667, 1.7440, 1.9335, 2.1018, 2.3022, 2.5281, 2.7359, 2.9261, 0.1061, 0.2566, 0.4739, 0.6751, 0.8711, 1.0704, 1.2720, 1.4655, 1.6605, 1.8494, 
2.0290, 2.2197, 2.4008, 2.5912, 2.7772, 2.9513, 0.1116, 0.2364, 0.3971, 0.6316, 0.8583, 1.0335, 1.1686, 1.3302, 1.5612, 1.7877, 1.9829, 2.2052, 2.3596, 2.5460, 2.7341, 2.9290, 0.2661, 0.4186, 0.5126, 0.6477, 0.8818, 1.1045, 1.2852, 1.4128, 1.5851, 1.7593, 1.9399, 2.1757, 2.3684, 2.5136, 2.6927, 2.9064, 0.1495, 0.2749, 0.4391, 0.6304, 0.8239, 1.0181, 1.1995, 1.3759, 1.5669, 1.7722, 1.9671, 2.1635, 2.3586, 2.5528, 2.7445, 2.9311, 0.0912, 0.1759, 0.3066, 0.5660, 0.8005, 0.9568, 1.1832, 1.4504, 1.6259, 1.7948, 2.0113, 2.2002, 2.3654, 2.5583, 2.7929, 2.9735, 0.1353, 0.2747, 0.4078, 0.5977, 0.7658, 0.9124, 1.1081, 1.3630, 1.5875, 1.7847, 1.9323, 2.1181, 2.3321, 2.5046, 2.7183, 2.9225, 0.1938, 0.4063, 0.4982, 0.6002, 0.7702, 0.9071, 1.1631, 1.3885, 1.6043, 1.8118, 1.9306, 2.0893, 2.2724, 2.4609, 2.6283, 2.8802, 0.1857, 0.3351, 0.4381, 0.6101, 0.7561, 0.8555, 1.0384, 1.3171, 1.5667, 1.6904, 1.7552, 1.9689, 2.2597, 2.5260, 2.7272, 2.9337, 0.1037, 0.2159, 0.4188, 0.6174, 0.8035, 1.0285, 1.2256, 1.4230, 1.6400, 1.8322, 2.0144, 2.1988, 2.3810, 2.5682, 2.7613, 2.9438, 0.1625, 0.2776, 0.4225, 0.6001, 0.7879, 0.9087, 1.0801, 1.2759, 1.4899, 1.7448, 1.9911, 2.1770, 2.3723, 2.5777, 2.7971, 2.9444, 0.2111, 0.3640, 0.5839, 0.7290, 0.8051, 1.0023, 1.2315, 1.4143, 1.5878, 1.7755, 1.9804, 2.1498, 2.3312, 2.5350, 2.7613, 2.9472, 0.1423, 0.2646, 0.4136, 0.6350, 0.8070, 0.9514, 1.1168, 1.3213, 1.5776, 1.7721, 1.9404, 2.1545, 2.3385, 2.5137, 2.7396, 2.9553, 0.1132, 0.2386, 0.4103, 0.5931, 0.7808, 0.9881, 1.1840, 1.3860, 1.6021, 1.7990, 1.9922, 2.1885, 2.3852, 2.5717, 2.7640, 2.9510, 0.1267, 0.2602, 0.3913, 0.5944, 0.7598, 0.9198, 1.0781, 1.2715, 1.5299, 1.7573, 1.9308, 2.1346, 2.3267, 2.5419, 2.7466, 2.9320, 0.2023, 0.3417, 0.4392, 0.6141, 0.7439, 0.8593, 1.1096, 1.3543, 1.5185, 1.6553, 1.7862, 2.0341, 2.2718, 2.4834, 2.7103, 2.9466, 0.1113, 0.2470, 0.3677, 0.5686, 0.7700, 0.9356, 1.0806, 1.2452, 1.4830, 1.7344, 1.9268, 2.1404, 2.3371, 2.5169, 2.7329, 2.9012, 0.1664, 0.3554, 0.5573, 0.7471, 
0.9245, 1.0998, 1.2787, 1.4655, 1.6654, 1.8346, 2.0179, 2.2159, 2.4096, 2.5946, 2.7790, 2.9530, 0.1313, 0.2625, 0.4731, 0.6444, 0.8110, 0.9878, 1.1493, 1.3212, 1.5719, 1.8138, 1.9861, 2.1943, 2.3714, 2.5578, 2.7346, 2.9296, 0.1186, 0.3035, 0.5049, 0.6860, 0.8670, 0.9975, 1.1364, 1.3471, 1.5695, 1.7412, 1.9346, 2.1506, 2.3413, 2.5531, 2.7794, 2.9627, 0.1108, 0.2697, 0.4787, 0.6344, 0.7909, 0.9586, 1.1440, 1.3511, 1.5686, 1.7601, 1.9246, 2.1241, 2.3293, 2.5390, 2.7315, 2.9333, 0.0985, 0.2302, 0.3544, 0.5759, 0.7620, 0.9651, 1.1497, 1.3080, 1.5500, 1.7845, 1.9518, 2.1734, 2.3565, 2.5665, 2.7605, 2.9102, 0.1208, 0.2727, 0.4381, 0.5736, 0.7382, 0.8390, 1.0102, 1.2648, 1.5100, 1.7440, 1.9619, 2.1430, 2.3307, 2.5159, 2.7264, 2.9211, 0.1582, 0.2777, 0.4475, 0.6551, 0.8591, 1.0084, 1.1414, 1.3291, 1.5902, 1.7826, 1.9543, 2.1659, 2.3233, 2.5044, 2.6935, 2.9199, 0.1360, 0.2873, 0.4585, 0.6295, 0.7592, 0.9089, 1.0492, 1.2733, 1.5391, 1.7768, 1.9372, 2.1329, 2.3168, 2.5015, 2.6857, 2.8837, 0.0886, 0.1829, 0.3696, 0.6126, 0.8334, 1.0135, 1.2303, 1.4674, 1.6743, 1.8564, 2.0530, 2.2370, 2.3960, 2.5787, 2.7756, 2.9377, 0.2005, 0.3537, 0.4700, 0.6249, 0.7385, 0.9097, 1.1759, 1.3811, 1.5314, 1.6705, 1.8546, 2.1229, 2.3292, 2.5251, 2.7951, 2.9646, 0.1999, 0.3112, 0.4722, 0.7146, 0.8908, 1.0028, 1.1831, 1.3903, 1.6125, 1.7514, 1.9083, 2.1248, 2.3271, 2.5339, 2.6945, 2.8918, 0.1243, 0.2606, 0.4382, 0.5850, 0.7705, 0.9727, 1.1214, 1.3059, 1.5218, 1.7406, 1.9137, 2.1353, 2.3354, 2.5299, 2.7287, 2.9068, 0.1039, 0.2426, 0.4265, 0.6284, 0.8152, 0.9941, 1.2004, 1.4038, 1.5912, 1.7763, 1.9650, 2.1598, 2.3474, 2.5488, 2.7419, 2.9322, 0.1364, 0.2420, 0.3886, 0.5864, 0.7663, 0.8844, 1.0860, 1.3242, 1.5518, 1.7893, 2.0004, 2.1562, 2.3619, 2.5516, 2.7687, 2.9181, 0.1483, 0.2851, 0.4479, 0.6312, 0.7924, 0.9821, 1.1705, 1.3386, 1.5375, 1.7226, 1.9053, 2.0991, 2.2898, 2.4953, 2.7000, 2.9146, 0.2332, 0.4561, 0.5407, 0.6212, 0.7524, 0.8215, 0.9522, 1.1685, 1.5216, 1.7132, 1.8291, 2.0647, 2.2811, 2.4857, 
2.7071, 2.9281, 0.1348, 0.3126, 0.5179, 0.7192, 0.9227, 1.1363, 1.3223, 1.4756, 1.6509, 1.8191, 1.9991, 2.1976, 2.3877, 2.5768, 2.7590, 2.9386, 0.1093, 0.2211, 0.4763, 0.6703, 0.8282, 0.9536, 1.1202, 1.3796, 1.6043, 1.8031, 1.9832, 2.1604, 2.3578, 2.5856, 2.7650, 2.9291, 0.1865, 0.3027, 0.4580, 0.6719, 0.8400, 1.0082, 1.1901, 1.3782, 1.5448, 1.6885, 1.9477, 2.1381, 2.2797, 2.5113, 2.7465, 2.9414, 0.1575, 0.3124, 0.4649, 0.6262, 0.8095, 0.9858, 1.1676, 1.3602, 1.5646, 1.7582, 1.9550, 2.1671, 2.3628, 2.5734, 2.7670, 2.9519, 0.1174, 0.2777, 0.4663, 0.6333, 0.8169, 1.0096, 1.1885, 1.3847, 1.5803, 1.7571, 1.9380, 2.1398, 2.3414, 2.5407, 2.7360, 2.9375, 0.1073, 0.2264, 0.4083, 0.5973, 0.7474, 0.9514, 1.1349, 1.3337, 1.5433, 1.7348, 1.9380, 2.1436, 2.3441, 2.5438, 2.7457, 2.9383, 0.1472, 0.2880, 0.4793, 0.6268, 0.8015, 1.0063, 1.1715, 1.3644, 1.5525, 1.7410, 1.9258, 2.1227, 2.3214, 2.5149, 2.7148, 2.9196, 0.1414, 0.2565, 0.4349, 0.6111, 0.7695, 0.9496, 1.1212, 1.3265, 1.5218, 1.7209, 1.9015, 2.0887, 2.3158, 2.5077, 2.7233, 2.9421, 0.1252, 0.2667, 0.4454, 0.6431, 0.8371, 1.0124, 1.2110, 1.4160, 1.6240, 1.8242, 2.0047, 2.1974, 2.3902, 2.5778, 2.7637, 2.9481, 0.1321, 0.2565, 0.3846, 0.5847, 0.7578, 0.9259, 1.0637, 1.2239, 1.4690, 1.7346, 1.9750, 2.1882, 2.3712, 2.5509, 2.7280, 2.8885, 0.1437, 0.2930, 0.4428, 0.6156, 0.8045, 0.9638, 1.1450, 1.3138, 1.5144, 1.7355, 1.9469, 2.1534, 2.3414, 2.5452, 2.7353, 2.9334, 0.1692, 0.2770, 0.3831, 0.6100, 0.7825, 0.9302, 1.0690, 1.2481, 1.4615, 1.6799, 1.9165, 2.1739, 2.3435, 2.5349, 2.7520, 2.9163, 0.1235, 0.2489, 0.4354, 0.6343, 0.8236, 1.0066, 1.1908, 1.3474, 1.5656, 1.8275, 2.0620, 2.2548, 2.4135, 2.5913, 2.7639, 2.9334, 0.1090, 0.1961, 0.3854, 0.5701, 0.7024, 0.8843, 1.1393, 1.3785, 1.5940, 1.7797, 1.9442, 2.1740, 2.3853, 2.5773, 2.7727, 2.9406, 0.1560, 0.3477, 0.5011, 0.6287, 0.7612, 0.9896, 1.1510, 1.3420, 1.5435, 1.6816, 1.8731, 2.0651, 2.2613, 2.4999, 2.7027, 2.8971, 0.1459, 0.2416, 0.3833, 0.5450, 0.7916, 0.9223, 1.0662, 1.1953, 
1.4029, 1.6616, 1.9320, 2.1459, 2.3386, 2.5081, 2.6799, 2.9195, 0.1546, 0.3854, 0.6184, 0.8460, 1.0599, 1.2428, 1.3906, 1.5550, 1.7388, 1.8945, 2.0757, 2.2386, 2.4014, 2.5705, 2.7574, 2.9400, 0.1080, 0.2307, 0.4112, 0.6067, 0.7725, 0.9467, 1.1285, 1.3205, 1.5348, 1.7609, 1.9937, 2.1878, 2.3583, 2.5515, 2.7199, 2.9049, 0.1482, 0.3178, 0.4983, 0.6342, 0.7783, 0.9880, 1.2019, 1.3404, 1.5223, 1.7296, 1.9211, 2.0943, 2.2928, 2.5008, 2.7136, 2.9224, 0.1145, 0.2910, 0.4891, 0.6492, 0.8126, 0.9530, 1.1180, 1.3155, 1.5054, 1.6893, 1.8899, 2.1188, 2.3389, 2.5512, 2.7313, 2.9224, 0.0939, 0.1689, 0.3250, 0.5792, 0.7698, 0.9245, 1.1574, 1.3865, 1.5959, 1.7977, 1.9821, 2.1528, 2.3326, 2.5540, 2.7553, 2.9179, 0.1243, 0.2474, 0.3923, 0.6199, 0.7908, 0.9379, 1.1497, 1.3734, 1.5582, 1.7420, 1.9539, 2.1385, 2.3240, 2.5277, 2.7311, 2.9178, 0.1961, 0.3748, 0.5176, 0.6387, 0.8169, 1.0477, 1.2124, 1.3869, 1.5604, 1.7225, 1.8770, 2.0837, 2.2960, 2.5103, 2.6945, 2.8862, 0.1295, 0.2403, 0.4149, 0.6189, 0.7913, 0.9130, 1.0832, 1.2787, 1.4860, 1.7112, 1.9502, 2.1348, 2.2776, 2.4982, 2.7431, 2.9522, 0.0160, 0.0362, 0.0097, 0.0057, -0.0014, -0.0073, -0.0046, -0.0064, -0.0121, 0.0019, 0.0149, -0.0440, -0.0479, -0.0382, -0.0480, -0.0182, 0.0170, 0.0114, -0.0298, -0.0175, -0.0033, -0.0354, -0.0510, -0.0025, 0.0307, 0.0351, 0.0338, 0.0420, 0.0138, -0.0175, -0.0102, 0.0053, 0.0084, -0.0003, 0.0412, -0.0027, 0.0145, -0.0039, 0.0083, 0.0400, 0.0001, -0.0262, 0.0055, -0.0082, 0.0348, 0.0433, 0.0137, -0.0024, -0.0055, 0.0262, 0.0521, 0.0349, 0.0185, 0.0076, -0.0319, -0.0561, -0.0460, -0.0253, -0.0097, 0.0163, 0.0184, -0.0037, -0.0480, -0.0371, 0.0628, 0.0665, 0.0296, -0.0057, 0.0253, 0.0227, 0.0350, 0.0692, 0.0545, 0.0218, 0.0094, -0.0449, -0.0372, 0.0005, 0.0258, 0.0118, 0.0285, 0.0760, 0.0822, 0.0527, -0.0299, -0.0049, 0.0170, 0.0195, 0.0136, 0.0286, 0.0289, 0.0139, 0.0054, 0.0152, 0.0244, 0.0028, -0.0056, -0.0260, 0.0307, 0.0572, -0.0087, 0.0088, 0.0062, 0.0000, 0.0125, 0.0000, -0.0292, 0.0820, 
0.0872, 0.0646, 0.0346, 0.0076, -0.0022, -0.0253, -0.0567, -0.0188, -0.0336, -0.0673, -0.0549, -0.0166, -0.0259, -0.0140, 0.0040, -0.0029, -0.0430, -0.0531, -0.0253, -0.0019, -0.0071, 0.0393, 0.0072, -0.0327, -0.0236, -0.0235, -0.0177, -0.0186, -0.0280, -0.0201, -0.0077, 0.0383, 0.0418, 0.0321, 0.0294, 0.0169, 0.0468, 0.0301, 0.0133, 0.0363, 0.0516, 0.0937, 0.1240, 0.1404, 0.1325, 0.1178, 0.0999, 0.0251, -0.0037, -0.0495, -0.0703, -0.0219, -0.0261, -0.0304, -0.0204, -0.0372, 0.0355, 0.0131, -0.0093, -0.0099, -0.0069, -0.0034, -0.0065, -0.0208, -0.0231, -0.0117, -0.0211, -0.0243, 0.0046, -0.0107, -0.0070, 0.0123, 0.0230, 0.0152, 0.0164, 0.0412, 0.0619, 0.0858, 0.0862, -0.0056, 0.0125, 0.0182, 0.0347, 0.0388, 0.0456, 0.0407, -0.0249, -0.0460, 0.0206, 0.0299, 0.0253, 0.0207, 0.0177, 0.0238, 0.0253, 0.0030, 0.0042, 0.0020, -0.0081, -0.0136, -0.0290, -0.0042, 0.0122, 0.0051, 0.0107, 0.0228, 0.0211, -0.0068, -0.0436, -0.0299, -0.0078, -0.0779, -0.1157, -0.0679, 0.0172, 0.0150, -0.0051, 0.0081, -0.0512, -0.0616, 0.0576, 0.0799, 0.0803, 0.0336, 0.0001, -0.0298, -0.0747, -0.0115, -0.0101, -0.0170, -0.0050, 0.0174, -0.0290, -0.0601, -0.0150, 0.0121, 0.0165, 0.0230, 0.0028, -0.0317, -0.0165, 0.0356, 0.0451, 0.0120, 0.0321, 0.0084, -0.0058, 0.0122, 0.1935, 0.1802, 0.2195, 0.2410, 0.2201, 0.1915, 0.1840, 0.1935, 0.2213, 0.2079, 0.1858, 0.1974, 0.2239, 0.2173, 0.1840, 0.2120, 0.4912, 0.4777, 0.4607, 0.4395, 0.4426, 0.4388, 0.4416, 0.4345, 0.4239, 0.4331, 0.4522, 0.4423, 0.4475, 0.4387, 0.4525, 0.4446 }, .lsp22_2 = { 0.0712, 0.1830, 0.4167, 0.6669, 0.8738, 1.0696, 1.2555, 1.4426, 1.6427, 1.8138, 1.9966, 2.1925, 2.3872, 2.5748, 2.7713, 2.9597, 0.1894, 0.3942, 0.5418, 0.6747, 0.7517, 0.8763, 1.1189, 1.3072, 1.5011, 1.6790, 1.8342, 2.0781, 2.2929, 2.4566, 2.6613, 2.9204, 0.1767, 0.3403, 0.5173, 0.7055, 0.8899, 1.0696, 1.2302, 1.4111, 1.5989, 1.7751, 1.9618, 2.1544, 2.3454, 2.5356, 2.7362, 2.9315, 0.1240, 0.2361, 0.4423, 0.6326, 0.7729, 0.9387, 1.1142, 1.2847, 1.4746, 1.7126, 1.9482, 
2.1642, 2.3536, 2.5506, 2.7593, 2.9197, 0.1213, 0.2782, 0.5011, 0.6910, 0.8564, 1.0462, 1.2315, 1.4232, 1.6178, 1.8028, 1.9813, 2.1766, 2.3670, 2.5591, 2.7475, 2.9403, 0.1382, 0.2995, 0.4693, 0.5874, 0.6929, 0.8102, 1.0094, 1.2960, 1.5511, 1.7607, 1.9699, 2.1680, 2.3367, 2.5459, 2.7370, 2.9105, 0.1428, 0.2690, 0.3713, 0.4757, 0.6664, 0.9019, 1.1276, 1.3674, 1.5471, 1.6695, 1.8261, 2.0572, 2.2753, 2.4963, 2.7187, 2.9114, 0.1669, 0.3085, 0.4489, 0.5724, 0.6934, 0.8465, 0.9680, 1.1641, 1.4320, 1.6841, 1.8977, 2.1061, 2.3118, 2.5152, 2.7329, 2.9274, 0.1128, 0.2709, 0.4803, 0.6878, 0.8673, 1.0693, 1.2749, 1.4657, 1.6650, 1.8434, 2.0339, 2.2300, 2.4003, 2.5951, 2.7762, 2.9465, 0.1201, 0.2345, 0.4021, 0.6379, 0.8651, 1.0256, 1.1630, 1.3250, 1.5395, 1.7808, 2.0011, 2.1997, 2.3618, 2.5505, 2.7561, 2.9351, 0.2575, 0.4163, 0.5081, 0.6484, 0.8570, 1.0832, 1.2732, 1.3933, 1.5497, 1.7725, 1.9945, 2.2098, 2.3514, 2.5216, 2.7146, 2.8969, 0.1367, 0.2656, 0.4470, 0.6398, 0.8146, 1.0125, 1.2142, 1.3960, 1.5558, 1.7338, 1.9465, 2.1769, 2.4031, 2.5746, 2.7335, 2.9046, 0.0868, 0.1723, 0.2785, 0.5071, 0.7732, 1.0024, 1.1924, 1.4220, 1.6149, 1.8064, 1.9951, 2.1935, 2.3777, 2.5748, 2.7661, 2.9488, 0.1428, 0.2592, 0.3875, 0.5810, 0.7513, 0.9334, 1.1096, 1.3565, 1.5869, 1.7788, 1.9036, 2.0893, 2.3332, 2.5289, 2.7204, 2.9053, 0.2313, 0.4066, 0.4960, 0.5853, 0.7799, 0.9201, 1.1365, 1.3499, 1.5119, 1.7641, 1.9095, 2.0911, 2.2653, 2.4587, 2.7010, 2.8900, 0.1927, 0.3424, 0.4682, 0.6035, 0.7330, 0.8492, 1.0477, 1.3083, 1.5602, 1.6945, 1.7806, 2.0066, 2.2566, 2.4864, 2.7021, 2.9180, 0.0962, 0.1933, 0.3968, 0.6077, 0.8083, 1.0224, 1.2307, 1.4344, 1.6350, 1.8173, 2.0024, 2.1894, 2.3812, 2.5648, 2.7535, 2.9483, 0.1469, 0.2679, 0.4272, 0.6080, 0.7949, 0.9247, 1.0741, 1.2722, 1.5144, 1.7679, 2.0030, 2.1944, 2.3890, 2.5928, 2.8116, 2.9555, 0.1618, 0.3917, 0.6111, 0.7511, 0.8325, 1.0010, 1.2397, 1.4147, 1.5764, 1.7359, 1.9300, 2.1325, 2.3096, 2.5480, 2.7725, 2.9697, 0.1561, 0.2634, 0.4062, 0.6139, 0.8059, 
0.9618, 1.0948, 1.3179, 1.5846, 1.7622, 1.9399, 2.1476, 2.3330, 2.5232, 2.7412, 2.9554, 0.1076, 0.2320, 0.3977, 0.5798, 0.7707, 0.9975, 1.1884, 1.3793, 1.6059, 1.8038, 1.9928, 2.1942, 2.3881, 2.5742, 2.7717, 2.9547, 0.1360, 0.2493, 0.3827, 0.5644, 0.7384, 0.9087, 1.0865, 1.2902, 1.5185, 1.7246, 1.9170, 2.1175, 2.3324, 2.5442, 2.7441, 2.9437, 0.1684, 0.2990, 0.4406, 0.5834, 0.7305, 0.9028, 1.0801, 1.2756, 1.4646, 1.6514, 1.8346, 2.0493, 2.2594, 2.4765, 2.6985, 2.9089, 0.1145, 0.2295, 0.3421, 0.5032, 0.7007, 0.9057, 1.0830, 1.2733, 1.4885, 1.6897, 1.8933, 2.1128, 2.3188, 2.5271, 2.7284, 2.9266, 0.1705, 0.3815, 0.6120, 0.7964, 0.9342, 1.0926, 1.2741, 1.4645, 1.6552, 1.8040, 1.9778, 2.1931, 2.3836, 2.5827, 2.7905, 2.9494, 0.1284, 0.2622, 0.4714, 0.6559, 0.8004, 1.0005, 1.1416, 1.3163, 1.5773, 1.8144, 1.9947, 2.2001, 2.3836, 2.5710, 2.7447, 2.9262, 0.1164, 0.2882, 0.5349, 0.7310, 0.8483, 0.9729, 1.1331, 1.3350, 1.5307, 1.7306, 1.9409, 2.1275, 2.3229, 2.5358, 2.7455, 2.9447, 0.1159, 0.2646, 0.4677, 0.6375, 0.7771, 0.9557, 1.1398, 1.3514, 1.5717, 1.7512, 1.9337, 2.1323, 2.3272, 2.5409, 2.7377, 2.9212, 0.1080, 0.2143, 0.3475, 0.5307, 0.7358, 0.9681, 1.1489, 1.3289, 1.5553, 1.7664, 1.9696, 2.1780, 2.3676, 2.5568, 2.7493, 2.9347, 0.1331, 0.2430, 0.3879, 0.5092, 0.6324, 0.8119, 1.0327, 1.2657, 1.4999, 1.7107, 1.9178, 2.1272, 2.3296, 2.5340, 2.7372, 2.9353, 0.1557, 0.2873, 0.4558, 0.6548, 0.8472, 1.0106, 1.1480, 1.3281, 1.5856, 1.7740, 1.9564, 2.1651, 2.3295, 2.5207, 2.7005, 2.9151, 0.1397, 0.2761, 0.4533, 0.6374, 0.7510, 0.8767, 1.0408, 1.2909, 1.5368, 1.7560, 1.9424, 2.1332, 2.3210, 2.5116, 2.6924, 2.8886, 0.0945, 0.1653, 0.3601, 0.6129, 0.8378, 1.0333, 1.2417, 1.4539, 1.6507, 1.8304, 2.0286, 2.2157, 2.3975, 2.5865, 2.7721, 2.9426, 0.1892, 0.3863, 0.4896, 0.5909, 0.7294, 0.9483, 1.1575, 1.3542, 1.4796, 1.6535, 1.9070, 2.1435, 2.3281, 2.4967, 2.7039, 2.9222, 0.1614, 0.3129, 0.5086, 0.7048, 0.8730, 1.0239, 1.1905, 1.3799, 1.5697, 1.7503, 1.9103, 2.1115, 2.3235, 2.5234, 2.6973, 
2.8957, 0.1199, 0.2590, 0.4273, 0.5935, 0.7542, 0.9625, 1.1225, 1.2998, 1.5361, 1.7102, 1.9097, 2.1269, 2.3157, 2.5304, 2.7212, 2.9175, 0.1087, 0.2373, 0.4261, 0.6277, 0.8092, 0.9884, 1.1954, 1.4077, 1.6048, 1.7799, 1.9693, 2.1662, 2.3426, 2.5501, 2.7459, 2.9257, 0.1262, 0.2216, 0.3857, 0.5799, 0.7148, 0.8610, 1.0752, 1.3306, 1.5549, 1.7605, 1.9727, 2.1580, 2.3612, 2.5602, 2.7554, 2.9372, 0.1445, 0.2832, 0.4469, 0.6283, 0.7991, 0.9796, 1.1504, 1.3323, 1.5313, 1.7140, 1.8968, 2.0990, 2.2826, 2.4903, 2.7003, 2.9031, 0.1647, 0.4068, 0.5428, 0.6539, 0.7682, 0.8479, 0.9372, 1.1691, 1.4776, 1.7314, 1.9071, 2.0918, 2.2774, 2.5029, 2.7152, 2.9221, 0.1274, 0.3052, 0.5238, 0.7280, 0.9229, 1.1211, 1.3071, 1.4784, 1.6564, 1.8235, 2.0028, 2.1999, 2.3763, 2.5608, 2.7510, 2.9356, 0.1076, 0.2195, 0.4815, 0.6873, 0.8241, 0.9443, 1.1066, 1.3687, 1.6087, 1.8105, 1.9857, 2.1486, 2.3505, 2.5854, 2.7785, 2.9376, 0.1755, 0.3089, 0.4695, 0.6648, 0.8315, 1.0202, 1.1774, 1.3554, 1.5393, 1.7141, 1.9247, 2.1284, 2.2983, 2.4975, 2.7296, 2.9401, 0.1636, 0.3166, 0.4594, 0.6199, 0.8161, 0.9879, 1.1738, 1.3642, 1.5680, 1.7633, 1.9598, 2.1695, 2.3692, 2.5846, 2.7809, 2.9563, 0.1219, 0.2662, 0.4620, 0.6491, 0.8353, 1.0150, 1.2065, 1.3944, 1.5785, 1.7631, 1.9389, 2.1434, 2.3400, 2.5316, 2.7359, 2.9513, 0.1072, 0.2258, 0.3968, 0.5642, 0.7222, 0.9367, 1.1458, 1.3347, 1.5424, 1.7373, 1.9303, 2.1432, 2.3451, 2.5415, 2.7444, 2.9394, 0.1393, 0.2950, 0.4724, 0.6407, 0.8034, 1.0031, 1.1712, 1.3552, 1.5519, 1.7411, 1.9198, 2.1160, 2.3238, 2.5119, 2.7134, 2.9205, 0.1358, 0.2613, 0.4239, 0.5991, 0.7643, 0.9379, 1.1213, 1.3115, 1.5067, 1.7031, 1.8768, 2.0836, 2.3092, 2.5134, 2.7237, 2.9286, 0.1267, 0.2695, 0.4524, 0.6591, 0.8396, 1.0173, 1.2183, 1.4205, 1.6306, 1.8162, 2.0106, 2.2082, 2.3773, 2.5787, 2.7551, 2.9387, 0.1314, 0.2529, 0.3837, 0.5494, 0.7446, 0.9097, 1.0489, 1.2385, 1.4691, 1.7170, 1.9600, 2.1770, 2.3594, 2.5356, 2.7215, 2.9088, 0.1538, 0.2931, 0.4449, 0.6041, 0.7959, 0.9666, 1.1355, 1.3214, 1.5150, 
1.7230, 1.9433, 2.1408, 2.3459, 2.5476, 2.7273, 2.9330, 0.1771, 0.2834, 0.4136, 0.5856, 0.7516, 0.9363, 1.0596, 1.2462, 1.4737, 1.6627, 1.8810, 2.1150, 2.3202, 2.5274, 2.7403, 2.9490, 0.1248, 0.2494, 0.4397, 0.6352, 0.8226, 1.0015, 1.1799, 1.3458, 1.5654, 1.8228, 2.0646, 2.2550, 2.4161, 2.5964, 2.7675, 2.9383, 0.0933, 0.1993, 0.3105, 0.4371, 0.6417, 0.8935, 1.1244, 1.3508, 1.5649, 1.7595, 1.9581, 2.1648, 2.3639, 2.5569, 2.7573, 2.9468, 0.1794, 0.3229, 0.4758, 0.6238, 0.7821, 0.9640, 1.1205, 1.3116, 1.5054, 1.6803, 1.8658, 2.0651, 2.2793, 2.4856, 2.6867, 2.9105, 0.1252, 0.2397, 0.3844, 0.5398, 0.7044, 0.8799, 1.0526, 1.2270, 1.4269, 1.6412, 1.8532, 2.0784, 2.2957, 2.5051, 2.7139, 2.9210, 0.1391, 0.3494, 0.5738, 0.8024, 1.0098, 1.2094, 1.3830, 1.5509, 1.7222, 1.8782, 2.0604, 2.2479, 2.4154, 2.5968, 2.7767, 2.9450, 0.1122, 0.2180, 0.4175, 0.6074, 0.7559, 0.9465, 1.1513, 1.3340, 1.5215, 1.7491, 1.9911, 2.1894, 2.3433, 2.5377, 2.7380, 2.9183, 0.1595, 0.3029, 0.4842, 0.6324, 0.7874, 0.9814, 1.1992, 1.3554, 1.5017, 1.7274, 1.9168, 2.0853, 2.2964, 2.5300, 2.7187, 2.9041, 0.1350, 0.2747, 0.4791, 0.6638, 0.8050, 0.9644, 1.1238, 1.2987, 1.4844, 1.6754, 1.8778, 2.0987, 2.3279, 2.5424, 2.7410, 2.9356, 0.0914, 0.1727, 0.3143, 0.5124, 0.7123, 0.9323, 1.1706, 1.3821, 1.5864, 1.7828, 1.9701, 2.1560, 2.3445, 2.5486, 2.7433, 2.9372, 0.1222, 0.2359, 0.3931, 0.5912, 0.7776, 0.9505, 1.1623, 1.3723, 1.5484, 1.7316, 1.9321, 2.1283, 2.3148, 2.5269, 2.7299, 2.9213, 0.2089, 0.3872, 0.5090, 0.6413, 0.7967, 1.0226, 1.1897, 1.3908, 1.5954, 1.7202, 1.8614, 2.1030, 2.2973, 2.5079, 2.7491, 2.8944, 0.1288, 0.2423, 0.4108, 0.6062, 0.7688, 0.9188, 1.0876, 1.2866, 1.4897, 1.6910, 1.9219, 2.1076, 2.2805, 2.5023, 2.7155, 2.9203, 0.0192, 0.0462, 0.0128, 0.0054, -0.0156, -0.0118, -0.0135, 0.0030, -0.0120, 0.0031, 0.0240, -0.0451, -0.0439, -0.0432, -0.0527, -0.0207, 0.0253, 0.0084, -0.0305, -0.0144, 0.0046, -0.0378, -0.0467, -0.0102, 0.0280, 0.0540, 0.0151, 0.0437, 0.0141, -0.0257, -0.0058, 0.0073, 0.0107, 
0.0054, 0.0371, -0.0105, 0.0165, -0.0143, 0.0148, 0.0382, -0.0054, -0.0284, 0.0001, -0.0218, 0.0258, 0.0517, 0.0157, -0.0032, -0.0190, 0.0343, 0.0576, 0.0346, 0.0392, -0.0158, -0.0323, -0.0578, -0.0617, -0.0242, -0.0144, 0.0188, 0.0249, 0.0021, -0.0422, -0.0420, 0.0750, 0.0762, 0.0325, -0.0066, 0.0332, 0.0376, 0.0388, 0.0630, 0.0525, 0.0196, 0.0051, -0.0484, -0.0322, 0.0059, 0.0132, 0.0079, 0.0237, 0.0774, 0.0697, 0.0184, -0.0321, -0.0327, 0.0274, 0.0284, 0.0057, 0.0289, 0.0478, 0.0142, -0.0053, 0.0114, 0.0292, -0.0032, -0.0111, -0.0389, 0.0282, 0.0613, 0.0200, -0.0006, 0.0111, 0.0048, 0.0273, 0.0017, -0.0369, 0.0655, 0.0758, 0.0555, 0.0238, -0.0024, -0.0100, -0.0419, -0.0696, -0.0158, -0.0479, -0.0744, -0.0356, -0.0245, -0.0400, -0.0112, 0.0134, 0.0001, -0.0422, -0.0514, -0.0081, 0.0083, -0.0151, 0.0323, -0.0001, -0.0444, -0.0406, -0.0214, -0.0050, -0.0235, -0.0205, -0.0264, -0.0324, 0.0334, 0.0392, 0.0265, 0.0289, 0.0180, 0.0493, 0.0227, 0.0194, 0.0365, 0.0544, 0.0674, 0.0559, 0.0732, 0.0911, 0.0942, 0.0735, 0.0174, -0.0113, -0.0553, -0.0665, -0.0227, -0.0259, -0.0266, -0.0239, -0.0379, 0.0329, 0.0173, -0.0210, -0.0114, -0.0063, 0.0060, -0.0089, -0.0198, -0.0282, -0.0080, -0.0179, -0.0290, 0.0046, -0.0126, -0.0066, 0.0350, 0.0532, 0.0235, 0.0198, 0.0212, 0.0449, 0.0681, 0.0677, -0.0049, 0.0086, 0.0120, 0.0356, 0.0454, 0.0592, 0.0449, -0.0271, -0.0510, -0.0110, 0.0234, 0.0203, 0.0243, 0.0242, 0.0133, 0.0098, 0.0040, 0.0024, -0.0005, -0.0075, -0.0126, -0.0393, -0.0052, 0.0165, 0.0016, -0.0193, 0.0239, 0.0336, 0.0029, -0.0586, -0.0539, -0.0094, -0.0664, -0.0898, -0.0540, -0.0066, 0.0134, -0.0074, 0.0067, -0.0521, -0.0431, 0.0104, 0.0690, 0.0663, 0.0197, -0.0017, -0.0518, -0.0597, -0.0171, -0.0054, -0.0140, -0.0080, 0.0172, -0.0362, -0.0713, -0.0310, 0.0096, 0.0243, 0.0381, -0.0062, -0.0392, -0.0281, 0.0386, 0.0461, 0.0069, 0.0384, 0.0080, -0.0141, 0.0171, 0.3368, 0.3128, 0.3304, 0.3392, 0.3185, 0.3037, 0.2789, 0.2692, 0.2779, 0.2796, 0.2891, 0.2643, 0.2647, 0.2593, 
0.2927, 0.3283, 0.4978, 0.4988, 0.4969, 0.4997, 0.4957, 0.4985, 0.4970, 0.4978, 0.4938, 0.4951, 0.4994, 0.4971, 0.4981, 0.4983, 0.4967, 0.4789 }, .lsp44 = { 0.0927, 0.2291, 0.4059, 0.5779, 0.7288, 0.8821, 1.0377, 1.1915, 1.3433, 1.4931, 1.6475, 1.7989, 1.9381, 2.0858, 2.2321, 2.3765, 2.5187, 2.6530, 2.7895, 2.9354, 0.0944, 0.1974, 0.3046, 0.4714, 0.6116, 0.7829, 0.9027, 1.0375, 1.1869, 1.3488, 1.5036, 1.6781, 1.8276, 1.9983, 2.1449, 2.3089, 2.4534, 2.6113, 2.7553, 2.9062, 0.1168, 0.2843, 0.4907, 0.6706, 0.8100, 0.9417, 1.0753, 1.2014, 1.3151, 1.4496, 1.5832, 1.7379, 1.8642, 2.0230, 2.1681, 2.3250, 2.4676, 2.6242, 2.7602, 2.9066, 0.1353, 0.2335, 0.3370, 0.4380, 0.5819, 0.7353, 0.8671, 1.0160, 1.1435, 1.2977, 1.4860, 1.6739, 1.8412, 2.0028, 2.1537, 2.3124, 2.4741, 2.6272, 2.7862, 2.9536, 0.1003, 0.2226, 0.3584, 0.4971, 0.6291, 0.7710, 0.9157, 1.0669, 1.2143, 1.3624, 1.5104, 1.6681, 1.8164, 1.9823, 2.1394, 2.3082, 2.4677, 2.6306, 2.7909, 2.9382, 0.1056, 0.2027, 0.2956, 0.4005, 0.5215, 0.6708, 0.8545, 1.0557, 1.2344, 1.4023, 1.5676, 1.7278, 1.8808, 2.0381, 2.1846, 2.3376, 2.4887, 2.6377, 2.7878, 2.9504, 0.1015, 0.2462, 0.4122, 0.5783, 0.7233, 0.8833, 1.0377, 1.1903, 1.3341, 1.4727, 1.6138, 1.7582, 1.8912, 2.0370, 2.1701, 2.3125, 2.4500, 2.6006, 2.7507, 2.9166, 0.1787, 0.2418, 0.3265, 0.5379, 0.6584, 0.7681, 0.9545, 1.1050, 1.2125, 1.3528, 1.4763, 1.6705, 1.8136, 1.9594, 2.0936, 2.2724, 2.4394, 2.5919, 2.7037, 2.8747, 0.0859, 0.1600, 0.2980, 0.4933, 0.6696, 0.8285, 0.9958, 1.1545, 1.3107, 1.4591, 1.6127, 1.7652, 1.9143, 2.0680, 2.2171, 2.3643, 2.5141, 2.6611, 2.8143, 2.9691, 0.0910, 0.2110, 0.3364, 0.4718, 0.5856, 0.7298, 0.8910, 1.0514, 1.1988, 1.3572, 1.5178, 1.6861, 1.8399, 2.0099, 2.1639, 2.3225, 2.4774, 2.6321, 2.7863, 2.9412, 0.1904, 0.2874, 0.3681, 0.4981, 0.6248, 0.7880, 0.9121, 1.0750, 1.2185, 1.3809, 1.5296, 1.7007, 1.8592, 2.0470, 2.1913, 2.3250, 2.4519, 2.5984, 2.7408, 2.9023, 0.0917, 0.2067, 0.3246, 0.4961, 0.6310, 0.8024, 0.9438, 1.1008, 1.2362, 1.3892, 
1.5407, 1.7033, 1.8427, 2.0061, 2.1498, 2.3117, 2.4550, 2.6053, 2.7462, 2.9029, 0.0989, 0.2193, 0.3756, 0.5410, 0.6929, 0.8368, 0.9801, 1.1250, 1.2677, 1.4184, 1.5677, 1.7292, 1.8770, 2.0311, 2.1803, 2.3306, 2.4836, 2.6339, 2.7943, 2.9549, 0.0861, 0.1943, 0.3057, 0.4867, 0.6194, 0.7592, 0.9184, 1.1052, 1.2486, 1.4064, 1.5609, 1.7273, 1.8703, 2.0291, 2.1686, 2.3225, 2.4628, 2.6115, 2.7471, 2.9005, 0.0932, 0.2110, 0.3737, 0.5479, 0.7120, 0.8570, 0.9975, 1.1364, 1.2772, 1.4220, 1.5612, 1.7089, 1.8410, 1.9827, 2.1263, 2.2859, 2.4459, 2.6172, 2.7788, 2.9395, 0.1193, 0.2341, 0.3523, 0.5029, 0.6437, 0.7803, 0.9367, 1.1007, 1.2392, 1.3869, 1.5425, 1.7168, 1.8709, 2.0248, 2.1584, 2.2949, 2.4308, 2.5823, 2.7235, 2.9034, 0.0834, 0.1988, 0.3557, 0.5261, 0.6767, 0.8427, 1.0029, 1.1683, 1.3138, 1.4527, 1.6046, 1.7583, 1.9011, 2.0517, 2.1928, 2.3397, 2.4839, 2.6291, 2.7771, 2.9329, 0.0938, 0.1967, 0.3213, 0.4675, 0.6068, 0.7664, 0.9418, 1.1120, 1.2535, 1.3932, 1.5243, 1.6801, 1.8346, 1.9931, 2.1376, 2.3035, 2.4636, 2.6244, 2.7829, 2.9371, 0.1017, 0.2552, 0.4327, 0.6017, 0.7467, 0.8797, 1.0097, 1.1442, 1.2628, 1.4049, 1.5541, 1.7090, 1.8461, 1.9982, 2.1486, 2.3029, 2.4513, 2.6075, 2.7594, 2.9209, 0.1031, 0.2295, 0.3747, 0.5122, 0.6596, 0.7935, 0.9345, 1.1050, 1.2384, 1.3543, 1.4739, 1.6136, 1.7447, 1.8914, 2.0434, 2.1916, 2.3557, 2.5396, 2.7419, 2.9401, 0.1007, 0.2374, 0.3715, 0.5173, 0.6465, 0.8069, 0.9553, 1.1145, 1.2594, 1.4143, 1.5617, 1.7166, 1.8457, 2.0012, 2.1462, 2.2864, 2.4258, 2.5910, 2.7372, 2.9018, 0.0808, 0.1726, 0.2849, 0.4592, 0.6118, 0.7853, 0.9588, 1.1256, 1.2751, 1.4392, 1.5898, 1.7514, 1.8977, 2.0554, 2.1937, 2.3430, 2.4831, 2.6249, 2.7601, 2.9155, 0.1669, 0.2574, 0.3694, 0.5569, 0.6773, 0.8061, 1.0160, 1.1667, 1.2791, 1.4041, 1.5452, 1.7207, 1.8524, 2.0038, 2.1414, 2.3338, 2.4747, 2.6157, 2.7303, 2.8848, 0.1598, 0.2521, 0.3416, 0.5149, 0.6703, 0.7941, 0.9408, 1.1164, 1.2017, 1.3293, 1.4908, 1.6783, 1.8438, 1.9927, 2.1149, 2.2698, 2.4420, 2.6193, 2.7583, 2.9103, 
0.0902, 0.1978, 0.3265, 0.4578, 0.5878, 0.7439, 0.9110, 1.0906, 1.2556, 1.4125, 1.5688, 1.7295, 1.8829, 2.0472, 2.2058, 2.3537, 2.5075, 2.6548, 2.8058, 2.9538, 0.0818, 0.1695, 0.2794, 0.4470, 0.6069, 0.7641, 0.9313, 1.0946, 1.2411, 1.4072, 1.5640, 1.7186, 1.8651, 2.0254, 2.1726, 2.3286, 2.4784, 2.6287, 2.7750, 2.9339, 0.1980, 0.3134, 0.4099, 0.4975, 0.6491, 0.8376, 0.9441, 1.0298, 1.1795, 1.3866, 1.5784, 1.7209, 1.8137, 1.9271, 2.0863, 2.2930, 2.4696, 2.6184, 2.7587, 2.9251, 0.1338, 0.2341, 0.3566, 0.4797, 0.6129, 0.7580, 0.9093, 1.0491, 1.1911, 1.3313, 1.4841, 1.6503, 1.8035, 1.9685, 2.1128, 2.2694, 2.4093, 2.5728, 2.7206, 2.8994, 0.0937, 0.2034, 0.3447, 0.5032, 0.6370, 0.7993, 0.9674, 1.1323, 1.2830, 1.4199, 1.5492, 1.7010, 1.8513, 2.0087, 2.1550, 2.3115, 2.4643, 2.6237, 2.7812, 2.9392, 0.1085, 0.2152, 0.3126, 0.4569, 0.5718, 0.7213, 0.8837, 1.0604, 1.2053, 1.3755, 1.5397, 1.7001, 1.8409, 2.0039, 2.1498, 2.3080, 2.4535, 2.6063, 2.7505, 2.9110, 0.0562, 0.2066, 0.4034, 0.5490, 0.6682, 0.7924, 0.9495, 1.0800, 1.1869, 1.3156, 1.4834, 1.6619, 1.8404, 2.0199, 2.1509, 2.2755, 2.4072, 2.5580, 2.6993, 2.8913, 0.0939, 0.2303, 0.3742, 0.5260, 0.6662, 0.8294, 0.9769, 1.1315, 1.2792, 1.4153, 1.5436, 1.6701, 1.8215, 1.9920, 2.1310, 2.3005, 2.4534, 2.5786, 2.7204, 2.9068, 0.1005, 0.2442, 0.3898, 0.5398, 0.6958, 0.8474, 1.0008, 1.1556, 1.3020, 1.4456, 1.5954, 1.7470, 1.8922, 2.0500, 2.2019, 2.3492, 2.4963, 2.6412, 2.7890, 2.9423, 0.1022, 0.2031, 0.3213, 0.4402, 0.5637, 0.7117, 0.8673, 1.0242, 1.1727, 1.3206, 1.4846, 1.6465, 1.8015, 1.9655, 2.1233, 2.2873, 2.4464, 2.6074, 2.7685, 2.9409, 0.1985, 0.3497, 0.4622, 0.5982, 0.7489, 0.8752, 0.9925, 1.1679, 1.3288, 1.4606, 1.5820, 1.7492, 1.8922, 2.0511, 2.1780, 2.3373, 2.4760, 2.6233, 2.7466, 2.8978, 0.1284, 0.2433, 0.3630, 0.4852, 0.6117, 0.7460, 0.8904, 1.0360, 1.1738, 1.3142, 1.4696, 1.6185, 1.7719, 1.9318, 2.0961, 2.2697, 2.4408, 2.6046, 2.7681, 2.9451, 0.1042, 0.2286, 0.3598, 0.5064, 0.6438, 0.7899, 0.9350, 1.0891, 1.2323, 1.3807, 
1.5225, 1.6747, 1.8153, 1.9669, 2.1145, 2.2832, 2.4430, 2.6085, 2.7748, 2.9346, 0.0780, 0.1724, 0.2440, 0.3489, 0.5280, 0.7426, 0.9272, 1.0914, 1.2562, 1.4188, 1.5804, 1.7376, 1.8909, 2.0473, 2.1946, 2.3457, 2.4950, 2.6424, 2.7926, 2.9549, 0.1103, 0.2608, 0.4087, 0.5538, 0.6923, 0.8418, 0.9940, 1.1507, 1.2919, 1.4406, 1.5802, 1.7262, 1.8638, 2.0085, 2.1572, 2.2975, 2.4329, 2.5866, 2.7380, 2.9107, 0.1297, 0.2532, 0.4003, 0.5329, 0.6733, 0.7950, 0.9557, 1.0859, 1.2235, 1.3538, 1.5037, 1.6389, 1.7964, 1.9285, 2.0898, 2.2541, 2.4231, 2.5711, 2.6875, 2.8947, 0.0871, 0.1968, 0.3425, 0.4949, 0.6424, 0.7959, 0.9534, 1.1132, 1.2656, 1.4229, 1.5785, 1.7271, 1.8729, 2.0355, 2.1998, 2.3562, 2.5151, 2.6663, 2.8145, 2.9534, 0.1038, 0.2204, 0.3248, 0.4566, 0.5947, 0.7443, 0.8811, 1.0379, 1.2031, 1.3772, 1.5430, 1.7092, 1.8625, 2.0322, 2.1904, 2.3417, 2.4960, 2.6458, 2.7979, 2.9485, 0.1329, 0.2763, 0.3943, 0.5147, 0.6512, 0.8071, 0.9410, 1.0879, 1.2298, 1.3850, 1.5282, 1.6674, 1.8137, 1.9993, 2.1344, 2.2749, 2.4257, 2.5863, 2.7410, 2.9184, 0.1052, 0.2142, 0.3584, 0.5033, 0.6387, 0.7804, 0.9320, 1.0780, 1.2172, 1.3764, 1.5421, 1.6887, 1.8246, 1.9833, 2.1245, 2.2797, 2.4237, 2.5779, 2.7257, 2.9097, 0.1092, 0.2676, 0.4071, 0.5355, 0.6661, 0.8142, 0.9621, 1.1173, 1.2628, 1.4185, 1.5696, 1.7220, 1.8595, 2.0178, 2.1720, 2.3221, 2.4718, 2.6259, 2.7775, 2.9334, 0.0929, 0.2017, 0.3073, 0.4570, 0.5775, 0.7635, 0.9299, 1.0832, 1.2334, 1.3935, 1.5420, 1.7112, 1.8601, 2.0309, 2.1735, 2.3230, 2.4543, 2.6034, 2.7418, 2.8988, 0.0775, 0.2005, 0.3490, 0.5200, 0.6747, 0.8383, 0.9885, 1.1738, 1.3141, 1.4236, 1.5892, 1.7402, 1.8474, 2.0210, 2.1593, 2.2730, 2.4235, 2.5604, 2.7128, 2.9005, 0.1104, 0.2292, 0.3353, 0.4732, 0.6152, 0.7675, 0.9164, 1.0907, 1.2594, 1.4064, 1.5218, 1.6426, 1.8018, 1.9937, 2.1362, 2.2961, 2.4523, 2.6083, 2.7613, 2.9202, 0.0826, 0.2000, 0.3384, 0.5144, 0.6694, 0.8377, 0.9870, 1.1461, 1.2950, 1.4495, 1.5872, 1.7387, 1.8793, 2.0329, 2.1723, 2.3114, 2.4415, 2.5908, 2.7354, 2.9028, 
0.1063, 0.2268, 0.3442, 0.4735, 0.6116, 0.7507, 0.9028, 1.0768, 1.2426, 1.4052, 1.5566, 1.7015, 1.8243, 1.9742, 2.1276, 2.2824, 2.4262, 2.5953, 2.7627, 2.9290, 0.1150, 0.2814, 0.4543, 0.6095, 0.7373, 0.8592, 0.9908, 1.1108, 1.2339, 1.3590, 1.4864, 1.6168, 1.7392, 1.8752, 2.0212, 2.1688, 2.3128, 2.4869, 2.7019, 2.9239, 0.0948, 0.2074, 0.3433, 0.4943, 0.6346, 0.7645, 0.8809, 1.0610, 1.2307, 1.3487, 1.4655, 1.6186, 1.7534, 1.8859, 2.0486, 2.2200, 2.3835, 2.5581, 2.7565, 2.9502, 0.1062, 0.2239, 0.3683, 0.5197, 0.6704, 0.8184, 0.9642, 1.1127, 1.2556, 1.3976, 1.5405, 1.6940, 1.8375, 1.9888, 2.1377, 2.2980, 2.4555, 2.6184, 2.7849, 2.9452, 0.0888, 0.2005, 0.2847, 0.4322, 0.5763, 0.7577, 0.9262, 1.1095, 1.2719, 1.4331, 1.5843, 1.7452, 1.8845, 2.0385, 2.1805, 2.3345, 2.4750, 2.6217, 2.7555, 2.9013, 0.1713, 0.2617, 0.3868, 0.5859, 0.7073, 0.8535, 1.0593, 1.1778, 1.3109, 1.4508, 1.5910, 1.7463, 1.8911, 2.0651, 2.2035, 2.3355, 2.4947, 2.6440, 2.7424, 2.8943, 0.1346, 0.2549, 0.4089, 0.5488, 0.6949, 0.8394, 0.9810, 1.1145, 1.2528, 1.4044, 1.5423, 1.6872, 1.8274, 1.9726, 2.1403, 2.2809, 2.4128, 2.5564, 2.6887, 2.8895, 0.0776, 0.1621, 0.2553, 0.4191, 0.5988, 0.7921, 0.9651, 1.1350, 1.2930, 1.4475, 1.6011, 1.7585, 1.9068, 2.0638, 2.2102, 2.3594, 2.5096, 2.6581, 2.8099, 2.9654, 0.0864, 0.1778, 0.2854, 0.4235, 0.5568, 0.7220, 0.8963, 1.0609, 1.2217, 1.3830, 1.5422, 1.7018, 1.8551, 2.0206, 2.1783, 2.3328, 2.4869, 2.6366, 2.7923, 2.9539, 0.1144, 0.2576, 0.4186, 0.5594, 0.6875, 0.8221, 0.9598, 1.0944, 1.2273, 1.3713, 1.5152, 1.6628, 1.8070, 1.9525, 2.0965, 2.2535, 2.4132, 2.5725, 2.7250, 2.9150, 0.1079, 0.2221, 0.3334, 0.4845, 0.6083, 0.7516, 0.9018, 1.0594, 1.2060, 1.3673, 1.5212, 1.6880, 1.8208, 1.9831, 2.1269, 2.2909, 2.4366, 2.6027, 2.7339, 2.8924, 0.0994, 0.2233, 0.3634, 0.5145, 0.6568, 0.8131, 0.9746, 1.1296, 1.2666, 1.4116, 1.5748, 1.7264, 1.8649, 2.0217, 2.1716, 2.3293, 2.4900, 2.6455, 2.7818, 2.9362, 0.1120, 0.2079, 0.3128, 0.4124, 0.5291, 0.6816, 0.8478, 1.0150, 1.1772, 1.3456, 
1.5208, 1.6882, 1.8458, 2.0078, 2.1627, 2.3198, 2.4733, 2.6251, 2.7796, 2.9489, 0.0853, 0.2030, 0.3669, 0.5326, 0.6678, 0.8086, 0.9526, 1.1142, 1.2551, 1.4158, 1.5694, 1.7073, 1.8431, 1.9686, 2.1153, 2.2376, 2.3686, 2.5591, 2.7320, 2.9104, 0.0905, 0.2166, 0.3539, 0.5201, 0.6700, 0.8346, 0.9883, 1.1457, 1.2714, 1.3845, 1.5172, 1.6688, 1.8008, 1.9535, 2.1019, 2.2708, 2.4135, 2.5974, 2.7486, 2.9033, 0.0084, 0.0374, 0.0164, -0.0153, 0.0288, 0.0107, -0.0255, -0.0242, 0.0000, -0.0055, -0.0081, -0.0075, -0.0022, -0.0052, -0.0069, -0.0017, 0.0003, 0.0091, 0.0028, -0.0027, 0.0085, 0.0043, -0.0235, -0.0411, 0.0202, 0.0359, 0.0376, 0.0321, 0.0306, -0.0358, -0.0276, -0.0090, 0.0032, 0.0048, 0.0309, 0.0332, 0.0284, 0.0237, 0.0051, -0.0101, -0.0233, -0.0428, -0.0585, -0.0387, 0.0039, 0.0081, 0.0029, -0.0017, -0.0006, -0.0068, 0.0044, 0.0182, 0.0376, 0.0387, -0.0334, -0.0269, -0.0182, -0.0069, -0.0026, 0.0035, -0.0049, -0.0212, -0.0408, -0.0245, 0.0186, 0.0189, 0.0153, 0.0120, 0.0157, 0.0055, -0.0046, 0.0179, 0.0284, -0.0032, -0.0261, -0.0205, -0.0039, 0.0174, 0.0299, 0.0207, 0.0012, -0.0056, 0.0010, 0.0141, -0.0119, 0.0190, 0.0315, 0.0033, -0.0128, 0.0300, 0.0328, 0.0308, 0.0353, 0.0266, 0.0066, -0.0328, -0.0273, 0.0054, 0.0145, 0.0175, 0.0015, -0.0171, 0.0062, -0.0164, 0.0045, -0.0071, 0.0025, 0.0278, 0.0283, 0.0117, -0.0026, -0.0285, -0.0408, -0.0366, -0.0059, -0.0208, -0.0354, -0.0334, -0.0263, -0.0064, 0.0072, -0.0006, -0.0235, -0.0037, -0.0307, -0.0294, -0.0163, -0.0197, -0.0235, 0.0192, 0.0013, -0.0219, -0.0123, -0.0004, -0.0081, -0.0096, -0.0123, -0.0101, 0.0021, 0.0151, 0.0106, 0.0151, 0.0292, 0.0033, 0.0283, 0.0124, 0.0058, -0.0017, -0.0038, 0.0152, 0.0141, 0.0132, 0.0178, 0.0157, 0.0073, 0.0176, 0.0141, 0.0097, -0.0092, -0.0163, -0.0230, -0.0134, -0.0099, -0.0147, 0.0040, -0.0183, -0.0175, -0.0080, -0.0083, -0.0290, -0.0417, -0.0398, -0.0269, -0.0199, -0.0143, -0.0053, -0.0099, -0.0054, -0.0199, -0.0219, -0.0170, 0.0107, 0.0194, 0.0035, 0.0437, 0.0406, 0.0215, 0.0120, 
0.0053, -0.0028, 0.0238, 0.0337, 0.0217, 0.0011, 0.0227, 0.0244, 0.0327, 0.0378, 0.0437, 0.0356, -0.0033, 0.0113, 0.0407, 0.0334, -0.0125, -0.0003, -0.0141, -0.0273, -0.0137, -0.0079, -0.0145, -0.0071, 0.0114, 0.0181, 0.0150, 0.0085, -0.0077, -0.0038, -0.0219, -0.0263, -0.0187, -0.0233, 0.0133, 0.0265, -0.0156, -0.0091, -0.0110, -0.0016, 0.0143, 0.0177, 0.0240, 0.0082, -0.0143, -0.0257, -0.0014, 0.0002, 0.0082, 0.0180, 0.0325, 0.0340, -0.0153, -0.0389, -0.0240, 0.0082, 0.0140, 0.0046, -0.0138, -0.0378, -0.0366, 0.0297, 0.0252, 0.0078, 0.0063, 0.0006, 0.0044, 0.0074, 0.0094, 0.0113, 0.0105, 0.0137, 0.0438, 0.0262, -0.0078, -0.0185, -0.0215, -0.0407, -0.0435, -0.0208, -0.0004, -0.0144, -0.0205, -0.0248, -0.0159, -0.0069, -0.0153, 0.0132, 0.0355, 0.0298, 0.0120, 0.0072, 0.0236, 0.0526, 0.0479, 0.0233, -0.0133, -0.0283, -0.0468, -0.0549, -0.0370, 0.0032, 0.0056, 0.0023, 0.0050, 0.0024, 0.0279, 0.0116, -0.0045, -0.0012, 0.0107, 0.0190, 0.0253, 0.0191, 0.0043, 0.0193, -0.0348, -0.0246, 0.0123, 0.0210, 0.0135, -0.0096, -0.0109, -0.0076, -0.0156, -0.0290, 0.0160, 0.0194, 0.0219, 0.0259, 0.0250, 0.0195, 0.4948, 0.4961, 0.4940, 0.4878, 0.4849, 0.4727, 0.4571, 0.4551, 0.4534, 0.4468, 0.4412, 0.4354, 0.4298, 0.4272, 0.4498, 0.4506, 0.4560, 0.4592, 0.4758, 0.4941, 0.2476, 0.1771, 0.1974, 0.1881, 0.1667, 0.1826, 0.2067, 0.2031, 0.1734, 0.1534, 0.1415, 0.1761, 0.1897, 0.1772, 0.1651, 0.1247, 0.1041, 0.1231, 0.1809, 0.2234 }, }; static const uint8_t tab7[][35] = { {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0}, {0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0}, {0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0}, {0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0}, {0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, 
{0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0}, {0,0,1,0,1,0,0,1,0,1,0,0,1,0,1,0,0,1,0,1,0,0,1,0,1,0,0,1,0,1,0,0,1,0,1}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0} }; static const uint8_t tab8[][5] = { {0, 0, 0, 1, 1}, {0, 1, 0, 0, 1}, {1, 1, 0, 0, 0}, {1, 0, 0, 1, 0}, {0, 0, 0, 1, 1}, {0, 1, 0, 0, 1}, {1, 1, 0, 0, 0}, {1, 0, 0, 1, 0}, {0, 0, 0, 1, 1}, {0, 1, 0, 0, 1}, {1, 1, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 1, 0, 1, 0} }; static const uint8_t tab9[][45] = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 },{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 },{ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },{ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 },{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 },{ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },{ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },{ 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0 },{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; static const uint8_t 
tab10[][25] = { {1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0}, {1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0}, {1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0}, {1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, {0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1}, {0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1}, {1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0}, {0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1} }; static const uint8_t tab11[][55] = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0 },{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, },{ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },{ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, },{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, },{ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },{ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },{ 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, } }; static const uint8_t tab12[][15] = { {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0}, {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0}, {0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0}, {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0}, {0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0}, {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1}, }; static const struct { int size; const uint8_t *tab; } tabs[] = { {0 , NULL}, {5 , &tab8 [0][0]},{5 , &tab8 [0][0]}, {15, &tab12[0][0]}, {5 , &tab8 [0][0]},{25, &tab10[0][0]}, {15, &tab12[0][0]}, {35, &tab7 [0][0]},{5 , &tab8 [0][0]}, {45, &tab9 [0][0]}, {25, &tab10[0][0]},{55, &tab11[0][0]}, {15, &tab12[0][0]} }; #endif /* AVCODEC_TWINVQ_DATA_H */
123linslouis-android-video-cutter
jni/libavcodec/twinvq_data.h
C
asf20
694,479
/*
 * ADX ADPCM codecs
 * Copyright (c) 2001,2003 BERO
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * SEGA CRI adx codecs.
 *
 * Reference documents:
 * http://ku-www.ss.titech.ac.jp/~yatsushi/adx.html
 * adx2wav & wav2adx http://www.geocities.co.jp/Playtown/2004/
 */

#ifndef AVCODEC_ADX_H
#define AVCODEC_ADX_H

/** Per-channel ADPCM predictor state: the two most recently decoded samples. */
typedef struct {
    int s1, s2;
} PREV;

/** Shared ADX codec context (sized for up to 2 channels). */
typedef struct {
    PREV prev[2];                 /**< predictor history, one entry per channel */
    int header_parsed;            /**< nonzero once the ADX stream header has been consumed */
    unsigned char dec_temp[18*2]; /**< input staging buffer; 18*2 suggests one 18-byte
                                       block per channel — confirm against the adx codec */
    int in_temp;                  /**< number of bytes currently held in dec_temp */
} ADXContext;

/* Fixed-point constants for the ADX prediction filter.
 * BASEVOL is 0x4000 == 1.0 in Q14; SCALE1/SCALE2 are presumably the two
 * predictor coefficients — verify against the decode/encode loop in adx code. */
#define BASEVOL 0x4000
#define SCALE1 0x7298
#define SCALE2 0x3350

#endif /* AVCODEC_ADX_H */
123linslouis-android-video-cutter
jni/libavcodec/adx.h
C
asf20
1,325
/*
 * VC3/DNxHD decoder.
 * Copyright (c) 2007 SmartJog S.A., Baptiste Coudurier <baptiste dot coudurier at smartjog dot com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

//#define TRACE
//#define DEBUG

#include "avcodec.h"
#include "get_bits.h"
#include "dnxhddata.h"
#include "dsputil.h"

/** Persistent DNxHD decoder state. */
typedef struct {
    AVCodecContext *avctx;
    AVFrame picture;
    GetBitContext gb;
    int cid;                            ///< compression id
    unsigned int width, height;
    unsigned int mb_width, mb_height;
    uint32_t mb_scan_index[68];         /* max for 1080p */
    int cur_field;                      ///< current interlaced field
    VLC ac_vlc, dc_vlc, run_vlc;
    int last_dc[3];                     ///< DC predictors, one per component
    DSPContext dsp;
    DECLARE_ALIGNED(16, DCTELEM, blocks)[8][64]; ///< coefficient buffers for one macroblock
    ScanTable scantable;
    const CIDEntry *cid_table;          ///< per-cid parameters; NULL until the first header is parsed
} DNXHDContext;

#define DNXHD_VLC_BITS 9
#define DNXHD_DC_VLC_BITS 7

/**
 * Decoder init: wire up the context and DSP helpers.
 * Bitstream-dependent state (VLC tables, scan table) is built lazily in
 * dnxhd_init_vlc() once the compression id is known from the first header.
 */
static av_cold int dnxhd_decode_init(AVCodecContext *avctx)
{
    DNXHDContext *ctx = avctx->priv_data;

    ctx->avctx = avctx;
    dsputil_init(&ctx->dsp, avctx);
    avctx->coded_frame = &ctx->picture;
    ctx->picture.type = FF_I_TYPE; // DNxHD is intra-only
    return 0;
}

/**
 * Look up the CID table for @p cid and build the AC/DC/run VLCs plus the
 * zigzag scan table. Runs only once (guarded by ctx->cid_table).
 * @return 0 on success, -1 if the compression id is unsupported.
 */
static int dnxhd_init_vlc(DNXHDContext *ctx, int cid)
{
    if (!ctx->cid_table) {
        int index;

        if ((index = ff_dnxhd_get_cid_table(cid)) < 0) {
            av_log(ctx->avctx, AV_LOG_ERROR, "unsupported cid %d\n", cid);
            return -1;
        }
        ctx->cid_table = &ff_dnxhd_cid_table[index];

        init_vlc(&ctx->ac_vlc, DNXHD_VLC_BITS, 257,
                 ctx->cid_table->ac_bits, 1, 1,
                 ctx->cid_table->ac_codes, 2, 2, 0);
        init_vlc(&ctx->dc_vlc, DNXHD_DC_VLC_BITS, ctx->cid_table->bit_depth+4,
                 ctx->cid_table->dc_bits, 1, 1,
                 ctx->cid_table->dc_codes, 1, 1, 0);
        init_vlc(&ctx->run_vlc, DNXHD_VLC_BITS, 62,
                 ctx->cid_table->run_bits, 1, 1,
                 ctx->cid_table->run_codes, 2, 2, 0);

        ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable, ff_zigzag_direct);
    }
    return 0;
}

/**
 * Parse the 0x280-byte DNxHD frame header.
 * Validates the 5-byte signature, reads frame geometry and compression id,
 * triggers VLC setup, and loads the per-row macroblock scan index table.
 * @param first_field nonzero when this is the first field of a frame
 * @return 0 on success, -1 on any validation failure.
 */
static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_size, int first_field)
{
    static const uint8_t header_prefix[] = { 0x00, 0x00, 0x02, 0x80, 0x01 };
    int i;

    if (buf_size < 0x280)
        return -1;

    if (memcmp(buf, header_prefix, 5)) {
        av_log(ctx->avctx, AV_LOG_ERROR, "error in header\n");
        return -1;
    }
    if (buf[5] & 2) { /* interlaced */
        ctx->cur_field = buf[5] & 1;
        ctx->picture.interlaced_frame = 1;
        ctx->picture.top_field_first = first_field ^ ctx->cur_field;
        av_log(ctx->avctx, AV_LOG_DEBUG, "interlaced %d, cur field %d\n", buf[5] & 3, ctx->cur_field);
    }

    ctx->height = AV_RB16(buf + 0x18);
    ctx->width = AV_RB16(buf + 0x1a);

    /* NOTE(review): "heigth" typo is in the original debug string; left as-is */
    dprintf(ctx->avctx, "width %d, heigth %d\n", ctx->width, ctx->height);

    if (buf[0x21] & 0x40) {
        av_log(ctx->avctx, AV_LOG_ERROR, "10 bit per component\n");
        return -1;
    }

    ctx->cid = AV_RB32(buf + 0x28);
    dprintf(ctx->avctx, "compression id %d\n", ctx->cid);

    if (dnxhd_init_vlc(ctx, ctx->cid) < 0)
        return -1;

    if (buf_size < ctx->cid_table->coding_unit_size) {
        av_log(ctx->avctx, AV_LOG_ERROR, "incorrect frame size\n");
        return -1;
    }

    ctx->mb_width = ctx->width>>4;
    ctx->mb_height = buf[0x16d];

    dprintf(ctx->avctx, "mb width %d, mb height %d\n", ctx->mb_width, ctx->mb_height);

    /* mb_height counts rows per field; recover full frame height when interlaced */
    if ((ctx->height+15)>>4 == ctx->mb_height && ctx->picture.interlaced_frame)
        ctx->height <<= 1;

    if (ctx->mb_height > 68 ||
        (ctx->mb_height<<ctx->picture.interlaced_frame) > (ctx->height+15)>>4) {
        av_log(ctx->avctx, AV_LOG_ERROR, "mb height too big: %d\n", ctx->mb_height);
        return -1;
    }

    /* byte offsets (relative to the end of the header) of each mb row */
    for (i = 0; i < ctx->mb_height; i++) {
        ctx->mb_scan_index[i] = AV_RB32(buf + 0x170 + (i<<2));
        dprintf(ctx->avctx, "mb scan index %d\n", ctx->mb_scan_index[i]);
        if (buf_size < ctx->mb_scan_index[i] + 0x280) {
            av_log(ctx->avctx, AV_LOG_ERROR, "invalid mb scan index\n");
            return -1;
        }
    }

    return 0;
}

/** Decode one DC delta: code length via VLC, then a signed residual of that length. */
static int dnxhd_decode_dc(DNXHDContext *ctx)
{
    int len;

    len = get_vlc2(&ctx->gb, ctx->dc_vlc.table, DNXHD_DC_VLC_BITS, 1);
    return len ? get_xbits(&ctx->gb, len) : 0;
}

/**
 * Decode and dequantize one 8x8 block of DCT coefficients into @p block
 * (stored in IDCT permutation order).
 * @param n      block index within the macroblock; n&2 selects chroma and
 *               hence the chroma weight matrix and DC predictor
 * @param qscale quantizer read from the macroblock header
 */
static void dnxhd_decode_dct_block(DNXHDContext *ctx, DCTELEM *block, int n, int qscale)
{
    int i, j, index, index2;
    int level, component, sign;
    const uint8_t *weigth_matrix; /* [sic] identifier spelling kept from original */

    if (n&2) {
        component = 1 + (n&1);
        weigth_matrix = ctx->cid_table->chroma_weight;
    } else {
        component = 0;
        weigth_matrix = ctx->cid_table->luma_weight;
    }

    /* DC is differentially coded per component */
    ctx->last_dc[component] += dnxhd_decode_dc(ctx);
    block[0] = ctx->last_dc[component];
    //av_log(ctx->avctx, AV_LOG_DEBUG, "dc %d\n", block[0]);
    for (i = 1; ; i++) {
        index = get_vlc2(&ctx->gb, ctx->ac_vlc.table, DNXHD_VLC_BITS, 2);
        //av_log(ctx->avctx, AV_LOG_DEBUG, "index %d\n", index);
        level = ctx->cid_table->ac_level[index];
        if (!level) { /* EOB */
            //av_log(ctx->avctx, AV_LOG_DEBUG, "EOB\n");
            return;
        }
        sign = get_sbits(&ctx->gb, 1);

        /* escape: read extra level bits */
        if (ctx->cid_table->ac_index_flag[index]) {
            level += get_bits(&ctx->gb, ctx->cid_table->index_bits)<<6;
        }

        /* run-of-zeros before this coefficient */
        if (ctx->cid_table->ac_run_flag[index]) {
            index2 = get_vlc2(&ctx->gb, ctx->run_vlc.table, DNXHD_VLC_BITS, 2);
            i += ctx->cid_table->run[index2];
        }

        if (i > 63) {
            av_log(ctx->avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", n, i);
            return;
        }

        j = ctx->scantable.permutated[i];
        //av_log(ctx->avctx, AV_LOG_DEBUG, "j %d\n", j);
        //av_log(ctx->avctx, AV_LOG_DEBUG, "level %d, weigth %d\n", level, weigth_matrix[i]);
        /* dequantize; rounding term is skipped for flat weights (8 resp. 32) */
        level = (2*level+1) * qscale * weigth_matrix[i];
        if (ctx->cid_table->bit_depth == 10) {
            if (weigth_matrix[i] != 8)
                level += 8;
            level >>= 4;
        } else {
            if (weigth_matrix[i] != 32)
                level += 32;
            level >>= 6;
        }
        //av_log(NULL, AV_LOG_DEBUG, "i %d, j %d, end level %d\n", i, j, level);
        block[j] = (level^sign) - sign; /* apply sign without a branch */
    }
}

/**
 * Decode one 16x16 macroblock at mb position (@p x, @p y): a 12-bit
 * qscale+flag header, then 8 coefficient blocks (4 luma, 2 Cb, 2 Cr — 4:2:2
 * per the idct_put layout below), IDCT'ed straight into the picture planes.
 */
static int dnxhd_decode_macroblock(DNXHDContext *ctx, int x, int y)
{
    int dct_linesize_luma = ctx->picture.linesize[0];
    int dct_linesize_chroma = ctx->picture.linesize[1];
    uint8_t *dest_y, *dest_u, *dest_v;
    int dct_offset;
    int qscale, i;

    qscale = get_bits(&ctx->gb, 11);
    skip_bits1(&ctx->gb);
    //av_log(ctx->avctx, AV_LOG_DEBUG, "qscale %d\n", qscale);

    for (i = 0; i < 8; i++) {
        ctx->dsp.clear_block(ctx->blocks[i]);
        dnxhd_decode_dct_block(ctx, ctx->blocks[i], i, qscale);
    }

    /* fields are interleaved in the frame buffer: double the stride */
    if (ctx->picture.interlaced_frame) {
        dct_linesize_luma <<= 1;
        dct_linesize_chroma <<= 1;
    }

    dest_y = ctx->picture.data[0] + ((y * dct_linesize_luma) << 4) + (x << 4);
    dest_u = ctx->picture.data[1] + ((y * dct_linesize_chroma) << 4) + (x << 3);
    dest_v = ctx->picture.data[2] + ((y * dct_linesize_chroma) << 4) + (x << 3);

    if (ctx->cur_field) { /* second field starts one original line down */
        dest_y += ctx->picture.linesize[0];
        dest_u += ctx->picture.linesize[1];
        dest_v += ctx->picture.linesize[2];
    }

    dct_offset = dct_linesize_luma << 3;
    ctx->dsp.idct_put(dest_y, dct_linesize_luma, ctx->blocks[0]);
    ctx->dsp.idct_put(dest_y + 8, dct_linesize_luma, ctx->blocks[1]);
    ctx->dsp.idct_put(dest_y + dct_offset, dct_linesize_luma, ctx->blocks[4]);
    ctx->dsp.idct_put(dest_y + dct_offset + 8, dct_linesize_luma, ctx->blocks[5]);

    if (!(ctx->avctx->flags & CODEC_FLAG_GRAY)) {
        dct_offset = dct_linesize_chroma << 3;
        ctx->dsp.idct_put(dest_u, dct_linesize_chroma, ctx->blocks[2]);
        ctx->dsp.idct_put(dest_v, dct_linesize_chroma, ctx->blocks[3]);
        ctx->dsp.idct_put(dest_u + dct_offset, dct_linesize_chroma, ctx->blocks[6]);
        ctx->dsp.idct_put(dest_v + dct_offset, dct_linesize_chroma, ctx->blocks[7]);
    }

    return 0;
}

/* Decode all macroblocks of one field/frame; each mb row restarts the
 * bitreader at its scan-index offset. (Continues past this chunk.) */
static int dnxhd_decode_macroblocks(DNXHDContext *ctx, const uint8_t *buf, int buf_size)
{
    int x, y;
    for (y = 0; y < ctx->mb_height; y++) {
        ctx->last_dc[0] = ctx->last_dc[1] = ctx->last_dc[2] = 1<<(ctx->cid_table->bit_depth+2); // for levels +2^(bitdepth-1)
        init_get_bits(&ctx->gb, buf + ctx->mb_scan_index[y], (buf_size - ctx->mb_scan_index[y]) << 3);
        for (x = 0; x < ctx->mb_width;
x++) { //START_TIMER; dnxhd_decode_macroblock(ctx, x, y); //STOP_TIMER("decode macroblock"); } } return 0; } static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; DNXHDContext *ctx = avctx->priv_data; AVFrame *picture = data; int first_field = 1; dprintf(avctx, "frame size %d\n", buf_size); decode_coding_unit: if (dnxhd_decode_header(ctx, buf, buf_size, first_field) < 0) return -1; if ((avctx->width || avctx->height) && (ctx->width != avctx->width || ctx->height != avctx->height)) { av_log(avctx, AV_LOG_WARNING, "frame size changed: %dx%d -> %dx%d\n", avctx->width, avctx->height, ctx->width, ctx->height); first_field = 1; } avctx->pix_fmt = PIX_FMT_YUV422P; if (avcodec_check_dimensions(avctx, ctx->width, ctx->height)) return -1; avcodec_set_dimensions(avctx, ctx->width, ctx->height); if (first_field) { if (ctx->picture.data[0]) avctx->release_buffer(avctx, &ctx->picture); if (avctx->get_buffer(avctx, &ctx->picture) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } } dnxhd_decode_macroblocks(ctx, buf + 0x280, buf_size - 0x280); if (first_field && ctx->picture.interlaced_frame) { buf += ctx->cid_table->coding_unit_size; buf_size -= ctx->cid_table->coding_unit_size; first_field = 0; goto decode_coding_unit; } *picture = ctx->picture; *data_size = sizeof(AVPicture); return buf_size; } static av_cold int dnxhd_decode_close(AVCodecContext *avctx) { DNXHDContext *ctx = avctx->priv_data; if (ctx->picture.data[0]) avctx->release_buffer(avctx, &ctx->picture); free_vlc(&ctx->ac_vlc); free_vlc(&ctx->dc_vlc); free_vlc(&ctx->run_vlc); return 0; } AVCodec dnxhd_decoder = { "dnxhd", AVMEDIA_TYPE_VIDEO, CODEC_ID_DNXHD, sizeof(DNXHDContext), dnxhd_decode_init, NULL, dnxhd_decode_close, dnxhd_decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("VC3/DNxHD"), };
123linslouis-android-video-cutter
jni/libavcodec/dnxhddec.c
C
asf20
11,719
/* * E-AC-3 decoder tables * Copyright (c) 2007 Bartlomiej Wolowiec <bartek.wolowiec@gmail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Tables taken directly from the E-AC-3 spec. */ #include "eac3dec_data.h" #include "ac3.h" const uint8_t ff_eac3_bits_vs_hebap[20] = { 0, 2, 3, 4, 5, 7, 8, 9, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, }; /** * Table E3.6, Gk=1 * No gain (Gk=1) inverse quantization, remapping scale factors * ff_eac3_gaq_remap[hebap+8] */ const int16_t ff_eac3_gaq_remap_1[12] = { 4681, 2185, 1057, 520, 258, 129, 64, 32, 16, 8, 2, 0 }; /** * Table E3.6, Gk=2 & Gk=4, A * Large mantissa inverse quantization, remapping scale factors * ff_eac3_gaq_remap_2_4_a[hebap-8][Gk=2,4] */ const int16_t ff_eac3_gaq_remap_2_4_a[9][2] = { { -10923, -4681 }, { -14043, -6554 }, { -15292, -7399 }, { -15855, -7802 }, { -16124, -7998 }, { -16255, -8096 }, { -16320, -8144 }, { -16352, -8168 }, { -16368, -8180 } }; /** * Table E3.6, Gk=2 & Gk=4, B * Large mantissa inverse quantization, negative mantissa remapping offsets * ff_eac3_gaq_remap_3_4_b[hebap-8][Gk=2,4] */ const int16_t ff_eac3_gaq_remap_2_4_b[9][2] = { { -5461, -1170 }, { -11703, -4915 }, { -14199, -6606 }, { -15327, -7412 }, { -15864, -7805 }, { -16126, -7999 }, { -16255, -8096 }, { -16320, -8144 }, { -16352, 
-8168 } }; static const int16_t vq_hebap1[4][6] = { { 7167, 4739, 1106, 4269, 10412, 4820}, { -5702, -3187, -14483, -1392, -2027, 849}, { 633, 6199, 7009, -12779, -2306, -2636}, { -1468, -7031, 7592, 10617, -5946, -3062}, }; static const int16_t vq_hebap2[8][6] = { { -12073, 608, -7019, 590, 4000, 869}, { 6692, 15689, -6178, -9239, -74, 133}, { 1855, -989, 20596, -2920, -4475, 225}, { -1194, -3901, -821, -6566, -875, -20298}, { -2762, -3181, -4094, -5623, -16945, 9765}, { 1547, 6839, 1980, 20233, -1071, -4986}, { 6221, -17915, -5516, 6266, 358, 1162}, { 3753, -1066, 4283, -3227, 15928, 10186}, }; static const int16_t vq_hebap3[16][6] = { { -10028, 20779, 10982, -4560, 798, -68}, { 11050, 20490, -6617, -5342, -1797, -1631}, { 3977, -542, 7118, -1166, 18844, 14678}, { -4320, -96, -7295, -492, -22050, -4277}, { 2692, 5856, 5530, 21862, -7212, -5325}, { -135, -23391, 962, 8115, -644, 382}, { -1563, 3400, -3299, 4693, -6892, 22398}, { 3535, 3030, 7296, 6214, 20476, -12099}, { 57, -6823, 1848, -22349, -5919, 6823}, { -821, -3655, -387, -6253, -1735, -22373}, { -6046, 1586, -18890, -14392, 9214, 705}, { -5716, 264, -17964, 14618, 7921, -337}, { -110, 108, 8, 74, -89, -50}, { 6612, -1517, 21687, -1658, -7949, -246}, { 21667, -6335, -8290, -101, -1349, -22}, { -22003, -6476, 7974, 648, 2054, -331}, }; static const int16_t vq_hebap4[32][6] = { { 6636, -4593, 14173, -17297, -16523, 864}, { 3658, 22540, 104, -1763, -84, 6}, { 21580, -17815, -7282, -1575, -2078, -320}, { -2233, 10017, -2728, 14938, -13640, -17659}, { -1564, -17738, -19161, 13735, 2757, 2951}, { 4520, 5510, 7393, 10799, 19231, -13770}, { 399, 2976, -1099, 5013, -1159, 22095}, { 3624, -2359, 4680, -2238, 22702, 3765}, { -4201, -8285, -6810, -12390, -18414, 15382}, { -5198, -6869, -10047, -8364, -16022, -20562}, { -142, -22671, -368, 4391, -464, -13}, { 814, -1118, -1089, -22019, 74, 1553}, { -1618, 19222, -17642, -13490, 842, -2309}, { 4689, 16490, 20813, -15387, -4164, -3968}, { -3308, 11214, -13542, 13599, 
-19473, 13770}, { 1817, 854, 21225, -966, -1643, -268}, { -2587, -107, -20154, 376, 1174, -304}, { -2919, 453, -5390, 750, -22034, -978}, { -19012, 16839, 10000, -3580, 2211, 1459}, { 1363, -2658, -33, -4067, 1165, -21985}, { -8592, -2760, -17520, -15985, 14897, 1323}, { 652, -9331, 3253, -14622, 12181, 19692}, { -6361, 5773, -15395, 17291, 16590, -2922}, { -661, -601, 1609, 22610, 992, -1045}, { 4961, 9107, 11225, 7829, 16320, 18627}, { -21872, -1433, 138, 1470, -1891, -196}, { -19499, -18203, 11056, -516, 2543, -2249}, { -1196, -17574, 20150, 11462, -401, 2619}, { 4638, -8154, 11891, -15759, 17615, -14955}, { -83, 278, 323, 55, -154, 232}, { 7788, 1462, 18395, 15296, -15763, -1131}, }; static const int16_t vq_hebap5[128][6] = { { -3394, -19730, 2963, 9590, 4660, 19673}, { -15665, -6405, 17671, 3860, -8232, -19429}, { 4467, 412, -17873, -8037, 691, -17307}, { 3580, 2363, 6886, 3763, 6379, -20522}, { -17230, -14133, -1396, -23939, 8373, -12537}, { -8073, -21469, -15638, 3214, 8105, -5965}, { 4343, 5169, 2683, -16822, -5146, -16558}, { 6348, -10668, 12995, -25500, -22090, 4091}, { -2880, -8366, -5968, -17158, -2638, 23132}, { -5095, -14281, -22371, 21741, 3689, 2961}, { -2443, -17739, 25155, 2707, 1594, 7}, { -18379, 9010, 4270, 731, -426, -640}, { -23695, 24732, 5642, 612, -308, -964}, { -767, 1268, 225, 1635, 173, 916}, { 5455, 6493, 4902, 10560, 23041, -17140}, { 17219, -21054, -18716, 4936, -3420, 3357}, { -1390, 15488, -21946, -14611, 1339, 542}, { -6866, -2254, -12070, -3075, -19981, -20622}, { -1803, 11775, 1343, 8917, 693, 24497}, { -21610, 9462, 4681, 9254, -7815, 15904}, { -5559, -3018, -9169, -1347, -22547, 12868}, { -366, 5076, -1727, 20427, -283, -2923}, { -1886, -6313, -939, -2081, -1399, 3513}, { -3161, -537, -5075, 11268, 19396, 989}, { 2345, 4153, 5769, -4273, 233, -399}, { -21894, -1138, -16474, 5902, 5488, -3211}, { 10007, -12530, 18829, 20932, -1158, 1790}, { -1165, 5014, -1199, 6415, -8418, -21038}, { 1892, -3534, 3815, -5846, 16427, 20288}, { 
-2664, -11627, -4147, -18311, -22710, 14848}, { 17256, 10419, 7764, 12040, 18956, 2525}, { -21419, -18685, -10897, 4368, -7051, 4539}, { -1574, 2050, 5760, 24756, 15983, 17678}, { -538, -22867, 11067, 10301, 385, 528}, { -8465, -3025, -16357, -23237, 16491, 3654}, { 5840, 575, 11890, 1947, 25157, 6653}, { 6625, -3516, -1964, 3850, -390, -116}, { 18005, 20900, 14323, -7621, -10922, 11802}, { -4857, -2932, -13334, -7815, 21622, 2267}, { -579, -9431, -748, -21321, 12367, 8265}, { -8317, 1375, -17847, 2921, 9062, 22046}, { 18398, 8635, -1503, -2418, -18295, -14734}, { -2987, 15129, -3331, 22300, 13878, -13639}, { 5874, -19026, 15587, 11350, -20738, 1971}, { 1581, -6955, -21440, 2455, 65, 414}, { 515, -4468, -665, -4672, 125, -19222}, { 21495, -20301, -1872, -1926, -211, -1022}, { 5189, -12250, -1775, -23550, -4546, 5813}, { 321, -6331, 14646, 6975, -1773, 867}, { -13814, 3180, 7927, 444, 19552, 3146}, { -6660, 12252, -1972, 17408, -24280, -12956}, { -745, 14356, -1107, 23742, -9631, -18344}, { 18284, -7909, -7531, 19118, 7721, -12659}, { 1926, 15101, -12848, 2153, 21631, 1864}, { -2130, 23416, 17056, -15597, -1544, 87}, { 8314, -11824, 14581, -20591, 7891, -2099}, { 19600, 22814, -17304, -2040, 285, -3863}, { -8214, -18322, 10724, -13744, -13469, -1666}, { 14351, 4880, -20034, 964, -4221, -180}, { -24598, -16635, 19724, 5925, 4777, 4414}, { -2495, 23493, -16141, 2918, -1038, -2010}, { 18974, -2540, 13343, 1405, -6194, -1136}, { 2489, 13670, 22638, -7311, -129, -2792}, { -13962, 16775, 23012, 728, 3397, 162}, { 3038, 993, 8774, -21969, -6609, 910}, { -12444, -22386, -2626, -5295, 19520, 9872}, { -1911, -18274, -18506, -14962, 4760, 7119}, { 8298, -2978, 25886, 7660, -7897, 1020}, { 6132, 15127, 18757, -24370, -6529, -6627}, { 7924, 12125, -9459, -23962, 5502, 937}, { -17056, -5373, 2522, 327, 1129, -390}, { 15774, 19955, -10380, 11172, -3107, 14853}, { -11904, -8091, -17928, -22287, -17237, -6803}, { -12862, -2172, -6509, 5927, 12458, -22355}, { -497, 322, 1038, -6643, 
-5404, 20311}, { 1083, -22984, -8494, 12130, -762, 2623}, { 5067, 19712, -1901, -30, -325, 85}, { 987, -5830, 4212, -9030, 9121, -25038}, { -7868, 7284, -12292, 12914, -21592, 20941}, { -1630, -7694, -2187, -8525, -5604, -25196}, { -6668, 388, -22535, 1526, 9082, 193}, { -7867, -22308, 5163, 362, 944, -259}, { 3824, -11850, 7591, -23176, 25342, 23771}, { -10504, 4123, -21111, 21173, 22439, -838}, { -4723, 21795, 6184, -122, 1642, -717}, { 24504, 19887, -2043, 986, 7, -55}, { -27313, -135, 2437, 259, 89, 307}, { 24446, -3873, -5391, -820, -2387, 361}, { 5529, 5784, 18682, 242, -21896, -4003}, { 22304, 4483, 722, -12242, 7570, 15448}, { 8673, 3009, 20437, 21108, -21100, -3080}, { -1132, 2705, -1825, 5420, -785, 18532}, { 16932, -13517, -16509, -14858, -20327, -14221}, { 2219, 1380, 21474, -1128, 327, 83}, { -2177, 21517, -3856, -14180, -204, -2191}, { 953, -9426, 15874, -10710, -3231, 21030}, { -421, -1377, 640, -8239, -20976, 2174}, { 4309, 18514, -9100, -18319, -15518, 3704}, { -5943, 449, -8387, 1075, -22210, -4992}, { 2953, 12788, 18285, 1430, 14937, 21731}, { -2913, 401, -4739, -20105, 1699, -1147}, { 3449, 5241, 8853, 22134, -7547, 1451}, { -2154, 8584, 18120, -15614, 19319, -5991}, { 3501, 2841, 5897, 6397, 8630, 23018}, { 2467, 2956, 379, 5703, -22047, -2189}, { -16963, -594, 18822, -5295, 1640, 774}, { 2896, -1424, 3586, -2292, 19910, -1822}, { -18575, 21219, -14001, -12573, 16466, 635}, { -1998, -19314, -16527, 12208, -16576, -7854}, { -9674, 1012, -21645, 2883, -12712, 2321}, { -1005, 471, -3629, 8045, -11087, 25533}, { 4141, -21472, -2673, 756, -663, -523}, { 6490, 8531, 19289, 18949, 6092, -9347}, { 16965, 24599, 14024, 10072, -536, -10438}, { -8147, 2145, -23028, -17073, 5451, -4401}, { -14873, 20520, -18303, -9717, -11885, -17831}, { -2290, -14120, 2070, 22467, 1671, 725}, { -8538, 14629, 3521, -20577, 6673, 8200}, { 20248, 4410, -1366, -585, 1229, -2449}, { 7467, -7148, 13667, -8246, 22392, -17320}, { -1932, 3875, -9064, -3812, 958, 265}, { -4399, 
2959, -15911, 19598, 4954, -1105}, { 18009, -9923, -18137, -3862, 11178, 5821}, { -14596, -1227, 9660, 21619, 11228, -11721}, { -721, -1700, 109, -2142, 61, -6772}, { -24619, -22520, 5608, -1957, -1761, -1012}, { -23728, -4451, -2688, -14679, -4266, 9919}, { 8495, -894, 20438, -13820, -17267, 139}, }; static const int16_t vq_hebap6[256][6] = { { 10154, 7365, 16861, 18681, -22893, -3636}, { -2619, -3788, -5529, -5192, -9009, -20298}, { -5583, -22800, 21297, 7012, 745, 720}, { 428, -1459, 109, -3082, 361, -8403}, { 8161, 22401, 241, 1755, -874, -2824}, { 1140, 12643, 2306, 22263, -25146, -17557}, { -2609, 3379, 10337, -19730, -15468, -23944}, { -4040, -12796, -25772, 13096, 3905, 1315}, { 4624, -23799, 13608, 25317, -1175, 2173}, { -97, 13747, -5122, 23255, 4214, -22145}, { 6878, -322, 18264, -854, -11916, -733}, { 17280, -12669, -9693, 23563, -16240, -1309}, { 5802, -4968, 19526, -21194, -24622, -183}, { 5851, -16137, 15229, -9496, -1538, 377}, { 14096, 25057, 13419, 8290, 23320, 16818}, { -7261, 118, -15867, 19097, 9781, -277}, { -4288, 21589, -13288, -16259, 16633, -4862}, { 4909, -19217, 23411, 14705, -722, 125}, { 19462, -4732, -1928, -11527, 20770, 5425}, { -27562, -2881, -4331, 384, -2103, 1367}, { -266, -9175, 5441, 26333, -1924, 4221}, { -2970, -20170, -21816, 5450, -7426, 5344}, { -221, -6696, 603, -9140, 1308, -27506}, { 9621, -8380, -1967, 9403, -1651, 22817}, { 7566, -5250, -4165, 1385, -990, 560}, { -1262, 24738, -19057, 10741, 7585, -7098}, { 451, 20130, -9949, -6015, -2188, -1458}, { 22249, 9380, 9096, 10959, -2365, -3724}, { 18668, -650, -1234, 11092, 7678, 5969}, { 19207, -1485, -1076, -731, -684, 43}, { -4973, 13430, 20139, 60, 476, -935}, { -20029, 8710, 2499, 1016, -1158, 335}, { -26413, 18598, -2201, -669, 3409, 793}, { -4726, 8875, -24607, -9646, 3643, -283}, { 13303, -21404, -3691, -1184, -1970, 1612}, { 173, 60, 919, 1229, 6942, -665}, { 16377, 16991, 5341, -14015, -2304, -20390}, { 25334, -10609, 11947, -7653, -6363, 14058}, { 23929, -13259, 
-7226, -937, 234, -187}, { 6311, -1877, 12506, -1879, 18751, -23341}, { 621, 6445, 3354, -24274, 8406, 5315}, { -3297, -5034, -4704, -5080, -25730, 5347}, { -1275, -13295, -965, -23318, 1214, 26259}, { -6252, 10035, -20105, 15301, -16073, 5136}, { 9562, -3911, -19510, 4745, 22270, -4171}, { 7978, -19600, 14024, -5745, -20855, 8939}, { 7, -4039, 991, -6065, 52, -19423}, { 3485, 2969, 7732, 7786, 25312, 6206}, { -959, -12812, -1840, -22743, 7324, 10830}, { -4686, 1678, -10172, -5205, 4294, -1271}, { 3889, 1302, 7450, 638, 20374, -3133}, { -12496, -9123, 18463, -12343, -7238, 18552}, { -6185, 8649, -6903, -895, 17109, 16604}, { -9896, 28579, 2845, 1640, 2925, -298}, { 14968, -25988, 14878, -24012, 1815, -6474}, { 26107, 5166, 21225, 15873, 21617, 14825}, { -21684, 16438, 20504, -14346, -7114, -4162}, { 28647, 90, -1572, 789, -902, -75}, { -1479, 2471, -4061, 3612, -2240, 10914}, { 8616, 17491, 17255, -17456, 17022, -16357}, { -20722, -18597, 25274, 17720, -3573, 1695}, { -997, 6129, -6303, 11250, -11359, -19739}, { -74, -4001, -1584, 13384, 162, -144}, { -529, 21068, 7923, -11396, 422, -26}, { 7102, -13531, -20055, 2629, -178, -429}, { 9201, 1368, -22238, 2623, -20499, 24889}, { -432, 6675, -266, 8723, 80, 28024}, { 19493, -3108, -9261, 1910, -21777, 5345}, { 14079, -11489, 12604, 6079, 19877, 1315}, { 10947, 9837, -18612, 15742, 4792, 605}, { -1777, 3758, -4087, 21696, 6024, -576}, { 3567, -3578, 16379, 2680, -1752, 716}, { -5049, -1399, -4550, -652, -17721, -3366}, { -3635, -4372, -6522, -22152, 7382, 1458}, { 12242, 19190, 5646, -7815, -20289, 21344}, { -7508, 19952, 23542, -9753, 5669, -1990}, { -2275, 15438, 10907, -17879, 6497, 13582}, { -15894, -15646, -4716, 6019, 24250, -6179}, { -2049, -6856, -1208, 918, 17735, -69}, { -3721, 9099, -16065, -23621, 5981, -2344}, { 7862, -8918, 24033, 25508, -11033, -741}, { -12588, 19468, 14649, 15451, -21226, 1171}, { 2102, 1147, 2789, 4096, 2179, 8750}, { -18214, -17758, -10366, -5203, -1066, -3541}, { -2819, -19958, 
-11921, 6032, 8315, 10374}, { -9078, -2100, 19431, -17, 732, -689}, { -14512, -19224, -7095, 18727, 1870, 22906}, { 3912, 659, 25597, -4006, 9619, 877}, { 2616, 22695, -5770, 17920, 3812, 20220}, { 2561, 26847, -5245, -10908, 2256, -517}, { -4974, 198, -21983, -3608, 22174, -18924}, { 21308, -1211, 19144, 16691, -1588, 11390}, { -1790, 3959, -3488, 7003, -7107, 20877}, { -6108, -17955, -18722, 24763, 16508, 3211}, { 20462, -24987, -20361, 4484, -5111, -478}, { -6378, -1998, -10229, -561, -22039, -22339}, { 3047, -18850, 7586, 14743, -19862, 6351}, { -5047, 1405, -9672, 1055, -21881, 11170}, { 3481, -9699, 6526, -16655, 22813, 21907}, { -18570, 17501, 14664, 1291, 5026, 19676}, { 16134, -19810, -16956, -17939, -16933, 5800}, { -8224, 4908, 8935, 2272, -1140, -23217}, { 1572, 2753, -1598, 2143, -3346, -21926}, { -9832, -1060, -27818, 1214, 7289, 150}, { 98, 1538, 535, 17429, -23198, -901}, { 21340, -20146, 3297, -1744, -8207, -21462}, { -4166, -4633, -17902, 5478, 1285, 136}, { 18713, 21003, 24818, 11421, 1282, -4618}, { -3535, 7636, -265, 2141, -829, -2035}, { -3184, 19713, 2775, -2, 1090, 104}, { -6771, -20185, 2938, -2125, -36, 1268}, { 9560, 9430, 9586, 22100, 13827, 6296}, { -535, -20018, 4276, -1868, -448, -17183}, { -24352, 14244, -13647, -21040, 2271, 11555}, { -2646, 15437, -4589, 18638, -4299, -622}, { -20064, 4169, 18115, -1404, 13722, -1825}, { -16359, 9080, 744, 22021, 125, 10794}, { 9644, -14607, -18479, -14714, 11174, -20754}, { -326, -23762, 6144, 7909, 602, 1540}, { -6650, 6634, -12683, 21396, 20785, -6839}, { 4252, -21043, 5628, 18687, 23860, 8328}, { 17986, 5704, -5245, -18093, -555, 3219}, { 6091, 14232, -5117, -17456, -19452, -11649}, { -21586, 11302, 15434, 25590, 6777, -26683}, { 21355, -8244, 5877, -3540, 6079, -2567}, { 2603, -2455, 5421, -12286, -19100, 5574}, { -1721, -26393, -23664, 22904, -349, 3787}, { 2189, -1203, 5340, 3249, -22617, 104}, { -1664, -11020, -2857, -20723, -24049, 19900}, { 22873, -7345, -18481, -14616, -8400, -12965}, { 
3777, 3958, 8239, 20494, -6991, -1201}, { -160, -1613, -793, -8681, 573, 776}, { 4297, -3786, 20373, 6082, -5321, -18400}, { 18745, 2463, 12546, -7749, -7734, -2183}, { 11074, -4720, 22119, 1825, -24351, 4080}, { 1503, -19178, -1569, 13, -313, 375}, { 318, -575, 2544, 178, 102, 40}, { -15996, -26897, 5008, 3320, 686, 1159}, { 25755, 26886, 574, -5930, -3916, 1407}, { -9148, -7665, -2875, -8384, -18663, 26400}, { -7445, -18040, -18396, 8802, -2252, -21886}, { 7851, 11773, 27485, -12847, -1410, 19590}, { 2240, 5947, 11247, 15980, -6499, 24280}, { 21673, -18515, 9771, 6550, -2730, 334}, { -4149, 1576, -11010, 89, -24429, -5710}, { 7720, 1478, 21412, -25025, -8385, 9}, { -2448, 10218, -12756, -16079, 1161, -21284}, { -8757, -14429, -22918, -14812, 2629, 13844}, { -7252, 2843, -9639, 2882, -14625, 24497}, { -674, -6530, 414, -23333, -21343, 454}, { 2104, -6312, 10887, 18087, -1199, 175}, { -493, -562, -2739, 118, -1074, 93}, { -10011, -4075, -28071, 22180, 15077, -636}, { -4637, -16408, -9003, -20418, -11608, -20932}, { 4815, 15892, 24238, -13634, -3074, -1059}, { -6724, 4610, -18772, -15283, -16685, 23988}, { 15349, -674, -3682, 21679, 4475, -12088}, { 4756, 2593, 5354, 6001, 15063, 26490}, { -23815, -17251, 6944, 378, 694, 670}, { 23392, -8839, -14713, 7544, -876, 11088}, { 3640, 3336, 22593, -3495, -2328, -113}, { 284, 6914, 3097, 10171, 6638, -18621}, { 2472, 5976, 11054, -11936, -603, -663}, { 16175, 16441, 13164, -4043, 4667, 7431}, { 19338, 15534, -6533, 1681, -4857, 17048}, { 17027, 532, -19064, -1441, -5130, 1085}, { -12617, -17609, 2062, -25332, 19009, -16121}, { 10056, -21000, -13634, -2949, 15367, 19934}, { -648, -1605, 10046, -1592, 13296, 19808}, { -1054, 10744, 538, 24938, 9630, -9052}, { -10099, 3042, -25076, -24052, 13971, 100}, { 6547, 6907, 7031, 10348, 23775, -17886}, { -22793, -1984, -1393, -3330, 9267, 14317}, { -14346, -3967, 3042, 16254, -17303, 9646}, { -21393, 23628, 16773, 716, 2663, 114}, { -19016, -3038, 1574, -245, 1463, -793}, { 22410, 
23441, -14637, -530, 17310, 13617}, { -11582, 7935, -13954, 23465, -24628, 26550}, { -1045, 3679, -2218, 10572, 20999, -3702}, { -15513, 197, 16718, -24603, 4945, 5}, { 10781, 4335, 26790, -9059, -16152, -2840}, { 16075, -24100, -3933, -6833, 12645, -7029}, { 2096, -25572, -8370, 6814, 11, 1178}, { -11848, -583, -8889, -20543, -10471, -380}, { -2487, 24777, -21639, -19341, 1660, -732}, { 2313, 13679, 4085, 24549, 24691, -21179}, { -2366, -504, -4130, -10570, 23668, 1961}, { 20379, 17809, -9506, 3733, -18954, -6292}, { -3856, 16802, -929, -20310, -17739, 6797}, { 12431, 6078, -11272, -14450, 6913, 23476}, { 7636, -1655, 23017, 10719, -8292, 838}, { -8559, -1235, -18096, 3897, 16093, 1490}, { -3586, 8276, 15165, -3791, -21149, 1741}, { -4497, 21739, 2366, -278, -4792, 15549}, { -23122, -13708, 7668, 16232, 24120, 15025}, { -20043, 12821, -20160, 16691, -11655, -16081}, { -12601, 20239, 3496, -2549, -6745, -11850}, { 4441, 7812, 20783, 17080, 11523, -9643}, { 24766, 8494, -23298, -3262, 11101, -7120}, { -10107, -7623, -22152, -18303, 26645, 9550}, { -25549, 477, 7874, -1538, 1123, -168}, { 470, 9834, -347, 23945, -10381, -9467}, { -4096, -9702, -6856, -21544, 20845, 7174}, { 5370, 9748, -23765, -1190, 512, -1538}, { -1006, -10046, -12649, 19234, -1790, -890}, { 15108, 23620, -15646, -2522, -1203, -1325}, { -7406, -2605, 1095, -247, -473, 177}, { 8089, 4, 12424, -22284, 10405, -7728}, { 22196, 10775, -5043, 690, 534, -212}, { -3153, -1418, -16835, 18426, 15821, 22956}, { 5681, -2229, 3196, -3414, -21817, -14807}, { 19, 787, 1032, 170, -8295, -645}, { -882, -2319, -27105, 432, -4392, 1499}, { -1354, -11819, -76, -20380, -10293, 11328}, { 211, -4753, -4675, -6933, -13538, 14479}, { 6043, 5260, -459, -462, 143, -65}, { -2572, 7256, -3317, 9212, -23184, -9990}, { -24882, -9532, 18874, 6101, 2429, -14482}, { 8314, 2277, 14192, 3512, 25881, 22000}, { 208, 20218, -281, -24778, -63, -1183}, { 1095, -6034, 2706, -21935, -2655, 563}, { 23, -5930, 243, -8989, 5345, 20558}, { 
-15466, 12699, 4160, 11087, 20621, -10416}, { 20995, -85, -8468, 194, 1003, -9515}, { -19637, -3335, -14081, 3574, -23381, -667}, { -2076, 3489, -3192, -19367, 539, -1530}, { 7352, -15213, 22596, 19369, 1043, 16627}, { -1872, -413, 1235, -5276, -3550, 21903}, { 7931, -2008, 16968, -6799, 29393, -2475}, { -13589, 8389, -23636, -22091, -14178, -14297}, { -11575, -20090, 16056, -1848, 15721, 4500}, { 3849, -16581, 20161, -21155, 7778, 11864}, { -6547, -1273, -18837, -11218, 11636, 1044}, { 2528, -6691, -17917, -11362, -4894, -1008}, { 1241, 4260, 2319, 6111, 3485, 20209}, { 3014, -3048, 5316, -4539, 20831, 8702}, { -1790, -14683, 278, 13956, -10065, -10547}, { -22732, -7957, -1154, 13821, -1484, -1247}, { -7317, -615, 13094, 18927, 9897, 1452}, { 2552, -2338, 3424, -4630, 11124, -19584}, { -11125, -20553, -10855, -10783, -20767, 6833}, { 984, -15095, 5775, 25125, 5377, -19799}, { 517, 13272, -7458, -1711, 20612, -6013}, { -21417, 13251, -20795, 13449, 17281, 13104}, { -15811, -16248, 23093, -4037, -8195, 871}, { 582, 12571, -21129, -14766, -9187, 5685}, { 4318, -1776, 11425, -17763, -9921, 577}, { 6013, 16830, 17655, -25766, -4400, -3550}, { -13744, -16541, 3636, -3330, -21091, -15886}, { 6565, -11147, 8649, -13114, 23345, -13565}, { -2542, -9046, -7558, 29240, 3701, -383}, { -10612, 24995, 1893, -8210, 20920, -16210}, { 5276, 16726, 10659, 19940, -4799, -19324}, { -532, -9300, 27856, 4965, -241, 536}, { -765, -20706, -3412, 18870, 2765, 1420}, { -3059, 2708, -19022, -331, 3537, 116}, }; static const int16_t vq_hebap7[512][6] = { { -21173, 21893, 10390, 13646, 10718, -9177}, { -22519, -8193, 18328, -6629, 25518, -10848}, { 6800, -13758, -13278, 22418, 14667, -20938}, { 2347, 10516, 1125, -3455, 5569, 27136}, { -6617, 11851, -24524, 22937, 20362, -6019}, { -21768, 10681, -19615, -15021, -8478, -2081}, { -2745, 8684, -4895, 27739, 7554, -11961}, { -1020, 2460, -954, 4754, -627, -16368}, { -19702, 23097, 75, -13684, -2644, 2108}, { 4049, -2872, 5851, -4459, 22150, 
12560}, { -21304, -17129, -730, 7419, -11658, -10523}, { 11332, 1792, 26666, 23518, -19561, -491}, { -17827, -16777, -13606, -14389, -22029, -2464}, { 1091, -5967, -7975, -16977, -20432, -21931}, { 18388, -1103, 1933, 13342, -17463, 18114}, { 22646, 17345, -9966, 17919, 18274, 698}, { 1484, 20297, -5754, -26515, 4941, -22263}, { -2603, 4587, -5842, 18464, 8767, -2568}, { -2797, -1602, 21713, 3099, -25683, 3224}, { -19027, 4693, -5007, 6060, 1972, -15095}, { -2189, 9516, -530, 20669, -4662, -8301}, { -22325, -8887, 2529, -11352, 5476, 998}, { 22100, -5052, 1651, -2657, 4615, 2319}, { 20855, -3078, -3330, 4105, 13470, 3069}, { 85, 17289, 10264, -14752, 214, 90}, { -26365, -18849, -19352, 19244, -10218, 9909}, { -9739, 20497, -6579, -6983, 2891, -738}, { 20575, -15860, -22913, 6870, 76, 327}, { 8744, -12877, -22945, -2372, -19424, -9771}, { -12886, 16183, 21084, 3821, 749, -13792}, { -15995, 18399, 2391, -17661, 19484, -6018}, { 1423, 11734, 4051, 19290, 6857, -19681}, { -5200, 9766, 18246, 2463, 18764, -4852}, { -597, 19498, 1323, -9096, -308, -1104}, { -3099, -25731, -15665, 25332, 4634, 2635}, { 19623, -2384, -7913, 11796, -9333, -14084}, { 2642, 26453, -21091, -10354, -1693, -1711}, { 22031, 21625, 11580, -22915, -4141, 129}, { -6122, 3542, 915, -261, -17, -383}, { 1696, 6704, -1425, 20838, 857, -4416}, { 1423, -15280, -8550, -9667, 5210, 5687}, { -4520, -613, -11683, 5618, 4230, 619}, { 937, -4963, -14102, -17104, -6906, -5952}, { -15068, -481, -7237, -14894, 18876, 21673}, { -25658, 2910, 1143, -327, -458, -995}, { -9656, -819, -24900, 2804, 20225, 1083}, { -1111, -3682, -1788, -19492, 966, 821}, { 7293, -21759, 10790, -7059, -23293, -1723}, { -282, -11093, 170, -20950, -28926, 12615}, { 17938, 3713, -1563, 885, 5, 564}, { 6116, 22696, 2242, -6951, 9975, -6132}, { 4338, 26808, -3705, 1976, -1079, -2570}, { -661, -7901, -2668, -15194, 17722, 4375}, { -4174, -11053, 717, -22506, 1562, 12252}, { -6405, 18334, 6103, 6983, 5956, 18195}, { 9851, 5370, 23604, -6861, 
-6569, -62}, { 21964, 13359, -683, 3785, 2168, 209}, { -3569, -1127, -19724, -1544, 1308, -803}, { -3083, 16049, -13791, -3077, 4294, 23713}, { -9999, 9943, -15872, 12934, -23631, 21699}, { 9722, 22837, 12192, 15091, 5533, 4837}, { 2243, 2099, 1243, 4089, 4748, 12956}, { 4007, -2468, 3353, -3092, 8843, 17024}, { 4330, 6127, 5549, 9249, 11226, 28592}, { -9586, -8825, 236, 1009, 455, -964}, { 6829, 19290, -1018, 200, 1821, 578}, { 5196, 957, 10372, 3330, -12800, -127}, { -3022, -8193, -14557, 22061, 5920, 1053}, { 10982, 25942, -24546, -23278, -11905, -6789}, { 22667, -11010, 5736, 2567, 23705, -10253}, { -3343, -4233, -5458, 20667, -10843, -3605}, { -4131, -3612, 4575, -829, -350, -847}, { -3303, 3451, -7398, -11604, 3023, 455}, { 3200, -9547, 3202, -22893, 11184, -26466}, { -14093, -4117, 15382, 14295, -10915, -20377}, { 3807, -11016, 22052, 14370, -15328, -7733}, { -6291, -17719, -1560, 12048, -19805, -443}, { -6147, -4234, -160, 8363, 22638, 11911}, { 19197, 1175, 7422, -9875, -4136, 4704}, { -72, -7652, -112, -11955, -3230, 27175}, { 3274, 5963, 7501, -17019, 866, -25452}, { 737, 1861, 1833, 2022, 2384, 4755}, { -5217, 7512, 3323, 2715, 3065, -1606}, { 4247, 565, 5629, 2497, 18019, -4920}, { -2833, -17920, -8062, 15738, -1018, 2136}, { 3050, -19483, 16930, 29835, -10222, 15153}, { -11346, 118, -25796, -13761, 15320, -468}, { -4824, 4960, -4263, 1575, -10593, 19561}, { -8203, -1409, -763, -1139, -607, 1408}, { -2203, -11415, 2021, -6388, -2600, 711}, { -413, -2511, -216, -3519, -28267, 1719}, { -14446, 17050, 13917, 13499, -25762, -16121}, { 19228, 7341, -12301, 682, -3791, -199}, { -4193, 20746, -15651, 11349, 5860, -824}, { -21490, -3546, -3, -1705, -3959, 9213}, { 15445, -1876, 2012, -19627, 16228, -4845}, { -2867, -3733, -7354, -175, -20119, 11174}, { -3571, -24587, 19700, 6654, 979, -654}, { 21820, -7430, -6639, -10767, -8362, 15543}, { 14827, 17977, -7204, -3409, 1906, -17288}, { 3525, -3947, -1415, -2798, 17648, 2082}, { -6580, -15255, -17913, 1337, 15338, 
21158}, { 6210, 9698, 15155, -24666, -22507, -3999}, { -1740, -593, 1095, -7779, 25058, 5601}, { 21415, -432, -1658, -6898, -1438, -14454}, { -6943, 700, -12139, -745, -24187, 22466}, { 6287, 3283, 11006, 3844, 19184, 14781}, { -22502, 15274, 5443, -2808, -970, -3343}, { 3257, -3708, 4744, -8301, 22814, -10208}, { 24346, -20970, 19846, 987, -11958, -6277}, { 3906, -19701, 13060, -1609, 18641, 7466}, { -26409, -22549, 16305, 2014, 10975, 18032}, { -7039, 4655, -14818, 18739, 15789, 1296}, { 9310, -1681, 14667, -3326, 26535, -11853}, { 5728, 5917, 13400, 10020, -2236, -24704}, { 1741, -6727, 12695, -22009, 4080, 5450}, { -2621, 9393, 21143, -25938, -3162, -2529}, { 20672, 18894, -13939, 6990, -8260, 15811}, { -23818, 11183, -13639, 11868, 16045, 2630}, { 18361, -10220, 829, 856, -1010, 157}, { 14400, -4678, 5153, -13290, -27434, -11028}, { 21613, 11256, 17453, 7604, 13130, -484}, { 7, 1236, 573, 4214, 5576, -3081}, { 916, -9092, 1285, -8958, 1185, -28699}, { 21587, 23695, 19116, -2885, -14282, -8438}, { 23414, -6161, 12978, 3061, -9351, 2236}, { -3070, -7344, -20140, 5788, 582, -551}, { -3993, 315, -7773, 8224, -28082, -12465}, { 13766, -15357, 19205, -20624, 13043, -19247}, { 3777, -177, 8029, -1001, 17812, 5162}, { -7308, -4327, -18096, -620, -1350, 14932}, { 14756, -1221, -12819, -14922, -547, 27125}, { 2234, 1708, 2764, 5416, 7986, -25163}, { 2873, 3636, 3992, 5344, 10142, 21259}, { 1158, 5379, 508, -10514, 290, -1615}, { 1114, 24789, 16575, -25168, -298, -2832}, { -1107, -6144, -1918, -7791, -2971, -23276}, { 4016, 10793, 17317, -4342, -20982, -3383}, { -4494, -207, -9951, -3575, 7947, 1154}, { -7576, 8117, -14047, 16982, -26457, -27540}, { -15164, 16096, -16844, -8886, -23720, 15906}, { 24922, 5680, -1874, 420, 132, 117}, { -506, -19310, -198, 412, -311, 752}, { -1906, 3981, -7688, 16566, -19291, -14722}, { -399, -729, -3807, -4196, -12395, 7639}, { 3368, 2330, 9092, 23686, -10290, -1705}, { -3148, 2596, -7986, 14602, -4807, 16627}, { 8057, 1481, 49, 17205, 
24869, 7474}, { -19304, -513, 11905, 2346, 5588, 3365}, { -5063, -21812, 11370, 10896, 4881, 261}, { 4794, 20577, 5109, -6025, -8049, -1521}, { 8125, -14756, 20639, -14918, 23941, -3650}, { 12451, 1381, 3613, 8687, -24002, 4848}, { 6726, 10643, 10086, 25217, -25159, -1065}, { 6561, 13977, 2911, 21737, 16465, -26050}, { -1776, 2575, -19606, -16800, 3032, 6679}, { 15012, -17910, -8438, -21554, -27111, 11808}, { 3448, -924, -15913, -1135, 5126, -20613}, { 7720, 2226, 17463, 5434, 28942, 17552}, { 1246, 15614, -11743, 24618, -17539, 3272}, { 3215, 17950, 2783, -722, -22672, 5979}, { -5678, -3184, -26087, 26034, 6583, 3302}, { 20310, -3555, -2715, -444, -1487, 1526}, { -20640, -21970, -12207, -25793, 8863, -1036}, { 17888, 570, -16102, 8329, -2553, 15275}, { -2677, 9950, -1879, 16477, -12762, -29007}, { -120, -2221, 219, 97, 365, 35}, { 1270, -718, 1480, -2689, 1930, -7527}, { 1896, 8750, 1906, 18235, -12692, -6174}, { -3733, 13713, -9882, -15960, -1376, -7146}, { -10600, 8496, 15967, -8792, 7532, 20439}, { 3041, -13457, 1032, -26952, 5787, 24984}, { -4590, -8220, -9322, -6112, -17243, 25745}, { -17808, 6970, 3752, 626, -114, 2178}, { 4449, -4862, 7054, -5404, 4738, -2827}, { 4922, -651, 18939, -9866, 848, 1886}, { -336, -5410, 7234, 20444, -9583, -600}, { 781, -19474, -12648, 6634, 1414, 450}, { -3399, -16770, 11107, 13200, -5498, 21663}, { -3265, 4859, -5961, 7530, -10837, 28086}, { 10350, -12901, 25699, 25640, -639, 351}, { 1163, 18763, -5466, -15087, -145, -1377}, { -14477, 27229, -31383, -32653, 21439, -2894}, { 15420, 18823, 22128, 19398, 22583, 13587}, { -10674, 10710, 5089, -4756, 909, -20760}, { -12948, -20660, 7410, 2722, 3427, 11585}, { -1105, 18374, 19731, -9650, 22442, 19634}, { -296, -6798, -14677, 21603, 19796, 21399}, { -19350, -7501, 25446, 13144, 8588, -25298}, { 3092, -10618, 20896, 9249, -3326, 1796}, { -811, 1449, 3106, 4748, 12073, -14262}, { -20720, 14275, -4332, -25838, -5781, -21149}, { -5132, 10554, -14020, -22150, 2840, -554}, { 25533, 17648, 
14886, -21074, 2459, 25142}, { -9370, -1788, -12862, -5870, -25811, -11023}, { 6698, 819, 10313, 166, 27581, 523}, { 101, -19388, 3413, 9638, 64, 806}, { -2742, -17931, -2576, 22818, 8553, 1126}, { 2972, 15203, 1792, 25434, -5728, -17265}, { -1419, 1604, 4398, 11452, 1731, 23787}, { -5136, 4625, -10653, 27981, 9897, -2510}, { -10528, -28033, 2999, -1530, -832, -830}, { -11133, -12511, 22206, -7243, -23578, -21698}, { 16935, -21892, 1861, -9606, 9432, 19026}, { 10277, 9516, 26815, 2010, -4943, -9080}, { 5547, -2210, 14270, -15300, -19316, 1822}, { -4850, -783, -8959, -3076, -20056, -3197}, { 8232, -2794, -17752, 13308, 3229, -991}, { -12237, -6581, 10315, -9552, 2260, -20648}, { -7000, 5529, -7553, -7490, -10342, -10266}, { 3641, 19479, -5972, -19097, -18570, 12805}, { 1283, -4164, 4198, -28473, -2498, 1866}, { 16047, 26826, -13053, -6316, 985, -1597}, { -403, 13680, 6457, 25070, 27124, -20710}, { -18070, -1790, -24986, 5953, -954, 26600}, { -24224, -15383, 24788, 1953, -1136, 187}, { -2289, 12505, -20738, -904, 18324, 21258}, { 2658, -6140, 16179, 22276, -556, 2154}, { -6087, 13950, -25682, -27713, 4049, -4795}, { -21452, 26473, 19435, -9124, 895, 303}, { -22200, -26177, -6026, 24729, -22926, -9030}, { -14276, -15982, 23732, -22851, 9268, -3841}, { 29482, 21923, -6213, 1679, -2059, -1120}, { -435, 9802, -3891, 12359, -4288, -18971}, { 19768, -86, 2467, 1990, -1021, -5354}, { 20986, -8783, -5329, -23562, -4730, 2673}, { -5095, 5605, -4629, 19150, 26037, -12259}, { 972, 6858, 4551, 27949, -4025, -2272}, { 6075, -3260, -4989, -373, -1571, -3730}, { -7256, -12992, -8820, -5109, 23054, 5054}, { 920, 2615, 7912, -7353, -4905, 20186}, { -250, 5454, 3140, 6928, -18723, -2051}, { -10299, -4372, 19608, 4879, -661, -1885}, { 14816, -8603, -19815, 6135, -21210, 14108}, { -11945, -2223, 5018, 11892, 22741, 406}, { -13184, -2613, -13256, -22433, -12482, -8380}, { 17066, 25267, -2273, 5056, -342, 145}, { 8401, -17683, 19112, 10615, -19453, 17083}, { 20821, -5700, 12298, -25598, 
10391, 7692}, { 4550, 15779, 17338, -19379, -4768, 1206}, { -7723, 10836, -27164, -11439, 6835, -1776}, { 2542, 3199, 4442, 17513, -3711, -914}, { 20960, -16774, -5814, 11087, -70, 22961}, { 3305, 2919, 6256, -4800, -20966, -3230}, { 5924, -16547, 2183, 2733, 3446, -23306}, { -6061, -194, -13852, -10971, 19488, 1029}, { 4467, -5964, -19004, 1519, -359, 855}, { -1581, -7607, 22070, -11580, -10032, 17102}, { -12412, 2553, 4324, 22500, 5751, 12170}, { -25127, 17996, -6384, 1180, 1182, 9622}, { 23462, -8471, -4392, -2669, 7638, -16835}, { -5511, -2887, -10757, -20883, 7246, 1053}, { 2703, -20602, -7554, 7516, -7740, 5868}, { 20670, 21901, 457, 14969, -17657, -11921}, { 3603, -1595, -2177, -157, -43, 605}, { 2513, 8954, 10527, 22559, -16100, -16041}, { 6002, 4951, 6795, -4862, -22400, 18849}, { 7590, -1693, -24688, -3404, 14169, 1214}, { -4398, -6663, -6870, -10083, -24596, 9253}, { 10468, 17751, -7748, 147, -6314, 4419}, { 16187, -16557, -4119, 4302, 7625, 5409}, { 3303, 2735, 7458, -19902, -2254, -3702}, { -2077, 21609, 14870, 12545, -6081, -1764}, { 4678, 11740, 2859, 6953, 1919, -3871}, { 3522, -21853, -2469, -10453, 18893, -10742}, { 3759, -10191, -4866, -2659, -17831, -1242}, { 14991, 9351, 11870, -1573, -4848, 22549}, { 9509, -27152, 10734, 20851, -26185, -17878}, { -7170, -1392, -19495, 12746, 8198, -1988}, { 1883, 28158, -846, -7235, 249, 233}, { -7200, 669, -371, -2948, 23234, -5635}, { 3141, 288, 3223, -1258, -98, -27607}, { 17373, -23235, 5110, -11199, -2574, -11487}, { -4928, 1518, -5456, 670, -18278, 1951}, { 10334, -19865, -4649, 361, -160, -923}, { 18732, 14264, -3155, -7485, -3328, 5959}, { -3614, 21077, 7276, 3536, 8121, -1528}, { -8422, 500, -19182, 18929, 26392, -1039}, { 15639, 25668, 8375, 1903, 1945, -11979}, { -2716, 3389, 26850, -4587, 1803, 22}, { 1177, -655, 1233, -2128, 7844, 1767}, { -761, 8209, -19290, -4593, 1923, -343}, { -689, -3530, -3267, -3804, -2753, 18566}, { -2110, 1962, -1353, 16643, 2765, -23102}, { -433, 4905, 302, 13016, 15933, 
-5905}, { 3203, 4126, 11181, -5496, -2529, -1160}, { -1091, -6469, -1415, 5682, -268, 583}, { -9405, -19572, 6216, 1658, 993, -75}, { -1695, -4504, -2289, -4088, -6556, -16577}, { 4760, -892, -10902, 6516, 24199, -6011}, { -253, 1000, 63, -81, -115, -382}, { -1333, 24224, -698, -4667, -2801, -19144}, { -876, -28866, -21873, 12677, -6344, 3235}, { 16847, 21145, -26172, -3183, -396, 230}, { 18296, -7790, -12857, -679, -1473, 5}, { -10488, 11429, 25805, -1122, 1401, -438}, { 3782, -7429, 26720, 17567, 19257, 12542}, { 6332, -746, 12789, 9316, -22542, -5354}, { 3418, -22728, 26978, 18303, 1076, 956}, { -27315, -2988, 920, 235, 2233, 81}, { 6199, 5296, 16093, 14768, -8429, -1112}, { -6432, 19244, 9921, -3253, 1278, -954}, { 24213, 2049, -22931, 2585, -2410, -4216}, { 9286, 14282, -19735, -3985, -2344, 1028}, { -20128, 17993, -9458, 23012, -16983, 8625}, { -6896, -20730, 3762, 17415, 22341, 19024}, { 842, 24181, 25062, -5839, -78, 937}, { -621, 19722, -24204, -1962, -14854, -56}, { 22766, -5119, 17365, 23868, -19480, -6558}, { -2158, 17490, -21435, 3340, -12819, -20295}, { -9621, 17325, 715, 2265, -4123, -492}, { 9156, 12947, 27303, -21175, -6072, -9457}, { -13164, -23269, -14006, -4184, 6978, 2}, { 938, -13381, 3520, -24297, 22902, 19589}, { -4911, -19774, 19764, -9310, -12650, 3819}, { -5462, -4249, -6987, -6260, -13943, -25150}, { 9341, 10369, -13862, -6704, 22556, -519}, { 6651, 18768, -4855, 12570, 14730, -10209}, { -823, 18119, 398, -1582, -116, -363}, { -6935, -12694, -28392, 8552, 6961, -239}, { -2602, -4704, -1021, 2015, 5129, 23670}, { -12559, -8190, -25028, 18544, 14179, 1663}, { 3813, 21036, -9620, -5051, -1800, -1087}, { -22057, 16675, 14960, 9459, 2786, 16991}, { -26040, -19318, -6414, 1104, 5798, -18039}, { -1737, 24825, 10417, -11087, 896, -5273}, { -1855, 11661, -2803, 24809, -21435, -19792}, { -23473, -16729, -5782, 5643, 2636, 4940}, { -1724, 4388, -26673, -13695, 10570, -25895}, { 15358, -19496, 26242, -18493, 1736, 8054}, { 5684, 20890, 4091, -19100, 
-14588, -10468}, { 17260, -16291, 14859, -17711, -19174, 12435}, { -27185, -12573, 6743, -562, 976, -257}, { 12395, -8618, -22248, -19843, 11013, 7762}, { 3799, 11853, -27622, -8473, 1089, -1495}, { 4141, -2182, -26720, -735, -774, 1469}, { 3125, 13762, 4606, 29257, 18771, -9958}, { -17465, -9445, -17562, -2530, -6435, -3726}, { -1742, 4351, -6841, -19773, 9627, -10654}, { 7251, 3525, 10835, 5601, 25198, -23348}, { -10300, -17830, 631, 11640, 2044, -20878}, { -873, -8502, -1063, -15674, -10693, 14934}, { -15957, 28137, 5268, 477, -1053, 1158}, { -1495, -8814, -5764, -24965, 25988, 7907}, { -1038, -114, -2308, -1319, -6480, 1472}, { 4895, -17897, -25850, 5301, -188, 1581}, { 3200, 17225, 4346, 22101, -18543, 22028}, { -10250, 545, -10932, 2276, -28070, 8118}, { 15343, 2329, 9316, 20537, 14908, 21021}, { 6329, 6130, -24508, 837, -8637, -5844}, { 7386, -501, 10503, 20131, 11435, -4755}, { -2745, 24174, -9274, 15273, -8389, -5835}, { 2992, -2864, 6048, -7473, 11687, -19996}, { -883, -11954, -9976, -21829, -4436, -27178}, { 3458, 19626, 1280, 2597, 19849, 5255}, { -5315, 19133, -14518, -8946, 13749, -1352}, { 18642, 17655, 11001, 6817, -18418, 6336}, { -1697, 2244, -4640, 3948, -12890, -5273}, { 20428, 10542, 4170, -1012, 19439, 21691}, { -2943, -19735, -4208, 1320, 909, -8897}, { 9351, -8066, -2618, -12933, 26582, 3507}, { 9705, -22628, 8311, 8167, -13293, 5608}, { 3222, 3749, -1508, 165, -52, -196}, { 102, -22744, -8832, 903, -11421, -14662}, { -120, 5998, 19765, 13401, 3628, 5197}, { 8528, 5827, -1066, 774, -39, -166}, { 9411, -9476, 9581, -13004, 24456, 24900}, { 17878, 2235, -21639, 20478, 4716, -7190}, { -2482, 9511, 1611, -21943, 14230, -1289}, { 9288, -2291, 23215, -3452, -10842, 11}, { 9496, 3041, 5130, -3890, -21219, -22589}, { 14262, -9838, 20195, 14019, 91, -17200}, { -18591, 980, 17, 821, 120, -574}, { 12285, -19269, 13742, 16373, -161, 6025}, { -3364, 1530, -4005, 2454, -10872, -23839}, { 105, 5085, -260, 5790, -588, 19170}, { 4121, 4169, 13439, 14644, 
20899, 7434}, { -175, 13101, -3704, 23233, 3907, 10106}, { -6101, 23467, 5204, -1341, 1599, 13174}, { -3217, -3494, 15117, -8387, -11762, -4750}, { 1146, 4675, -19378, 14917, -5091, 249}, { -21506, 10136, -16473, -13305, 18382, -8601}, { 628, 2447, 3344, 3130, -5115, 119}, { 17900, -22422, -17633, 21967, -16293, -7676}, { 16863, 24214, 5612, -3858, -809, 3822}, { -2291, 10091, -2360, -25109, -1226, 312}, { 2957, 11256, 26745, -13266, -3455, -1128}, { -19762, -2708, 4604, 6355, 1638, 25501}, { -19593, -7753, 3159, -85, -489, -1855}, { 814, 12510, 19077, -4681, -2610, -1474}, { -23408, -19027, 8137, 19878, 7912, -282}, { 839, -19652, 11927, 27278, -3211, 2266}, { 4020, -1110, 8226, -1274, 20922, 25060}, { 26576, 325, -8693, -232, -2218, -699}, { -11293, -4200, 1805, -6673, -22940, -1339}, { -2005, -15886, -1047, -27687, -13235, 14370}, { -22073, 1949, 13175, -15656, -1846, 8055}, { 3039, 12025, 7132, -24632, 413, -2347}, { -24048, -206, 12459, -6654, -417, -10091}, { 18179, -23688, -20515, -16396, 7230, 763}, { 5659, -5085, 13878, -23729, -11077, -19587}, { 11340, 501, 25040, 7616, -19658, 1605}, { -26650, 8878, 10544, 417, 1299, 261}, { 14460, 11369, -3263, 9990, 8194, 18111}, { 1355, -20838, -9196, -16060, -8559, -730}, { -1918, -20937, -18293, -2461, -2651, 4316}, { -2810, 24521, -10996, -25721, 308, -1234}, { -9075, -17280, -1833, -29342, -24213, -16631}, { -2843, 10165, -5339, -2888, 21858, -21340}, { -15832, 14849, -23780, 5184, 10113, -20639}, { -19535, -11361, 8413, 1486, -23658, -5759}, { -7512, 1027, -20794, 13732, 19892, -21934}, { -12132, -7022, -19175, -8840, 22125, -16490}, { 1937, 5210, -6318, -23788, 13141, 11082}, { -205, 6036, -380, 8658, -233, 28020}, { -5523, 7477, 7635, 23595, 9763, -2590}, { 21658, -28313, -3086, -300, -1032, 1744}, { -22352, 16646, 208, 6665, -17400, -3028}, { 18482, 9336, -2737, -19372, 407, -4389}, { -4913, -17370, 18819, -17654, 13416, 15232}, { 7749, 6368, 23135, -18174, 7584, -4248}, { -1489, -6523, 586, -10157, 14964, 
25568}, { 3844, -6156, 4897, -13045, -22526, 5647}, { -8491, -2105, -24774, 905, -9326, 1456}, { -3040, -1476, 1166, -4428, 11236, 9204}, { 3397, -1451, 13598, -15841, 24540, 5819}, { 8483, -2993, 21547, -16916, 7741, 24018}, { -14932, -23758, -5332, -6664, -4497, 13267}, { 19379, 12916, -2142, -737, 21100, -22101}, { 3393, -4629, 5735, -18913, -6969, 2687}, { 1148, -16147, -21433, -28095, -630, -14449}, { 7300, 672, 18530, -17452, -10149, 351}, { 11356, -10974, 17212, 4624, 145, 17791}, { -711, -3479, -2238, 15887, 2027, 0}, { -28048, 1794, -593, -2758, -21852, 11535}, { -19683, 4937, 22004, 21523, -3148, 1790}, { 813, 8231, 2633, 11981, -3043, 22201}, { 8952, -24760, -690, 14873, -2366, -5372}, { 8406, -5439, -274, -642, -145, 778}, { -6605, 7258, 20780, -23507, -18625, 22782}, { -22896, -25488, 10020, -1614, 1508, -1393}, { 7607, 407, -24678, -16385, -1804, -4699}, { -10592, -19139, 10462, -3747, 8721, -6919}, { 13010, 5292, -6230, -4884, -20904, -1797}, { 16891, -13770, -465, 19343, -10741, -12959}, { 25193, -14799, -5681, -521, -321, -1211}, { 6917, -3093, 20183, -26903, -12026, 1295}, { 305, 1992, 19457, -985, 25, -521}, { 6707, -3698, 8365, -8687, 21921, -27166}, { 4668, 5997, 7117, 11696, 24401, -10794}, { 744, -9416, 19893, 1963, 7922, -9824}, { 3430, 21282, -1736, 10844, 8821, 27015}, { -8813, 1521, -24038, 1651, 7838, -1208}, { 3911, -11221, 3273, -12541, 7168, 18402}, { 21642, 9117, -11536, -5256, 7077, 2382}, { 100, 3817, -6713, 1244, 1518, -321}, { 7946, -18670, 10667, -4866, 727, 776}, { -15883, -8150, -2087, 22739, 1567, -3482}, { 4380, -2735, 8469, -7025, -11424, 1317}, { 26970, 4393, 7665, 17561, -714, 650}, { -16191, -835, 8365, 1795, -14314, 16297}, { 4504, -10048, 7662, -26690, -17428, 2580}, { 48, -3984, 564, -5871, 2658, -18658}, { 12579, -26016, -15642, 2672, -1347, -887}, { -4950, 4208, -6811, 2569, -20621, -8658}, { -1836, -14818, -5571, -23322, -14800, 25867}, { 5434, -28139, -2357, -2883, -570, 2431}, { 13096, -2771, 24994, -12496, 
-24723, -1025}, { -5676, -4339, 1908, 18628, -21323, 17366}, { 27660, -27897, -15409, 1436, -7112, -2241}, { 8019, 3847, 24568, -469, 9674, 10683}, { -903, -10149, 1801, -21260, 4795, -8751}, { 1122, -9582, 2625, 22791, 956, 882}, { 7876, 19075, -9900, -24266, 7496, 9277}, { 980, -26764, -5386, 5396, 1086, 1648}, { 28838, -1270, -447, 5, -429, -20}, { -15283, 6132, 22812, 1252, -9963, 511}, { 851, 7925, -457, -12210, 4261, 7579}, { -4530, 8452, -1246, 14501, -24951, -5760}, { -17814, -10727, 9887, -23929, -13432, 1878}, { -15049, 10165, 16491, -14603, -11712, -21156}, { -3317, 840, -5683, 22413, 1994, 586}, { 23158, -5788, -15043, -10372, -9271, -13523}, { -773, -9509, -3993, -24264, 8463, 5804}, { -8545, -703, -12440, -3985, -25122, -28147}, { -16659, 16001, 2746, 1611, 5097, -1043}, { 41, -7181, 19903, 31555, -32237, 13927}, { -5658, 845, -12774, 5705, 16695, -86}, { 5282, 14875, 27026, 21124, 15776, -10477}, { 14712, 19648, -11487, -13361, -20196, -15229}, { 8597, -9138, -626, 10891, -6015, 6346}, { -1488, -1272, -1479, -1303, -3704, -5485}, { -3370, 17871, -6604, 24930, 25886, -3127}, { 8416, 27783, -1385, 5350, -4260, 19993}, { 5688, 362, 17246, 3809, -3246, 1088}, { -105, -29607, 2747, 15223, -167, 3722}, { 3502, -3195, 8602, 7772, -1566, -915}, { -491, 3257, -2423, 5522, 20606, -100}, { -13948, -11368, -15375, -21866, -8520, 12221}, { -616, 2424, -2023, 4398, -3805, 8108}, { -7204, 21043, 21211, -9395, -19391, 896}, { -5737, -15160, -21298, 17066, -1006, -366}, { 6261, 3240, -11937, -16213, -15820, 6581}, { -3155, 24796, 2733, -1257, -875, -1597}, { -20469, 11094, 24071, -8987, 14136, 2220}, { -14106, 11959, -22495, 4135, -1055, -5420}, { 801, -2655, 60, -5324, -790, 5937}, { -7372, -1764, -22433, -26060, 21707, 4178}, { -5715, -6648, -14908, 1325, -24044, 1493}, { -6024, -12488, 23930, 2950, 1601, 1173}, { 19067, 17630, 17929, -10654, 10928, -4958}, { 3231, -3284, 27336, 4174, -1683, 497}, }; const int16_t (* const ff_eac3_mantissa_vq[8])[6] = { NULL, 
vq_hebap1, vq_hebap2, vq_hebap3, vq_hebap4, vq_hebap5, vq_hebap6, vq_hebap7, }; /** * Table E2.14 Frame Exponent Strategy Combinations */ const uint8_t ff_eac3_frm_expstr[32][6] = { { EXP_D15, EXP_REUSE, EXP_REUSE, EXP_REUSE, EXP_REUSE, EXP_REUSE}, { EXP_D15, EXP_REUSE, EXP_REUSE, EXP_REUSE, EXP_REUSE, EXP_D45}, { EXP_D15, EXP_REUSE, EXP_REUSE, EXP_REUSE, EXP_D25, EXP_REUSE}, { EXP_D15, EXP_REUSE, EXP_REUSE, EXP_REUSE, EXP_D45, EXP_D45}, { EXP_D25, EXP_REUSE, EXP_REUSE, EXP_D25, EXP_REUSE, EXP_REUSE}, { EXP_D25, EXP_REUSE, EXP_REUSE, EXP_D25, EXP_REUSE, EXP_D45}, { EXP_D25, EXP_REUSE, EXP_REUSE, EXP_D45, EXP_D25, EXP_REUSE}, { EXP_D25, EXP_REUSE, EXP_REUSE, EXP_D45, EXP_D45, EXP_D45}, { EXP_D25, EXP_REUSE, EXP_D15, EXP_REUSE, EXP_REUSE, EXP_REUSE}, { EXP_D25, EXP_REUSE, EXP_D25, EXP_REUSE, EXP_REUSE, EXP_D45}, { EXP_D25, EXP_REUSE, EXP_D25, EXP_REUSE, EXP_D25, EXP_REUSE}, { EXP_D25, EXP_REUSE, EXP_D25, EXP_REUSE, EXP_D45, EXP_D45}, { EXP_D25, EXP_REUSE, EXP_D45, EXP_D25, EXP_REUSE, EXP_REUSE}, { EXP_D25, EXP_REUSE, EXP_D45, EXP_D25, EXP_REUSE, EXP_D45}, { EXP_D25, EXP_REUSE, EXP_D45, EXP_D45, EXP_D25, EXP_REUSE}, { EXP_D25, EXP_REUSE, EXP_D45, EXP_D45, EXP_D45, EXP_D45}, { EXP_D45, EXP_D15, EXP_REUSE, EXP_REUSE, EXP_REUSE, EXP_REUSE}, { EXP_D45, EXP_D15, EXP_REUSE, EXP_REUSE, EXP_REUSE, EXP_D45}, { EXP_D45, EXP_D25, EXP_REUSE, EXP_REUSE, EXP_D25, EXP_REUSE}, { EXP_D45, EXP_D25, EXP_REUSE, EXP_REUSE, EXP_D45, EXP_D45}, { EXP_D45, EXP_D25, EXP_REUSE, EXP_D25, EXP_REUSE, EXP_REUSE}, { EXP_D45, EXP_D25, EXP_REUSE, EXP_D25, EXP_REUSE, EXP_D45}, { EXP_D45, EXP_D25, EXP_REUSE, EXP_D45, EXP_D25, EXP_REUSE}, { EXP_D45, EXP_D25, EXP_REUSE, EXP_D45, EXP_D45, EXP_D45}, { EXP_D45, EXP_D45, EXP_D15, EXP_REUSE, EXP_REUSE, EXP_REUSE}, { EXP_D45, EXP_D45, EXP_D25, EXP_REUSE, EXP_REUSE, EXP_D45}, { EXP_D45, EXP_D45, EXP_D25, EXP_REUSE, EXP_D25, EXP_REUSE}, { EXP_D45, EXP_D45, EXP_D25, EXP_REUSE, EXP_D45, EXP_D45}, { EXP_D45, EXP_D45, EXP_D45, EXP_D25, EXP_REUSE, EXP_REUSE}, { 
EXP_D45, EXP_D45, EXP_D45, EXP_D25, EXP_REUSE, EXP_D45}, { EXP_D45, EXP_D45, EXP_D45, EXP_D45, EXP_D25, EXP_REUSE}, { EXP_D45, EXP_D45, EXP_D45, EXP_D45, EXP_D45, EXP_D45}, }; /** * Table E.25: Spectral Extension Attenuation Table * ff_eac3_spx_atten_tab[code][bin]=pow(2.0,(bin+1)*(code+1)/-15.0); */ const float ff_eac3_spx_atten_tab[32][3] = { { 0.954841603910416503f, 0.911722488558216804f, 0.870550563296124125f }, { 0.911722488558216804f, 0.831237896142787758f, 0.757858283255198995f }, { 0.870550563296124125f, 0.757858283255198995f, 0.659753955386447100f }, { 0.831237896142787758f, 0.690956439983888004f, 0.574349177498517438f }, { 0.793700525984099792f, 0.629960524947436595f, 0.500000000000000000f }, { 0.757858283255198995f, 0.574349177498517438f, 0.435275281648062062f }, { 0.723634618720189082f, 0.523647061410313364f, 0.378929141627599553f }, { 0.690956439983888004f, 0.477420801955208307f, 0.329876977693223550f }, { 0.659753955386447100f, 0.435275281648062062f, 0.287174588749258719f }, { 0.629960524947436595f, 0.396850262992049896f, 0.250000000000000000f }, { 0.601512518041058319f, 0.361817309360094541f, 0.217637640824031003f }, { 0.574349177498517438f, 0.329876977693223550f, 0.189464570813799776f }, { 0.548412489847312945f, 0.300756259020529160f, 0.164938488846611775f }, { 0.523647061410313364f, 0.274206244923656473f, 0.143587294374629387f }, { 0.500000000000000000f, 0.250000000000000000f, 0.125000000000000000f }, { 0.477420801955208307f, 0.227930622139554201f, 0.108818820412015502f }, { 0.455861244279108402f, 0.207809474035696939f, 0.094732285406899888f }, { 0.435275281648062062f, 0.189464570813799776f, 0.082469244423305887f }, { 0.415618948071393879f, 0.172739109995972029f, 0.071793647187314694f }, { 0.396850262992049896f, 0.157490131236859149f, 0.062500000000000000f }, { 0.378929141627599553f, 0.143587294374629387f, 0.054409410206007751f }, { 0.361817309360094541f, 0.130911765352578369f, 0.047366142703449930f }, { 0.345478219991944002f, 
0.119355200488802049f, 0.041234622211652958f }, { 0.329876977693223550f, 0.108818820412015502f, 0.035896823593657347f }, { 0.314980262473718298f, 0.099212565748012460f, 0.031250000000000000f }, { 0.300756259020529160f, 0.090454327340023621f, 0.027204705103003875f }, { 0.287174588749258719f, 0.082469244423305887f, 0.023683071351724965f }, { 0.274206244923656473f, 0.075189064755132290f, 0.020617311105826479f }, { 0.261823530705156682f, 0.068551561230914118f, 0.017948411796828673f }, { 0.250000000000000000f, 0.062500000000000000f, 0.015625000000000000f }, { 0.238710400977604098f, 0.056982655534888536f, 0.013602352551501938f }, { 0.227930622139554201f, 0.051952368508924235f, 0.011841535675862483f } };
123linslouis-android-video-cutter
jni/libavcodec/eac3dec_data.c
C
asf20
61,268
/* * VLC constants for DV codec * Copyright (c) 2002 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * VLC constants for DV codec. */ #ifndef AVCODEC_DV_VLC_DATA_H #define AVCODEC_DV_VLC_DATA_H #include <stdint.h> #define NB_DV_VLC 409 /* * There's a catch about the following three tables: the mapping they establish * between (run, level) and vlc is not 1-1. So you have to watch out for that * when building misc. tables. E.g. (1, 0) can be either 0x7cf or 0x1f82. 
*/ static const uint16_t dv_vlc_bits[409] = { 0x0000, 0x0002, 0x0007, 0x0008, 0x0009, 0x0014, 0x0015, 0x0016, 0x0017, 0x0030, 0x0031, 0x0032, 0x0033, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, 0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7, 0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef, 0x01e0, 0x01e1, 0x01e2, 0x01e3, 0x01e4, 0x01e5, 0x01e6, 0x01e7, 0x01e8, 0x01e9, 0x01ea, 0x01eb, 0x01ec, 0x01ed, 0x01ee, 0x01ef, 0x03e0, 0x03e1, 0x03e2, 0x03e3, 0x03e4, 0x03e5, 0x03e6, 0x07ce, 0x07cf, 0x07d0, 0x07d1, 0x07d2, 0x07d3, 0x07d4, 0x07d5, 0x0fac, 0x0fad, 0x0fae, 0x0faf, 0x0fb0, 0x0fb1, 0x0fb2, 0x0fb3, 0x0fb4, 0x0fb5, 0x0fb6, 0x0fb7, 0x0fb8, 0x0fb9, 0x0fba, 0x0fbb, 0x0fbc, 0x0fbd, 0x0fbe, 0x0fbf, 0x1f80, 0x1f81, 0x1f82, 0x1f83, 0x1f84, 0x1f85, 0x1f86, 0x1f87, 0x1f88, 0x1f89, 0x1f8a, 0x1f8b, 0x1f8c, 0x1f8d, 0x1f8e, 0x1f8f, 0x1f90, 0x1f91, 0x1f92, 0x1f93, 0x1f94, 0x1f95, 0x1f96, 0x1f97, 0x1f98, 0x1f99, 0x1f9a, 0x1f9b, 0x1f9c, 0x1f9d, 0x1f9e, 0x1f9f, 0x1fa0, 0x1fa1, 0x1fa2, 0x1fa3, 0x1fa4, 0x1fa5, 0x1fa6, 0x1fa7, 0x1fa8, 0x1fa9, 0x1faa, 0x1fab, 0x1fac, 0x1fad, 0x1fae, 0x1faf, 0x1fb0, 0x1fb1, 0x1fb2, 0x1fb3, 0x1fb4, 0x1fb5, 0x1fb6, 0x1fb7, 0x1fb8, 0x1fb9, 0x1fba, 0x1fbb, 0x1fbc, 0x1fbd, 0x1fbe, 0x1fbf, 0x7f00, 0x7f01, 0x7f02, 0x7f03, 0x7f04, 0x7f05, 0x7f06, 0x7f07, 0x7f08, 0x7f09, 0x7f0a, 0x7f0b, 0x7f0c, 0x7f0d, 0x7f0e, 0x7f0f, 0x7f10, 0x7f11, 0x7f12, 0x7f13, 0x7f14, 0x7f15, 0x7f16, 0x7f17, 0x7f18, 0x7f19, 0x7f1a, 0x7f1b, 0x7f1c, 0x7f1d, 0x7f1e, 0x7f1f, 0x7f20, 0x7f21, 0x7f22, 0x7f23, 0x7f24, 0x7f25, 0x7f26, 0x7f27, 0x7f28, 0x7f29, 0x7f2a, 0x7f2b, 0x7f2c, 0x7f2d, 0x7f2e, 0x7f2f, 0x7f30, 0x7f31, 0x7f32, 0x7f33, 0x7f34, 0x7f35, 0x7f36, 0x7f37, 0x7f38, 0x7f39, 0x7f3a, 0x7f3b, 0x7f3c, 0x7f3d, 0x7f3e, 0x7f3f, 0x7f40, 0x7f41, 0x7f42, 0x7f43, 0x7f44, 0x7f45, 0x7f46, 0x7f47, 0x7f48, 0x7f49, 0x7f4a, 0x7f4b, 0x7f4c, 0x7f4d, 0x7f4e, 0x7f4f, 0x7f50, 0x7f51, 0x7f52, 0x7f53, 0x7f54, 0x7f55, 0x7f56, 0x7f57, 0x7f58, 0x7f59, 0x7f5a, 0x7f5b, 
0x7f5c, 0x7f5d, 0x7f5e, 0x7f5f, 0x7f60, 0x7f61, 0x7f62, 0x7f63, 0x7f64, 0x7f65, 0x7f66, 0x7f67, 0x7f68, 0x7f69, 0x7f6a, 0x7f6b, 0x7f6c, 0x7f6d, 0x7f6e, 0x7f6f, 0x7f70, 0x7f71, 0x7f72, 0x7f73, 0x7f74, 0x7f75, 0x7f76, 0x7f77, 0x7f78, 0x7f79, 0x7f7a, 0x7f7b, 0x7f7c, 0x7f7d, 0x7f7e, 0x7f7f, 0x7f80, 0x7f81, 0x7f82, 0x7f83, 0x7f84, 0x7f85, 0x7f86, 0x7f87, 0x7f88, 0x7f89, 0x7f8a, 0x7f8b, 0x7f8c, 0x7f8d, 0x7f8e, 0x7f8f, 0x7f90, 0x7f91, 0x7f92, 0x7f93, 0x7f94, 0x7f95, 0x7f96, 0x7f97, 0x7f98, 0x7f99, 0x7f9a, 0x7f9b, 0x7f9c, 0x7f9d, 0x7f9e, 0x7f9f, 0x7fa0, 0x7fa1, 0x7fa2, 0x7fa3, 0x7fa4, 0x7fa5, 0x7fa6, 0x7fa7, 0x7fa8, 0x7fa9, 0x7faa, 0x7fab, 0x7fac, 0x7fad, 0x7fae, 0x7faf, 0x7fb0, 0x7fb1, 0x7fb2, 0x7fb3, 0x7fb4, 0x7fb5, 0x7fb6, 0x7fb7, 0x7fb8, 0x7fb9, 0x7fba, 0x7fbb, 0x7fbc, 0x7fbd, 0x7fbe, 0x7fbf, 0x7fc0, 0x7fc1, 0x7fc2, 0x7fc3, 0x7fc4, 0x7fc5, 0x7fc6, 0x7fc7, 0x7fc8, 0x7fc9, 0x7fca, 0x7fcb, 0x7fcc, 0x7fcd, 0x7fce, 0x7fcf, 0x7fd0, 0x7fd1, 0x7fd2, 0x7fd3, 0x7fd4, 0x7fd5, 0x7fd6, 0x7fd7, 0x7fd8, 0x7fd9, 0x7fda, 0x7fdb, 0x7fdc, 0x7fdd, 0x7fde, 0x7fdf, 0x7fe0, 0x7fe1, 0x7fe2, 0x7fe3, 0x7fe4, 0x7fe5, 0x7fe6, 0x7fe7, 0x7fe8, 0x7fe9, 0x7fea, 0x7feb, 0x7fec, 0x7fed, 0x7fee, 0x7fef, 0x7ff0, 0x7ff1, 0x7ff2, 0x7ff3, 0x7ff4, 0x7ff5, 0x7ff6, 0x7ff7, 0x7ff8, 0x7ff9, 0x7ffa, 0x7ffb, 0x7ffc, 0x7ffd, 0x7ffe, 0x7fff, 0x0006, }; static const uint8_t dv_vlc_len[409] = { 2, 3, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 4, }; static const uint8_t dv_vlc_run[409] = { 0, 0, 1, 0, 0, 2, 1, 0, 0, 3, 4, 0, 0, 5, 6, 2, 1, 1, 0, 0, 0, 7, 8, 9, 10, 3, 4, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 11, 12, 13, 14, 5, 6, 3, 4, 2, 2, 1, 0, 0, 0, 0, 0, 5, 3, 3, 2, 1, 1, 1, 0, 1, 6, 4, 3, 1, 1, 1, 2, 3, 4, 5, 7, 8, 9, 10, 7, 8, 4, 3, 2, 2, 2, 2, 2, 1, 1, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, }; static const uint8_t dv_vlc_level[409] = { 1, 2, 1, 3, 4, 1, 2, 5, 6, 1, 1, 7, 8, 1, 1, 2, 3, 4, 9, 10, 11, 1, 1, 1, 1, 2, 2, 3, 5, 6, 7, 12, 13, 14, 15, 16, 17, 1, 1, 1, 1, 2, 2, 3, 3, 4, 5, 8, 18, 19, 20, 21, 22, 3, 4, 5, 6, 9, 10, 11, 0, 0, 3, 4, 6, 12, 13, 14, 0, 0, 0, 0, 2, 2, 2, 2, 3, 3, 5, 7, 7, 8, 9, 10, 11, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 0, }; #endif /* 
AVCODEC_DV_VLC_DATA_H */
123linslouis-android-video-cutter
jni/libavcodec/dv_vlc_data.h
C
asf20
10,244
/* * MPEG-4 Audio common header * Copyright (c) 2008 Baptiste Coudurier <baptiste.coudurier@free.fr> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_MPEG4AUDIO_H #define AVCODEC_MPEG4AUDIO_H #include <stdint.h> #include "get_bits.h" #include "put_bits.h" typedef struct { int object_type; int sampling_index; int sample_rate; int chan_config; int sbr; //< -1 implicit, 1 presence int ext_object_type; int ext_sampling_index; int ext_sample_rate; int ext_chan_config; int channels; int ps; //< -1 implicit, 1 presence } MPEG4AudioConfig; extern const int ff_mpeg4audio_sample_rates[16]; extern const uint8_t ff_mpeg4audio_channels[8]; /** * Parse MPEG-4 systems extradata to retrieve audio configuration. * @param[in] c MPEG4AudioConfig structure to fill. * @param[in] buf Extradata from container. * @param[in] buf_size Extradata size. * @return On error -1 is returned, on success AudioSpecificConfig bit index in extradata. */ int ff_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf, int buf_size); enum AudioObjectType { AOT_NULL, // Support? 
Name AOT_AAC_MAIN, ///< Y Main AOT_AAC_LC, ///< Y Low Complexity AOT_AAC_SSR, ///< N (code in SoC repo) Scalable Sample Rate AOT_AAC_LTP, ///< N (code in SoC repo) Long Term Prediction AOT_SBR, ///< Y Spectral Band Replication AOT_AAC_SCALABLE, ///< N Scalable AOT_TWINVQ, ///< N Twin Vector Quantizer AOT_CELP, ///< N Code Excited Linear Prediction AOT_HVXC, ///< N Harmonic Vector eXcitation Coding AOT_TTSI = 12, ///< N Text-To-Speech Interface AOT_MAINSYNTH, ///< N Main Synthesis AOT_WAVESYNTH, ///< N Wavetable Synthesis AOT_MIDI, ///< N General MIDI AOT_SAFX, ///< N Algorithmic Synthesis and Audio Effects AOT_ER_AAC_LC, ///< N Error Resilient Low Complexity AOT_ER_AAC_LTP = 19, ///< N Error Resilient Long Term Prediction AOT_ER_AAC_SCALABLE, ///< N Error Resilient Scalable AOT_ER_TWINVQ, ///< N Error Resilient Twin Vector Quantizer AOT_ER_BSAC, ///< N Error Resilient Bit-Sliced Arithmetic Coding AOT_ER_AAC_LD, ///< N Error Resilient Low Delay AOT_ER_CELP, ///< N Error Resilient Code Excited Linear Prediction AOT_ER_HVXC, ///< N Error Resilient Harmonic Vector eXcitation Coding AOT_ER_HILN, ///< N Error Resilient Harmonic and Individual Lines plus Noise AOT_ER_PARAM, ///< N Error Resilient Parametric AOT_SSC, ///< N SinuSoidal Coding AOT_PS, ///< N Parametric Stereo AOT_SURROUND, ///< N MPEG Surround AOT_ESCAPE, ///< Y Escape Value AOT_L1, ///< Y Layer 1 AOT_L2, ///< Y Layer 2 AOT_L3, ///< Y Layer 3 AOT_DST, ///< N Direct Stream Transfer AOT_ALS, ///< Y Audio LosslesS AOT_SLS, ///< N Scalable LosslesS AOT_SLS_NON_CORE, ///< N Scalable LosslesS (non core) AOT_ER_AAC_ELD, ///< N Error Resilient Enhanced Low Delay AOT_SMR_SIMPLE, ///< N Symbolic Music Representation Simple AOT_SMR_MAIN, ///< N Symbolic Music Representation Main AOT_USAC_NOSBR, ///< N Unified Speech and Audio Coding (no SBR) AOT_SAOC, ///< N Spatial Audio Object Coding AOT_LD_SURROUND, ///< N Low Delay MPEG Surround AOT_USAC, ///< N Unified Speech and Audio Coding }; #define MAX_PCE_SIZE 304 
///<Maximum size of a PCE including the 3-bit ID_PCE ///<marker and the comment int ff_copy_pce_data(PutBitContext *pb, GetBitContext *gb); #endif /* AVCODEC_MPEG4AUDIO_H */
123linslouis-android-video-cutter
jni/libavcodec/mpeg4audio.h
C
asf20
5,749
/* * LSP routines for ACELP-based codecs * * Copyright (c) 2007 Reynaldo H. Verdejo Pinochet (QCELP decoder) * Copyright (c) 2008 Vladimir Voroshilov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <inttypes.h> #include "avcodec.h" #define FRAC_BITS 14 #include "mathops.h" #include "lsp.h" #include "celp_math.h" void ff_acelp_reorder_lsf(int16_t* lsfq, int lsfq_min_distance, int lsfq_min, int lsfq_max, int lp_order) { int i, j; /* sort lsfq in ascending order. float bubble agorithm, O(n) if data already sorted, O(n^2) - otherwise */ for(i=0; i<lp_order-1; i++) for(j=i; j>=0 && lsfq[j] > lsfq[j+1]; j--) FFSWAP(int16_t, lsfq[j], lsfq[j+1]); for(i=0; i<lp_order; i++) { lsfq[i] = FFMAX(lsfq[i], lsfq_min); lsfq_min = lsfq[i] + lsfq_min_distance; } lsfq[lp_order-1] = FFMIN(lsfq[lp_order-1], lsfq_max);//Is warning required ? 
} void ff_set_min_dist_lsf(float *lsf, double min_spacing, int size) { int i; float prev = 0.0; for (i = 0; i < size; i++) prev = lsf[i] = FFMAX(lsf[i], prev + min_spacing); } void ff_acelp_lsf2lsp(int16_t *lsp, const int16_t *lsf, int lp_order) { int i; /* Convert LSF to LSP, lsp=cos(lsf) */ for(i=0; i<lp_order; i++) // 20861 = 2.0 / PI in (0.15) lsp[i] = ff_cos(lsf[i] * 20861 >> 15); // divide by PI and (0,13) -> (0,14) } /** * \brief decodes polynomial coefficients from LSP * \param f [out] decoded polynomial coefficients (-0x20000000 <= (3.22) <= 0x1fffffff) * \param lsp LSP coefficients (-0x8000 <= (0.15) <= 0x7fff) */ static void lsp2poly(int* f, const int16_t* lsp, int lp_half_order) { int i, j; f[0] = 0x400000; // 1.0 in (3.22) f[1] = -lsp[0] << 8; // *2 and (0.15) -> (3.22) for(i=2; i<=lp_half_order; i++) { f[i] = f[i-2]; for(j=i; j>1; j--) f[j] -= MULL(f[j-1], lsp[2*i-2], FRAC_BITS) - f[j-2]; f[1] -= lsp[2*i-2] << 8; } } void ff_acelp_lsp2lpc(int16_t* lp, const int16_t* lsp, int lp_half_order) { int i; int f1[lp_half_order+1]; // (3.22) int f2[lp_half_order+1]; // (3.22) lsp2poly(f1, lsp , lp_half_order); lsp2poly(f2, lsp+1, lp_half_order); /* 3.2.6 of G.729, Equations 25 and 26*/ lp[0] = 4096; for(i=1; i<lp_half_order+1; i++) { int ff1 = f1[i] + f1[i-1]; // (3.22) int ff2 = f2[i] - f2[i-1]; // (3.22) ff1 += 1 << 10; // for rounding lp[i] = (ff1 + ff2) >> 11; // divide by 2 and (3.22) -> (3.12) lp[(lp_half_order << 1) + 1 - i] = (ff1 - ff2) >> 11; // divide by 2 and (3.22) -> (3.12) } } void ff_acelp_lp_decode(int16_t* lp_1st, int16_t* lp_2nd, const int16_t* lsp_2nd, const int16_t* lsp_prev, int lp_order) { int16_t lsp_1st[lp_order]; // (0.15) int i; /* LSP values for first subframe (3.2.5 of G.729, Equation 24)*/ for(i=0; i<lp_order; i++) #ifdef G729_BITEXACT lsp_1st[i] = (lsp_2nd[i] >> 1) + (lsp_prev[i] >> 1); #else lsp_1st[i] = (lsp_2nd[i] + lsp_prev[i]) >> 1; #endif ff_acelp_lsp2lpc(lp_1st, lsp_1st, lp_order >> 1); /* LSP values for second subframe 
(3.2.5 of G.729)*/ ff_acelp_lsp2lpc(lp_2nd, lsp_2nd, lp_order >> 1); } void ff_lsp2polyf(const double *lsp, double *f, int lp_half_order) { int i, j; f[0] = 1.0; f[1] = -2 * lsp[0]; lsp -= 2; for(i=2; i<=lp_half_order; i++) { double val = -2 * lsp[2*i]; f[i] = val * f[i-1] + 2*f[i-2]; for(j=i-1; j>1; j--) f[j] += f[j-1] * val + f[j-2]; f[1] += val; } } void ff_acelp_lspd2lpc(const double *lsp, float *lpc, int lp_half_order) { double pa[MAX_LP_HALF_ORDER+1], qa[MAX_LP_HALF_ORDER+1]; float *lpc2 = lpc + (lp_half_order << 1) - 1; assert(lp_half_order <= MAX_LP_HALF_ORDER); ff_lsp2polyf(lsp, pa, lp_half_order); ff_lsp2polyf(lsp + 1, qa, lp_half_order); while (lp_half_order--) { double paf = pa[lp_half_order+1] + pa[lp_half_order]; double qaf = qa[lp_half_order+1] - qa[lp_half_order]; lpc [ lp_half_order] = 0.5*(paf+qaf); lpc2[-lp_half_order] = 0.5*(paf-qaf); } } void ff_sort_nearly_sorted_floats(float *vals, int len) { int i,j; for (i = 0; i < len - 1; i++) for (j = i; j >= 0 && vals[j] > vals[j+1]; j--) FFSWAP(float, vals[j], vals[j+1]); }
123linslouis-android-video-cutter
jni/libavcodec/lsp.c
C
asf20
5,127
/* * E-AC-3 decoder tables * Copyright (c) 2007 Bartlomiej Wolowiec <bartek.wolowiec@gmail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_EAC3DEC_DATA_H #define AVCODEC_EAC3DEC_DATA_H #include <stdint.h> extern const uint8_t ff_eac3_bits_vs_hebap[20]; extern const int16_t ff_eac3_gaq_remap_1[12]; extern const int16_t ff_eac3_gaq_remap_2_4_a[9][2]; extern const int16_t ff_eac3_gaq_remap_2_4_b[9][2]; extern const int16_t (* const ff_eac3_mantissa_vq[8])[6]; extern const uint8_t ff_eac3_frm_expstr[32][6]; extern const float ff_eac3_spx_atten_tab[32][3]; #endif /* AVCODEC_EAC3DEC_DATA_H */
123linslouis-android-video-cutter
jni/libavcodec/eac3dec_data.h
C
asf20
1,339
/* * RV30 decoder motion compensation functions * Copyright (c) 2007 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * RV30 decoder motion compensation functions */ #include "avcodec.h" #include "dsputil.h" #define RV30_LOWPASS(OPNAME, OP) \ static av_unused void OPNAME ## rv30_tpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, const int C1, const int C2){\ const int h=8;\ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\ int i;\ for(i=0; i<h; i++)\ {\ OP(dst[0], (-(src[-1]+src[2]) + src[0]*C1 + src[1]*C2 + 8)>>4);\ OP(dst[1], (-(src[ 0]+src[3]) + src[1]*C1 + src[2]*C2 + 8)>>4);\ OP(dst[2], (-(src[ 1]+src[4]) + src[2]*C1 + src[3]*C2 + 8)>>4);\ OP(dst[3], (-(src[ 2]+src[5]) + src[3]*C1 + src[4]*C2 + 8)>>4);\ OP(dst[4], (-(src[ 3]+src[6]) + src[4]*C1 + src[5]*C2 + 8)>>4);\ OP(dst[5], (-(src[ 4]+src[7]) + src[5]*C1 + src[6]*C2 + 8)>>4);\ OP(dst[6], (-(src[ 5]+src[8]) + src[6]*C1 + src[7]*C2 + 8)>>4);\ OP(dst[7], (-(src[ 6]+src[9]) + src[7]*C1 + src[8]*C2 + 8)>>4);\ dst+=dstStride;\ src+=srcStride;\ }\ }\ \ static void OPNAME ## rv30_tpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, const int C1, const int C2){\ const int w=8;\ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\ int i;\ for(i=0; i<w; i++)\ {\ const int srcA= 
src[-1*srcStride];\ const int src0= src[0 *srcStride];\ const int src1= src[1 *srcStride];\ const int src2= src[2 *srcStride];\ const int src3= src[3 *srcStride];\ const int src4= src[4 *srcStride];\ const int src5= src[5 *srcStride];\ const int src6= src[6 *srcStride];\ const int src7= src[7 *srcStride];\ const int src8= src[8 *srcStride];\ const int src9= src[9 *srcStride];\ OP(dst[0*dstStride], (-(srcA+src2) + src0*C1 + src1*C2 + 8)>>4);\ OP(dst[1*dstStride], (-(src0+src3) + src1*C1 + src2*C2 + 8)>>4);\ OP(dst[2*dstStride], (-(src1+src4) + src2*C1 + src3*C2 + 8)>>4);\ OP(dst[3*dstStride], (-(src2+src5) + src3*C1 + src4*C2 + 8)>>4);\ OP(dst[4*dstStride], (-(src3+src6) + src4*C1 + src5*C2 + 8)>>4);\ OP(dst[5*dstStride], (-(src4+src7) + src5*C1 + src6*C2 + 8)>>4);\ OP(dst[6*dstStride], (-(src5+src8) + src6*C1 + src7*C2 + 8)>>4);\ OP(dst[7*dstStride], (-(src6+src9) + src7*C1 + src8*C2 + 8)>>4);\ dst++;\ src++;\ }\ }\ \ static void OPNAME ## rv30_tpel8_hv_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ const int w = 8;\ const int h = 8;\ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\ int i, j;\ for(j = 0; j < h; j++){\ for(i = 0; i < w; i++){\ OP(dst[i], (\ src[srcStride*-1+i-1] -12*src[srcStride*-1+i] -6*src[srcStride*-1+i+1] +src[srcStride*-1+i+2]+\ -12*src[srcStride* 0+i-1] +144*src[srcStride* 0+i] +72*src[srcStride* 0+i+1] -12*src[srcStride* 0+i+2] +\ -6*src[srcStride* 1+i-1] +72*src[srcStride* 1+i] +36*src[srcStride* 1+i+1] -6*src[srcStride* 1+i+2] +\ src[srcStride* 2+i-1] -12*src[srcStride* 2+i] -6*src[srcStride* 2+i+1] +src[srcStride* 2+i+2] +\ 128)>>8);\ }\ src += srcStride;\ dst += dstStride;\ }\ }\ \ static void OPNAME ## rv30_tpel8_hhv_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ const int w = 8;\ const int h = 8;\ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\ int i, j;\ for(j = 0; j < h; j++){\ for(i = 0; i < w; i++){\ OP(dst[i], (\ src[srcStride*-1+i-1] -12*src[srcStride*-1+i+1] -6*src[srcStride*-1+i] 
+src[srcStride*-1+i+2]+\ -12*src[srcStride* 0+i-1] +144*src[srcStride* 0+i+1] +72*src[srcStride* 0+i] -12*src[srcStride* 0+i+2]+\ -6*src[srcStride* 1+i-1] +72*src[srcStride* 1+i+1] +36*src[srcStride* 1+i] -6*src[srcStride* 1+i+2]+\ src[srcStride* 2+i-1] -12*src[srcStride* 2+i+1] -6*src[srcStride* 2+i] +src[srcStride* 2+i+2]+\ 128)>>8);\ }\ src += srcStride;\ dst += dstStride;\ }\ }\ \ static void OPNAME ## rv30_tpel8_hvv_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ const int w = 8;\ const int h = 8;\ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\ int i, j;\ for(j = 0; j < h; j++){\ for(i = 0; i < w; i++){\ OP(dst[i], (\ src[srcStride*-1+i-1] -12*src[srcStride*-1+i] -6*src[srcStride*-1+i+1] +src[srcStride*-1+i+2]+\ -6*src[srcStride* 0+i-1] +72*src[srcStride* 0+i] +36*src[srcStride* 0+i+1] -6*src[srcStride* 0+i+2]+\ -12*src[srcStride* 1+i-1] +144*src[srcStride* 1+i] +72*src[srcStride* 1+i+1] -12*src[srcStride* 1+i+2]+\ src[srcStride* 2+i-1] -12*src[srcStride* 2+i] -6*src[srcStride* 2+i+1] +src[srcStride* 2+i+2]+\ 128)>>8);\ }\ src += srcStride;\ dst += dstStride;\ }\ }\ \ static void OPNAME ## rv30_tpel8_hhvv_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ const int w = 8;\ const int h = 8;\ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\ int i, j;\ for(j = 0; j < h; j++){\ for(i = 0; i < w; i++){\ OP(dst[i], (\ 36*src[i+srcStride*0] +54*src[i+1+srcStride*0] +6*src[i+2+srcStride*0]+\ 54*src[i+srcStride*1] +81*src[i+1+srcStride*1] +9*src[i+2+srcStride*1]+\ 6*src[i+srcStride*2] + 9*src[i+1+srcStride*2] + src[i+2+srcStride*2]+\ 128)>>8);\ }\ src += srcStride;\ dst += dstStride;\ }\ }\ \ static void OPNAME ## rv30_tpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, const int C1, const int C2){\ OPNAME ## rv30_tpel8_v_lowpass(dst , src , dstStride, srcStride, C1, C2);\ OPNAME ## rv30_tpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, C1, C2);\ src += 8*srcStride;\ dst += 8*dstStride;\ OPNAME ## 
rv30_tpel8_v_lowpass(dst , src , dstStride, srcStride, C1, C2);\ OPNAME ## rv30_tpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, C1, C2);\ }\ \ static void OPNAME ## rv30_tpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, const int C1, const int C2){\ OPNAME ## rv30_tpel8_h_lowpass(dst , src , dstStride, srcStride, C1, C2);\ OPNAME ## rv30_tpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, C1, C2);\ src += 8*srcStride;\ dst += 8*dstStride;\ OPNAME ## rv30_tpel8_h_lowpass(dst , src , dstStride, srcStride, C1, C2);\ OPNAME ## rv30_tpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, C1, C2);\ }\ \ static void OPNAME ## rv30_tpel16_hv_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ OPNAME ## rv30_tpel8_hv_lowpass(dst , src , dstStride, srcStride);\ OPNAME ## rv30_tpel8_hv_lowpass(dst+8, src+8, dstStride, srcStride);\ src += 8*srcStride;\ dst += 8*dstStride;\ OPNAME ## rv30_tpel8_hv_lowpass(dst , src , dstStride, srcStride);\ OPNAME ## rv30_tpel8_hv_lowpass(dst+8, src+8, dstStride, srcStride);\ }\ \ static void OPNAME ## rv30_tpel16_hhv_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ OPNAME ## rv30_tpel8_hhv_lowpass(dst , src , dstStride, srcStride);\ OPNAME ## rv30_tpel8_hhv_lowpass(dst+8, src+8, dstStride, srcStride);\ src += 8*srcStride;\ dst += 8*dstStride;\ OPNAME ## rv30_tpel8_hhv_lowpass(dst , src , dstStride, srcStride);\ OPNAME ## rv30_tpel8_hhv_lowpass(dst+8, src+8, dstStride, srcStride);\ }\ \ static void OPNAME ## rv30_tpel16_hvv_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ OPNAME ## rv30_tpel8_hvv_lowpass(dst , src , dstStride, srcStride);\ OPNAME ## rv30_tpel8_hvv_lowpass(dst+8, src+8, dstStride, srcStride);\ src += 8*srcStride;\ dst += 8*dstStride;\ OPNAME ## rv30_tpel8_hvv_lowpass(dst , src , dstStride, srcStride);\ OPNAME ## rv30_tpel8_hvv_lowpass(dst+8, src+8, dstStride, srcStride);\ }\ \ static void OPNAME ## rv30_tpel16_hhvv_lowpass(uint8_t *dst, uint8_t *src, 
int dstStride, int srcStride){\ OPNAME ## rv30_tpel8_hhvv_lowpass(dst , src , dstStride, srcStride);\ OPNAME ## rv30_tpel8_hhvv_lowpass(dst+8, src+8, dstStride, srcStride);\ src += 8*srcStride;\ dst += 8*dstStride;\ OPNAME ## rv30_tpel8_hhvv_lowpass(dst , src , dstStride, srcStride);\ OPNAME ## rv30_tpel8_hhvv_lowpass(dst+8, src+8, dstStride, srcStride);\ }\ \ #define RV30_MC(OPNAME, SIZE) \ static void OPNAME ## rv30_tpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, int stride){\ OPNAME ## rv30_tpel ## SIZE ## _h_lowpass(dst, src, stride, stride, 12, 6);\ }\ \ static void OPNAME ## rv30_tpel ## SIZE ## _mc20_c(uint8_t *dst, uint8_t *src, int stride){\ OPNAME ## rv30_tpel ## SIZE ## _h_lowpass(dst, src, stride, stride, 6, 12);\ }\ \ static void OPNAME ## rv30_tpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, int stride){\ OPNAME ## rv30_tpel ## SIZE ## _v_lowpass(dst, src, stride, stride, 12, 6);\ }\ \ static void OPNAME ## rv30_tpel ## SIZE ## _mc02_c(uint8_t *dst, uint8_t *src, int stride){\ OPNAME ## rv30_tpel ## SIZE ## _v_lowpass(dst, src, stride, stride, 6, 12);\ }\ \ static void OPNAME ## rv30_tpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, int stride){\ OPNAME ## rv30_tpel ## SIZE ## _hv_lowpass(dst, src, stride, stride);\ }\ \ static void OPNAME ## rv30_tpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, int stride){\ OPNAME ## rv30_tpel ## SIZE ## _hvv_lowpass(dst, src, stride, stride);\ }\ \ static void OPNAME ## rv30_tpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, int stride){\ OPNAME ## rv30_tpel ## SIZE ## _hhv_lowpass(dst, src, stride, stride);\ }\ \ static void OPNAME ## rv30_tpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, int stride){\ OPNAME ## rv30_tpel ## SIZE ## _hhvv_lowpass(dst, src, stride, stride);\ }\ \ #define op_avg(a, b) a = (((a)+cm[b]+1)>>1) #define op_put(a, b) a = cm[b] RV30_LOWPASS(put_ , op_put) RV30_LOWPASS(avg_ , op_avg) RV30_MC(put_, 8) RV30_MC(put_, 16) RV30_MC(avg_, 8) RV30_MC(avg_, 16) av_cold void 
ff_rv30dsp_init(DSPContext* c, AVCodecContext *avctx) { c->put_rv30_tpel_pixels_tab[0][ 0] = c->put_h264_qpel_pixels_tab[0][0]; c->put_rv30_tpel_pixels_tab[0][ 1] = put_rv30_tpel16_mc10_c; c->put_rv30_tpel_pixels_tab[0][ 2] = put_rv30_tpel16_mc20_c; c->put_rv30_tpel_pixels_tab[0][ 4] = put_rv30_tpel16_mc01_c; c->put_rv30_tpel_pixels_tab[0][ 5] = put_rv30_tpel16_mc11_c; c->put_rv30_tpel_pixels_tab[0][ 6] = put_rv30_tpel16_mc21_c; c->put_rv30_tpel_pixels_tab[0][ 8] = put_rv30_tpel16_mc02_c; c->put_rv30_tpel_pixels_tab[0][ 9] = put_rv30_tpel16_mc12_c; c->put_rv30_tpel_pixels_tab[0][10] = put_rv30_tpel16_mc22_c; c->avg_rv30_tpel_pixels_tab[0][ 0] = c->avg_h264_qpel_pixels_tab[0][0]; c->avg_rv30_tpel_pixels_tab[0][ 1] = avg_rv30_tpel16_mc10_c; c->avg_rv30_tpel_pixels_tab[0][ 2] = avg_rv30_tpel16_mc20_c; c->avg_rv30_tpel_pixels_tab[0][ 4] = avg_rv30_tpel16_mc01_c; c->avg_rv30_tpel_pixels_tab[0][ 5] = avg_rv30_tpel16_mc11_c; c->avg_rv30_tpel_pixels_tab[0][ 6] = avg_rv30_tpel16_mc21_c; c->avg_rv30_tpel_pixels_tab[0][ 8] = avg_rv30_tpel16_mc02_c; c->avg_rv30_tpel_pixels_tab[0][ 9] = avg_rv30_tpel16_mc12_c; c->avg_rv30_tpel_pixels_tab[0][10] = avg_rv30_tpel16_mc22_c; c->put_rv30_tpel_pixels_tab[1][ 0] = c->put_h264_qpel_pixels_tab[1][0]; c->put_rv30_tpel_pixels_tab[1][ 1] = put_rv30_tpel8_mc10_c; c->put_rv30_tpel_pixels_tab[1][ 2] = put_rv30_tpel8_mc20_c; c->put_rv30_tpel_pixels_tab[1][ 4] = put_rv30_tpel8_mc01_c; c->put_rv30_tpel_pixels_tab[1][ 5] = put_rv30_tpel8_mc11_c; c->put_rv30_tpel_pixels_tab[1][ 6] = put_rv30_tpel8_mc21_c; c->put_rv30_tpel_pixels_tab[1][ 8] = put_rv30_tpel8_mc02_c; c->put_rv30_tpel_pixels_tab[1][ 9] = put_rv30_tpel8_mc12_c; c->put_rv30_tpel_pixels_tab[1][10] = put_rv30_tpel8_mc22_c; c->avg_rv30_tpel_pixels_tab[1][ 0] = c->avg_h264_qpel_pixels_tab[1][0]; c->avg_rv30_tpel_pixels_tab[1][ 1] = avg_rv30_tpel8_mc10_c; c->avg_rv30_tpel_pixels_tab[1][ 2] = avg_rv30_tpel8_mc20_c; c->avg_rv30_tpel_pixels_tab[1][ 4] = avg_rv30_tpel8_mc01_c; 
c->avg_rv30_tpel_pixels_tab[1][ 5] = avg_rv30_tpel8_mc11_c; c->avg_rv30_tpel_pixels_tab[1][ 6] = avg_rv30_tpel8_mc21_c; c->avg_rv30_tpel_pixels_tab[1][ 8] = avg_rv30_tpel8_mc02_c; c->avg_rv30_tpel_pixels_tab[1][ 9] = avg_rv30_tpel8_mc12_c; c->avg_rv30_tpel_pixels_tab[1][10] = avg_rv30_tpel8_mc22_c; }
123linslouis-android-video-cutter
jni/libavcodec/rv30dsp.c
C
asf20
13,529
/* * RealVideo 4 decoder * copyright (c) 2007 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * miscellaneous RV40 tables */ #ifndef AVCODEC_RV40DATA_H #define AVCODEC_RV40DATA_H #include <stdint.h> /** * standard widths and heights coded in RV40 */ //@{ static const int rv40_standard_widths[] = { 160, 172, 240, 320, 352, 640, 704, 0}; static const int rv40_standard_heights[] = { 120, 132, 144, 240, 288, 480, -8, -10, 180, 360, 576, 0}; //@} #define MODE2_PATTERNS_NUM 20 /** * intra types table * * These values are actually coded 3-tuples * used for detecting standard block configurations. */ static const uint16_t rv40_aic_table_index[MODE2_PATTERNS_NUM] = { 0x000, 0x100, 0x200, 0x011, 0x111, 0x211, 0x511, 0x611, 0x022, 0x122, 0x222, 0x722, 0x272, 0x227, 0x822, 0x282, 0x228, 0x112, 0x116, 0x221 }; /** * luma quantizer values * The second table is used for inter blocks. 
*/ static const uint8_t rv40_luma_dc_quant[2][32] = { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 17, 18, 18, 18, 19, 19, 19, 20, 20, 20, 22, 22, 22, 22 }, { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 20, 21, 21, 22, 23, 23, 23, 24, 24, 24, 24 } }; /** * @begingroup loopfilter coefficients used by the RV40 loop filter * @{ */ /** * dither values for deblocking filter - left/top values */ static const uint8_t rv40_dither_l[16] = { 0x40, 0x50, 0x20, 0x60, 0x30, 0x50, 0x40, 0x30, 0x50, 0x40, 0x50, 0x30, 0x60, 0x20, 0x50, 0x40 }; /** * dither values for deblocking filter - right/bottom values */ static const uint8_t rv40_dither_r[16] = { 0x40, 0x30, 0x60, 0x20, 0x50, 0x30, 0x30, 0x40, 0x40, 0x40, 0x50, 0x30, 0x20, 0x60, 0x30, 0x40 }; /** alpha parameter for RV40 loop filter - almost the same as in JVT-A003r1 */ static const uint8_t rv40_alpha_tab[32] = { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 122, 96, 75, 59, 47, 37, 29, 23, 18, 15, 13, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 }; /** beta parameter for RV40 loop filter - almost the same as in JVT-A003r1 */ static const uint8_t rv40_beta_tab[32] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 4, 4, 4, 6, 6, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 13, 14, 15, 16, 17 }; /** clip table for RV40 loop filter - the same as in JVT-A003r1 */ static const uint8_t rv40_filter_clip_tbl[3][32] = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 5, 5 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 7, 8, 9 } }; /** @} */ // end loopfilter group #endif /* AVCODEC_RV40DATA_H */
123linslouis-android-video-cutter
jni/libavcodec/rv40data.h
C
asf20
3,624
/* * Zip Motion Blocks Video (ZMBV) encoder * Copyright (c) 2006 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Zip Motion Blocks Video encoder */ #include <stdio.h> #include <stdlib.h> #include "libavutil/intreadwrite.h" #include "avcodec.h" #include <zlib.h> #define ZMBV_KEYFRAME 1 #define ZMBV_DELTAPAL 2 #define ZMBV_BLOCK 16 /** * Encoder context */ typedef struct ZmbvEncContext { AVCodecContext *avctx; AVFrame pic; int range; uint8_t *comp_buf, *work_buf; uint8_t pal[768]; uint32_t pal2[256]; //for quick comparisons uint8_t *prev; int pstride; int comp_size; int keyint, curfrm; z_stream zstream; } ZmbvEncContext; static int score_tab[256]; /** Block comparing function * XXX should be optimized and moved to DSPContext * TODO handle out of edge ME */ static inline int block_cmp(uint8_t *src, int stride, uint8_t *src2, int stride2, int bw, int bh, int *xored) { int sum = 0; int i, j; uint8_t histogram[256] = {0}; *xored = 0; for(j = 0; j < bh; j++){ for(i = 0; i < bw; i++){ int t = src[i] ^ src2[i]; histogram[t]++; *xored |= t; } src += stride; src2 += stride2; } for(i = 1; i < 256; i++) sum += score_tab[histogram[i]]; return sum; } /** Motion estimation function * TODO make better ME decisions */ static int zmbv_me(ZmbvEncContext *c, uint8_t 
*src, int sstride, uint8_t *prev, int pstride, int x, int y, int *mx, int *my, int *xored) { int dx, dy, tx, ty, tv, bv, bw, bh; *mx = *my = 0; bw = FFMIN(ZMBV_BLOCK, c->avctx->width - x); bh = FFMIN(ZMBV_BLOCK, c->avctx->height - y); bv = block_cmp(src, sstride, prev, pstride, bw, bh, xored); if(!bv) return 0; for(ty = FFMAX(y - c->range, 0); ty < FFMIN(y + c->range, c->avctx->height - bh); ty++){ for(tx = FFMAX(x - c->range, 0); tx < FFMIN(x + c->range, c->avctx->width - bw); tx++){ if(tx == x && ty == y) continue; // we already tested this block dx = tx - x; dy = ty - y; tv = block_cmp(src, sstride, prev + dx + dy*pstride, pstride, bw, bh, xored); if(tv < bv){ bv = tv; *mx = dx; *my = dy; if(!bv) return 0; } } } return bv; } static int encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void *data) { ZmbvEncContext * const c = avctx->priv_data; AVFrame *pict = data; AVFrame * const p = &c->pic; uint8_t *src, *prev; uint32_t *palptr; int len = 0; int keyframe, chpal; int fl; int work_size = 0; int bw, bh; int i, j; keyframe = !c->curfrm; c->curfrm++; if(c->curfrm == c->keyint) c->curfrm = 0; *p = *pict; p->pict_type= keyframe ? FF_I_TYPE : FF_P_TYPE; p->key_frame= keyframe; chpal = !keyframe && memcmp(p->data[1], c->pal2, 1024); fl = (keyframe ? ZMBV_KEYFRAME : 0) | (chpal ? 
ZMBV_DELTAPAL : 0); *buf++ = fl; len++; if(keyframe){ deflateReset(&c->zstream); *buf++ = 0; len++; // hi ver *buf++ = 1; len++; // lo ver *buf++ = 1; len++; // comp *buf++ = 4; len++; // format - 8bpp *buf++ = ZMBV_BLOCK; len++; // block width *buf++ = ZMBV_BLOCK; len++; // block height } palptr = (uint32_t*)p->data[1]; src = p->data[0]; prev = c->prev; if(chpal){ uint8_t tpal[3]; for(i = 0; i < 256; i++){ AV_WB24(tpal, palptr[i]); c->work_buf[work_size++] = tpal[0] ^ c->pal[i * 3 + 0]; c->work_buf[work_size++] = tpal[1] ^ c->pal[i * 3 + 1]; c->work_buf[work_size++] = tpal[2] ^ c->pal[i * 3 + 2]; c->pal[i * 3 + 0] = tpal[0]; c->pal[i * 3 + 1] = tpal[1]; c->pal[i * 3 + 2] = tpal[2]; } memcpy(c->pal2, p->data[1], 1024); } if(keyframe){ for(i = 0; i < 256; i++){ AV_WB24(c->pal+(i*3), palptr[i]); } memcpy(c->work_buf, c->pal, 768); memcpy(c->pal2, p->data[1], 1024); work_size = 768; for(i = 0; i < avctx->height; i++){ memcpy(c->work_buf + work_size, src, avctx->width); src += p->linesize[0]; work_size += avctx->width; } }else{ int x, y, bh2, bw2, xored; uint8_t *tsrc, *tprev; uint8_t *mv; int mx, my, bv; bw = (avctx->width + ZMBV_BLOCK - 1) / ZMBV_BLOCK; bh = (avctx->height + ZMBV_BLOCK - 1) / ZMBV_BLOCK; mv = c->work_buf + work_size; memset(c->work_buf + work_size, 0, (bw * bh * 2 + 3) & ~3); work_size += (bw * bh * 2 + 3) & ~3; /* for now just XOR'ing */ for(y = 0; y < avctx->height; y += ZMBV_BLOCK) { bh2 = FFMIN(avctx->height - y, ZMBV_BLOCK); for(x = 0; x < avctx->width; x += ZMBV_BLOCK, mv += 2) { bw2 = FFMIN(avctx->width - x, ZMBV_BLOCK); tsrc = src + x; tprev = prev + x; bv = zmbv_me(c, tsrc, p->linesize[0], tprev, c->pstride, x, y, &mx, &my, &xored); mv[0] = (mx << 1) | !!xored; mv[1] = my << 1; tprev += mx + my * c->pstride; if(xored){ for(j = 0; j < bh2; j++){ for(i = 0; i < bw2; i++) c->work_buf[work_size++] = tsrc[i] ^ tprev[i]; tsrc += p->linesize[0]; tprev += c->pstride; } } } src += p->linesize[0] * ZMBV_BLOCK; prev += c->pstride * ZMBV_BLOCK; } } /* 
save the previous frame */ src = p->data[0]; prev = c->prev; for(i = 0; i < avctx->height; i++){ memcpy(prev, src, avctx->width); prev += c->pstride; src += p->linesize[0]; } c->zstream.next_in = c->work_buf; c->zstream.avail_in = work_size; c->zstream.total_in = 0; c->zstream.next_out = c->comp_buf; c->zstream.avail_out = c->comp_size; c->zstream.total_out = 0; if(deflate(&c->zstream, Z_SYNC_FLUSH) != Z_OK){ av_log(avctx, AV_LOG_ERROR, "Error compressing data\n"); return -1; } memcpy(buf, c->comp_buf, c->zstream.total_out); return len + c->zstream.total_out; } /** * Init zmbv encoder */ static av_cold int encode_init(AVCodecContext *avctx) { ZmbvEncContext * const c = avctx->priv_data; int zret; // Zlib return code int i; int lvl = 9; for(i=1; i<256; i++) score_tab[i]= -i * log(i/(double)(ZMBV_BLOCK*ZMBV_BLOCK)) * (256/M_LN2); c->avctx = avctx; c->curfrm = 0; c->keyint = avctx->keyint_min; c->range = 8; if(avctx->me_range > 0) c->range = FFMIN(avctx->me_range, 127); if(avctx->compression_level >= 0) lvl = avctx->compression_level; if(lvl < 0 || lvl > 9){ av_log(avctx, AV_LOG_ERROR, "Compression level should be 0-9, not %i\n", lvl); return -1; } // Needed if zlib unused or init aborted before deflateInit memset(&(c->zstream), 0, sizeof(z_stream)); c->comp_size = avctx->width * avctx->height + 1024 + ((avctx->width + ZMBV_BLOCK - 1) / ZMBV_BLOCK) * ((avctx->height + ZMBV_BLOCK - 1) / ZMBV_BLOCK) * 2 + 4; if ((c->work_buf = av_malloc(c->comp_size)) == NULL) { av_log(avctx, AV_LOG_ERROR, "Can't allocate work buffer.\n"); return -1; } /* Conservative upper bound taken from zlib v1.2.1 source via lcl.c */ c->comp_size = c->comp_size + ((c->comp_size + 7) >> 3) + ((c->comp_size + 63) >> 6) + 11; /* Allocate compression buffer */ if ((c->comp_buf = av_malloc(c->comp_size)) == NULL) { av_log(avctx, AV_LOG_ERROR, "Can't allocate compression buffer.\n"); return -1; } c->pstride = FFALIGN(avctx->width, 16); if ((c->prev = av_malloc(c->pstride * avctx->height)) == NULL) { 
av_log(avctx, AV_LOG_ERROR, "Can't allocate picture.\n"); return -1; } c->zstream.zalloc = Z_NULL; c->zstream.zfree = Z_NULL; c->zstream.opaque = Z_NULL; zret = deflateInit(&(c->zstream), lvl); if (zret != Z_OK) { av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret); return -1; } avctx->coded_frame = (AVFrame*)&c->pic; return 0; } /** * Uninit zmbv encoder */ static av_cold int encode_end(AVCodecContext *avctx) { ZmbvEncContext * const c = avctx->priv_data; av_freep(&c->comp_buf); av_freep(&c->work_buf); deflateEnd(&(c->zstream)); av_freep(&c->prev); return 0; } AVCodec zmbv_encoder = { "zmbv", AVMEDIA_TYPE_VIDEO, CODEC_ID_ZMBV, sizeof(ZmbvEncContext), encode_init, encode_frame, encode_end, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_PAL8, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("Zip Motion Blocks Video"), };
123linslouis-android-video-cutter
jni/libavcodec/zmbvenc.c
C
asf20
9,890
/* * MPEG Audio common tables * copyright (c) 2002 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * mpeg audio layer common tables. */ #ifndef AVCODEC_MPEGAUDIODATA_H #define AVCODEC_MPEGAUDIODATA_H #include "libavutil/common.h" #define MODE_EXT_MS_STEREO 2 #define MODE_EXT_I_STEREO 1 extern const uint16_t ff_mpa_bitrate_tab[2][3][15]; extern const uint16_t ff_mpa_freq_tab[3]; extern const int32_t ff_mpa_enwindow[257]; extern const int ff_mpa_sblimit_table[5]; extern const int ff_mpa_quant_steps[17]; extern const int ff_mpa_quant_bits[17]; extern const unsigned char * const ff_mpa_alloc_tables[5]; #endif /* AVCODEC_MPEGAUDIODATA_H */
123linslouis-android-video-cutter
jni/libavcodec/mpegaudiodata.h
C
asf20
1,398
/* * jfdctfst.c * * This file is part of the Independent JPEG Group's software. * * The authors make NO WARRANTY or representation, either express or implied, * with respect to this software, its quality, accuracy, merchantability, or * fitness for a particular purpose. This software is provided "AS IS", and * you, its user, assume the entire risk as to its quality and accuracy. * * This software is copyright (C) 1994-1996, Thomas G. Lane. * All Rights Reserved except as specified below. * * Permission is hereby granted to use, copy, modify, and distribute this * software (or portions thereof) for any purpose, without fee, subject to * these conditions: * (1) If any part of the source code for this software is distributed, then * this README file must be included, with this copyright and no-warranty * notice unaltered; and any additions, deletions, or changes to the original * files must be clearly indicated in accompanying documentation. * (2) If only executable code is distributed, then the accompanying * documentation must state that "this software is based in part on the work * of the Independent JPEG Group". * (3) Permission for use of this software is granted only if the user accepts * full responsibility for any undesirable consequences; the authors accept * NO LIABILITY for damages of any kind. * * These conditions apply to any software derived from or based on the IJG * code, not just to the unmodified library. If you use our work, you ought * to acknowledge us. * * Permission is NOT granted for the use of any IJG author's name or company * name in advertising or publicity relating to this software or products * derived from it. This software may be referred to only as "the Independent * JPEG Group's software". * * We specifically permit and encourage the use of this software as the basis * of commercial products, provided that all warranty or liability claims are * assumed by the product vendor. 
*
 * This file contains a fast, not so accurate integer implementation of the
 * forward DCT (Discrete Cosine Transform).
 *
 * A 2-D DCT can be done by 1-D DCT on each row followed by 1-D DCT
 * on each column.  Direct algorithms are also available, but they are
 * much more complex and seem not to be any faster when reduced to code.
 *
 * This implementation is based on Arai, Agui, and Nakajima's algorithm for
 * scaled DCT.  Their original paper (Trans. IEICE E-71(11):1095) is in
 * Japanese, but the algorithm is described in the Pennebaker & Mitchell
 * JPEG textbook (see REFERENCES section in file README).  The following code
 * is based directly on figure 4-8 in P&M.
 * While an 8-point DCT cannot be done in less than 11 multiplies, it is
 * possible to arrange the computation so that many of the multiplies are
 * simple scalings of the final outputs.  These multiplies can then be
 * folded into the multiplications or divisions by the JPEG quantization
 * table entries.  The AA&N method leaves only 5 multiplies and 29 adds
 * to be done in the DCT itself.
 * The primary disadvantage of this method is that with fixed-point math,
 * accuracy is lost due to imprecise representation of the scaled
 * quantization values.  The smaller the quantization table entry, the less
 * precise the scaled value, so this implementation does worse with high-
 * quality-setting files than with low-quality ones.
 */

/**
 * @file
 * Independent JPEG Group's fast AAN dct.
 */

#include <stdlib.h>
#include <stdio.h>
#include "libavutil/common.h"
#include "dsputil.h"

#define DCTSIZE 8                       /* transform is hard-wired to 8x8 blocks */
#define GLOBAL(x) x                     /* IJG linkage macro; a plain no-op here */
#define RIGHT_SHIFT(x, n) ((x) >> (n))  /* descaling primitive used by DESCALE */

/*
 * This module is specialized to the case DCTSIZE = 8.
 */

#if DCTSIZE != 8
  Sorry, this code only copes with 8x8 DCTs. /* deliberate syntax err */
#endif


/* Scaling decisions are generally the same as in the LL&M algorithm;
 * see jfdctint.c for more details.  However, we choose to descale
 * (right shift) multiplication products as soon as they are formed,
 * rather than carrying additional fractional bits into subsequent additions.
 * This compromises accuracy slightly, but it lets us save a few shifts.
 * More importantly, 16-bit arithmetic is then adequate (for 8-bit samples)
 * everywhere except in the multiplications proper; this saves a good deal
 * of work on 16-bit-int machines.
 *
 * Again to save a few shifts, the intermediate results between pass 1 and
 * pass 2 are not upscaled, but are represented only to integral precision.
 *
 * A final compromise is to represent the multiplicative constants to only
 * 8 fractional bits, rather than 13.  This saves some shifting work on some
 * machines, and may also reduce the cost of multiplication (since there
 * are fewer one-bits in the constants).
 */

#define CONST_BITS 8


/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus
 * causing a lot of useless floating-point operations at run time.
 * To get around this we use the following pre-calculated constants.
 * If you change CONST_BITS you may want to add appropriate values.
 * (With a reasonable C compiler, you can just rely on the FIX() macro...)
 */

#if CONST_BITS == 8
#define FIX_0_382683433 ((int32_t) 98)  /* FIX(0.382683433) */
#define FIX_0_541196100 ((int32_t) 139) /* FIX(0.541196100) */
#define FIX_0_707106781 ((int32_t) 181) /* FIX(0.707106781) */
#define FIX_1_306562965 ((int32_t) 334) /* FIX(1.306562965) */
#else
#define FIX_0_382683433 FIX(0.382683433)
#define FIX_0_541196100 FIX(0.541196100)
#define FIX_0_707106781 FIX(0.707106781)
#define FIX_1_306562965 FIX(1.306562965)
#endif


/* We can gain a little more speed, with a further compromise in accuracy,
 * by omitting the addition in a descaling shift.  This yields an incorrectly
 * rounded result half the time...
*/

/* Without USE_ACCURATE_ROUNDING, DESCALE degenerates into a bare arithmetic
 * right shift: the rounding addend is dropped, which (as noted above) gives
 * an incorrectly rounded result half the time, but is faster. */
#ifndef USE_ACCURATE_ROUNDING
#undef DESCALE
#define DESCALE(x,n) RIGHT_SHIFT(x, n)
#endif


/* Multiply a DCTELEM variable by an int32_t constant, and immediately
 * descale to yield a DCTELEM result. */
#define MULTIPLY(var,const) ((DCTELEM) DESCALE((var) * (const), CONST_BITS))

/* Pass 1 of the AAN forward DCT: apply the scaled 8-point 1-D transform to
 * each of the eight rows of the 8x8 block, in place.  Shared by fdct_ifast()
 * and fdct_ifast248(), which each follow it with their own column pass. */
static av_always_inline void row_fdct(DCTELEM * data){
    int_fast16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    int_fast16_t tmp10, tmp11, tmp12, tmp13;
    int_fast16_t z1, z2, z3, z4, z5, z11, z13;
    DCTELEM *dataptr;
    int ctr;

    /* Pass 1: process rows. */

    dataptr = data;
    for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {
        /* Initial butterfly: sums tmp0..tmp3 feed the even half,
         * differences tmp4..tmp7 feed the odd half. */
        tmp0 = dataptr[0] + dataptr[7];
        tmp7 = dataptr[0] - dataptr[7];
        tmp1 = dataptr[1] + dataptr[6];
        tmp6 = dataptr[1] - dataptr[6];
        tmp2 = dataptr[2] + dataptr[5];
        tmp5 = dataptr[2] - dataptr[5];
        tmp3 = dataptr[3] + dataptr[4];
        tmp4 = dataptr[3] - dataptr[4];

        /* Even part */

        tmp10 = tmp0 + tmp3;    /* phase 2 */
        tmp13 = tmp0 - tmp3;
        tmp11 = tmp1 + tmp2;
        tmp12 = tmp1 - tmp2;

        dataptr[0] = tmp10 + tmp11; /* phase 3 */
        dataptr[4] = tmp10 - tmp11;

        z1 = MULTIPLY(tmp12 + tmp13, FIX_0_707106781); /* c4 */
        dataptr[2] = tmp13 + z1; /* phase 5 */
        dataptr[6] = tmp13 - z1;

        /* Odd part */

        tmp10 = tmp4 + tmp5;    /* phase 2 */
        tmp11 = tmp5 + tmp6;
        tmp12 = tmp6 + tmp7;

        /* The rotator is modified from fig 4-8 to avoid extra negations. */
        z5 = MULTIPLY(tmp10 - tmp12, FIX_0_382683433); /* c6 */
        z2 = MULTIPLY(tmp10, FIX_0_541196100) + z5; /* c2-c6 */
        z4 = MULTIPLY(tmp12, FIX_1_306562965) + z5; /* c2+c6 */
        z3 = MULTIPLY(tmp11, FIX_0_707106781); /* c4 */

        z11 = tmp7 + z3;        /* phase 5 */
        z13 = tmp7 - z3;

        dataptr[5] = z13 + z2;  /* phase 6 */
        dataptr[3] = z13 - z2;
        dataptr[1] = z11 + z4;
        dataptr[7] = z11 - z4;

        dataptr += DCTSIZE;     /* advance pointer to next row */
    }
}

/*
 * Perform the forward DCT on one block of samples.
*/ GLOBAL(void) fdct_ifast (DCTELEM * data) { int_fast16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; int_fast16_t tmp10, tmp11, tmp12, tmp13; int_fast16_t z1, z2, z3, z4, z5, z11, z13; DCTELEM *dataptr; int ctr; row_fdct(data); /* Pass 2: process columns. */ dataptr = data; for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7]; tmp7 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7]; tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6]; tmp6 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6]; tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5]; tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5]; tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4]; tmp4 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4]; /* Even part */ tmp10 = tmp0 + tmp3; /* phase 2 */ tmp13 = tmp0 - tmp3; tmp11 = tmp1 + tmp2; tmp12 = tmp1 - tmp2; dataptr[DCTSIZE*0] = tmp10 + tmp11; /* phase 3 */ dataptr[DCTSIZE*4] = tmp10 - tmp11; z1 = MULTIPLY(tmp12 + tmp13, FIX_0_707106781); /* c4 */ dataptr[DCTSIZE*2] = tmp13 + z1; /* phase 5 */ dataptr[DCTSIZE*6] = tmp13 - z1; /* Odd part */ tmp10 = tmp4 + tmp5; /* phase 2 */ tmp11 = tmp5 + tmp6; tmp12 = tmp6 + tmp7; /* The rotator is modified from fig 4-8 to avoid extra negations. */ z5 = MULTIPLY(tmp10 - tmp12, FIX_0_382683433); /* c6 */ z2 = MULTIPLY(tmp10, FIX_0_541196100) + z5; /* c2-c6 */ z4 = MULTIPLY(tmp12, FIX_1_306562965) + z5; /* c2+c6 */ z3 = MULTIPLY(tmp11, FIX_0_707106781); /* c4 */ z11 = tmp7 + z3; /* phase 5 */ z13 = tmp7 - z3; dataptr[DCTSIZE*5] = z13 + z2; /* phase 6 */ dataptr[DCTSIZE*3] = z13 - z2; dataptr[DCTSIZE*1] = z11 + z4; dataptr[DCTSIZE*7] = z11 - z4; dataptr++; /* advance pointer to next column */ } } /* * Perform the forward 2-4-8 DCT on one block of samples. */ GLOBAL(void) fdct_ifast248 (DCTELEM * data) { int_fast16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; int_fast16_t tmp10, tmp11, tmp12, tmp13; int_fast16_t z1; DCTELEM *dataptr; int ctr; row_fdct(data); /* Pass 2: process columns. 
*/ dataptr = data; for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*1]; tmp1 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*3]; tmp2 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*5]; tmp3 = dataptr[DCTSIZE*6] + dataptr[DCTSIZE*7]; tmp4 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*1]; tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*3]; tmp6 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*5]; tmp7 = dataptr[DCTSIZE*6] - dataptr[DCTSIZE*7]; /* Even part */ tmp10 = tmp0 + tmp3; tmp11 = tmp1 + tmp2; tmp12 = tmp1 - tmp2; tmp13 = tmp0 - tmp3; dataptr[DCTSIZE*0] = tmp10 + tmp11; dataptr[DCTSIZE*4] = tmp10 - tmp11; z1 = MULTIPLY(tmp12 + tmp13, FIX_0_707106781); dataptr[DCTSIZE*2] = tmp13 + z1; dataptr[DCTSIZE*6] = tmp13 - z1; tmp10 = tmp4 + tmp7; tmp11 = tmp5 + tmp6; tmp12 = tmp5 - tmp6; tmp13 = tmp4 - tmp7; dataptr[DCTSIZE*1] = tmp10 + tmp11; dataptr[DCTSIZE*5] = tmp10 - tmp11; z1 = MULTIPLY(tmp12 + tmp13, FIX_0_707106781); dataptr[DCTSIZE*3] = tmp13 + z1; dataptr[DCTSIZE*7] = tmp13 - z1; dataptr++; /* advance pointer to next column */ } } #undef GLOBAL #undef CONST_BITS #undef DESCALE #undef FIX_0_541196100 #undef FIX_1_306562965
123linslouis-android-video-cutter
jni/libavcodec/jfdctfst.c
C
asf20
11,339
/*
 * Kega Game Video (KGV1) decoder
 * Copyright (c) 2010 Daniel Verkamp
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Kega Game Video decoder
 */

#include "libavutil/intreadwrite.h"
#include "avcodec.h"

typedef struct {
    AVCodecContext *avctx;
    AVFrame pic;
    /* Double-buffered RGB555 pixel storage: 'cur' receives the frame being
     * decoded, 'prev' holds the previously decoded frame; the two are
     * swapped after every frame.  Both are (re)allocated in decode_frame(). */
    uint16_t *prev, *cur;
} KgvContext;

/**
 * Decode one KGV1 frame.
 *
 * Packet layout: byte 0 = width/8 - 1, byte 1 = height/8 - 1, followed by a
 * stream of little-endian 16-bit codes.  A code with bit 15 clear is a
 * literal RGB555 pixel; otherwise it is an LZ-style copy, either from the
 * previous frame (bits 14-13 == 11) or from earlier in the current frame.
 *
 * @return number of consumed bytes (avpkt->size) on success, -1 on error
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    const uint8_t *buf_end = buf + avpkt->size;
    KgvContext * const c = avctx->priv_data;
    /* Lazily-loaded previous-frame offset slots.  The slot index below is
     * (code >> 10) & 7, i.e. 0..7, so the array must have 8 entries: the
     * original "int offsets[7]" made slot 7 read/write past the end of the
     * stack array. */
    int offsets[8];
    uint16_t *out, *prev;
    int outcnt = 0, maxcnt;  /* pixels emitted so far / total pixels */
    int w, h, i;

    if (avpkt->size < 2)
        return -1;

    /* dimensions are stored in units of 8 pixels, minus one */
    w = (buf[0] + 1) * 8;
    h = (buf[1] + 1) * 8;
    buf += 2;

    if (avcodec_check_dimensions(avctx, w, h))
        return -1;

    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);

    maxcnt = w * h;

    /* Grow the double buffers to the frame size (2 bytes per pixel).
     * av_realloc leaves the old pointer valid on failure, so assigning the
     * context field only after the NULL check is safe. */
    out = av_realloc(c->cur, w * h * 2);
    if (!out)
        return -1;
    c->cur = out;

    prev = av_realloc(c->prev, w * h * 2);
    if (!prev)
        return -1;
    c->prev = prev;

    /* NOTE(review): on the very first frame (and for any region newly
     * exposed by a size change) 'prev' is uninitialized, yet the
     * copy-from-previous-frame opcodes below may read from it; consider
     * zero-filling new allocations to avoid an uninitialized-memory read. */

    for (i = 0; i < 8; i++)
        offsets[i] = -1;     /* -1 = offset for this slot not read yet */

    while (outcnt < maxcnt && buf_end - 2 > buf) {
        int code = AV_RL16(buf);
        buf += 2;

        if (!(code & 0x8000)) {
            out[outcnt++] = code; // rgb555 pixel coded directly
        } else {
            int count;
            uint16_t *inp;

            if ((code & 0x6000) == 0x6000) {
                // copy from previous frame
                int oidx = (code >> 10) & 7;  /* offset slot, 0..7 */
                int start;

                count = (code & 0x3FF) + 3;

                /* first use of this slot: its 24-bit offset follows inline */
                if (offsets[oidx] < 0) {
                    if (buf_end - 3 < buf)
                        break;
                    offsets[oidx] = AV_RL24(buf);
                    buf += 3;
                }

                start = (outcnt + offsets[oidx]) % maxcnt;

                if (maxcnt - start < count)
                    break;

                inp = prev + start;
            } else {
                // copy from earlier in this frame
                int offset = (code & 0x1FFF) + 1;

                /* run length: 2, 3, or 4 plus an extra length byte */
                if (!(code & 0x6000)) {
                    count = 2;
                } else if ((code & 0x6000) == 0x2000) {
                    count = 3;
                } else {
                    if (buf_end - 1 < buf)
                        break;
                    count = 4 + *buf++;
                }

                if (outcnt < offset)
                    break;

                /* the element-wise copy below deliberately tolerates
                 * overlapping source/destination (RLE-style repeats) */
                inp = out + outcnt - offset;
            }

            if (maxcnt - outcnt < count)
                break;

            for (i = 0; i < count; i++)
                out[outcnt++] = inp[i];
        }
    }

    if (outcnt - maxcnt)
        av_log(avctx, AV_LOG_DEBUG, "frame finished with %d diff\n", outcnt - maxcnt);

    /* hand the decoded buffer out without copying */
    c->pic.data[0]     = (uint8_t *)c->cur;
    c->pic.linesize[0] = w * 2;

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = c->pic;

    /* the decoded frame becomes the reference for the next one */
    FFSWAP(uint16_t *, c->cur, c->prev);

    return avpkt->size;
}

/* Initialize the decoder context and advertise the RGB555 output format. */
static av_cold int decode_init(AVCodecContext *avctx)
{
    KgvContext * const c = avctx->priv_data;

    c->avctx = avctx;
    avctx->pix_fmt = PIX_FMT_RGB555;

    return 0;
}

/* Release both pixel buffers. */
static av_cold int decode_end(AVCodecContext *avctx)
{
    KgvContext * const c = avctx->priv_data;

    av_freep(&c->cur);
    av_freep(&c->prev);

    return 0;
}

AVCodec kgv1_decoder = {
    "kgv1",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_KGV1,
    sizeof(KgvContext),
    decode_init,
    NULL,                 /* no encoder */
    decode_end,
    decode_frame,
    .long_name = NULL_IF_CONFIG_SMALL("Kega Game Video"),
};
123linslouis-android-video-cutter
jni/libavcodec/kgv1dec.c
C
asf20
4,378
/*
 * LZW decoder
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * @brief LZW decoding routines
 * @author Fabrice Bellard
 * Modified for use in TIFF by Konstantin Shishkov
 */

#ifndef AVCODEC_LZW_H
#define AVCODEC_LZW_H

#include <stdint.h>

struct PutBitContext;

/* Selects between the GIF and TIFF flavours of the LZW bitstream
 * (exact differences are implemented in lzw.c). */
enum FF_LZW_MODES{
    FF_LZW_GIF,
    FF_LZW_TIFF
};

/* clients should not know what LZWState is */
typedef void LZWState;

/* first two functions de/allocate memory for LZWState */
void ff_lzw_decode_open(LZWState **p);
void ff_lzw_decode_close(LZWState **p);

/* Prepare the decoder for a new stream.  'csize' is presumably the initial
 * LZW code size, buf/buf_size the compressed input, 'mode' one of
 * FF_LZW_MODES — confirm semantics against lzw.c. */
int ff_lzw_decode_init(LZWState *s, int csize, const uint8_t *buf,
                       int buf_size, int mode);

/* Decode up to 'len' bytes of output into 'buf'. */
int ff_lzw_decode(LZWState *s, uint8_t *buf, int len);

/* NOTE(review): by name, returns the current read position inside the
 * compressed input — verify in lzw.c. */
const uint8_t* ff_lzw_cur_ptr(LZWState *lzw);

/* NOTE(review): by name, consumes/cleans up the tail of the LZW stream
 * after decoding — verify in lzw.c. */
void ff_lzw_decode_tail(LZWState *lzw);

/** LZW encode state */
struct LZWEncodeState;

/* Size in bytes a caller must allocate for a struct LZWEncodeState
 * (the struct layout itself stays private to lzwenc.c). */
extern const int ff_lzw_encode_state_size;

/* Initialize the encoder writing at most 'outsize' bytes to 'outbuf';
 * 'lzw_put_bits' is the bit-writer callback used to emit codes. */
void ff_lzw_encode_init(struct LZWEncodeState *s, uint8_t *outbuf, int outsize,
                        int maxbits, enum FF_LZW_MODES mode,
                        void (*lzw_put_bits)(struct PutBitContext *, int, unsigned int));

/* Encode 'insize' bytes from 'inbuf'. */
int ff_lzw_encode(struct LZWEncodeState * s, const uint8_t * inbuf, int insize);

/* Flush any pending bits at the end of the stream via the given callback. */
int ff_lzw_encode_flush(struct LZWEncodeState *s,
                        void (*lzw_flush_put_bits)(struct PutBitContext *));

#endif /* AVCODEC_LZW_H */
123linslouis-android-video-cutter
jni/libavcodec/lzw.h
C
asf20
2,134
/* * H.26L/H.264/AVC/JVT/14496-10/... cabac decoding * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * H.264 / AVC / MPEG4 part10 cabac decoding. * @author Michael Niedermayer <michaelni@gmx.at> */ #define CABAC 1 #include "internal.h" #include "dsputil.h" #include "avcodec.h" #include "h264.h" #include "h264data.h" #include "h264_mvpred.h" #include "golomb.h" #include "cabac.h" #if ARCH_X86 #include "x86/h264_i386.h" #endif //#undef NDEBUG #include <assert.h> /* Cabac pre state table */ static const int8_t cabac_context_init_I[460][2] = { /* 0 - 10 */ { 20, -15 }, { 2, 54 }, { 3, 74 }, { 20, -15 }, { 2, 54 }, { 3, 74 }, { -28,127 }, { -23, 104 }, { -6, 53 }, { -1, 54 }, { 7, 51 }, /* 11 - 23 unsused for I */ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, /* 24- 39 */ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, /* 40 - 53 */ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, /* 54 - 59 */ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, 
{ 0, 0 }, { 0, 0 }, /* 60 - 69 */ { 0, 41 }, { 0, 63 }, { 0, 63 }, { 0, 63 }, { -9, 83 }, { 4, 86 }, { 0, 97 }, { -7, 72 }, { 13, 41 }, { 3, 62 }, /* 70 -> 87 */ { 0, 11 }, { 1, 55 }, { 0, 69 }, { -17, 127 }, { -13, 102 },{ 0, 82 }, { -7, 74 }, { -21, 107 }, { -27, 127 },{ -31, 127 },{ -24, 127 }, { -18, 95 }, { -27, 127 },{ -21, 114 },{ -30, 127 }, { -17, 123 }, { -12, 115 },{ -16, 122 }, /* 88 -> 104 */ { -11, 115 },{ -12, 63 }, { -2, 68 }, { -15, 84 }, { -13, 104 },{ -3, 70 }, { -8, 93 }, { -10, 90 }, { -30, 127 },{ -1, 74 }, { -6, 97 }, { -7, 91 }, { -20, 127 },{ -4, 56 }, { -5, 82 }, { -7, 76 }, { -22, 125 }, /* 105 -> 135 */ { -7, 93 }, { -11, 87 }, { -3, 77 }, { -5, 71 }, { -4, 63 }, { -4, 68 }, { -12, 84 }, { -7, 62 }, { -7, 65 }, { 8, 61 }, { 5, 56 }, { -2, 66 }, { 1, 64 }, { 0, 61 }, { -2, 78 }, { 1, 50 }, { 7, 52 }, { 10, 35 }, { 0, 44 }, { 11, 38 }, { 1, 45 }, { 0, 46 }, { 5, 44 }, { 31, 17 }, { 1, 51 }, { 7, 50 }, { 28, 19 }, { 16, 33 }, { 14, 62 }, { -13, 108 },{ -15, 100 }, /* 136 -> 165 */ { -13, 101 },{ -13, 91 }, { -12, 94 }, { -10, 88 }, { -16, 84 }, { -10, 86 }, { -7, 83 }, { -13, 87 }, { -19, 94 }, { 1, 70 }, { 0, 72 }, { -5, 74 }, { 18, 59 }, { -8, 102 }, { -15, 100 }, { 0, 95 }, { -4, 75 }, { 2, 72 }, { -11, 75 }, { -3, 71 }, { 15, 46 }, { -13, 69 }, { 0, 62 }, { 0, 65 }, { 21, 37 }, { -15, 72 }, { 9, 57 }, { 16, 54 }, { 0, 62 }, { 12, 72 }, /* 166 -> 196 */ { 24, 0 }, { 15, 9 }, { 8, 25 }, { 13, 18 }, { 15, 9 }, { 13, 19 }, { 10, 37 }, { 12, 18 }, { 6, 29 }, { 20, 33 }, { 15, 30 }, { 4, 45 }, { 1, 58 }, { 0, 62 }, { 7, 61 }, { 12, 38 }, { 11, 45 }, { 15, 39 }, { 11, 42 }, { 13, 44 }, { 16, 45 }, { 12, 41 }, { 10, 49 }, { 30, 34 }, { 18, 42 }, { 10, 55 }, { 17, 51 }, { 17, 46 }, { 0, 89 }, { 26, -19 }, { 22, -17 }, /* 197 -> 226 */ { 26, -17 }, { 30, -25 }, { 28, -20 }, { 33, -23 }, { 37, -27 }, { 33, -23 }, { 40, -28 }, { 38, -17 }, { 33, -11 }, { 40, -15 }, { 41, -6 }, { 38, 1 }, { 41, 17 }, { 30, -6 }, { 27, 3 }, { 26, 22 }, { 37, -16 }, { 
35, -4 }, { 38, -8 }, { 38, -3 }, { 37, 3 }, { 38, 5 }, { 42, 0 }, { 35, 16 }, { 39, 22 }, { 14, 48 }, { 27, 37 }, { 21, 60 }, { 12, 68 }, { 2, 97 }, /* 227 -> 251 */ { -3, 71 }, { -6, 42 }, { -5, 50 }, { -3, 54 }, { -2, 62 }, { 0, 58 }, { 1, 63 }, { -2, 72 }, { -1, 74 }, { -9, 91 }, { -5, 67 }, { -5, 27 }, { -3, 39 }, { -2, 44 }, { 0, 46 }, { -16, 64 }, { -8, 68 }, { -10, 78 }, { -6, 77 }, { -10, 86 }, { -12, 92 }, { -15, 55 }, { -10, 60 }, { -6, 62 }, { -4, 65 }, /* 252 -> 275 */ { -12, 73 }, { -8, 76 }, { -7, 80 }, { -9, 88 }, { -17, 110 },{ -11, 97 }, { -20, 84 }, { -11, 79 }, { -6, 73 }, { -4, 74 }, { -13, 86 }, { -13, 96 }, { -11, 97 }, { -19, 117 },{ -8, 78 }, { -5, 33 }, { -4, 48 }, { -2, 53 }, { -3, 62 }, { -13, 71 }, { -10, 79 }, { -12, 86 }, { -13, 90 }, { -14, 97 }, /* 276 a bit special (not used, bypass is used instead) */ { 0, 0 }, /* 277 -> 307 */ { -6, 93 }, { -6, 84 }, { -8, 79 }, { 0, 66 }, { -1, 71 }, { 0, 62 }, { -2, 60 }, { -2, 59 }, { -5, 75 }, { -3, 62 }, { -4, 58 }, { -9, 66 }, { -1, 79 }, { 0, 71 }, { 3, 68 }, { 10, 44 }, { -7, 62 }, { 15, 36 }, { 14, 40 }, { 16, 27 }, { 12, 29 }, { 1, 44 }, { 20, 36 }, { 18, 32 }, { 5, 42 }, { 1, 48 }, { 10, 62 }, { 17, 46 }, { 9, 64 }, { -12, 104 },{ -11, 97 }, /* 308 -> 337 */ { -16, 96 }, { -7, 88 }, { -8, 85 }, { -7, 85 }, { -9, 85 }, { -13, 88 }, { 4, 66 }, { -3, 77 }, { -3, 76 }, { -6, 76 }, { 10, 58 }, { -1, 76 }, { -1, 83 }, { -7, 99 }, { -14, 95 }, { 2, 95 }, { 0, 76 }, { -5, 74 }, { 0, 70 }, { -11, 75 }, { 1, 68 }, { 0, 65 }, { -14, 73 }, { 3, 62 }, { 4, 62 }, { -1, 68 }, { -13, 75 }, { 11, 55 }, { 5, 64 }, { 12, 70 }, /* 338 -> 368 */ { 15, 6 }, { 6, 19 }, { 7, 16 }, { 12, 14 }, { 18, 13 }, { 13, 11 }, { 13, 15 }, { 15, 16 }, { 12, 23 }, { 13, 23 }, { 15, 20 }, { 14, 26 }, { 14, 44 }, { 17, 40 }, { 17, 47 }, { 24, 17 }, { 21, 21 }, { 25, 22 }, { 31, 27 }, { 22, 29 }, { 19, 35 }, { 14, 50 }, { 10, 57 }, { 7, 63 }, { -2, 77 }, { -4, 82 }, { -3, 94 }, { 9, 69 }, { -12, 109 },{ 36, -35 }, { 36, -34 
}, /* 369 -> 398 */ { 32, -26 }, { 37, -30 }, { 44, -32 }, { 34, -18 }, { 34, -15 }, { 40, -15 }, { 33, -7 }, { 35, -5 }, { 33, 0 }, { 38, 2 }, { 33, 13 }, { 23, 35 }, { 13, 58 }, { 29, -3 }, { 26, 0 }, { 22, 30 }, { 31, -7 }, { 35, -15 }, { 34, -3 }, { 34, 3 }, { 36, -1 }, { 34, 5 }, { 32, 11 }, { 35, 5 }, { 34, 12 }, { 39, 11 }, { 30, 29 }, { 34, 26 }, { 29, 39 }, { 19, 66 }, /* 399 -> 435 */ { 31, 21 }, { 31, 31 }, { 25, 50 }, { -17, 120 }, { -20, 112 }, { -18, 114 }, { -11, 85 }, { -15, 92 }, { -14, 89 }, { -26, 71 }, { -15, 81 }, { -14, 80 }, { 0, 68 }, { -14, 70 }, { -24, 56 }, { -23, 68 }, { -24, 50 }, { -11, 74 }, { 23, -13 }, { 26, -13 }, { 40, -15 }, { 49, -14 }, { 44, 3 }, { 45, 6 }, { 44, 34 }, { 33, 54 }, { 19, 82 }, { -3, 75 }, { -1, 23 }, { 1, 34 }, { 1, 43 }, { 0, 54 }, { -2, 55 }, { 0, 61 }, { 1, 64 }, { 0, 68 }, { -9, 92 }, /* 436 -> 459 */ { -14, 106 }, { -13, 97 }, { -15, 90 }, { -12, 90 }, { -18, 88 }, { -10, 73 }, { -9, 79 }, { -14, 86 }, { -10, 73 }, { -10, 70 }, { -10, 69 }, { -5, 66 }, { -9, 64 }, { -5, 58 }, { 2, 59 }, { 21, -10 }, { 24, -11 }, { 28, -8 }, { 28, -1 }, { 29, 3 }, { 29, 9 }, { 35, 20 }, { 29, 36 }, { 14, 67 } }; static const int8_t cabac_context_init_PB[3][460][2] = { /* i_cabac_init_idc == 0 */ { /* 0 - 10 */ { 20, -15 }, { 2, 54 }, { 3, 74 }, { 20, -15 }, { 2, 54 }, { 3, 74 }, { -28, 127 }, { -23, 104 }, { -6, 53 }, { -1, 54 }, { 7, 51 }, /* 11 - 23 */ { 23, 33 }, { 23, 2 }, { 21, 0 }, { 1, 9 }, { 0, 49 }, { -37, 118 }, { 5, 57 }, { -13, 78 }, { -11, 65 }, { 1, 62 }, { 12, 49 }, { -4, 73 }, { 17, 50 }, /* 24 - 39 */ { 18, 64 }, { 9, 43 }, { 29, 0 }, { 26, 67 }, { 16, 90 }, { 9, 104 }, { -46, 127 }, { -20, 104 }, { 1, 67 }, { -13, 78 }, { -11, 65 }, { 1, 62 }, { -6, 86 }, { -17, 95 }, { -6, 61 }, { 9, 45 }, /* 40 - 53 */ { -3, 69 }, { -6, 81 }, { -11, 96 }, { 6, 55 }, { 7, 67 }, { -5, 86 }, { 2, 88 }, { 0, 58 }, { -3, 76 }, { -10, 94 }, { 5, 54 }, { 4, 69 }, { -3, 81 }, { 0, 88 }, /* 54 - 59 */ { -7, 67 }, { -5, 74 }, { -4, 
74 }, { -5, 80 }, { -7, 72 }, { 1, 58 }, /* 60 - 69 */ { 0, 41 }, { 0, 63 }, { 0, 63 }, { 0, 63 }, { -9, 83 }, { 4, 86 }, { 0, 97 }, { -7, 72 }, { 13, 41 }, { 3, 62 }, /* 70 - 87 */ { 0, 45 }, { -4, 78 }, { -3, 96 }, { -27, 126 }, { -28, 98 }, { -25, 101 }, { -23, 67 }, { -28, 82 }, { -20, 94 }, { -16, 83 }, { -22, 110 }, { -21, 91 }, { -18, 102 }, { -13, 93 }, { -29, 127 }, { -7, 92 }, { -5, 89 }, { -7, 96 }, { -13, 108 }, { -3, 46 }, { -1, 65 }, { -1, 57 }, { -9, 93 }, { -3, 74 }, { -9, 92 }, { -8, 87 }, { -23, 126 }, { 5, 54 }, { 6, 60 }, { 6, 59 }, { 6, 69 }, { -1, 48 }, { 0, 68 }, { -4, 69 }, { -8, 88 }, /* 105 -> 165 */ { -2, 85 }, { -6, 78 }, { -1, 75 }, { -7, 77 }, { 2, 54 }, { 5, 50 }, { -3, 68 }, { 1, 50 }, { 6, 42 }, { -4, 81 }, { 1, 63 }, { -4, 70 }, { 0, 67 }, { 2, 57 }, { -2, 76 }, { 11, 35 }, { 4, 64 }, { 1, 61 }, { 11, 35 }, { 18, 25 }, { 12, 24 }, { 13, 29 }, { 13, 36 }, { -10, 93 }, { -7, 73 }, { -2, 73 }, { 13, 46 }, { 9, 49 }, { -7, 100 }, { 9, 53 }, { 2, 53 }, { 5, 53 }, { -2, 61 }, { 0, 56 }, { 0, 56 }, { -13, 63 }, { -5, 60 }, { -1, 62 }, { 4, 57 }, { -6, 69 }, { 4, 57 }, { 14, 39 }, { 4, 51 }, { 13, 68 }, { 3, 64 }, { 1, 61 }, { 9, 63 }, { 7, 50 }, { 16, 39 }, { 5, 44 }, { 4, 52 }, { 11, 48 }, { -5, 60 }, { -1, 59 }, { 0, 59 }, { 22, 33 }, { 5, 44 }, { 14, 43 }, { -1, 78 }, { 0, 60 }, { 9, 69 }, /* 166 - 226 */ { 11, 28 }, { 2, 40 }, { 3, 44 }, { 0, 49 }, { 0, 46 }, { 2, 44 }, { 2, 51 }, { 0, 47 }, { 4, 39 }, { 2, 62 }, { 6, 46 }, { 0, 54 }, { 3, 54 }, { 2, 58 }, { 4, 63 }, { 6, 51 }, { 6, 57 }, { 7, 53 }, { 6, 52 }, { 6, 55 }, { 11, 45 }, { 14, 36 }, { 8, 53 }, { -1, 82 }, { 7, 55 }, { -3, 78 }, { 15, 46 }, { 22, 31 }, { -1, 84 }, { 25, 7 }, { 30, -7 }, { 28, 3 }, { 28, 4 }, { 32, 0 }, { 34, -1 }, { 30, 6 }, { 30, 6 }, { 32, 9 }, { 31, 19 }, { 26, 27 }, { 26, 30 }, { 37, 20 }, { 28, 34 }, { 17, 70 }, { 1, 67 }, { 5, 59 }, { 9, 67 }, { 16, 30 }, { 18, 32 }, { 18, 35 }, { 22, 29 }, { 24, 31 }, { 23, 38 }, { 18, 43 }, { 20, 41 }, { 11, 63 }, { 
9, 59 }, { 9, 64 }, { -1, 94 }, { -2, 89 }, { -9, 108 }, /* 227 - 275 */ { -6, 76 }, { -2, 44 }, { 0, 45 }, { 0, 52 }, { -3, 64 }, { -2, 59 }, { -4, 70 }, { -4, 75 }, { -8, 82 }, { -17, 102 }, { -9, 77 }, { 3, 24 }, { 0, 42 }, { 0, 48 }, { 0, 55 }, { -6, 59 }, { -7, 71 }, { -12, 83 }, { -11, 87 }, { -30, 119 }, { 1, 58 }, { -3, 29 }, { -1, 36 }, { 1, 38 }, { 2, 43 }, { -6, 55 }, { 0, 58 }, { 0, 64 }, { -3, 74 }, { -10, 90 }, { 0, 70 }, { -4, 29 }, { 5, 31 }, { 7, 42 }, { 1, 59 }, { -2, 58 }, { -3, 72 }, { -3, 81 }, { -11, 97 }, { 0, 58 }, { 8, 5 }, { 10, 14 }, { 14, 18 }, { 13, 27 }, { 2, 40 }, { 0, 58 }, { -3, 70 }, { -6, 79 }, { -8, 85 }, /* 276 a bit special (not used, bypass is used instead) */ { 0, 0 }, /* 277 - 337 */ { -13, 106 }, { -16, 106 }, { -10, 87 }, { -21, 114 }, { -18, 110 }, { -14, 98 }, { -22, 110 }, { -21, 106 }, { -18, 103 }, { -21, 107 }, { -23, 108 }, { -26, 112 }, { -10, 96 }, { -12, 95 }, { -5, 91 }, { -9, 93 }, { -22, 94 }, { -5, 86 }, { 9, 67 }, { -4, 80 }, { -10, 85 }, { -1, 70 }, { 7, 60 }, { 9, 58 }, { 5, 61 }, { 12, 50 }, { 15, 50 }, { 18, 49 }, { 17, 54 }, { 10, 41 }, { 7, 46 }, { -1, 51 }, { 7, 49 }, { 8, 52 }, { 9, 41 }, { 6, 47 }, { 2, 55 }, { 13, 41 }, { 10, 44 }, { 6, 50 }, { 5, 53 }, { 13, 49 }, { 4, 63 }, { 6, 64 }, { -2, 69 }, { -2, 59 }, { 6, 70 }, { 10, 44 }, { 9, 31 }, { 12, 43 }, { 3, 53 }, { 14, 34 }, { 10, 38 }, { -3, 52 }, { 13, 40 }, { 17, 32 }, { 7, 44 }, { 7, 38 }, { 13, 50 }, { 10, 57 }, { 26, 43 }, /* 338 - 398 */ { 14, 11 }, { 11, 14 }, { 9, 11 }, { 18, 11 }, { 21, 9 }, { 23, -2 }, { 32, -15 }, { 32, -15 }, { 34, -21 }, { 39, -23 }, { 42, -33 }, { 41, -31 }, { 46, -28 }, { 38, -12 }, { 21, 29 }, { 45, -24 }, { 53, -45 }, { 48, -26 }, { 65, -43 }, { 43, -19 }, { 39, -10 }, { 30, 9 }, { 18, 26 }, { 20, 27 }, { 0, 57 }, { -14, 82 }, { -5, 75 }, { -19, 97 }, { -35, 125 }, { 27, 0 }, { 28, 0 }, { 31, -4 }, { 27, 6 }, { 34, 8 }, { 30, 10 }, { 24, 22 }, { 33, 19 }, { 22, 32 }, { 26, 31 }, { 21, 41 }, { 26, 44 }, { 23, 47 
}, { 16, 65 }, { 14, 71 }, { 8, 60 }, { 6, 63 }, { 17, 65 }, { 21, 24 }, { 23, 20 }, { 26, 23 }, { 27, 32 }, { 28, 23 }, { 28, 24 }, { 23, 40 }, { 24, 32 }, { 28, 29 }, { 23, 42 }, { 19, 57 }, { 22, 53 }, { 22, 61 }, { 11, 86 }, /* 399 - 435 */ { 12, 40 }, { 11, 51 }, { 14, 59 }, { -4, 79 }, { -7, 71 }, { -5, 69 }, { -9, 70 }, { -8, 66 }, { -10, 68 }, { -19, 73 }, { -12, 69 }, { -16, 70 }, { -15, 67 }, { -20, 62 }, { -19, 70 }, { -16, 66 }, { -22, 65 }, { -20, 63 }, { 9, -2 }, { 26, -9 }, { 33, -9 }, { 39, -7 }, { 41, -2 }, { 45, 3 }, { 49, 9 }, { 45, 27 }, { 36, 59 }, { -6, 66 }, { -7, 35 }, { -7, 42 }, { -8, 45 }, { -5, 48 }, { -12, 56 }, { -6, 60 }, { -5, 62 }, { -8, 66 }, { -8, 76 }, /* 436 - 459 */ { -5, 85 }, { -6, 81 }, { -10, 77 }, { -7, 81 }, { -17, 80 }, { -18, 73 }, { -4, 74 }, { -10, 83 }, { -9, 71 }, { -9, 67 }, { -1, 61 }, { -8, 66 }, { -14, 66 }, { 0, 59 }, { 2, 59 }, { 21, -13 }, { 33, -14 }, { 39, -7 }, { 46, -2 }, { 51, 2 }, { 60, 6 }, { 61, 17 }, { 55, 34 }, { 42, 62 }, }, /* i_cabac_init_idc == 1 */ { /* 0 - 10 */ { 20, -15 }, { 2, 54 }, { 3, 74 }, { 20, -15 }, { 2, 54 }, { 3, 74 }, { -28, 127 }, { -23, 104 }, { -6, 53 }, { -1, 54 }, { 7, 51 }, /* 11 - 23 */ { 22, 25 }, { 34, 0 }, { 16, 0 }, { -2, 9 }, { 4, 41 }, { -29, 118 }, { 2, 65 }, { -6, 71 }, { -13, 79 }, { 5, 52 }, { 9, 50 }, { -3, 70 }, { 10, 54 }, /* 24 - 39 */ { 26, 34 }, { 19, 22 }, { 40, 0 }, { 57, 2 }, { 41, 36 }, { 26, 69 }, { -45, 127 }, { -15, 101 }, { -4, 76 }, { -6, 71 }, { -13, 79 }, { 5, 52 }, { 6, 69 }, { -13, 90 }, { 0, 52 }, { 8, 43 }, /* 40 - 53 */ { -2, 69 },{ -5, 82 },{ -10, 96 },{ 2, 59 }, { 2, 75 },{ -3, 87 },{ -3, 100 },{ 1, 56 }, { -3, 74 },{ -6, 85 },{ 0, 59 },{ -3, 81 }, { -7, 86 },{ -5, 95 }, /* 54 - 59 */ { -1, 66 },{ -1, 77 },{ 1, 70 },{ -2, 86 }, { -5, 72 },{ 0, 61 }, /* 60 - 69 */ { 0, 41 }, { 0, 63 }, { 0, 63 }, { 0, 63 }, { -9, 83 }, { 4, 86 }, { 0, 97 }, { -7, 72 }, { 13, 41 }, { 3, 62 }, /* 70 - 104 */ { 13, 15 }, { 7, 51 }, { 2, 80 }, { -39, 127 }, { 
-18, 91 }, { -17, 96 }, { -26, 81 }, { -35, 98 }, { -24, 102 }, { -23, 97 }, { -27, 119 }, { -24, 99 }, { -21, 110 }, { -18, 102 }, { -36, 127 }, { 0, 80 }, { -5, 89 }, { -7, 94 }, { -4, 92 }, { 0, 39 }, { 0, 65 }, { -15, 84 }, { -35, 127 }, { -2, 73 }, { -12, 104 }, { -9, 91 }, { -31, 127 }, { 3, 55 }, { 7, 56 }, { 7, 55 }, { 8, 61 }, { -3, 53 }, { 0, 68 }, { -7, 74 }, { -9, 88 }, /* 105 -> 165 */ { -13, 103 }, { -13, 91 }, { -9, 89 }, { -14, 92 }, { -8, 76 }, { -12, 87 }, { -23, 110 }, { -24, 105 }, { -10, 78 }, { -20, 112 }, { -17, 99 }, { -78, 127 }, { -70, 127 }, { -50, 127 }, { -46, 127 }, { -4, 66 }, { -5, 78 }, { -4, 71 }, { -8, 72 }, { 2, 59 }, { -1, 55 }, { -7, 70 }, { -6, 75 }, { -8, 89 }, { -34, 119 }, { -3, 75 }, { 32, 20 }, { 30, 22 }, { -44, 127 }, { 0, 54 }, { -5, 61 }, { 0, 58 }, { -1, 60 }, { -3, 61 }, { -8, 67 }, { -25, 84 }, { -14, 74 }, { -5, 65 }, { 5, 52 }, { 2, 57 }, { 0, 61 }, { -9, 69 }, { -11, 70 }, { 18, 55 }, { -4, 71 }, { 0, 58 }, { 7, 61 }, { 9, 41 }, { 18, 25 }, { 9, 32 }, { 5, 43 }, { 9, 47 }, { 0, 44 }, { 0, 51 }, { 2, 46 }, { 19, 38 }, { -4, 66 }, { 15, 38 }, { 12, 42 }, { 9, 34 }, { 0, 89 }, /* 166 - 226 */ { 4, 45 }, { 10, 28 }, { 10, 31 }, { 33, -11 }, { 52, -43 }, { 18, 15 }, { 28, 0 }, { 35, -22 }, { 38, -25 }, { 34, 0 }, { 39, -18 }, { 32, -12 }, { 102, -94 }, { 0, 0 }, { 56, -15 }, { 33, -4 }, { 29, 10 }, { 37, -5 }, { 51, -29 }, { 39, -9 }, { 52, -34 }, { 69, -58 }, { 67, -63 }, { 44, -5 }, { 32, 7 }, { 55, -29 }, { 32, 1 }, { 0, 0 }, { 27, 36 }, { 33, -25 }, { 34, -30 }, { 36, -28 }, { 38, -28 }, { 38, -27 }, { 34, -18 }, { 35, -16 }, { 34, -14 }, { 32, -8 }, { 37, -6 }, { 35, 0 }, { 30, 10 }, { 28, 18 }, { 26, 25 }, { 29, 41 }, { 0, 75 }, { 2, 72 }, { 8, 77 }, { 14, 35 }, { 18, 31 }, { 17, 35 }, { 21, 30 }, { 17, 45 }, { 20, 42 }, { 18, 45 }, { 27, 26 }, { 16, 54 }, { 7, 66 }, { 16, 56 }, { 11, 73 }, { 10, 67 }, { -10, 116 }, /* 227 - 275 */ { -23, 112 }, { -15, 71 }, { -7, 61 }, { 0, 53 }, { -5, 66 }, { -11, 77 }, { -9, 
80 }, { -9, 84 }, { -10, 87 }, { -34, 127 }, { -21, 101 }, { -3, 39 }, { -5, 53 }, { -7, 61 }, { -11, 75 }, { -15, 77 }, { -17, 91 }, { -25, 107 }, { -25, 111 }, { -28, 122 }, { -11, 76 }, { -10, 44 }, { -10, 52 }, { -10, 57 }, { -9, 58 }, { -16, 72 }, { -7, 69 }, { -4, 69 }, { -5, 74 }, { -9, 86 }, { 2, 66 }, { -9, 34 }, { 1, 32 }, { 11, 31 }, { 5, 52 }, { -2, 55 }, { -2, 67 }, { 0, 73 }, { -8, 89 }, { 3, 52 }, { 7, 4 }, { 10, 8 }, { 17, 8 }, { 16, 19 }, { 3, 37 }, { -1, 61 }, { -5, 73 }, { -1, 70 }, { -4, 78 }, /* 276 a bit special (not used, bypass is used instead) */ { 0, 0 }, /* 277 - 337 */ { -21, 126 }, { -23, 124 }, { -20, 110 }, { -26, 126 }, { -25, 124 }, { -17, 105 }, { -27, 121 }, { -27, 117 }, { -17, 102 }, { -26, 117 }, { -27, 116 }, { -33, 122 }, { -10, 95 }, { -14, 100 }, { -8, 95 }, { -17, 111 }, { -28, 114 }, { -6, 89 }, { -2, 80 }, { -4, 82 }, { -9, 85 }, { -8, 81 }, { -1, 72 }, { 5, 64 }, { 1, 67 }, { 9, 56 }, { 0, 69 }, { 1, 69 }, { 7, 69 }, { -7, 69 }, { -6, 67 }, { -16, 77 }, { -2, 64 }, { 2, 61 }, { -6, 67 }, { -3, 64 }, { 2, 57 }, { -3, 65 }, { -3, 66 }, { 0, 62 }, { 9, 51 }, { -1, 66 }, { -2, 71 }, { -2, 75 }, { -1, 70 }, { -9, 72 }, { 14, 60 }, { 16, 37 }, { 0, 47 }, { 18, 35 }, { 11, 37 }, { 12, 41 }, { 10, 41 }, { 2, 48 }, { 12, 41 }, { 13, 41 }, { 0, 59 }, { 3, 50 }, { 19, 40 }, { 3, 66 }, { 18, 50 }, /* 338 - 398 */ { 19, -6 }, { 18, -6 }, { 14, 0 }, { 26, -12 }, { 31, -16 }, { 33, -25 }, { 33, -22 }, { 37, -28 }, { 39, -30 }, { 42, -30 }, { 47, -42 }, { 45, -36 }, { 49, -34 }, { 41, -17 }, { 32, 9 }, { 69, -71 }, { 63, -63 }, { 66, -64 }, { 77, -74 }, { 54, -39 }, { 52, -35 }, { 41, -10 }, { 36, 0 }, { 40, -1 }, { 30, 14 }, { 28, 26 }, { 23, 37 }, { 12, 55 }, { 11, 65 }, { 37, -33 }, { 39, -36 }, { 40, -37 }, { 38, -30 }, { 46, -33 }, { 42, -30 }, { 40, -24 }, { 49, -29 }, { 38, -12 }, { 40, -10 }, { 38, -3 }, { 46, -5 }, { 31, 20 }, { 29, 30 }, { 25, 44 }, { 12, 48 }, { 11, 49 }, { 26, 45 }, { 22, 22 }, { 23, 22 }, { 27, 21 }, { 33, 
20 }, { 26, 28 }, { 30, 24 }, { 27, 34 }, { 18, 42 }, { 25, 39 }, { 18, 50 }, { 12, 70 }, { 21, 54 }, { 14, 71 }, { 11, 83 }, /* 399 - 435 */ { 25, 32 }, { 21, 49 }, { 21, 54 }, { -5, 85 }, { -6, 81 }, { -10, 77 }, { -7, 81 }, { -17, 80 }, { -18, 73 }, { -4, 74 }, { -10, 83 }, { -9, 71 }, { -9, 67 }, { -1, 61 }, { -8, 66 }, { -14, 66 }, { 0, 59 }, { 2, 59 }, { 17, -10 }, { 32, -13 }, { 42, -9 }, { 49, -5 }, { 53, 0 }, { 64, 3 }, { 68, 10 }, { 66, 27 }, { 47, 57 }, { -5, 71 }, { 0, 24 }, { -1, 36 }, { -2, 42 }, { -2, 52 }, { -9, 57 }, { -6, 63 }, { -4, 65 }, { -4, 67 }, { -7, 82 }, /* 436 - 459 */ { -3, 81 }, { -3, 76 }, { -7, 72 }, { -6, 78 }, { -12, 72 }, { -14, 68 }, { -3, 70 }, { -6, 76 }, { -5, 66 }, { -5, 62 }, { 0, 57 }, { -4, 61 }, { -9, 60 }, { 1, 54 }, { 2, 58 }, { 17, -10 }, { 32, -13 }, { 42, -9 }, { 49, -5 }, { 53, 0 }, { 64, 3 }, { 68, 10 }, { 66, 27 }, { 47, 57 }, }, /* i_cabac_init_idc == 2 */ { /* 0 - 10 */ { 20, -15 }, { 2, 54 }, { 3, 74 }, { 20, -15 }, { 2, 54 }, { 3, 74 }, { -28, 127 }, { -23, 104 }, { -6, 53 }, { -1, 54 }, { 7, 51 }, /* 11 - 23 */ { 29, 16 }, { 25, 0 }, { 14, 0 }, { -10, 51 }, { -3, 62 }, { -27, 99 }, { 26, 16 }, { -4, 85 }, { -24, 102 }, { 5, 57 }, { 6, 57 }, { -17, 73 }, { 14, 57 }, /* 24 - 39 */ { 20, 40 }, { 20, 10 }, { 29, 0 }, { 54, 0 }, { 37, 42 }, { 12, 97 }, { -32, 127 }, { -22, 117 }, { -2, 74 }, { -4, 85 }, { -24, 102 }, { 5, 57 }, { -6, 93 }, { -14, 88 }, { -6, 44 }, { 4, 55 }, /* 40 - 53 */ { -11, 89 },{ -15, 103 },{ -21, 116 },{ 19, 57 }, { 20, 58 },{ 4, 84 },{ 6, 96 },{ 1, 63 }, { -5, 85 },{ -13, 106 },{ 5, 63 },{ 6, 75 }, { -3, 90 },{ -1, 101 }, /* 54 - 59 */ { 3, 55 },{ -4, 79 },{ -2, 75 },{ -12, 97 }, { -7, 50 },{ 1, 60 }, /* 60 - 69 */ { 0, 41 }, { 0, 63 }, { 0, 63 }, { 0, 63 }, { -9, 83 }, { 4, 86 }, { 0, 97 }, { -7, 72 }, { 13, 41 }, { 3, 62 }, /* 70 - 104 */ { 7, 34 }, { -9, 88 }, { -20, 127 }, { -36, 127 }, { -17, 91 }, { -14, 95 }, { -25, 84 }, { -25, 86 }, { -12, 89 }, { -17, 91 }, { -31, 127 }, { -14, 
76 }, { -18, 103 }, { -13, 90 }, { -37, 127 }, { 11, 80 }, { 5, 76 }, { 2, 84 }, { 5, 78 }, { -6, 55 }, { 4, 61 }, { -14, 83 }, { -37, 127 }, { -5, 79 }, { -11, 104 }, { -11, 91 }, { -30, 127 }, { 0, 65 }, { -2, 79 }, { 0, 72 }, { -4, 92 }, { -6, 56 }, { 3, 68 }, { -8, 71 }, { -13, 98 }, /* 105 -> 165 */ { -4, 86 }, { -12, 88 }, { -5, 82 }, { -3, 72 }, { -4, 67 }, { -8, 72 }, { -16, 89 }, { -9, 69 }, { -1, 59 }, { 5, 66 }, { 4, 57 }, { -4, 71 }, { -2, 71 }, { 2, 58 }, { -1, 74 }, { -4, 44 }, { -1, 69 }, { 0, 62 }, { -7, 51 }, { -4, 47 }, { -6, 42 }, { -3, 41 }, { -6, 53 }, { 8, 76 }, { -9, 78 }, { -11, 83 }, { 9, 52 }, { 0, 67 }, { -5, 90 }, { 1, 67 }, { -15, 72 }, { -5, 75 }, { -8, 80 }, { -21, 83 }, { -21, 64 }, { -13, 31 }, { -25, 64 }, { -29, 94 }, { 9, 75 }, { 17, 63 }, { -8, 74 }, { -5, 35 }, { -2, 27 }, { 13, 91 }, { 3, 65 }, { -7, 69 }, { 8, 77 }, { -10, 66 }, { 3, 62 }, { -3, 68 }, { -20, 81 }, { 0, 30 }, { 1, 7 }, { -3, 23 }, { -21, 74 }, { 16, 66 }, { -23, 124 }, { 17, 37 }, { 44, -18 }, { 50, -34 }, { -22, 127 }, /* 166 - 226 */ { 4, 39 }, { 0, 42 }, { 7, 34 }, { 11, 29 }, { 8, 31 }, { 6, 37 }, { 7, 42 }, { 3, 40 }, { 8, 33 }, { 13, 43 }, { 13, 36 }, { 4, 47 }, { 3, 55 }, { 2, 58 }, { 6, 60 }, { 8, 44 }, { 11, 44 }, { 14, 42 }, { 7, 48 }, { 4, 56 }, { 4, 52 }, { 13, 37 }, { 9, 49 }, { 19, 58 }, { 10, 48 }, { 12, 45 }, { 0, 69 }, { 20, 33 }, { 8, 63 }, { 35, -18 }, { 33, -25 }, { 28, -3 }, { 24, 10 }, { 27, 0 }, { 34, -14 }, { 52, -44 }, { 39, -24 }, { 19, 17 }, { 31, 25 }, { 36, 29 }, { 24, 33 }, { 34, 15 }, { 30, 20 }, { 22, 73 }, { 20, 34 }, { 19, 31 }, { 27, 44 }, { 19, 16 }, { 15, 36 }, { 15, 36 }, { 21, 28 }, { 25, 21 }, { 30, 20 }, { 31, 12 }, { 27, 16 }, { 24, 42 }, { 0, 93 }, { 14, 56 }, { 15, 57 }, { 26, 38 }, { -24, 127 }, /* 227 - 275 */ { -24, 115 }, { -22, 82 }, { -9, 62 }, { 0, 53 }, { 0, 59 }, { -14, 85 }, { -13, 89 }, { -13, 94 }, { -11, 92 }, { -29, 127 }, { -21, 100 }, { -14, 57 }, { -12, 67 }, { -11, 71 }, { -10, 77 }, { -21, 85 }, { 
-16, 88 }, { -23, 104 }, { -15, 98 }, { -37, 127 }, { -10, 82 }, { -8, 48 }, { -8, 61 }, { -8, 66 }, { -7, 70 }, { -14, 75 }, { -10, 79 }, { -9, 83 }, { -12, 92 }, { -18, 108 }, { -4, 79 }, { -22, 69 }, { -16, 75 }, { -2, 58 }, { 1, 58 }, { -13, 78 }, { -9, 83 }, { -4, 81 }, { -13, 99 }, { -13, 81 }, { -6, 38 }, { -13, 62 }, { -6, 58 }, { -2, 59 }, { -16, 73 }, { -10, 76 }, { -13, 86 }, { -9, 83 }, { -10, 87 }, /* 276 a bit special (not used, bypass is used instead) */ { 0, 0 }, /* 277 - 337 */ { -22, 127 }, { -25, 127 }, { -25, 120 }, { -27, 127 }, { -19, 114 }, { -23, 117 }, { -25, 118 }, { -26, 117 }, { -24, 113 }, { -28, 118 }, { -31, 120 }, { -37, 124 }, { -10, 94 }, { -15, 102 }, { -10, 99 }, { -13, 106 }, { -50, 127 }, { -5, 92 }, { 17, 57 }, { -5, 86 }, { -13, 94 }, { -12, 91 }, { -2, 77 }, { 0, 71 }, { -1, 73 }, { 4, 64 }, { -7, 81 }, { 5, 64 }, { 15, 57 }, { 1, 67 }, { 0, 68 }, { -10, 67 }, { 1, 68 }, { 0, 77 }, { 2, 64 }, { 0, 68 }, { -5, 78 }, { 7, 55 }, { 5, 59 }, { 2, 65 }, { 14, 54 }, { 15, 44 }, { 5, 60 }, { 2, 70 }, { -2, 76 }, { -18, 86 }, { 12, 70 }, { 5, 64 }, { -12, 70 }, { 11, 55 }, { 5, 56 }, { 0, 69 }, { 2, 65 }, { -6, 74 }, { 5, 54 }, { 7, 54 }, { -6, 76 }, { -11, 82 }, { -2, 77 }, { -2, 77 }, { 25, 42 }, /* 338 - 398 */ { 17, -13 }, { 16, -9 }, { 17, -12 }, { 27, -21 }, { 37, -30 }, { 41, -40 }, { 42, -41 }, { 48, -47 }, { 39, -32 }, { 46, -40 }, { 52, -51 }, { 46, -41 }, { 52, -39 }, { 43, -19 }, { 32, 11 }, { 61, -55 }, { 56, -46 }, { 62, -50 }, { 81, -67 }, { 45, -20 }, { 35, -2 }, { 28, 15 }, { 34, 1 }, { 39, 1 }, { 30, 17 }, { 20, 38 }, { 18, 45 }, { 15, 54 }, { 0, 79 }, { 36, -16 }, { 37, -14 }, { 37, -17 }, { 32, 1 }, { 34, 15 }, { 29, 15 }, { 24, 25 }, { 34, 22 }, { 31, 16 }, { 35, 18 }, { 31, 28 }, { 33, 41 }, { 36, 28 }, { 27, 47 }, { 21, 62 }, { 18, 31 }, { 19, 26 }, { 36, 24 }, { 24, 23 }, { 27, 16 }, { 24, 30 }, { 31, 29 }, { 22, 41 }, { 22, 42 }, { 16, 60 }, { 15, 52 }, { 14, 60 }, { 3, 78 }, { -16, 123 }, { 21, 53 }, { 22, 
56 }, { 25, 61 },

    /* 399 - 435 */
    { 21, 33 }, { 19, 50 }, { 17, 61 },
    { -3, 78 }, { -8, 74 }, { -9, 72 },
    { -10, 72 }, { -18, 75 }, { -12, 71 },
    { -11, 63 }, { -5, 70 }, { -17, 75 },
    { -14, 72 }, { -16, 67 }, { -8, 53 },
    { -14, 59 }, { -9, 52 }, { -11, 68 },
    { 9, -2 }, { 30, -10 }, { 31, -4 },
    { 33, -1 }, { 33, 7 }, { 31, 12 },
    { 37, 23 }, { 31, 38 }, { 20, 64 },
    { -9, 71 }, { -7, 37 }, { -8, 44 },
    { -11, 49 }, { -10, 56 }, { -12, 59 },
    { -8, 63 }, { -9, 67 }, { -6, 68 },
    { -10, 79 },

    /* 436 - 459 */
    { -3, 78 }, { -8, 74 }, { -9, 72 },
    { -10, 72 }, { -18, 75 }, { -12, 71 },
    { -11, 63 }, { -5, 70 }, { -17, 75 },
    { -14, 72 }, { -16, 67 }, { -8, 53 },
    { -14, 59 }, { -9, 52 }, { -11, 68 },
    { 9, -2 }, { 30, -10 }, { 31, -4 },
    { 33, -1 }, { 33, 7 }, { 31, 12 },
    { 37, 23 }, { 31, 38 }, { 20, 64 },
    }   /* end of i_cabac_init_idc == 2 table */
};      /* end of cabac_context_init_PB */

/**
 * Initialize the 460 CABAC context states for the current slice.
 * Picks the I-slice table or one of the three P/B tables selected by
 * cabac_init_idc, then derives each state from the (m, n) pair and the
 * current qscale (the standard pre-state computation).
 */
void ff_h264_init_cabac_states(H264Context *h) {
    MpegEncContext * const s = &h->s;
    int i;
    const int8_t (*tab)[2];

    if( h->slice_type_nos == FF_I_TYPE ) tab = cabac_context_init_I;
    else                                 tab = cabac_context_init_PB[h->cabac_init_idc];

    /* calculate pre-state: pre = 2*clip(((m*qscale)>>4)+n) - 127, folded to
     * the MPS/state encoding used by the arithmetic decoder */
    for( i= 0; i < 460; i++ ) {
        int pre = 2*(((tab[i][0] * s->qscale) >>4 ) + tab[i][1]) - 127;

        pre^= pre>>31;           /* abs() without a branch */
        if(pre > 124)
            pre= 124 + (pre&1);  /* clamp state, keep the MPS bit */

        h->cabac_state[i] =  pre;
    }
}

/**
 * Decode mb_field_decoding_flag (MBAFF).
 * ctx counts field-coded neighbours: one bit from the previously decoded
 * MB (only when not at the start of a row) and one from the MB pair above
 * (mb_type bit 7, gated on same-slice membership). States 70..72.
 */
static int decode_cabac_field_decoding_flag(H264Context *h) {
    MpegEncContext * const s = &h->s;
    const long mbb_xy = h->mb_xy - 2L*s->mb_stride;  /* MB pair above */

    unsigned long ctx = 0;

    ctx += h->mb_field_decoding_flag & !!s->mb_x; //for FMO:(s->current_picture.mb_type[mba_xy]>>7)&(h->slice_table[mba_xy] == h->slice_num);
    ctx += (s->current_picture.mb_type[mbb_xy]>>7)&(h->slice_table[mbb_xy] == h->slice_num);

    return get_cabac_noinline( &h->cabac, &(h->cabac_state+70)[ctx] );
}

/**
 * Decode an intra macroblock type.
 * @param ctx_base index of the first CABAC state of the mb_type tree
 * @param intra_slice nonzero in I slices (extra neighbour-based context
 *                    and a state offset of 2 after the first bin)
 * @return 0 for I4x4, 25 for PCM, or 1..24 encoding the I16x16 variant
 *         (pred mode, cbp_chroma, cbp_luma packed into the value)
 */
static int decode_cabac_intra_mb_type(H264Context *h, int ctx_base, int intra_slice) {
    uint8_t *state= &h->cabac_state[ctx_base];
    int mb_type;

    if(intra_slice){
        int ctx=0;
        /* context from left/top neighbours being I16x16 or PCM */
        if( h->left_type[0] & (MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM))
            ctx++;
        if( h->top_type & (MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM))
            ctx++;
        if( get_cabac_noinline( &h->cabac, &state[ctx] ) == 0 )
            return 0;   /* I4x4 */
        state += 2;
    }else{
        if( get_cabac_noinline( &h->cabac, state ) == 0 )
            return 0;   /* I4x4 */
    }

    if( get_cabac_terminate( &h->cabac ) )
        return 25;  /* PCM */

    mb_type = 1;    /* I16x16 */
    mb_type += 12 * get_cabac_noinline( &h->cabac, &state[1] ); /* cbp_luma != 0 */
    if( get_cabac_noinline( &h->cabac, &state[2] ) ) /* cbp_chroma */
        mb_type += 4 + 4 * get_cabac_noinline( &h->cabac, &state[2+intra_slice] );
    mb_type += 2 * get_cabac_noinline( &h->cabac, &state[3+intra_slice] );
    mb_type += 1 * get_cabac_noinline( &h->cabac, &state[3+2*intra_slice] );
    return mb_type;
}

/**
 * Decode mb_skip_flag for macroblock (mb_x, mb_y).
 * ctx 0..2 counts available, non-skipped left (A) and top (B) neighbours;
 * B slices use an additional offset of 13. States 11+ctx.
 * NOTE(review): for the top row / left column this reads slice_table at
 * negative offsets — presumably the table is padded; confirm at the caller.
 */
static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
    MpegEncContext * const s = &h->s;
    int mba_xy, mbb_xy;
    int ctx = 0;

    if(FRAME_MBAFF){ //FIXME merge with the stuff in fill_caches?
        int mb_xy = mb_x + (mb_y&~1)*s->mb_stride;
        mba_xy = mb_xy - 1;
        /* pick the field/frame neighbour that matches our coding mode */
        if( (mb_y&1)
            && h->slice_table[mba_xy] == h->slice_num
            && MB_FIELD == !!IS_INTERLACED( s->current_picture.mb_type[mba_xy] ) )
            mba_xy += s->mb_stride;
        if( MB_FIELD ){
            mbb_xy = mb_xy - s->mb_stride;
            if( !(mb_y&1)
                && h->slice_table[mbb_xy] == h->slice_num
                && IS_INTERLACED( s->current_picture.mb_type[mbb_xy] ) )
                mbb_xy -= s->mb_stride;
        }else
            mbb_xy = mb_x + (mb_y-1)*s->mb_stride;
    }else{
        int mb_xy = h->mb_xy;
        mba_xy = mb_xy - 1;
        mbb_xy = mb_xy - (s->mb_stride << FIELD_PICTURE);
    }

    if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mba_xy] ))
        ctx++;
    if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mbb_xy] ))
        ctx++;

    if( h->slice_type_nos == FF_B_TYPE )
        ctx += 13;
    return get_cabac_noinline( &h->cabac, &h->cabac_state[11+ctx] );
}

/**
 * Decode the intra 4x4 prediction mode for one block.
 * First bin (state 68) selects "use predicted mode"; otherwise three bins
 * (state 69) form rem_intra4x4_pred_mode, remapped around pred_mode.
 */
static int decode_cabac_mb_intra4x4_pred_mode( H264Context *h, int pred_mode ) {
    int mode = 0;

    if( get_cabac( &h->cabac, &h->cabac_state[68] ) )
        return pred_mode;

    mode += 1 * get_cabac( &h->cabac, &h->cabac_state[69] );
    mode += 2 * get_cabac( &h->cabac, &h->cabac_state[69] );
    mode += 4 *
            get_cabac( &h->cabac, &h->cabac_state[69] );
    /* skip over the predicted mode so all 9 modes stay reachable */
    return mode + ( mode >= pred_mode );
}

/**
 * Decode intra_chroma_pred_mode (0..3) as a truncated unary code.
 * ctx 0..2 counts neighbours with a nonzero chroma pred mode;
 * states 64+ctx for the first bin, 64+3 for the rest.
 */
static int decode_cabac_mb_chroma_pre_mode( H264Context *h) {
    const int mba_xy = h->left_mb_xy[0];
    const int mbb_xy = h->top_mb_xy;

    int ctx = 0;

    /* No need to test for IS_INTRA4x4 and IS_INTRA16x16, as we set chroma_pred_mode_table to 0 */
    if( h->left_type[0] && h->chroma_pred_mode_table[mba_xy] != 0 )
        ctx++;

    if( h->top_type && h->chroma_pred_mode_table[mbb_xy] != 0 )
        ctx++;

    if( get_cabac_noinline( &h->cabac, &h->cabac_state[64+ctx] ) == 0 )
        return 0;

    if( get_cabac_noinline( &h->cabac, &h->cabac_state[64+3] ) == 0 )
        return 1;
    if( get_cabac_noinline( &h->cabac, &h->cabac_state[64+3] ) == 0 )
        return 2;
    else
        return 3;
}

/**
 * Decode the four luma coded_block_pattern bits.
 * Each bit's context is built from the already-decoded bits of this MB
 * and the left/top neighbour cbp; states 73..76.
 */
static int decode_cabac_mb_cbp_luma( H264Context *h) {
    int cbp_b, cbp_a, ctx, cbp = 0;

    cbp_a = h->left_cbp;
    cbp_b = h->top_cbp;

    ctx = !(cbp_a & 0x02) + 2 * !(cbp_b & 0x04);
    cbp += get_cabac_noinline(&h->cabac, &h->cabac_state[73 + ctx]);
    ctx = !(cbp & 0x01) + 2 * !(cbp_b & 0x08);
    cbp += get_cabac_noinline(&h->cabac, &h->cabac_state[73 + ctx]) << 1;
    ctx = !(cbp_a & 0x08) + 2 * !(cbp & 0x01);
    cbp += get_cabac_noinline(&h->cabac, &h->cabac_state[73 + ctx]) << 2;
    ctx = !(cbp & 0x04) + 2 * !(cbp & 0x02);
    cbp += get_cabac_noinline(&h->cabac, &h->cabac_state[73 + ctx]) << 3;
    return cbp;
}

/**
 * Decode the chroma coded_block_pattern (0 = none, 1 = DC only, 2 = DC+AC).
 * Neighbour chroma cbp values (bits 4..5 of their cbp) drive the context;
 * states 77..84.
 */
static int decode_cabac_mb_cbp_chroma( H264Context *h) {
    int ctx;
    int cbp_a, cbp_b;

    cbp_a = (h->left_cbp>>4)&0x03;
    cbp_b = (h-> top_cbp>>4)&0x03;

    ctx = 0;
    if( cbp_a > 0 ) ctx++;
    if( cbp_b > 0 ) ctx += 2;
    if( get_cabac_noinline( &h->cabac, &h->cabac_state[77 + ctx] ) == 0 )
        return 0;

    ctx = 4;
    if( cbp_a == 2 ) ctx++;
    if( cbp_b == 2 ) ctx += 2;
    return 1 + get_cabac_noinline( &h->cabac, &h->cabac_state[77 + ctx] );
}

/**
 * Decode sub_mb_type in a P slice (states 21..23).
 * @return 0: 8x8, 1: 8x4, 2: 4x8, 3: 4x4
 */
static int decode_cabac_p_mb_sub_type( H264Context *h ) {
    if( get_cabac( &h->cabac, &h->cabac_state[21] ) )
        return 0;   /* 8x8 */
    if( !get_cabac( &h->cabac, &h->cabac_state[22] ) )
        return 1;   /* 8x4 */
    if( get_cabac( &h->cabac, &h->cabac_state[23] ) )
        return 2;   /* 4x8 */
    return 3;       /* 4x4 */
}

/**
 * Decode sub_mb_type in a B slice (states 36..39); returns an index into
 * b_sub_mb_type_info.
 */
static int decode_cabac_b_mb_sub_type( H264Context *h ) {
    int type;
    if( !get_cabac( &h->cabac, &h->cabac_state[36] ) )
        return 0;   /* B_Direct_8x8 */
    if( !get_cabac( &h->cabac, &h->cabac_state[37] ) )
        return 1 + get_cabac( &h->cabac, &h->cabac_state[39] ); /* B_L0_8x8, B_L1_8x8 */
    type = 3;
    if( get_cabac( &h->cabac, &h->cabac_state[38] ) ) {
        if( get_cabac( &h->cabac, &h->cabac_state[39] ) )
            return 11 + get_cabac( &h->cabac, &h->cabac_state[39] ); /* B_L1_4x4, B_Bi_4x4 */
        type += 4;
    }
    type += 2*get_cabac( &h->cabac, &h->cabac_state[39] );
    type +=   get_cabac( &h->cabac, &h->cabac_state[39] );
    return type;
}

/**
 * Decode a reference index as a unary code (states 54..59).
 * Context from the left/top cached ref indices; in B slices a direct
 * neighbour does not count. Returns -1 on a run-away (>= 32) index.
 */
static int decode_cabac_mb_ref( H264Context *h, int list, int n ) {
    int refa = h->ref_cache[list][scan8[n] - 1];
    int refb = h->ref_cache[list][scan8[n] - 8];
    int ref  = 0;
    int ctx  = 0;

    if( h->slice_type_nos == FF_B_TYPE) {
        if( refa > 0 && !(h->direct_cache[scan8[n] - 1]&(MB_TYPE_DIRECT2>>1)) )
            ctx++;
        if( refb > 0 && !(h->direct_cache[scan8[n] - 8]&(MB_TYPE_DIRECT2>>1)) )
            ctx += 2;
    } else {
        if( refa > 0 )
            ctx++;
        if( refb > 0 )
            ctx += 2;
    }

    while( get_cabac( &h->cabac, &h->cabac_state[54+ctx] ) ) {
        ref++;
        ctx = (ctx>>2)+4;
        if(ref >= 32 /*h->ref_list[list]*/){
            return -1;  /* broken stream: no conforming ref index this large */
        }
    }
    return ref;
}

/**
 * Decode one motion vector difference component (UEGk, k=3).
 * @param ctxbase 40 for mvd_x, 47 for mvd_y
 * @param amvd    sum of the absolute neighbour mvds, selects the first-bin
 *                context (thresholds 3 and 33, done branch-free below)
 * @param mvda    receives |mvd| clipped to 70, for future amvd computation
 * @return the signed mvd, or INT_MIN on a corrupt (overlong) prefix
 */
static int decode_cabac_mb_mvd( H264Context *h, int ctxbase, int amvd, int *mvda) {
    int mvd;

    /* ((amvd-3)>>(INT_BIT-1)) is -1 when amvd<3: branch-free (amvd>2)+(amvd>32) */
    if(!get_cabac(&h->cabac, &h->cabac_state[ctxbase+((amvd-3)>>(INT_BIT-1))+((amvd-33)>>(INT_BIT-1))+2])){
//  if(!get_cabac(&h->cabac, &h->cabac_state[ctxbase+(amvd>2)+(amvd>32)])){
        *mvda= 0;
        return 0;
    }

    mvd= 1;
    ctxbase+= 3;
    /* unary part: up to 8 more bins, contexts advance for the first 3 */
    while( mvd < 9 && get_cabac( &h->cabac, &h->cabac_state[ctxbase] ) ) {
        if( mvd < 4 )
            ctxbase++;
        mvd++;
    }

    if( mvd >= 9 ) {
        /* exp-golomb suffix, k starts at 3 */
        int k = 3;
        while( get_cabac_bypass( &h->cabac ) ) {
            mvd += 1 << k;
            k++;
            if(k>24){
                av_log(h->s.avctx, AV_LOG_ERROR, "overflow in decode_cabac_mb_mvd\n");
                return INT_MIN;
            }
        }
        while( k-- ) {
            mvd += get_cabac_bypass( &h->cabac )<<k;
        }
        *mvda=mvd < 70 ?
mvd : 70; }else *mvda=mvd; return get_cabac_bypass_sign( &h->cabac, -mvd ); } #define DECODE_CABAC_MB_MVD( h, list, n )\ {\ int amvd0 = h->mvd_cache[list][scan8[n] - 1][0] +\ h->mvd_cache[list][scan8[n] - 8][0];\ int amvd1 = h->mvd_cache[list][scan8[n] - 1][1] +\ h->mvd_cache[list][scan8[n] - 8][1];\ \ mx += decode_cabac_mb_mvd( h, 40, amvd0, &mpx );\ my += decode_cabac_mb_mvd( h, 47, amvd1, &mpy );\ } static av_always_inline int get_cabac_cbf_ctx( H264Context *h, int cat, int idx, int is_dc ) { int nza, nzb; int ctx = 0; if( is_dc ) { if( cat == 0 ) { nza = h->left_cbp&0x100; nzb = h-> top_cbp&0x100; } else { nza = (h->left_cbp>>(6+idx))&0x01; nzb = (h-> top_cbp>>(6+idx))&0x01; } } else { assert(cat == 1 || cat == 2 || cat == 4); nza = h->non_zero_count_cache[scan8[idx] - 1]; nzb = h->non_zero_count_cache[scan8[idx] - 8]; } if( nza > 0 ) ctx++; if( nzb > 0 ) ctx += 2; return ctx + 4 * cat; } DECLARE_ASM_CONST(1, uint8_t, last_coeff_flag_offset_8x8)[63] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8 }; static av_always_inline void decode_cabac_residual_internal( H264Context *h, DCTELEM *block, int cat, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff, int is_dc ) { static const int significant_coeff_flag_offset[2][6] = { { 105+0, 105+15, 105+29, 105+44, 105+47, 402 }, { 277+0, 277+15, 277+29, 277+44, 277+47, 436 } }; static const int last_coeff_flag_offset[2][6] = { { 166+0, 166+15, 166+29, 166+44, 166+47, 417 }, { 338+0, 338+15, 338+29, 338+44, 338+47, 451 } }; static const int coeff_abs_level_m1_offset[6] = { 227+0, 227+10, 227+20, 227+30, 227+39, 426 }; static const uint8_t significant_coeff_flag_offset_8x8[2][63] = { { 0, 1, 2, 3, 4, 5, 5, 4, 4, 3, 3, 4, 4, 4, 5, 5, 4, 4, 4, 4, 3, 3, 6, 7, 7, 7, 8, 9,10, 9, 8, 7, 7, 6,11,12,13,11, 6, 7, 8, 9,14,10, 9, 8, 6,11, 12,13,11, 6, 9,14,10, 
                             9,11,12,13,11,14,10,12 },
      { 0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 7, 7, 7, 8, 4, 5,
        6, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,11,12,11,
        9, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,13,13, 9,
        9,10,10, 8,13,13, 9, 9,10,10,14,14,14,14,14 }
    };
    /* node ctx: 0..3: abslevel1 (with abslevelgt1 == 0).
     * 4..7: abslevelgt1 + 3 (and abslevel1 doesn't matter).
     * map node ctx => cabac ctx for level=1 */
    static const uint8_t coeff_abs_level1_ctx[8] = { 1, 2, 3, 4, 0, 0, 0, 0 };
    /* map node ctx => cabac ctx for level>1 */
    static const uint8_t coeff_abs_levelgt1_ctx[8] = { 5, 5, 5, 5, 6, 7, 8, 9 };
    static const uint8_t coeff_abs_level_transition[2][8] = {
    /* update node ctx after decoding a level=1 */
        { 1, 2, 3, 3, 4, 5, 6, 7 },
    /* update node ctx after decoding a level>1 */
        { 4, 4, 4, 4, 5, 6, 7, 7 }
    };

    int index[64];              /* scan positions of the significant coeffs */

    int av_unused last;
    int coeff_count = 0;
    int node_ctx = 0;

    uint8_t *significant_coeff_ctx_base;
    uint8_t *last_coeff_ctx_base;
    uint8_t *abs_level_m1_ctx_base;

    /* On non-x86, keep a local copy of the CABAC state so the compiler can
     * keep it in registers for this hot loop; written back before return. */
#if !ARCH_X86
#define CABAC_ON_STACK
#endif
#ifdef CABAC_ON_STACK
#define CC &cc
    CABACContext cc;
    cc.range     = h->cabac.range;
    cc.low       = h->cabac.low;
    cc.bytestream= h->cabac.bytestream;
#else
#define CC &h->cabac
#endif

    /* cat: 0-> DC 16x16  n = 0
     *      1-> AC 16x16  n = luma4x4idx
     *      2-> Luma4x4   n = luma4x4idx
     *      3-> DC Chroma n = iCbCr
     *      4-> AC Chroma n = 16 + 4 * iCbCr + chroma4x4idx
     *      5-> Luma8x8   n = 4 * luma8x8idx */

    /* read coded block flag */
    if( is_dc || cat != 5 ) {
        if( get_cabac( CC, &h->cabac_state[85 + get_cabac_cbf_ctx( h, cat, n, is_dc ) ] ) == 0 ) {
            if( !is_dc )
                h->non_zero_count_cache[scan8[n]] = 0;

#ifdef CABAC_ON_STACK
            h->cabac.range     = cc.range     ;
            h->cabac.low       = cc.low       ;
            h->cabac.bytestream= cc.bytestream;
#endif
            return;
        }
    }

    significant_coeff_ctx_base = h->cabac_state
        + significant_coeff_flag_offset[MB_FIELD][cat];
    last_coeff_ctx_base = h->cabac_state
        + last_coeff_flag_offset[MB_FIELD][cat];
    abs_level_m1_ctx_base = h->cabac_state
        + coeff_abs_level_m1_offset[cat];

    if( !is_dc && cat == 5 ) {
/* Significance-map loop: for each scan position decode significant_coeff_flag
 * and, when set, last_significant_coeff_flag (which terminates the scan). */
#define DECODE_SIGNIFICANCE( coefs, sig_off, last_off ) \
        for(last= 0; last < coefs; last++) { \
            uint8_t *sig_ctx = significant_coeff_ctx_base + sig_off; \
            if( get_cabac( CC, sig_ctx )) { \
                uint8_t *last_ctx = last_coeff_ctx_base + last_off; \
                index[coeff_count++] = last; \
                if( get_cabac( CC, last_ctx ) ) { \
                    last= max_coeff; \
                    break; \
                } \
            } \
        }\
        if( last == max_coeff -1 ) {\
            index[coeff_count++] = last;\
        }
        const uint8_t *sig_off = significant_coeff_flag_offset_8x8[MB_FIELD];
        /* NOTE: the two #if branches below interleave with the C braces so
         * the same "} else {" serves both the asm and the C implementation */
#if ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE && !defined(BROKEN_RELOCATIONS)
        coeff_count= decode_significance_8x8_x86(CC, significant_coeff_ctx_base, index, sig_off);
    } else {
        coeff_count= decode_significance_x86(CC, max_coeff, significant_coeff_ctx_base, index);
#else
        DECODE_SIGNIFICANCE( 63, sig_off[last], last_coeff_flag_offset_8x8[last] );
    } else {
        DECODE_SIGNIFICANCE( max_coeff - 1, last, last );
#endif
    }
    assert(coeff_count > 0);

    /* record "this block has coefficients" in the caller-visible caches */
    if( is_dc ) {
        if( cat == 0 )
            h->cbp_table[h->mb_xy] |= 0x100;
        else
            h->cbp_table[h->mb_xy] |= 0x40 << n;
    } else {
        if( cat == 5 )
            fill_rectangle(&h->non_zero_count_cache[scan8[n]], 2, 2, 8, coeff_count, 1);
        else {
            assert( cat == 1 || cat == 2 || cat == 4 );
            h->non_zero_count_cache[scan8[n]] = coeff_count;
        }
    }

    /* decode levels and signs, last significant coefficient first */
    do {
        uint8_t *ctx = coeff_abs_level1_ctx[node_ctx] + abs_level_m1_ctx_base;

        int j= scantable[index[--coeff_count]];

        if( get_cabac( CC, ctx ) == 0 ) {
            /* |level| == 1 */
            node_ctx = coeff_abs_level_transition[0][node_ctx];
            if( is_dc ) {
                block[j] = get_cabac_bypass_sign( CC, -1);
            }else{
                block[j] = (get_cabac_bypass_sign( CC, -qmul[j]) + 32) >> 6;
            }
        } else {
            int coeff_abs = 2;
            ctx = coeff_abs_levelgt1_ctx[node_ctx] + abs_level_m1_ctx_base;
            node_ctx = coeff_abs_level_transition[1][node_ctx];

            while( coeff_abs < 15 && get_cabac( CC, ctx ) ) {
                coeff_abs++;
            }

            if( coeff_abs >= 15 ) {
                /* exp-golomb escape for levels >= 15 (inner j shadows the
                 * scan index on purpose: it is just the suffix length) */
                int j = 0;
                while( get_cabac_bypass( CC ) ) {
                    j++;
                }

                coeff_abs=1;
                while( j-- ) {
                    coeff_abs += coeff_abs + get_cabac_bypass( CC );
                }
                coeff_abs+= 14;
            }

            if( is_dc ) {
                block[j] = get_cabac_bypass_sign( CC, -coeff_abs );
            }else{
                block[j] =
(get_cabac_bypass_sign( CC, -coeff_abs ) * qmul[j] + 32) >> 6; } } } while( coeff_count ); #ifdef CABAC_ON_STACK h->cabac.range = cc.range ; h->cabac.low = cc.low ; h->cabac.bytestream= cc.bytestream; #endif } static void decode_cabac_residual_dc( H264Context *h, DCTELEM *block, int cat, int n, const uint8_t *scantable, int max_coeff ) { decode_cabac_residual_internal(h, block, cat, n, scantable, NULL, max_coeff, 1); } static void decode_cabac_residual_nondc( H264Context *h, DCTELEM *block, int cat, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff ) { decode_cabac_residual_internal(h, block, cat, n, scantable, qmul, max_coeff, 0); } /** * decodes a macroblock * @return 0 if OK, AC_ERROR / DC_ERROR / MV_ERROR if an error is noticed */ int ff_h264_decode_mb_cabac(H264Context *h) { MpegEncContext * const s = &h->s; int mb_xy; int mb_type, partition_count, cbp = 0; int dct8x8_allowed= h->pps.transform_8x8_mode; mb_xy = h->mb_xy = s->mb_x + s->mb_y*s->mb_stride; tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y); if( h->slice_type_nos != FF_I_TYPE ) { int skip; /* a skipped mb needs the aff flag from the following mb */ if( FRAME_MBAFF && (s->mb_y&1)==1 && h->prev_mb_skipped ) skip = h->next_mb_skipped; else skip = decode_cabac_mb_skip( h, s->mb_x, s->mb_y ); /* read skip flags */ if( skip ) { if( FRAME_MBAFF && (s->mb_y&1)==0 ){ s->current_picture.mb_type[mb_xy] = MB_TYPE_SKIP; h->next_mb_skipped = decode_cabac_mb_skip( h, s->mb_x, s->mb_y+1 ); if(!h->next_mb_skipped) h->mb_mbaff = h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h); } decode_mb_skip(h); h->cbp_table[mb_xy] = 0; h->chroma_pred_mode_table[mb_xy] = 0; h->last_qscale_diff = 0; return 0; } } if(FRAME_MBAFF){ if( (s->mb_y&1) == 0 ) h->mb_mbaff = h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h); } h->prev_mb_skipped = 0; fill_decode_neighbors(h, -(MB_FIELD)); if( h->slice_type_nos == FF_B_TYPE ) { int ctx = 0; assert(h->slice_type_nos == 
                                   FF_B_TYPE);
        /* mb_type context from non-direct neighbours (types are stored +1) */
        if( !IS_DIRECT( h->left_type[0]-1 ) )
            ctx++;
        if( !IS_DIRECT( h->top_type-1 ) )
            ctx++;

        if( !get_cabac_noinline( &h->cabac, &h->cabac_state[27+ctx] ) ){
            mb_type= 0; /* B_Direct_16x16 */
        }else if( !get_cabac_noinline( &h->cabac, &h->cabac_state[27+3] ) ) {
            mb_type= 1 + get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] ); /* B_L[01]_16x16 */
        }else{
            int bits;
            bits = get_cabac_noinline( &h->cabac, &h->cabac_state[27+4] ) << 3;
            bits+= get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] ) << 2;
            bits+= get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] ) << 1;
            bits+= get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] );
            if( bits < 8 ){
                mb_type= bits + 3; /* B_Bi_16x16 through B_L1_L0_16x8 */
            }else if( bits == 13 ){
                mb_type= decode_cabac_intra_mb_type(h, 32, 0);
                goto decode_intra_mb;
            }else if( bits == 14 ){
                mb_type= 11; /* B_L1_L0_8x16 */
            }else if( bits == 15 ){
                mb_type= 22; /* B_8x8 */
            }else{
                bits= ( bits<<1 ) + get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] );
                mb_type= bits - 4; /* B_L0_Bi_* through B_Bi_Bi_* */
            }
        }
        partition_count= b_mb_type_info[mb_type].partition_count;
        mb_type=         b_mb_type_info[mb_type].type;
    } else if( h->slice_type_nos == FF_P_TYPE ) {
        if( get_cabac_noinline( &h->cabac, &h->cabac_state[14] ) == 0 ) {
            /* P-type */
            if( get_cabac_noinline( &h->cabac, &h->cabac_state[15] ) == 0 ) {
                /* P_L0_D16x16, P_8x8 */
                mb_type= 3 * get_cabac_noinline( &h->cabac, &h->cabac_state[16] );
            } else {
                /* P_L0_D8x16, P_L0_D16x8 */
                mb_type= 2 - get_cabac_noinline( &h->cabac, &h->cabac_state[17] );
            }
            partition_count= p_mb_type_info[mb_type].partition_count;
            mb_type=         p_mb_type_info[mb_type].type;
        } else {
            mb_type= decode_cabac_intra_mb_type(h, 17, 0);
            goto decode_intra_mb;
        }
    } else {
        mb_type= decode_cabac_intra_mb_type(h, 3, 1);
        if(h->slice_type == FF_SI_TYPE && mb_type)
            mb_type--;
        assert(h->slice_type_nos == FF_I_TYPE);
decode_intra_mb:
        partition_count = 0;
        cbp= i_mb_type_info[mb_type].cbp;
        h->intra16x16_pred_mode= i_mb_type_info[mb_type].pred_mode;
        mb_type= i_mb_type_info[mb_type].type;
    }
    if(MB_FIELD)
        mb_type |= MB_TYPE_INTERLACED;

    h->slice_table[ mb_xy ]= h->slice_num;

    if(IS_INTRA_PCM(mb_type)) {
        const uint8_t *ptr;

        // We assume these blocks are very rare so we do not optimize it.
        // FIXME The two following lines get the bitstream position in the cabac
        // decode, I think it should be done by a function in cabac.h (or cabac.c).
        ptr= h->cabac.bytestream;
        if(h->cabac.low&0x1) ptr--;
        if(CABAC_BITS==16){
            if(h->cabac.low&0x1FF) ptr--;
        }

        // The pixels are stored in the same order as levels in h->mb array.
        memcpy(h->mb, ptr, 256); ptr+=256;
        if(CHROMA){
            memcpy(h->mb+128, ptr, 128); ptr+=128;
        }

        /* CABAC restarts after the raw PCM bytes */
        ff_init_cabac_decoder(&h->cabac, ptr, h->cabac.bytestream_end - ptr);

        // All blocks are present
        h->cbp_table[mb_xy] = 0x1ef;
        h->chroma_pred_mode_table[mb_xy] = 0;
        // In deblocking, the quantizer is 0
        s->current_picture.qscale_table[mb_xy]= 0;
        // All coeffs are present
        memset(h->non_zero_count[mb_xy], 16, 32);
        s->current_picture.mb_type[mb_xy]= mb_type;
        h->last_qscale_diff = 0;
        return 0;
    }

    if(MB_MBAFF){
        /* field MBs in an MBAFF frame address twice as many refs */
        h->ref_count[0] <<= 1;
        h->ref_count[1] <<= 1;
    }

    fill_decode_caches(h, mb_type);

    if( IS_INTRA( mb_type ) ) {
        int i, pred_mode;
        if( IS_INTRA4x4( mb_type ) ) {
            if( dct8x8_allowed && get_cabac_noinline( &h->cabac, &h->cabac_state[399 + h->neighbor_transform_size] ) ) {
                mb_type |= MB_TYPE_8x8DCT;
                /* 8x8 transform: one pred mode per 8x8 block */
                for( i = 0; i < 16; i+=4 ) {
                    int pred = pred_intra_mode( h, i );
                    int mode = decode_cabac_mb_intra4x4_pred_mode( h, pred );
                    fill_rectangle( &h->intra4x4_pred_mode_cache[ scan8[i] ], 2, 2, 8, mode, 1 );
                }
            } else {
                for( i = 0; i < 16; i++ ) {
                    int pred = pred_intra_mode( h, i );
                    h->intra4x4_pred_mode_cache[ scan8[i] ] = decode_cabac_mb_intra4x4_pred_mode( h, pred );

                //av_log( s->avctx, AV_LOG_ERROR, "i4x4 pred=%d mode=%d\n", pred, h->intra4x4_pred_mode_cache[ scan8[i] ] );
                }
            }
            ff_h264_write_back_intra_pred_mode(h);
            if( ff_h264_check_intra4x4_pred_mode(h) < 0 ) return -1;
        } else {
            h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode( h, h->intra16x16_pred_mode );
            if( h->intra16x16_pred_mode < 0 ) return -1;
        }
        if(CHROMA){
            h->chroma_pred_mode_table[mb_xy] =
            pred_mode                        = decode_cabac_mb_chroma_pre_mode( h );

            pred_mode= ff_h264_check_intra_pred_mode( h, pred_mode );
            if( pred_mode < 0 ) return -1;
            h->chroma_pred_mode= pred_mode;
        }
    } else if( partition_count == 4 ) {
        /* 8x8 partitions: sub_mb_types, then refs, then mvds */
        int i, j, sub_partition_count[4], list, ref[2][4];

        if( h->slice_type_nos == FF_B_TYPE ) {
            for( i = 0; i < 4; i++ ) {
                h->sub_mb_type[i] = decode_cabac_b_mb_sub_type( h );
                sub_partition_count[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;
                h->sub_mb_type[i]=      b_sub_mb_type_info[ h->sub_mb_type[i] ].type;
            }
            if( IS_DIRECT(h->sub_mb_type[0] | h->sub_mb_type[1] |
                          h->sub_mb_type[2] | h->sub_mb_type[3]) ) {
                ff_h264_pred_direct_motion(h, &mb_type);
                h->ref_cache[0][scan8[4]] =
                h->ref_cache[1][scan8[4]] =
                h->ref_cache[0][scan8[12]] =
                h->ref_cache[1][scan8[12]] = PART_NOT_AVAILABLE;
                for( i = 0; i < 4; i++ )
                    fill_rectangle( &h->direct_cache[scan8[4*i]], 2, 2, 8, (h->sub_mb_type[i]>>1)&0xFF, 1 );
            }
        } else {
            for( i = 0; i < 4; i++ ) {
                h->sub_mb_type[i] = decode_cabac_p_mb_sub_type( h );
                sub_partition_count[i]= p_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;
                h->sub_mb_type[i]=      p_sub_mb_type_info[ h->sub_mb_type[i] ].type;
            }
        }

        for( list = 0; list < h->list_count; list++ ) {
            for( i = 0; i < 4; i++ ) {
                if(IS_DIRECT(h->sub_mb_type[i])) continue;
                if(IS_DIR(h->sub_mb_type[i], 0, list)){
                    if( h->ref_count[list] > 1 ){
                        ref[list][i] = decode_cabac_mb_ref( h, list, 4*i );
                        if(ref[list][i] >= (unsigned)h->ref_count[list]){
                            av_log(s->avctx, AV_LOG_ERROR, "Reference %d >= %d\n", ref[list][i], h->ref_count[list]);
                            return -1;
                        }
                    }else
                        ref[list][i] = 0;
                } else {
                    ref[list][i] = -1;
                }
                h->ref_cache[list][ scan8[4*i]+1 ]=
                h->ref_cache[list][ scan8[4*i]+8 ]=h->ref_cache[list][ scan8[4*i]+9 ]= ref[list][i];
            }
        }

        if(dct8x8_allowed)
            dct8x8_allowed = get_dct8x8_allowed(h);

        for(list=0; list<h->list_count; list++){
            for(i=0; i<4; i++){
                h->ref_cache[list][ scan8[4*i]
                                              ]=h->ref_cache[list][ scan8[4*i]+1 ];
                if(IS_DIRECT(h->sub_mb_type[i])){
                    fill_rectangle(h->mvd_cache[list][scan8[4*i]], 2, 2, 8, 0, 2);
                    continue;
                }

                if(IS_DIR(h->sub_mb_type[i], 0, list) && !IS_DIRECT(h->sub_mb_type[i])){
                    const int sub_mb_type= h->sub_mb_type[i];
                    const int block_width= (sub_mb_type & (MB_TYPE_16x16|MB_TYPE_16x8)) ? 2 : 1;
                    for(j=0; j<sub_partition_count[i]; j++){
                        int mpx, mpy;
                        int mx, my;
                        const int index= 4*i + block_width*j;
                        int16_t (* mv_cache)[2]= &h->mv_cache[list][ scan8[index] ];
                        uint8_t (* mvd_cache)[2]= &h->mvd_cache[list][ scan8[index] ];
                        pred_motion(h, index, block_width, list, h->ref_cache[list][ scan8[index] ], &mx, &my);
                        DECODE_CABAC_MB_MVD( h, list, index)
                        tprintf(s->avctx, "final mv:%d %d\n", mx, my);

                        /* replicate the mv/mvd into every 4x4 cell of the
                         * sub-partition */
                        if(IS_SUB_8X8(sub_mb_type)){
                            mv_cache[ 1 ][0]=
                            mv_cache[ 8 ][0]= mv_cache[ 9 ][0]= mx;
                            mv_cache[ 1 ][1]=
                            mv_cache[ 8 ][1]= mv_cache[ 9 ][1]= my;

                            mvd_cache[ 1 ][0]=
                            mvd_cache[ 8 ][0]= mvd_cache[ 9 ][0]= mpx;
                            mvd_cache[ 1 ][1]=
                            mvd_cache[ 8 ][1]= mvd_cache[ 9 ][1]= mpy;
                        }else if(IS_SUB_8X4(sub_mb_type)){
                            mv_cache[ 1 ][0]= mx;
                            mv_cache[ 1 ][1]= my;

                            mvd_cache[ 1 ][0]= mpx;
                            mvd_cache[ 1 ][1]= mpy;
                        }else if(IS_SUB_4X8(sub_mb_type)){
                            mv_cache[ 8 ][0]= mx;
                            mv_cache[ 8 ][1]= my;

                            mvd_cache[ 8 ][0]= mpx;
                            mvd_cache[ 8 ][1]= mpy;
                        }
                        mv_cache[ 0 ][0]= mx;
                        mv_cache[ 0 ][1]= my;

                        mvd_cache[ 0 ][0]= mpx;
                        mvd_cache[ 0 ][1]= mpy;
                    }
                }else{
                    fill_rectangle(h->mv_cache [list][ scan8[4*i] ], 2, 2, 8, 0, 4);
                    fill_rectangle(h->mvd_cache[list][ scan8[4*i] ], 2, 2, 8, 0, 2);
                }
            }
        }
    } else if( IS_DIRECT(mb_type) ) {
        ff_h264_pred_direct_motion(h, &mb_type);
        fill_rectangle(h->mvd_cache[0][scan8[0]], 4, 4, 8, 0, 2);
        fill_rectangle(h->mvd_cache[1][scan8[0]], 4, 4, 8, 0, 2);
        dct8x8_allowed &= h->sps.direct_8x8_inference_flag;
    } else {
        /* 16x16, 16x8 or 8x16 partitions: refs first, then mvds */
        int list, i;
        if(IS_16X16(mb_type)){
            for(list=0; list<h->list_count; list++){
                if(IS_DIR(mb_type, 0, list)){
                    int ref;
                    if(h->ref_count[list] > 1){
                        ref= decode_cabac_mb_ref(h, list, 0);
                        if(ref >= (unsigned)h->ref_count[list]){
                            av_log(s->avctx, AV_LOG_ERROR, "Reference %d >= %d\n", ref, h->ref_count[list]);
                            return -1;
                        }
                    }else
                        ref=0;
                        fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, ref, 1);
                }
            }
            for(list=0; list<h->list_count; list++){
                if(IS_DIR(mb_type, 0, list)){
                    int mx,my,mpx,mpy;
                    pred_motion(h, 0, 4, list, h->ref_cache[list][ scan8[0] ], &mx, &my);
                    DECODE_CABAC_MB_MVD( h, list, 0)
                    tprintf(s->avctx, "final mv:%d %d\n", mx, my);

                    fill_rectangle(h->mvd_cache[list][ scan8[0] ], 4, 4, 8, pack8to16(mpx,mpy), 2);
                    fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, pack16to32(mx,my), 4);
                }
            }
        }
        else if(IS_16X8(mb_type)){
            for(list=0; list<h->list_count; list++){
                for(i=0; i<2; i++){
                    if(IS_DIR(mb_type, i, list)){
                        int ref;
                        if(h->ref_count[list] > 1){
                            ref= decode_cabac_mb_ref( h, list, 8*i );
                            if(ref >= (unsigned)h->ref_count[list]){
                                av_log(s->avctx, AV_LOG_ERROR, "Reference %d >= %d\n", ref, h->ref_count[list]);
                                return -1;
                            }
                        }else
                            ref=0;
                        fill_rectangle(&h->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, ref, 1);
                    }else
                        fill_rectangle(&h->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, (LIST_NOT_USED&0xFF), 1);
                }
            }
            for(list=0; list<h->list_count; list++){
                for(i=0; i<2; i++){
                    if(IS_DIR(mb_type, i, list)){
                        int mx,my,mpx,mpy;
                        pred_16x8_motion(h, 8*i, list, h->ref_cache[list][scan8[0] + 16*i], &mx, &my);
                        DECODE_CABAC_MB_MVD( h, list, 8*i)
                        tprintf(s->avctx, "final mv:%d %d\n", mx, my);

                        fill_rectangle(h->mvd_cache[list][ scan8[0] + 16*i ], 4, 2, 8, pack8to16(mpx,mpy), 2);
                        fill_rectangle(h->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, pack16to32(mx,my), 4);
                    }else{
                        fill_rectangle(h->mvd_cache[list][ scan8[0] + 16*i ], 4, 2, 8, 0, 2);
                        fill_rectangle(h-> mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, 0, 4);
                    }
                }
            }
        }else{
            assert(IS_8X16(mb_type));
            for(list=0; list<h->list_count; list++){
                for(i=0; i<2; i++){
                    if(IS_DIR(mb_type, i, list)){ //FIXME optimize
                        int ref;
                        if(h->ref_count[list] > 1){
                            ref= decode_cabac_mb_ref( h, list, 4*i );
                            if(ref >= (unsigned)h->ref_count[list]){
                                av_log(s->avctx, AV_LOG_ERROR, "Reference %d >= %d\n", ref, h->ref_count[list]);
                                return -1;
                            }
                        }else
                            ref=0;
                        fill_rectangle(&h->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, ref, 1);
                    }else
                        fill_rectangle(&h->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, (LIST_NOT_USED&0xFF), 1);
                }
            }
            for(list=0; list<h->list_count; list++){
                for(i=0; i<2; i++){
                    if(IS_DIR(mb_type, i, list)){
                        int mx,my,mpx,mpy;
                        pred_8x16_motion(h, i*4, list, h->ref_cache[list][ scan8[0] + 2*i ], &mx, &my);
                        DECODE_CABAC_MB_MVD( h, list, 4*i)

                        tprintf(s->avctx, "final mv:%d %d\n", mx, my);
                        fill_rectangle(h->mvd_cache[list][ scan8[0] + 2*i ], 2, 4, 8, pack8to16(mpx,mpy), 2);
                        fill_rectangle(h->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, pack16to32(mx,my), 4);
                    }else{
                        fill_rectangle(h->mvd_cache[list][ scan8[0] + 2*i ], 2, 4, 8, 0, 2);
                        fill_rectangle(h-> mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, 0, 4);
                    }
                }
            }
        }
    }

    if( IS_INTER( mb_type ) ) {
        h->chroma_pred_mode_table[mb_xy] = 0;
        write_back_motion( h, mb_type );
    }

    if( !IS_INTRA16x16( mb_type ) ) {
        cbp  = decode_cabac_mb_cbp_luma( h );
        if(CHROMA)
            cbp |= decode_cabac_mb_cbp_chroma( h ) << 4;
    }

    h->cbp_table[mb_xy] = h->cbp = cbp;

    /* transform_size_8x8_flag for inter MBs with luma coefficients */
    if( dct8x8_allowed && (cbp&15) && !IS_INTRA( mb_type ) ) {
        mb_type |= MB_TYPE_8x8DCT * get_cabac_noinline( &h->cabac, &h->cabac_state[399 + h->neighbor_transform_size] );
    }
    s->current_picture.mb_type[mb_xy]= mb_type;

    if( cbp || IS_INTRA16x16( mb_type ) ) {
        const uint8_t *scan, *scan8x8, *dc_scan;
        const uint32_t *qmul;

        if(IS_INTERLACED(mb_type)){
            scan8x8= s->qscale ? h->field_scan8x8 : h->field_scan8x8_q0;
            scan= s->qscale ? h->field_scan : h->field_scan_q0;
            dc_scan= luma_dc_field_scan;
        }else{
            scan8x8= s->qscale ? h->zigzag_scan8x8 : h->zigzag_scan8x8_q0;
            scan= s->qscale ?
h->zigzag_scan : h->zigzag_scan_q0; dc_scan= luma_dc_zigzag_scan; } // decode_cabac_mb_dqp if(get_cabac_noinline( &h->cabac, &h->cabac_state[60 + (h->last_qscale_diff != 0)])){ int val = 1; int ctx= 2; while( get_cabac_noinline( &h->cabac, &h->cabac_state[60 + ctx] ) ) { ctx= 3; val++; if(val > 102){ //prevent infinite loop av_log(h->s.avctx, AV_LOG_ERROR, "cabac decode of qscale diff failed at %d %d\n", s->mb_x, s->mb_y); return -1; } } if( val&0x01 ) val= (val + 1)>>1 ; else val= -((val + 1)>>1); h->last_qscale_diff = val; s->qscale += val; if(((unsigned)s->qscale) > 51){ if(s->qscale<0) s->qscale+= 52; else s->qscale-= 52; } h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale); h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale); }else h->last_qscale_diff=0; if( IS_INTRA16x16( mb_type ) ) { int i; //av_log( s->avctx, AV_LOG_ERROR, "INTRA16x16 DC\n" ); decode_cabac_residual_dc( h, h->mb, 0, 0, dc_scan, 16); if( cbp&15 ) { qmul = h->dequant4_coeff[0][s->qscale]; for( i = 0; i < 16; i++ ) { //av_log( s->avctx, AV_LOG_ERROR, "INTRA16x16 AC:%d\n", i ); decode_cabac_residual_nondc(h, h->mb + 16*i, 1, i, scan + 1, qmul, 15); } } else { fill_rectangle(&h->non_zero_count_cache[scan8[0]], 4, 4, 8, 0, 1); } } else { int i8x8, i4x4; for( i8x8 = 0; i8x8 < 4; i8x8++ ) { if( cbp & (1<<i8x8) ) { if( IS_8x8DCT(mb_type) ) { decode_cabac_residual_nondc(h, h->mb + 64*i8x8, 5, 4*i8x8, scan8x8, h->dequant8_coeff[IS_INTRA( mb_type ) ? 0:1][s->qscale], 64); } else { qmul = h->dequant4_coeff[IS_INTRA( mb_type ) ? 
0:3][s->qscale]; for( i4x4 = 0; i4x4 < 4; i4x4++ ) { const int index = 4*i8x8 + i4x4; //av_log( s->avctx, AV_LOG_ERROR, "Luma4x4: %d\n", index ); //START_TIMER decode_cabac_residual_nondc(h, h->mb + 16*index, 2, index, scan, qmul, 16); //STOP_TIMER("decode_residual") } } } else { uint8_t * const nnz= &h->non_zero_count_cache[ scan8[4*i8x8] ]; nnz[0] = nnz[1] = nnz[8] = nnz[9] = 0; } } } if( cbp&0x30 ){ int c; for( c = 0; c < 2; c++ ) { //av_log( s->avctx, AV_LOG_ERROR, "INTRA C%d-DC\n",c ); decode_cabac_residual_dc(h, h->mb + 256 + 16*4*c, 3, c, chroma_dc_scan, 4); } } if( cbp&0x20 ) { int c, i; for( c = 0; c < 2; c++ ) { qmul = h->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][h->chroma_qp[c]]; for( i = 0; i < 4; i++ ) { const int index = 16 + 4 * c + i; //av_log( s->avctx, AV_LOG_ERROR, "INTRA C%d-AC %d\n",c, index - 16 ); decode_cabac_residual_nondc(h, h->mb + 16*index, 4, index, scan + 1, qmul, 15); } } } else { uint8_t * const nnz= &h->non_zero_count_cache[0]; nnz[ scan8[16]+0 ] = nnz[ scan8[16]+1 ] =nnz[ scan8[16]+8 ] =nnz[ scan8[16]+9 ] = nnz[ scan8[20]+0 ] = nnz[ scan8[20]+1 ] =nnz[ scan8[20]+8 ] =nnz[ scan8[20]+9 ] = 0; } } else { uint8_t * const nnz= &h->non_zero_count_cache[0]; fill_rectangle(&nnz[scan8[0]], 4, 4, 8, 0, 1); nnz[ scan8[16]+0 ] = nnz[ scan8[16]+1 ] =nnz[ scan8[16]+8 ] =nnz[ scan8[16]+9 ] = nnz[ scan8[20]+0 ] = nnz[ scan8[20]+1 ] =nnz[ scan8[20]+8 ] =nnz[ scan8[20]+9 ] = 0; h->last_qscale_diff = 0; } s->current_picture.qscale_table[mb_xy]= s->qscale; write_back_non_zero_count(h); if(MB_MBAFF){ h->ref_count[0] >>= 1; h->ref_count[1] >>= 1; } return 0; }
123linslouis-android-video-cutter
jni/libavcodec/h264_cabac.c
C
asf20
72,074
/*
 * TTA (The Lossless True Audio) decoder
 * Copyright (c) 2006 Alex Beregszaszi
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * TTA (The Lossless True Audio) decoder
 * (www.true-audio.com or tta.corecodec.org)
 * @author Alex Beregszaszi
 */

#define ALT_BITSTREAM_READER_LE
//#define DEBUG
#include <limits.h>
#include "avcodec.h"
#include "get_bits.h"

/* Values of the 16-bit "flags" field in the TTA1 header. */
#define FORMAT_INT 1
#define FORMAT_FLOAT 3

/* Per-stream decoder state, filled in from the TTA1 extradata header. */
typedef struct TTAContext {
    AVCodecContext *avctx;
    GetBitContext gb;
    int flags, channels, bps, is_float, data_length;
    int frame_length, last_frame_length, total_frames;
    int32_t *decode_buffer;   // one frame of decoded samples, all channels interleaved
} TTAContext;

#if 0
static inline int shift_1(int i)
{
    if (i < 32)
        return 1 << i;
    else
        return 0x80000000; // 16 << 31
}

static inline int shift_16(int i)
{
    if (i < 28)
        return 16 << i;
    else
        return 0x80000000; // 16 << 27
}
#else
/* Lookup-table versions of the functions above: shift_1[i] == 1 << i,
 * saturating at 0x80000000 for i >= 31 so out-of-range Rice parameters
 * cannot shift past the word size. */
static const uint32_t shift_1[] = {
    0x00000001, 0x00000002, 0x00000004, 0x00000008,
    0x00000010, 0x00000020, 0x00000040, 0x00000080,
    0x00000100, 0x00000200, 0x00000400, 0x00000800,
    0x00001000, 0x00002000, 0x00004000, 0x00008000,
    0x00010000, 0x00020000, 0x00040000, 0x00080000,
    0x00100000, 0x00200000, 0x00400000, 0x00800000,
    0x01000000, 0x02000000, 0x04000000, 0x08000000,
    0x10000000, 0x20000000, 0x40000000, 0x80000000,
    0x80000000, 0x80000000, 0x80000000, 0x80000000,
    0x80000000, 0x80000000, 0x80000000, 0x80000000
};

/* shift_16[i] == 16 << i, implemented as an offset view into shift_1. */
static const uint32_t * const shift_16 = shift_1 + 4;
#endif

#define MAX_ORDER 16
/* State of the adaptive hybrid filter: quantizer coefficients (qm),
 * adaptation deltas (dx) and a delay line of past samples (dl). */
typedef struct TTAFilter {
    int32_t shift, round, error, mode;
    int32_t qm[MAX_ORDER];
    int32_t dx[MAX_ORDER];
    int32_t dl[MAX_ORDER];
} TTAFilter;

/* {shift, mode} pairs indexed by bytes-per-sample - 1. */
static const int32_t ttafilter_configs[4][2] = {
    {10, 1},
    {9, 1},
    {10, 1},
    {12, 0}
};

/* Reset a filter to its initial state for the given shift/mode config. */
static void ttafilter_init(TTAFilter *c, int32_t shift, int32_t mode) {
    memset(c, 0, sizeof(TTAFilter));
    c->shift = shift;
    c->round = shift_1[shift-1];
//    c->round = 1 << (shift - 1);
    c->mode = mode;
}

// FIXME: copy paste from original
/* Shift an 8-element window one slot toward the front (unrolled memmove). */
static inline void memshl(register int32_t *a, register int32_t *b) {
    *a++ = *b++;
    *a++ = *b++;
    *a++ = *b++;
    *a++ = *b++;
    *a++ = *b++;
    *a++ = *b++;
    *a++ = *b++;
    *a = *b;
}

// FIXME: copy paste from original
// mode=1 encoder, mode=0 decoder
/* Run one sample through the order-8 sign-adaptive hybrid filter.
 * The coefficient update direction depends on the sign of the previous
 * prediction error; statement order here is significant. */
static inline void ttafilter_process(TTAFilter *c, int32_t *in, int32_t mode) {
    register int32_t *dl = c->dl, *qm = c->qm, *dx = c->dx, sum = c->round;

    if (!c->error) {
        /* no adaptation when the last error was exactly zero */
        sum += *dl++ * *qm, qm++;
        sum += *dl++ * *qm, qm++;
        sum += *dl++ * *qm, qm++;
        sum += *dl++ * *qm, qm++;
        sum += *dl++ * *qm, qm++;
        sum += *dl++ * *qm, qm++;
        sum += *dl++ * *qm, qm++;
        sum += *dl++ * *qm, qm++;
        dx += 8;
    } else if(c->error < 0) {
        sum += *dl++ * (*qm -= *dx++), qm++;
        sum += *dl++ * (*qm -= *dx++), qm++;
        sum += *dl++ * (*qm -= *dx++), qm++;
        sum += *dl++ * (*qm -= *dx++), qm++;
        sum += *dl++ * (*qm -= *dx++), qm++;
        sum += *dl++ * (*qm -= *dx++), qm++;
        sum += *dl++ * (*qm -= *dx++), qm++;
        sum += *dl++ * (*qm -= *dx++), qm++;
    } else {
        sum += *dl++ * (*qm += *dx++), qm++;
        sum += *dl++ * (*qm += *dx++), qm++;
        sum += *dl++ * (*qm += *dx++), qm++;
        sum += *dl++ * (*qm += *dx++), qm++;
        sum += *dl++ * (*qm += *dx++), qm++;
        sum += *dl++ * (*qm += *dx++), qm++;
        sum += *dl++ * (*qm += *dx++), qm++;
        sum += *dl++ * (*qm += *dx++), qm++;
    }

    /* refresh the adaptation deltas from the signs of recent samples */
    *(dx-0) = ((*(dl-1) >> 30) | 1) << 2;
    *(dx-1) = ((*(dl-2) >> 30) | 1) << 1;
    *(dx-2) = ((*(dl-3) >> 30) | 1) << 1;
    *(dx-3) = ((*(dl-4) >> 30) | 1);

    // compress
    if (mode) {
        *dl = *in;
        *in -= (sum >> c->shift);
        c->error = *in;
    } else {
        c->error = *in;
        *in += (sum >> c->shift);
        *dl = *in;
    }

    if (c->mode) {
        *(dl-1) = *dl - *(dl-1);
        *(dl-2) = *(dl-1) - *(dl-2);
        *(dl-3) = *(dl-2) - *(dl-3);
    }

    memshl(c->dl, c->dl + 1);
    memshl(c->dx, c->dx + 1);
}

/* Adaptive Rice coder state: two parameters (k0/k1) with running sums. */
typedef struct TTARice {
    uint32_t k0, k1, sum0, sum1;
} TTARice;

static void rice_init(TTARice *c, uint32_t k0, uint32_t k1)
{
    c->k0 = k0;
    c->k1 = k1;
    c->sum0 = shift_16[k0];
    c->sum1 = shift_16[k1];
}

/* Read a unary-coded value: number of consecutive 1 bits before a 0. */
static int tta_get_unary(GetBitContext *gb)
{
    int ret = 0;

    // count ones
    while(get_bits1(gb))
        ret++;
    return ret;
}

/* Parse the TTA1 header from extradata and allocate the frame buffer. */
static av_cold int tta_decode_init(AVCodecContext * avctx)
{
    TTAContext *s = avctx->priv_data;
    int i;

    s->avctx = avctx;

    // 30bytes includes a seektable with one frame
    if (avctx->extradata_size < 30)
        return -1;

    init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size);
    if (show_bits_long(&s->gb, 32) == AV_RL32("TTA1"))
    {
        /* signature */
        skip_bits(&s->gb, 32);
//        if (get_bits_long(&s->gb, 32) != bswap_32(AV_RL32("TTA1"))) {
//            av_log(s->avctx, AV_LOG_ERROR, "Missing magic\n");
//            return -1;
//        }

        s->flags = get_bits(&s->gb, 16);
        if (s->flags != 1 && s->flags != 3)
        {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid flags\n");
            return -1;
        }
        s->is_float = (s->flags == FORMAT_FLOAT);
        avctx->channels = s->channels = get_bits(&s->gb, 16);
        avctx->bits_per_coded_sample = get_bits(&s->gb, 16);
        s->bps = (avctx->bits_per_coded_sample + 7) / 8;
        avctx->sample_rate = get_bits_long(&s->gb, 32);
        if(avctx->sample_rate > 1000000){ //prevent FRAME_TIME * avctx->sample_rate from overflowing and sanity check
            av_log(avctx, AV_LOG_ERROR, "sample_rate too large\n");
            return -1;
        }
        s->data_length = get_bits_long(&s->gb, 32);
        skip_bits(&s->gb, 32); // CRC32 of header

        if (s->is_float)
        {
            avctx->sample_fmt = SAMPLE_FMT_FLT;
            av_log(s->avctx, AV_LOG_ERROR, "Unsupported sample format. Please contact the developers.\n");
            return -1;
        }
        else switch(s->bps) {
//            case 1: avctx->sample_fmt = SAMPLE_FMT_U8; break;
            case 2: avctx->sample_fmt = SAMPLE_FMT_S16; break;
//            case 3: avctx->sample_fmt = SAMPLE_FMT_S24; break;
            case 4: avctx->sample_fmt = SAMPLE_FMT_S32; break;
            default: av_log(s->avctx, AV_LOG_ERROR, "Invalid/unsupported sample format. Please contact the developers.\n");
                return -1;
        }

        // FIXME: horribly broken, but directly from reference source
#define FRAME_TIME 1.04489795918367346939
        s->frame_length = (int)(FRAME_TIME * avctx->sample_rate);

        s->last_frame_length = s->data_length % s->frame_length;
        s->total_frames = s->data_length / s->frame_length +
                        (s->last_frame_length ? 1 : 0);

        av_log(s->avctx, AV_LOG_DEBUG, "flags: %x chans: %d bps: %d rate: %d block: %d\n",
            s->flags, avctx->channels, avctx->bits_per_coded_sample, avctx->sample_rate,
            avctx->block_align);
        av_log(s->avctx, AV_LOG_DEBUG, "data_length: %d frame_length: %d last: %d total: %d\n",
            s->data_length, s->frame_length, s->last_frame_length, s->total_frames);

        // FIXME: seek table
        for (i = 0; i < s->total_frames; i++)
            skip_bits(&s->gb, 32);
        skip_bits(&s->gb, 32); // CRC32 of seektable

        if(s->frame_length >= UINT_MAX / (s->channels * sizeof(int32_t))){
            av_log(avctx, AV_LOG_ERROR, "frame_length too large\n");
            return -1;
        }

        /* NOTE(review): the av_mallocz() result is not checked here; a
         * failed allocation would only surface as a NULL dereference in
         * tta_decode_frame — worth confirming/fixing upstream. */
        s->decode_buffer = av_mallocz(sizeof(int32_t)*s->frame_length*s->channels);
    } else {
        av_log(avctx, AV_LOG_ERROR, "Wrong extradata present\n");
        return -1;
    }

    return 0;
}

/* Decode one TTA frame into the caller-provided PCM buffer.
 * Returns the number of input bytes consumed, or -1 on error. */
static int tta_decode_frame(AVCodecContext *avctx,
        void *data, int *data_size,
        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    TTAContext *s = avctx->priv_data;
    int i;

    init_get_bits(&s->gb, buf, buf_size*8);
    {
        int32_t predictors[s->channels];
        TTAFilter filters[s->channels];
        TTARice rices[s->channels];
        int cur_chan = 0, framelen = s->frame_length;
        int32_t *p;

        /* NOTE(review): the "* 2" assumes 16-bit output samples; only
         * bps == 2 is actually emitted below, so this holds today. */
        if (*data_size < (framelen * s->channels * 2)) {
            av_log(avctx, AV_LOG_ERROR, "Output buffer size is too small.\n");
            return -1;
        }

        // FIXME: seeking
        s->total_frames--;
        if (!s->total_frames && s->last_frame_length)
            framelen = s->last_frame_length;

        // init per channel states
        for (i = 0; i < s->channels; i++) {
            predictors[i] = 0;
            ttafilter_init(&(filters[i]), ttafilter_configs[s->bps-1][0], ttafilter_configs[s->bps-1][1]);
            rice_init(&(rices[i]), 10, 10);
        }

        for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++) {
            int32_t *predictor = &(predictors[cur_chan]);
            TTAFilter *filter = &(filters[cur_chan]);
            TTARice *rice = &(rices[cur_chan]);
            uint32_t unary, depth, k;
            int32_t value;

            unary = tta_get_unary(&s->gb);
            if (unary == 0) {
                depth = 0;
                k = rice->k0;
            } else {
                depth = 1;
                k = rice->k1;
                unary--;
            }

            if (get_bits_left(&s->gb) < k)
                return -1;

            if (k) {
                if (k > MIN_CACHE_BITS)
                    return -1;
                value = (unary << k) + get_bits(&s->gb, k);
            } else
                value = unary;

            // FIXME: copy paste from original
            /* adapt the Rice parameters; case 1 deliberately falls
             * through so the level-0 adaptation also runs for depth 1 */
            switch (depth) {
            case 1:
                rice->sum1 += value - (rice->sum1 >> 4);
                if (rice->k1 > 0 && rice->sum1 < shift_16[rice->k1])
                    rice->k1--;
                else if(rice->sum1 > shift_16[rice->k1 + 1])
                    rice->k1++;
                value += shift_1[rice->k0];
            default:
                rice->sum0 += value - (rice->sum0 >> 4);
                if (rice->k0 > 0 && rice->sum0 < shift_16[rice->k0])
                    rice->k0--;
                else if(rice->sum0 > shift_16[rice->k0 + 1])
                    rice->k0++;
            }

            // extract coded value
#define UNFOLD(x) (((x)&1) ? (++(x)>>1) : (-(x)>>1))
            *p = UNFOLD(value);

            // run hybrid filter
            ttafilter_process(filter, p, 0);

            // fixed order prediction
#define PRED(x, k) (int32_t)((((uint64_t)x << k) - x) >> k)
            switch (s->bps) {
                case 1: *p += PRED(*predictor, 4); break;
                case 2:
                case 3: *p += PRED(*predictor, 5); break;
                case 4: *p += *predictor; break;
            }
            *predictor = *p;

#if 0
            // extract 32bit float from last two int samples
            if (s->is_float && ((p - data) & 1)) {
                uint32_t neg = *p & 0x80000000;
                uint32_t hi = *(p - 1);
                uint32_t lo = abs(*p) - 1;

                hi += (hi || lo) ? 0x3f80 : 0;
                // SWAP16: swap all the 16 bits
                *(p - 1) = (hi << 16) | SWAP16(lo) | neg;
            }
#endif

            /*if ((get_bits_count(&s->gb)+7)/8 > buf_size)
            {
                av_log(NULL, AV_LOG_INFO, "overread!!\n");
                break;
            }*/

            // flip channels
            if (cur_chan < (s->channels-1))
                cur_chan++;
            else {
                // decorrelate in case of stereo integer
                if (!s->is_float && (s->channels > 1)) {
                    int32_t *r = p - 1;
                    for (*p += *r / 2; r > p - s->channels; r--)
                        *r = *(r + 1) - *r;
                }
                cur_chan = 0;
            }
        }

        if (get_bits_left(&s->gb) < 32)
            return -1;
        skip_bits(&s->gb, 32); // frame crc

        // convert to output buffer
        switch(s->bps) {
            case 2: {
                uint16_t *samples = data;
                for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++) {
//                    *samples++ = (unsigned char)*p;
//                    *samples++ = (unsigned char)(*p >> 8);
                    *samples++ = *p;
                }
                *data_size = (uint8_t *)samples - (uint8_t *)data;
                break;
            }
            default:
                av_log(s->avctx, AV_LOG_ERROR, "Error, only 16bit samples supported!\n");
        }
    }

//    return (get_bits_count(&s->gb)+7)/8;
    return buf_size;
}

/* Free the per-stream decode buffer. */
static av_cold int tta_decode_close(AVCodecContext *avctx) {
    TTAContext *s = avctx->priv_data;

    if (s->decode_buffer)
        av_free(s->decode_buffer);

    return 0;
}

AVCodec tta_decoder = {
    "tta",
    AVMEDIA_TYPE_AUDIO,
    CODEC_ID_TTA,
    sizeof(TTAContext),
    tta_decode_init,
    NULL,
    tta_decode_close,
    tta_decode_frame,
    .long_name = NULL_IF_CONFIG_SMALL("True Audio (TTA)"),
};
123linslouis-android-video-cutter
jni/libavcodec/tta.c
C
asf20
14,044
/*
 * copyright (c) 2006 Oded Shimon <ods15@ods15.dyndns.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_VORBIS_H
#define AVCODEC_VORBIS_H

#include "avcodec.h"

/* Shared tables used by the Vorbis decoder/encoder implementations. */
extern const float ff_vorbis_floor1_inverse_db_table[256];
extern const float * const ff_vorbis_vwin[8];
extern const uint8_t ff_vorbis_channel_layout_offsets[8][8];
extern const int64_t ff_vorbis_channel_layouts[9];

/* One point of a floor-1 curve: its x position plus the index ordering
 * (sort) and low/high neighbor indices used during rendering. */
typedef struct {
    uint_fast16_t x;
    uint_fast16_t sort;
    uint_fast16_t low;
    uint_fast16_t high;
} vorbis_floor1_entry;

void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values);
unsigned int ff_vorbis_nth_root(unsigned int x, unsigned int n); // x^(1/n)
int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, uint_fast32_t num);
void ff_vorbis_floor1_render_list(vorbis_floor1_entry * list, int values,
                                  uint_fast16_t * y_list, int * flag,
                                  int multiplier, float * out, int samples);
void vorbis_inverse_coupling(float *mag, float *ang, int blocksize);

/* ilog(i) as defined by the Vorbis spec: number of bits needed for i,
 * i.e. floor(log2(i)) + 1, with ilog(0) == 0. */
#define ilog(i) av_log2(2*(i))

#endif /* AVCODEC_VORBIS_H */
123linslouis-android-video-cutter
jni/libavcodec/vorbis.h
C
asf20
1,827
/*
 * Interface to xvidcore for mpeg4 encoding
 * Copyright (c) 2004 Adam Thayer <krevnik@comcast.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Interface to xvidcore for MPEG-4 compliant encoding.
 * @author Adam Thayer (krevnik@comcast.net)
 */

#include <xvid.h>
#include <unistd.h>
#include "avcodec.h"
#include "libavutil/intreadwrite.h"
#include "libxvid_internal.h"

/**
 * Buffer management macros.
 */
#define BUFFER_SIZE 1024
#define BUFFER_REMAINING(x) (BUFFER_SIZE - strlen(x))
#define BUFFER_CAT(x) (&((x)[strlen(x)]))

/* For PPC Use */
int has_altivec(void);

/**
 * Structure for the private Xvid context.
 * This stores all the private context for the codec.
 */
struct xvid_context {
    void *encoder_handle;          /** Handle for Xvid encoder */
    int xsize, ysize;              /** Frame size */
    int vop_flags;                 /** VOP flags for Xvid encoder */
    int vol_flags;                 /** VOL flags for Xvid encoder */
    int me_flags;                  /** Motion Estimation flags */
    int qscale;                    /** Do we use constant scale? */
    int quicktime_format;          /** Are we in a QT-based format? */
    AVFrame encoded_picture;       /** Encoded frame information */
    char *twopassbuffer;           /** Character buffer for two-pass */
    char *old_twopassbuffer;       /** Old character buffer (two-pass) */
    char *twopassfile;             /** second pass temp file name */
    unsigned char *intra_matrix;   /** P-Frame Quant Matrix */
    unsigned char *inter_matrix;   /** I-Frame Quant Matrix */
};

/**
 * Structure for the private first-pass plugin.
 */
struct xvid_ff_pass1 {
    int version;                   /** Xvid version */
    struct xvid_context *context;  /** Pointer to private context */
};

/* Prototypes - See function implementation for details */
int xvid_strip_vol_header(AVCodecContext *avctx, unsigned char *frame, unsigned int header_len, unsigned int frame_len);
int xvid_ff_2pass(void *ref, int opt, void *p1, void *p2);
void xvid_correct_framerate(AVCodecContext *avctx);

/**
 * Creates the private context for the encoder.
 * All buffers are allocated, settings are loaded from the user,
 * and the encoder context created.
 *
 * @param avctx AVCodecContext pointer to context
 * @return Returns 0 on success, -1 on failure
 */
static av_cold int xvid_encode_init(AVCodecContext *avctx)  {
    int xerr, i;
    int xvid_flags = avctx->flags;
    struct xvid_context *x = avctx->priv_data;
    uint16_t *intra, *inter;
    int fd;

    xvid_plugin_single_t single;
    struct xvid_ff_pass1 rc2pass1;
    xvid_plugin_2pass2_t rc2pass2;
    xvid_gbl_init_t xvid_gbl_init;
    xvid_enc_create_t xvid_enc_create;
    xvid_enc_plugin_t plugins[7];

    /* Bring in VOP flags from ffmpeg command-line */
    x->vop_flags = XVID_VOP_HALFPEL; /* Bare minimum quality */
    if( xvid_flags & CODEC_FLAG_4MV )
        x->vop_flags |= XVID_VOP_INTER4V; /* Level 3 */
    if( avctx->trellis )
        x->vop_flags |= XVID_VOP_TRELLISQUANT; /* Level 5 */
    if( xvid_flags & CODEC_FLAG_AC_PRED )
        x->vop_flags |= XVID_VOP_HQACPRED; /* Level 6 */
    if( xvid_flags & CODEC_FLAG_GRAY )
        x->vop_flags |= XVID_VOP_GREYSCALE;

    /* Decide which ME quality setting to use.
     * Cases intentionally fall through: each quality level accumulates
     * the flags of all lower levels. */
    x->me_flags = 0;
    switch( avctx->me_method ) {
        case ME_FULL:   /* Quality 6 */
            x->me_flags |= XVID_ME_EXTSEARCH16
                        |  XVID_ME_EXTSEARCH8;

        case ME_EPZS:   /* Quality 4 */
            x->me_flags |= XVID_ME_ADVANCEDDIAMOND8
                        |  XVID_ME_HALFPELREFINE8
                        |  XVID_ME_CHROMA_PVOP
                        |  XVID_ME_CHROMA_BVOP;

        case ME_LOG:    /* Quality 2 */
        case ME_PHODS:
        case ME_X1:
            x->me_flags |= XVID_ME_ADVANCEDDIAMOND16
                        |  XVID_ME_HALFPELREFINE16;

        case ME_ZERO:   /* Quality 0 */
        default:
            break;
    }

    /* Decide how we should decide blocks (fallthrough intentional here too) */
    switch( avctx->mb_decision ) {
        case 2:
            x->vop_flags |= XVID_VOP_MODEDECISION_RD;
            x->me_flags |= XVID_ME_HALFPELREFINE8_RD
                        |  XVID_ME_QUARTERPELREFINE8_RD
                        |  XVID_ME_EXTSEARCH_RD
                        |  XVID_ME_CHECKPREDICTION_RD;
        case 1:
            if( !(x->vop_flags & XVID_VOP_MODEDECISION_RD) )
                x->vop_flags |= XVID_VOP_FAST_MODEDECISION_RD;
            x->me_flags |= XVID_ME_HALFPELREFINE16_RD
                        |  XVID_ME_QUARTERPELREFINE16_RD;
        default:
            break;
    }

    /* Bring in VOL flags from ffmpeg command-line */
    x->vol_flags = 0;
    if( xvid_flags & CODEC_FLAG_GMC ) {
        x->vol_flags |= XVID_VOL_GMC;
        x->me_flags |= XVID_ME_GME_REFINE;
    }
    if( xvid_flags & CODEC_FLAG_QPEL ) {
        x->vol_flags |= XVID_VOL_QUARTERPEL;
        x->me_flags |= XVID_ME_QUARTERPELREFINE16;
        if( x->vop_flags & XVID_VOP_INTER4V )
            x->me_flags |= XVID_ME_QUARTERPELREFINE8;
    }

    memset(&xvid_gbl_init, 0, sizeof(xvid_gbl_init));
    xvid_gbl_init.version = XVID_VERSION;
    xvid_gbl_init.debug = 0;

#if ARCH_PPC
    /* Xvid's PPC support is borked, use libavcodec to detect */
#if HAVE_ALTIVEC
    if( has_altivec() ) {
        xvid_gbl_init.cpu_flags = XVID_CPU_FORCE | XVID_CPU_ALTIVEC;
    } else
#endif
        xvid_gbl_init.cpu_flags = XVID_CPU_FORCE;
#else
    /* Xvid can detect on x86 */
    xvid_gbl_init.cpu_flags = 0;
#endif

    /* Initialize */
    xvid_global(NULL, XVID_GBL_INIT, &xvid_gbl_init, NULL);

    /* Create the encoder reference */
    memset(&xvid_enc_create, 0, sizeof(xvid_enc_create));
    xvid_enc_create.version = XVID_VERSION;

    /* Store the desired frame size */
    xvid_enc_create.width = x->xsize = avctx->width;
    xvid_enc_create.height = x->ysize = avctx->height;

    /* Xvid can determine the proper profile to use */
    /* xvid_enc_create.profile = XVID_PROFILE_S_L3; */

    /* We don't use zones */
    xvid_enc_create.zones = NULL;
    xvid_enc_create.num_zones = 0;

    xvid_enc_create.num_threads = avctx->thread_count;

    xvid_enc_create.plugins = plugins;
    xvid_enc_create.num_plugins = 0;

    /* Initialize Buffers */
    x->twopassbuffer = NULL;
    x->old_twopassbuffer = NULL;
    x->twopassfile = NULL;

    if( xvid_flags & CODEC_FLAG_PASS1 ) {
        /* First pass: install our own logging plugin so statistics are
         * reported through avctx->stats_out instead of a file. */
        memset(&rc2pass1, 0, sizeof(struct xvid_ff_pass1));
        rc2pass1.version = XVID_VERSION;
        rc2pass1.context = x;
        x->twopassbuffer = av_malloc(BUFFER_SIZE);
        x->old_twopassbuffer = av_malloc(BUFFER_SIZE);
        if( x->twopassbuffer == NULL || x->old_twopassbuffer == NULL ) {
            av_log(avctx, AV_LOG_ERROR,
                "Xvid: Cannot allocate 2-pass log buffers\n");
            return -1;
        }
        x->twopassbuffer[0] = x->old_twopassbuffer[0] = 0;

        plugins[xvid_enc_create.num_plugins].func = xvid_ff_2pass;
        plugins[xvid_enc_create.num_plugins].param = &rc2pass1;
        xvid_enc_create.num_plugins++;
    } else if( xvid_flags & CODEC_FLAG_PASS2 ) {
        /* Second pass: hand the stats to Xvid through a temp file. */
        memset(&rc2pass2, 0, sizeof(xvid_plugin_2pass2_t));
        rc2pass2.version = XVID_VERSION;
        rc2pass2.bitrate = avctx->bit_rate;

        fd = av_tempfile("xvidff.", &(x->twopassfile));
        if( fd == -1 ) {
            av_log(avctx, AV_LOG_ERROR,
                "Xvid: Cannot write 2-pass pipe\n");
            return -1;
        }

        if( avctx->stats_in == NULL ) {
            av_log(avctx, AV_LOG_ERROR,
                "Xvid: No 2-pass information loaded for second pass\n");
            return -1;
        }

        if( strlen(avctx->stats_in) >
              write(fd, avctx->stats_in, strlen(avctx->stats_in)) ) {
            close(fd);
            av_log(avctx, AV_LOG_ERROR,
                "Xvid: Cannot write to 2-pass pipe\n");
            return -1;
        }

        close(fd);
        rc2pass2.filename = x->twopassfile;
        plugins[xvid_enc_create.num_plugins].func = xvid_plugin_2pass2;
        plugins[xvid_enc_create.num_plugins].param = &rc2pass2;
        xvid_enc_create.num_plugins++;
    } else if( !(xvid_flags & CODEC_FLAG_QSCALE) ) {
        /* Single Pass Bitrate Control! */
        memset(&single, 0, sizeof(xvid_plugin_single_t));
        single.version = XVID_VERSION;
        single.bitrate = avctx->bit_rate;

        plugins[xvid_enc_create.num_plugins].func = xvid_plugin_single;
        plugins[xvid_enc_create.num_plugins].param = &single;
        xvid_enc_create.num_plugins++;
    }

    /* Luminance Masking */
    if( 0.0 != avctx->lumi_masking ) {
        plugins[xvid_enc_create.num_plugins].func = xvid_plugin_lumimasking;
        plugins[xvid_enc_create.num_plugins].param = NULL;
        xvid_enc_create.num_plugins++;
    }

    /* Frame Rate and Key Frames */
    xvid_correct_framerate(avctx);
    xvid_enc_create.fincr = avctx->time_base.num;
    xvid_enc_create.fbase = avctx->time_base.den;
    if( avctx->gop_size > 0 )
        xvid_enc_create.max_key_interval = avctx->gop_size;
    else
        xvid_enc_create.max_key_interval = 240; /* Xvid's best default */

    /* Quants */
    if( xvid_flags & CODEC_FLAG_QSCALE )
        x->qscale = 1;
    else
        x->qscale = 0;

    xvid_enc_create.min_quant[0] = avctx->qmin;
    xvid_enc_create.min_quant[1] = avctx->qmin;
    xvid_enc_create.min_quant[2] = avctx->qmin;
    xvid_enc_create.max_quant[0] = avctx->qmax;
    xvid_enc_create.max_quant[1] = avctx->qmax;
    xvid_enc_create.max_quant[2] = avctx->qmax;

    /* Quant Matrices */
    x->intra_matrix = x->inter_matrix = NULL;
    if( avctx->mpeg_quant )
        x->vol_flags |= XVID_VOL_MPEGQUANT;
    if( (avctx->intra_matrix || avctx->inter_matrix) ) {
        x->vol_flags |= XVID_VOL_MPEGQUANT;

        if( avctx->intra_matrix ) {
            intra = avctx->intra_matrix;
            x->intra_matrix = av_malloc(sizeof(unsigned char) * 64);
        } else
            intra = NULL;
        if( avctx->inter_matrix ) {
            inter = avctx->inter_matrix;
            x->inter_matrix = av_malloc(sizeof(unsigned char) * 64);
        } else
            inter = NULL;

        /* NOTE(review): the av_malloc() results above are not checked
         * before the copies below — worth confirming upstream. */
        for( i = 0; i < 64; i++ ) {
            if( intra )
                x->intra_matrix[i] = (unsigned char)intra[i];
            if( inter )
                x->inter_matrix[i] = (unsigned char)inter[i];
        }
    }

    /* Misc Settings */
    xvid_enc_create.frame_drop_ratio = 0;
    xvid_enc_create.global = 0;
    if( xvid_flags & CODEC_FLAG_CLOSED_GOP )
        xvid_enc_create.global |= XVID_GLOBAL_CLOSED_GOP;

    /* Determines which codec mode we are operating in */
    avctx->extradata = NULL;
    avctx->extradata_size = 0;
    if( xvid_flags & CODEC_FLAG_GLOBAL_HEADER ) {
        /* In this case, we are claiming to be MPEG4 */
        x->quicktime_format = 1;
        avctx->codec_id = CODEC_ID_MPEG4;
    } else {
        /* We are claiming to be Xvid */
        x->quicktime_format = 0;
        if(!avctx->codec_tag)
            avctx->codec_tag = AV_RL32("xvid");
    }

    /* Bframes */
    xvid_enc_create.max_bframes = avctx->max_b_frames;
    xvid_enc_create.bquant_offset = 100 * avctx->b_quant_offset;
    xvid_enc_create.bquant_ratio = 100 * avctx->b_quant_factor;
    if( avctx->max_b_frames > 0 && !x->quicktime_format )
        xvid_enc_create.global |= XVID_GLOBAL_PACKED;

    /* Create encoder context */
    xerr = xvid_encore(NULL, XVID_ENC_CREATE, &xvid_enc_create, NULL);
    if( xerr ) {
        av_log(avctx, AV_LOG_ERROR, "Xvid: Could not create encoder reference\n");
        return -1;
    }

    x->encoder_handle = xvid_enc_create.handle;
    avctx->coded_frame = &x->encoded_picture;

    return 0;
}

/**
 * Encodes a single frame.
 *
 * @param avctx AVCodecContext pointer to context
 * @param frame Pointer to encoded frame buffer
 * @param buf_size Size of encoded frame buffer
 * @param data Pointer to AVFrame of unencoded frame
 * @return Returns 0 on success, -1 on failure
 */
static int xvid_encode_frame(AVCodecContext *avctx,
                         unsigned char *frame, int buf_size, void *data) {
    int xerr, i;
    char *tmp;
    struct xvid_context *x = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame *p = &(x->encoded_picture);

    xvid_enc_frame_t xvid_enc_frame;
    xvid_enc_stats_t xvid_enc_stats;

    /* Start setting up the frame */
    memset(&xvid_enc_frame, 0, sizeof(xvid_enc_frame));
    xvid_enc_frame.version = XVID_VERSION;
    memset(&xvid_enc_stats, 0, sizeof(xvid_enc_stats));
    xvid_enc_stats.version = XVID_VERSION;
    *p = *picture;

    /* Let Xvid know where to put the frame. */
    xvid_enc_frame.bitstream = frame;
    xvid_enc_frame.length = buf_size;

    /* Initialize input image fields */
    if( avctx->pix_fmt != PIX_FMT_YUV420P ) {
        av_log(avctx, AV_LOG_ERROR, "Xvid: Color spaces other than 420p not supported\n");
        return -1;
    }

    xvid_enc_frame.input.csp = XVID_CSP_PLANAR; /* YUV420P */

    for( i = 0; i < 4; i++ ) {
        xvid_enc_frame.input.plane[i] = picture->data[i];
        xvid_enc_frame.input.stride[i] = picture->linesize[i];
    }

    /* Encoder Flags */
    xvid_enc_frame.vop_flags = x->vop_flags;
    xvid_enc_frame.vol_flags = x->vol_flags;
    xvid_enc_frame.motion = x->me_flags;
    xvid_enc_frame.type = XVID_TYPE_AUTO;

    /* Pixel aspect ratio setting */
    if (avctx->sample_aspect_ratio.num < 1 || avctx->sample_aspect_ratio.num > 255 ||
        avctx->sample_aspect_ratio.den < 1 || avctx->sample_aspect_ratio.den > 255) {
        av_log(avctx, AV_LOG_ERROR, "Invalid pixel aspect ratio %i/%i\n",
               avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
        return -1;
    }
    xvid_enc_frame.par = XVID_PAR_EXT;
    xvid_enc_frame.par_width  = avctx->sample_aspect_ratio.num;
    xvid_enc_frame.par_height = avctx->sample_aspect_ratio.den;

    /* Quant Setting */
    if( x->qscale )
        xvid_enc_frame.quant = picture->quality / FF_QP2LAMBDA;
    else
        xvid_enc_frame.quant = 0;

    /* Matrices */
    xvid_enc_frame.quant_intra_matrix = x->intra_matrix;
    xvid_enc_frame.quant_inter_matrix = x->inter_matrix;

    /* Encode */
    xerr = xvid_encore(x->encoder_handle, XVID_ENC_ENCODE,
        &xvid_enc_frame, &xvid_enc_stats);

    /* Two-pass log buffer swapping */
    avctx->stats_out = NULL;
    if( x->twopassbuffer ) {
        tmp = x->old_twopassbuffer;
        x->old_twopassbuffer = x->twopassbuffer;
        x->twopassbuffer = tmp;
        x->twopassbuffer[0] = 0;
        if( x->old_twopassbuffer[0] != 0 ) {
            avctx->stats_out = x->old_twopassbuffer;
        }
    }

    if( 0 <= xerr ) {
        p->quality = xvid_enc_stats.quant * FF_QP2LAMBDA;
        if( xvid_enc_stats.type == XVID_TYPE_PVOP )
            p->pict_type = FF_P_TYPE;
        else if( xvid_enc_stats.type == XVID_TYPE_BVOP )
            p->pict_type = FF_B_TYPE;
        else if( xvid_enc_stats.type == XVID_TYPE_SVOP )
            p->pict_type = FF_S_TYPE;
        else
            p->pict_type = FF_I_TYPE;
        if( xvid_enc_frame.out_flags & XVID_KEYFRAME ) {
            p->key_frame = 1;
            if( x->quicktime_format )
                return xvid_strip_vol_header(avctx, frame,
                    xvid_enc_stats.hlength, xerr);
        } else
            p->key_frame = 0;

        return xerr;
    } else {
        av_log(avctx, AV_LOG_ERROR, "Xvid: Encoding Error Occurred: %i\n", xerr);
        return -1;
    }
}

/**
 * Destroys the private context for the encoder.
 * All buffers are freed, and the Xvid encoder context is destroyed.
 *
 * @param avctx AVCodecContext pointer to context
 * @return Returns 0, success guaranteed
 */
static av_cold int xvid_encode_close(AVCodecContext *avctx) {
    struct xvid_context *x = avctx->priv_data;

    xvid_encore(x->encoder_handle, XVID_ENC_DESTROY, NULL, NULL);

    if( avctx->extradata != NULL )
        av_freep(&avctx->extradata);
    if( x->twopassbuffer != NULL ) {
        av_free(x->twopassbuffer);
        av_free(x->old_twopassbuffer);
    }
    if( x->twopassfile != NULL )
        av_free(x->twopassfile);
    if( x->intra_matrix != NULL )
        av_free(x->intra_matrix);
    if( x->inter_matrix != NULL )
        av_free(x->inter_matrix);

    return 0;
}

/**
 * Routine to create a global VO/VOL header for MP4 container.
 * What we do here is extract the header from the Xvid bitstream
 * as it is encoded. We also strip the repeated headers from the
 * bitstream when a global header is requested for MPEG-4 ISO
 * compliance.
* * @param avctx AVCodecContext pointer to context * @param frame Pointer to encoded frame data * @param header_len Length of header to search * @param frame_len Length of encoded frame data * @return Returns new length of frame data */ int xvid_strip_vol_header(AVCodecContext *avctx, unsigned char *frame, unsigned int header_len, unsigned int frame_len) { int vo_len = 0, i; for( i = 0; i < header_len - 3; i++ ) { if( frame[i] == 0x00 && frame[i+1] == 0x00 && frame[i+2] == 0x01 && frame[i+3] == 0xB6 ) { vo_len = i; break; } } if( vo_len > 0 ) { /* We need to store the header, so extract it */ if( avctx->extradata == NULL ) { avctx->extradata = av_malloc(vo_len); memcpy(avctx->extradata, frame, vo_len); avctx->extradata_size = vo_len; } /* Less dangerous now, memmove properly copies the two chunks of overlapping data */ memmove(frame, &(frame[vo_len]), frame_len - vo_len); return frame_len - vo_len; } else return frame_len; } /** * Routine to correct a possibly erroneous framerate being fed to us. * Xvid currently chokes on framerates where the ticks per frame is * extremely large. This function works to correct problems in this area * by estimating a new framerate and taking the simpler fraction of * the two presented. * * @param avctx Context that contains the framerate to correct. 
*/
void xvid_correct_framerate(AVCodecContext *avctx) {
    int frate, fbase;
    int est_frate, est_fbase;
    int gcd;
    float est_fps, fps;

    frate = avctx->time_base.den;
    fbase = avctx->time_base.num;

    /* First try a plain reduction by the greatest common divisor. */
    gcd = av_gcd(frate, fbase);
    if( gcd > 1 ) {
        frate /= gcd;
        fbase /= gcd;
    }

    /* If both terms are already small enough for Xvid, keep them. */
    if( frate <= 65000 && fbase <= 65000 ) {
        avctx->time_base.den = frate;
        avctx->time_base.num = fbase;
        return;
    }

    /* Otherwise estimate the fps to three decimal places and rebuild
       a simpler num/den pair from that estimate. */
    fps = (float)frate / (float)fbase;
    est_fps = roundf(fps * 1000.0) / 1000.0;

    est_frate = (int)est_fps;
    if( est_fps > (int)est_fps ) {
        /* Non-integer fps: scale by 1000 so the fractional part survives. */
        est_frate = (est_frate + 1) * 1000;
        est_fbase = (int)roundf((float)est_frate / est_fps);
    } else
        est_fbase = 1;

    gcd = av_gcd(est_frate, est_fbase);
    if( gcd > 1 ) {
        est_frate /= gcd;
        est_fbase /= gcd;
    }

    /* Only adopt the estimate when it is actually simpler (smaller
       denominator term); otherwise keep the reduced original. */
    if( fbase > est_fbase ) {
        avctx->time_base.den = est_frate;
        avctx->time_base.num = est_fbase;
        av_log(avctx, AV_LOG_DEBUG,
            "Xvid: framerate re-estimated: %.2f, %.3f%% correction\n",
            est_fps, (((est_fps - fps)/fps) * 100.0));
    } else {
        avctx->time_base.den = frate;
        avctx->time_base.num = fbase;
    }
}

/*
 * Xvid 2-Pass Kludge Section
 *
 * Xvid's default 2-pass doesn't allow us to create data as we need to, so
 * this section spends time replacing the first pass plugin so we can write
 * statistic information as libavcodec requests in. We have another kludge
 * that allows us to pass data to the second pass in Xvid without a custom
 * rate-control plugin.
 */

/**
 * Initializes the two-pass plugin and context.
 *
 * @param param Input construction parameter structure
 * @param handle Private context handle
 * @return Returns XVID_ERR_xxxx on failure, or 0 on success.
*/
static int xvid_ff_2pass_create(xvid_plg_create_t * param,
                                void ** handle) {
    struct xvid_ff_pass1 *x = (struct xvid_ff_pass1 *)param->param;
    char *log = x->context->twopassbuffer;

    /* Do a quick bounds check */
    if( log == NULL )
        return XVID_ERR_FAIL;

    /* We use snprintf() */
    /* This is because we can safely prevent a buffer overflow */
    log[0] = 0;
    snprintf(log, BUFFER_REMAINING(log),
        "# ffmpeg 2-pass log file, using xvid codec\n");
    snprintf(BUFFER_CAT(log), BUFFER_REMAINING(log),
        "# Do not modify. libxvidcore version: %d.%d.%d\n\n",
        XVID_VERSION_MAJOR(XVID_VERSION),
        XVID_VERSION_MINOR(XVID_VERSION),
        XVID_VERSION_PATCH(XVID_VERSION));

    /* Hand our own context back to Xvid as the plugin handle. */
    *handle = x->context;
    return 0;
}

/**
 * Destroys the two-pass plugin context.
 *
 * @param ref Context pointer for the plugin
 * @param param Destroy context
 * @return Returns 0, success guaranteed
 */
static int xvid_ff_2pass_destroy(struct xvid_context *ref,
                                xvid_plg_destroy_t *param) {
    /* Currently cannot think of anything to do on destruction */
    /* Still, the framework should be here for reference/use */
    if( ref->twopassbuffer != NULL )
        ref->twopassbuffer[0] = 0;
    return 0;
}

/**
 * Enables fast encode mode during the first pass.
*
 * @param ref Context pointer for the plugin
 * @param param Frame data
 * @return Returns 0, success guaranteed
 */
static int xvid_ff_2pass_before(struct xvid_context *ref,
                                xvid_plg_data_t *param) {
    int motion_remove;
    int motion_replacements;
    int vop_remove;

    /* Nothing to do here, result is changed too much */
    if( param->zone && param->zone->mode == XVID_ZONE_QUANT )
        return 0;

    /* We can implement a 'turbo' first pass mode here */
    param->quant = 2;

    /* Init values: masks of expensive motion-estimation / VOP features
       to strip, and cheap replacements to enable instead. */
    motion_remove = ~XVID_ME_CHROMA_PVOP &
                    ~XVID_ME_CHROMA_BVOP &
                    ~XVID_ME_EXTSEARCH16 &
                    ~XVID_ME_ADVANCEDDIAMOND16;
    motion_replacements = XVID_ME_FAST_MODEINTERPOLATE |
                          XVID_ME_SKIP_DELTASEARCH |
                          XVID_ME_FASTREFINE16 |
                          XVID_ME_BFRAME_EARLYSTOP;
    vop_remove = ~XVID_VOP_MODEDECISION_RD &
                 ~XVID_VOP_FAST_MODEDECISION_RD &
                 ~XVID_VOP_TRELLISQUANT &
                 ~XVID_VOP_INTER4V &
                 ~XVID_VOP_HQACPRED;

    param->vol_flags &= ~XVID_VOL_GMC;
    param->vop_flags &= vop_remove;
    param->motion_flags &= motion_remove;
    param->motion_flags |= motion_replacements;

    return 0;
}

/**
 * Captures statistic data and writes it during first pass.
 *
 * @param ref Context pointer for the plugin
 * @param param Statistic data
 * @return Returns XVID_ERR_xxxx on failure, or 0 on success
 */
static int xvid_ff_2pass_after(struct xvid_context *ref,
                                xvid_plg_data_t *param) {
    char *log = ref->twopassbuffer;
    /* Index 1..4 maps Xvid frame type to the log's i/p/b/s letters. */
    char *frame_types = " ipbs";
    char frame_type;

    /* Quick bounds check */
    if( log == NULL )
        return XVID_ERR_FAIL;

    /* Convert the type given to us into a character */
    if( param->type < 5 && param->type > 0 ) {
        frame_type = frame_types[param->type];
    } else {
        return XVID_ERR_FAIL;
    }

    /* Append one stats record per frame to the two-pass buffer. */
    snprintf(BUFFER_CAT(log), BUFFER_REMAINING(log),
        "%c %d %d %d %d %d %d\n",
        frame_type, param->stats.quant, param->stats.kblks,
        param->stats.mblks, param->stats.ublks,
        param->stats.length, param->stats.hlength);

    return 0;
}

/**
 * Dispatch function for our custom plugin.
 * This handles the dispatch for the Xvid plugin. It passes data
 * on to other functions for actual processing.
*
 * @param ref Context pointer for the plugin
 * @param cmd The task given for us to complete
 * @param p1 First parameter (varies)
 * @param p2 Second parameter (varies)
 * @return Returns XVID_ERR_xxxx on failure, or 0 on success
 */
int xvid_ff_2pass(void *ref, int cmd, void *p1, void *p2) {
    switch( cmd ) {
        case XVID_PLG_INFO:
        case XVID_PLG_FRAME:
            /* Nothing to do for these events. */
            return 0;
        case XVID_PLG_BEFORE:
            return xvid_ff_2pass_before(ref, p1);
        case XVID_PLG_CREATE:
            return xvid_ff_2pass_create(p1, p2);
        case XVID_PLG_AFTER:
            return xvid_ff_2pass_after(ref, p1);
        case XVID_PLG_DESTROY:
            return xvid_ff_2pass_destroy(ref, p1);
        default:
            return XVID_ERR_FAIL;
    }
}

/**
 * Xvid codec definition for libavcodec.
 * Positional fields: name, type, id, priv_data_size, init, encode, close.
 */
AVCodec libxvid_encoder = {
    "libxvid",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_MPEG4,
    sizeof(struct xvid_context),
    xvid_encode_init,
    xvid_encode_frame,
    xvid_encode_close,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
    .long_name= NULL_IF_CONFIG_SMALL("libxvidcore MPEG-4 part 2"),
};
123linslouis-android-video-cutter
jni/libavcodec/libxvidff.c
C
asf20
25,751
/*
 * various filters for ACELP-based codecs
 *
 * Copyright (c) 2008 Vladimir Voroshilov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <inttypes.h>

#include "avcodec.h"
#include "celp_filters.h"

/* Circular convolution of the (sparse) fixed-codebook vector fc_in with
   filter; products are scaled by >> 15, so filter taps appear to be in
   Q15 fixed point — TODO confirm against the callers. */
void ff_celp_convolve_circ(int16_t* fc_out, const int16_t* fc_in,
                           const int16_t* filter, int len)
{
    int i, k;

    memset(fc_out, 0, len * sizeof(int16_t));

    /* Since there are few pulses over an entire subframe (i.e. almost
       all fc_in[i] are zero) it is faster to loop over fc_in first. */
    for (i = 0; i < len; i++) {
        if (fc_in[i]) {
            /* Indices before i wrap around to the tail of the filter
               (circular convolution). */
            for (k = 0; k < i; k++)
                fc_out[k] += (fc_in[i] * filter[len + k - i]) >> 15;

            for (k = i; k < len; k++)
                fc_out[k] += (fc_in[i] * filter[    k - i]) >> 15;
        }
    }
}

/* out[k] = in[k] + fac * lagged[k - lag], reading the lagged signal
   circularly: indices below zero wrap to the end of an n-sample buffer. */
void ff_celp_circ_addf(float *out, const float *in,
                       const float *lagged, int lag, float fac, int n)
{
    int k;
    for (k = 0; k < lag; k++)
        out[k] = in[k] + fac * lagged[n + k - lag];
    for (; k < n; k++)
        out[k] = in[k] + fac * lagged[    k - lag];
}

/* Fixed-point all-pole (LP synthesis) filter. Coefficient products are
   scaled by >> 12, so filter_coeffs appear to be Q12 — TODO confirm.
   Requires out[-filter_length .. -1] to hold valid history.
   Returns 1 on overflow when stop_on_overflow is set; otherwise the
   overflowed sample is clipped to [-32768, 32767] and filtering continues.
   Returns 0 on success. */
int ff_celp_lp_synthesis_filter(int16_t *out, const int16_t *filter_coeffs,
                                const int16_t *in, int buffer_length,
                                int filter_length, int stop_on_overflow,
                                int rounder)
{
    int i,n;

    for (n = 0; n < buffer_length; n++) {
        int sum = rounder;
        for (i = 1; i <= filter_length; i++)
            sum -= filter_coeffs[i-1] * out[n-i];

        sum = (sum >> 12) + in[n];

        if (sum + 0x8000 > 0xFFFFU) {
            if (stop_on_overflow)
                return 1;
            /* Clip to int16 range: sign-propagating shift gives all-ones
               for negatives, so the XOR yields -32768 or 32767. */
            sum = (sum >> 31) ^ 32767;
        }
        out[n] = sum;
    }

    return 0;
}

/* Floating-point all-pole (LP synthesis) filter.
   Requires out[-filter_length .. -1] (and, in the optimized path,
   out[-5] and below as the loop walks back) to hold valid history.
   The optimized path processes four output samples per iteration and
   reads filter_coeffs[0..3] unconditionally, then consumes the rest in
   pairs — it appears to assume filter_length is even and >= 4; TODO
   confirm against callers. */
void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs,
                                  const float* in, int buffer_length,
                                  int filter_length)
{
    int i,n;

#if 0 // Unoptimized code path for improved readability
    for (n = 0; n < buffer_length; n++) {
        out[n] = in[n];
        for (i = 1; i <= filter_length; i++)
            out[n] -= filter_coeffs[i-1] * out[n-i];
    }
#else
    float out0, out1, out2, out3;
    float old_out0, old_out1, old_out2, old_out3;
    float a,b,c;

    /* Precompute the intra-block feedback terms: a, b, c express the
       dependence of samples 1..3 on samples 0..2 of the same block. */
    a = filter_coeffs[0];
    b = filter_coeffs[1];
    c = filter_coeffs[2];
    b -= filter_coeffs[0] * filter_coeffs[0];
    c -= filter_coeffs[1] * filter_coeffs[0];
    c -= filter_coeffs[0] * b;

    /* Load the four most recent history samples. */
    old_out0 = out[-4];
    old_out1 = out[-3];
    old_out2 = out[-2];
    old_out3 = out[-1];
    for (n = 0; n <= buffer_length - 4; n+=4) {
        float tmp0,tmp1,tmp2,tmp3;
        float val;

        out0 = in[0];
        out1 = in[1];
        out2 = in[2];
        out3 = in[3];

        /* Apply taps 1..4 against the cached history registers. */
        out0 -= filter_coeffs[2] * old_out1;
        out1 -= filter_coeffs[2] * old_out2;
        out2 -= filter_coeffs[2] * old_out3;

        out0 -= filter_coeffs[1] * old_out2;
        out1 -= filter_coeffs[1] * old_out3;

        out0 -= filter_coeffs[0] * old_out3;

        val = filter_coeffs[3];

        out0 -= val * old_out0;
        out1 -= val * old_out1;
        out2 -= val * old_out2;
        out3 -= val * old_out3;

        old_out3 = out[-5];

        /* Remaining taps, two at a time, rotating the history registers
           instead of re-reading memory for every tap. */
        for (i = 5; i <= filter_length; i += 2) {
            val = filter_coeffs[i-1];

            out0 -= val * old_out3;
            out1 -= val * old_out0;
            out2 -= val * old_out1;
            out3 -= val * old_out2;

            old_out2 = out[-i-1];

            val = filter_coeffs[i];

            out0 -= val * old_out2;
            out1 -= val * old_out3;
            out2 -= val * old_out0;
            out3 -= val * old_out1;

            FFSWAP(float, old_out0, old_out2);
            old_out1 = old_out3;
            old_out3 = out[-i-2];
        }

        /* Resolve the dependence of out1..out3 on out0..out2 within
           this block using the precomputed a, b, c terms. */
        tmp0 = out0;
        tmp1 = out1;
        tmp2 = out2;
        tmp3 = out3;

        out3 -= a * tmp2;
        out2 -= a * tmp1;
        out1 -= a * tmp0;

        out3 -= b * tmp1;
        out2 -= b * tmp0;

        out3 -= c * tmp0;

        out[0] = out0;
        out[1] = out1;
        out[2] = out2;
        out[3] = out3;

        old_out0 = out0;
        old_out1 = out1;
        old_out2 = out2;
        old_out3 = out3;

        out += 4;
        in += 4;
    }

    /* Rewind the pointers and finish the remaining (< 4) samples with
       the straightforward scalar loop. */
    out -= n;
    in -= n;
    for (; n < buffer_length; n++) {
        out[n] = in[n];
        for (i = 1; i <= filter_length; i++)
            out[n] -= filter_coeffs[i-1] * out[n-i];
    }
#endif
}

/* Floating-point all-zero (FIR / LP analysis) filter.
   Requires in[-filter_length .. -1] to hold valid history. */
void ff_celp_lp_zero_synthesis_filterf(float *out, const float *filter_coeffs,
                                       const float *in, int buffer_length,
                                       int filter_length)
{
    int i,n;

    for (n = 0; n < buffer_length; n++) {
        out[n] = in[n];
        for (i = 1; i <= filter_length; i++)
            out[n] += filter_coeffs[i-1] * in[n-i];
    }
}
123linslouis-android-video-cutter
jni/libavcodec/celp_filters.c
C
asf20
5,675
/*
 * Simple math operations
 * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* AVR32 inline-assembly overrides for the generic mathops helpers. */

#ifndef AVCODEC_AVR32_MATHOPS_H
#define AVCODEC_AVR32_MATHOPS_H

#include <stdint.h>
#include "config.h"
#include "libavutil/common.h"

#if HAVE_INLINE_ASM

/* (a * b) >> shift using a 64-bit signed multiply (muls.d).
   NOTE(review): the output constraint is "=r"(x) on the whole union
   rather than x.x — looks suspicious; confirm against FFmpeg upstream. */
#define MULL MULL
static inline av_const int MULL(int a, int b, unsigned shift)
{
    union { int64_t x; int hl[2]; } x;
    __asm__ ("muls.d %0, %1, %2 \n\t"
             "lsr %0, %3 \n\t"
             "or %0, %0, %m0<<%4 \n\t"
             : "=r"(x)
             : "r"(b), "r"(a), "i"(shift), "i"(32-shift));
    return x.hl[1];
}

/* High 32 bits of the signed 64-bit product a*b. */
#define MULH MULH
static inline av_const int MULH(int a, int b)
{
    union { int64_t x; int hl[2]; } x;
    __asm__ ("muls.d %0, %1, %2" : "=r"(x.x) : "r"(a), "r"(b));
    return x.hl[0];
}

/* Full signed 64-bit product of two 32-bit ints. */
#define MUL64 MUL64
static inline av_const int64_t MUL64(int a, int b)
{
    int64_t x;
    __asm__ ("muls.d %0, %1, %2" : "=r"(x) : "r"(a), "r"(b));
    return x;
}

/* d += (int64_t)a * b via the 64-bit multiply-accumulate (macs.d). */
static inline av_const int64_t MAC64(int64_t d, int a, int b)
{
    __asm__ ("macs.d %0, %1, %2" : "+r"(d) : "r"(a), "r"(b));
    return d;
}
#define MAC64(d, a, b) ((d) = MAC64(d, a, b))
/* Multiply-subtract expressed through MAC64 with a negated operand. */
#define MLS64(d, a, b) MAC64(d, -(a), b)

/* d += (low half of a) * (low half of b): 16x16 multiply-accumulate. */
static inline av_const int MAC16(int d, int a, int b)
{
    __asm__ ("machh.w %0, %1:b, %2:b" : "+r"(d) : "r"(a), "r"(b));
    return d;
}
#define MAC16(d, a, b) ((d) = MAC16(d, a, b))
#define MLS16(d, a, b) MAC16(d, -(a), b)

/* 16x16 -> 32 multiply of the low halfwords of a and b. */
#define MUL16 MUL16
static inline av_const int MUL16(int a, int b)
{
    int d;
    __asm__ ("mulhh.w %0, %1:b, %2:b" : "=r"(d) : "r"(a), "r"(b));
    return d;
}

/* Median of three values computed branchlessly with conditional moves. */
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
    int m;
    __asm__ ("mov %0, %2 \n\t"
             "cp.w %1, %2 \n\t"
             "movgt %0, %1 \n\t"
             "movgt %1, %2 \n\t"
             "cp.w %1, %3 \n\t"
             "movle %1, %3 \n\t"
             "cp.w %0, %1 \n\t"
             "movgt %0, %1 \n\t"
             : "=&r"(m), "+r"(a)
             : "r"(b), "r"(c));
    return m;
}

#endif /* HAVE_INLINE_ASM */

#endif /* AVCODEC_AVR32_MATHOPS_H */
123linslouis-android-video-cutter
jni/libavcodec/avr32/mathops.h
C
asf20
2,845
/* * WavPack lossless audio decoder * Copyright (c) 2006 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #define ALT_BITSTREAM_READER_LE #include "avcodec.h" #include "get_bits.h" #include "unary.h" /** * @file * WavPack lossless audio decoder */ #define WV_MONO 0x00000004 #define WV_JOINT_STEREO 0x00000010 #define WV_FALSE_STEREO 0x40000000 #define WV_HYBRID_MODE 0x00000008 #define WV_HYBRID_SHAPE 0x00000008 #define WV_HYBRID_BITRATE 0x00000200 #define WV_HYBRID_BALANCE 0x00000400 #define WV_FLT_SHIFT_ONES 0x01 #define WV_FLT_SHIFT_SAME 0x02 #define WV_FLT_SHIFT_SENT 0x04 #define WV_FLT_ZERO_SENT 0x08 #define WV_FLT_ZERO_SIGN 0x10 enum WP_ID_Flags{ WP_IDF_MASK = 0x1F, WP_IDF_IGNORE = 0x20, WP_IDF_ODD = 0x40, WP_IDF_LONG = 0x80 }; enum WP_ID{ WP_ID_DUMMY = 0, WP_ID_ENCINFO, WP_ID_DECTERMS, WP_ID_DECWEIGHTS, WP_ID_DECSAMPLES, WP_ID_ENTROPY, WP_ID_HYBRID, WP_ID_SHAPING, WP_ID_FLOATINFO, WP_ID_INT32INFO, WP_ID_DATA, WP_ID_CORR, WP_ID_EXTRABITS, WP_ID_CHANINFO }; typedef struct SavedContext { int offset; int size; int bits_used; uint32_t crc; } SavedContext; #define MAX_TERMS 16 typedef struct Decorr { int delta; int value; int weightA; int weightB; int samplesA[8]; int samplesB[8]; } Decorr; typedef struct WvChannel { int median[3]; int slow_level, error_limit; int 
bitrate_acc, bitrate_delta; } WvChannel; typedef struct WavpackContext { AVCodecContext *avctx; int frame_flags; int stereo, stereo_in; int joint; uint32_t CRC; GetBitContext gb; int got_extra_bits; uint32_t crc_extra_bits; GetBitContext gb_extra_bits; int data_size; // in bits int samples; int terms; Decorr decorr[MAX_TERMS]; int zero, one, zeroes; int extra_bits; int and, or, shift; int post_shift; int hybrid, hybrid_bitrate; int float_flag; int float_shift; int float_max_exp; WvChannel ch[2]; int samples_left; int max_samples; int pos; SavedContext sc, extra_sc; } WavpackContext; // exponent table copied from WavPack source static const uint8_t wp_exp2_table [256] = { 0x00, 0x01, 0x01, 0x02, 0x03, 0x03, 0x04, 0x05, 0x06, 0x06, 0x07, 0x08, 0x08, 0x09, 0x0a, 0x0b, 0x0b, 0x0c, 0x0d, 0x0e, 0x0e, 0x0f, 0x10, 0x10, 0x11, 0x12, 0x13, 0x13, 0x14, 0x15, 0x16, 0x16, 0x17, 0x18, 0x19, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1d, 0x1e, 0x1f, 0x20, 0x20, 0x21, 0x22, 0x23, 0x24, 0x24, 0x25, 0x26, 0x27, 0x28, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc8, 0xc9, 0xca, 0xcb, 0xcd, 0xce, 0xcf, 
0xd0, 0xd2, 0xd3, 0xd4, 0xd6, 0xd7, 0xd8, 0xd9, 0xdb, 0xdc, 0xdd, 0xde, 0xe0, 0xe1, 0xe2, 0xe4, 0xe5, 0xe6, 0xe8, 0xe9, 0xea, 0xec, 0xed, 0xee, 0xf0, 0xf1, 0xf2, 0xf4, 0xf5, 0xf6, 0xf8, 0xf9, 0xfa, 0xfc, 0xfd, 0xff }; static const uint8_t wp_log2_table [] = { 0x00, 0x01, 0x03, 0x04, 0x06, 0x07, 0x09, 0x0a, 0x0b, 0x0d, 0x0e, 0x10, 0x11, 0x12, 0x14, 0x15, 0x16, 0x18, 0x19, 0x1a, 0x1c, 0x1d, 0x1e, 0x20, 0x21, 0x22, 0x24, 0x25, 0x26, 0x28, 0x29, 0x2a, 0x2c, 0x2d, 0x2e, 0x2f, 0x31, 0x32, 0x33, 0x34, 0x36, 0x37, 0x38, 0x39, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x41, 0x42, 0x43, 0x44, 0x45, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe4, 0xe5, 0xe6, 0xe7, 0xe7, 0xe8, 0xe9, 0xea, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xee, 0xef, 0xf0, 0xf1, 0xf1, 0xf2, 0xf3, 0xf4, 0xf4, 0xf5, 0xf6, 0xf7, 0xf7, 0xf8, 0xf9, 0xf9, 0xfa, 0xfb, 0xfc, 0xfc, 0xfd, 0xfe, 0xff, 0xff }; static av_always_inline int wp_exp2(int16_t val) { int res, neg = 0; if(val < 0){ val = -val; neg = 1; } res = wp_exp2_table[val & 0xFF] | 0x100; val >>= 8; res = (val > 9) ? 
(res << (val - 9)) : (res >> (9 - val)); return neg ? -res : res; } static av_always_inline int wp_log2(int32_t val) { int bits; if(!val) return 0; if(val == 1) return 256; val += val >> 9; bits = av_log2(val) + 1; if(bits < 9) return (bits << 8) + wp_log2_table[(val << (9 - bits)) & 0xFF]; else return (bits << 8) + wp_log2_table[(val >> (bits - 9)) & 0xFF]; } #define LEVEL_DECAY(a) ((a + 0x80) >> 8) // macros for manipulating median values #define GET_MED(n) ((c->median[n] >> 4) + 1) #define DEC_MED(n) c->median[n] -= ((c->median[n] + (128>>n) - 2) / (128>>n)) * 2 #define INC_MED(n) c->median[n] += ((c->median[n] + (128>>n)) / (128>>n)) * 5 // macros for applying weight #define UPDATE_WEIGHT_CLIP(weight, delta, samples, in) \ if(samples && in){ \ if((samples ^ in) < 0){ \ weight -= delta; \ if(weight < -1024) weight = -1024; \ }else{ \ weight += delta; \ if(weight > 1024) weight = 1024; \ } \ } static av_always_inline int get_tail(GetBitContext *gb, int k) { int p, e, res; if(k<1)return 0; p = av_log2(k); e = (1 << (p + 1)) - k - 1; res = p ? 
get_bits(gb, p) : 0; if(res >= e){ res = (res<<1) - e + get_bits1(gb); } return res; } static void update_error_limit(WavpackContext *ctx) { int i, br[2], sl[2]; for(i = 0; i <= ctx->stereo_in; i++){ ctx->ch[i].bitrate_acc += ctx->ch[i].bitrate_delta; br[i] = ctx->ch[i].bitrate_acc >> 16; sl[i] = LEVEL_DECAY(ctx->ch[i].slow_level); } if(ctx->stereo_in && ctx->hybrid_bitrate){ int balance = (sl[1] - sl[0] + br[1] + 1) >> 1; if(balance > br[0]){ br[1] = br[0] << 1; br[0] = 0; }else if(-balance > br[0]){ br[0] <<= 1; br[1] = 0; }else{ br[1] = br[0] + balance; br[0] = br[0] - balance; } } for(i = 0; i <= ctx->stereo_in; i++){ if(ctx->hybrid_bitrate){ if(sl[i] - br[i] > -0x100) ctx->ch[i].error_limit = wp_exp2(sl[i] - br[i] + 0x100); else ctx->ch[i].error_limit = 0; }else{ ctx->ch[i].error_limit = wp_exp2(br[i]); } } } static int wv_get_value(WavpackContext *ctx, GetBitContext *gb, int channel, int *last) { int t, t2; int sign, base, add, ret; WvChannel *c = &ctx->ch[channel]; *last = 0; if((ctx->ch[0].median[0] < 2U) && (ctx->ch[1].median[0] < 2U) && !ctx->zero && !ctx->one){ if(ctx->zeroes){ ctx->zeroes--; if(ctx->zeroes){ c->slow_level -= LEVEL_DECAY(c->slow_level); return 0; } }else{ t = get_unary_0_33(gb); if(t >= 2) t = get_bits(gb, t - 1) | (1 << (t-1)); ctx->zeroes = t; if(ctx->zeroes){ memset(ctx->ch[0].median, 0, sizeof(ctx->ch[0].median)); memset(ctx->ch[1].median, 0, sizeof(ctx->ch[1].median)); c->slow_level -= LEVEL_DECAY(c->slow_level); return 0; } } } if(get_bits_count(gb) >= ctx->data_size){ *last = 1; return 0; } if(ctx->zero){ t = 0; ctx->zero = 0; }else{ t = get_unary_0_33(gb); if(get_bits_count(gb) >= ctx->data_size){ *last = 1; return 0; } if(t == 16) { t2 = get_unary_0_33(gb); if(t2 < 2) t += t2; else t += get_bits(gb, t2 - 1) | (1 << (t2 - 1)); } if(ctx->one){ ctx->one = t&1; t = (t>>1) + 1; }else{ ctx->one = t&1; t >>= 1; } ctx->zero = !ctx->one; } if(ctx->hybrid && !channel) update_error_limit(ctx); if(!t){ base = 0; add = GET_MED(0) - 1; 
DEC_MED(0); }else if(t == 1){ base = GET_MED(0); add = GET_MED(1) - 1; INC_MED(0); DEC_MED(1); }else if(t == 2){ base = GET_MED(0) + GET_MED(1); add = GET_MED(2) - 1; INC_MED(0); INC_MED(1); DEC_MED(2); }else{ base = GET_MED(0) + GET_MED(1) + GET_MED(2) * (t - 2); add = GET_MED(2) - 1; INC_MED(0); INC_MED(1); INC_MED(2); } if(!c->error_limit){ ret = base + get_tail(gb, add); }else{ int mid = (base*2 + add + 1) >> 1; while(add > c->error_limit){ if(get_bits1(gb)){ add -= (mid - base); base = mid; }else add = mid - base - 1; mid = (base*2 + add + 1) >> 1; } ret = mid; } sign = get_bits1(gb); if(ctx->hybrid_bitrate) c->slow_level += wp_log2(ret) - LEVEL_DECAY(c->slow_level); return sign ? ~ret : ret; } static inline int wv_get_value_integer(WavpackContext *s, uint32_t *crc, int S) { int bit; if(s->extra_bits){ S <<= s->extra_bits; if(s->got_extra_bits){ S |= get_bits(&s->gb_extra_bits, s->extra_bits); *crc = *crc * 9 + (S&0xffff) * 3 + ((unsigned)S>>16); } } bit = (S & s->and) | s->or; return (((S + bit) << s->shift) - bit) << s->post_shift; } static float wv_get_value_float(WavpackContext *s, uint32_t *crc, int S) { union { float f; uint32_t u; } value; int sign; int exp = s->float_max_exp; if(s->got_extra_bits){ const int max_bits = 1 + 23 + 8 + 1; const int left_bits = get_bits_left(&s->gb_extra_bits); if(left_bits + 8 * FF_INPUT_BUFFER_PADDING_SIZE < max_bits) return 0.0; } if(S){ S <<= s->float_shift; sign = S < 0; if(sign) S = -S; if(S >= 0x1000000){ if(s->got_extra_bits && get_bits1(&s->gb_extra_bits)){ S = get_bits(&s->gb_extra_bits, 23); }else{ S = 0; } exp = 255; }else if(exp){ int shift = 23 - av_log2(S); exp = s->float_max_exp; if(exp <= shift){ shift = --exp; } exp -= shift; if(shift){ S <<= shift; if((s->float_flag & WV_FLT_SHIFT_ONES) || (s->got_extra_bits && (s->float_flag & WV_FLT_SHIFT_SAME) && get_bits1(&s->gb_extra_bits)) ){ S |= (1 << shift) - 1; } else if(s->got_extra_bits && (s->float_flag & WV_FLT_SHIFT_SENT)){ S |= get_bits(&s->gb_extra_bits, 
shift); } } }else{ exp = s->float_max_exp; } S &= 0x7fffff; }else{ sign = 0; exp = 0; if(s->got_extra_bits && (s->float_flag & WV_FLT_ZERO_SENT)){ if(get_bits1(&s->gb_extra_bits)){ S = get_bits(&s->gb_extra_bits, 23); if(s->float_max_exp >= 25) exp = get_bits(&s->gb_extra_bits, 8); sign = get_bits1(&s->gb_extra_bits); }else{ if(s->float_flag & WV_FLT_ZERO_SIGN) sign = get_bits1(&s->gb_extra_bits); } } } *crc = *crc * 27 + S * 9 + exp * 3 + sign; value.u = (sign << 31) | (exp << 23) | S; return value.f; } static void wv_reset_saved_context(WavpackContext *s) { s->pos = 0; s->sc.crc = s->extra_sc.crc = 0xFFFFFFFF; } static inline int wv_unpack_stereo(WavpackContext *s, GetBitContext *gb, void *dst, const int type) { int i, j, count = 0; int last, t; int A, B, L, L2, R, R2; int pos = s->pos; uint32_t crc = s->sc.crc; uint32_t crc_extra_bits = s->extra_sc.crc; int16_t *dst16 = dst; int32_t *dst32 = dst; float *dstfl = dst; if(s->samples_left == s->samples) s->one = s->zero = s->zeroes = 0; do{ L = wv_get_value(s, gb, 0, &last); if(last) break; R = wv_get_value(s, gb, 1, &last); if(last) break; for(i = 0; i < s->terms; i++){ t = s->decorr[i].value; if(t > 0){ if(t > 8){ if(t & 1){ A = 2 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]; B = 2 * s->decorr[i].samplesB[0] - s->decorr[i].samplesB[1]; }else{ A = (3 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]) >> 1; B = (3 * s->decorr[i].samplesB[0] - s->decorr[i].samplesB[1]) >> 1; } s->decorr[i].samplesA[1] = s->decorr[i].samplesA[0]; s->decorr[i].samplesB[1] = s->decorr[i].samplesB[0]; j = 0; }else{ A = s->decorr[i].samplesA[pos]; B = s->decorr[i].samplesB[pos]; j = (pos + t) & 7; } if(type != SAMPLE_FMT_S16){ L2 = L + ((s->decorr[i].weightA * (int64_t)A + 512) >> 10); R2 = R + ((s->decorr[i].weightB * (int64_t)B + 512) >> 10); }else{ L2 = L + ((s->decorr[i].weightA * A + 512) >> 10); R2 = R + ((s->decorr[i].weightB * B + 512) >> 10); } if(A && L) s->decorr[i].weightA -= ((((L ^ A) >> 30) & 2) - 1) * 
s->decorr[i].delta; if(B && R) s->decorr[i].weightB -= ((((R ^ B) >> 30) & 2) - 1) * s->decorr[i].delta; s->decorr[i].samplesA[j] = L = L2; s->decorr[i].samplesB[j] = R = R2; }else if(t == -1){ if(type != SAMPLE_FMT_S16) L2 = L + ((s->decorr[i].weightA * (int64_t)s->decorr[i].samplesA[0] + 512) >> 10); else L2 = L + ((s->decorr[i].weightA * s->decorr[i].samplesA[0] + 512) >> 10); UPDATE_WEIGHT_CLIP(s->decorr[i].weightA, s->decorr[i].delta, s->decorr[i].samplesA[0], L); L = L2; if(type != SAMPLE_FMT_S16) R2 = R + ((s->decorr[i].weightB * (int64_t)L2 + 512) >> 10); else R2 = R + ((s->decorr[i].weightB * L2 + 512) >> 10); UPDATE_WEIGHT_CLIP(s->decorr[i].weightB, s->decorr[i].delta, L2, R); R = R2; s->decorr[i].samplesA[0] = R; }else{ if(type != SAMPLE_FMT_S16) R2 = R + ((s->decorr[i].weightB * (int64_t)s->decorr[i].samplesB[0] + 512) >> 10); else R2 = R + ((s->decorr[i].weightB * s->decorr[i].samplesB[0] + 512) >> 10); UPDATE_WEIGHT_CLIP(s->decorr[i].weightB, s->decorr[i].delta, s->decorr[i].samplesB[0], R); R = R2; if(t == -3){ R2 = s->decorr[i].samplesA[0]; s->decorr[i].samplesA[0] = R; } if(type != SAMPLE_FMT_S16) L2 = L + ((s->decorr[i].weightA * (int64_t)R2 + 512) >> 10); else L2 = L + ((s->decorr[i].weightA * R2 + 512) >> 10); UPDATE_WEIGHT_CLIP(s->decorr[i].weightA, s->decorr[i].delta, R2, L); L = L2; s->decorr[i].samplesB[0] = L; } } pos = (pos + 1) & 7; if(s->joint) L += (R -= (L >> 1)); crc = (crc * 3 + L) * 3 + R; if(type == SAMPLE_FMT_FLT){ *dstfl++ = wv_get_value_float(s, &crc_extra_bits, L); *dstfl++ = wv_get_value_float(s, &crc_extra_bits, R); } else if(type == SAMPLE_FMT_S32){ *dst32++ = wv_get_value_integer(s, &crc_extra_bits, L); *dst32++ = wv_get_value_integer(s, &crc_extra_bits, R); } else { *dst16++ = wv_get_value_integer(s, &crc_extra_bits, L); *dst16++ = wv_get_value_integer(s, &crc_extra_bits, R); } count++; }while(!last && count < s->max_samples); s->samples_left -= count; if(!s->samples_left){ if(crc != s->CRC){ av_log(s->avctx, AV_LOG_ERROR, 
"CRC error\n"); return -1; } if(s->got_extra_bits && crc_extra_bits != s->crc_extra_bits){ av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n"); return -1; } wv_reset_saved_context(s); }else{ s->pos = pos; s->sc.crc = crc; s->sc.bits_used = get_bits_count(&s->gb); if(s->got_extra_bits){ s->extra_sc.crc = crc_extra_bits; s->extra_sc.bits_used = get_bits_count(&s->gb_extra_bits); } } return count * 2; } static inline int wv_unpack_mono(WavpackContext *s, GetBitContext *gb, void *dst, const int type) { int i, j, count = 0; int last, t; int A, S, T; int pos = s->pos; uint32_t crc = s->sc.crc; uint32_t crc_extra_bits = s->extra_sc.crc; int16_t *dst16 = dst; int32_t *dst32 = dst; float *dstfl = dst; if(s->samples_left == s->samples) s->one = s->zero = s->zeroes = 0; do{ T = wv_get_value(s, gb, 0, &last); S = 0; if(last) break; for(i = 0; i < s->terms; i++){ t = s->decorr[i].value; if(t > 8){ if(t & 1) A = 2 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]; else A = (3 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]) >> 1; s->decorr[i].samplesA[1] = s->decorr[i].samplesA[0]; j = 0; }else{ A = s->decorr[i].samplesA[pos]; j = (pos + t) & 7; } if(type != SAMPLE_FMT_S16) S = T + ((s->decorr[i].weightA * (int64_t)A + 512) >> 10); else S = T + ((s->decorr[i].weightA * A + 512) >> 10); if(A && T) s->decorr[i].weightA -= ((((T ^ A) >> 30) & 2) - 1) * s->decorr[i].delta; s->decorr[i].samplesA[j] = T = S; } pos = (pos + 1) & 7; crc = crc * 3 + S; if(type == SAMPLE_FMT_FLT) *dstfl++ = wv_get_value_float(s, &crc_extra_bits, S); else if(type == SAMPLE_FMT_S32) *dst32++ = wv_get_value_integer(s, &crc_extra_bits, S); else *dst16++ = wv_get_value_integer(s, &crc_extra_bits, S); count++; }while(!last && count < s->samples); s->samples_left -= count; if(!s->samples_left){ if(crc != s->CRC){ av_log(s->avctx, AV_LOG_ERROR, "CRC error\n"); return -1; } if(s->got_extra_bits && crc_extra_bits != s->crc_extra_bits){ av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n"); return 
-1; } wv_reset_saved_context(s); }else{ s->pos = pos; s->sc.crc = crc; s->sc.bits_used = get_bits_count(&s->gb); if(s->got_extra_bits){ s->extra_sc.crc = crc_extra_bits; s->extra_sc.bits_used = get_bits_count(&s->gb_extra_bits); } } return count; } static av_cold int wavpack_decode_init(AVCodecContext *avctx) { WavpackContext *s = avctx->priv_data; s->avctx = avctx; s->stereo = (avctx->channels == 2); if(avctx->bits_per_coded_sample <= 16) avctx->sample_fmt = SAMPLE_FMT_S16; else avctx->sample_fmt = SAMPLE_FMT_S32; avctx->channel_layout = (avctx->channels==2) ? CH_LAYOUT_STEREO : CH_LAYOUT_MONO; wv_reset_saved_context(s); return 0; } static int wavpack_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; WavpackContext *s = avctx->priv_data; void *samples = data; int samplecount; int got_terms = 0, got_weights = 0, got_samples = 0, got_entropy = 0, got_bs = 0, got_float = 0; int got_hybrid = 0; const uint8_t* buf_end = buf + buf_size; int i, j, id, size, ssize, weights, t; int bpp; if (buf_size == 0){ *data_size = 0; return 0; } if(!s->samples_left){ memset(s->decorr, 0, MAX_TERMS * sizeof(Decorr)); memset(s->ch, 0, sizeof(s->ch)); s->extra_bits = 0; s->and = s->or = s->shift = 0; s->got_extra_bits = 0; } s->samples = AV_RL32(buf); buf += 4; if(!s->samples){ *data_size = 0; return buf_size; } s->frame_flags = AV_RL32(buf); buf += 4; if(s->frame_flags&0x80){ bpp = sizeof(float); avctx->sample_fmt = SAMPLE_FMT_FLT; } else if((s->frame_flags&0x03) <= 1){ bpp = 2; avctx->sample_fmt = SAMPLE_FMT_S16; } else { bpp = 4; avctx->sample_fmt = SAMPLE_FMT_S32; } s->stereo_in = (s->frame_flags & WV_FALSE_STEREO) ? 
0 : s->stereo; s->joint = s->frame_flags & WV_JOINT_STEREO; s->hybrid = s->frame_flags & WV_HYBRID_MODE; s->hybrid_bitrate = s->frame_flags & WV_HYBRID_BITRATE; s->post_shift = 8 * (bpp-1-(s->frame_flags&0x03)) + ((s->frame_flags >> 13) & 0x1f); s->CRC = AV_RL32(buf); buf += 4; s->max_samples = *data_size / (bpp * avctx->channels); s->max_samples = FFMIN(s->max_samples, s->samples); if(s->samples_left > 0){ s->max_samples = FFMIN(s->max_samples, s->samples_left); buf = buf_end; } // parse metadata blocks while(buf < buf_end){ id = *buf++; size = *buf++; if(id & WP_IDF_LONG) { size |= (*buf++) << 8; size |= (*buf++) << 16; } size <<= 1; // size is specified in words ssize = size; if(id & WP_IDF_ODD) size--; if(size < 0){ av_log(avctx, AV_LOG_ERROR, "Got incorrect block %02X with size %i\n", id, size); break; } if(buf + ssize > buf_end){ av_log(avctx, AV_LOG_ERROR, "Block size %i is out of bounds\n", size); break; } if(id & WP_IDF_IGNORE){ buf += ssize; continue; } switch(id & WP_IDF_MASK){ case WP_ID_DECTERMS: s->terms = size; if(s->terms > MAX_TERMS){ av_log(avctx, AV_LOG_ERROR, "Too many decorrelation terms\n"); buf += ssize; continue; } for(i = 0; i < s->terms; i++) { s->decorr[s->terms - i - 1].value = (*buf & 0x1F) - 5; s->decorr[s->terms - i - 1].delta = *buf >> 5; buf++; } got_terms = 1; break; case WP_ID_DECWEIGHTS: if(!got_terms){ av_log(avctx, AV_LOG_ERROR, "No decorrelation terms met\n"); continue; } weights = size >> s->stereo_in; if(weights > MAX_TERMS || weights > s->terms){ av_log(avctx, AV_LOG_ERROR, "Too many decorrelation weights\n"); buf += ssize; continue; } for(i = 0; i < weights; i++) { t = (int8_t)(*buf++); s->decorr[s->terms - i - 1].weightA = t << 3; if(s->decorr[s->terms - i - 1].weightA > 0) s->decorr[s->terms - i - 1].weightA += (s->decorr[s->terms - i - 1].weightA + 64) >> 7; if(s->stereo_in){ t = (int8_t)(*buf++); s->decorr[s->terms - i - 1].weightB = t << 3; if(s->decorr[s->terms - i - 1].weightB > 0) s->decorr[s->terms - i - 
1].weightB += (s->decorr[s->terms - i - 1].weightB + 64) >> 7; } } got_weights = 1; break; case WP_ID_DECSAMPLES: if(!got_terms){ av_log(avctx, AV_LOG_ERROR, "No decorrelation terms met\n"); continue; } t = 0; for(i = s->terms - 1; (i >= 0) && (t < size); i--) { if(s->decorr[i].value > 8){ s->decorr[i].samplesA[0] = wp_exp2(AV_RL16(buf)); buf += 2; s->decorr[i].samplesA[1] = wp_exp2(AV_RL16(buf)); buf += 2; if(s->stereo_in){ s->decorr[i].samplesB[0] = wp_exp2(AV_RL16(buf)); buf += 2; s->decorr[i].samplesB[1] = wp_exp2(AV_RL16(buf)); buf += 2; t += 4; } t += 4; }else if(s->decorr[i].value < 0){ s->decorr[i].samplesA[0] = wp_exp2(AV_RL16(buf)); buf += 2; s->decorr[i].samplesB[0] = wp_exp2(AV_RL16(buf)); buf += 2; t += 4; }else{ for(j = 0; j < s->decorr[i].value; j++){ s->decorr[i].samplesA[j] = wp_exp2(AV_RL16(buf)); buf += 2; if(s->stereo_in){ s->decorr[i].samplesB[j] = wp_exp2(AV_RL16(buf)); buf += 2; } } t += s->decorr[i].value * 2 * (s->stereo_in + 1); } } got_samples = 1; break; case WP_ID_ENTROPY: if(size != 6 * (s->stereo_in + 1)){ av_log(avctx, AV_LOG_ERROR, "Entropy vars size should be %i, got %i", 6 * (s->stereo_in + 1), size); buf += ssize; continue; } for(j = 0; j <= s->stereo_in; j++){ for(i = 0; i < 3; i++){ s->ch[j].median[i] = wp_exp2(AV_RL16(buf)); buf += 2; } } got_entropy = 1; break; case WP_ID_HYBRID: if(s->hybrid_bitrate){ for(i = 0; i <= s->stereo_in; i++){ s->ch[i].slow_level = wp_exp2(AV_RL16(buf)); buf += 2; size -= 2; } } for(i = 0; i < (s->stereo_in + 1); i++){ s->ch[i].bitrate_acc = AV_RL16(buf) << 16; buf += 2; size -= 2; } if(size > 0){ for(i = 0; i < (s->stereo_in + 1); i++){ s->ch[i].bitrate_delta = wp_exp2((int16_t)AV_RL16(buf)); buf += 2; } }else{ for(i = 0; i < (s->stereo_in + 1); i++) s->ch[i].bitrate_delta = 0; } got_hybrid = 1; break; case WP_ID_INT32INFO: if(size != 4){ av_log(avctx, AV_LOG_ERROR, "Invalid INT32INFO, size = %i, sent_bits = %i\n", size, *buf); buf += ssize; continue; } if(buf[0]) s->extra_bits = buf[0]; else 
if(buf[1]) s->shift = buf[1]; else if(buf[2]){ s->and = s->or = 1; s->shift = buf[2]; }else if(buf[3]){ s->and = 1; s->shift = buf[3]; } buf += 4; break; case WP_ID_FLOATINFO: if(size != 4){ av_log(avctx, AV_LOG_ERROR, "Invalid FLOATINFO, size = %i\n", size); buf += ssize; continue; } s->float_flag = buf[0]; s->float_shift = buf[1]; s->float_max_exp = buf[2]; buf += 4; got_float = 1; break; case WP_ID_DATA: s->sc.offset = buf - avpkt->data; s->sc.size = size * 8; init_get_bits(&s->gb, buf, size * 8); s->data_size = size * 8; buf += size; got_bs = 1; break; case WP_ID_EXTRABITS: if(size <= 4){ av_log(avctx, AV_LOG_ERROR, "Invalid EXTRABITS, size = %i\n", size); buf += size; continue; } s->extra_sc.offset = buf - avpkt->data; s->extra_sc.size = size * 8; init_get_bits(&s->gb_extra_bits, buf, size * 8); s->crc_extra_bits = get_bits_long(&s->gb_extra_bits, 32); buf += size; s->got_extra_bits = 1; break; default: buf += size; } if(id & WP_IDF_ODD) buf++; } if(!s->samples_left){ if(!got_terms){ av_log(avctx, AV_LOG_ERROR, "No block with decorrelation terms\n"); return -1; } if(!got_weights){ av_log(avctx, AV_LOG_ERROR, "No block with decorrelation weights\n"); return -1; } if(!got_samples){ av_log(avctx, AV_LOG_ERROR, "No block with decorrelation samples\n"); return -1; } if(!got_entropy){ av_log(avctx, AV_LOG_ERROR, "No block with entropy info\n"); return -1; } if(s->hybrid && !got_hybrid){ av_log(avctx, AV_LOG_ERROR, "Hybrid config not found\n"); return -1; } if(!got_bs){ av_log(avctx, AV_LOG_ERROR, "Packed samples not found\n"); return -1; } if(!got_float && avctx->sample_fmt == SAMPLE_FMT_FLT){ av_log(avctx, AV_LOG_ERROR, "Float information not found\n"); return -1; } if(s->got_extra_bits && avctx->sample_fmt != SAMPLE_FMT_FLT){ const int size = get_bits_left(&s->gb_extra_bits); const int wanted = s->samples * s->extra_bits << s->stereo_in; if(size < wanted){ av_log(avctx, AV_LOG_ERROR, "Too small EXTRABITS\n"); s->got_extra_bits = 0; } } s->samples_left = 
s->samples; }else{ init_get_bits(&s->gb, avpkt->data + s->sc.offset, s->sc.size); skip_bits_long(&s->gb, s->sc.bits_used); if(s->got_extra_bits){ init_get_bits(&s->gb_extra_bits, avpkt->data + s->extra_sc.offset, s->extra_sc.size); skip_bits_long(&s->gb_extra_bits, s->extra_sc.bits_used); } } if(s->stereo_in){ if(avctx->sample_fmt == SAMPLE_FMT_S16) samplecount = wv_unpack_stereo(s, &s->gb, samples, SAMPLE_FMT_S16); else if(avctx->sample_fmt == SAMPLE_FMT_S32) samplecount = wv_unpack_stereo(s, &s->gb, samples, SAMPLE_FMT_S32); else samplecount = wv_unpack_stereo(s, &s->gb, samples, SAMPLE_FMT_FLT); }else{ if(avctx->sample_fmt == SAMPLE_FMT_S16) samplecount = wv_unpack_mono(s, &s->gb, samples, SAMPLE_FMT_S16); else if(avctx->sample_fmt == SAMPLE_FMT_S32) samplecount = wv_unpack_mono(s, &s->gb, samples, SAMPLE_FMT_S32); else samplecount = wv_unpack_mono(s, &s->gb, samples, SAMPLE_FMT_FLT); if(s->stereo && avctx->sample_fmt == SAMPLE_FMT_S16){ int16_t *dst = (int16_t*)samples + samplecount * 2; int16_t *src = (int16_t*)samples + samplecount; int cnt = samplecount; while(cnt--){ *--dst = *--src; *--dst = *src; } samplecount *= 2; }else if(s->stereo && avctx->sample_fmt == SAMPLE_FMT_S32){ int32_t *dst = (int32_t*)samples + samplecount * 2; int32_t *src = (int32_t*)samples + samplecount; int cnt = samplecount; while(cnt--){ *--dst = *--src; *--dst = *src; } samplecount *= 2; }else if(s->stereo){ float *dst = (float*)samples + samplecount * 2; float *src = (float*)samples + samplecount; int cnt = samplecount; while(cnt--){ *--dst = *--src; *--dst = *src; } samplecount *= 2; } } *data_size = samplecount * bpp; return s->samples_left > 0 ? 0 : buf_size; } AVCodec wavpack_decoder = { "wavpack", AVMEDIA_TYPE_AUDIO, CODEC_ID_WAVPACK, sizeof(WavpackContext), wavpack_decode_init, NULL, NULL, wavpack_decode_frame, .capabilities = CODEC_CAP_SUBFRAMES, .long_name = NULL_IF_CONFIG_SMALL("WavPack"), };
123linslouis-android-video-cutter
jni/libavcodec/wavpack.c
C
asf20
35,305
/* * Generate a header file for hardcoded mpegaudiodec tables * * Copyright (c) 2009 Reimar Döffinger <Reimar.Doeffinger@gmx.de> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdlib.h> #define CONFIG_HARDCODED_TABLES 0 #include "mpegaudio_tablegen.h" #include "tableprint.h" int main(void) { mpegaudio_tableinit(); write_fileheader(); printf("static const int8_t table_4_3_exp[TABLE_4_3_SIZE] = {\n"); write_int8_array(table_4_3_exp, TABLE_4_3_SIZE); printf("};\n"); printf("static const uint32_t table_4_3_value[TABLE_4_3_SIZE] = {\n"); write_uint32_array(table_4_3_value, TABLE_4_3_SIZE); printf("};\n"); printf("static const uint32_t exp_table[512] = {\n"); write_uint32_array(exp_table, 512); printf("};\n"); printf("static const uint32_t expval_table[512][16] = {\n"); write_uint32_2d_array(expval_table, 512, 16); printf("};\n"); return 0; }
123linslouis-android-video-cutter
jni/libavcodec/mpegaudio_tablegen.c
C
asf20
1,647
/* * RoQ audio encoder * * Copyright (c) 2005 Eric Lasota * Based on RoQ specs (c)2001 Tim Ferguson * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/intmath.h" #include "avcodec.h" #include "bytestream.h" #define ROQ_FIRST_FRAME_SIZE (735*8) #define ROQ_FRAME_SIZE 735 #define MAX_DPCM (127*127) typedef struct { short lastSample[2]; } ROQDPCMContext; static av_cold int roq_dpcm_encode_init(AVCodecContext *avctx) { ROQDPCMContext *context = avctx->priv_data; if (avctx->channels > 2) { av_log(avctx, AV_LOG_ERROR, "Audio must be mono or stereo\n"); return -1; } if (avctx->sample_rate != 22050) { av_log(avctx, AV_LOG_ERROR, "Audio must be 22050 Hz\n"); return -1; } if (avctx->sample_fmt != SAMPLE_FMT_S16) { av_log(avctx, AV_LOG_ERROR, "Audio must be signed 16-bit\n"); return -1; } avctx->frame_size = ROQ_FIRST_FRAME_SIZE; context->lastSample[0] = context->lastSample[1] = 0; avctx->coded_frame= avcodec_alloc_frame(); avctx->coded_frame->key_frame= 1; return 0; } static unsigned char dpcm_predict(short *previous, short current) { int diff; int negative; int result; int predicted; diff = current - *previous; negative = diff<0; diff = FFABS(diff); if (diff >= MAX_DPCM) result = 127; else { result = ff_sqrt(diff); result += diff > result*result+result; } /* See if this 
overflows */ retry: diff = result*result; if (negative) diff = -diff; predicted = *previous + diff; /* If it overflows, back off a step */ if (predicted > 32767 || predicted < -32768) { result--; goto retry; } /* Add the sign bit */ result |= negative << 7; //if (negative) result |= 128; *previous = predicted; return result; } static int roq_dpcm_encode_frame(AVCodecContext *avctx, unsigned char *frame, int buf_size, void *data) { int i, samples, stereo, ch; short *in; unsigned char *out; ROQDPCMContext *context = avctx->priv_data; stereo = (avctx->channels == 2); if (stereo) { context->lastSample[0] &= 0xFF00; context->lastSample[1] &= 0xFF00; } out = frame; in = data; bytestream_put_byte(&out, stereo ? 0x21 : 0x20); bytestream_put_byte(&out, 0x10); bytestream_put_le32(&out, avctx->frame_size*avctx->channels); if (stereo) { bytestream_put_byte(&out, (context->lastSample[1])>>8); bytestream_put_byte(&out, (context->lastSample[0])>>8); } else bytestream_put_le16(&out, context->lastSample[0]); /* Write the actual samples */ samples = avctx->frame_size; for (i=0; i<samples; i++) for (ch=0; ch<avctx->channels; ch++) *out++ = dpcm_predict(&context->lastSample[ch], *in++); /* Use smaller frames from now on */ avctx->frame_size = ROQ_FRAME_SIZE; /* Return the result size */ return out - frame; } static av_cold int roq_dpcm_encode_close(AVCodecContext *avctx) { av_freep(&avctx->coded_frame); return 0; } AVCodec roq_dpcm_encoder = { "roq_dpcm", AVMEDIA_TYPE_AUDIO, CODEC_ID_ROQ_DPCM, sizeof(ROQDPCMContext), roq_dpcm_encode_init, roq_dpcm_encode_frame, roq_dpcm_encode_close, NULL, .sample_fmts = (const enum SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("id RoQ DPCM"), };
123linslouis-android-video-cutter
jni/libavcodec/roqaudioenc.c
C
asf20
4,238
/* * AC-3 tables * copyright (c) 2001 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * tables taken directly from the AC-3 spec. */ #include "avcodec.h" #include "ac3tab.h" /** * Possible frame sizes. * from ATSC A/52 Table 5.18 Frame Size Code Table. */ const uint16_t ff_ac3_frame_size_tab[38][3] = { { 64, 69, 96 }, { 64, 70, 96 }, { 80, 87, 120 }, { 80, 88, 120 }, { 96, 104, 144 }, { 96, 105, 144 }, { 112, 121, 168 }, { 112, 122, 168 }, { 128, 139, 192 }, { 128, 140, 192 }, { 160, 174, 240 }, { 160, 175, 240 }, { 192, 208, 288 }, { 192, 209, 288 }, { 224, 243, 336 }, { 224, 244, 336 }, { 256, 278, 384 }, { 256, 279, 384 }, { 320, 348, 480 }, { 320, 349, 480 }, { 384, 417, 576 }, { 384, 418, 576 }, { 448, 487, 672 }, { 448, 488, 672 }, { 512, 557, 768 }, { 512, 558, 768 }, { 640, 696, 960 }, { 640, 697, 960 }, { 768, 835, 1152 }, { 768, 836, 1152 }, { 896, 975, 1344 }, { 896, 976, 1344 }, { 1024, 1114, 1536 }, { 1024, 1115, 1536 }, { 1152, 1253, 1728 }, { 1152, 1254, 1728 }, { 1280, 1393, 1920 }, { 1280, 1394, 1920 }, }; /** * Maps audio coding mode (acmod) to number of full-bandwidth channels. 
* from ATSC A/52 Table 5.8 Audio Coding Mode */ const uint8_t ff_ac3_channels_tab[8] = { 2, 1, 2, 3, 3, 4, 4, 5 }; /** * Maps audio coding mode (acmod) to channel layout mask. */ const uint16_t ff_ac3_channel_layout_tab[8] = { CH_LAYOUT_STEREO, CH_LAYOUT_MONO, CH_LAYOUT_STEREO, CH_LAYOUT_SURROUND, CH_LAYOUT_2_1, CH_LAYOUT_4POINT0, CH_LAYOUT_2_2, CH_LAYOUT_5POINT0 }; #define COMMON_CHANNEL_MAP \ { { 0, 1, }, { 0, 1, 2, } },\ { { 0, }, { 0, 1, } },\ { { 0, 1, }, { 0, 1, 2, } },\ { { 0, 2, 1, }, { 0, 2, 1, 3, } },\ { { 0, 1, 2, }, { 0, 1, 3, 2, } },\ { { 0, 2, 1, 3, }, { 0, 2, 1, 4, 3, } }, /** * Table to remap channels from SMPTE order to AC-3 order. * [channel_mode][lfe][ch] */ const uint8_t ff_ac3_enc_channel_map[8][2][6] = { COMMON_CHANNEL_MAP { { 0, 1, 2, 3, }, { 0, 1, 3, 4, 2, } }, { { 0, 2, 1, 3, 4, }, { 0, 2, 1, 4, 5, 3 } }, }; /** * Table to remap channels from from AC-3 order to SMPTE order. * [channel_mode][lfe][ch] */ const uint8_t ff_ac3_dec_channel_map[8][2][6] = { COMMON_CHANNEL_MAP { { 0, 1, 2, 3, }, { 0, 1, 4, 2, 3, } }, { { 0, 2, 1, 3, 4, }, { 0, 2, 1, 5, 3, 4 } }, }; /* possible frequencies */ const uint16_t ff_ac3_sample_rate_tab[3] = { 48000, 44100, 32000 }; /* possible bitrates */ const uint16_t ff_ac3_bitrate_tab[19] = { 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448, 512, 576, 640 }; /* AC-3 MDCT window */ /* MDCT window */ const int16_t ff_ac3_window[256] = { 4, 7, 12, 16, 21, 28, 34, 42, 51, 61, 72, 84, 97, 111, 127, 145, 164, 184, 207, 231, 257, 285, 315, 347, 382, 419, 458, 500, 544, 591, 641, 694, 750, 810, 872, 937, 1007, 1079, 1155, 1235, 1318, 1406, 1497, 1593, 1692, 1796, 1903, 2016, 2132, 2253, 2379, 2509, 2644, 2783, 2927, 3076, 3230, 3389, 3552, 3721, 3894, 4072, 4255, 4444, 4637, 4835, 5038, 5246, 5459, 5677, 5899, 6127, 6359, 6596, 6837, 7083, 7334, 7589, 7848, 8112, 8380, 8652, 8927, 9207, 9491, 9778,10069,10363, 10660,10960,11264,11570,11879,12190,12504,12820, 
13138,13458,13780,14103,14427,14753,15079,15407, 15735,16063,16392,16720,17049,17377,17705,18032, 18358,18683,19007,19330,19651,19970,20287,20602, 20914,21225,21532,21837,22139,22438,22733,23025, 23314,23599,23880,24157,24430,24699,24964,25225, 25481,25732,25979,26221,26459,26691,26919,27142, 27359,27572,27780,27983,28180,28373,28560,28742, 28919,29091,29258,29420,29577,29729,29876,30018, 30155,30288,30415,30538,30657,30771,30880,30985, 31086,31182,31274,31363,31447,31528,31605,31678, 31747,31814,31877,31936,31993,32046,32097,32145, 32190,32232,32272,32310,32345,32378,32409,32438, 32465,32490,32513,32535,32556,32574,32592,32608, 32623,32636,32649,32661,32671,32681,32690,32698, 32705,32712,32718,32724,32729,32733,32737,32741, 32744,32747,32750,32752,32754,32756,32757,32759, 32760,32761,32762,32763,32764,32764,32765,32765, 32766,32766,32766,32766,32767,32767,32767,32767, 32767,32767,32767,32767,32767,32767,32767,32767, 32767,32767,32767,32767,32767,32767,32767,32767, }; const uint8_t ff_ac3_log_add_tab[260]= { 0x40,0x3f,0x3e,0x3d,0x3c,0x3b,0x3a,0x39,0x38,0x37, 0x36,0x35,0x34,0x34,0x33,0x32,0x31,0x30,0x2f,0x2f, 0x2e,0x2d,0x2c,0x2c,0x2b,0x2a,0x29,0x29,0x28,0x27, 0x26,0x26,0x25,0x24,0x24,0x23,0x23,0x22,0x21,0x21, 0x20,0x20,0x1f,0x1e,0x1e,0x1d,0x1d,0x1c,0x1c,0x1b, 0x1b,0x1a,0x1a,0x19,0x19,0x18,0x18,0x17,0x17,0x16, 0x16,0x15,0x15,0x15,0x14,0x14,0x13,0x13,0x13,0x12, 0x12,0x12,0x11,0x11,0x11,0x10,0x10,0x10,0x0f,0x0f, 0x0f,0x0e,0x0e,0x0e,0x0d,0x0d,0x0d,0x0d,0x0c,0x0c, 0x0c,0x0c,0x0b,0x0b,0x0b,0x0b,0x0a,0x0a,0x0a,0x0a, 0x0a,0x09,0x09,0x09,0x09,0x09,0x08,0x08,0x08,0x08, 0x08,0x08,0x07,0x07,0x07,0x07,0x07,0x07,0x06,0x06, 0x06,0x06,0x06,0x06,0x06,0x06,0x05,0x05,0x05,0x05, 0x05,0x05,0x05,0x05,0x04,0x04,0x04,0x04,0x04,0x04, 0x04,0x04,0x04,0x04,0x04,0x03,0x03,0x03,0x03,0x03, 0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x02, 0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02, 0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x01,0x01, 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, 
0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, }; const uint16_t ff_ac3_hearing_threshold_tab[50][3]= { { 0x04d0,0x04f0,0x0580 }, { 0x04d0,0x04f0,0x0580 }, { 0x0440,0x0460,0x04b0 }, { 0x0400,0x0410,0x0450 }, { 0x03e0,0x03e0,0x0420 }, { 0x03c0,0x03d0,0x03f0 }, { 0x03b0,0x03c0,0x03e0 }, { 0x03b0,0x03b0,0x03d0 }, { 0x03a0,0x03b0,0x03c0 }, { 0x03a0,0x03a0,0x03b0 }, { 0x03a0,0x03a0,0x03b0 }, { 0x03a0,0x03a0,0x03b0 }, { 0x03a0,0x03a0,0x03a0 }, { 0x0390,0x03a0,0x03a0 }, { 0x0390,0x0390,0x03a0 }, { 0x0390,0x0390,0x03a0 }, { 0x0380,0x0390,0x03a0 }, { 0x0380,0x0380,0x03a0 }, { 0x0370,0x0380,0x03a0 }, { 0x0370,0x0380,0x03a0 }, { 0x0360,0x0370,0x0390 }, { 0x0360,0x0370,0x0390 }, { 0x0350,0x0360,0x0390 }, { 0x0350,0x0360,0x0390 }, { 0x0340,0x0350,0x0380 }, { 0x0340,0x0350,0x0380 }, { 0x0330,0x0340,0x0380 }, { 0x0320,0x0340,0x0370 }, { 0x0310,0x0320,0x0360 }, { 0x0300,0x0310,0x0350 }, { 0x02f0,0x0300,0x0340 }, { 0x02f0,0x02f0,0x0330 }, { 0x02f0,0x02f0,0x0320 }, { 0x02f0,0x02f0,0x0310 }, { 0x0300,0x02f0,0x0300 }, { 0x0310,0x0300,0x02f0 }, { 0x0340,0x0320,0x02f0 }, { 0x0390,0x0350,0x02f0 }, { 0x03e0,0x0390,0x0300 }, { 0x0420,0x03e0,0x0310 }, { 0x0460,0x0420,0x0330 }, { 0x0490,0x0450,0x0350 }, { 0x04a0,0x04a0,0x03c0 }, { 0x0460,0x0490,0x0410 }, { 0x0440,0x0460,0x0470 }, { 0x0440,0x0440,0x04a0 }, { 0x0520,0x0480,0x0460 }, { 0x0800,0x0630,0x0440 }, { 0x0840,0x0840,0x0450 }, { 0x0840,0x0840,0x04e0 }, }; const uint8_t ff_ac3_bap_tab[64]= { 0, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, }; const uint8_t 
ff_ac3_slow_decay_tab[4]={ 0x0f, 0x11, 0x13, 0x15, }; const uint8_t ff_ac3_fast_decay_tab[4]={ 0x3f, 0x53, 0x67, 0x7b, }; const uint16_t ff_ac3_slow_gain_tab[4]= { 0x540, 0x4d8, 0x478, 0x410, }; const uint16_t ff_ac3_db_per_bit_tab[4]= { 0x000, 0x700, 0x900, 0xb00, }; const int16_t ff_ac3_floor_tab[8]= { 0x2f0, 0x2b0, 0x270, 0x230, 0x1f0, 0x170, 0x0f0, 0xf800, }; const uint16_t ff_ac3_fast_gain_tab[8]= { 0x080, 0x100, 0x180, 0x200, 0x280, 0x300, 0x380, 0x400, }; const uint8_t ff_ac3_critical_band_size_tab[50]={ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 12, 12, 12, 12, 24, 24, 24, 24, 24 }; /** * Default channel map for a dependent substream defined by acmod */ const uint16_t ff_eac3_default_chmap[8] = { AC3_CHMAP_L | AC3_CHMAP_R, // FIXME Ch1+Ch2 AC3_CHMAP_C, AC3_CHMAP_L | AC3_CHMAP_R, AC3_CHMAP_L | AC3_CHMAP_C | AC3_CHMAP_R, AC3_CHMAP_L | AC3_CHMAP_R | AC3_CHMAP_C_SUR, AC3_CHMAP_L | AC3_CHMAP_C | AC3_CHMAP_R | AC3_CHMAP_C_SUR, AC3_CHMAP_L | AC3_CHMAP_R | AC3_CHMAP_L_SUR | AC3_CHMAP_R_SUR, AC3_CHMAP_L | AC3_CHMAP_C | AC3_CHMAP_R | AC3_CHMAP_L_SUR | AC3_CHMAP_R_SUR };
123linslouis-android-video-cutter
jni/libavcodec/ac3tab.c
C
asf20
9,748
/* * Bethesda VID video decoder * Copyright (C) 2007 Nicholas Tung * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_BETHSOFTVIDEO_H #define AVCODEC_BETHSOFTVIDEO_H enum BethsoftVidBlockType { PALETTE_BLOCK = 0x02, FIRST_AUDIO_BLOCK = 0x7c, AUDIO_BLOCK = 0x7d, VIDEO_I_FRAME = 0x03, VIDEO_P_FRAME = 0x01, VIDEO_YOFF_P_FRAME = 0x04, EOF_BLOCK = 0x14, }; #endif /* AVCODEC_BETHSOFTVIDEO_H */
123linslouis-android-video-cutter
jni/libavcodec/bethsoftvideo.h
C
asf20
1,191
/* * Copyright (C) 2007 FFmpeg Project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_XIPH_H #define AVCODEC_XIPH_H #include "libavutil/common.h" /** * Splits a single extradata buffer into the three headers that most * Xiph codecs use. (e.g. Theora and Vorbis) * Works both with Matroska's packing and lavc's packing. * * @param[in] extradata The single chunk that combines all three headers * @param[in] extradata_size The size of the extradata buffer * @param[in] first_header_size The size of the first header, used to * differentiate between the Matroska packing and lavc packing. * @param[out] header_start Pointers to the start of the three separate headers. * @param[out] header_len The sizes of each of the three headers. * @return On error a negative value is returned, on success zero. */ int ff_split_xiph_headers(uint8_t *extradata, int extradata_size, int first_header_size, uint8_t *header_start[3], int header_len[3]); #endif /* AVCODEC_XIPH_H */
123linslouis-android-video-cutter
jni/libavcodec/xiph.h
C
asf20
1,765
/* * lossless JPEG encoder * Copyright (c) 2000, 2001 Fabrice Bellard * Copyright (c) 2003 Alex Beregszaszi * Copyright (c) 2003-2004 Michael Niedermayer * * Support for external huffman table, various fixes (AVID workaround), * aspecting, new decode_frame mechanism and apple mjpeg-b support * by Alex Beregszaszi * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * lossless JPEG encoder. 
*/ #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" #include "mjpeg.h" #include "mjpegenc.h" static int encode_picture_lossless(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ MpegEncContext * const s = avctx->priv_data; MJpegContext * const m = s->mjpeg_ctx; AVFrame *pict = data; const int width= s->width; const int height= s->height; AVFrame * const p= (AVFrame*)&s->current_picture; const int predictor= avctx->prediction_method+1; init_put_bits(&s->pb, buf, buf_size); *p = *pict; p->pict_type= FF_I_TYPE; p->key_frame= 1; ff_mjpeg_encode_picture_header(s); s->header_bits= put_bits_count(&s->pb); if(avctx->pix_fmt == PIX_FMT_BGRA){ int x, y, i; const int linesize= p->linesize[0]; uint16_t (*buffer)[4]= (void *) s->rd_scratchpad; int left[3], top[3], topleft[3]; for(i=0; i<3; i++){ buffer[0][i]= 1 << (9 - 1); } for(y = 0; y < height; y++) { const int modified_predictor= y ? predictor : 1; uint8_t *ptr = p->data[0] + (linesize * y); if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < width*3*4){ av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n"); return -1; } for(i=0; i<3; i++){ top[i]= left[i]= topleft[i]= buffer[0][i]; } for(x = 0; x < width; x++) { buffer[x][1] = ptr[4*x+0] - ptr[4*x+1] + 0x100; buffer[x][2] = ptr[4*x+2] - ptr[4*x+1] + 0x100; buffer[x][0] = (ptr[4*x+0] + 2*ptr[4*x+1] + ptr[4*x+2])>>2; for(i=0;i<3;i++) { int pred, diff; PREDICT(pred, topleft[i], top[i], left[i], modified_predictor); topleft[i]= top[i]; top[i]= buffer[x+1][i]; left[i]= buffer[x][i]; diff= ((left[i] - pred + 0x100)&0x1FF) - 0x100; if(i==0) ff_mjpeg_encode_dc(s, diff, m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly else ff_mjpeg_encode_dc(s, diff, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance); } } } }else{ int mb_x, mb_y, i; const int mb_width = (width + s->mjpeg_hsample[0] - 1) / s->mjpeg_hsample[0]; const int mb_height = (height + s->mjpeg_vsample[0] - 1) / s->mjpeg_vsample[0]; for(mb_y = 0; mb_y 
< mb_height; mb_y++) { if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < mb_width * 4 * 3 * s->mjpeg_hsample[0] * s->mjpeg_vsample[0]){ av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n"); return -1; } for(mb_x = 0; mb_x < mb_width; mb_x++) { if(mb_x==0 || mb_y==0){ for(i=0;i<3;i++) { uint8_t *ptr; int x, y, h, v, linesize; h = s->mjpeg_hsample[i]; v = s->mjpeg_vsample[i]; linesize= p->linesize[i]; for(y=0; y<v; y++){ for(x=0; x<h; x++){ int pred; ptr = p->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap if(y==0 && mb_y==0){ if(x==0 && mb_x==0){ pred= 128; }else{ pred= ptr[-1]; } }else{ if(x==0 && mb_x==0){ pred= ptr[-linesize]; }else{ PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor); } } if(i==0) ff_mjpeg_encode_dc(s, *ptr - pred, m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly else ff_mjpeg_encode_dc(s, *ptr - pred, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance); } } } }else{ for(i=0;i<3;i++) { uint8_t *ptr; int x, y, h, v, linesize; h = s->mjpeg_hsample[i]; v = s->mjpeg_vsample[i]; linesize= p->linesize[i]; for(y=0; y<v; y++){ for(x=0; x<h; x++){ int pred; ptr = p->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap //printf("%d %d %d %d %8X\n", mb_x, mb_y, x, y, ptr); PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor); if(i==0) ff_mjpeg_encode_dc(s, *ptr - pred, m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly else ff_mjpeg_encode_dc(s, *ptr - pred, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance); } } } } } } } emms_c(); ff_mjpeg_encode_picture_trailer(s); s->picture_number++; flush_put_bits(&s->pb); return put_bits_ptr(&s->pb) - s->pb.buf; // return (put_bits_count(&f->pb)+7)/8; } AVCodec ljpeg_encoder = { //FIXME avoid MPV_* lossless JPEG should not need them "ljpeg", AVMEDIA_TYPE_VIDEO, CODEC_ID_LJPEG, sizeof(MpegEncContext), MPV_encode_init, encode_picture_lossless, 
MPV_encode_end, .long_name = NULL_IF_CONFIG_SMALL("Lossless JPEG"), };
123linslouis-android-video-cutter
jni/libavcodec/ljpegenc.c
C
asf20
7,475
/*
 * PGS subtitle decoder
 * Copyright (c) 2009 Stephen Backway
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * PGS subtitle decoder
 */

#include "avcodec.h"
#include "dsputil.h"
#include "colorspace.h"
#include "bytestream.h"

//#define DEBUG_PACKET_CONTENTS

/* Pack 8-bit R, G, B, A components into a 32-bit ARGB word (PIX_FMT_RGB32). */
#define RGBA(r,g,b,a) (((a) << 24) | ((r) << 16) | ((g) << 8) | (b))

/* Segment type codes as they appear in the PGS elementary stream. */
enum SegmentType {
    PALETTE_SEGMENT      = 0x14,
    PICTURE_SEGMENT      = 0x15,
    PRESENTATION_SEGMENT = 0x16,
    WINDOW_SEGMENT       = 0x17,
    DISPLAY_SEGMENT      = 0x80,
};

/* State carried over from the most recent presentation segment. */
typedef struct PGSSubPresentation {
    int x;         ///< subtitle x position within the video frame
    int y;         ///< subtitle y position within the video frame
    int video_w;   ///< video width as signalled by the stream
    int video_h;   ///< video height as signalled by the stream
    int id_number; ///< presentation composition number
} PGSSubPresentation;

/* RLE bitmap data from the most recent picture (object) segment. */
typedef struct PGSSubPicture {
    int          w;
    int          h;
    uint8_t      *rle;                          ///< buffered RLE data, owned by the context
    unsigned int rle_buffer_size, rle_data_len; ///< allocated size / valid bytes in rle
} PGSSubPicture;

typedef struct PGSSubContext {
    PGSSubPresentation presentation;
    uint32_t           clut[256]; ///< palette, ARGB words built by parse_palette_segment()
    PGSSubPicture      picture;
} PGSSubContext;

static av_cold int init_decoder(AVCodecContext *avctx)
{
    avctx->pix_fmt = PIX_FMT_RGB32;

    return 0;
}

static av_cold int close_decoder(AVCodecContext *avctx)
{
    PGSSubContext *ctx = avctx->priv_data;

    av_freep(&ctx->picture.rle);
    ctx->picture.rle_buffer_size = 0;

    return 0;
}

/**
 * Decodes the RLE data.
 *
 * The subtitle is stored as a Run Length Encoded image.
 *
 * @param avctx contains the current codec context
 * @param sub pointer to the processed subtitle data
 * @param buf pointer to the RLE data to process
 * @param buf_size size of the RLE data to process
 * @return 0 on success, negative on allocation failure
 */
static int decode_rle(AVCodecContext *avctx, AVSubtitle *sub,
                      const uint8_t *buf, unsigned int buf_size)
{
    const uint8_t *rle_bitmap_end;
    int pixel_count, line_count;

    rle_bitmap_end = buf + buf_size;

    sub->rects[0]->pict.data[0] = av_malloc(sub->rects[0]->w * sub->rects[0]->h);

    if (!sub->rects[0]->pict.data[0])
        return -1;

    pixel_count = 0;
    line_count  = 0;

    while (buf < rle_bitmap_end && line_count < sub->rects[0]->h) {
        uint8_t flags, color;
        int run;

        /* A leading 0x00 escapes into an extended code: 6- or 14-bit run
         * length, optional explicit color, or (run == 0) end of line. */
        color = bytestream_get_byte(&buf);
        run   = 1;

        if (color == 0x00) {
            flags = bytestream_get_byte(&buf);
            run   = flags & 0x3f;
            if (flags & 0x40)
                run = (run << 8) + bytestream_get_byte(&buf);
            color = flags & 0x80 ? bytestream_get_byte(&buf) : 0;
        }

        /* Only write the run if it fits inside the bitmap. */
        if (run > 0 && pixel_count + run <= sub->rects[0]->w * sub->rects[0]->h) {
            memset(sub->rects[0]->pict.data[0] + pixel_count, color, run);
            pixel_count += run;
        } else if (!run) {
            /*
             * New Line. Check if correct pixels decoded, if not display warning
             * and adjust bitmap pointer to correct new line position.
             */
            if (pixel_count % sub->rects[0]->w > 0)
                av_log(avctx, AV_LOG_ERROR, "Decoded %d pixels, when line should be %d pixels\n",
                       pixel_count % sub->rects[0]->w, sub->rects[0]->w);
            line_count++;
        }
    }

    dprintf(avctx, "Pixel Count = %d, Area = %d\n", pixel_count, sub->rects[0]->w * sub->rects[0]->h);

    return 0;
}

/**
 * Parses the picture segment packet.
 *
 * The picture segment contains details on the sequence id,
 * width, height and Run Length Encoded (RLE) bitmap data.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @return 0 on success, negative on error or unsupported data
 * @todo TODO: Enable support for RLE data over multiple packets
 */
static int parse_picture_segment(AVCodecContext *avctx,
                                 const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;

    uint8_t sequence_desc;
    unsigned int rle_bitmap_len, width, height;

    /* Guard first: with buf_size < 7 the "buf_size - 7" comparison below
     * would wrap to a huge unsigned value and be bypassed entirely. */
    if (buf_size < 7)
        return -1;

    /* skip 3 unknown bytes: Object ID (2 bytes), Version Number */
    buf += 3;

    /* Read the Sequence Description to determine if start of RLE data or appended to previous RLE */
    sequence_desc = bytestream_get_byte(&buf);

    if (!(sequence_desc & 0x80)) {
        av_log(avctx, AV_LOG_ERROR, "Decoder does not support object data over multiple packets.\n");
        return -1;
    }

    /* Decode rle bitmap length */
    rle_bitmap_len = bytestream_get_be24(&buf);

    /* Check to ensure we have enough data for rle_bitmap_length if just a single packet */
    if (rle_bitmap_len > (unsigned)buf_size - 7) {
        av_log(avctx, AV_LOG_ERROR, "Not enough RLE data for specified length of %d.\n", rle_bitmap_len);
        return -1;
    }

    ctx->picture.rle_data_len = rle_bitmap_len;

    /* Get bitmap dimensions from data */
    width  = bytestream_get_be16(&buf);
    height = bytestream_get_be16(&buf);

    /* Make sure the bitmap is not too large */
    if (ctx->presentation.video_w < width || ctx->presentation.video_h < height) {
        av_log(avctx, AV_LOG_ERROR, "Bitmap dimensions larger then video.\n");
        return -1;
    }

    ctx->picture.w = width;
    ctx->picture.h = height;

    /* NOTE(review): the signalled length appears to include the 4 width/height
     * bytes already consumed, so this may copy up to 4 bytes past the useful
     * data — TODO confirm against the PGS object segment layout. */
    av_fast_malloc(&ctx->picture.rle, &ctx->picture.rle_buffer_size, rle_bitmap_len);

    if (!ctx->picture.rle)
        return -1;

    memcpy(ctx->picture.rle, buf, rle_bitmap_len);

    return 0;
}

/**
 * Parses the palette segment packet.
 *
 * The palette segment contains details of the palette,
 * a maximum of 256 colors can be defined.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 */
static void parse_palette_segment(AVCodecContext *avctx,
                                  const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;

    const uint8_t *buf_end = buf + buf_size;
    const uint8_t *cm      = ff_cropTbl + MAX_NEG_CROP;
    int color_id;
    int y, cb, cr, alpha;
    int r, g, b, r_add, g_add, b_add;

    /* Skip two null bytes */
    buf += 2;

    /* Each entry is 5 bytes; require a full entry so we never read past
     * the end of a truncated segment. */
    while (buf_end - buf >= 5) {
        color_id  = bytestream_get_byte(&buf);
        y         = bytestream_get_byte(&buf);
        cb        = bytestream_get_byte(&buf);
        cr        = bytestream_get_byte(&buf);
        alpha     = bytestream_get_byte(&buf);

        YUV_TO_RGB1(cb, cr);
        YUV_TO_RGB2(r, g, b, y);

        dprintf(avctx, "Color %d := (%d,%d,%d,%d)\n", color_id, r, g, b, alpha);

        /* Store color in palette */
        ctx->clut[color_id] = RGBA(r,g,b,alpha);
    }
}

/**
 * Parses the presentation segment packet.
 *
 * The presentation segment contains details on the video
 * width, video height, x & y subtitle position.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @todo TODO: Implement cropping
 * @todo TODO: Implement forcing of subtitles
 * @todo TODO: Blanking of subtitle
 */
static void parse_presentation_segment(AVCodecContext *avctx,
                                       const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;

    int x, y;
    uint8_t block;

    /* The fixed header read below consumes 8 bytes. */
    if (buf_size < 8)
        return;

    ctx->presentation.video_w = bytestream_get_be16(&buf);
    ctx->presentation.video_h = bytestream_get_be16(&buf);

    dprintf(avctx, "Video Dimensions %dx%d\n",
            ctx->presentation.video_w, ctx->presentation.video_h);

    /* Skip 1 bytes of unknown, frame rate? */
    buf++;

    ctx->presentation.id_number = bytestream_get_be16(&buf);

    /* Next byte is the state. */
    block = bytestream_get_byte(&buf);
    if (block == 0x80) {
        /* The object branch consumes 11 more bytes (19 total). */
        if (buf_size < 19)
            return;

        /*
         * Skip 7 bytes of unknown:
         *     palette_update_flag (0x80),
         *     palette_id_to_use,
         *     Object Number (if > 0 determines if more data to process),
         *     object_id_ref (2 bytes),
         *     window_id_ref,
         *     composition_flag (0x80 - object cropped, 0x40 - object forced)
         */
        buf += 7;

        x = bytestream_get_be16(&buf);
        y = bytestream_get_be16(&buf);

        /* TODO If cropping, cropping_x, cropping_y, cropping_width, cropping_height (all 2 bytes).*/

        dprintf(avctx, "Subtitle Placement x=%d, y=%d\n", x, y);

        if (x > ctx->presentation.video_w || y > ctx->presentation.video_h) {
            av_log(avctx, AV_LOG_ERROR, "Subtitle out of video bounds. x = %d, y = %d, video width = %d, video height = %d.\n",
                   x, y, ctx->presentation.video_w, ctx->presentation.video_h);
            x = 0; y = 0;
        }

        /* Fill in dimensions */
        ctx->presentation.x = x;
        ctx->presentation.y = y;
    } else if (block == 0x00) {
        /* TODO: Blank context as subtitle should not be displayed.
         *       If the subtitle is blanked now the subtitle is not
         *       on screen long enough to read, due to a delay in
         *       initial display timing.
         */
    }
}

/**
 * Parses the display segment packet.
 *
 * The display segment controls the updating of the display.
 *
 * @param avctx contains the current codec context
 * @param data pointer to the data pertaining the subtitle to display
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @return number of subtitles emitted (1), or 0 on failure
 * @todo TODO: Fix start time, relies on correct PTS, currently too late
 *
 * @todo TODO: Fix end time, normally cleared by a second display
 * @todo       segment, which is currently ignored as it clears
 * @todo       the subtitle too early.
 */
static int display_end_segment(AVCodecContext *avctx, void *data,
                               const uint8_t *buf, int buf_size)
{
    AVSubtitle    *sub = data;
    PGSSubContext *ctx = avctx->priv_data;

    /*
     * The end display time is a timeout value and is only reached
     * if the next subtitle is later then timeout or subtitle has
     * not been cleared by a subsequent empty display command.
     */

    memset(sub, 0, sizeof(*sub));
    sub->start_display_time = 0;
    sub->end_display_time   = 20000;
    sub->format             = 0;

    /* Allocation failures must not be dereferenced; report "no subtitle". */
    sub->rects = av_mallocz(sizeof(*sub->rects));
    if (!sub->rects)
        return 0;
    sub->rects[0] = av_mallocz(sizeof(*sub->rects[0]));
    if (!sub->rects[0]) {
        av_freep(&sub->rects);
        return 0;
    }
    sub->num_rects = 1;

    sub->rects[0]->x    = ctx->presentation.x;
    sub->rects[0]->y    = ctx->presentation.y;
    sub->rects[0]->w    = ctx->picture.w;
    sub->rects[0]->h    = ctx->picture.h;
    sub->rects[0]->type = SUBTITLE_BITMAP;

    /* Process bitmap */
    sub->rects[0]->pict.linesize[0] = ctx->picture.w;

    if (ctx->picture.rle)
        if (decode_rle(avctx, sub, ctx->picture.rle, ctx->picture.rle_data_len) < 0)
            return 0;

    /* Allocate memory for colors */
    sub->rects[0]->nb_colors    = 256;
    sub->rects[0]->pict.data[1] = av_mallocz(AVPALETTE_SIZE);
    if (!sub->rects[0]->pict.data[1])
        return 0;

    memcpy(sub->rects[0]->pict.data[1], ctx->clut, sub->rects[0]->nb_colors * sizeof(uint32_t));

    return 1;
}

static int decode(AVCodecContext *avctx, void *data, int *data_size,
                  AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;

    const uint8_t *buf_end;
    uint8_t       segment_type;
    int           segment_length;

#ifdef DEBUG_PACKET_CONTENTS
    int i;

    av_log(avctx, AV_LOG_INFO, "PGS sub packet:\n");

    for (i = 0; i < buf_size; i++) {
        av_log(avctx, AV_LOG_INFO, "%02x ", buf[i]);
        if (i % 16 == 15)
            av_log(avctx, AV_LOG_INFO, "\n");
    }

    if (i & 15)
        av_log(avctx, AV_LOG_INFO, "\n");
#endif

    *data_size = 0;

    /* Ensure that we have received at a least a segment code and segment length */
    if (buf_size < 3)
        return -1;

    buf_end = buf + buf_size;

    /* Step through buffer to identify segments;
     * require a full 3-byte header so truncated packets cannot be overread. */
    while (buf_end - buf >= 3) {
        segment_type   = bytestream_get_byte(&buf);
        segment_length = bytestream_get_be16(&buf);

        dprintf(avctx, "Segment Length %d, Segment Type %x\n", segment_length, segment_type);

        if (segment_type != DISPLAY_SEGMENT && segment_length > buf_end - buf)
            break;

        switch (segment_type) {
        case PALETTE_SEGMENT:
            parse_palette_segment(avctx, buf, segment_length);
            break;
        case PICTURE_SEGMENT:
            parse_picture_segment(avctx, buf, segment_length);
            break;
        case PRESENTATION_SEGMENT:
            parse_presentation_segment(avctx, buf, segment_length);
            break;
        case WINDOW_SEGMENT:
            /*
             * Window Segment Structure (No new information provided):
             *     2 bytes: Unkown,
             *     2 bytes: X position of subtitle,
             *     2 bytes: Y position of subtitle,
             *     2 bytes: Width of subtitle,
             *     2 bytes: Height of subtitle.
             */
            break;
        case DISPLAY_SEGMENT:
            *data_size = display_end_segment(avctx, data, buf, segment_length);
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unknown subtitle segment type 0x%x, length %d\n",
                   segment_type, segment_length);
            break;
        }

        buf += segment_length;
    }

    return buf_size;
}

AVCodec pgssub_decoder = {
    "pgssub",
    AVMEDIA_TYPE_SUBTITLE,
    CODEC_ID_HDMV_PGS_SUBTITLE,
    sizeof(PGSSubContext),
    init_decoder,
    NULL,
    close_decoder,
    decode,
    .long_name = NULL_IF_CONFIG_SMALL("HDMV Presentation Graphic Stream subtitles"),
};
123linslouis-android-video-cutter
jni/libavcodec/pgssubdec.c
C
asf20
14,194
/*
 * Rate control for video encoders
 *
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Rate control for video encoders.
 */

#include "libavutil/intmath.h"
#include "avcodec.h"
#include "dsputil.h"
#include "ratecontrol.h"
#include "mpegvideo.h"
#include "eval.h"

#undef NDEBUG // Always check asserts, the speed effect is far too small to disable them.
#include <assert.h>

#ifndef M_E
#define M_E 2.718281828
#endif

static int init_pass2(MpegEncContext *s);
static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_factor, int frame_num);

/**
 * Write the per-frame statistics line for this frame into
 * avctx->stats_out (the pass-1 log consumed by ff_rate_control_init()
 * in pass 2; the sscanf format there must stay in sync with this).
 */
void ff_write_pass1_stats(MpegEncContext *s){
    snprintf(s->avctx->stats_out, 256, "in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%d var:%d icount:%d skipcount:%d hbits:%d;\n",
            s->current_picture_ptr->display_picture_number, s->current_picture_ptr->coded_picture_number, s->pict_type,
            s->current_picture.quality, s->i_tex_bits, s->p_tex_bits, s->mv_bits, s->misc_bits,
            s->f_code, s->b_code, s->current_picture.mc_mb_var_sum, s->current_picture.mb_var_sum, s->i_count, s->skip_count, s->header_bits);
}

/**
 * Estimate the number of texture bits a frame would take at quantizer qp,
 * assuming bits scale inversely with qp (rce holds pass-1 measurements).
 */
static inline double qp2bits(RateControlEntry *rce, double qp){
    if(qp<=0.0){
        av_log(NULL, AV_LOG_ERROR, "qp<=0.0\n");
    }
    return rce->qscale * (double)(rce->i_tex_bits + rce->p_tex_bits+1)/ qp;
}

/**
 * Inverse of qp2bits(): estimate the quantizer needed to hit a bit budget.
 */
static inline double bits2qp(RateControlEntry *rce, double bits){
    if(bits<0.9){
        av_log(NULL, AV_LOG_ERROR, "bits<0.9\n");
    }
    return rce->qscale * (double)(rce->i_tex_bits + rce->p_tex_bits+1)/ bits;
}

/**
 * Initialize the rate control context: parse the rc_eq expression,
 * seed the per-picture-type predictors and, for pass 2, load the pass-1
 * statistics file and run init_pass2().
 * @return 0 on success, -1 on error
 */
int ff_rate_control_init(MpegEncContext *s)
{
    RateControlContext *rcc= &s->rc_context;
    int i;
    const char *error = NULL;
    /* Names usable inside the user-supplied rc_eq expression; the order
     * must match the const_values[] array built in get_qscale(). */
    static const char * const const_names[]={
        "PI",
        "E",
        "iTex",
        "pTex",
        "tex",
        "mv",
        "fCode",
        "iCount",
        "mcVar",
        "var",
        "isI",
        "isP",
        "isB",
        "avgQP",
        "qComp",
/*        "lastIQP",
        "lastPQP",
        "lastBQP",
        "nextNonBQP",*/
        "avgIITex",
        "avgPITex",
        "avgPPTex",
        "avgBPTex",
        "avgTex",
        NULL
    };
    static double (* const func1[])(void *, double)={
        (void *)bits2qp,
        (void *)qp2bits,
        NULL
    };
    static const char * const func1_names[]={
        "bits2qp",
        "qp2bits",
        NULL
    };
    emms_c();

    rcc->rc_eq_eval = ff_parse_expr(s->avctx->rc_eq ? s->avctx->rc_eq : "tex^qComp", const_names, func1, func1_names, NULL, NULL, &error);
    if (!rcc->rc_eq_eval) {
        av_log(s->avctx, AV_LOG_ERROR, "Error parsing rc_eq \"%s\": %s\n",
               s->avctx->rc_eq, error? error : "");
        return -1;
    }

    for(i=0; i<5; i++){
        rcc->pred[i].coeff= FF_QP2LAMBDA * 7.0;
        rcc->pred[i].count= 1.0;

        rcc->pred[i].decay= 0.4;
        rcc->i_cplx_sum [i]=
        rcc->p_cplx_sum [i]=
        rcc->mv_bits_sum[i]=
        rcc->qscale_sum [i]=
        rcc->frame_count[i]= 1; // 1 is better because of 1/0 and such
        rcc->last_qscale_for[i]=FF_QP2LAMBDA * 5;
    }
    rcc->buffer_index= s->avctx->rc_initial_buffer_occupancy;

    if(s->flags&CODEC_FLAG_PASS2){
        int i;
        char *p;

        /* find number of pics */
        p= s->avctx->stats_in;
        for(i=-1; p; i++){
            p= strchr(p+1, ';');
        }
        i+= s->max_b_frames;
        if(i<=0 || i>=INT_MAX / sizeof(RateControlEntry))
            return -1;
        /* NOTE(review): av_mallocz() result is used unchecked below — a
         * failed allocation would crash; consider a NULL check. */
        rcc->entry = av_mallocz(i*sizeof(RateControlEntry));
        rcc->num_entries= i;

        /* init all to skipped p frames (with b frames we might have a not encoded frame at the end FIXME) */
        for(i=0; i<rcc->num_entries; i++){
            RateControlEntry *rce= &rcc->entry[i];
            rce->pict_type= rce->new_pict_type=FF_P_TYPE;
            rce->qscale= rce->new_qscale=FF_QP2LAMBDA * 2;
            rce->misc_bits= s->mb_num + 10;
            rce->mb_var_sum= s->mb_num*100;
        }

        /* read stats */
        p= s->avctx->stats_in;
        for(i=0; i<rcc->num_entries - s->max_b_frames; i++){
            RateControlEntry *rce;
            int picture_number;
            int e;
            char *next;

            next= strchr(p, ';');
            if(next){
                (*next)=0; //sscanf in unbelievably slow on looong strings //FIXME copy / do not write
                next++;
            }
            e= sscanf(p, " in:%d ", &picture_number);

            assert(picture_number >= 0);
            assert(picture_number < rcc->num_entries);
            rce= &rcc->entry[picture_number];

            /* Must mirror the format written by ff_write_pass1_stats(). */
            e+=sscanf(p, " in:%*d out:%*d type:%d q:%f itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%d var:%d icount:%d skipcount:%d hbits:%d",
                   &rce->pict_type, &rce->qscale, &rce->i_tex_bits, &rce->p_tex_bits, &rce->mv_bits, &rce->misc_bits,
                   &rce->f_code, &rce->b_code, &rce->mc_mb_var_sum, &rce->mb_var_sum, &rce->i_count, &rce->skip_count, &rce->header_bits);
            if(e!=14){
                av_log(s->avctx, AV_LOG_ERROR, "statistics are damaged at line %d, parser out=%d\n", i, e);
                return -1;
            }

            p= next;
        }

        if(init_pass2(s) < 0) return -1;

        //FIXME maybe move to end
        if((s->flags&CODEC_FLAG_PASS2) && s->avctx->rc_strategy == FF_RC_STRATEGY_XVID) {
#if CONFIG_LIBXVID
            return ff_xvid_rate_control_init(s);
#else
            av_log(s->avctx, AV_LOG_ERROR, "Xvid ratecontrol requires libavcodec compiled with Xvid support.\n");
            return -1;
#endif
        }
    }

    if(!(s->flags&CODEC_FLAG_PASS2)){

        /* Accumulators start at a small epsilon to avoid divisions by zero. */
        rcc->short_term_qsum=0.001;
        rcc->short_term_qcount=0.001;

        rcc->pass1_rc_eq_output_sum= 0.001;
        rcc->pass1_wanted_bits=0.001;

        if(s->avctx->qblur > 1.0){
            av_log(s->avctx, AV_LOG_ERROR, "qblur too large\n");
            return -1;
        }
        /* init stuff with the user specified complexity */
        if(s->avctx->rc_initial_cplx){
            /* Simulate 60*30 frames of the given complexity to pre-warm
             * the per-type complexity sums and the wanted-bits counter. */
            for(i=0; i<60*30; i++){
                double bits= s->avctx->rc_initial_cplx * (i/10000.0 + 1.0)*s->mb_num;
                RateControlEntry rce;

                if     (i%((s->gop_size+3)/4)==0) rce.pict_type= FF_I_TYPE;
                else if(i%(s->max_b_frames+1))    rce.pict_type= FF_B_TYPE;
                else                              rce.pict_type= FF_P_TYPE;

                rce.new_pict_type= rce.pict_type;
                rce.mc_mb_var_sum= bits*s->mb_num/100000;
                rce.mb_var_sum   = s->mb_num;
                rce.qscale   = FF_QP2LAMBDA * 2;
                rce.f_code   = 2;
                rce.b_code   = 1;
                rce.misc_bits= 1;

                if(s->pict_type== FF_I_TYPE){
                    rce.i_count   = s->mb_num;
                    rce.i_tex_bits= bits;
                    rce.p_tex_bits= 0;
                    rce.mv_bits= 0;
                }else{
                    rce.i_count   = 0; //FIXME we do know this approx
                    rce.i_tex_bits= 0;
                    rce.p_tex_bits= bits*0.9;
                    rce.mv_bits= bits*0.1;
                }
                rcc->i_cplx_sum [rce.pict_type] += rce.i_tex_bits*rce.qscale;
                rcc->p_cplx_sum [rce.pict_type] += rce.p_tex_bits*rce.qscale;
                rcc->mv_bits_sum[rce.pict_type] += rce.mv_bits;
                rcc->frame_count[rce.pict_type] ++;

                get_qscale(s, &rce, rcc->pass1_wanted_bits/rcc->pass1_rc_eq_output_sum, i);
                rcc->pass1_wanted_bits+= s->bit_rate/(1/av_q2d(s->avctx->time_base)); //FIXME misbehaves a little for variable fps
            }
        }

    }

    return 0;
}

/**
 * Free everything allocated by ff_rate_control_init().
 */
void ff_rate_control_uninit(MpegEncContext *s)
{
    RateControlContext *rcc= &s->rc_context;
    emms_c();

    ff_free_expr(rcc->rc_eq_eval);
    av_freep(&rcc->entry);

#if CONFIG_LIBXVID
    if((s->flags&CODEC_FLAG_PASS2) && s->avctx->rc_strategy == FF_RC_STRATEGY_XVID)
        ff_xvid_rate_control_uninit(s);
#endif
}

/**
 * Update the VBV buffer model after coding a frame of frame_size bits.
 * @return the number of stuffing bytes needed to avoid buffer overflow
 *         (0 if none)
 */
int ff_vbv_update(MpegEncContext *s, int frame_size){
    RateControlContext *rcc= &s->rc_context;
    const double fps= 1/av_q2d(s->avctx->time_base);
    const int buffer_size= s->avctx->rc_buffer_size;
    const double min_rate= s->avctx->rc_min_rate/fps;
    const double max_rate= s->avctx->rc_max_rate/fps;

//printf("%d %f %d %f %f\n", buffer_size, rcc->buffer_index, frame_size, min_rate, max_rate);
    if(buffer_size){
        int left;

        rcc->buffer_index-= frame_size;
        if(rcc->buffer_index < 0){
            av_log(s->avctx, AV_LOG_ERROR, "rc buffer underflow\n");
            rcc->buffer_index= 0;
        }

        /* Refill the buffer by one frame interval, clipped to the
         * min/max bitrate constraints. */
        left= buffer_size - rcc->buffer_index - 1;
        rcc->buffer_index += av_clip(left, min_rate, max_rate);

        if(rcc->buffer_index > buffer_size){
            int stuffing= ceil((rcc->buffer_index - buffer_size)/8);

            if(stuffing < 4 && s->codec_id == CODEC_ID_MPEG4)
                stuffing=4;
            rcc->buffer_index -= 8*stuffing;

            if(s->avctx->debug & FF_DEBUG_RC)
                av_log(s->avctx, AV_LOG_DEBUG, "stuffing %d bytes\n", stuffing);

            return stuffing;
        }
    }
    return 0;
}

/**
 * modifies the bitrate curve from pass1 for one frame
 */
static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_factor, int frame_num){
    RateControlContext *rcc= &s->rc_context;
    AVCodecContext *a= s->avctx;
    double q, bits;
    const int pict_type= rce->new_pict_type;
    const double mb_num= s->mb_num;
    int i;

    /* Values bound to the rc_eq expression; order must match the
     * const_names[] table in ff_rate_control_init(). */
    double const_values[]={
        M_PI,
        M_E,
        rce->i_tex_bits*rce->qscale,
        rce->p_tex_bits*rce->qscale,
        (rce->i_tex_bits + rce->p_tex_bits)*(double)rce->qscale,
        rce->mv_bits/mb_num,
        rce->pict_type == FF_B_TYPE ? (rce->f_code + rce->b_code)*0.5 : rce->f_code,
        rce->i_count/mb_num,
        rce->mc_mb_var_sum/mb_num,
        rce->mb_var_sum/mb_num,
        rce->pict_type == FF_I_TYPE,
        rce->pict_type == FF_P_TYPE,
        rce->pict_type == FF_B_TYPE,
        rcc->qscale_sum[pict_type] / (double)rcc->frame_count[pict_type],
        a->qcompress,
/*        rcc->last_qscale_for[FF_I_TYPE],
        rcc->last_qscale_for[FF_P_TYPE],
        rcc->last_qscale_for[FF_B_TYPE],
        rcc->next_non_b_qscale,*/
        rcc->i_cplx_sum[FF_I_TYPE] / (double)rcc->frame_count[FF_I_TYPE],
        rcc->i_cplx_sum[FF_P_TYPE] / (double)rcc->frame_count[FF_P_TYPE],
        rcc->p_cplx_sum[FF_P_TYPE] / (double)rcc->frame_count[FF_P_TYPE],
        rcc->p_cplx_sum[FF_B_TYPE] / (double)rcc->frame_count[FF_B_TYPE],
        (rcc->i_cplx_sum[pict_type] + rcc->p_cplx_sum[pict_type]) / (double)rcc->frame_count[pict_type],
        0
    };

    bits= ff_eval_expr(rcc->rc_eq_eval, const_values, rce);
    if (isnan(bits)) {
        av_log(s->avctx, AV_LOG_ERROR, "Error evaluating rc_eq \"%s\"\n", s->avctx->rc_eq);
        return -1;
    }

    rcc->pass1_rc_eq_output_sum+= bits;
    bits*=rate_factor;
    if(bits<0.0) bits=0.0;
    bits+= 1.0; //avoid 1/0 issues

    /* user override */
    for(i=0; i<s->avctx->rc_override_count; i++){
        RcOverride *rco= s->avctx->rc_override;
        if(rco[i].start_frame > frame_num) continue;
        if(rco[i].end_frame   < frame_num) continue;

        if(rco[i].qscale)
            bits= qp2bits(rce, rco[i].qscale); //FIXME move at end to really force it?
        else
            bits*= rco[i].quality_factor;
    }

    q= bits2qp(rce, bits);

    /* I/B difference */
    if     (pict_type==FF_I_TYPE && s->avctx->i_quant_factor<0.0)
        q= -q*s->avctx->i_quant_factor + s->avctx->i_quant_offset;
    else if(pict_type==FF_B_TYPE && s->avctx->b_quant_factor<0.0)
        q= -q*s->avctx->b_quant_factor + s->avctx->b_quant_offset;
    if(q<1) q=1;

    return q;
}

/**
 * Apply the I/B-vs-P quantizer relation and limit the quantizer change
 * to max_qdiff relative to the previous frame of the same type.
 * Also records q as last_qscale_for[pict_type].
 */
static double get_diff_limited_q(MpegEncContext *s, RateControlEntry *rce, double q){
    RateControlContext *rcc= &s->rc_context;
    AVCodecContext *a= s->avctx;
    const int pict_type= rce->new_pict_type;
    const double last_p_q    = rcc->last_qscale_for[FF_P_TYPE];
    const double last_non_b_q= rcc->last_qscale_for[rcc->last_non_b_pict_type];

    if     (pict_type==FF_I_TYPE && (a->i_quant_factor>0.0 || rcc->last_non_b_pict_type==FF_P_TYPE))
        q= last_p_q    *FFABS(a->i_quant_factor) + a->i_quant_offset;
    else if(pict_type==FF_B_TYPE && a->b_quant_factor>0.0)
        q= last_non_b_q*    a->b_quant_factor + a->b_quant_offset;
    if(q<1) q=1;

    /* last qscale / qdiff stuff */
    if(rcc->last_non_b_pict_type==pict_type || pict_type!=FF_I_TYPE){
        double last_q= rcc->last_qscale_for[pict_type];
        const int maxdiff= FF_QP2LAMBDA * a->max_qdiff;

        if     (q > last_q + maxdiff) q= last_q + maxdiff;
        else if(q < last_q - maxdiff) q= last_q - maxdiff;
    }

    rcc->last_qscale_for[pict_type]= q; //Note we cannot do that after blurring

    if(pict_type!=FF_B_TYPE)
        rcc->last_non_b_pict_type= pict_type;

    return q;
}

/**
 * gets the qmin & qmax for pict_type
 */
static void get_qminmax(int *qmin_ret, int *qmax_ret, MpegEncContext *s, int pict_type){
    int qmin= s->avctx->lmin;
    int qmax= s->avctx->lmax;

    assert(qmin <= qmax);

    if(pict_type==FF_B_TYPE){
        qmin= (int)(qmin*FFABS(s->avctx->b_quant_factor)+s->avctx->b_quant_offset + 0.5);
        qmax= (int)(qmax*FFABS(s->avctx->b_quant_factor)+s->avctx->b_quant_offset + 0.5);
    }else if(pict_type==FF_I_TYPE){
        qmin= (int)(qmin*FFABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5);
        qmax= (int)(qmax*FFABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5);
    }

    qmin= av_clip(qmin, 1, FF_LAMBDA_MAX);
    qmax= av_clip(qmax, 1, FF_LAMBDA_MAX);

    if(qmax<qmin) qmax= qmin;

    *qmin_ret= qmin;
    *qmax_ret= qmax;
}

/**
 * Adjust q for the qmod modulation, VBV buffer fullness and
 * clip it into [qmin,qmax] (soft-clipped when rc_qsquish != 0).
 */
static double modify_qscale(MpegEncContext *s, RateControlEntry *rce, double q, int frame_num){
    RateControlContext *rcc= &s->rc_context;
    int qmin, qmax;
    const int pict_type= rce->new_pict_type;
    const double buffer_size= s->avctx->rc_buffer_size;
    const double fps= 1/av_q2d(s->avctx->time_base);
    const double min_rate= s->avctx->rc_min_rate / fps;
    const double max_rate= s->avctx->rc_max_rate / fps;

    get_qminmax(&qmin, &qmax, s, pict_type);

    /* modulation */
    if(s->avctx->rc_qmod_freq && frame_num%s->avctx->rc_qmod_freq==0 && pict_type==FF_P_TYPE)
        q*= s->avctx->rc_qmod_amp;

//printf("q:%f\n", q);
    /* buffer overflow/underflow protection */
    if(buffer_size){
        double expected_size= rcc->buffer_index;
        double q_limit;

        if(min_rate){
            /* Buffer getting too full (risk of forced stuffing): lower q. */
            double d= 2*(buffer_size - expected_size)/buffer_size;
            if(d>1.0) d=1.0;
            else if(d<0.0001) d=0.0001;
            q*= pow(d, 1.0/s->avctx->rc_buffer_aggressivity);

            q_limit= bits2qp(rce, FFMAX((min_rate - buffer_size + rcc->buffer_index) * s->avctx->rc_min_vbv_overflow_use, 1));
            if(q > q_limit){
                if(s->avctx->debug&FF_DEBUG_RC){
                    av_log(s->avctx, AV_LOG_DEBUG, "limiting QP %f -> %f\n", q, q_limit);
                }
                q= q_limit;
            }
        }

        if(max_rate){
            /* Buffer getting too empty (risk of underflow): raise q. */
            double d= 2*expected_size/buffer_size;
            if(d>1.0) d=1.0;
            else if(d<0.0001) d=0.0001;
            q/= pow(d, 1.0/s->avctx->rc_buffer_aggressivity);

            q_limit= bits2qp(rce, FFMAX(rcc->buffer_index * s->avctx->rc_max_available_vbv_use, 1));
            if(q < q_limit){
                if(s->avctx->debug&FF_DEBUG_RC){
                    av_log(s->avctx, AV_LOG_DEBUG, "limiting QP %f -> %f\n", q, q_limit);
                }
                q= q_limit;
            }
        }
    }
//printf("q:%f max:%f min:%f size:%f index:%d bits:%f agr:%f\n", q,max_rate, min_rate, buffer_size, rcc->buffer_index, bits, s->avctx->rc_buffer_aggressivity);
    if(s->avctx->rc_qsquish==0.0 || qmin==qmax){
        if     (q<qmin) q=qmin;
        else if(q>qmax) q=qmax;
    }else{
        /* Smooth sigmoid mapping of log(q) into [log(qmin), log(qmax)]. */
        double min2= log(qmin);
        double max2= log(qmax);

        q= log(q);
        q= (q - min2)/(max2-min2) - 0.5;
        q*= -4.0;
        q= 1.0/(1.0 + exp(q));
        q= q*(max2-min2) + min2;

        q= exp(q);
    }

    return q;
}

//----------------------------------
// 1 Pass Code

/** Predict the frame size for quantizer q and variance var. */
static double predict_size(Predictor *p, double q, double var)
{
     return p->coeff*var / (q*p->count);
}

/*
static double predict_qp(Predictor *p, double size, double var)
{
//printf("coeff:%f, count:%f, var:%f, size:%f//\n", p->coeff, p->count, var, size);
     return p->coeff*var / (size*p->count);
}
*/

/** Exponentially-decayed update of the size predictor with a new sample. */
static void update_predictor(Predictor *p, double q, double var, double size)
{
    double new_coeff= size*q / (var + 1);
    if(var<10) return;

    p->count*= p->decay;
    p->coeff*= p->decay;
    p->count++;
    p->coeff+= new_coeff;
}

/**
 * Compute per-macroblock lambdas around the frame quantizer q, applying
 * luminance/darkness/complexity/border masking, and store them in
 * s->lambda_table[].
 */
static void adaptive_quantization(MpegEncContext *s, double q){
    int i;
    const float lumi_masking= s->avctx->lumi_masking / (128.0*128.0);
    const float dark_masking= s->avctx->dark_masking / (128.0*128.0);
    const float temp_cplx_masking= s->avctx->temporal_cplx_masking;
    const float spatial_cplx_masking = s->avctx->spatial_cplx_masking;
    const float p_masking = s->avctx->p_masking;
    const float border_masking = s->avctx->border_masking;
    float bits_sum= 0.0;
    float cplx_sum= 0.0;
    float cplx_tab[s->mb_num];
    float bits_tab[s->mb_num];
    const int qmin= s->avctx->mb_lmin;
    const int qmax= s->avctx->mb_lmax;
    Picture * const pic= &s->current_picture;
    const int mb_width = s->mb_width;
    const int mb_height = s->mb_height;

    for(i=0; i<s->mb_num; i++){
        const int mb_xy= s->mb_index2xy[i];
        float temp_cplx= sqrt(pic->mc_mb_var[mb_xy]); //FIXME merge in pow()
        float spat_cplx= sqrt(pic->mb_var[mb_xy]);
        const int lumi= pic->mb_mean[mb_xy];
        float bits, cplx, factor;
        int mb_x = mb_xy % s->mb_stride;
        int mb_y = mb_xy / s->mb_stride;
        int mb_distance;
        float mb_factor = 0.0;
#if 0
        if(spat_cplx < q/3) spat_cplx= q/3; //FIXME finetune
        if(temp_cplx < q/3) temp_cplx= q/3; //FIXME finetune
#endif
        if(spat_cplx < 4) spat_cplx= 4; //FIXME finetune
        if(temp_cplx < 4) temp_cplx= 4; //FIXME finetune

        if((s->mb_type[mb_xy]&CANDIDATE_MB_TYPE_INTRA)){//FIXME hq mode
            cplx= spat_cplx;
            factor= 1.0 + p_masking;
        }else{
            cplx= temp_cplx;
            factor= pow(temp_cplx, - temp_cplx_masking);
        }
        factor*=pow(spat_cplx, - spatial_cplx_masking);

        if(lumi>127)
            factor*= (1.0 - (lumi-128)*(lumi-128)*lumi_masking);
        else
            factor*= (1.0 - (lumi-128)*(lumi-128)*dark_masking);

        /* Border masking: ramp the factor down toward the outer 1/5 of
         * the frame on each side. */
        if(mb_x < mb_width/5){
            mb_distance = mb_width/5 - mb_x;
            mb_factor = (float)mb_distance / (float)(mb_width/5);
        }else if(mb_x > 4*mb_width/5){
            mb_distance = mb_x - 4*mb_width/5;
            mb_factor = (float)mb_distance / (float)(mb_width/5);
        }
        if(mb_y < mb_height/5){
            mb_distance = mb_height/5 - mb_y;
            mb_factor = FFMAX(mb_factor, (float)mb_distance / (float)(mb_height/5));
        }else if(mb_y > 4*mb_height/5){
            mb_distance = mb_y - 4*mb_height/5;
            mb_factor = FFMAX(mb_factor, (float)mb_distance / (float)(mb_height/5));
        }

        factor*= 1.0 - border_masking*mb_factor;

        if(factor<0.00001) factor= 0.00001;

        bits= cplx*factor;
        cplx_sum+= cplx;
        bits_sum+= bits;
        cplx_tab[i]= cplx;
        bits_tab[i]= bits;
    }

    /* handle qmin/qmax clipping */
    if(s->flags&CODEC_FLAG_NORMALIZE_AQP){
        float factor= bits_sum/cplx_sum;
        for(i=0; i<s->mb_num; i++){
            float newq= q*cplx_tab[i]/bits_tab[i];
            newq*= factor;

            if     (newq > qmax){
                bits_sum -= bits_tab[i];
                cplx_sum -= cplx_tab[i]*q/qmax;
            }
            else if(newq < qmin){
                bits_sum -= bits_tab[i];
                cplx_sum -= cplx_tab[i]*q/qmin;
            }
        }
        if(bits_sum < 0.001) bits_sum= 0.001;
        if(cplx_sum < 0.001) cplx_sum= 0.001;
    }

    for(i=0; i<s->mb_num; i++){
        const int mb_xy= s->mb_index2xy[i];
        float newq= q*cplx_tab[i]/bits_tab[i];
        int intq;

        if(s->flags&CODEC_FLAG_NORMALIZE_AQP){
            newq*= bits_sum/cplx_sum;
        }

        intq= (int)(newq + 0.5);

        if(intq > qmax) intq= qmax;
        else if(intq < qmin) intq= qmin;
//if(i%s->mb_width==0) printf("\n");
//printf("%2d%3d ", intq, ff_sqrt(s->mc_mb_var[i]));
        s->lambda_table[mb_xy]= intq;
    }
}

/**
 * Load the motion-vector range codes for the current frame from the
 * pass-2 statistics table.
 */
void ff_get_2pass_fcode(MpegEncContext *s){
    RateControlContext *rcc= &s->rc_context;
    int picture_number= s->picture_number;
    RateControlEntry *rce;

    rce= &rcc->entry[picture_number];
    s->f_code= rce->f_code;
    s->b_code= rce->b_code;
}

//FIXME rd or at least approx for dquant

/**
 * Pick the quantizer (lambda scale) for the current frame.
 * In pass 2 this reads the precomputed qscale from init_pass2() and only
 * applies bitrate compensation; in pass 1 it predicts the frame from the
 * per-type predictors and evaluates rc_eq. With dry_run set, no internal
 * state is updated.
 * @return the chosen quantizer, or -1 on rc_eq evaluation error
 */
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
{
    float q;
    int qmin, qmax;
    float br_compensation;
    double diff;
    double short_term_q;
    double fps;
    int picture_number= s->picture_number;
    int64_t wanted_bits;
    RateControlContext *rcc= &s->rc_context;
    AVCodecContext *a= s->avctx;
    RateControlEntry local_rce, *rce;
    double bits;
    double rate_factor;
    int var;
    const int pict_type= s->pict_type;
    Picture * const pic= &s->current_picture;
    emms_c();

#if CONFIG_LIBXVID
    if((s->flags&CODEC_FLAG_PASS2) && s->avctx->rc_strategy == FF_RC_STRATEGY_XVID)
        return ff_xvid_rate_estimate_qscale(s, dry_run);
#endif

    get_qminmax(&qmin, &qmax, s, pict_type);

    fps= 1/av_q2d(s->avctx->time_base);
//printf("input_pic_num:%d pic_num:%d frame_rate:%d\n", s->input_picture_number, s->picture_number, s->frame_rate);
    /* update predictors */
    if(picture_number>2 && !dry_run){
        const int last_var= s->last_pict_type == FF_I_TYPE ? rcc->last_mb_var_sum : rcc->last_mc_mb_var_sum;
        update_predictor(&rcc->pred[s->last_pict_type], rcc->last_qscale, sqrt(last_var), s->frame_bits);
    }

    if(s->flags&CODEC_FLAG_PASS2){
        assert(picture_number>=0);
        assert(picture_number<rcc->num_entries);
        rce= &rcc->entry[picture_number];
        wanted_bits= rce->expected_bits;
    }else{
        Picture *dts_pic;
        rce= &local_rce;

        //FIXME add a dts field to AVFrame and ensure its set and use it here instead of reordering
        //but the reordering is simpler for now until h.264 b pyramid must be handeld
        if(s->pict_type == FF_B_TYPE || s->low_delay)
            dts_pic= s->current_picture_ptr;
        else
            dts_pic= s->last_picture_ptr;

//if(dts_pic)
//            av_log(NULL, AV_LOG_ERROR, "%Ld %Ld %Ld %d\n", s->current_picture_ptr->pts, s->user_specified_pts, dts_pic->pts, picture_number);

        if(!dts_pic || dts_pic->pts == AV_NOPTS_VALUE)
            wanted_bits= (uint64_t)(s->bit_rate*(double)picture_number/fps);
        else
            wanted_bits= (uint64_t)(s->bit_rate*(double)dts_pic->pts/fps);
    }

    /* Compensate for over/undershoot so far relative to the target. */
    diff= s->total_bits - wanted_bits;
    br_compensation= (a->bit_rate_tolerance - diff)/a->bit_rate_tolerance;
    if(br_compensation<=0.0) br_compensation=0.001;

    var= pict_type == FF_I_TYPE ? pic->mb_var_sum : pic->mc_mb_var_sum;

    short_term_q = 0; /* avoid warning */
    if(s->flags&CODEC_FLAG_PASS2){
        if(pict_type!=FF_I_TYPE)
            assert(pict_type == rce->new_pict_type);

        q= rce->new_qscale / br_compensation;
//printf("%f %f %f last:%d var:%d type:%d//\n", q, rce->new_qscale, br_compensation, s->frame_bits, var, pict_type);
    }else{
        rce->pict_type=
        rce->new_pict_type= pict_type;
        rce->mc_mb_var_sum= pic->mc_mb_var_sum;
        rce->mb_var_sum   = pic-> mb_var_sum;
        rce->qscale   = FF_QP2LAMBDA * 2;
        rce->f_code   = s->f_code;
        rce->b_code   = s->b_code;
        rce->misc_bits= 1;

        bits= predict_size(&rcc->pred[pict_type], rce->qscale, sqrt(var));
        if(pict_type== FF_I_TYPE){
            rce->i_count   = s->mb_num;
            rce->i_tex_bits= bits;
            rce->p_tex_bits= 0;
            rce->mv_bits= 0;
        }else{
            rce->i_count   = 0; //FIXME we do know this approx
            rce->i_tex_bits= 0;
            rce->p_tex_bits= bits*0.9;
            rce->mv_bits= bits*0.1;
        }
        rcc->i_cplx_sum [pict_type] += rce->i_tex_bits*rce->qscale;
        rcc->p_cplx_sum [pict_type] += rce->p_tex_bits*rce->qscale;
        rcc->mv_bits_sum[pict_type] += rce->mv_bits;
        rcc->frame_count[pict_type] ++;

        bits= rce->i_tex_bits + rce->p_tex_bits;
        rate_factor= rcc->pass1_wanted_bits/rcc->pass1_rc_eq_output_sum * br_compensation;

        q= get_qscale(s, rce, rate_factor, picture_number);
        if (q < 0)
            return -1;

        assert(q>0.0);
//printf("%f ", q);
        q= get_diff_limited_q(s, rce, q);
//printf("%f ", q);
        assert(q>0.0);

        if(pict_type==FF_P_TYPE || s->intra_only){ //FIXME type dependent blur like in 2-pass
            rcc->short_term_qsum*=a->qblur;
            rcc->short_term_qcount*=a->qblur;

            rcc->short_term_qsum+= q;
            rcc->short_term_qcount++;
//printf("%f ", q);
            q= short_term_q= rcc->short_term_qsum/rcc->short_term_qcount;
//printf("%f ", q);
        }
        assert(q>0.0);

        q= modify_qscale(s, rce, q, picture_number);

        rcc->pass1_wanted_bits+= s->bit_rate/fps;

        assert(q>0.0);
    }

    if(s->avctx->debug&FF_DEBUG_RC){
        av_log(s->avctx, AV_LOG_DEBUG, "%c qp:%d<%2.1f<%d %d want:%d total:%d comp:%f st_q:%2.2f size:%d var:%d/%d br:%d fps:%d\n",
        av_get_pict_type_char(pict_type), qmin, q, qmax, picture_number, (int)wanted_bits/1000, (int)s->total_bits/1000,
        br_compensation, short_term_q, s->frame_bits, pic->mb_var_sum, pic->mc_mb_var_sum, s->bit_rate/1000, (int)fps
        );
    }

    if     (q<qmin) q=qmin;
    else if(q>qmax) q=qmax;

    if(s->adaptive_quant)
        adaptive_quantization(s, q);
    else
        q= (int)(q + 0.5);

    if(!dry_run){
        rcc->last_qscale= q;
        rcc->last_mc_mb_var_sum= pic->mc_mb_var_sum;
        rcc->last_mb_var_sum= pic->mb_var_sum;
    }
#if 0
{
    static int mvsum=0, texsum=0;
    mvsum += s->mv_bits;
    texsum += s->i_tex_bits + s->p_tex_bits;
    printf("%d %d//\n\n", mvsum, texsum);
}
#endif
    return q;
}

//----------------------------------------------
// 2-Pass code

/**
 * Pass-2 initialization: binary-search a rate_factor so that the blurred,
 * VBV-constrained per-frame quantizers fit the available bit budget.
 * (Continues beyond this chunk.)
 */
static int init_pass2(MpegEncContext *s)
{
    RateControlContext *rcc= &s->rc_context;
    AVCodecContext *a= s->avctx;
    int i, toobig;
    double fps= 1/av_q2d(s->avctx->time_base);
    double complexity[5]={0,0,0,0,0};   // aproximate bits at quant=1
    uint64_t const_bits[5]={0,0,0,0,0}; // quantizer independent bits
    uint64_t all_const_bits;
    uint64_t all_available_bits= (uint64_t)(s->bit_rate*(double)rcc->num_entries/fps);
    double rate_factor=0;
    double step;
    //int last_i_frame=-10000000;
    const int filter_size= (int)(a->qblur*4) | 1;
    double expected_bits;
    double *qscale, *blurred_qscale, qscale_sum;

    /* find complexity & const_bits & decide the pict_types */
    for(i=0; i<rcc->num_entries; i++){
        RateControlEntry *rce= &rcc->entry[i];

        rce->new_pict_type= rce->pict_type;
        rcc->i_cplx_sum [rce->pict_type] += rce->i_tex_bits*rce->qscale;
        rcc->p_cplx_sum [rce->pict_type] += rce->p_tex_bits*rce->qscale;
        rcc->mv_bits_sum[rce->pict_type] += rce->mv_bits;
        rcc->frame_count[rce->pict_type] ++;

        complexity[rce->new_pict_type]+= (rce->i_tex_bits+ rce->p_tex_bits)*(double)rce->qscale;
        const_bits[rce->new_pict_type]+= rce->mv_bits + rce->misc_bits;
    }
    all_const_bits= const_bits[FF_I_TYPE] + const_bits[FF_P_TYPE] + const_bits[FF_B_TYPE];

    if(all_available_bits < all_const_bits){
        av_log(s->avctx, AV_LOG_ERROR, "requested bitrate is too low\n");
        return -1;
    }

    qscale= av_malloc(sizeof(double)*rcc->num_entries);
    blurred_qscale= av_malloc(sizeof(double)*rcc->num_entries);
    toobig = 0;

    for(step=256*256; step>0.0000001; step*=0.5){
        expected_bits=0;
        rate_factor+= step;

        rcc->buffer_index= s->avctx->rc_buffer_size/2;

        /* find qscale */
        for(i=0; i<rcc->num_entries; i++){
            qscale[i]= get_qscale(s, &rcc->entry[i], rate_factor, i);
        }
        assert(filter_size%2==1);

        /* fixed I/B QP relative to P mode */
        for(i=rcc->num_entries-1; i>=0; i--){
            RateControlEntry *rce= &rcc->entry[i];

            qscale[i]= get_diff_limited_q(s, rce, qscale[i]);
        }

        /* smooth curve */
        for(i=0; i<rcc->num_entries; i++){
            RateControlEntry *rce= &rcc->entry[i];
            const int pict_type= rce->new_pict_type;
            int j;
            double q=0.0, sum=0.0;

            for(j=0; j<filter_size; j++){
                int index= i+j-filter_size/2;
                double d= index-i;
                double coeff= a->qblur==0 ? 1.0 : exp(-d*d/(a->qblur * a->qblur));

                if(index < 0 || index >= rcc->num_entries) continue;
                if(pict_type != rcc->entry[index].new_pict_type) continue;
                q+= qscale[index] * coeff;
                sum+= coeff;
            }
            blurred_qscale[i]= q/sum;
        }

        /* find expected bits */
        for(i=0; i<rcc->num_entries; i++){
            RateControlEntry *rce= &rcc->entry[i];
            double bits;
            rce->new_qscale= modify_qscale(s, rce, blurred_qscale[i], i);
            bits= qp2bits(rce, rce->new_qscale) + rce->mv_bits + rce->misc_bits;
//printf("%d %f\n", rce->new_bits, blurred_qscale[i]);
            bits += 8*ff_vbv_update(s, bits);

            rce->expected_bits= expected_bits;
            expected_bits += bits;
        }

/* av_log(s->avctx, AV_LOG_INFO, "expected_bits: %f all_available_bits: %d rate_factor: %f\n", expected_bits, (int)all_available_bits, rate_factor); */
        if(expected_bits > all_available_bits) {
            rate_factor-= step;
            ++toobig;
        }
    }
    av_free(qscale);
    av_free(blurred_qscale);

    /* check bitrate calculations and print info */
    qscale_sum = 0.0;
    for(i=0; i<rcc->num_entries; i++){
/* av_log(s->avctx, AV_LOG_DEBUG, "[lavc rc] entry[%d].new_qscale = %.3f  qp = %.3f\n", i, 
rcc->entry[i].new_qscale, rcc->entry[i].new_qscale / FF_QP2LAMBDA); */ qscale_sum += av_clip(rcc->entry[i].new_qscale / FF_QP2LAMBDA, s->avctx->qmin, s->avctx->qmax); } assert(toobig <= 40); av_log(s->avctx, AV_LOG_DEBUG, "[lavc rc] requested bitrate: %d bps expected bitrate: %d bps\n", s->bit_rate, (int)(expected_bits / ((double)all_available_bits/s->bit_rate))); av_log(s->avctx, AV_LOG_DEBUG, "[lavc rc] estimated target average qp: %.3f\n", (float)qscale_sum / rcc->num_entries); if (toobig == 0) { av_log(s->avctx, AV_LOG_INFO, "[lavc rc] Using all of requested bitrate is not " "necessary for this video with these parameters.\n"); } else if (toobig == 40) { av_log(s->avctx, AV_LOG_ERROR, "[lavc rc] Error: bitrate too low for this video " "with these parameters.\n"); return -1; } else if (fabs(expected_bits/all_available_bits - 1.0) > 0.01) { av_log(s->avctx, AV_LOG_ERROR, "[lavc rc] Error: 2pass curve failed to converge\n"); return -1; } return 0; }
123linslouis-android-video-cutter
jni/libavcodec/ratecontrol.c
C
asf20
32,786
/* * Micrsoft RLE Video Decoder * Copyright (C) 2003 the ffmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * MS RLE Video Decoder by Mike Melanson (melanson@pcisys.net) * For more information about the MS RLE format, visit: * http://www.pcisys.net/~melanson/codecs/ * * The MS RLE decoder outputs PAL8 colorspace data. * * Note that this decoder expects the palette colors from the end of the * BITMAPINFO header passed through palctrl. 
*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "avcodec.h" #include "dsputil.h" #include "msrledec.h" typedef struct MsrleContext { AVCodecContext *avctx; AVFrame frame; const unsigned char *buf; int size; } MsrleContext; static av_cold int msrle_decode_init(AVCodecContext *avctx) { MsrleContext *s = avctx->priv_data; s->avctx = avctx; switch (avctx->bits_per_coded_sample) { case 4: case 8: avctx->pix_fmt = PIX_FMT_PAL8; break; case 24: avctx->pix_fmt = PIX_FMT_BGR24; break; default: av_log(avctx, AV_LOG_ERROR, "unsupported bits per sample\n"); return -1; } s->frame.data[0] = NULL; return 0; } static int msrle_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MsrleContext *s = avctx->priv_data; int istride = FFALIGN(avctx->width*avctx->bits_per_coded_sample, 32) / 8; s->buf = buf; s->size = buf_size; s->frame.reference = 1; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if (avctx->reget_buffer(avctx, &s->frame)) { av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return -1; } if (s->avctx->palctrl) { /* make the palette available */ memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE); if (s->avctx->palctrl->palette_changed) { s->frame.palette_has_changed = 1; s->avctx->palctrl->palette_changed = 0; } } /* FIXME how to correctly detect RLE ??? 
*/ if (avctx->height * istride == avpkt->size) { /* assume uncompressed */ int linesize = avctx->width * avctx->bits_per_coded_sample / 8; uint8_t *ptr = s->frame.data[0]; uint8_t *buf = avpkt->data + (avctx->height-1)*istride; int i, j; for (i = 0; i < avctx->height; i++) { if (avctx->bits_per_coded_sample == 4) { for (j = 0; j < avctx->width - 1; j += 2) { ptr[j+0] = buf[j>>1] >> 4; ptr[j+1] = buf[j>>1] & 0xF; } if (avctx->width & 1) ptr[j+0] = buf[j>>1] >> 4; } else { memcpy(ptr, buf, linesize); } buf -= istride; ptr += s->frame.linesize[0]; } } else { ff_msrle_decode(avctx, (AVPicture*)&s->frame, avctx->bits_per_coded_sample, buf, buf_size); } *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; /* report that the buffer was completely consumed */ return buf_size; } static av_cold int msrle_decode_end(AVCodecContext *avctx) { MsrleContext *s = avctx->priv_data; /* release the last frame */ if (s->frame.data[0]) avctx->release_buffer(avctx, &s->frame); return 0; } AVCodec msrle_decoder = { "msrle", AVMEDIA_TYPE_VIDEO, CODEC_ID_MSRLE, sizeof(MsrleContext), msrle_decode_init, NULL, msrle_decode_end, msrle_decode_frame, CODEC_CAP_DR1, .long_name= NULL_IF_CONFIG_SMALL("Microsoft RLE"), };
123linslouis-android-video-cutter
jni/libavcodec/msrle.c
C
asf20
4,505
/* * Chinese AVS video (AVS1-P2, JiZhun profile) decoder. * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_CAVSDATA_H #define AVCODEC_CAVSDATA_H #include "cavs.h" const uint8_t ff_cavs_partition_flags[30] = { 0, //I_8X8 0, //P_SKIP 0, //P_16X16 SPLITH, //P_16X8 SPLITV, //P_8X16 SPLITH|SPLITV, //P_8X8 SPLITH|SPLITV, //B_SKIP SPLITH|SPLITV, //B_DIRECT 0, //B_FWD_16X16 0, //B_BWD_16X16 0, //B_SYM_16X16 FWD0|FWD1 |SPLITH, FWD0|FWD1 |SPLITV, BWD0|BWD1 |SPLITH, BWD0|BWD1 |SPLITV, FWD0|BWD1 |SPLITH, FWD0|BWD1 |SPLITV, BWD0|FWD1 |SPLITH, BWD0|FWD1 |SPLITV, FWD0|FWD1 |SYM1|SPLITH, FWD0|FWD1 |SYM1 |SPLITV, BWD0|FWD1 |SYM1|SPLITH, BWD0|FWD1 |SYM1 |SPLITV, FWD0|FWD1|SYM0 |SPLITH, FWD0|FWD1|SYM0 |SPLITV, FWD0|BWD1|SYM0 |SPLITH, FWD0|BWD1|SYM0 |SPLITV, FWD0|FWD1|SYM0|SYM1|SPLITH, FWD0|FWD1|SYM0|SYM1 |SPLITV, SPLITH|SPLITV, //B_8X8 = 29 }; const uint8_t ff_cavs_scan3x3[4] = {4,5,7,8}; const uint8_t ff_cavs_chroma_qp[64] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 32,33,34,35,36,37,38,39,40,41,42,42,43,43,44,44, 45,45,46,46,47,47,48,48,48,49,49,49,50,50,50,51 }; const uint8_t ff_cavs_dequant_shift[64] = { 14,14,14,14,14,14,14,14, 13,13,13,13,13,13,13,13, 
13,12,12,12,12,12,12,12, 11,11,11,11,11,11,11,11, 11,10,10,10,10,10,10,10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7 }; const uint16_t ff_cavs_dequant_mul[64] = { 32768,36061,38968,42495,46341,50535,55437,60424, 32932,35734,38968,42495,46177,50535,55109,59933, 65535,35734,38968,42577,46341,50617,55027,60097, 32809,35734,38968,42454,46382,50576,55109,60056, 65535,35734,38968,42495,46320,50515,55109,60076, 65535,35744,38968,42495,46341,50535,55099,60087, 65535,35734,38973,42500,46341,50535,55109,60097, 32771,35734,38965,42497,46341,50535,55109,60099 }; /** marks block as unavailable, i.e. out of picture or not yet decoded */ const cavs_vector ff_cavs_un_mv = {0,0,1,NOT_AVAIL}; /** marks block as "no prediction from this direction" e.g. forward motion vector in BWD partition */ const cavs_vector ff_cavs_dir_mv = {0,0,1,REF_DIR}; /** marks block as using intra prediction */ const cavs_vector ff_cavs_intra_mv = {0,0,1,REF_INTRA}; #define EOB 0,0,0 const struct dec_2dvlc ff_cavs_intra_dec[7] = { { { //level / run / table_inc { 1, 1, 1},{ -1, 1, 1},{ 1, 2, 1},{ -1, 2, 1},{ 1, 3, 1},{ -1, 3, 1}, { 1, 4, 1},{ -1, 4, 1},{ 1, 5, 1},{ -1, 5, 1},{ 1, 6, 1},{ -1, 6, 1}, { 1, 7, 1},{ -1, 7, 1},{ 1, 8, 1},{ -1, 8, 1},{ 1, 9, 1},{ -1, 9, 1}, { 1,10, 1},{ -1,10, 1},{ 1,11, 1},{ -1,11, 1},{ 2, 1, 2},{ -2, 1, 2}, { 1,12, 1},{ -1,12, 1},{ 1,13, 1},{ -1,13, 1},{ 1,14, 1},{ -1,14, 1}, { 1,15, 1},{ -1,15, 1},{ 2, 2, 2},{ -2, 2, 2},{ 1,16, 1},{ -1,16, 1}, { 1,17, 1},{ -1,17, 1},{ 3, 1, 3},{ -3, 1, 3},{ 1,18, 1},{ -1,18, 1}, { 1,19, 1},{ -1,19, 1},{ 2, 3, 2},{ -2, 3, 2},{ 1,20, 1},{ -1,20, 1}, { 1,21, 1},{ -1,21, 1},{ 2, 4, 2},{ -2, 4, 2},{ 1,22, 1},{ -1,22, 1}, { 2, 5, 2},{ -2, 5, 2},{ 1,23, 1},{ -1,23, 1},{ EOB } }, //level_add { 0, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,-1,-1,-1}, 2, //golomb_order 0, //inc_limit 23, //max_run },{ { //level / run { 1, 1, 0},{ -1, 1, 0},{ 1, 2, 0},{ -1, 2, 0},{ 2, 1, 1},{ -2, 1, 1}, { 1, 3, 0},{ 
-1, 3, 0},{ EOB },{ 1, 4, 0},{ -1, 4, 0},{ 1, 5, 0}, { -1, 5, 0},{ 1, 6, 0},{ -1, 6, 0},{ 3, 1, 2},{ -3, 1, 2},{ 2, 2, 1}, { -2, 2, 1},{ 1, 7, 0},{ -1, 7, 0},{ 1, 8, 0},{ -1, 8, 0},{ 1, 9, 0}, { -1, 9, 0},{ 2, 3, 1},{ -2, 3, 1},{ 4, 1, 2},{ -4, 1, 2},{ 1,10, 0}, { -1,10, 0},{ 1,11, 0},{ -1,11, 0},{ 2, 4, 1},{ -2, 4, 1},{ 3, 2, 2}, { -3, 2, 2},{ 1,12, 0},{ -1,12, 0},{ 2, 5, 1},{ -2, 5, 1},{ 5, 1, 3}, { -5, 1, 3},{ 1,13, 0},{ -1,13, 0},{ 2, 6, 1},{ -2, 6, 1},{ 1,14, 0}, { -1,14, 0},{ 2, 7, 1},{ -2, 7, 1},{ 2, 8, 1},{ -2, 8, 1},{ 3, 3, 2}, { -3, 3, 2},{ 6, 1, 3},{ -6, 1, 3},{ 1,15, 0},{ -1,15, 0} }, //level_add { 0, 7, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, 2, //golomb_order 1, //inc_limit 15, //max_run },{ { //level / run { 1, 1, 0},{ -1, 1, 0},{ 2, 1, 0},{ -2, 1, 0},{ 1, 2, 0},{ -1, 2, 0}, { 3, 1, 1},{ -3, 1, 1},{ EOB },{ 1, 3, 0},{ -1, 3, 0},{ 2, 2, 0}, { -2, 2, 0},{ 4, 1, 1},{ -4, 1, 1},{ 1, 4, 0},{ -1, 4, 0},{ 5, 1, 2}, { -5, 1, 2},{ 1, 5, 0},{ -1, 5, 0},{ 3, 2, 1},{ -3, 2, 1},{ 2, 3, 0}, { -2, 3, 0},{ 1, 6, 0},{ -1, 6, 0},{ 6, 1, 2},{ -6, 1, 2},{ 2, 4, 0}, { -2, 4, 0},{ 1, 7, 0},{ -1, 7, 0},{ 4, 2, 1},{ -4, 2, 1},{ 7, 1, 2}, { -7, 1, 2},{ 3, 3, 1},{ -3, 3, 1},{ 2, 5, 0},{ -2, 5, 0},{ 1, 8, 0}, { -1, 8, 0},{ 2, 6, 0},{ -2, 6, 0},{ 8, 1, 3},{ -8, 1, 3},{ 1, 9, 0}, { -1, 9, 0},{ 5, 2, 2},{ -5, 2, 2},{ 3, 4, 1},{ -3, 4, 1},{ 2, 7, 0}, { -2, 7, 0},{ 9, 1, 3},{ -9, 1, 3},{ 1,10, 0},{ -1,10, 0} }, //level_add { 0,10, 6, 4, 4, 3, 3, 3, 2, 2, 2,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, 2, //golomb_order 2, //inc_limit 10, //max_run },{ { //level / run { 1, 1, 0},{ -1, 1, 0},{ 2, 1, 0},{ -2, 1, 0},{ 3, 1, 0},{ -3, 1, 0}, { 1, 2, 0},{ -1, 2, 0},{ EOB },{ 4, 1, 0},{ -4, 1, 0},{ 5, 1, 1}, { -5, 1, 1},{ 2, 2, 0},{ -2, 2, 0},{ 1, 3, 0},{ -1, 3, 0},{ 6, 1, 1}, { -6, 1, 1},{ 3, 2, 0},{ -3, 2, 0},{ 7, 1, 1},{ -7, 1, 1},{ 1, 4, 0}, { -1, 4, 0},{ 8, 1, 2},{ -8, 1, 2},{ 2, 3, 0},{ -2, 3, 0},{ 4, 2, 0}, { -4, 2, 0},{ 1, 5, 0},{ -1, 5, 
0},{ 9, 1, 2},{ -9, 1, 2},{ 5, 2, 1}, { -5, 2, 1},{ 2, 4, 0},{ -2, 4, 0},{ 10, 1, 2},{-10, 1, 2},{ 3, 3, 0}, { -3, 3, 0},{ 1, 6, 0},{ -1, 6, 0},{ 11, 1, 3},{-11, 1, 3},{ 6, 2, 1}, { -6, 2, 1},{ 1, 7, 0},{ -1, 7, 0},{ 2, 5, 0},{ -2, 5, 0},{ 3, 4, 0}, { -3, 4, 0},{ 12, 1, 3},{-12, 1, 3},{ 4, 3, 0},{ -4, 3, 0} }, //level_add { 0,13, 7, 5, 4, 3, 2, 2,-1,-1,-1 -1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, 2, //golomb_order 4, //inc_limit 7, //max_run },{ { //level / run { 1, 1, 0},{ -1, 1, 0},{ 2, 1, 0},{ -2, 1, 0},{ 3, 1, 0},{ -3, 1, 0}, { EOB },{ 4, 1, 0},{ -4, 1, 0},{ 5, 1, 0},{ -5, 1, 0},{ 6, 1, 0}, { -6, 1, 0},{ 1, 2, 0},{ -1, 2, 0},{ 7, 1, 0},{ -7, 1, 0},{ 8, 1, 1}, { -8, 1, 1},{ 2, 2, 0},{ -2, 2, 0},{ 9, 1, 1},{ -9, 1, 1},{ 10, 1, 1}, {-10, 1, 1},{ 1, 3, 0},{ -1, 3, 0},{ 3, 2, 0},{ -3, 2, 0},{ 11, 1, 2}, {-11, 1, 2},{ 4, 2, 0},{ -4, 2, 0},{ 12, 1, 2},{-12, 1, 2},{ 13, 1, 2}, {-13, 1, 2},{ 5, 2, 0},{ -5, 2, 0},{ 1, 4, 0},{ -1, 4, 0},{ 2, 3, 0}, { -2, 3, 0},{ 14, 1, 2},{-14, 1, 2},{ 6, 2, 0},{ -6, 2, 0},{ 15, 1, 2}, {-15, 1, 2},{ 16, 1, 2},{-16, 1, 2},{ 3, 3, 0},{ -3, 3, 0},{ 1, 5, 0}, { -1, 5, 0},{ 7, 2, 0},{ -7, 2, 0},{ 17, 1, 2},{-17, 1, 2} }, //level_add { 0,18, 8, 4, 2, 2,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, 2, //golomb_order 7, //inc_limit 5, //max_run },{ { //level / run { EOB },{ 1, 1, 0},{ -1, 1, 0},{ 2, 1, 0},{ -2, 1, 0},{ 3, 1, 0}, { -3, 1, 0},{ 4, 1, 0},{ -4, 1, 0},{ 5, 1, 0},{ -5, 1, 0},{ 6, 1, 0}, { -6, 1, 0},{ 7, 1, 0},{ -7, 1, 0},{ 8, 1, 0},{ -8, 1, 0},{ 9, 1, 0}, { -9, 1, 0},{ 10, 1, 0},{-10, 1, 0},{ 1, 2, 0},{ -1, 2, 0},{ 11, 1, 1}, {-11, 1, 1},{ 12, 1, 1},{-12, 1, 1},{ 13, 1, 1},{-13, 1, 1},{ 2, 2, 0}, { -2, 2, 0},{ 14, 1, 1},{-14, 1, 1},{ 15, 1, 1},{-15, 1, 1},{ 3, 2, 0}, { -3, 2, 0},{ 16, 1, 1},{-16, 1, 1},{ 1, 3, 0},{ -1, 3, 0},{ 17, 1, 1}, {-17, 1, 1},{ 4, 2, 0},{ -4, 2, 0},{ 18, 1, 1},{-18, 1, 1},{ 5, 2, 0}, { -5, 2, 0},{ 19, 1, 1},{-19, 1, 1},{ 20, 1, 1},{-20, 1, 1},{ 6, 2, 0}, { -6, 2, 0},{ 21, 1, 1},{-21, 
1, 1},{ 2, 3, 0},{ -2, 3, 0} }, //level_add { 0,22, 7, 3,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, 2, //golomb_order 10, //inc_limit 3, //max_run },{ { //level / run { EOB },{ 1, 1, 0},{ -1, 1, 0},{ 2, 1, 0},{ -2, 1, 0},{ 3, 1, 0}, { -3, 1, 0},{ 4, 1, 0},{ -4, 1, 0},{ 5, 1, 0},{ -5, 1, 0},{ 6, 1, 0}, { -6, 1, 0},{ 7, 1, 0},{ -7, 1, 0},{ 8, 1, 0},{ -8, 1, 0},{ 9, 1, 0}, { -9, 1, 0},{ 10, 1, 0},{-10, 1, 0},{ 11, 1, 0},{-11, 1, 0},{ 12, 1, 0}, {-12, 1, 0},{ 13, 1, 0},{-13, 1, 0},{ 14, 1, 0},{-14, 1, 0},{ 15, 1, 0}, {-15, 1, 0},{ 16, 1, 0},{-16, 1, 0},{ 1, 2, 0},{ -1, 2, 0},{ 17, 1, 0}, {-17, 1, 0},{ 18, 1, 0},{-18, 1, 0},{ 19, 1, 0},{-19, 1, 0},{ 20, 1, 0}, {-20, 1, 0},{ 21, 1, 0},{-21, 1, 0},{ 2, 2, 0},{ -2, 2, 0},{ 22, 1, 0}, {-22, 1, 0},{ 23, 1, 0},{-23, 1, 0},{ 24, 1, 0},{-24, 1, 0},{ 25, 1, 0}, {-25, 1, 0},{ 3, 2, 0},{ -3, 2, 0},{ 26, 1, 0},{-26, 1, 0} }, //level_add { 0,27, 4,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, 2, //golomb_order INT_MAX, //inc_limit 2, //max_run } }; const struct dec_2dvlc ff_cavs_inter_dec[7] = { { { //level / run { 1, 1, 1},{ -1, 1, 1},{ 1, 2, 1},{ -1, 2, 1},{ 1, 3, 1},{ -1, 3, 1}, { 1, 4, 1},{ -1, 4, 1},{ 1, 5, 1},{ -1, 5, 1},{ 1, 6, 1},{ -1, 6, 1}, { 1, 7, 1},{ -1, 7, 1},{ 1, 8, 1},{ -1, 8, 1},{ 1, 9, 1},{ -1, 9, 1}, { 1,10, 1},{ -1,10, 1},{ 1,11, 1},{ -1,11, 1},{ 1,12, 1},{ -1,12, 1}, { 1,13, 1},{ -1,13, 1},{ 2, 1, 2},{ -2, 1, 2},{ 1,14, 1},{ -1,14, 1}, { 1,15, 1},{ -1,15, 1},{ 1,16, 1},{ -1,16, 1},{ 1,17, 1},{ -1,17, 1}, { 1,18, 1},{ -1,18, 1},{ 1,19, 1},{ -1,19, 1},{ 3, 1, 3},{ -3, 1, 3}, { 1,20, 1},{ -1,20, 1},{ 1,21, 1},{ -1,21, 1},{ 2, 2, 2},{ -2, 2, 2}, { 1,22, 1},{ -1,22, 1},{ 1,23, 1},{ -1,23, 1},{ 1,24, 1},{ -1,24, 1}, { 1,25, 1},{ -1,25, 1},{ 1,26, 1},{ -1,26, 1},{ EOB } }, //level_add { 0, 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}, 3, //golomb_order 0, //inc_limit 26 //max_run },{ { //level / run { 1, 1, 0},{ -1, 1, 0},{ EOB 
},{ 1, 2, 0},{ -1, 2, 0},{ 1, 3, 0}, { -1, 3, 0},{ 1, 4, 0},{ -1, 4, 0},{ 1, 5, 0},{ -1, 5, 0},{ 1, 6, 0}, { -1, 6, 0},{ 2, 1, 1},{ -2, 1, 1},{ 1, 7, 0},{ -1, 7, 0},{ 1, 8, 0}, { -1, 8, 0},{ 1, 9, 0},{ -1, 9, 0},{ 1,10, 0},{ -1,10, 0},{ 2, 2, 1}, { -2, 2, 1},{ 1,11, 0},{ -1,11, 0},{ 1,12, 0},{ -1,12, 0},{ 3, 1, 2}, { -3, 1, 2},{ 1,13, 0},{ -1,13, 0},{ 1,14, 0},{ -1,14, 0},{ 2, 3, 1}, { -2, 3, 1},{ 1,15, 0},{ -1,15, 0},{ 2, 4, 1},{ -2, 4, 1},{ 1,16, 0}, { -1,16, 0},{ 2, 5, 1},{ -2, 5, 1},{ 1,17, 0},{ -1,17, 0},{ 4, 1, 3}, { -4, 1, 3},{ 2, 6, 1},{ -2, 6, 1},{ 1,18, 0},{ -1,18, 0},{ 1,19, 0}, { -1,19, 0},{ 2, 7, 1},{ -2, 7, 1},{ 3, 2, 2},{ -3, 2, 2} }, //level_add { 0, 5, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,-1,-1,-1,-1,-1,-1,-1}, 2, //golomb_order 1, //inc_limit 19 //max_run },{ { //level / run { 1, 1, 0},{ -1, 1, 0},{ EOB },{ 1, 2, 0},{ -1, 2, 0},{ 2, 1, 0}, { -2, 1, 0},{ 1, 3, 0},{ -1, 3, 0},{ 1, 4, 0},{ -1, 4, 0},{ 3, 1, 1}, { -3, 1, 1},{ 2, 2, 0},{ -2, 2, 0},{ 1, 5, 0},{ -1, 5, 0},{ 1, 6, 0}, { -1, 6, 0},{ 1, 7, 0},{ -1, 7, 0},{ 2, 3, 0},{ -2, 3, 0},{ 4, 1, 2}, { -4, 1, 2},{ 1, 8, 0},{ -1, 8, 0},{ 3, 2, 1},{ -3, 2, 1},{ 2, 4, 0}, { -2, 4, 0},{ 1, 9, 0},{ -1, 9, 0},{ 1,10, 0},{ -1,10, 0},{ 5, 1, 2}, { -5, 1, 2},{ 2, 5, 0},{ -2, 5, 0},{ 1,11, 0},{ -1,11, 0},{ 2, 6, 0}, { -2, 6, 0},{ 1,12, 0},{ -1,12, 0},{ 3, 3, 1},{ -3, 3, 1},{ 6, 1, 2}, { -6, 1, 2},{ 4, 2, 2},{ -4, 2, 2},{ 1,13, 0},{ -1,13, 0},{ 2, 7, 0}, { -2, 7, 0},{ 3, 4, 1},{ -3, 4, 1},{ 1,14, 0},{ -1,14, 0} }, //level_add { 0, 7, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, 2, //golomb_order 2, //inc_limit 14 //max_run },{ { //level / run { 1, 1, 0},{ -1, 1, 0},{ EOB },{ 2, 1, 0},{ -2, 1, 0},{ 1, 2, 0}, { -1, 2, 0},{ 3, 1, 0},{ -3, 1, 0},{ 1, 3, 0},{ -1, 3, 0},{ 2, 2, 0}, { -2, 2, 0},{ 4, 1, 1},{ -4, 1, 1},{ 1, 4, 0},{ -1, 4, 0},{ 5, 1, 1}, { -5, 1, 1},{ 1, 5, 0},{ -1, 5, 0},{ 3, 2, 0},{ -3, 2, 0},{ 2, 3, 0}, { -2, 3, 0},{ 1, 6, 0},{ -1, 6, 0},{ 6, 1, 1},{ -6, 
1, 1},{ 2, 4, 0}, { -2, 4, 0},{ 1, 7, 0},{ -1, 7, 0},{ 4, 2, 1},{ -4, 2, 1},{ 7, 1, 2}, { -7, 1, 2},{ 3, 3, 0},{ -3, 3, 0},{ 1, 8, 0},{ -1, 8, 0},{ 2, 5, 0}, { -2, 5, 0},{ 8, 1, 2},{ -8, 1, 2},{ 1, 9, 0},{ -1, 9, 0},{ 3, 4, 0}, { -3, 4, 0},{ 2, 6, 0},{ -2, 6, 0},{ 5, 2, 1},{ -5, 2, 1},{ 1,10, 0}, { -1,10, 0},{ 9, 1, 2},{ -9, 1, 2},{ 4, 3, 1},{ -4, 3, 1} }, //level_add { 0,10, 6, 5, 4, 3, 3, 2, 2, 2, 2,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, 2, //golomb_order 3, //inc_limit 10 //max_run },{ { //level / run { 1, 1, 0},{ -1, 1, 0},{ EOB },{ 2, 1, 0},{ -2, 1, 0},{ 3, 1, 0}, { -3, 1, 0},{ 1, 2, 0},{ -1, 2, 0},{ 4, 1, 0},{ -4, 1, 0},{ 5, 1, 0}, { -5, 1, 0},{ 2, 2, 0},{ -2, 2, 0},{ 1, 3, 0},{ -1, 3, 0},{ 6, 1, 0}, { -6, 1, 0},{ 3, 2, 0},{ -3, 2, 0},{ 7, 1, 1},{ -7, 1, 1},{ 1, 4, 0}, { -1, 4, 0},{ 8, 1, 1},{ -8, 1, 1},{ 2, 3, 0},{ -2, 3, 0},{ 4, 2, 0}, { -4, 2, 0},{ 1, 5, 0},{ -1, 5, 0},{ 9, 1, 1},{ -9, 1, 1},{ 5, 2, 0}, { -5, 2, 0},{ 2, 4, 0},{ -2, 4, 0},{ 1, 6, 0},{ -1, 6, 0},{ 10, 1, 2}, {-10, 1, 2},{ 3, 3, 0},{ -3, 3, 0},{ 11, 1, 2},{-11, 1, 2},{ 1, 7, 0}, { -1, 7, 0},{ 6, 2, 0},{ -6, 2, 0},{ 3, 4, 0},{ -3, 4, 0},{ 2, 5, 0}, { -2, 5, 0},{ 12, 1, 2},{-12, 1, 2},{ 4, 3, 0},{ -4, 3, 0} }, //level_add { 0,13, 7, 5, 4, 3, 2, 2,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, 2, //golomb_order 6, //inc_limit 7 //max_run },{ { //level / run { EOB },{ 1, 1, 0},{ -1, 1, 0},{ 2, 1, 0},{ -2, 1, 0},{ 3, 1, 0}, { -3, 1, 0},{ 4, 1, 0},{ -4, 1, 0},{ 5, 1, 0},{ -5, 1, 0},{ 1, 2, 0}, { -1, 2, 0},{ 6, 1, 0},{ -6, 1, 0},{ 7, 1, 0},{ -7, 1, 0},{ 8, 1, 0}, { -8, 1, 0},{ 2, 2, 0},{ -2, 2, 0},{ 9, 1, 0},{ -9, 1, 0},{ 1, 3, 0}, { -1, 3, 0},{ 10, 1, 1},{-10, 1, 1},{ 3, 2, 0},{ -3, 2, 0},{ 11, 1, 1}, {-11, 1, 1},{ 4, 2, 0},{ -4, 2, 0},{ 12, 1, 1},{-12, 1, 1},{ 1, 4, 0}, { -1, 4, 0},{ 2, 3, 0},{ -2, 3, 0},{ 13, 1, 1},{-13, 1, 1},{ 5, 2, 0}, { -5, 2, 0},{ 14, 1, 1},{-14, 1, 1},{ 6, 2, 0},{ -6, 2, 0},{ 1, 5, 0}, { -1, 5, 0},{ 15, 1, 1},{-15, 1, 1},{ 3, 3, 0},{ -3, 3, 0},{ 16, 
1, 1}, {-16, 1, 1},{ 2, 4, 0},{ -2, 4, 0},{ 7, 2, 0},{ -7, 2, 0} }, //level_add { 0,17, 8, 4, 3, 2,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, 2, //golomb_order 9, //inc_limit 5 //max_run },{ { //level / run { EOB },{ 1, 1, 0},{ -1, 1, 0},{ 2, 1, 0},{ -2, 1, 0},{ 3, 1, 0}, { -3, 1, 0},{ 4, 1, 0},{ -4, 1, 0},{ 5, 1, 0},{ -5, 1, 0},{ 6, 1, 0}, { -6, 1, 0},{ 7, 1, 0},{ -7, 1, 0},{ 1, 2, 0},{ -1, 2, 0},{ 8, 1, 0}, { -8, 1, 0},{ 9, 1, 0},{ -9, 1, 0},{ 10, 1, 0},{-10, 1, 0},{ 11, 1, 0}, {-11, 1, 0},{ 12, 1, 0},{-12, 1, 0},{ 2, 2, 0},{ -2, 2, 0},{ 13, 1, 0}, {-13, 1, 0},{ 1, 3, 0},{ -1, 3, 0},{ 14, 1, 0},{-14, 1, 0},{ 15, 1, 0}, {-15, 1, 0},{ 3, 2, 0},{ -3, 2, 0},{ 16, 1, 0},{-16, 1, 0},{ 17, 1, 0}, {-17, 1, 0},{ 18, 1, 0},{-18, 1, 0},{ 4, 2, 0},{ -4, 2, 0},{ 19, 1, 0}, {-19, 1, 0},{ 20, 1, 0},{-20, 1, 0},{ 2, 3, 0},{ -2, 3, 0},{ 1, 4, 0}, { -1, 4, 0},{ 5, 2, 0},{ -5, 2, 0},{ 21, 1, 0},{-21, 1, 0} }, //level_add { 0,22, 6, 3, 2,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, 2, //golomb_order INT_MAX, //inc_limit 4 //max_run } }; const struct dec_2dvlc ff_cavs_chroma_dec[5] = { { { //level / run { 1, 1, 1},{ -1, 1, 1},{ 1, 2, 1},{ -1, 2, 1},{ 1, 3, 1},{ -1, 3, 1}, { 1, 4, 1},{ -1, 4, 1},{ 1, 5, 1},{ -1, 5, 1},{ 1, 6, 1},{ -1, 6, 1}, { 1, 7, 1},{ -1, 7, 1},{ 2, 1, 2},{ -2, 1, 2},{ 1, 8, 1},{ -1, 8, 1}, { 1, 9, 1},{ -1, 9, 1},{ 1,10, 1},{ -1,10, 1},{ 1,11, 1},{ -1,11, 1}, { 1,12, 1},{ -1,12, 1},{ 1,13, 1},{ -1,13, 1},{ 1,14, 1},{ -1,14, 1}, { 1,15, 1},{ -1,15, 1},{ 3, 1, 3},{ -3, 1, 3},{ 1,16, 1},{ -1,16, 1}, { 1,17, 1},{ -1,17, 1},{ 1,18, 1},{ -1,18, 1},{ 1,19, 1},{ -1,19, 1}, { 1,20, 1},{ -1,20, 1},{ 1,21, 1},{ -1,21, 1},{ 1,22, 1},{ -1,22, 1}, { 2, 2, 2},{ -2, 2, 2},{ 1,23, 1},{ -1,23, 1},{ 1,24, 1},{ -1,24, 1}, { 1,25, 1},{ -1,25, 1},{ 4, 1, 3},{ -4, 1, 3},{ EOB } }, //level_add { 0, 5, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,-1}, 2, //golomb_order 0, //inc_limit 25 //max_run },{ { //level / run { 
EOB },{ 1, 1, 0},{ -1, 1, 0},{ 1, 2, 0},{ -1, 2, 0},{ 2, 1, 1}, { -2, 1, 1},{ 1, 3, 0},{ -1, 3, 0},{ 1, 4, 0},{ -1, 4, 0},{ 1, 5, 0}, { -1, 5, 0},{ 1, 6, 0},{ -1, 6, 0},{ 3, 1, 2},{ -3, 1, 2},{ 1, 7, 0}, { -1, 7, 0},{ 1, 8, 0},{ -1, 8, 0},{ 2, 2, 1},{ -2, 2, 1},{ 1, 9, 0}, { -1, 9, 0},{ 1,10, 0},{ -1,10, 0},{ 1,11, 0},{ -1,11, 0},{ 4, 1, 2}, { -4, 1, 2},{ 1,12, 0},{ -1,12, 0},{ 1,13, 0},{ -1,13, 0},{ 1,14, 0}, { -1,14, 0},{ 2, 3, 1},{ -2, 3, 1},{ 1,15, 0},{ -1,15, 0},{ 2, 4, 1}, { -2, 4, 1},{ 5, 1, 3},{ -5, 1, 3},{ 3, 2, 2},{ -3, 2, 2},{ 1,16, 0}, { -1,16, 0},{ 1,17, 0},{ -1,17, 0},{ 1,18, 0},{ -1,18, 0},{ 2, 5, 1}, { -2, 5, 1},{ 1,19, 0},{ -1,19, 0},{ 1,20, 0},{ -1,20, 0} }, //level_add { 0, 6, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,-1,-1,-1,-1,-1,-1}, 0, //golomb_order 1, //inc_limit 20 //max_run },{ { //level / run { 1, 1, 0},{ -1, 1, 0},{ EOB },{ 2, 1, 0},{ -2, 1, 0},{ 1, 2, 0}, { -1, 2, 0},{ 3, 1, 1},{ -3, 1, 1},{ 1, 3, 0},{ -1, 3, 0},{ 4, 1, 1}, { -4, 1, 1},{ 2, 2, 0},{ -2, 2, 0},{ 1, 4, 0},{ -1, 4, 0},{ 5, 1, 2}, { -5, 1, 2},{ 1, 5, 0},{ -1, 5, 0},{ 3, 2, 1},{ -3, 2, 1},{ 2, 3, 0}, { -2, 3, 0},{ 1, 6, 0},{ -1, 6, 0},{ 6, 1, 2},{ -6, 1, 2},{ 1, 7, 0}, { -1, 7, 0},{ 2, 4, 0},{ -2, 4, 0},{ 7, 1, 2},{ -7, 1, 2},{ 1, 8, 0}, { -1, 8, 0},{ 4, 2, 1},{ -4, 2, 1},{ 1, 9, 0},{ -1, 9, 0},{ 3, 3, 1}, { -3, 3, 1},{ 2, 5, 0},{ -2, 5, 0},{ 2, 6, 0},{ -2, 6, 0},{ 8, 1, 2}, { -8, 1, 2},{ 1,10, 0},{ -1,10, 0},{ 1,11, 0},{ -1,11, 0},{ 9, 1, 2}, { -9, 1, 2},{ 5, 2, 2},{ -5, 2, 2},{ 3, 4, 1},{ -3, 4, 1}, }, //level_add { 0,10, 6, 4, 4, 3, 3, 2, 2, 2, 2, 2,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, 1, //golomb_order 2, //inc_limit 11 //max_run },{ { //level / run { EOB },{ 1, 1, 0},{ -1, 1, 0},{ 2, 1, 0},{ -2, 1, 0},{ 3, 1, 0}, { -3, 1, 0},{ 4, 1, 0},{ -4, 1, 0},{ 1, 2, 0},{ -1, 2, 0},{ 5, 1, 1}, { -5, 1, 1},{ 2, 2, 0},{ -2, 2, 0},{ 6, 1, 1},{ -6, 1, 1},{ 1, 3, 0}, { -1, 3, 0},{ 7, 1, 1},{ -7, 1, 1},{ 3, 2, 0},{ -3, 2, 0},{ 8, 1, 1}, { -8, 1, 1},{ 1, 4, 0},{ 
-1, 4, 0},{ 2, 3, 0},{ -2, 3, 0},{ 9, 1, 1}, { -9, 1, 1},{ 4, 2, 0},{ -4, 2, 0},{ 1, 5, 0},{ -1, 5, 0},{ 10, 1, 1}, {-10, 1, 1},{ 3, 3, 0},{ -3, 3, 0},{ 5, 2, 1},{ -5, 2, 1},{ 2, 4, 0}, { -2, 4, 0},{ 11, 1, 1},{-11, 1, 1},{ 1, 6, 0},{ -1, 6, 0},{ 12, 1, 1}, {-12, 1, 1},{ 1, 7, 0},{ -1, 7, 0},{ 6, 2, 1},{ -6, 2, 1},{ 13, 1, 1}, {-13, 1, 1},{ 2, 5, 0},{ -2, 5, 0},{ 1, 8, 0},{ -1, 8, 0}, }, //level_add { 0,14, 7, 4, 3, 3, 2, 2, 2,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, 1, //golomb_order 4, //inc_limit 8 //max_run },{ { //level / run { EOB },{ 1, 1, 0},{ -1, 1, 0},{ 2, 1, 0},{ -2, 1, 0},{ 3, 1, 0}, { -3, 1, 0},{ 4, 1, 0},{ -4, 1, 0},{ 5, 1, 0},{ -5, 1, 0},{ 6, 1, 0}, { -6, 1, 0},{ 7, 1, 0},{ -7, 1, 0},{ 8, 1, 0},{ -8, 1, 0},{ 1, 2, 0}, { -1, 2, 0},{ 9, 1, 0},{ -9, 1, 0},{ 10, 1, 0},{-10, 1, 0},{ 11, 1, 0}, {-11, 1, 0},{ 2, 2, 0},{ -2, 2, 0},{ 12, 1, 0},{-12, 1, 0},{ 13, 1, 0}, {-13, 1, 0},{ 3, 2, 0},{ -3, 2, 0},{ 14, 1, 0},{-14, 1, 0},{ 1, 3, 0}, { -1, 3, 0},{ 15, 1, 0},{-15, 1, 0},{ 4, 2, 0},{ -4, 2, 0},{ 16, 1, 0}, {-16, 1, 0},{ 17, 1, 0},{-17, 1, 0},{ 5, 2, 0},{ -5, 2, 0},{ 1, 4, 0}, { -1, 4, 0},{ 2, 3, 0},{ -2, 3, 0},{ 18, 1, 0},{-18, 1, 0},{ 6, 2, 0}, { -6, 2, 0},{ 19, 1, 0},{-19, 1, 0},{ 1, 5, 0},{ -1, 5, 0}, }, //level_add { 0,20, 7, 3, 2, 2,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, 0, //golomb_order INT_MAX, //inc_limit 5, //max_run } }; #undef EOB static const uint8_t alpha_tab[64] = { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 18, 20, 22, 24, 26, 28, 30, 33, 33, 35, 35, 36, 37, 37, 39, 39, 42, 44, 46, 48, 50, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64 }; static const uint8_t beta_tab[64] = { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 10, 10, 11, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 23, 24, 24, 25, 25, 26, 27 }; static const uint8_t tc_tab[64] = { 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9 }; const int_fast8_t ff_left_modifier_l[8] = { 0,-1, 6,-1,-1, 7, 6, 7}; const int_fast8_t ff_top_modifier_l[8] = {-1, 1, 5,-1,-1, 5, 7, 7}; const int_fast8_t ff_left_modifier_c[7] = { 5,-1, 2,-1, 6, 5, 6}; const int_fast8_t ff_top_modifier_c[7] = { 4, 1,-1,-1, 4, 6, 6}; #endif /* AVCODEC_CAVSDATA_H */
123linslouis-android-video-cutter
jni/libavcodec/cavsdata.h
C
asf20
23,556
/* * adaptive and fixed codebook vector operations for ACELP-based codecs * * Copyright (c) 2008 Vladimir Voroshilov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <inttypes.h> #include "avcodec.h" #include "acelp_vectors.h" #include "celp_math.h" const uint8_t ff_fc_2pulses_9bits_track1[16] = { 1, 3, 6, 8, 11, 13, 16, 18, 21, 23, 26, 28, 31, 33, 36, 38 }; const uint8_t ff_fc_2pulses_9bits_track1_gray[16] = { 1, 3, 8, 6, 18, 16, 11, 13, 38, 36, 31, 33, 21, 23, 28, 26, }; const uint8_t ff_fc_2pulses_9bits_track2_gray[32] = { 0, 2, 5, 4, 12, 10, 7, 9, 25, 24, 20, 22, 14, 15, 19, 17, 36, 31, 21, 26, 1, 6, 16, 11, 27, 29, 32, 30, 39, 37, 34, 35, }; const uint8_t ff_fc_4pulses_8bits_tracks_13[16] = { 0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, }; const uint8_t ff_fc_4pulses_8bits_track_4[32] = { 3, 4, 8, 9, 13, 14, 18, 19, 23, 24, 28, 29, 33, 34, 38, 39, 43, 44, 48, 49, 53, 54, 58, 59, 63, 64, 68, 69, 73, 74, 78, 79, }; #if 0 static uint8_t gray_decode[32] = { 0, 1, 3, 2, 7, 6, 4, 5, 15, 14, 12, 13, 8, 9, 11, 10, 31, 30, 28, 29, 24, 25, 27, 26, 16, 17, 19, 18, 23, 22, 20, 21 }; #endif const float ff_pow_0_7[10] = { 0.700000, 0.490000, 0.343000, 0.240100, 0.168070, 0.117649, 0.082354, 0.057648, 0.040354, 0.028248 }; const float ff_pow_0_75[10] = { 0.750000, 
0.562500, 0.421875, 0.316406, 0.237305, 0.177979, 0.133484, 0.100113, 0.075085, 0.056314 }; const float ff_pow_0_55[10] = { 0.550000, 0.302500, 0.166375, 0.091506, 0.050328, 0.027681, 0.015224, 0.008373, 0.004605, 0.002533 }; const float ff_b60_sinc[61] = { 0.898529 , 0.865051 , 0.769257 , 0.624054 , 0.448639 , 0.265289 , 0.0959167 , -0.0412598 , -0.134338 , -0.178986 , -0.178528 , -0.142609 , -0.0849304 , -0.0205078 , 0.0369568 , 0.0773926 , 0.0955200 , 0.0912781 , 0.0689392 , 0.0357056 , 0. , -0.0305481 , -0.0504150 , -0.0570068 , -0.0508423 , -0.0350037 , -0.0141602 , 0.00665283, 0.0230713 , 0.0323486 , 0.0335388 , 0.0275879 , 0.0167847 , 0.00411987, -0.00747681, -0.0156860 , -0.0193481 , -0.0183716 , -0.0137634 , -0.00704956, 0. , 0.00582886 , 0.00939941, 0.0103760 , 0.00903320, 0.00604248, 0.00238037, -0.00109863 , -0.00366211, -0.00497437, -0.00503540, -0.00402832, -0.00241089, -0.000579834, 0.00103760, 0.00222778, 0.00277710, 0.00271606, 0.00213623, 0.00115967 , 0. }; void ff_acelp_fc_pulse_per_track( int16_t* fc_v, const uint8_t *tab1, const uint8_t *tab2, int pulse_indexes, int pulse_signs, int pulse_count, int bits) { int mask = (1 << bits) - 1; int i; for(i=0; i<pulse_count; i++) { fc_v[i + tab1[pulse_indexes & mask]] += (pulse_signs & 1) ? 8191 : -8192; // +/-1 in (2.13) pulse_indexes >>= bits; pulse_signs >>= 1; } fc_v[tab2[pulse_indexes]] += (pulse_signs & 1) ? 8191 : -8192; } void ff_decode_10_pulses_35bits(const int16_t *fixed_index, AMRFixed *fixed_sparse, const uint8_t *gray_decode, int half_pulse_count, int bits) { int i; int mask = (1 << bits) - 1; fixed_sparse->no_repeat_mask = 0; fixed_sparse->n = 2 * half_pulse_count; for (i = 0; i < half_pulse_count; i++) { const int pos1 = gray_decode[fixed_index[2*i+1] & mask] + i; const int pos2 = gray_decode[fixed_index[2*i ] & mask] + i; const float sign = (fixed_index[2*i+1] & (1 << bits)) ? 
-1.0 : 1.0; fixed_sparse->x[2*i+1] = pos1; fixed_sparse->x[2*i ] = pos2; fixed_sparse->y[2*i+1] = sign; fixed_sparse->y[2*i ] = pos2 < pos1 ? -sign : sign; } } void ff_acelp_weighted_vector_sum( int16_t* out, const int16_t *in_a, const int16_t *in_b, int16_t weight_coeff_a, int16_t weight_coeff_b, int16_t rounder, int shift, int length) { int i; // Clipping required here; breaks OVERFLOW test. for(i=0; i<length; i++) out[i] = av_clip_int16(( in_a[i] * weight_coeff_a + in_b[i] * weight_coeff_b + rounder) >> shift); } void ff_weighted_vector_sumf(float *out, const float *in_a, const float *in_b, float weight_coeff_a, float weight_coeff_b, int length) { int i; for(i=0; i<length; i++) out[i] = weight_coeff_a * in_a[i] + weight_coeff_b * in_b[i]; } void ff_adaptive_gain_control(float *out, const float *in, float speech_energ, int size, float alpha, float *gain_mem) { int i; float postfilter_energ = ff_dot_productf(in, in, size); float gain_scale_factor = 1.0; float mem = *gain_mem; if (postfilter_energ) gain_scale_factor = sqrt(speech_energ / postfilter_energ); gain_scale_factor *= 1.0 - alpha; for (i = 0; i < size; i++) { mem = alpha * mem + gain_scale_factor; out[i] = in[i] * mem; } *gain_mem = mem; } void ff_scale_vector_to_given_sum_of_squares(float *out, const float *in, float sum_of_squares, const int n) { int i; float scalefactor = ff_dot_productf(in, in, n); if (scalefactor) scalefactor = sqrt(sum_of_squares / scalefactor); for (i = 0; i < n; i++) out[i] = in[i] * scalefactor; } void ff_set_fixed_vector(float *out, const AMRFixed *in, float scale, int size) { int i; for (i=0; i < in->n; i++) { int x = in->x[i], repeats = !((in->no_repeat_mask >> i) & 1); float y = in->y[i] * scale; do { out[x] += y; y *= in->pitch_fac; x += in->pitch_lag; } while (x < size && repeats); } } void ff_clear_fixed_vector(float *out, const AMRFixed *in, int size) { int i; for (i=0; i < in->n; i++) { int x = in->x[i], repeats = !((in->no_repeat_mask >> i) & 1); do { out[x] = 0.0; x += 
in->pitch_lag; } while (x < size && repeats); } }
123linslouis-android-video-cutter
jni/libavcodec/acelp_vectors.c
C
asf20
7,061
/* * Common code between Nellymoser encoder and decoder * Copyright (c) 2007 a840bda5870ba11f19698ff6eb9581dfb0f95fa5, * 539459aeb7d425140b62a3ec7dbf6dc8e408a306, and * 520e17cd55896441042b14df2566a6eb610ed444 * Copyright (c) 2007 Loic Minier <lool at dooz.org> * Benjamin Larsson * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ /** * @file * The 3 alphanumeric copyright notices are md5summed they are from the original * implementors. 
The original code is available from http://code.google.com/p/nelly2pcm/ */ #include "nellymoser.h" #include "avcodec.h" #include "dsputil.h" #define ALT_BITSTREAM_READER_LE #include "get_bits.h" const float ff_nelly_dequantization_table[127] = { 0.0000000000, -0.8472560048, 0.7224709988, -1.5247479677,-0.4531480074, 0.3753609955, 1.4717899561, -1.9822579622,-1.1929379702,-0.5829370022,-0.0693780035, 0.3909569979, 0.9069200158, 1.4862740040, 2.2215409279, -2.3887870312,-1.8067539930,-1.4105420113,-1.0773609877,-0.7995010018,-0.5558109879,-0.3334020078,-0.1324490011, 0.0568020009, 0.2548770010, 0.4773550034, 0.7386850119, 1.0443060398, 1.3954459429, 1.8098750114, 2.3918759823, -2.3893830776,-1.9884680510,-1.7514040470,-1.5643119812,-1.3922129869,-1.2164649963,-1.0469499826,-0.8905100226, -0.7645580173,-0.6454579830,-0.5259280205,-0.4059549868,-0.3029719889,-0.2096900046,-0.1239869967,-0.0479229987, 0.0257730000, 0.1001340002, 0.1737180054, 0.2585540116, 0.3522900045, 0.4569880068, 0.5767750144, 0.7003160119, 0.8425520062, 1.0093879700, 1.1821349859, 1.3534560204, 1.5320819616, 1.7332619429, 1.9722349644, 2.3978140354, -2.5756309032,-2.0573320389,-1.8984919786,-1.7727810144,-1.6662600040,-1.5742180347,-1.4993319511,-1.4316639900, -1.3652280569,-1.3000990152,-1.2280930281,-1.1588579416,-1.0921250582,-1.0135740042,-0.9202849865,-0.8287050128, -0.7374889851,-0.6447759867,-0.5590940118,-0.4857139885,-0.4110319912,-0.3459700048,-0.2851159871,-0.2341620028, -0.1870580018,-0.1442500055,-0.1107169986,-0.0739680007,-0.0365610011,-0.0073290002, 0.0203610007, 0.0479039997, 0.0751969963, 0.0980999991, 0.1220389977, 0.1458999962, 0.1694349945, 0.1970459968, 0.2252430022, 0.2556869984, 0.2870100141, 0.3197099864, 0.3525829911, 0.3889069855, 0.4334920049, 0.4769459963, 0.5204820037, 0.5644530058, 0.6122040153, 0.6685929894, 0.7341650128, 0.8032159805, 0.8784040213, 0.9566209912, 1.0397069454, 1.1293770075, 1.2211159468, 1.3080279827, 1.4024800062, 1.5056819916, 1.6227730513, 
1.7724959850, 1.9430880547, 2.2903931141 }; const uint8_t ff_nelly_band_sizes_table[NELLY_BANDS] = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 9, 10, 12, 14, 15 }; const uint16_t ff_nelly_init_table[64] = { 3134, 5342, 6870, 7792, 8569, 9185, 9744, 10191, 10631, 11061, 11434, 11770, 12116, 12513, 12925, 13300, 13674, 14027, 14352, 14716, 15117, 15477, 15824, 16157, 16513, 16804, 17090, 17401, 17679, 17948, 18238, 18520, 18764, 19078, 19381, 19640, 19921, 20205, 20500, 20813, 21162, 21465, 21794, 22137, 22453, 22756, 23067, 23350, 23636, 23926, 24227, 24521, 24819, 25107, 25414, 25730, 26120, 26497, 26895, 27344, 27877, 28463, 29426, 31355 }; const int16_t ff_nelly_delta_table[32] = { -11725, -9420, -7910, -6801, -5948, -5233, -4599, -4039, -3507, -3030, -2596, -2170, -1774, -1383, -1016, -660, -329, -1, 337, 696, 1085, 1512, 1962, 2433, 2968, 3569, 4314, 5279, 6622, 8154, 10076, 12975 }; static inline int signed_shift(int i, int shift) { if (shift > 0) return i << shift; return i >> -shift; } static int sum_bits(short *buf, short shift, short off) { int i, ret = 0; for (i = 0; i < NELLY_FILL_LEN; i++) { int b = buf[i]-off; b = ((b>>(shift-1))+1)>>1; ret += av_clip(b, 0, NELLY_BIT_CAP); } return ret; } static int headroom(int *la) { int l; if (*la == 0) { return 31; } l = 30 - av_log2(FFABS(*la)); *la <<= l; return l; } void ff_nelly_get_sample_bits(const float *buf, int *bits) { int i, j; short sbuf[128]; int bitsum = 0, last_bitsum, small_bitsum, big_bitsum; short shift, shift_saved; int max, sum, last_off, tmp; int big_off, small_off; int off; max = 0; for (i = 0; i < NELLY_FILL_LEN; i++) { max = FFMAX(max, buf[i]); } shift = -16; shift += headroom(&max); sum = 0; for (i = 0; i < NELLY_FILL_LEN; i++) { sbuf[i] = signed_shift(buf[i], shift); sbuf[i] = (3*sbuf[i])>>2; sum += sbuf[i]; } shift += 11; shift_saved = shift; sum -= NELLY_DETAIL_BITS << shift; shift += headroom(&sum); small_off = (NELLY_BASE_OFF * (sum>>16)) >> 15; shift = shift_saved - 
(NELLY_BASE_SHIFT+shift-31); small_off = signed_shift(small_off, shift); bitsum = sum_bits(sbuf, shift_saved, small_off); if (bitsum != NELLY_DETAIL_BITS) { off = bitsum - NELLY_DETAIL_BITS; for(shift=0; FFABS(off) <= 16383; shift++) off *= 2; off = (off * NELLY_BASE_OFF) >> 15; shift = shift_saved-(NELLY_BASE_SHIFT+shift-15); off = signed_shift(off, shift); for (j = 1; j < 20; j++) { last_off = small_off; small_off += off; last_bitsum = bitsum; bitsum = sum_bits(sbuf, shift_saved, small_off); if ((bitsum-NELLY_DETAIL_BITS) * (last_bitsum-NELLY_DETAIL_BITS) <= 0) break; } if (bitsum > NELLY_DETAIL_BITS) { big_off = small_off; small_off = last_off; big_bitsum=bitsum; small_bitsum=last_bitsum; } else { big_off = last_off; big_bitsum=last_bitsum; small_bitsum=bitsum; } while (bitsum != NELLY_DETAIL_BITS && j <= 19) { off = (big_off+small_off)>>1; bitsum = sum_bits(sbuf, shift_saved, off); if (bitsum > NELLY_DETAIL_BITS) { big_off=off; big_bitsum=bitsum; } else { small_off = off; small_bitsum=bitsum; } j++; } if (abs(big_bitsum-NELLY_DETAIL_BITS) >= abs(small_bitsum-NELLY_DETAIL_BITS)) { bitsum = small_bitsum; } else { small_off = big_off; bitsum = big_bitsum; } } for (i = 0; i < NELLY_FILL_LEN; i++) { tmp = sbuf[i]-small_off; tmp = ((tmp>>(shift_saved-1))+1)>>1; bits[i] = av_clip(tmp, 0, NELLY_BIT_CAP); } if (bitsum > NELLY_DETAIL_BITS) { tmp = i = 0; while (tmp < NELLY_DETAIL_BITS) { tmp += bits[i]; i++; } bits[i-1] -= tmp - NELLY_DETAIL_BITS; for(; i < NELLY_FILL_LEN; i++) bits[i] = 0; } }
123linslouis-android-video-cutter
jni/libavcodec/nellymoser.c
C
asf20
7,893
/* * Atrac 3 compatible decoder data * Copyright (c) 2006-2007 Maxim Poliakovski * Copyright (c) 2006-2007 Benjamin Larsson * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Atrac 3 AKA RealAudio 8 compatible decoder data */ #ifndef AVCODEC_ATRAC3DATA_H #define AVCODEC_ATRAC3DATA_H #include <stdint.h> /* VLC tables */ static const uint8_t huffcode1[9] = { 0x0,0x4,0x5,0xC,0xD,0x1C,0x1D,0x1E,0x1F, }; static const uint8_t huffbits1[9] = { 1,3,3,4,4,5,5,5,5, }; static const uint8_t huffcode2[5] = { 0x0,0x4,0x5,0x6,0x7, }; static const uint8_t huffbits2[5] = { 1,3,3,3,3, }; static const uint8_t huffcode3[7] = { 0x0,0x4,0x5,0xC,0xD,0xE,0xF, }; static const uint8_t huffbits3[7] = { 1,3,3,4,4,4,4, }; static const uint8_t huffcode4[9] = { 0x0,0x4,0x5,0xC,0xD,0x1C,0x1D,0x1E,0x1F, }; static const uint8_t huffbits4[9] = { 1,3,3,4,4,5,5,5,5, }; static const uint8_t huffcode5[15] = { 0x0,0x2,0x3,0x8,0x9,0xA,0xB,0x1C,0x1D,0x3C,0x3D,0x3E,0x3F,0xC,0xD, }; static const uint8_t huffbits5[15] = { 2,3,3,4,4,4,4,5,5,6,6,6,6,4,4 }; static const uint8_t huffcode6[31] = { 0x0,0x2,0x3,0x4,0x5,0x6,0x7,0x14,0x15,0x16,0x17,0x18,0x19,0x34,0x35, 0x36,0x37,0x38,0x39,0x3A,0x3B,0x78,0x79,0x7A,0x7B,0x7C,0x7D,0x7E,0x7F,0x8,0x9, }; static const uint8_t huffbits6[31] = { 
3,4,4,4,4,4,4,5,5,5,5,5,5,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,4,4 }; static const uint8_t huffcode7[63] = { 0x0,0x8,0x9,0xA,0xB,0xC,0xD,0xE,0xF,0x10,0x11,0x24,0x25,0x26,0x27,0x28, 0x29,0x2A,0x2B,0x2C,0x2D,0x2E,0x2F,0x30,0x31,0x32,0x33,0x68,0x69,0x6A,0x6B,0x6C, 0x6D,0x6E,0x6F,0x70,0x71,0x72,0x73,0x74,0x75,0xEC,0xED,0xEE,0xEF,0xF0,0xF1,0xF2, 0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF,0x2,0x3, }; static const uint8_t huffbits7[63] = { 3,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,4,4 }; static const uint8_t huff_tab_sizes[7] = { 9, 5, 7, 9, 15, 31, 63, }; static const uint8_t* const huff_codes[7] = { huffcode1,huffcode2,huffcode3,huffcode4,huffcode5,huffcode6,huffcode7, }; static const uint8_t* const huff_bits[7] = { huffbits1,huffbits2,huffbits3,huffbits4,huffbits5,huffbits6,huffbits7, }; static const uint16_t atrac3_vlc_offs[] = { 0,512,1024,1536,2048,2560,3072,3584,4096 }; /* selector tables */ static const uint8_t CLCLengthTab[8] = {0, 4, 3, 3, 4, 4, 5, 6}; static const int8_t seTab_0[4] = {0, 1, -2, -1}; static const int8_t decTable1[18] = {0,0, 0,1, 0,-1, 1,0, -1,0, 1,1, 1,-1, -1,1, -1,-1}; /* tables for the scalefactor decoding */ static const float iMaxQuant[8] = { 0.0, 1.0/1.5, 1.0/2.5, 1.0/3.5, 1.0/4.5, 1.0/7.5, 1.0/15.5, 1.0/31.5 }; static const uint16_t subbandTab[33] = { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256, 288, 320, 352, 384, 416, 448, 480, 512, 576, 640, 704, 768, 896, 1024 }; /* joint stereo related tables */ static const float matrixCoeffs[8] = {0.0, 2.0, 2.0, 2.0, 0.0, 0.0, 1.0, 1.0}; #endif /* AVCODEC_ATRAC3DATA_H */
123linslouis-android-video-cutter
jni/libavcodec/atrac3data.h
C
asf20
3,768
/* * MPEG4 encoder/decoder internal header. * Copyright (c) 2000,2001 Fabrice Bellard * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_MPEG4VIDEO_H #define AVCODEC_MPEG4VIDEO_H #include <stdint.h> #include "get_bits.h" #include "mpegvideo.h" #include "rl.h" // shapes #define RECT_SHAPE 0 #define BIN_SHAPE 1 #define BIN_ONLY_SHAPE 2 #define GRAY_SHAPE 3 #define SIMPLE_VO_TYPE 1 #define CORE_VO_TYPE 3 #define MAIN_VO_TYPE 4 #define NBIT_VO_TYPE 5 #define ARTS_VO_TYPE 10 #define ACE_VO_TYPE 12 #define ADV_SIMPLE_VO_TYPE 17 // aspect_ratio_info #define EXTENDED_PAR 15 //vol_sprite_usage / sprite_enable #define STATIC_SPRITE 1 #define GMC_SPRITE 2 #define MOTION_MARKER 0x1F001 #define DC_MARKER 0x6B001 #define VOS_STARTCODE 0x1B0 #define USER_DATA_STARTCODE 0x1B2 #define GOP_STARTCODE 0x1B3 #define VISUAL_OBJ_STARTCODE 0x1B5 #define VOP_STARTCODE 0x1B6 /* dc encoding for mpeg4 */ extern const uint8_t ff_mpeg4_DCtab_lum[13][2]; extern const uint8_t ff_mpeg4_DCtab_chrom[13][2]; extern const uint16_t ff_mpeg4_intra_vlc[103][2]; extern RLTable ff_mpeg4_rl_intra; /* Note this is identical to the intra rvlc except that it is reordered. 
*/ extern const uint16_t inter_rvlc[170][2]; extern RLTable rvlc_rl_inter; extern const uint16_t intra_rvlc[170][2]; extern RLTable rvlc_rl_intra; extern const uint16_t sprite_trajectory_tab[15][2]; extern const uint8_t mb_type_b_tab[4][2]; /* these matrixes will be permuted for the idct */ extern const int16_t ff_mpeg4_default_intra_matrix[64]; extern const int16_t ff_mpeg4_default_non_intra_matrix[64]; extern const uint8_t ff_mpeg4_y_dc_scale_table[32]; extern const uint8_t ff_mpeg4_c_dc_scale_table[32]; extern const uint16_t ff_mpeg4_resync_prefix[8]; extern const uint8_t mpeg4_dc_threshold[8]; void mpeg4_encode_mb(MpegEncContext *s, DCTELEM block[6][64], int motion_x, int motion_y); void mpeg4_pred_ac(MpegEncContext * s, DCTELEM *block, int n, int dir); void ff_set_mpeg4_time(MpegEncContext * s); void mpeg4_encode_picture_header(MpegEncContext *s, int picture_number); int ff_mpeg4_decode_picture_header(MpegEncContext * s, GetBitContext *gb); void ff_mpeg4_encode_video_packet_header(MpegEncContext *s); void ff_mpeg4_clean_buffers(MpegEncContext *s); void ff_mpeg4_stuffing(PutBitContext * pbc); void ff_mpeg4_init_partitions(MpegEncContext *s); void ff_mpeg4_merge_partitions(MpegEncContext *s); void ff_clean_mpeg4_qscales(MpegEncContext *s); int ff_mpeg4_decode_partitions(MpegEncContext *s); int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s); int mpeg4_decode_video_packet_header(MpegEncContext *s); void ff_mpeg4_init_direct_mv(MpegEncContext *s); /** * * @return the mb_type */ int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my); extern uint8_t ff_mpeg4_static_rl_table_store[3][2][2*MAX_RUN + MAX_LEVEL + 3]; #if 0 //3IV1 is quite rare and it slows things down a tiny bit #define IS_3IV1 s->codec_tag == AV_RL32("3IV1") #else #define IS_3IV1 0 #endif /** * predicts the dc. 
* encoding quantized level -> quantized diff * decoding quantized diff -> quantized level * @param n block index (0-3 are luma, 4-5 are chroma) * @param dir_ptr pointer to an integer where the prediction direction will be stored */ static inline int ff_mpeg4_pred_dc(MpegEncContext * s, int n, int level, int *dir_ptr, int encoding) { int a, b, c, wrap, pred, scale, ret; int16_t *dc_val; /* find prediction */ if (n < 4) { scale = s->y_dc_scale; } else { scale = s->c_dc_scale; } if(IS_3IV1) scale= 8; wrap= s->block_wrap[n]; dc_val = s->dc_val[0] + s->block_index[n]; /* B C * A X */ a = dc_val[ - 1]; b = dc_val[ - 1 - wrap]; c = dc_val[ - wrap]; /* outside slice handling (we can't do that by memset as we need the dc for error resilience) */ if(s->first_slice_line && n!=3){ if(n!=2) b=c= 1024; if(n!=1 && s->mb_x == s->resync_mb_x) b=a= 1024; } if(s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y+1){ if(n==0 || n==4 || n==5) b=1024; } if (abs(a - b) < abs(b - c)) { pred = c; *dir_ptr = 1; /* top */ } else { pred = a; *dir_ptr = 0; /* left */ } /* we assume pred is positive */ pred = FASTDIV((pred + (scale >> 1)), scale); if(encoding){ ret = level - pred; }else{ level += pred; ret= level; if(s->error_recognition>=3){ if(level<0){ av_log(s->avctx, AV_LOG_ERROR, "dc<0 at %dx%d\n", s->mb_x, s->mb_y); return -1; } if(level*scale > 2048 + scale){ av_log(s->avctx, AV_LOG_ERROR, "dc overflow at %dx%d\n", s->mb_x, s->mb_y); return -1; } } } level *=scale; if(level&(~2047)){ if(level<0) level=0; else if(!(s->workaround_bugs&FF_BUG_DC_CLIP)) level=2047; } dc_val[0]= level; return ret; } #endif
123linslouis-android-video-cutter
jni/libavcodec/mpeg4video.h
C
asf20
6,028
/* * VC-1 and WMV3 decoder - DSP functions AltiVec-optimized * Copyright (c) 2006 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavcodec/dsputil.h" #include "util_altivec.h" #include "dsputil_altivec.h" // main steps of 8x8 transform #define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd) \ do { \ t0 = vec_sl(vec_add(s0, s4), vec_2); \ t0 = vec_add(vec_sl(t0, vec_1), t0); \ t0 = vec_add(t0, vec_rnd); \ t1 = vec_sl(vec_sub(s0, s4), vec_2); \ t1 = vec_add(vec_sl(t1, vec_1), t1); \ t1 = vec_add(t1, vec_rnd); \ t2 = vec_add(vec_sl(s6, vec_2), vec_sl(s6, vec_1)); \ t2 = vec_add(t2, vec_sl(s2, vec_4)); \ t3 = vec_add(vec_sl(s2, vec_2), vec_sl(s2, vec_1)); \ t3 = vec_sub(t3, vec_sl(s6, vec_4)); \ t4 = vec_add(t0, t2); \ t5 = vec_add(t1, t3); \ t6 = vec_sub(t1, t3); \ t7 = vec_sub(t0, t2); \ \ t0 = vec_sl(vec_add(s1, s3), vec_4); \ t0 = vec_add(t0, vec_sl(s5, vec_3)); \ t0 = vec_add(t0, vec_sl(s7, vec_2)); \ t0 = vec_add(t0, vec_sub(s5, s3)); \ \ t1 = vec_sl(vec_sub(s1, s5), vec_4); \ t1 = vec_sub(t1, vec_sl(s7, vec_3)); \ t1 = vec_sub(t1, vec_sl(s3, vec_2)); \ t1 = vec_sub(t1, vec_add(s1, s7)); \ \ t2 = vec_sl(vec_sub(s7, s3), vec_4); \ t2 = vec_add(t2, vec_sl(s1, vec_3)); \ t2 = vec_add(t2, vec_sl(s5, vec_2)); \ t2 = vec_add(t2, vec_sub(s1, s7)); \ \ t3 = 
vec_sl(vec_sub(s5, s7), vec_4); \ t3 = vec_sub(t3, vec_sl(s3, vec_3)); \ t3 = vec_add(t3, vec_sl(s1, vec_2)); \ t3 = vec_sub(t3, vec_add(s3, s5)); \ \ s0 = vec_add(t4, t0); \ s1 = vec_add(t5, t1); \ s2 = vec_add(t6, t2); \ s3 = vec_add(t7, t3); \ s4 = vec_sub(t7, t3); \ s5 = vec_sub(t6, t2); \ s6 = vec_sub(t5, t1); \ s7 = vec_sub(t4, t0); \ }while(0) #define SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7) \ do { \ s0 = vec_sra(s0, vec_3); \ s1 = vec_sra(s1, vec_3); \ s2 = vec_sra(s2, vec_3); \ s3 = vec_sra(s3, vec_3); \ s4 = vec_sra(s4, vec_3); \ s5 = vec_sra(s5, vec_3); \ s6 = vec_sra(s6, vec_3); \ s7 = vec_sra(s7, vec_3); \ }while(0) #define SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7) \ do { \ s0 = vec_sra(s0, vec_7); \ s1 = vec_sra(s1, vec_7); \ s2 = vec_sra(s2, vec_7); \ s3 = vec_sra(s3, vec_7); \ s4 = vec_sra(vec_add(s4, vec_1s), vec_7); \ s5 = vec_sra(vec_add(s5, vec_1s), vec_7); \ s6 = vec_sra(vec_add(s6, vec_1s), vec_7); \ s7 = vec_sra(vec_add(s7, vec_1s), vec_7); \ }while(0) /* main steps of 4x4 transform */ #define STEP4(s0, s1, s2, s3, vec_rnd) \ do { \ t1 = vec_add(vec_sl(s0, vec_4), s0); \ t1 = vec_add(t1, vec_rnd); \ t2 = vec_add(vec_sl(s2, vec_4), s2); \ t0 = vec_add(t1, t2); \ t1 = vec_sub(t1, t2); \ t3 = vec_sl(vec_sub(s3, s1), vec_1); \ t3 = vec_add(t3, vec_sl(t3, vec_2)); \ t2 = vec_add(t3, vec_sl(s1, vec_5)); \ t3 = vec_add(t3, vec_sl(s3, vec_3)); \ t3 = vec_add(t3, vec_sl(s3, vec_2)); \ s0 = vec_add(t0, t2); \ s1 = vec_sub(t1, t3); \ s2 = vec_add(t1, t3); \ s3 = vec_sub(t0, t2); \ }while (0) #define SHIFT_HOR4(s0, s1, s2, s3) \ s0 = vec_sra(s0, vec_3); \ s1 = vec_sra(s1, vec_3); \ s2 = vec_sra(s2, vec_3); \ s3 = vec_sra(s3, vec_3); #define SHIFT_VERT4(s0, s1, s2, s3) \ s0 = vec_sra(s0, vec_7); \ s1 = vec_sra(s1, vec_7); \ s2 = vec_sra(s2, vec_7); \ s3 = vec_sra(s3, vec_7); /** Do inverse transform on 8x8 block */ static void vc1_inv_trans_8x8_altivec(DCTELEM block[64]) { vector signed short src0, src1, src2, src3, src4, src5, src6, src7; vector 
signed int s0, s1, s2, s3, s4, s5, s6, s7; vector signed int s8, s9, sA, sB, sC, sD, sE, sF; vector signed int t0, t1, t2, t3, t4, t5, t6, t7; const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4)); const vector unsigned int vec_7 = vec_splat_u32(7); const vector unsigned int vec_4 = vec_splat_u32(4); const vector signed int vec_4s = vec_splat_s32(4); const vector unsigned int vec_3 = vec_splat_u32(3); const vector unsigned int vec_2 = vec_splat_u32(2); const vector signed int vec_1s = vec_splat_s32(1); const vector unsigned int vec_1 = vec_splat_u32(1); src0 = vec_ld( 0, block); src1 = vec_ld( 16, block); src2 = vec_ld( 32, block); src3 = vec_ld( 48, block); src4 = vec_ld( 64, block); src5 = vec_ld( 80, block); src6 = vec_ld( 96, block); src7 = vec_ld(112, block); TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7); s0 = vec_unpackl(src0); s1 = vec_unpackl(src1); s2 = vec_unpackl(src2); s3 = vec_unpackl(src3); s4 = vec_unpackl(src4); s5 = vec_unpackl(src5); s6 = vec_unpackl(src6); s7 = vec_unpackl(src7); s8 = vec_unpackh(src0); s9 = vec_unpackh(src1); sA = vec_unpackh(src2); sB = vec_unpackh(src3); sC = vec_unpackh(src4); sD = vec_unpackh(src5); sE = vec_unpackh(src6); sF = vec_unpackh(src7); STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s); SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7); STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s); SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF); src0 = vec_pack(s8, s0); src1 = vec_pack(s9, s1); src2 = vec_pack(sA, s2); src3 = vec_pack(sB, s3); src4 = vec_pack(sC, s4); src5 = vec_pack(sD, s5); src6 = vec_pack(sE, s6); src7 = vec_pack(sF, s7); TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7); s0 = vec_unpackl(src0); s1 = vec_unpackl(src1); s2 = vec_unpackl(src2); s3 = vec_unpackl(src3); s4 = vec_unpackl(src4); s5 = vec_unpackl(src5); s6 = vec_unpackl(src6); s7 = vec_unpackl(src7); s8 = vec_unpackh(src0); s9 = vec_unpackh(src1); sA = vec_unpackh(src2); sB = vec_unpackh(src3); sC = vec_unpackh(src4); sD = 
vec_unpackh(src5); sE = vec_unpackh(src6); sF = vec_unpackh(src7); STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64); SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7); STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64); SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF); src0 = vec_pack(s8, s0); src1 = vec_pack(s9, s1); src2 = vec_pack(sA, s2); src3 = vec_pack(sB, s3); src4 = vec_pack(sC, s4); src5 = vec_pack(sD, s5); src6 = vec_pack(sE, s6); src7 = vec_pack(sF, s7); vec_st(src0, 0, block); vec_st(src1, 16, block); vec_st(src2, 32, block); vec_st(src3, 48, block); vec_st(src4, 64, block); vec_st(src5, 80, block); vec_st(src6, 96, block); vec_st(src7,112, block); } /** Do inverse transform on 8x4 part of block */ static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, DCTELEM *block) { vector signed short src0, src1, src2, src3, src4, src5, src6, src7; vector signed int s0, s1, s2, s3, s4, s5, s6, s7; vector signed int s8, s9, sA, sB, sC, sD, sE, sF; vector signed int t0, t1, t2, t3, t4, t5, t6, t7; const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4)); const vector unsigned int vec_7 = vec_splat_u32(7); const vector unsigned int vec_5 = vec_splat_u32(5); const vector unsigned int vec_4 = vec_splat_u32(4); const vector signed int vec_4s = vec_splat_s32(4); const vector unsigned int vec_3 = vec_splat_u32(3); const vector unsigned int vec_2 = vec_splat_u32(2); const vector unsigned int vec_1 = vec_splat_u32(1); vector unsigned char tmp; vector signed short tmp2, tmp3; vector unsigned char perm0, perm1, p0, p1, p; src0 = vec_ld( 0, block); src1 = vec_ld( 16, block); src2 = vec_ld( 32, block); src3 = vec_ld( 48, block); src4 = vec_ld( 64, block); src5 = vec_ld( 80, block); src6 = vec_ld( 96, block); src7 = vec_ld(112, block); TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7); s0 = vec_unpackl(src0); s1 = vec_unpackl(src1); s2 = vec_unpackl(src2); s3 = vec_unpackl(src3); s4 = vec_unpackl(src4); s5 = vec_unpackl(src5); s6 = vec_unpackl(src6); s7 = 
vec_unpackl(src7); s8 = vec_unpackh(src0); s9 = vec_unpackh(src1); sA = vec_unpackh(src2); sB = vec_unpackh(src3); sC = vec_unpackh(src4); sD = vec_unpackh(src5); sE = vec_unpackh(src6); sF = vec_unpackh(src7); STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s); SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7); STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s); SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF); src0 = vec_pack(s8, s0); src1 = vec_pack(s9, s1); src2 = vec_pack(sA, s2); src3 = vec_pack(sB, s3); src4 = vec_pack(sC, s4); src5 = vec_pack(sD, s5); src6 = vec_pack(sE, s6); src7 = vec_pack(sF, s7); TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7); s0 = vec_unpackh(src0); s1 = vec_unpackh(src1); s2 = vec_unpackh(src2); s3 = vec_unpackh(src3); s8 = vec_unpackl(src0); s9 = vec_unpackl(src1); sA = vec_unpackl(src2); sB = vec_unpackl(src3); STEP4(s0, s1, s2, s3, vec_64); SHIFT_VERT4(s0, s1, s2, s3); STEP4(s8, s9, sA, sB, vec_64); SHIFT_VERT4(s8, s9, sA, sB); src0 = vec_pack(s0, s8); src1 = vec_pack(s1, s9); src2 = vec_pack(s2, sA); src3 = vec_pack(s3, sB); p0 = vec_lvsl (0, dest); p1 = vec_lvsl (stride, dest); p = vec_splat_u8 (-1); perm0 = vec_mergeh (p, p0); perm1 = vec_mergeh (p, p1); #define ADD(dest,src,perm) \ /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \ tmp = vec_ld (0, dest); \ tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), perm); \ tmp3 = vec_adds (tmp2, src); \ tmp = vec_packsu (tmp3, tmp3); \ vec_ste ((vector unsigned int)tmp, 0, (unsigned int *)dest); \ vec_ste ((vector unsigned int)tmp, 4, (unsigned int *)dest); ADD (dest, src0, perm0) dest += stride; ADD (dest, src1, perm1) dest += stride; ADD (dest, src2, perm0) dest += stride; ADD (dest, src3, perm1) } void vc1dsp_init_altivec(DSPContext* dsp, AVCodecContext *avctx) { dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_altivec; dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec; }
123linslouis-android-video-cutter
jni/libavcodec/ppc/vc1dsp_altivec.c
C
asf20
11,122
/* * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_PPC_DSPUTIL_PPC_H #define AVCODEC_PPC_DSPUTIL_PPC_H #include "config.h" #if CONFIG_POWERPC_PERF void powerpc_display_perf_report(void); /* the 604* have 2, the G3* have 4, the G4s have 6, and the G5 are completely different (they MUST use ARCH_PPC64, and let's hope all future 64 bis PPC will use the same PMCs... 
*/ #define POWERPC_NUM_PMC_ENABLED 6 /* if you add to the enum below, also add to the perfname array in dsputil_ppc.c */ enum powerpc_perf_index { altivec_fft_num = 0, altivec_gmc1_num, altivec_dct_unquantize_h263_num, altivec_fdct, altivec_idct_add_num, altivec_idct_put_num, altivec_put_pixels16_num, altivec_avg_pixels16_num, altivec_avg_pixels8_num, altivec_put_pixels8_xy2_num, altivec_put_no_rnd_pixels8_xy2_num, altivec_put_pixels16_xy2_num, altivec_put_no_rnd_pixels16_xy2_num, altivec_hadamard8_diff8x8_num, altivec_hadamard8_diff16_num, altivec_avg_pixels8_xy2_num, powerpc_clear_blocks_dcbz32, powerpc_clear_blocks_dcbz128, altivec_put_h264_chroma_mc8_num, altivec_avg_h264_chroma_mc8_num, altivec_put_h264_qpel16_h_lowpass_num, altivec_avg_h264_qpel16_h_lowpass_num, altivec_put_h264_qpel16_v_lowpass_num, altivec_avg_h264_qpel16_v_lowpass_num, altivec_put_h264_qpel16_hv_lowpass_num, altivec_avg_h264_qpel16_hv_lowpass_num, powerpc_perf_total }; enum powerpc_data_index { powerpc_data_min = 0, powerpc_data_max, powerpc_data_sum, powerpc_data_num, powerpc_data_total }; extern unsigned long long perfdata[POWERPC_NUM_PMC_ENABLED][powerpc_perf_total][powerpc_data_total]; #if !ARCH_PPC64 #define POWERP_PMC_DATATYPE unsigned long #define POWERPC_GET_PMC1(a) __asm__ volatile("mfspr %0, 937" : "=r" (a)) #define POWERPC_GET_PMC2(a) __asm__ volatile("mfspr %0, 938" : "=r" (a)) #if (POWERPC_NUM_PMC_ENABLED > 2) #define POWERPC_GET_PMC3(a) __asm__ volatile("mfspr %0, 941" : "=r" (a)) #define POWERPC_GET_PMC4(a) __asm__ volatile("mfspr %0, 942" : "=r" (a)) #else #define POWERPC_GET_PMC3(a) do {} while (0) #define POWERPC_GET_PMC4(a) do {} while (0) #endif #if (POWERPC_NUM_PMC_ENABLED > 4) #define POWERPC_GET_PMC5(a) __asm__ volatile("mfspr %0, 929" : "=r" (a)) #define POWERPC_GET_PMC6(a) __asm__ volatile("mfspr %0, 930" : "=r" (a)) #else #define POWERPC_GET_PMC5(a) do {} while (0) #define POWERPC_GET_PMC6(a) do {} while (0) #endif #else /* ARCH_PPC64 */ #define 
POWERP_PMC_DATATYPE unsigned long long #define POWERPC_GET_PMC1(a) __asm__ volatile("mfspr %0, 771" : "=r" (a)) #define POWERPC_GET_PMC2(a) __asm__ volatile("mfspr %0, 772" : "=r" (a)) #if (POWERPC_NUM_PMC_ENABLED > 2) #define POWERPC_GET_PMC3(a) __asm__ volatile("mfspr %0, 773" : "=r" (a)) #define POWERPC_GET_PMC4(a) __asm__ volatile("mfspr %0, 774" : "=r" (a)) #else #define POWERPC_GET_PMC3(a) do {} while (0) #define POWERPC_GET_PMC4(a) do {} while (0) #endif #if (POWERPC_NUM_PMC_ENABLED > 4) #define POWERPC_GET_PMC5(a) __asm__ volatile("mfspr %0, 775" : "=r" (a)) #define POWERPC_GET_PMC6(a) __asm__ volatile("mfspr %0, 776" : "=r" (a)) #else #define POWERPC_GET_PMC5(a) do {} while (0) #define POWERPC_GET_PMC6(a) do {} while (0) #endif #endif /* ARCH_PPC64 */ #define POWERPC_PERF_DECLARE(a, cond) \ POWERP_PMC_DATATYPE \ pmc_start[POWERPC_NUM_PMC_ENABLED], \ pmc_stop[POWERPC_NUM_PMC_ENABLED], \ pmc_loop_index; #define POWERPC_PERF_START_COUNT(a, cond) do { \ POWERPC_GET_PMC6(pmc_start[5]); \ POWERPC_GET_PMC5(pmc_start[4]); \ POWERPC_GET_PMC4(pmc_start[3]); \ POWERPC_GET_PMC3(pmc_start[2]); \ POWERPC_GET_PMC2(pmc_start[1]); \ POWERPC_GET_PMC1(pmc_start[0]); \ } while (0) #define POWERPC_PERF_STOP_COUNT(a, cond) do { \ POWERPC_GET_PMC1(pmc_stop[0]); \ POWERPC_GET_PMC2(pmc_stop[1]); \ POWERPC_GET_PMC3(pmc_stop[2]); \ POWERPC_GET_PMC4(pmc_stop[3]); \ POWERPC_GET_PMC5(pmc_stop[4]); \ POWERPC_GET_PMC6(pmc_stop[5]); \ if (cond) { \ for(pmc_loop_index = 0; \ pmc_loop_index < POWERPC_NUM_PMC_ENABLED; \ pmc_loop_index++) { \ if (pmc_stop[pmc_loop_index] >= pmc_start[pmc_loop_index]) { \ POWERP_PMC_DATATYPE diff = \ pmc_stop[pmc_loop_index] - pmc_start[pmc_loop_index]; \ if (diff < perfdata[pmc_loop_index][a][powerpc_data_min]) \ perfdata[pmc_loop_index][a][powerpc_data_min] = diff; \ if (diff > perfdata[pmc_loop_index][a][powerpc_data_max]) \ perfdata[pmc_loop_index][a][powerpc_data_max] = diff; \ perfdata[pmc_loop_index][a][powerpc_data_sum] += diff; \ 
perfdata[pmc_loop_index][a][powerpc_data_num] ++; \ } \ } \ } \ } while (0) #else /* CONFIG_POWERPC_PERF */ // those are needed to avoid empty statements. #define POWERPC_PERF_DECLARE(a, cond) int altivec_placeholder __attribute__ ((unused)) #define POWERPC_PERF_START_COUNT(a, cond) do {} while (0) #define POWERPC_PERF_STOP_COUNT(a, cond) do {} while (0) #endif /* CONFIG_POWERPC_PERF */ #endif /* AVCODEC_PPC_DSPUTIL_PPC_H */
123linslouis-android-video-cutter
jni/libavcodec/ppc/dsputil_ppc.h
C
asf20
6,301
/*
 * Copyright (c) 2002 Brian Foley
 * Copyright (c) 2002 Dieter Shirley
 * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include "libavcodec/dsputil.h"
#include "dsputil_ppc.h"
#include "util_altivec.h"
#include "types_altivec.h"
#include "dsputil_altivec.h"

/* Sum of absolute differences of a 16-wide block of height h against a
   horizontally half-pel interpolated reference (average of pix2 and pix2+1). */
static int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);
    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v:  pix1[0]-pix1[15]
           pix2v:  pix2[0]-pix2[15]
           pix2iv: pix2[1]-pix2[16]   (one-pixel-shifted copy for x-interpolation)
           NOTE(review): pix2iv reads one byte past the 16-pixel row — assumed
           safe per the caller's padding contract; confirm against callers. */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix2[0];
        pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

        tv = (vector unsigned char *) &pix2[1];
        pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix2iv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

/* Sum of absolute differences of a 16-wide block of height h against a
   vertically half-pel interpolated reference (average of pix2 and pix2+line_size). */
static int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;
    uint8_t *pix3 = pix2 + line_size;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);

    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read, each
       time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15]
       Split the pixel vectors into shorts */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15] */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix3v);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2v = pix3v;          /* reuse this row's load as next row's pix2 */
        pix3 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}

/* Sum of absolute differences of a 16-wide block of height h against a
   diagonally half-pel interpolated reference: (a+b+c+d+2)>>2 per pixel. */
static int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    uint8_t *pix3 = pix2 + line_size;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    const vector unsigned short two = (const vector unsigned short)vec_splat_u16(2);
    vector unsigned char *tv, avgv, t5;
    vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
    vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
    vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
    vector unsigned short avghv, avglv;
    vector unsigned short t1, t2, t3, t4;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    s = 0;

    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read, as well
       as some splitting, and vector addition each time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v:  pix2[0]-pix2[15]  pix2iv: pix2[1]-pix2[16]
       Split the pixel vectors into shorts */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    tv = (vector unsigned char *) &pix2[1];
    pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

    pix2hv  = (vector unsigned short) vec_mergeh(zero, pix2v);
    pix2lv  = (vector unsigned short) vec_mergel(zero, pix2v);
    pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
    pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
    t1 = vec_add(pix2hv, pix2ihv);
    t2 = vec_add(pix2lv, pix2ilv);

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v:  pix1[0]-pix1[15]
           pix3v:  pix3[0]-pix3[15]  pix3iv: pix3[1]-pix3[16] */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        tv = (vector unsigned char *) &pix3[1];
        pix3iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[1]));

        /* Note that AltiVec does have vec_avg, but this works on vector pairs
           and rounds up. We could do avg(avg(a,b),avg(c,d)), but the rounding
           would mean that, for example, avg(3,0,0,1) = 2, when it should be 1.
           Instead, we have to split the pixel vectors into vectors of shorts,
           and do the averaging by hand. */

        /* Split the pixel vectors into shorts */
        pix3hv  = (vector unsigned short) vec_mergeh(zero, pix3v);
        pix3lv  = (vector unsigned short) vec_mergel(zero, pix3v);
        pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
        pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);

        /* Do the averaging on them */
        t3 = vec_add(pix3hv, pix3ihv);
        t4 = vec_add(pix3lv, pix3ilv);

        avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
        avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);

        /* Pack the shorts back into a result */
        avgv = vec_pack(avghv, avglv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix3 += line_size;
        /* Transfer the calculated values for pix3 into pix2 */
        t1 = t3;
        t2 = t4;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

/* Plain sum of absolute differences of a 16-wide block of height h. */
static int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

/* Plain sum of absolute differences of an 8-wide block of height h.
   Loads 16 pixels per row and masks off the unwanted right half. */
static int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

/* Sum of squared pixel values over a 16x16 block. */
static int pix_norm1_altivec(uint8_t *pix, int line_size)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char *tv;
    vector unsigned char pixv;
    vector unsigned int sv;
    vector signed int sum;

    sv = (vector unsigned int)vec_splat_u32(0);

    s = 0;
    for (i = 0; i < 16; i++) {
        /* Read in the potentially unaligned pixels */
        tv = (vector unsigned char *) pix;
        pixv = vec_perm(tv[0], tv[1], vec_lvsl(0, pix));

        /* Square the values, and add them to our sum */
        sv = vec_msum(pixv, pixv, sv);

        pix += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sum = vec_sums((vector signed int) sv, (vector signed int) zero);
    sum = vec_splat(sum, 3);
    vec_ste(sum, 0, &s);
    return s;
}

/**
 * Sum of Squared Errors for a 8x8 block.
 * AltiVec-enhanced.
 * It's the sad8_altivec code above w/ squaring added.
 */
static int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

/**
 * Sum of Squared Errors for a 16x16 block.
 * AltiVec-enhanced.
 * It's the sad16_altivec code above w/ squaring added.
 */
static int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

/* Sum of all pixel values in a 16x16 block. */
static int pix_sum_altivec(uint8_t * pix, int line_size)
{
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm, *pixv;
    vector unsigned char t1;
    vector unsigned int sad;
    vector signed int sumdiffs;

    int i;
    int s;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < 16; i++) {
        /* Read the potentially unaligned 16 pixels into t1 */
        perm = vec_lvsl(0, pix);
        pixv = (vector unsigned char *) pix;
        t1 = vec_perm(pixv[0], pixv[1], perm);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t1, sad);

        pix += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

/* Widen an 8x8 block of unsigned bytes into a 16-byte-aligned block of
   signed 16-bit DCT coefficients. */
static void get_pixels_altivec(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts;

    for (i = 0; i < 8; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, pixels);
        pixv = (vector unsigned char *) pixels;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts = (vector signed short)vec_mergeh(zero, bytes);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts, i*16, (vector signed short*)block);

        pixels += line_size;
    }
}

/* Store the per-pixel difference s1 - s2 of two 8x8 byte blocks into a
   16-byte-aligned block of signed 16-bit values (8 rows, unrolled by 2). */
static void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
                                const uint8_t *s2, int stride)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts1, shorts2;

    for (i = 0; i < 4; i++) {
        // Read potentially unaligned pixels
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;

        // The code below is a copy of the code above... This is a manual
        // unroll.

        // Read potentially unaligned pixels
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;
    }
}

/* Zero a 64-element (128-byte) DCTELEM block; assumes 16-byte alignment. */
static void clear_block_altivec(DCTELEM *block)
{
    LOAD_ZERO;
    vec_st(zero_s16v,   0, block);
    vec_st(zero_s16v,  16, block);
    vec_st(zero_s16v,  32, block);
    vec_st(zero_s16v,  48, block);
    vec_st(zero_s16v,  64, block);
    vec_st(zero_s16v,  80, block);
    vec_st(zero_s16v,  96, block);
    vec_st(zero_s16v, 112, block);
}

/* dst[i] += src[i] for w bytes (modular byte addition). */
static void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w)
{
    register int i;
    register vector unsigned char vdst, vsrc;

    /* dst and src are 16 bytes-aligned (guaranteed) */
    for (i = 0; (i + 15) < w; i += 16) {
        vdst = vec_ld(i, (unsigned char*)dst);
        vsrc = vec_ld(i, (unsigned char*)src);
        vdst = vec_add(vsrc, vdst);
        vec_st(vdst, i, (unsigned char*)dst);
    }
    /* if w is not a multiple of 16 */
    for (; (i < w); i++) {
        /* FIX: the scalar tail must accumulate like the vector loop above
           (vec_add), not overwrite; the old code did dst[i] = src[i]. */
        dst[i] += src[i];
    }
}

/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_pixels16_num, 1);
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;

    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    register int line_size_2 = line_size << 1;
    register int line_size_3 = line_size + line_size_2;
    register int line_size_4 = line_size << 2;

    POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);
// hand-unrolling the loop by 4 gains about 15%
// minimum execution time goes from 74 to 60 cycles
// it's faster than -funroll-loops, but using
// -funroll-loops w/ this is bad - 74 cycles again.
// all this is on a 7450, tuning for the 7450
#if 0
    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld(0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, block);
        pixels+=line_size;
        block +=line_size;
    }
#else
    for (i = 0; i < h; i += 4) {
        pixelsv1  = vec_ld( 0, pixels);
        pixelsv2  = vec_ld(15, pixels);
        pixelsv1B = vec_ld(line_size, pixels);
        pixelsv2B = vec_ld(15 + line_size, pixels);
        pixelsv1C = vec_ld(line_size_2, pixels);
        pixelsv2C = vec_ld(15 + line_size_2, pixels);
        pixelsv1D = vec_ld(line_size_3, pixels);
        pixelsv2D = vec_ld(15 + line_size_3, pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
               line_size, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
               line_size_2, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
               line_size_3, (unsigned char*)block);
        pixels+=line_size_4;
        block +=line_size_4;
    }
#endif
    POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);
}

/* next one assumes that ((line_size % 16) == 0) */
#define op_avg(a,b)  a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_avg_pixels16_num, 1);
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;

    POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);

    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        blockv = vec_ld(0, block);
        pixelsv = vec_perm(pixelsv1, pixelsv2, perm);
        blockv = vec_avg(blockv, pixelsv);
        vec_st(blockv, 0, (unsigned char*)block);
        pixels+=line_size;
        block +=line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);
}

/* next one assumes that ((line_size % 8) == 0) */
static void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_avg_pixels8_num, 1);
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;

    POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);

    for (i = 0; i < h; i++) {
        /* block is 8 bytes-aligned, so we're either in the
           left block (16 bytes-aligned) or in the right block (not) */
        int rightside = ((unsigned long)block & 0x0000000F);

        blockv = vec_ld(0, block);
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));

        if (rightside) {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
        } else {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3));
        }

        blockv = vec_avg(blockv, pixelsv);

        vec_st(blockv, 0, block);

        pixels += line_size;
        block += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);
}

/* next one assumes that ((line_size % 8) == 0) */
static void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_pixels8_xy2_num, 1);
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
}

/* next one assumes that ((line_size % 8) == 0) */
static void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels8_xy2_num, 1);
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    /* no-rounding variant: bias with 1 instead of 2 before the >>2 */
    pixelssum1 = vec_add(pixelssum1, vcone);

    POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vcone);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
}

/* next one assumes that ((line_size % 16) == 0) */
static void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_pixels16_xy2_num, 1);
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vctwo);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
}
/* next one assumes that ((line_size % 16) == 0) */
/* Put a 16xh block interpolated diagonally by a half pel, no-rounding
   variant: per-pixel (a+b+c+d+1)>>2 (bias of 1 via vcone instead of 2). */
static void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels16_xy2_num, 1);
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);

    /* Prime the first row's horizontal sums (reused as the "top" row each
       iteration, so only one unaligned row load is needed per loop pass). */
    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        /* pixels+1 is 16-byte aligned: the second load already is the
           shifted vector, no permute needed */
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    /* widen bytes to shorts: mergeh = left 8 pixels, mergel = right 8 */
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vcone);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        /* load the next row (pixels + line_size) and its +1-shifted copy */
        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        /* (top-row sum + bottom-row sum + bias) >> 2 */
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        /* carry this row's sums (plus bias) into the next iteration */
        pixelssum3 = vec_add(pixelssum4, vcone);
        pixelssum1 = vec_add(pixelssum2, vcone);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
}

/* 8x8 Hadamard-transformed SAD of (src - dst); used as a motion-estimation
   comparison function. Three butterfly stages are done via permute+mladd. */
static int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
    POWERPC_PERF_DECLARE(altivec_hadamard8_diff8x8_num, 1);
    int sum;
    register const vector unsigned char vzero =
        (const vector unsigned char)vec_splat_u8(0);
    register vector signed short temp0, temp1, temp2, temp3, temp4,
                                 temp5, temp6, temp7;
    POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
    {
    /* +/- sign patterns and element-swap permutes for the three
       butterfly stages (pairs, quads, halves) */
    register const vector signed short vprod1 =(const vector signed short)
        { 1,-1, 1,-1, 1,-1, 1,-1 };
    register const vector signed short vprod2 =(const vector signed short)
        { 1, 1,-1,-1, 1, 1,-1,-1 };
    register const vector signed short vprod3 =(const vector signed short)
        { 1, 1, 1, 1,-1,-1,-1,-1 };
    register const vector unsigned char perm1 = (const vector unsigned char)
        {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
         0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
    register const vector unsigned char perm2 = (const vector unsigned char)
        {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
         0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
    register const vector unsigned char perm3 = (const vector unsigned char)
        {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
         0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};

/* One row: load src/dst row i, widen to shorts, take the difference and
   run the three horizontal butterfly stages into `res`. */
#define ONEITERBUTTERFLY(i, res)                                          \
    {                                                                     \
        register vector unsigned char src1, src2, srcO;                   \
        register vector unsigned char dst1, dst2, dstO;                   \
        register vector signed short srcV, dstV;                          \
        register vector signed short but0, but1, but2, op1, op2, op3;     \
        src1 = vec_ld(stride * i, src);                                   \
        src2 = vec_ld((stride * i) + 15, src);                            \
        srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src));           \
        dst1 = vec_ld(stride * i, dst);                                   \
        dst2 = vec_ld((stride * i) + 15, dst);                            \
        dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst));           \
        /* promote the unsigned chars to signed shorts */                 \
        /* we're in the 8x8 function, we only care for the first 8 */     \
        srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
               (vector signed char)srcO);                                 \
        dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
               (vector signed char)dstO);                                 \
        /* subtractions inside the first butterfly */                     \
        but0 = vec_sub(srcV, dstV);                                       \
        op1 = vec_perm(but0, but0, perm1);                                \
        but1 = vec_mladd(but0, vprod1, op1);                              \
        op2 = vec_perm(but1, but1, perm2);                                \
        but2 = vec_mladd(but1, vprod2, op2);                              \
        op3 = vec_perm(but2, but2, perm3);                                \
        res = vec_mladd(but2, vprod3, op3);                               \
    }
    ONEITERBUTTERFLY(0, temp0);
    ONEITERBUTTERFLY(1, temp1);
    ONEITERBUTTERFLY(2, temp2);
    ONEITERBUTTERFLY(3, temp3);
    ONEITERBUTTERFLY(4, temp4);
    ONEITERBUTTERFLY(5, temp5);
    ONEITERBUTTERFLY(6, temp6);
    ONEITERBUTTERFLY(7, temp7);
    }
#undef ONEITERBUTTERFLY
    {
    /* vertical butterflies across the 8 transformed rows, then sum of
       absolute values of the result */
    register vector signed int vsum;
    register vector signed short line0 = vec_add(temp0, temp1);
    register vector signed short line1 = vec_sub(temp0, temp1);
    register vector signed short line2 = vec_add(temp2, temp3);
    register vector signed short line3 = vec_sub(temp2, temp3);
    register vector signed short line4 = vec_add(temp4, temp5);
    register vector signed short line5 = vec_sub(temp4, temp5);
    register vector signed short line6 = vec_add(temp6, temp7);
    register vector signed short line7 = vec_sub(temp6, temp7);

    register vector signed short line0B = vec_add(line0, line2);
    register vector signed short line2B = vec_sub(line0, line2);
    register vector signed short line1B = vec_add(line1, line3);
    register vector signed short line3B = vec_sub(line1, line3);
    register vector signed short line4B = vec_add(line4, line6);
    register vector signed short line6B = vec_sub(line4, line6);
    register vector signed short line5B = vec_add(line5, line7);
    register vector signed short line7B = vec_sub(line5, line7);

    register vector signed short line0C = vec_add(line0B, line4B);
    register vector signed short line4C = vec_sub(line0B, line4B);
    register vector signed short line1C = vec_add(line1B, line5B);
    register vector signed short line5C = vec_sub(line1B, line5B);
    register vector signed short line2C = vec_add(line2B, line6B);
    register vector signed short line6C = vec_sub(line2B, line6B);
    register vector signed short line3C = vec_add(line3B, line7B);
    register vector signed short line7C = vec_sub(line3B, line7B);

    vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
    vsum = vec_sum4s(vec_abs(line1C), vsum);
    vsum = vec_sum4s(vec_abs(line2C), vsum);
    vsum = vec_sum4s(vec_abs(line3C), vsum);
    vsum = vec_sum4s(vec_abs(line4C), vsum);
    vsum = vec_sum4s(vec_abs(line5C), vsum);
    vsum = vec_sum4s(vec_abs(line6C), vsum);
    vsum = vec_sum4s(vec_abs(line7C), vsum);
    vsum = vec_sums(vsum, (vector signed int)vzero);
    vsum = vec_splat(vsum, 3);
    vec_ste(vsum, 0, &sum);
    }
    POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff8x8_num, 1);
    return sum;
}
/*
16x8 works with 16 elements; it allows to avoid replicating loads, and
give the compiler more rooms for scheduling. It's only used from
inside hadamard8_diff16_altivec.

Unfortunately, it seems gcc-3.3 is a bit dumb, and the compiled code has
a LOT of spill code, it seems gcc (unlike xlc) cannot keep everything in
registers by itself. The following code include hand-made registers
allocation. It's not clean, but on a 7450 the resulting code is much faster
(best case fall from 700+ cycles to 550).

xlc doesn't add spill code, but it doesn't know how to schedule for the
7450, and its code isn't much faster than gcc-3.3 on the 7450 (but uses
25% less instructions...)

On the 970, the hand-made RA is still a win (around 690 vs.
  around 780), but xlc goes to around 660 on the regular C code...
*/
/* 16-wide variant of the 8x8 Hadamard difference: each ONEITERBUTTERFLY
 * processes both the low 8 pixels (temp*) and the high 8 pixels (temp*S) of
 * one 16-pixel row, so one pass covers a 16x8 area. The __asm__("vN")
 * register pinning is the hand-made register allocation described in the
 * comment above. 'h' is unused — always 8 rows. */
static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h) {
    int sum;
    register vector signed short
        temp0 __asm__ ("v0"),
        temp1 __asm__ ("v1"),
        temp2 __asm__ ("v2"),
        temp3 __asm__ ("v3"),
        temp4 __asm__ ("v4"),
        temp5 __asm__ ("v5"),
        temp6 __asm__ ("v6"),
        temp7 __asm__ ("v7");
    register vector signed short
        temp0S __asm__ ("v8"),
        temp1S __asm__ ("v9"),
        temp2S __asm__ ("v10"),
        temp3S __asm__ ("v11"),
        temp4S __asm__ ("v12"),
        temp5S __asm__ ("v13"),
        temp6S __asm__ ("v14"),
        temp7S __asm__ ("v15");
    register const vector unsigned char vzero __asm__ ("v31") =
        (const vector unsigned char)vec_splat_u8(0);
    {
    register const vector signed short vprod1 __asm__ ("v16") =
        (const vector signed short){ 1,-1, 1,-1, 1,-1, 1,-1 };
    register const vector signed short vprod2 __asm__ ("v17") =
        (const vector signed short){ 1, 1,-1,-1, 1, 1,-1,-1 };
    register const vector signed short vprod3 __asm__ ("v18") =
        (const vector signed short){ 1, 1, 1, 1,-1,-1,-1,-1 };
    register const vector unsigned char perm1 __asm__ ("v19") =
        (const vector unsigned char)
        {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
         0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
    register const vector unsigned char perm2 __asm__ ("v20") =
        (const vector unsigned char)
        {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
         0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
    register const vector unsigned char perm3 __asm__ ("v21") =
        (const vector unsigned char)
        {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
         0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};

    /* NOTE(review): temporaries deliberately reuse registers v22-v30 once
       their previous value is dead — do not reorder the statements below */
#define ONEITERBUTTERFLY(i, res1, res2) \
    { \
    register vector unsigned char src1 __asm__ ("v22"), \
                                  src2 __asm__ ("v23"), \
                                  dst1 __asm__ ("v24"), \
                                  dst2 __asm__ ("v25"), \
                                  srcO __asm__ ("v22"), \
                                  dstO __asm__ ("v23"); \
    \
    register vector signed short srcV __asm__ ("v24"), \
                                 dstV __asm__ ("v25"), \
                                 srcW __asm__ ("v26"), \
                                 dstW __asm__ ("v27"), \
                                 but0 __asm__ ("v28"), \
                                 but0S __asm__ ("v29"), \
                                 op1 __asm__ ("v30"), \
                                 but1 __asm__ ("v22"), \
                                 op1S __asm__ ("v23"), \
                                 but1S __asm__ ("v24"), \
                                 op2 __asm__ ("v25"), \
                                 but2 __asm__ ("v26"), \
                                 op2S __asm__ ("v27"), \
                                 but2S __asm__ ("v28"), \
                                 op3 __asm__ ("v29"), \
                                 op3S __asm__ ("v30"); \
    \
    src1 = vec_ld(stride * i, src); \
    src2 = vec_ld((stride * i) + 16, src); \
    srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
    dst1 = vec_ld(stride * i, dst); \
    dst2 = vec_ld((stride * i) + 16, dst); \
    dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
    /* promote the unsigned chars to signed shorts */ \
    srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
           (vector signed char)srcO); \
    dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
           (vector signed char)dstO); \
    srcW = (vector signed short)vec_mergel((vector signed char)vzero, \
           (vector signed char)srcO); \
    dstW = (vector signed short)vec_mergel((vector signed char)vzero, \
           (vector signed char)dstO); \
    /* subtractions inside the first butterfly */ \
    but0 = vec_sub(srcV, dstV); \
    but0S = vec_sub(srcW, dstW); \
    op1 = vec_perm(but0, but0, perm1); \
    but1 = vec_mladd(but0, vprod1, op1); \
    op1S = vec_perm(but0S, but0S, perm1); \
    but1S = vec_mladd(but0S, vprod1, op1S); \
    op2 = vec_perm(but1, but1, perm2); \
    but2 = vec_mladd(but1, vprod2, op2); \
    op2S = vec_perm(but1S, but1S, perm2); \
    but2S = vec_mladd(but1S, vprod2, op2S); \
    op3 = vec_perm(but2, but2, perm3); \
    res1 = vec_mladd(but2, vprod3, op3); \
    op3S = vec_perm(but2S, but2S, perm3); \
    res2 = vec_mladd(but2S, vprod3, op3S); \
    }
    ONEITERBUTTERFLY(0, temp0, temp0S);
    ONEITERBUTTERFLY(1, temp1, temp1S);
    ONEITERBUTTERFLY(2, temp2, temp2S);
    ONEITERBUTTERFLY(3, temp3, temp3S);
    ONEITERBUTTERFLY(4, temp4, temp4S);
    ONEITERBUTTERFLY(5, temp5, temp5S);
    ONEITERBUTTERFLY(6, temp6, temp6S);
    ONEITERBUTTERFLY(7, temp7, temp7S);
    }
#undef ONEITERBUTTERFLY
    {
    /* vertical butterflies + absolute-value accumulation, first for the low
       half (line*), then for the high half (line*S) */
    register vector signed int vsum;
    register vector signed short line0S, line1S, line2S, line3S, line4S,
                                 line5S, line6S, line7S, line0BS,line2BS,
                                 line1BS,line3BS,line4BS,line6BS,line5BS,
                                 line7BS,line0CS,line4CS,line1CS,line5CS,
                                 line2CS,line6CS,line3CS,line7CS;

    register vector signed short line0 = vec_add(temp0, temp1);
    register vector signed short line1 = vec_sub(temp0, temp1);
    register vector signed short line2 = vec_add(temp2, temp3);
    register vector signed short line3 = vec_sub(temp2, temp3);
    register vector signed short line4 = vec_add(temp4, temp5);
    register vector signed short line5 = vec_sub(temp4, temp5);
    register vector signed short line6 = vec_add(temp6, temp7);
    register vector signed short line7 = vec_sub(temp6, temp7);

    register vector signed short line0B = vec_add(line0, line2);
    register vector signed short line2B = vec_sub(line0, line2);
    register vector signed short line1B = vec_add(line1, line3);
    register vector signed short line3B = vec_sub(line1, line3);
    register vector signed short line4B = vec_add(line4, line6);
    register vector signed short line6B = vec_sub(line4, line6);
    register vector signed short line5B = vec_add(line5, line7);
    register vector signed short line7B = vec_sub(line5, line7);

    register vector signed short line0C = vec_add(line0B, line4B);
    register vector signed short line4C = vec_sub(line0B, line4B);
    register vector signed short line1C = vec_add(line1B, line5B);
    register vector signed short line5C = vec_sub(line1B, line5B);
    register vector signed short line2C = vec_add(line2B, line6B);
    register vector signed short line6C = vec_sub(line2B, line6B);
    register vector signed short line3C = vec_add(line3B, line7B);
    register vector signed short line7C = vec_sub(line3B, line7B);

    vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
    vsum = vec_sum4s(vec_abs(line1C), vsum);
    vsum = vec_sum4s(vec_abs(line2C), vsum);
    vsum = vec_sum4s(vec_abs(line3C), vsum);
    vsum = vec_sum4s(vec_abs(line4C), vsum);
    vsum = vec_sum4s(vec_abs(line5C), vsum);
    vsum = vec_sum4s(vec_abs(line6C), vsum);
    vsum = vec_sum4s(vec_abs(line7C), vsum);

    line0S = vec_add(temp0S, temp1S);
    line1S = vec_sub(temp0S, temp1S);
    line2S = vec_add(temp2S, temp3S);
    line3S = vec_sub(temp2S, temp3S);
    line4S = vec_add(temp4S, temp5S);
    line5S = vec_sub(temp4S, temp5S);
    line6S = vec_add(temp6S, temp7S);
    line7S = vec_sub(temp6S, temp7S);

    line0BS = vec_add(line0S, line2S);
    line2BS = vec_sub(line0S, line2S);
    line1BS = vec_add(line1S, line3S);
    line3BS = vec_sub(line1S, line3S);
    line4BS = vec_add(line4S, line6S);
    line6BS = vec_sub(line4S, line6S);
    line5BS = vec_add(line5S, line7S);
    line7BS = vec_sub(line5S, line7S);

    line0CS = vec_add(line0BS, line4BS);
    line4CS = vec_sub(line0BS, line4BS);
    line1CS = vec_add(line1BS, line5BS);
    line5CS = vec_sub(line1BS, line5BS);
    line2CS = vec_add(line2BS, line6BS);
    line6CS = vec_sub(line2BS, line6BS);
    line3CS = vec_add(line3BS, line7BS);
    line7CS = vec_sub(line3BS, line7BS);

    vsum = vec_sum4s(vec_abs(line0CS), vsum);
    vsum = vec_sum4s(vec_abs(line1CS), vsum);
    vsum = vec_sum4s(vec_abs(line2CS), vsum);
    vsum = vec_sum4s(vec_abs(line3CS), vsum);
    vsum = vec_sum4s(vec_abs(line4CS), vsum);
    vsum = vec_sum4s(vec_abs(line5CS), vsum);
    vsum = vec_sum4s(vec_abs(line6CS), vsum);
    vsum = vec_sum4s(vec_abs(line7CS), vsum);
    vsum = vec_sums(vsum, (vector signed int)vzero);
    vsum = vec_splat(vsum, 3);
    vec_ste(vsum, 0, &sum);
    }
    return sum;
}

/* Public 16-wide Hadamard difference: one 16x8 pass, plus a second pass for
 * the lower half when h == 16. */
static int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
POWERPC_PERF_DECLARE(altivec_hadamard8_diff16_num, 1);
    int score;
POWERPC_PERF_START_COUNT(altivec_hadamard8_diff16_num, 1);
    score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    if (h==16) {
        dst += 8*stride;
        src += 8*stride;
        score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    }
POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff16_num, 1);
    return score;
}

/* Vorbis magnitude/angle inverse coupling, 4 floats per iteration.
 * Branch-free vector form of:
 *   if (mag > 0) { if (ang > 0) ang = mag - ang; else { ang = mag + ang?... } }
 * implemented with sign-bit flips (xor with t0 << 31) and masked add/sub.
 * assumes blocksize is a multiple of 4 and mag/ang are 16-byte aligned
 * (vec_ld/vec_stl with offset 0) — TODO confirm against callers. */
static void vorbis_inverse_coupling_altivec(float *mag, float *ang,
                                            int blocksize)
{
    int i;
    vector float m, a;
    vector bool int t0, t1;
    const vector unsigned int v_31 = //XXX
        vec_add(vec_add(vec_splat_u32(15),vec_splat_u32(15)),vec_splat_u32(1));
    for (i = 0; i < blocksize; i += 4) {
        m = vec_ld(0, mag+i);
        a = vec_ld(0, ang+i);
        t0 = vec_cmple(m, (vector float)vec_splat_u32(0));
        t1 = vec_cmple(a, (vector float)vec_splat_u32(0));
        a = vec_xor(a, (vector float) vec_sl((vector unsigned int)t0, v_31));
        t0 = (vector bool int)vec_and(a, t1);
        t1 = (vector bool int)vec_andc(a, t1);
        a = vec_sub(m, (vector float)t1);
        m = vec_add(m, (vector float)t0);
        vec_stl(a, 0, ang+i);
        vec_stl(m, 0, mag+i);
    }
}

/* next one assumes that ((line_size % 8) == 0) */
/* Half-pel x+y interpolation of an 8-wide block, rounding variant (+2 before
 * >>2), averaged into the destination (vec_avg with the existing block).
 * The 8 result bytes are merged into the correct half of the 16-byte
 * destination vector depending on block's alignment (rightside). */
static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_avg_pixels8_xy2_num, 1);
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2, blocktemp;
    register vector unsigned short pixelssum1, pixelssum2, temp3;

    register const vector unsigned char vczero = (const vector unsigned char)
                                        vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)
                                        vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blocktemp, blockv);
        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1);
}

/* Install all AltiVec-accelerated DSP function pointers into the DSPContext.
 * Called from dsputil_init_ppc() once has_altivec() has confirmed support. */
void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
    c->pix_abs[0][1] = sad16_x2_altivec;
    c->pix_abs[0][2] = sad16_y2_altivec;
    c->pix_abs[0][3] = sad16_xy2_altivec;
    c->pix_abs[0][0] = sad16_altivec;
    c->pix_abs[1][0] = sad8_altivec;
    c->sad[0]= sad16_altivec;
    c->sad[1]= sad8_altivec;
    c->pix_norm1 = pix_norm1_altivec;
    c->sse[1]= sse8_altivec;
    c->sse[0]= sse16_altivec;
    c->pix_sum = pix_sum_altivec;
    c->diff_pixels = diff_pixels_altivec;
    c->get_pixels = get_pixels_altivec;
    c->clear_block = clear_block_altivec;
    c->add_bytes= add_bytes_altivec;
    c->put_pixels_tab[0][0] = put_pixels16_altivec;
    /* the two functions do the same thing, so use the same code */
    c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec;
    c->avg_pixels_tab[0][0] = avg_pixels16_altivec;
    c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
    c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
    c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
    c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
    c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
    c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
    c->hadamard8_diff[0] = hadamard8_diff16_altivec;
    c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
    if (CONFIG_VORBIS_DECODER)
        c->vorbis_inverse_coupling = vorbis_inverse_coupling_altivec;
}
123linslouis-android-video-cutter
jni/libavcodec/ppc/dsputil_altivec.c
C
asf20
57,934
/*
 * simple math operations
 * Copyright (c) 2001, 2002 Fabrice Bellard
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* PowerPC overrides for the generic math macros in libavcodec/mathops.h:
 * single-instruction 16x16 multiplies on PPC4xx, mulhw-based MULH, and
 * 64-bit multiply-accumulate built from 32-bit mullw/mulhw on 32-bit PPC. */

#ifndef AVCODEC_PPC_MATHOPS_H
#define AVCODEC_PPC_MATHOPS_H

#include <stdint.h>
#include "config.h"
#include "libavutil/common.h"

#if HAVE_PPC4XX
/* signed 16x16 -> 32 multiply add accumulate */
#define MAC16(rt, ra, rb) \
    __asm__ ("maclhw %0, %2, %3" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb));

/* signed 16x16 -> 32 multiply */
#define MUL16(ra, rb) \
    ({ int __rt; \
    __asm__ ("mullhw %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb)); \
    __rt; })
#endif

/* high 32 bits of the signed 64-bit product of two 32-bit ints */
#define MULH MULH
static inline av_const int MULH(int a, int b){
    int r;
    __asm__ ("mulhw %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

#if !ARCH_PPC64
/* d + (int64_t)a*b on 32-bit PPC: 64-bit add of the mullw/mulhw halves with
 * carry propagation (addc/adde).
 * NOTE(review): the union access assumes big-endian word order in hl[]
 * (hl[0] = high word) — true for PPC, the only target of this header. */
static inline av_const int64_t MAC64(int64_t d, int a, int b)
{
    union { uint64_t x; unsigned hl[2]; } x = { d };
    int h, l;
    __asm__ ("mullw %3, %4, %5   \n\t"
             "mulhw %2, %4, %5   \n\t"
             "addc  %1, %1, %3   \n\t"
             "adde  %0, %0, %2   \n\t"
             : "+r"(x.hl[0]), "+r"(x.hl[1]), "=&r"(h), "=&r"(l)
             : "r"(a), "r"(b));
    return x.x;
}
#define MAC64(d, a, b) ((d) = MAC64(d, a, b))

/* d - (int64_t)a*b, same technique with borrow (subfc/subfe) */
static inline av_const int64_t MLS64(int64_t d, int a, int b)
{
    union { uint64_t x; unsigned hl[2]; } x = { d };
    int h, l;
    __asm__ ("mullw %3, %4, %5   \n\t"
             "mulhw %2, %4, %5   \n\t"
             "subfc %1, %3, %1   \n\t"
             "subfe %0, %2, %0   \n\t"
             : "+r"(x.hl[0]), "+r"(x.hl[1]), "=&r"(h), "=&r"(l)
             : "r"(a), "r"(b));
    return x.x;
}
#define MLS64(d, a, b) ((d) = MLS64(d, a, b))
#endif

#endif /* AVCODEC_PPC_MATHOPS_H */
123linslouis-android-video-cutter
jni/libavcodec/ppc/mathops.h
C
asf20
2,486
/*
 * FFT/IFFT transforms
 * AltiVec-enabled
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 * Based on code Copyright (c) 2002 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavcodec/fft.h"
#include "dsputil_ppc.h"
#include "util_altivec.h"
#include "dsputil_altivec.h"

/**
 * Do a complex FFT with the parameters defined in ff_fft_init(). The
 * input data must be permuted before with s->revtab table. No
 * 1.0/sqrt(n) normalization is done.
 * AltiVec-enabled
 * This code assumes that the 'z' pointer is 16 bytes-aligned
 * It also assumes all FFTComplex are 8 bytes-aligned pair of float
 * The code is exactly the same as the SSE version, except
 * that successive MUL + ADD/SUB have been merged into
 * fused multiply-add ('vec_madd' in altivec)
 */
/* vcii/vcprmle are sign-pattern / little-endian-permute helper macros —
 * presumably from util_altivec.h; confirm before editing their arguments. */
static void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z)
{
POWERPC_PERF_DECLARE(altivec_fft_num, s->nbits >= 6);
    /* NOTE(review): float literal 0. passed to vec_splat_u32 — works as a
       compile-time constant but the idiomatic argument is the integer 0 */
    register const vector float vczero = (const vector float)vec_splat_u32(0.);

    int ln = s->nbits;
    int j, np, np2;
    int nblocks, nloops;
    register FFTComplex *p, *q;
    FFTComplex *cptr, *cptr1;
    int k;

POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6);

    np = 1 << ln;

    /* passes 0 and 1, fused: two radix-2 stages on each group of four
       complex values (two 16-byte vectors) */
    {
        vector float *r, a, b, a1, c1, c2;

        r = (vector float *)&z[0];

        c1 = vcii(p,p,n,n);

        if (s->inverse) {
            c2 = vcii(p,p,n,p);
        } else {
            c2 = vcii(p,p,p,n);
        }

        j = (np >> 2);
        do {
            a = vec_ld(0, r);
            a1 = vec_ld(sizeof(vector float), r);

            b = vec_perm(a,a,vcprmle(1,0,3,2));
            a = vec_madd(a,c1,b);
            /* do the pass 0 butterfly */

            b = vec_perm(a1,a1,vcprmle(1,0,3,2));
            b = vec_madd(a1,c1,b);
            /* do the pass 0 butterfly */

            /* multiply third by -i */
            b = vec_perm(b,b,vcprmle(2,3,1,0));

            /* do the pass 1 butterfly */
            vec_st(vec_madd(b,c2,a), 0, r);
            vec_st(vec_nmsub(b,c2,a), sizeof(vector float), r);

            r += 2;
        } while (--j != 0);
    }
    /* pass 2 .. ln-1 */

    nblocks = np >> 3;
    nloops = 1 << 2;
    np2 = np >> 1;

    cptr1 = s->exptab1;
    do {
        p = z;
        q = z + nloops;
        j = nblocks;
        do {
            cptr = cptr1;
            k = nloops >> 1;
            do {
                vector float a,b,c,t1;

                a = vec_ld(0, (float*)p);
                b = vec_ld(0, (float*)q);

                /* complex mul */
                c = vec_ld(0, (float*)cptr);
                /*  cre*re cim*re */
                t1 = vec_madd(c, vec_perm(b,b,vcprmle(2,2,0,0)),vczero);
                c = vec_ld(sizeof(vector float), (float*)cptr);
                /*  -cim*im cre*im */
                b = vec_madd(c, vec_perm(b,b,vcprmle(3,3,1,1)),t1);

                /* butterfly */
                vec_st(vec_add(a,b), 0, (float*)p);
                vec_st(vec_sub(a,b), 0, (float*)q);

                p += 2;
                q += 2;
                cptr += 4;
            } while (--k);

            p += nloops;
            q += nloops;
        } while (--j);
        cptr1 += nloops * 2;
        nblocks = nblocks >> 1;
        nloops = nloops << 1;
    } while (nblocks != 0);

POWERPC_PERF_STOP_COUNT(altivec_fft_num, s->nbits >= 6);
}

/* Hook the AltiVec FFT into the FFTContext; split-radix permutation is not
 * supported by this implementation, hence split_radix = 0. */
av_cold void ff_fft_init_altivec(FFTContext *s)
{
    s->fft_calc = ff_fft_calc_altivec;
    s->split_radix = 0;
}
123linslouis-android-video-cutter
jni/libavcodec/ppc/fft_altivec.c
C
asf20
4,191
/*
 * Copyright (c) 2002 Brian Foley
 * Copyright (c) 2002 Dieter Shirley
 * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "dsputil_ppc.h"
#include "dsputil_altivec.h"

int mm_flags = 0;

/* Report the SIMD capabilities of this CPU as FF_MM_* flags. */
int mm_support(void)
{
    int result = 0;
#if HAVE_ALTIVEC
    if (has_altivec()) {
        result |= FF_MM_ALTIVEC;
    }
#endif /* result */
    return result;
}

#if CONFIG_POWERPC_PERF
unsigned long long perfdata[POWERPC_NUM_PMC_ENABLED][powerpc_perf_total][powerpc_data_total];
/* list below must match enum in dsputil_ppc.h */
/* NOTE(review): string literals assigned to unsigned char* — most compilers
   warn (string literals are char[]); presumably should be const char*. */
static unsigned char* perfname[] = {
    "ff_fft_calc_altivec",
    "gmc1_altivec",
    "dct_unquantize_h263_altivec",
    "fdct_altivec",
    "idct_add_altivec",
    "idct_put_altivec",
    "put_pixels16_altivec",
    "avg_pixels16_altivec",
    "avg_pixels8_altivec",
    "put_pixels8_xy2_altivec",
    "put_no_rnd_pixels8_xy2_altivec",
    "put_pixels16_xy2_altivec",
    "put_no_rnd_pixels16_xy2_altivec",
    "hadamard8_diff8x8_altivec",
    "hadamard8_diff16_altivec",
    "avg_pixels8_xy2_altivec",
    "clear_blocks_dcbz32_ppc",
    "clear_blocks_dcbz128_ppc",
    "put_h264_chroma_mc8_altivec",
    "avg_h264_chroma_mc8_altivec",
    "put_h264_qpel16_h_lowpass_altivec",
    "avg_h264_qpel16_h_lowpass_altivec",
    "put_h264_qpel16_v_lowpass_altivec",
    "avg_h264_qpel16_v_lowpass_altivec",
    "put_h264_qpel16_hv_lowpass_altivec",
    "avg_h264_qpel16_hv_lowpass_altivec",
    ""
};
#include <stdio.h>
#endif

#if CONFIG_POWERPC_PERF
/* Dump min/max/avg PMC cycle counts gathered by the POWERPC_PERF_* macros
 * for every instrumented function that actually ran. */
void powerpc_display_perf_report(void)
{
    int i, j;
    av_log(NULL, AV_LOG_INFO, "PowerPC performance report\n Values are from the PMC registers, and represent whatever the registers are set to record.\n");
    for(i = 0 ; i < powerpc_perf_total ; i++) {
        for (j = 0; j < POWERPC_NUM_PMC_ENABLED ; j++) {
            if (perfdata[j][i][powerpc_data_num] != (unsigned long long)0)
                av_log(NULL, AV_LOG_INFO,
                       " Function \"%s\" (pmc%d):\n\tmin: %"PRIu64"\n\tmax: %"PRIu64"\n\tavg: %1.2lf (%"PRIu64")\n",
                       perfname[i],
                       j+1,
                       perfdata[j][i][powerpc_data_min],
                       perfdata[j][i][powerpc_data_max],
                       (double)perfdata[j][i][powerpc_data_sum] /
                       (double)perfdata[j][i][powerpc_data_num],
                       perfdata[j][i][powerpc_data_num]);
        }
    }
}
#endif /* CONFIG_POWERPC_PERF */

/* ***** WARNING ***** WARNING ***** WARNING ***** */
/*
clear_blocks_dcbz32_ppc will not work properly
on PowerPC processors with a cache line size
not equal to 32 bytes.
Fortunately all processor used by Apple up to at least
the 7450 (aka second generation G4) use 32 bytes cache line.
This is due to the use of the 'dcbz' instruction.
It simply clear to zero a single cache line,
so you need to know the cache line size to use it !
It's absurd, but it's fast...

update 24/06/2003 : Apple released yesterday the G5,
with a PPC970. cache line size : 128 bytes. Oups.
The semantic of dcbz was changed, it always clear
32 bytes. so the function below will work, but will
be slow. So I fixed check_dcbz_effect to use dcbzl,
which is defined to clear a cache line (as dcbz before).
So we still can distinguish, and use dcbz (32 bytes)
or dcbzl (one cache line) as required.

see <http://developer.apple.com/technotes/tn/tn2087.html>
and <http://developer.apple.com/technotes/tn/tn2086.html>
*/
/* Zero 6 64-coefficient DCT blocks using one dcbz per 32-byte cache line.
 * When blocks is only 16-byte aligned (misal), the first and last 16 bytes
 * are cleared with scalar stores and the dcbz loop covers the rest.
 * NOTE(review): the [0..3] / [188..191] unsigned-long indices assume
 * sizeof(unsigned long) == 4 (32-bit PPC); on a 64-bit ABI they would
 * clear the wrong ranges — confirm this file is built 32-bit only. */
static void clear_blocks_dcbz32_ppc(DCTELEM *blocks)
{
POWERPC_PERF_DECLARE(powerpc_clear_blocks_dcbz32, 1);
    register int misal = ((unsigned long)blocks & 0x00000010);
    register int i = 0;
POWERPC_PERF_START_COUNT(powerpc_clear_blocks_dcbz32, 1);
#if 1
    if (misal) {
        ((unsigned long*)blocks)[0] = 0L;
        ((unsigned long*)blocks)[1] = 0L;
        ((unsigned long*)blocks)[2] = 0L;
        ((unsigned long*)blocks)[3] = 0L;
        i += 16;
    }
    for ( ; i < sizeof(DCTELEM)*6*64-31 ; i += 32) {
        __asm__ volatile("dcbz %0,%1" : : "b" (blocks), "r" (i) : "memory");
    }
    if (misal) {
        ((unsigned long*)blocks)[188] = 0L;
        ((unsigned long*)blocks)[189] = 0L;
        ((unsigned long*)blocks)[190] = 0L;
        ((unsigned long*)blocks)[191] = 0L;
        i += 16;
    }
#else
    memset(blocks, 0, sizeof(DCTELEM)*6*64);
#endif
POWERPC_PERF_STOP_COUNT(powerpc_clear_blocks_dcbz32, 1);
}

/* same as above, when dcbzl clear a whole 128B cache line
   i.e. the PPC970 aka G5 */
#if HAVE_DCBZL
static void clear_blocks_dcbz128_ppc(DCTELEM *blocks)
{
POWERPC_PERF_DECLARE(powerpc_clear_blocks_dcbz128, 1);
    register int misal = ((unsigned long)blocks & 0x0000007f);
    register int i = 0;
POWERPC_PERF_START_COUNT(powerpc_clear_blocks_dcbz128, 1);
#if 1
    if (misal) {
        // we could probably also optimize this case,
        // but there's not much point as the machines
        // aren't available yet (2003-06-26)
        memset(blocks, 0, sizeof(DCTELEM)*6*64);
    }
    else
        for ( ; i < sizeof(DCTELEM)*6*64 ; i += 128) {
            __asm__ volatile("dcbzl %0,%1" : : "b" (blocks), "r" (i) : "memory");
        }
#else
    memset(blocks, 0, sizeof(DCTELEM)*6*64);
#endif
POWERPC_PERF_STOP_COUNT(powerpc_clear_blocks_dcbz128, 1);
}
#else
static void clear_blocks_dcbz128_ppc(DCTELEM *blocks)
{
    memset(blocks, 0, sizeof(DCTELEM)*6*64);
}
#endif

#if HAVE_DCBZL
/* check dcbz report how many bytes are set to 0 by dcbz */
/* update 24/06/2003 : replace dcbz by dcbzl to get
   the intended effect (Apple "fixed" dcbz)
   unfortunately this cannot be used unless the assembler
   knows about dcbzl ... */
/* Probe at runtime how many bytes one dcbzl clears (the cache line size),
 * by executing it in the middle of a 0xFF-filled buffer and counting the
 * zeroed bytes. Returns 0 on allocation failure. */
static long check_dcbzl_effect(void)
{
    register char *fakedata = av_malloc(1024);
    register char *fakedata_middle;
    register long zero = 0;
    register long i = 0;
    long count = 0;

    if (!fakedata) {
        return 0L;
    }

    fakedata_middle = (fakedata + 512);

    memset(fakedata, 0xFF, 1024);

    /* below the constraint "b" seems to mean "Address base register"
       in gcc-3.3 / RS/6000 speaks. seems to avoid using r0, so.... */
    __asm__ volatile("dcbzl %0, %1" : : "b" (fakedata_middle), "r" (zero));

    for (i = 0; i < 1024 ; i ++) {
        if (fakedata[i] == (char)0)
            count++;
    }

    av_free(fakedata);

    return count;
}
#else
static long check_dcbzl_effect(void)
{
    return 0;
}
#endif

/* Touch h rows of 'mem' into the data cache with dcbt (non-faulting hint). */
static void prefetch_ppc(void *mem, int stride, int h)
{
    register const uint8_t *p = mem;
    do {
        __asm__ volatile ("dcbt 0,%0" : : "r" (p));
        p+= stride;
    } while(--h);
}

/* Top-level PPC DSP initializer: installs scalar PPC optimizations
 * (prefetch, dcbz-based block clearing sized by runtime probe), then the
 * AltiVec function tables when the CPU supports them. */
void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
{
    // Common optimizations whether AltiVec is available or not
    c->prefetch = prefetch_ppc;
    switch (check_dcbzl_effect()) {
        case 32:
            c->clear_blocks = clear_blocks_dcbz32_ppc;
            break;
        case 128:
            c->clear_blocks = clear_blocks_dcbz128_ppc;
            break;
        default:
            break;
    }

#if HAVE_ALTIVEC
    if(CONFIG_H264_DECODER) dsputil_h264_init_ppc(c, avctx);

    if (has_altivec()) {
        mm_flags |= FF_MM_ALTIVEC;

        dsputil_init_altivec(c, avctx);
        if(CONFIG_VC1_DECODER)
            vc1dsp_init_altivec(c, avctx);
        float_init_altivec(c, avctx);
        int_init_altivec(c, avctx);
        c->gmc1 = gmc1_altivec;

#if CONFIG_ENCODERS
        if (avctx->dct_algo == FF_DCT_AUTO ||
            avctx->dct_algo == FF_DCT_ALTIVEC) {
            c->fdct = fdct_altivec;
        }
#endif //CONFIG_ENCODERS

        if (avctx->lowres==0) {
            if ((avctx->idct_algo == FF_IDCT_AUTO) ||
                (avctx->idct_algo == FF_IDCT_ALTIVEC)) {
                c->idct_put = idct_put_altivec;
                c->idct_add = idct_add_altivec;
                c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
            }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) &&
                     avctx->idct_algo==FF_IDCT_VP3){
                c->idct_put = ff_vp3_idct_put_altivec;
                c->idct_add = ff_vp3_idct_add_altivec;
                c->idct = ff_vp3_idct_altivec;
                c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
            }
        }

#if CONFIG_POWERPC_PERF
        {
            int i, j;
            for (i = 0 ; i < powerpc_perf_total ; i++) {
                for (j = 0; j < POWERPC_NUM_PMC_ENABLED ; j++) {
                    perfdata[j][i][powerpc_data_min] = 0xFFFFFFFFFFFFFFFFULL;
                    perfdata[j][i][powerpc_data_max] = 0x0000000000000000ULL;
                    perfdata[j][i][powerpc_data_sum] = 0x0000000000000000ULL;
                    perfdata[j][i][powerpc_data_num] = 0x0000000000000000ULL;
                }
            }
        }
#endif /* CONFIG_POWERPC_PERF */
    }
#endif /* HAVE_ALTIVEC */
}
123linslouis-android-video-cutter
jni/libavcodec/ppc/dsputil_ppc.c
C
asf20
9,640
/* * Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavcodec/dsputil.h" #include "dsputil_altivec.h" #include "util_altivec.h" static void vector_fmul_altivec(float *dst, const float *src, int len) { int i; vector float d0, d1, s, zero = (vector float)vec_splat_u32(0); for(i=0; i<len-7; i+=8) { d0 = vec_ld(0, dst+i); s = vec_ld(0, src+i); d1 = vec_ld(16, dst+i); d0 = vec_madd(d0, s, zero); d1 = vec_madd(d1, vec_ld(16,src+i), zero); vec_st(d0, 0, dst+i); vec_st(d1, 16, dst+i); } } static void vector_fmul_reverse_altivec(float *dst, const float *src0, const float *src1, int len) { int i; vector float d, s0, s1, h0, l0, s2, s3, zero = (vector float)vec_splat_u32(0); src1 += len-4; for(i=0; i<len-7; i+=8) { s1 = vec_ld(0, src1-i); // [a,b,c,d] s0 = vec_ld(0, src0+i); l0 = vec_mergel(s1, s1); // [c,c,d,d] s3 = vec_ld(-16, src1-i); h0 = vec_mergeh(s1, s1); // [a,a,b,b] s2 = vec_ld(16, src0+i); s1 = vec_mergeh(vec_mergel(l0,h0), // [d,b,d,b] vec_mergeh(l0,h0)); // [c,a,c,a] // [d,c,b,a] l0 = vec_mergel(s3, s3); d = vec_madd(s0, s1, zero); h0 = vec_mergeh(s3, s3); vec_st(d, 0, dst+i); s3 = vec_mergeh(vec_mergel(l0,h0), vec_mergeh(l0,h0)); d = vec_madd(s2, s3, zero); vec_st(d, 16, dst+i); } } static void 
vector_fmul_add_altivec(float *dst, const float *src0, const float *src1, const float *src2, int len) { int i; vector float d, s0, s1, s2, t0, t1, edges; vector unsigned char align = vec_lvsr(0,dst), mask = vec_lvsl(0, dst); for (i=0; i<len-3; i+=4) { t0 = vec_ld(0, dst+i); t1 = vec_ld(15, dst+i); s0 = vec_ld(0, src0+i); s1 = vec_ld(0, src1+i); s2 = vec_ld(0, src2+i); edges = vec_perm(t1 ,t0, mask); d = vec_madd(s0,s1,s2); t1 = vec_perm(d, edges, align); t0 = vec_perm(edges, d, align); vec_st(t1, 15, dst+i); vec_st(t0, 0, dst+i); } } static void vector_fmul_window_altivec(float *dst, const float *src0, const float *src1, const float *win, float add_bias, int len) { union { vector float v; float s[4]; } vadd; vector float vadd_bias, zero, t0, t1, s0, s1, wi, wj; const vector unsigned char reverse = vcprm(3,2,1,0); int i,j; dst += len; win += len; src0+= len; vadd.s[0] = add_bias; vadd_bias = vec_splat(vadd.v, 0); zero = (vector float)vec_splat_u32(0); for(i=-len*4, j=len*4-16; i<0; i+=16, j-=16) { s0 = vec_ld(i, src0); s1 = vec_ld(j, src1); wi = vec_ld(i, win); wj = vec_ld(j, win); s1 = vec_perm(s1, s1, reverse); wj = vec_perm(wj, wj, reverse); t0 = vec_madd(s0, wj, vadd_bias); t0 = vec_nmsub(s1, wi, t0); t1 = vec_madd(s0, wi, vadd_bias); t1 = vec_madd(s1, wj, t1); t1 = vec_perm(t1, t1, reverse); vec_st(t0, i, dst); vec_st(t1, j, dst); } } static void int32_to_float_fmul_scalar_altivec(float *dst, const int *src, float mul, int len) { union { vector float v; float s[4]; } mul_u; int i; vector float src1, src2, dst1, dst2, mul_v, zero; zero = (vector float)vec_splat_u32(0); mul_u.s[0] = mul; mul_v = vec_splat(mul_u.v, 0); for(i=0; i<len; i+=8) { src1 = vec_ctf(vec_ld(0, src+i), 0); src2 = vec_ctf(vec_ld(16, src+i), 0); dst1 = vec_madd(src1, mul_v, zero); dst2 = vec_madd(src2, mul_v, zero); vec_st(dst1, 0, dst+i); vec_st(dst2, 16, dst+i); } } static vector signed short float_to_int16_one_altivec(const float *src) { vector float s0 = vec_ld(0, src); vector float s1 = 
vec_ld(16, src); vector signed int t0 = vec_cts(s0, 0); vector signed int t1 = vec_cts(s1, 0); return vec_packs(t0,t1); } static void float_to_int16_altivec(int16_t *dst, const float *src, long len) { int i; vector signed short d0, d1, d; vector unsigned char align; if(((long)dst)&15) //FIXME for(i=0; i<len-7; i+=8) { d0 = vec_ld(0, dst+i); d = float_to_int16_one_altivec(src+i); d1 = vec_ld(15, dst+i); d1 = vec_perm(d1, d0, vec_lvsl(0,dst+i)); align = vec_lvsr(0, dst+i); d0 = vec_perm(d1, d, align); d1 = vec_perm(d, d1, align); vec_st(d0, 0, dst+i); vec_st(d1,15, dst+i); } else for(i=0; i<len-7; i+=8) { d = float_to_int16_one_altivec(src+i); vec_st(d, 0, dst+i); } } static void float_to_int16_interleave_altivec(int16_t *dst, const float **src, long len, int channels) { int i; vector signed short d0, d1, d2, c0, c1, t0, t1; vector unsigned char align; if(channels == 1) float_to_int16_altivec(dst, src[0], len); else if (channels == 2) { if(((long)dst)&15) for(i=0; i<len-7; i+=8) { d0 = vec_ld(0, dst + i); t0 = float_to_int16_one_altivec(src[0] + i); d1 = vec_ld(31, dst + i); t1 = float_to_int16_one_altivec(src[1] + i); c0 = vec_mergeh(t0, t1); c1 = vec_mergel(t0, t1); d2 = vec_perm(d1, d0, vec_lvsl(0, dst + i)); align = vec_lvsr(0, dst + i); d0 = vec_perm(d2, c0, align); d1 = vec_perm(c0, c1, align); vec_st(d0, 0, dst + i); d0 = vec_perm(c1, d2, align); vec_st(d1, 15, dst + i); vec_st(d0, 31, dst + i); dst+=8; } else for(i=0; i<len-7; i+=8) { t0 = float_to_int16_one_altivec(src[0] + i); t1 = float_to_int16_one_altivec(src[1] + i); d0 = vec_mergeh(t0, t1); d1 = vec_mergel(t0, t1); vec_st(d0, 0, dst + i); vec_st(d1, 16, dst + i); dst+=8; } } else { DECLARE_ALIGNED(16, int16_t, tmp)[len]; int c, j; for (c = 0; c < channels; c++) { float_to_int16_altivec(tmp, src[c], len); for (i = 0, j = c; i < len; i++, j+=channels) { dst[j] = tmp[i]; } } } } void float_init_altivec(DSPContext* c, AVCodecContext *avctx) { c->vector_fmul = vector_fmul_altivec; c->vector_fmul_reverse = 
vector_fmul_reverse_altivec; c->vector_fmul_add = vector_fmul_add_altivec; c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_altivec; if(!(avctx->flags & CODEC_FLAG_BITEXACT)) { c->vector_fmul_window = vector_fmul_window_altivec; c->float_to_int16 = float_to_int16_altivec; c->float_to_int16_interleave = float_to_int16_interleave_altivec; } }
123linslouis-android-video-cutter
jni/libavcodec/ppc/float_altivec.c
C
asf20
7,907
/* * Copyright (c) 2002 Dieter Shirley * * dct_unquantize_h263_altivec: * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdlib.h> #include <stdio.h> #include "libavcodec/dsputil.h" #include "libavcodec/mpegvideo.h" #include "dsputil_ppc.h" #include "util_altivec.h" #include "types_altivec.h" #include "dsputil_altivec.h" // Swaps two variables (used for altivec registers) #define SWAP(a,b) \ do { \ __typeof__(a) swap_temp=a; \ a=b; \ b=swap_temp; \ } while (0) // transposes a matrix consisting of four vectors with four elements each #define TRANSPOSE4(a,b,c,d) \ do { \ __typeof__(a) _trans_ach = vec_mergeh(a, c); \ __typeof__(a) _trans_acl = vec_mergel(a, c); \ __typeof__(a) _trans_bdh = vec_mergeh(b, d); \ __typeof__(a) _trans_bdl = vec_mergel(b, d); \ \ a = vec_mergeh(_trans_ach, _trans_bdh); \ b = vec_mergel(_trans_ach, _trans_bdh); \ c = vec_mergeh(_trans_acl, _trans_bdl); \ d = vec_mergel(_trans_acl, _trans_bdl); \ } while (0) // Loads a four-byte value (int or float) from the target address // into every element in the target vector. Only works if the // target address is four-byte aligned (which should be always). 
#define LOAD4(vec, address) \ { \ __typeof__(vec)* _load_addr = (__typeof__(vec)*)(address); \ vector unsigned char _perm_vec = vec_lvsl(0,(address)); \ vec = vec_ld(0, _load_addr); \ vec = vec_perm(vec, vec, _perm_vec); \ vec = vec_splat(vec, 0); \ } #define FOUROF(a) {a,a,a,a} static int dct_quantize_altivec(MpegEncContext* s, DCTELEM* data, int n, int qscale, int* overflow) { int lastNonZero; vector float row0, row1, row2, row3, row4, row5, row6, row7; vector float alt0, alt1, alt2, alt3, alt4, alt5, alt6, alt7; const vector float zero = (const vector float)FOUROF(0.); // used after quantize step int oldBaseValue = 0; // Load the data into the row/alt vectors { vector signed short data0, data1, data2, data3, data4, data5, data6, data7; data0 = vec_ld(0, data); data1 = vec_ld(16, data); data2 = vec_ld(32, data); data3 = vec_ld(48, data); data4 = vec_ld(64, data); data5 = vec_ld(80, data); data6 = vec_ld(96, data); data7 = vec_ld(112, data); // Transpose the data before we start TRANSPOSE8(data0, data1, data2, data3, data4, data5, data6, data7); // load the data into floating point vectors. We load // the high half of each row into the main row vectors // and the low half into the alt vectors. row0 = vec_ctf(vec_unpackh(data0), 0); alt0 = vec_ctf(vec_unpackl(data0), 0); row1 = vec_ctf(vec_unpackh(data1), 0); alt1 = vec_ctf(vec_unpackl(data1), 0); row2 = vec_ctf(vec_unpackh(data2), 0); alt2 = vec_ctf(vec_unpackl(data2), 0); row3 = vec_ctf(vec_unpackh(data3), 0); alt3 = vec_ctf(vec_unpackl(data3), 0); row4 = vec_ctf(vec_unpackh(data4), 0); alt4 = vec_ctf(vec_unpackl(data4), 0); row5 = vec_ctf(vec_unpackh(data5), 0); alt5 = vec_ctf(vec_unpackl(data5), 0); row6 = vec_ctf(vec_unpackh(data6), 0); alt6 = vec_ctf(vec_unpackl(data6), 0); row7 = vec_ctf(vec_unpackh(data7), 0); alt7 = vec_ctf(vec_unpackl(data7), 0); } // The following block could exist as a separate an altivec dct // function. 
However, if we put it inline, the DCT data can remain // in the vector local variables, as floats, which we'll use during the // quantize step... { const vector float vec_0_298631336 = (vector float)FOUROF(0.298631336f); const vector float vec_0_390180644 = (vector float)FOUROF(-0.390180644f); const vector float vec_0_541196100 = (vector float)FOUROF(0.541196100f); const vector float vec_0_765366865 = (vector float)FOUROF(0.765366865f); const vector float vec_0_899976223 = (vector float)FOUROF(-0.899976223f); const vector float vec_1_175875602 = (vector float)FOUROF(1.175875602f); const vector float vec_1_501321110 = (vector float)FOUROF(1.501321110f); const vector float vec_1_847759065 = (vector float)FOUROF(-1.847759065f); const vector float vec_1_961570560 = (vector float)FOUROF(-1.961570560f); const vector float vec_2_053119869 = (vector float)FOUROF(2.053119869f); const vector float vec_2_562915447 = (vector float)FOUROF(-2.562915447f); const vector float vec_3_072711026 = (vector float)FOUROF(3.072711026f); int whichPass, whichHalf; for(whichPass = 1; whichPass<=2; whichPass++) { for(whichHalf = 1; whichHalf<=2; whichHalf++) { vector float tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; vector float tmp10, tmp11, tmp12, tmp13; vector float z1, z2, z3, z4, z5; tmp0 = vec_add(row0, row7); // tmp0 = dataptr[0] + dataptr[7]; tmp7 = vec_sub(row0, row7); // tmp7 = dataptr[0] - dataptr[7]; tmp3 = vec_add(row3, row4); // tmp3 = dataptr[3] + dataptr[4]; tmp4 = vec_sub(row3, row4); // tmp4 = dataptr[3] - dataptr[4]; tmp1 = vec_add(row1, row6); // tmp1 = dataptr[1] + dataptr[6]; tmp6 = vec_sub(row1, row6); // tmp6 = dataptr[1] - dataptr[6]; tmp2 = vec_add(row2, row5); // tmp2 = dataptr[2] + dataptr[5]; tmp5 = vec_sub(row2, row5); // tmp5 = dataptr[2] - dataptr[5]; tmp10 = vec_add(tmp0, tmp3); // tmp10 = tmp0 + tmp3; tmp13 = vec_sub(tmp0, tmp3); // tmp13 = tmp0 - tmp3; tmp11 = vec_add(tmp1, tmp2); // tmp11 = tmp1 + tmp2; tmp12 = vec_sub(tmp1, tmp2); // tmp12 = tmp1 - 
tmp2; // dataptr[0] = (DCTELEM) ((tmp10 + tmp11) << PASS1_BITS); row0 = vec_add(tmp10, tmp11); // dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS); row4 = vec_sub(tmp10, tmp11); // z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); z1 = vec_madd(vec_add(tmp12, tmp13), vec_0_541196100, (vector float)zero); // dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), // CONST_BITS-PASS1_BITS); row2 = vec_madd(tmp13, vec_0_765366865, z1); // dataptr[6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), // CONST_BITS-PASS1_BITS); row6 = vec_madd(tmp12, vec_1_847759065, z1); z1 = vec_add(tmp4, tmp7); // z1 = tmp4 + tmp7; z2 = vec_add(tmp5, tmp6); // z2 = tmp5 + tmp6; z3 = vec_add(tmp4, tmp6); // z3 = tmp4 + tmp6; z4 = vec_add(tmp5, tmp7); // z4 = tmp5 + tmp7; // z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */ z5 = vec_madd(vec_add(z3, z4), vec_1_175875602, (vector float)zero); // z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */ z3 = vec_madd(z3, vec_1_961570560, z5); // z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */ z4 = vec_madd(z4, vec_0_390180644, z5); // The following adds are rolled into the multiplies above // z3 = vec_add(z3, z5); // z3 += z5; // z4 = vec_add(z4, z5); // z4 += z5; // z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ // Wow! It's actually more efficient to roll this multiply // into the adds below, even thought the multiply gets done twice! // z2 = vec_madd(z2, vec_2_562915447, (vector float)zero); // z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */ // Same with this one... 
// z1 = vec_madd(z1, vec_0_899976223, (vector float)zero); // tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ // dataptr[7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS); row7 = vec_madd(tmp4, vec_0_298631336, vec_madd(z1, vec_0_899976223, z3)); // tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ // dataptr[5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS); row5 = vec_madd(tmp5, vec_2_053119869, vec_madd(z2, vec_2_562915447, z4)); // tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */ // dataptr[3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS); row3 = vec_madd(tmp6, vec_3_072711026, vec_madd(z2, vec_2_562915447, z3)); // tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */ // dataptr[1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS); row1 = vec_madd(z1, vec_0_899976223, vec_madd(tmp7, vec_1_501321110, z4)); // Swap the row values with the alts. If this is the first half, // this sets up the low values to be acted on in the second half. // If this is the second half, it puts the high values back in // the row values where they are expected to be when we're done. SWAP(row0, alt0); SWAP(row1, alt1); SWAP(row2, alt2); SWAP(row3, alt3); SWAP(row4, alt4); SWAP(row5, alt5); SWAP(row6, alt6); SWAP(row7, alt7); } if (whichPass == 1) { // transpose the data for the second pass // First, block transpose the upper right with lower left. 
SWAP(row4, alt0); SWAP(row5, alt1); SWAP(row6, alt2); SWAP(row7, alt3); // Now, transpose each block of four TRANSPOSE4(row0, row1, row2, row3); TRANSPOSE4(row4, row5, row6, row7); TRANSPOSE4(alt0, alt1, alt2, alt3); TRANSPOSE4(alt4, alt5, alt6, alt7); } } } // perform the quantize step, using the floating point data // still in the row/alt registers { const int* biasAddr; const vector signed int* qmat; vector float bias, negBias; if (s->mb_intra) { vector signed int baseVector; // We must cache element 0 in the intra case // (it needs special handling). baseVector = vec_cts(vec_splat(row0, 0), 0); vec_ste(baseVector, 0, &oldBaseValue); qmat = (vector signed int*)s->q_intra_matrix[qscale]; biasAddr = &(s->intra_quant_bias); } else { qmat = (vector signed int*)s->q_inter_matrix[qscale]; biasAddr = &(s->inter_quant_bias); } // Load the bias vector (We add 0.5 to the bias so that we're // rounding when we convert to int, instead of flooring.) { vector signed int biasInt; const vector float negOneFloat = (vector float)FOUROF(-1.0f); LOAD4(biasInt, biasAddr); bias = vec_ctf(biasInt, QUANT_BIAS_SHIFT); negBias = vec_madd(bias, negOneFloat, zero); } { vector float q0, q1, q2, q3, q4, q5, q6, q7; q0 = vec_ctf(qmat[0], QMAT_SHIFT); q1 = vec_ctf(qmat[2], QMAT_SHIFT); q2 = vec_ctf(qmat[4], QMAT_SHIFT); q3 = vec_ctf(qmat[6], QMAT_SHIFT); q4 = vec_ctf(qmat[8], QMAT_SHIFT); q5 = vec_ctf(qmat[10], QMAT_SHIFT); q6 = vec_ctf(qmat[12], QMAT_SHIFT); q7 = vec_ctf(qmat[14], QMAT_SHIFT); row0 = vec_sel(vec_madd(row0, q0, negBias), vec_madd(row0, q0, bias), vec_cmpgt(row0, zero)); row1 = vec_sel(vec_madd(row1, q1, negBias), vec_madd(row1, q1, bias), vec_cmpgt(row1, zero)); row2 = vec_sel(vec_madd(row2, q2, negBias), vec_madd(row2, q2, bias), vec_cmpgt(row2, zero)); row3 = vec_sel(vec_madd(row3, q3, negBias), vec_madd(row3, q3, bias), vec_cmpgt(row3, zero)); row4 = vec_sel(vec_madd(row4, q4, negBias), vec_madd(row4, q4, bias), vec_cmpgt(row4, zero)); row5 = vec_sel(vec_madd(row5, q5, 
negBias), vec_madd(row5, q5, bias), vec_cmpgt(row5, zero)); row6 = vec_sel(vec_madd(row6, q6, negBias), vec_madd(row6, q6, bias), vec_cmpgt(row6, zero)); row7 = vec_sel(vec_madd(row7, q7, negBias), vec_madd(row7, q7, bias), vec_cmpgt(row7, zero)); q0 = vec_ctf(qmat[1], QMAT_SHIFT); q1 = vec_ctf(qmat[3], QMAT_SHIFT); q2 = vec_ctf(qmat[5], QMAT_SHIFT); q3 = vec_ctf(qmat[7], QMAT_SHIFT); q4 = vec_ctf(qmat[9], QMAT_SHIFT); q5 = vec_ctf(qmat[11], QMAT_SHIFT); q6 = vec_ctf(qmat[13], QMAT_SHIFT); q7 = vec_ctf(qmat[15], QMAT_SHIFT); alt0 = vec_sel(vec_madd(alt0, q0, negBias), vec_madd(alt0, q0, bias), vec_cmpgt(alt0, zero)); alt1 = vec_sel(vec_madd(alt1, q1, negBias), vec_madd(alt1, q1, bias), vec_cmpgt(alt1, zero)); alt2 = vec_sel(vec_madd(alt2, q2, negBias), vec_madd(alt2, q2, bias), vec_cmpgt(alt2, zero)); alt3 = vec_sel(vec_madd(alt3, q3, negBias), vec_madd(alt3, q3, bias), vec_cmpgt(alt3, zero)); alt4 = vec_sel(vec_madd(alt4, q4, negBias), vec_madd(alt4, q4, bias), vec_cmpgt(alt4, zero)); alt5 = vec_sel(vec_madd(alt5, q5, negBias), vec_madd(alt5, q5, bias), vec_cmpgt(alt5, zero)); alt6 = vec_sel(vec_madd(alt6, q6, negBias), vec_madd(alt6, q6, bias), vec_cmpgt(alt6, zero)); alt7 = vec_sel(vec_madd(alt7, q7, negBias), vec_madd(alt7, q7, bias), vec_cmpgt(alt7, zero)); } } // Store the data back into the original block { vector signed short data0, data1, data2, data3, data4, data5, data6, data7; data0 = vec_pack(vec_cts(row0, 0), vec_cts(alt0, 0)); data1 = vec_pack(vec_cts(row1, 0), vec_cts(alt1, 0)); data2 = vec_pack(vec_cts(row2, 0), vec_cts(alt2, 0)); data3 = vec_pack(vec_cts(row3, 0), vec_cts(alt3, 0)); data4 = vec_pack(vec_cts(row4, 0), vec_cts(alt4, 0)); data5 = vec_pack(vec_cts(row5, 0), vec_cts(alt5, 0)); data6 = vec_pack(vec_cts(row6, 0), vec_cts(alt6, 0)); data7 = vec_pack(vec_cts(row7, 0), vec_cts(alt7, 0)); { // Clamp for overflow vector signed int max_q_int, min_q_int; vector signed short max_q, min_q; LOAD4(max_q_int, &(s->max_qcoeff)); LOAD4(min_q_int, 
&(s->min_qcoeff)); max_q = vec_pack(max_q_int, max_q_int); min_q = vec_pack(min_q_int, min_q_int); data0 = vec_max(vec_min(data0, max_q), min_q); data1 = vec_max(vec_min(data1, max_q), min_q); data2 = vec_max(vec_min(data2, max_q), min_q); data4 = vec_max(vec_min(data4, max_q), min_q); data5 = vec_max(vec_min(data5, max_q), min_q); data6 = vec_max(vec_min(data6, max_q), min_q); data7 = vec_max(vec_min(data7, max_q), min_q); } { vector bool char zero_01, zero_23, zero_45, zero_67; vector signed char scanIndexes_01, scanIndexes_23, scanIndexes_45, scanIndexes_67; vector signed char negOne = vec_splat_s8(-1); vector signed char* scanPtr = (vector signed char*)(s->intra_scantable.inverse); signed char lastNonZeroChar; // Determine the largest non-zero index. zero_01 = vec_pack(vec_cmpeq(data0, (vector signed short)zero), vec_cmpeq(data1, (vector signed short)zero)); zero_23 = vec_pack(vec_cmpeq(data2, (vector signed short)zero), vec_cmpeq(data3, (vector signed short)zero)); zero_45 = vec_pack(vec_cmpeq(data4, (vector signed short)zero), vec_cmpeq(data5, (vector signed short)zero)); zero_67 = vec_pack(vec_cmpeq(data6, (vector signed short)zero), vec_cmpeq(data7, (vector signed short)zero)); // 64 biggest values scanIndexes_01 = vec_sel(scanPtr[0], negOne, zero_01); scanIndexes_23 = vec_sel(scanPtr[1], negOne, zero_23); scanIndexes_45 = vec_sel(scanPtr[2], negOne, zero_45); scanIndexes_67 = vec_sel(scanPtr[3], negOne, zero_67); // 32 largest values scanIndexes_01 = vec_max(scanIndexes_01, scanIndexes_23); scanIndexes_45 = vec_max(scanIndexes_45, scanIndexes_67); // 16 largest values scanIndexes_01 = vec_max(scanIndexes_01, scanIndexes_45); // 8 largest values scanIndexes_01 = vec_max(vec_mergeh(scanIndexes_01, negOne), vec_mergel(scanIndexes_01, negOne)); // 4 largest values scanIndexes_01 = vec_max(vec_mergeh(scanIndexes_01, negOne), vec_mergel(scanIndexes_01, negOne)); // 2 largest values scanIndexes_01 = vec_max(vec_mergeh(scanIndexes_01, negOne), 
vec_mergel(scanIndexes_01, negOne)); // largest value scanIndexes_01 = vec_max(vec_mergeh(scanIndexes_01, negOne), vec_mergel(scanIndexes_01, negOne)); scanIndexes_01 = vec_splat(scanIndexes_01, 0); vec_ste(scanIndexes_01, 0, &lastNonZeroChar); lastNonZero = lastNonZeroChar; // While the data is still in vectors we check for the transpose IDCT permute // and handle it using the vector unit if we can. This is the permute used // by the altivec idct, so it is common when using the altivec dct. if ((lastNonZero > 0) && (s->dsp.idct_permutation_type == FF_TRANSPOSE_IDCT_PERM)) { TRANSPOSE8(data0, data1, data2, data3, data4, data5, data6, data7); } vec_st(data0, 0, data); vec_st(data1, 16, data); vec_st(data2, 32, data); vec_st(data3, 48, data); vec_st(data4, 64, data); vec_st(data5, 80, data); vec_st(data6, 96, data); vec_st(data7, 112, data); } } // special handling of block[0] if (s->mb_intra) { if (!s->h263_aic) { if (n < 4) oldBaseValue /= s->y_dc_scale; else oldBaseValue /= s->c_dc_scale; } // Divide by 8, rounding the result data[0] = (oldBaseValue + 4) >> 3; } // We handled the transpose permutation above and we don't // need to permute the "no" permutation case. 
if ((lastNonZero > 0) && (s->dsp.idct_permutation_type != FF_TRANSPOSE_IDCT_PERM) && (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)) { ff_block_permute(data, s->dsp.idct_permutation, s->intra_scantable.scantable, lastNonZero); } return lastNonZero; } /* AltiVec version of dct_unquantize_h263 this code assumes `block' is 16 bytes-aligned */ static void dct_unquantize_h263_altivec(MpegEncContext *s, DCTELEM *block, int n, int qscale) { POWERPC_PERF_DECLARE(altivec_dct_unquantize_h263_num, 1); int i, level, qmul, qadd; int nCoeffs; assert(s->block_last_index[n]>=0); POWERPC_PERF_START_COUNT(altivec_dct_unquantize_h263_num, 1); qadd = (qscale - 1) | 1; qmul = qscale << 1; if (s->mb_intra) { if (!s->h263_aic) { if (n < 4) block[0] = block[0] * s->y_dc_scale; else block[0] = block[0] * s->c_dc_scale; }else qadd = 0; i = 1; nCoeffs= 63; //does not always use zigzag table } else { i = 0; nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]; } { register const vector signed short vczero = (const vector signed short)vec_splat_s16(0); DECLARE_ALIGNED(16, short, qmul8) = qmul; DECLARE_ALIGNED(16, short, qadd8) = qadd; register vector signed short blockv, qmulv, qaddv, nqaddv, temp1; register vector bool short blockv_null, blockv_neg; register short backup_0 = block[0]; register int j = 0; qmulv = vec_splat((vec_s16)vec_lde(0, &qmul8), 0); qaddv = vec_splat((vec_s16)vec_lde(0, &qadd8), 0); nqaddv = vec_sub(vczero, qaddv); #if 0 // block *is* 16 bytes-aligned, it seems. 
// first make sure block[j] is 16 bytes-aligned for(j = 0; (j <= nCoeffs) && ((((unsigned long)block) + (j << 1)) & 0x0000000F) ; j++) { level = block[j]; if (level) { if (level < 0) { level = level * qmul - qadd; } else { level = level * qmul + qadd; } block[j] = level; } } #endif // vectorize all the 16 bytes-aligned blocks // of 8 elements for(; (j + 7) <= nCoeffs ; j+=8) { blockv = vec_ld(j << 1, block); blockv_neg = vec_cmplt(blockv, vczero); blockv_null = vec_cmpeq(blockv, vczero); // choose between +qadd or -qadd as the third operand temp1 = vec_sel(qaddv, nqaddv, blockv_neg); // multiply & add (block{i,i+7} * qmul [+-] qadd) temp1 = vec_mladd(blockv, qmulv, temp1); // put 0 where block[{i,i+7} used to have 0 blockv = vec_sel(temp1, blockv, blockv_null); vec_st(blockv, j << 1, block); } // if nCoeffs isn't a multiple of 8, finish the job // using good old scalar units. // (we could do it using a truncated vector, // but I'm not sure it's worth the hassle) for(; j <= nCoeffs ; j++) { level = block[j]; if (level) { if (level < 0) { level = level * qmul - qadd; } else { level = level * qmul + qadd; } block[j] = level; } } if (i == 1) { // cheat. this avoid special-casing the first iteration block[0] = backup_0; } } POWERPC_PERF_STOP_COUNT(altivec_dct_unquantize_h263_num, nCoeffs == 63); } void MPV_common_init_altivec(MpegEncContext *s) { if ((mm_flags & FF_MM_ALTIVEC) == 0) return; if (s->avctx->lowres==0) { if ((s->avctx->idct_algo == FF_IDCT_AUTO) || (s->avctx->idct_algo == FF_IDCT_ALTIVEC)) { s->dsp.idct_put = idct_put_altivec; s->dsp.idct_add = idct_add_altivec; s->dsp.idct_permutation_type = FF_TRANSPOSE_IDCT_PERM; } } // Test to make sure that the dct required alignments are met. if ((((long)(s->q_intra_matrix) & 0x0f) != 0) || (((long)(s->q_inter_matrix) & 0x0f) != 0)) { av_log(s->avctx, AV_LOG_INFO, "Internal Error: q-matrix blocks must be 16-byte aligned " "to use AltiVec DCT. 
Reverting to non-AltiVec version.\n"); return; } if (((long)(s->intra_scantable.inverse) & 0x0f) != 0) { av_log(s->avctx, AV_LOG_INFO, "Internal Error: scan table blocks must be 16-byte aligned " "to use AltiVec DCT. Reverting to non-AltiVec version.\n"); return; } if ((s->avctx->dct_algo == FF_DCT_AUTO) || (s->avctx->dct_algo == FF_DCT_ALTIVEC)) { #if 0 /* seems to cause trouble under some circumstances */ s->dct_quantize = dct_quantize_altivec; #endif s->dct_unquantize_h263_intra = dct_unquantize_h263_altivec; s->dct_unquantize_h263_inter = dct_unquantize_h263_altivec; } }
123linslouis-android-video-cutter
jni/libavcodec/ppc/mpegvideo_altivec.c
C
asf20
25,341
/* * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ //#define DEBUG_ALIGNMENT #ifdef DEBUG_ALIGNMENT #define ASSERT_ALIGNED(ptr) assert(((unsigned long)ptr&0x0000000F)); #else #define ASSERT_ALIGNED(ptr) ; #endif /* this code assume that stride % 16 == 0 */ #define CHROMA_MC8_ALTIVEC_CORE(BIAS1, BIAS2) \ vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc2uc);\ vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc3uc);\ \ psum = vec_mladd(vA, vsrc0ssH, BIAS1);\ psum = vec_mladd(vB, vsrc1ssH, psum);\ psum = vec_mladd(vC, vsrc2ssH, psum);\ psum = vec_mladd(vD, vsrc3ssH, psum);\ psum = BIAS2(psum);\ psum = vec_sr(psum, v6us);\ \ vdst = vec_ld(0, dst);\ ppsum = (vec_u8)vec_pack(psum, psum);\ vfdst = vec_perm(vdst, ppsum, fperm);\ \ OP_U8_ALTIVEC(fsum, vfdst, vdst);\ \ vec_st(fsum, 0, dst);\ \ vsrc0ssH = vsrc2ssH;\ vsrc1ssH = vsrc3ssH;\ \ dst += stride;\ src += stride; #define CHROMA_MC8_ALTIVEC_CORE_SIMPLE \ \ vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);\ vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);\ \ psum = vec_mladd(vA, vsrc0ssH, v32ss);\ psum = vec_mladd(vE, vsrc1ssH, psum);\ psum = vec_sr(psum, v6us);\ \ vdst = vec_ld(0, dst);\ ppsum = (vec_u8)vec_pack(psum, psum);\ vfdst = vec_perm(vdst, ppsum, 
fperm);\ \ OP_U8_ALTIVEC(fsum, vfdst, vdst);\ \ vec_st(fsum, 0, dst);\ \ dst += stride;\ src += stride; #define noop(a) a #define add28(a) vec_add(v28ss, a) static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) { POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1); DECLARE_ALIGNED(16, signed int, ABCD)[4] = {((8 - x) * (8 - y)), (( x) * (8 - y)), ((8 - x) * ( y)), (( x) * ( y))}; register int i; vec_u8 fperm; const vec_s32 vABCD = vec_ld(0, ABCD); const vec_s16 vA = vec_splat((vec_s16)vABCD, 1); const vec_s16 vB = vec_splat((vec_s16)vABCD, 3); const vec_s16 vC = vec_splat((vec_s16)vABCD, 5); const vec_s16 vD = vec_splat((vec_s16)vABCD, 7); LOAD_ZERO; const vec_s16 v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5)); const vec_u16 v6us = vec_splat_u16(6); register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1; register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0; vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1; vec_u8 vsrc0uc, vsrc1uc; vec_s16 vsrc0ssH, vsrc1ssH; vec_u8 vsrcCuc, vsrc2uc, vsrc3uc; vec_s16 vsrc2ssH, vsrc3ssH, psum; vec_u8 vdst, ppsum, vfdst, fsum; POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1); if (((unsigned long)dst) % 16 == 0) { fperm = (vec_u8){0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}; } else { fperm = (vec_u8){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F}; } vsrcAuc = vec_ld(0, src); if (loadSecond) vsrcBuc = vec_ld(16, src); vsrcperm0 = vec_lvsl(0, src); vsrcperm1 = vec_lvsl(1, src); vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0); if (reallyBadAlign) vsrc1uc = vsrcBuc; else vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1); vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc); vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc); if (ABCD[3]) { if (!loadSecond) {// -> !reallyBadAlign for (i = 0 ; i < h ; i++) { vsrcCuc = vec_ld(stride + 
0, src); vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0); vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1); CHROMA_MC8_ALTIVEC_CORE(v32ss, noop) } } else { vec_u8 vsrcDuc; for (i = 0 ; i < h ; i++) { vsrcCuc = vec_ld(stride + 0, src); vsrcDuc = vec_ld(stride + 16, src); vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0); if (reallyBadAlign) vsrc3uc = vsrcDuc; else vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1); CHROMA_MC8_ALTIVEC_CORE(v32ss, noop) } } } else { const vec_s16 vE = vec_add(vB, vC); if (ABCD[2]) { // x == 0 B == 0 if (!loadSecond) {// -> !reallyBadAlign for (i = 0 ; i < h ; i++) { vsrcCuc = vec_ld(stride + 0, src); vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0); CHROMA_MC8_ALTIVEC_CORE_SIMPLE vsrc0uc = vsrc1uc; } } else { vec_u8 vsrcDuc; for (i = 0 ; i < h ; i++) { vsrcCuc = vec_ld(stride + 0, src); vsrcDuc = vec_ld(stride + 15, src); vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0); CHROMA_MC8_ALTIVEC_CORE_SIMPLE vsrc0uc = vsrc1uc; } } } else { // y == 0 C == 0 if (!loadSecond) {// -> !reallyBadAlign for (i = 0 ; i < h ; i++) { vsrcCuc = vec_ld(0, src); vsrc0uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0); vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1); CHROMA_MC8_ALTIVEC_CORE_SIMPLE } } else { vec_u8 vsrcDuc; for (i = 0 ; i < h ; i++) { vsrcCuc = vec_ld(0, src); vsrcDuc = vec_ld(15, src); vsrc0uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0); if (reallyBadAlign) vsrc1uc = vsrcDuc; else vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1); CHROMA_MC8_ALTIVEC_CORE_SIMPLE } } } } POWERPC_PERF_STOP_COUNT(PREFIX_h264_chroma_mc8_num, 1); } /* this code assume that stride % 16 == 0 */ static void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) { DECLARE_ALIGNED(16, signed int, ABCD)[4] = {((8 - x) * (8 - y)), (( x) * (8 - y)), ((8 - x) * ( y)), (( x) * ( y))}; register int i; vec_u8 fperm; const vec_s32 vABCD = vec_ld(0, ABCD); const vec_s16 vA = vec_splat((vec_s16)vABCD, 1); const vec_s16 vB = vec_splat((vec_s16)vABCD, 3); 
const vec_s16 vC = vec_splat((vec_s16)vABCD, 5); const vec_s16 vD = vec_splat((vec_s16)vABCD, 7); LOAD_ZERO; const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4)); const vec_u16 v6us = vec_splat_u16(6); register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1; register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0; vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1; vec_u8 vsrc0uc, vsrc1uc; vec_s16 vsrc0ssH, vsrc1ssH; vec_u8 vsrcCuc, vsrc2uc, vsrc3uc; vec_s16 vsrc2ssH, vsrc3ssH, psum; vec_u8 vdst, ppsum, vfdst, fsum; if (((unsigned long)dst) % 16 == 0) { fperm = (vec_u8){0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}; } else { fperm = (vec_u8){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F}; } vsrcAuc = vec_ld(0, src); if (loadSecond) vsrcBuc = vec_ld(16, src); vsrcperm0 = vec_lvsl(0, src); vsrcperm1 = vec_lvsl(1, src); vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0); if (reallyBadAlign) vsrc1uc = vsrcBuc; else vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1); vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc0uc); vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc1uc); if (!loadSecond) {// -> !reallyBadAlign for (i = 0 ; i < h ; i++) { vsrcCuc = vec_ld(stride + 0, src); vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0); vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1); CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28) } } else { vec_u8 vsrcDuc; for (i = 0 ; i < h ; i++) { vsrcCuc = vec_ld(stride + 0, src); vsrcDuc = vec_ld(stride + 16, src); vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0); if (reallyBadAlign) vsrc3uc = vsrcDuc; else vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1); CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28) } } } #undef noop #undef add28 #undef CHROMA_MC8_ALTIVEC_CORE /* this code assume stride % 16 == 0 */ static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t 
* src, int dstStride, int srcStride) { POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_h_lowpass_num, 1); register int i; LOAD_ZERO; const vec_u8 permM2 = vec_lvsl(-2, src); const vec_u8 permM1 = vec_lvsl(-1, src); const vec_u8 permP0 = vec_lvsl(+0, src); const vec_u8 permP1 = vec_lvsl(+1, src); const vec_u8 permP2 = vec_lvsl(+2, src); const vec_u8 permP3 = vec_lvsl(+3, src); const vec_s16 v5ss = vec_splat_s16(5); const vec_u16 v5us = vec_splat_u16(5); const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2)); const vec_s16 v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4)); vec_u8 srcM2, srcM1, srcP0, srcP1, srcP2, srcP3; register int align = ((((unsigned long)src) - 2) % 16); vec_s16 srcP0A, srcP0B, srcP1A, srcP1B, srcP2A, srcP2B, srcP3A, srcP3B, srcM1A, srcM1B, srcM2A, srcM2B, sum1A, sum1B, sum2A, sum2B, sum3A, sum3B, pp1A, pp1B, pp2A, pp2B, pp3A, pp3B, psumA, psumB, sumA, sumB; vec_u8 sum, vdst, fsum; POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1); for (i = 0 ; i < 16 ; i ++) { vec_u8 srcR1 = vec_ld(-2, src); vec_u8 srcR2 = vec_ld(14, src); switch (align) { default: { srcM2 = vec_perm(srcR1, srcR2, permM2); srcM1 = vec_perm(srcR1, srcR2, permM1); srcP0 = vec_perm(srcR1, srcR2, permP0); srcP1 = vec_perm(srcR1, srcR2, permP1); srcP2 = vec_perm(srcR1, srcR2, permP2); srcP3 = vec_perm(srcR1, srcR2, permP3); } break; case 11: { srcM2 = vec_perm(srcR1, srcR2, permM2); srcM1 = vec_perm(srcR1, srcR2, permM1); srcP0 = vec_perm(srcR1, srcR2, permP0); srcP1 = vec_perm(srcR1, srcR2, permP1); srcP2 = vec_perm(srcR1, srcR2, permP2); srcP3 = srcR2; } break; case 12: { vec_u8 srcR3 = vec_ld(30, src); srcM2 = vec_perm(srcR1, srcR2, permM2); srcM1 = vec_perm(srcR1, srcR2, permM1); srcP0 = vec_perm(srcR1, srcR2, permP0); srcP1 = vec_perm(srcR1, srcR2, permP1); srcP2 = srcR2; srcP3 = vec_perm(srcR2, srcR3, permP3); } break; case 13: { vec_u8 srcR3 = vec_ld(30, src); srcM2 = vec_perm(srcR1, srcR2, permM2); srcM1 = vec_perm(srcR1, srcR2, permM1); srcP0 = vec_perm(srcR1, 
srcR2, permP0); srcP1 = srcR2; srcP2 = vec_perm(srcR2, srcR3, permP2); srcP3 = vec_perm(srcR2, srcR3, permP3); } break; case 14: { vec_u8 srcR3 = vec_ld(30, src); srcM2 = vec_perm(srcR1, srcR2, permM2); srcM1 = vec_perm(srcR1, srcR2, permM1); srcP0 = srcR2; srcP1 = vec_perm(srcR2, srcR3, permP1); srcP2 = vec_perm(srcR2, srcR3, permP2); srcP3 = vec_perm(srcR2, srcR3, permP3); } break; case 15: { vec_u8 srcR3 = vec_ld(30, src); srcM2 = vec_perm(srcR1, srcR2, permM2); srcM1 = srcR2; srcP0 = vec_perm(srcR2, srcR3, permP0); srcP1 = vec_perm(srcR2, srcR3, permP1); srcP2 = vec_perm(srcR2, srcR3, permP2); srcP3 = vec_perm(srcR2, srcR3, permP3); } break; } srcP0A = (vec_s16) vec_mergeh(zero_u8v, srcP0); srcP0B = (vec_s16) vec_mergel(zero_u8v, srcP0); srcP1A = (vec_s16) vec_mergeh(zero_u8v, srcP1); srcP1B = (vec_s16) vec_mergel(zero_u8v, srcP1); srcP2A = (vec_s16) vec_mergeh(zero_u8v, srcP2); srcP2B = (vec_s16) vec_mergel(zero_u8v, srcP2); srcP3A = (vec_s16) vec_mergeh(zero_u8v, srcP3); srcP3B = (vec_s16) vec_mergel(zero_u8v, srcP3); srcM1A = (vec_s16) vec_mergeh(zero_u8v, srcM1); srcM1B = (vec_s16) vec_mergel(zero_u8v, srcM1); srcM2A = (vec_s16) vec_mergeh(zero_u8v, srcM2); srcM2B = (vec_s16) vec_mergel(zero_u8v, srcM2); sum1A = vec_adds(srcP0A, srcP1A); sum1B = vec_adds(srcP0B, srcP1B); sum2A = vec_adds(srcM1A, srcP2A); sum2B = vec_adds(srcM1B, srcP2B); sum3A = vec_adds(srcM2A, srcP3A); sum3B = vec_adds(srcM2B, srcP3B); pp1A = vec_mladd(sum1A, v20ss, v16ss); pp1B = vec_mladd(sum1B, v20ss, v16ss); pp2A = vec_mladd(sum2A, v5ss, zero_s16v); pp2B = vec_mladd(sum2B, v5ss, zero_s16v); pp3A = vec_add(sum3A, pp1A); pp3B = vec_add(sum3B, pp1B); psumA = vec_sub(pp3A, pp2A); psumB = vec_sub(pp3B, pp2B); sumA = vec_sra(psumA, v5us); sumB = vec_sra(psumB, v5us); sum = vec_packsu(sumA, sumB); ASSERT_ALIGNED(dst); vdst = vec_ld(0, dst); OP_U8_ALTIVEC(fsum, sum, vdst); vec_st(fsum, 0, dst); src += srcStride; dst += dstStride; } POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1); 
} /* this code assume stride % 16 == 0 */ static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) { POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_v_lowpass_num, 1); register int i; LOAD_ZERO; const vec_u8 perm = vec_lvsl(0, src); const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2)); const vec_u16 v5us = vec_splat_u16(5); const vec_s16 v5ss = vec_splat_s16(5); const vec_s16 v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4)); uint8_t *srcbis = src - (srcStride * 2); const vec_u8 srcM2a = vec_ld(0, srcbis); const vec_u8 srcM2b = vec_ld(16, srcbis); const vec_u8 srcM2 = vec_perm(srcM2a, srcM2b, perm); //srcbis += srcStride; const vec_u8 srcM1a = vec_ld(0, srcbis += srcStride); const vec_u8 srcM1b = vec_ld(16, srcbis); const vec_u8 srcM1 = vec_perm(srcM1a, srcM1b, perm); //srcbis += srcStride; const vec_u8 srcP0a = vec_ld(0, srcbis += srcStride); const vec_u8 srcP0b = vec_ld(16, srcbis); const vec_u8 srcP0 = vec_perm(srcP0a, srcP0b, perm); //srcbis += srcStride; const vec_u8 srcP1a = vec_ld(0, srcbis += srcStride); const vec_u8 srcP1b = vec_ld(16, srcbis); const vec_u8 srcP1 = vec_perm(srcP1a, srcP1b, perm); //srcbis += srcStride; const vec_u8 srcP2a = vec_ld(0, srcbis += srcStride); const vec_u8 srcP2b = vec_ld(16, srcbis); const vec_u8 srcP2 = vec_perm(srcP2a, srcP2b, perm); //srcbis += srcStride; vec_s16 srcM2ssA = (vec_s16) vec_mergeh(zero_u8v, srcM2); vec_s16 srcM2ssB = (vec_s16) vec_mergel(zero_u8v, srcM2); vec_s16 srcM1ssA = (vec_s16) vec_mergeh(zero_u8v, srcM1); vec_s16 srcM1ssB = (vec_s16) vec_mergel(zero_u8v, srcM1); vec_s16 srcP0ssA = (vec_s16) vec_mergeh(zero_u8v, srcP0); vec_s16 srcP0ssB = (vec_s16) vec_mergel(zero_u8v, srcP0); vec_s16 srcP1ssA = (vec_s16) vec_mergeh(zero_u8v, srcP1); vec_s16 srcP1ssB = (vec_s16) vec_mergel(zero_u8v, srcP1); vec_s16 srcP2ssA = (vec_s16) vec_mergeh(zero_u8v, srcP2); vec_s16 srcP2ssB = (vec_s16) vec_mergel(zero_u8v, srcP2); vec_s16 pp1A, pp1B, pp2A, pp2B, pp3A, pp3B, 
psumA, psumB, sumA, sumB, srcP3ssA, srcP3ssB, sum1A, sum1B, sum2A, sum2B, sum3A, sum3B; vec_u8 sum, vdst, fsum, srcP3a, srcP3b, srcP3; POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1); for (i = 0 ; i < 16 ; i++) { srcP3a = vec_ld(0, srcbis += srcStride); srcP3b = vec_ld(16, srcbis); srcP3 = vec_perm(srcP3a, srcP3b, perm); srcP3ssA = (vec_s16) vec_mergeh(zero_u8v, srcP3); srcP3ssB = (vec_s16) vec_mergel(zero_u8v, srcP3); //srcbis += srcStride; sum1A = vec_adds(srcP0ssA, srcP1ssA); sum1B = vec_adds(srcP0ssB, srcP1ssB); sum2A = vec_adds(srcM1ssA, srcP2ssA); sum2B = vec_adds(srcM1ssB, srcP2ssB); sum3A = vec_adds(srcM2ssA, srcP3ssA); sum3B = vec_adds(srcM2ssB, srcP3ssB); srcM2ssA = srcM1ssA; srcM2ssB = srcM1ssB; srcM1ssA = srcP0ssA; srcM1ssB = srcP0ssB; srcP0ssA = srcP1ssA; srcP0ssB = srcP1ssB; srcP1ssA = srcP2ssA; srcP1ssB = srcP2ssB; srcP2ssA = srcP3ssA; srcP2ssB = srcP3ssB; pp1A = vec_mladd(sum1A, v20ss, v16ss); pp1B = vec_mladd(sum1B, v20ss, v16ss); pp2A = vec_mladd(sum2A, v5ss, zero_s16v); pp2B = vec_mladd(sum2B, v5ss, zero_s16v); pp3A = vec_add(sum3A, pp1A); pp3B = vec_add(sum3B, pp1B); psumA = vec_sub(pp3A, pp2A); psumB = vec_sub(pp3B, pp2B); sumA = vec_sra(psumA, v5us); sumB = vec_sra(psumB, v5us); sum = vec_packsu(sumA, sumB); ASSERT_ALIGNED(dst); vdst = vec_ld(0, dst); OP_U8_ALTIVEC(fsum, sum, vdst); vec_st(fsum, 0, dst); dst += dstStride; } POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1); } /* this code assume stride % 16 == 0 *and* tmp is properly aligned */ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, uint8_t * src, int dstStride, int tmpStride, int srcStride) { POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_hv_lowpass_num, 1); register int i; LOAD_ZERO; const vec_u8 permM2 = vec_lvsl(-2, src); const vec_u8 permM1 = vec_lvsl(-1, src); const vec_u8 permP0 = vec_lvsl(+0, src); const vec_u8 permP1 = vec_lvsl(+1, src); const vec_u8 permP2 = vec_lvsl(+2, src); const vec_u8 permP3 = vec_lvsl(+3, src); const 
vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2)); const vec_u32 v10ui = vec_splat_u32(10); const vec_s16 v5ss = vec_splat_s16(5); const vec_s16 v1ss = vec_splat_s16(1); const vec_s32 v512si = vec_sl(vec_splat_s32(1),vec_splat_u32(9)); const vec_u32 v16ui = vec_sl(vec_splat_u32(1),vec_splat_u32(4)); register int align = ((((unsigned long)src) - 2) % 16); vec_s16 srcP0A, srcP0B, srcP1A, srcP1B, srcP2A, srcP2B, srcP3A, srcP3B, srcM1A, srcM1B, srcM2A, srcM2B, sum1A, sum1B, sum2A, sum2B, sum3A, sum3B, pp1A, pp1B, pp2A, pp2B, psumA, psumB; const vec_u8 mperm = (const vec_u8) {0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B, 0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F}; int16_t *tmpbis = tmp; vec_s16 tmpM1ssA, tmpM1ssB, tmpM2ssA, tmpM2ssB, tmpP0ssA, tmpP0ssB, tmpP1ssA, tmpP1ssB, tmpP2ssA, tmpP2ssB; vec_s32 pp1Ae, pp1Ao, pp1Be, pp1Bo, pp2Ae, pp2Ao, pp2Be, pp2Bo, pp3Ae, pp3Ao, pp3Be, pp3Bo, pp1cAe, pp1cAo, pp1cBe, pp1cBo, pp32Ae, pp32Ao, pp32Be, pp32Bo, sumAe, sumAo, sumBe, sumBo, ssumAe, ssumAo, ssumBe, ssumBo; vec_u8 fsum, sumv, sum, vdst; vec_s16 ssume, ssumo; POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1); src -= (2 * srcStride); for (i = 0 ; i < 21 ; i ++) { vec_u8 srcM2, srcM1, srcP0, srcP1, srcP2, srcP3; vec_u8 srcR1 = vec_ld(-2, src); vec_u8 srcR2 = vec_ld(14, src); switch (align) { default: { srcM2 = vec_perm(srcR1, srcR2, permM2); srcM1 = vec_perm(srcR1, srcR2, permM1); srcP0 = vec_perm(srcR1, srcR2, permP0); srcP1 = vec_perm(srcR1, srcR2, permP1); srcP2 = vec_perm(srcR1, srcR2, permP2); srcP3 = vec_perm(srcR1, srcR2, permP3); } break; case 11: { srcM2 = vec_perm(srcR1, srcR2, permM2); srcM1 = vec_perm(srcR1, srcR2, permM1); srcP0 = vec_perm(srcR1, srcR2, permP0); srcP1 = vec_perm(srcR1, srcR2, permP1); srcP2 = vec_perm(srcR1, srcR2, permP2); srcP3 = srcR2; } break; case 12: { vec_u8 srcR3 = vec_ld(30, src); srcM2 = vec_perm(srcR1, srcR2, permM2); srcM1 = vec_perm(srcR1, srcR2, permM1); srcP0 = vec_perm(srcR1, srcR2, permP0); srcP1 = 
vec_perm(srcR1, srcR2, permP1); srcP2 = srcR2; srcP3 = vec_perm(srcR2, srcR3, permP3); } break; case 13: { vec_u8 srcR3 = vec_ld(30, src); srcM2 = vec_perm(srcR1, srcR2, permM2); srcM1 = vec_perm(srcR1, srcR2, permM1); srcP0 = vec_perm(srcR1, srcR2, permP0); srcP1 = srcR2; srcP2 = vec_perm(srcR2, srcR3, permP2); srcP3 = vec_perm(srcR2, srcR3, permP3); } break; case 14: { vec_u8 srcR3 = vec_ld(30, src); srcM2 = vec_perm(srcR1, srcR2, permM2); srcM1 = vec_perm(srcR1, srcR2, permM1); srcP0 = srcR2; srcP1 = vec_perm(srcR2, srcR3, permP1); srcP2 = vec_perm(srcR2, srcR3, permP2); srcP3 = vec_perm(srcR2, srcR3, permP3); } break; case 15: { vec_u8 srcR3 = vec_ld(30, src); srcM2 = vec_perm(srcR1, srcR2, permM2); srcM1 = srcR2; srcP0 = vec_perm(srcR2, srcR3, permP0); srcP1 = vec_perm(srcR2, srcR3, permP1); srcP2 = vec_perm(srcR2, srcR3, permP2); srcP3 = vec_perm(srcR2, srcR3, permP3); } break; } srcP0A = (vec_s16) vec_mergeh(zero_u8v, srcP0); srcP0B = (vec_s16) vec_mergel(zero_u8v, srcP0); srcP1A = (vec_s16) vec_mergeh(zero_u8v, srcP1); srcP1B = (vec_s16) vec_mergel(zero_u8v, srcP1); srcP2A = (vec_s16) vec_mergeh(zero_u8v, srcP2); srcP2B = (vec_s16) vec_mergel(zero_u8v, srcP2); srcP3A = (vec_s16) vec_mergeh(zero_u8v, srcP3); srcP3B = (vec_s16) vec_mergel(zero_u8v, srcP3); srcM1A = (vec_s16) vec_mergeh(zero_u8v, srcM1); srcM1B = (vec_s16) vec_mergel(zero_u8v, srcM1); srcM2A = (vec_s16) vec_mergeh(zero_u8v, srcM2); srcM2B = (vec_s16) vec_mergel(zero_u8v, srcM2); sum1A = vec_adds(srcP0A, srcP1A); sum1B = vec_adds(srcP0B, srcP1B); sum2A = vec_adds(srcM1A, srcP2A); sum2B = vec_adds(srcM1B, srcP2B); sum3A = vec_adds(srcM2A, srcP3A); sum3B = vec_adds(srcM2B, srcP3B); pp1A = vec_mladd(sum1A, v20ss, sum3A); pp1B = vec_mladd(sum1B, v20ss, sum3B); pp2A = vec_mladd(sum2A, v5ss, zero_s16v); pp2B = vec_mladd(sum2B, v5ss, zero_s16v); psumA = vec_sub(pp1A, pp2A); psumB = vec_sub(pp1B, pp2B); vec_st(psumA, 0, tmp); vec_st(psumB, 16, tmp); src += srcStride; tmp += tmpStride; /* int16_t*, and 
stride is 16, so it's OK here */ } tmpM2ssA = vec_ld(0, tmpbis); tmpM2ssB = vec_ld(16, tmpbis); tmpbis += tmpStride; tmpM1ssA = vec_ld(0, tmpbis); tmpM1ssB = vec_ld(16, tmpbis); tmpbis += tmpStride; tmpP0ssA = vec_ld(0, tmpbis); tmpP0ssB = vec_ld(16, tmpbis); tmpbis += tmpStride; tmpP1ssA = vec_ld(0, tmpbis); tmpP1ssB = vec_ld(16, tmpbis); tmpbis += tmpStride; tmpP2ssA = vec_ld(0, tmpbis); tmpP2ssB = vec_ld(16, tmpbis); tmpbis += tmpStride; for (i = 0 ; i < 16 ; i++) { const vec_s16 tmpP3ssA = vec_ld(0, tmpbis); const vec_s16 tmpP3ssB = vec_ld(16, tmpbis); const vec_s16 sum1A = vec_adds(tmpP0ssA, tmpP1ssA); const vec_s16 sum1B = vec_adds(tmpP0ssB, tmpP1ssB); const vec_s16 sum2A = vec_adds(tmpM1ssA, tmpP2ssA); const vec_s16 sum2B = vec_adds(tmpM1ssB, tmpP2ssB); const vec_s16 sum3A = vec_adds(tmpM2ssA, tmpP3ssA); const vec_s16 sum3B = vec_adds(tmpM2ssB, tmpP3ssB); tmpbis += tmpStride; tmpM2ssA = tmpM1ssA; tmpM2ssB = tmpM1ssB; tmpM1ssA = tmpP0ssA; tmpM1ssB = tmpP0ssB; tmpP0ssA = tmpP1ssA; tmpP0ssB = tmpP1ssB; tmpP1ssA = tmpP2ssA; tmpP1ssB = tmpP2ssB; tmpP2ssA = tmpP3ssA; tmpP2ssB = tmpP3ssB; pp1Ae = vec_mule(sum1A, v20ss); pp1Ao = vec_mulo(sum1A, v20ss); pp1Be = vec_mule(sum1B, v20ss); pp1Bo = vec_mulo(sum1B, v20ss); pp2Ae = vec_mule(sum2A, v5ss); pp2Ao = vec_mulo(sum2A, v5ss); pp2Be = vec_mule(sum2B, v5ss); pp2Bo = vec_mulo(sum2B, v5ss); pp3Ae = vec_sra((vec_s32)sum3A, v16ui); pp3Ao = vec_mulo(sum3A, v1ss); pp3Be = vec_sra((vec_s32)sum3B, v16ui); pp3Bo = vec_mulo(sum3B, v1ss); pp1cAe = vec_add(pp1Ae, v512si); pp1cAo = vec_add(pp1Ao, v512si); pp1cBe = vec_add(pp1Be, v512si); pp1cBo = vec_add(pp1Bo, v512si); pp32Ae = vec_sub(pp3Ae, pp2Ae); pp32Ao = vec_sub(pp3Ao, pp2Ao); pp32Be = vec_sub(pp3Be, pp2Be); pp32Bo = vec_sub(pp3Bo, pp2Bo); sumAe = vec_add(pp1cAe, pp32Ae); sumAo = vec_add(pp1cAo, pp32Ao); sumBe = vec_add(pp1cBe, pp32Be); sumBo = vec_add(pp1cBo, pp32Bo); ssumAe = vec_sra(sumAe, v10ui); ssumAo = vec_sra(sumAo, v10ui); ssumBe = vec_sra(sumBe, v10ui); ssumBo = 
vec_sra(sumBo, v10ui); ssume = vec_packs(ssumAe, ssumBe); ssumo = vec_packs(ssumAo, ssumBo); sumv = vec_packsu(ssume, ssumo); sum = vec_perm(sumv, sumv, mperm); ASSERT_ALIGNED(dst); vdst = vec_ld(0, dst); OP_U8_ALTIVEC(fsum, sum, vdst); vec_st(fsum, 0, dst); dst += dstStride; } POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1); }
123linslouis-android-video-cutter
jni/libavcodec/ppc/h264_template_altivec.c
C
asf20
27,956
OBJS += ppc/dsputil_ppc.o \ ALTIVEC-OBJS-$(CONFIG_H264DSP) += ppc/h264_altivec.o ALTIVEC-OBJS-$(CONFIG_VC1_DECODER) += ppc/vc1dsp_altivec.o ALTIVEC-OBJS-$(CONFIG_VP3_DECODER) += ppc/vp3dsp_altivec.o ALTIVEC-OBJS-$(CONFIG_VP5_DECODER) += ppc/vp3dsp_altivec.o ALTIVEC-OBJS-$(CONFIG_VP6_DECODER) += ppc/vp3dsp_altivec.o OBJS-$(HAVE_ALTIVEC) += ppc/check_altivec.o \ ppc/dsputil_altivec.o \ ppc/fdct_altivec.o \ ppc/fft_altivec.o \ ppc/float_altivec.o \ ppc/gmc_altivec.o \ ppc/idct_altivec.o \ ppc/int_altivec.o \ ppc/mpegvideo_altivec.o \ $(ALTIVEC-OBJS-yes)
123linslouis-android-video-cutter
jni/libavcodec/ppc/Makefile
Makefile
asf20
1,117
/* * GMC (Global Motion Compensation) * AltiVec-enabled * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavcodec/dsputil.h" #include "dsputil_ppc.h" #include "util_altivec.h" #include "types_altivec.h" #include "dsputil_altivec.h" /* altivec-enhanced gmc1. ATM this code assume stride is a multiple of 8, to preserve proper dst alignment. 
*/ #define GMC1_PERF_COND (h==8) void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int stride, int h, int x16, int y16, int rounder) { POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND); const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder; const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] = { (16-x16)*(16-y16), /* A */ ( x16)*(16-y16), /* B */ (16-x16)*( y16), /* C */ ( x16)*( y16), /* D */ 0, 0, 0, 0 /* padding */ }; register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0); register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8); register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD; register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD; int i; unsigned long dst_odd = (unsigned long)dst & 0x0000000F; unsigned long src_really_odd = (unsigned long)src & 0x0000000F; POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND); tempA = vec_ld(0, (unsigned short*)ABCD); Av = vec_splat(tempA, 0); Bv = vec_splat(tempA, 1); Cv = vec_splat(tempA, 2); Dv = vec_splat(tempA, 3); rounderV = vec_splat((vec_u16)vec_lde(0, &rounder_a), 0); // we'll be able to pick-up our 9 char elements // at src from those 32 bytes // we load the first batch here, as inside the loop // we can re-use 'src+stride' from one iteration // as the 'src' of the next. src_0 = vec_ld(0, src); src_1 = vec_ld(16, src); srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src)); if (src_really_odd != 0x0000000F) { // if src & 0xF == 0xF, then (src+1) is properly aligned // on the second vector. 
srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src)); } else { srcvB = src_1; } srcvA = vec_mergeh(vczero, srcvA); srcvB = vec_mergeh(vczero, srcvB); for(i=0; i<h; i++) { dst_odd = (unsigned long)dst & 0x0000000F; src_really_odd = (((unsigned long)src) + stride) & 0x0000000F; dstv = vec_ld(0, dst); // we we'll be able to pick-up our 9 char elements // at src + stride from those 32 bytes // then reuse the resulting 2 vectors srvcC and srcvD // as the next srcvA and srcvB src_0 = vec_ld(stride + 0, src); src_1 = vec_ld(stride + 16, src); srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src)); if (src_really_odd != 0x0000000F) { // if src & 0xF == 0xF, then (src+1) is properly aligned // on the second vector. srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src)); } else { srcvD = src_1; } srcvC = vec_mergeh(vczero, srcvC); srcvD = vec_mergeh(vczero, srcvD); // OK, now we (finally) do the math :-) // those four instructions replaces 32 int muls & 32 int adds. // isn't AltiVec nice ? tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV); tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA); tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB); tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC); srcvA = srcvC; srcvB = srcvD; tempD = vec_sr(tempD, vcsr8); dstv2 = vec_pack(tempD, (vector unsigned short)vczero); if (dst_odd) { dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1)); } else { dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3)); } vec_st(dstv2, 0, dst); dst += stride; src += stride; } POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND); }
123linslouis-android-video-cutter
jni/libavcodec/ppc/gmc_altivec.c
C
asf20
4,950
/* * Copyright (C) 2009 David Conrad * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavcodec/dsputil.h" #include "util_altivec.h" #include "types_altivec.h" #include "dsputil_altivec.h" static const vec_s16 constants = {0, 64277, 60547, 54491, 46341, 36410, 25080, 12785}; static const vec_u8 interleave_high = {0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29}; #define IDCT_START \ vec_s16 A, B, C, D, Ad, Bd, Cd, Dd, E, F, G, H;\ vec_s16 Ed, Gd, Add, Bdd, Fd, Hd;\ vec_s16 eight = vec_splat_s16(8);\ vec_u16 four = vec_splat_u16(4);\ \ vec_s16 C1 = vec_splat(constants, 1);\ vec_s16 C2 = vec_splat(constants, 2);\ vec_s16 C3 = vec_splat(constants, 3);\ vec_s16 C4 = vec_splat(constants, 4);\ vec_s16 C5 = vec_splat(constants, 5);\ vec_s16 C6 = vec_splat(constants, 6);\ vec_s16 C7 = vec_splat(constants, 7);\ \ vec_s16 b0 = vec_ld(0x00, block);\ vec_s16 b1 = vec_ld(0x10, block);\ vec_s16 b2 = vec_ld(0x20, block);\ vec_s16 b3 = vec_ld(0x30, block);\ vec_s16 b4 = vec_ld(0x40, block);\ vec_s16 b5 = vec_ld(0x50, block);\ vec_s16 b6 = vec_ld(0x60, block);\ vec_s16 b7 = vec_ld(0x70, block); // these functions do (a*C)>>16 // things are tricky because a is signed, but C unsigned. 
// M15 is used if C fits in 15 bit unsigned (C6,C7) // M16 is used if C requires 16 bits unsigned static inline vec_s16 M15(vec_s16 a, vec_s16 C) { return (vec_s16)vec_perm(vec_mule(a,C), vec_mulo(a,C), interleave_high); } static inline vec_s16 M16(vec_s16 a, vec_s16 C) { return vec_add(a, M15(a, C)); } #define IDCT_1D(ADD, SHIFT)\ A = vec_add(M16(b1, C1), M15(b7, C7));\ B = vec_sub(M15(b1, C7), M16(b7, C1));\ C = vec_add(M16(b3, C3), M16(b5, C5));\ D = vec_sub(M16(b5, C3), M16(b3, C5));\ \ Ad = M16(vec_sub(A, C), C4);\ Bd = M16(vec_sub(B, D), C4);\ \ Cd = vec_add(A, C);\ Dd = vec_add(B, D);\ \ E = ADD(M16(vec_add(b0, b4), C4));\ F = ADD(M16(vec_sub(b0, b4), C4));\ \ G = vec_add(M16(b2, C2), M15(b6, C6));\ H = vec_sub(M15(b2, C6), M16(b6, C2));\ \ Ed = vec_sub(E, G);\ Gd = vec_add(E, G);\ \ Add = vec_add(F, Ad);\ Bdd = vec_sub(Bd, H);\ \ Fd = vec_sub(F, Ad);\ Hd = vec_add(Bd, H);\ \ b0 = SHIFT(vec_add(Gd, Cd));\ b7 = SHIFT(vec_sub(Gd, Cd));\ \ b1 = SHIFT(vec_add(Add, Hd));\ b2 = SHIFT(vec_sub(Add, Hd));\ \ b3 = SHIFT(vec_add(Ed, Dd));\ b4 = SHIFT(vec_sub(Ed, Dd));\ \ b5 = SHIFT(vec_add(Fd, Bdd));\ b6 = SHIFT(vec_sub(Fd, Bdd)); #define NOP(a) a #define ADD8(a) vec_add(a, eight) #define SHIFT4(a) vec_sra(a, four) void ff_vp3_idct_altivec(DCTELEM block[64]) { IDCT_START IDCT_1D(NOP, NOP) TRANSPOSE8(b0, b1, b2, b3, b4, b5, b6, b7); IDCT_1D(ADD8, SHIFT4) vec_st(b0, 0x00, block); vec_st(b1, 0x10, block); vec_st(b2, 0x20, block); vec_st(b3, 0x30, block); vec_st(b4, 0x40, block); vec_st(b5, 0x50, block); vec_st(b6, 0x60, block); vec_st(b7, 0x70, block); } void ff_vp3_idct_put_altivec(uint8_t *dst, int stride, DCTELEM block[64]) { vec_u8 t; IDCT_START // pixels are signed; so add 128*16 in addition to the normal 8 vec_s16 v2048 = vec_sl(vec_splat_s16(1), vec_splat_u16(11)); eight = vec_add(eight, v2048); IDCT_1D(NOP, NOP) TRANSPOSE8(b0, b1, b2, b3, b4, b5, b6, b7); IDCT_1D(ADD8, SHIFT4) #define PUT(a)\ t = vec_packsu(a, a);\ vec_ste((vec_u32)t, 0, (unsigned int *)dst);\ 
vec_ste((vec_u32)t, 4, (unsigned int *)dst); PUT(b0) dst += stride; PUT(b1) dst += stride; PUT(b2) dst += stride; PUT(b3) dst += stride; PUT(b4) dst += stride; PUT(b5) dst += stride; PUT(b6) dst += stride; PUT(b7) } void ff_vp3_idct_add_altivec(uint8_t *dst, int stride, DCTELEM block[64]) { LOAD_ZERO; vec_u8 t, vdst; vec_s16 vdst_16; vec_u8 vdst_mask = vec_mergeh(vec_splat_u8(-1), vec_lvsl(0, dst)); IDCT_START IDCT_1D(NOP, NOP) TRANSPOSE8(b0, b1, b2, b3, b4, b5, b6, b7); IDCT_1D(ADD8, SHIFT4) #define ADD(a)\ vdst = vec_ld(0, dst);\ vdst_16 = (vec_s16)vec_perm(vdst, zero_u8v, vdst_mask);\ vdst_16 = vec_adds(a, vdst_16);\ t = vec_packsu(vdst_16, vdst_16);\ vec_ste((vec_u32)t, 0, (unsigned int *)dst);\ vec_ste((vec_u32)t, 4, (unsigned int *)dst); ADD(b0) dst += stride; ADD(b1) dst += stride; ADD(b2) dst += stride; ADD(b3) dst += stride; ADD(b4) dst += stride; ADD(b5) dst += stride; ADD(b6) dst += stride; ADD(b7) }
123linslouis-android-video-cutter
jni/libavcodec/ppc/vp3dsp_altivec.c
C
asf20
5,324
/* * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Checks for AltiVec presence. */ #ifdef __APPLE__ #undef _POSIX_C_SOURCE #include <sys/sysctl.h> #elif defined(__OpenBSD__) #include <sys/param.h> #include <sys/sysctl.h> #include <machine/cpu.h> #elif defined(__AMIGAOS4__) #include <exec/exec.h> #include <interfaces/exec.h> #include <proto/exec.h> #endif /* __APPLE__ */ #include "config.h" #include "dsputil_altivec.h" /** * This function MAY rely on signal() or fork() in order to make sure AltiVec * is present. */ int has_altivec(void) { #ifdef __AMIGAOS4__ ULONG result = 0; extern struct ExecIFace *IExec; IExec->GetCPUInfoTags(GCIT_VectorUnit, &result, TAG_DONE); if (result == VECTORTYPE_ALTIVEC) return 1; return 0; #elif defined(__APPLE__) || defined(__OpenBSD__) #ifdef __OpenBSD__ int sels[2] = {CTL_MACHDEP, CPU_ALTIVEC}; #else int sels[2] = {CTL_HW, HW_VECTORUNIT}; #endif int has_vu = 0; size_t len = sizeof(has_vu); int err; err = sysctl(sels, 2, &has_vu, &len, NULL, 0); if (err == 0) return has_vu != 0; return 0; #elif CONFIG_RUNTIME_CPUDETECT int proc_ver; // Support of mfspr PVR emulation added in Linux 2.6.17. 
__asm__ volatile("mfspr %0, 287" : "=r" (proc_ver)); proc_ver >>= 16; if (proc_ver & 0x8000 || proc_ver == 0x000c || proc_ver == 0x0039 || proc_ver == 0x003c || proc_ver == 0x0044 || proc_ver == 0x0045 || proc_ver == 0x0070) return 1; return 0; #else // Since we were compiled for AltiVec, just assume we have it // until someone comes up with a proper way (not involving signal hacks). return 1; #endif /* __AMIGAOS4__ */ }
123linslouis-android-video-cutter
jni/libavcodec/ppc/check_altivec.c
C
asf20
2,432
/* * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavcodec/dsputil.h" #include "libavcodec/h264data.h" #include "libavcodec/h264dsp.h" #include "dsputil_ppc.h" #include "dsputil_altivec.h" #include "util_altivec.h" #include "types_altivec.h" #define PUT_OP_U8_ALTIVEC(d, s, dst) d = s #define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s) #define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC #define PREFIX_h264_chroma_mc8_altivec put_h264_chroma_mc8_altivec #define PREFIX_no_rnd_vc1_chroma_mc8_altivec put_no_rnd_vc1_chroma_mc8_altivec #define PREFIX_h264_chroma_mc8_num altivec_put_h264_chroma_mc8_num #define PREFIX_h264_qpel16_h_lowpass_altivec put_h264_qpel16_h_lowpass_altivec #define PREFIX_h264_qpel16_h_lowpass_num altivec_put_h264_qpel16_h_lowpass_num #define PREFIX_h264_qpel16_v_lowpass_altivec put_h264_qpel16_v_lowpass_altivec #define PREFIX_h264_qpel16_v_lowpass_num altivec_put_h264_qpel16_v_lowpass_num #define PREFIX_h264_qpel16_hv_lowpass_altivec put_h264_qpel16_hv_lowpass_altivec #define PREFIX_h264_qpel16_hv_lowpass_num altivec_put_h264_qpel16_hv_lowpass_num #include "h264_template_altivec.c" #undef OP_U8_ALTIVEC #undef PREFIX_h264_chroma_mc8_altivec #undef PREFIX_no_rnd_vc1_chroma_mc8_altivec #undef 
PREFIX_h264_chroma_mc8_num #undef PREFIX_h264_qpel16_h_lowpass_altivec #undef PREFIX_h264_qpel16_h_lowpass_num #undef PREFIX_h264_qpel16_v_lowpass_altivec #undef PREFIX_h264_qpel16_v_lowpass_num #undef PREFIX_h264_qpel16_hv_lowpass_altivec #undef PREFIX_h264_qpel16_hv_lowpass_num #define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC #define PREFIX_h264_chroma_mc8_altivec avg_h264_chroma_mc8_altivec #define PREFIX_no_rnd_vc1_chroma_mc8_altivec avg_no_rnd_vc1_chroma_mc8_altivec #define PREFIX_h264_chroma_mc8_num altivec_avg_h264_chroma_mc8_num #define PREFIX_h264_qpel16_h_lowpass_altivec avg_h264_qpel16_h_lowpass_altivec #define PREFIX_h264_qpel16_h_lowpass_num altivec_avg_h264_qpel16_h_lowpass_num #define PREFIX_h264_qpel16_v_lowpass_altivec avg_h264_qpel16_v_lowpass_altivec #define PREFIX_h264_qpel16_v_lowpass_num altivec_avg_h264_qpel16_v_lowpass_num #define PREFIX_h264_qpel16_hv_lowpass_altivec avg_h264_qpel16_hv_lowpass_altivec #define PREFIX_h264_qpel16_hv_lowpass_num altivec_avg_h264_qpel16_hv_lowpass_num #include "h264_template_altivec.c" #undef OP_U8_ALTIVEC #undef PREFIX_h264_chroma_mc8_altivec #undef PREFIX_no_rnd_vc1_chroma_mc8_altivec #undef PREFIX_h264_chroma_mc8_num #undef PREFIX_h264_qpel16_h_lowpass_altivec #undef PREFIX_h264_qpel16_h_lowpass_num #undef PREFIX_h264_qpel16_v_lowpass_altivec #undef PREFIX_h264_qpel16_v_lowpass_num #undef PREFIX_h264_qpel16_hv_lowpass_altivec #undef PREFIX_h264_qpel16_hv_lowpass_num #define H264_MC(OPNAME, SIZE, CODETYPE) \ static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\ OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \ DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\ }\ \ static void OPNAME ## 
h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\ put_h264_qpel ## SIZE ## 
_h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\ OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\ DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\ DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\ put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int 
stride){\ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\ DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\ DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\ put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\ DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\ DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\ put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\ DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\ DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\ put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\ }\ static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int h) { int i; vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align; mask_ = vec_lvsl(0, src2); for (i = 0; i < h; i++) { tmp1 = vec_ld(i * src_stride1, src1); mask = vec_lvsl(i * src_stride1, src1); tmp2 = vec_ld(i * src_stride1 + 15, src1); a = vec_perm(tmp1, tmp2, mask); tmp1 = vec_ld(i * 16, src2); tmp2 = vec_ld(i * 16 + 15, src2); b = vec_perm(tmp1, tmp2, mask_); tmp1 = vec_ld(0, dst); mask = 
vec_lvsl(0, dst); tmp2 = vec_ld(15, dst); d = vec_avg(a, b); edges = vec_perm(tmp2, tmp1, mask); align = vec_lvsr(0, dst); tmp2 = vec_perm(d, edges, align); tmp1 = vec_perm(edges, d, align); vec_st(tmp2, 15, dst); vec_st(tmp1, 0 , dst); dst += dst_stride; } } static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int h) { int i; vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align; mask_ = vec_lvsl(0, src2); for (i = 0; i < h; i++) { tmp1 = vec_ld(i * src_stride1, src1); mask = vec_lvsl(i * src_stride1, src1); tmp2 = vec_ld(i * src_stride1 + 15, src1); a = vec_perm(tmp1, tmp2, mask); tmp1 = vec_ld(i * 16, src2); tmp2 = vec_ld(i * 16 + 15, src2); b = vec_perm(tmp1, tmp2, mask_); tmp1 = vec_ld(0, dst); mask = vec_lvsl(0, dst); tmp2 = vec_ld(15, dst); d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b)); edges = vec_perm(tmp2, tmp1, mask); align = vec_lvsr(0, dst); tmp2 = vec_perm(d, edges, align); tmp1 = vec_perm(edges, d, align); vec_st(tmp2, 15, dst); vec_st(tmp1, 0 , dst); dst += dst_stride; } } /* Implemented but could be faster #define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h) #define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h) */ H264_MC(put_, 16, altivec) H264_MC(avg_, 16, altivec) /**************************************************************************** * IDCT transform: ****************************************************************************/ #define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3) \ /* 1st stage */ \ vz0 = vec_add(vb0,vb2); /* temp[0] = Y[0] + Y[2] */ \ vz1 = vec_sub(vb0,vb2); /* temp[1] = Y[0] - Y[2] */ \ vz2 = vec_sra(vb1,vec_splat_u16(1)); \ vz2 = vec_sub(vz2,vb3); /* temp[2] = Y[1].1/2 - Y[3] */ \ vz3 = vec_sra(vb3,vec_splat_u16(1)); \ vz3 = vec_add(vb1,vz3); /* temp[3] = Y[1] + Y[3].1/2 */ \ /* 2nd stage: output */ \ va0 = vec_add(vz0,vz3); /* x[0] = temp[0] + temp[3] */ \ va1 = 
vec_add(vz1,vz2); /* x[1] = temp[1] + temp[2] */ \ va2 = vec_sub(vz1,vz2); /* x[2] = temp[1] - temp[2] */ \ va3 = vec_sub(vz0,vz3) /* x[3] = temp[0] - temp[3] */ #define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \ b0 = vec_mergeh( a0, a0 ); \ b1 = vec_mergeh( a1, a0 ); \ b2 = vec_mergeh( a2, a0 ); \ b3 = vec_mergeh( a3, a0 ); \ a0 = vec_mergeh( b0, b2 ); \ a1 = vec_mergel( b0, b2 ); \ a2 = vec_mergeh( b1, b3 ); \ a3 = vec_mergel( b1, b3 ); \ b0 = vec_mergeh( a0, a2 ); \ b1 = vec_mergel( a0, a2 ); \ b2 = vec_mergeh( a1, a3 ); \ b3 = vec_mergel( a1, a3 ) #define VEC_LOAD_U8_ADD_S16_STORE_U8(va) \ vdst_orig = vec_ld(0, dst); \ vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask); \ vdst_ss = (vec_s16) vec_mergeh(zero_u8v, vdst); \ va = vec_add(va, vdst_ss); \ va_u8 = vec_packsu(va, zero_s16v); \ va_u32 = vec_splat((vec_u32)va_u8, 0); \ vec_ste(va_u32, element, (uint32_t*)dst); static void ff_h264_idct_add_altivec(uint8_t *dst, DCTELEM *block, int stride) { vec_s16 va0, va1, va2, va3; vec_s16 vz0, vz1, vz2, vz3; vec_s16 vtmp0, vtmp1, vtmp2, vtmp3; vec_u8 va_u8; vec_u32 va_u32; vec_s16 vdst_ss; const vec_u16 v6us = vec_splat_u16(6); vec_u8 vdst, vdst_orig; vec_u8 vdst_mask = vec_lvsl(0, dst); int element = ((unsigned long)dst & 0xf) >> 2; LOAD_ZERO; block[0] += 32; /* add 32 as a DC-level for rounding */ vtmp0 = vec_ld(0,block); vtmp1 = vec_sld(vtmp0, vtmp0, 8); vtmp2 = vec_ld(16,block); vtmp3 = vec_sld(vtmp2, vtmp2, 8); VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3); VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3); VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3); va0 = vec_sra(va0,v6us); va1 = vec_sra(va1,v6us); va2 = vec_sra(va2,v6us); va3 = vec_sra(va3,v6us); VEC_LOAD_U8_ADD_S16_STORE_U8(va0); dst += stride; VEC_LOAD_U8_ADD_S16_STORE_U8(va1); dst += stride; VEC_LOAD_U8_ADD_S16_STORE_U8(va2); dst += stride; VEC_LOAD_U8_ADD_S16_STORE_U8(va3); } #define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\ /* a0 = SRC(0) + 
SRC(4); */ \ vec_s16 a0v = vec_add(s0, s4); \ /* a2 = SRC(0) - SRC(4); */ \ vec_s16 a2v = vec_sub(s0, s4); \ /* a4 = (SRC(2)>>1) - SRC(6); */ \ vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6); \ /* a6 = (SRC(6)>>1) + SRC(2); */ \ vec_s16 a6v = vec_add(vec_sra(s6, onev), s2); \ /* b0 = a0 + a6; */ \ vec_s16 b0v = vec_add(a0v, a6v); \ /* b2 = a2 + a4; */ \ vec_s16 b2v = vec_add(a2v, a4v); \ /* b4 = a2 - a4; */ \ vec_s16 b4v = vec_sub(a2v, a4v); \ /* b6 = a0 - a6; */ \ vec_s16 b6v = vec_sub(a0v, a6v); \ /* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \ /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \ vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \ /* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \ /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \ vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\ /* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \ /* a5 = (SRC(7)-SRC(1)) + SRC(5) + (SRC(5)>>1); */ \ vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\ /* a7 = SRC(5)+SRC(3) + SRC(1) + (SRC(1)>>1); */ \ vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\ /* b1 = (a7>>2) + a1; */ \ vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v); \ /* b3 = a3 + (a5>>2); */ \ vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov)); \ /* b5 = (a3>>2) - a5; */ \ vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v); \ /* b7 = a7 - (a1>>2); */ \ vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov)); \ /* DST(0, b0 + b7); */ \ d0 = vec_add(b0v, b7v); \ /* DST(1, b2 + b5); */ \ d1 = vec_add(b2v, b5v); \ /* DST(2, b4 + b3); */ \ d2 = vec_add(b4v, b3v); \ /* DST(3, b6 + b1); */ \ d3 = vec_add(b6v, b1v); \ /* DST(4, b6 - b1); */ \ d4 = vec_sub(b6v, b1v); \ /* DST(5, b4 - b3); */ \ d5 = vec_sub(b4v, b3v); \ /* DST(6, b2 - b5); */ \ d6 = vec_sub(b2v, b5v); \ /* DST(7, b0 - b7); */ \ d7 = vec_sub(b0v, b7v); \ } #define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \ /* unaligned load */ \ 
vec_u8 hv = vec_ld( 0, dest ); \ vec_u8 lv = vec_ld( 7, dest ); \ vec_u8 dstv = vec_perm( hv, lv, (vec_u8)perm_ldv ); \ vec_s16 idct_sh6 = vec_sra(idctv, sixv); \ vec_u16 dst16 = (vec_u16)vec_mergeh(zero_u8v, dstv); \ vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16); \ vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum); \ vec_u8 edgehv; \ /* unaligned store */ \ vec_u8 bodyv = vec_perm( idstsum8, idstsum8, perm_stv );\ vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv ); \ lv = vec_sel( lv, bodyv, edgelv ); \ vec_st( lv, 7, dest ); \ hv = vec_ld( 0, dest ); \ edgehv = vec_perm( zero_u8v, sel, perm_stv ); \ hv = vec_sel( hv, bodyv, edgehv ); \ vec_st( hv, 0, dest ); \ } static void ff_h264_idct8_add_altivec( uint8_t *dst, DCTELEM *dct, int stride ) { vec_s16 s0, s1, s2, s3, s4, s5, s6, s7; vec_s16 d0, d1, d2, d3, d4, d5, d6, d7; vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7; vec_u8 perm_ldv = vec_lvsl(0, dst); vec_u8 perm_stv = vec_lvsr(8, dst); const vec_u16 onev = vec_splat_u16(1); const vec_u16 twov = vec_splat_u16(2); const vec_u16 sixv = vec_splat_u16(6); const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1}; LOAD_ZERO; dct[0] += 32; // rounding for the >>6 at the end s0 = vec_ld(0x00, (int16_t*)dct); s1 = vec_ld(0x10, (int16_t*)dct); s2 = vec_ld(0x20, (int16_t*)dct); s3 = vec_ld(0x30, (int16_t*)dct); s4 = vec_ld(0x40, (int16_t*)dct); s5 = vec_ld(0x50, (int16_t*)dct); s6 = vec_ld(0x60, (int16_t*)dct); s7 = vec_ld(0x70, (int16_t*)dct); IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7); TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 ); IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7, idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7); ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel); ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel); ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel); ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, 
perm_ldv, perm_stv, sel); ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel); ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel); ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel); ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel); } static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, DCTELEM *block, int stride, int size) { vec_s16 dc16; vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner; LOAD_ZERO; DECLARE_ALIGNED(16, int, dc); int i; dc = (block[0] + 32) >> 6; dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1); if (size == 4) dc16 = vec_sld(dc16, zero_s16v, 8); dcplus = vec_packsu(dc16, zero_s16v); dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v); aligner = vec_lvsr(0, dst); dcplus = vec_perm(dcplus, dcplus, aligner); dcminus = vec_perm(dcminus, dcminus, aligner); for (i = 0; i < size; i += 4) { v0 = vec_ld(0, dst+0*stride); v1 = vec_ld(0, dst+1*stride); v2 = vec_ld(0, dst+2*stride); v3 = vec_ld(0, dst+3*stride); v0 = vec_adds(v0, dcplus); v1 = vec_adds(v1, dcplus); v2 = vec_adds(v2, dcplus); v3 = vec_adds(v3, dcplus); v0 = vec_subs(v0, dcminus); v1 = vec_subs(v1, dcminus); v2 = vec_subs(v2, dcminus); v3 = vec_subs(v3, dcminus); vec_st(v0, 0, dst+0*stride); vec_st(v1, 0, dst+1*stride); vec_st(v2, 0, dst+2*stride); vec_st(v3, 0, dst+3*stride); dst += 4*stride; } } static void h264_idct_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride) { h264_idct_dc_add_internal(dst, block, stride, 4); } static void ff_h264_idct8_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride) { h264_idct_dc_add_internal(dst, block, stride, 8); } static void ff_h264_idct_add16_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ int i; for(i=0; i<16; i++){ int nnz = nnzc[ scan8[i] ]; if(nnz){ if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride); else ff_h264_idct_add_altivec(dst + 
block_offset[i], block + i*16, stride); } } } static void ff_h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ int i; for(i=0; i<16; i++){ if(nnzc[ scan8[i] ]) ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride); else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride); } } static void ff_h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ int i; for(i=0; i<16; i+=4){ int nnz = nnzc[ scan8[i] ]; if(nnz){ if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride); else ff_h264_idct8_add_altivec (dst + block_offset[i], block + i*16, stride); } } } static void ff_h264_idct_add8_altivec(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ int i; for(i=16; i<16+8; i++){ if(nnzc[ scan8[i] ]) ff_h264_idct_add_altivec(dest[(i&4)>>2] + block_offset[i], block + i*16, stride); else if(block[i*16]) h264_idct_dc_add_altivec(dest[(i&4)>>2] + block_offset[i], block + i*16, stride); } } #define transpose4x16(r0, r1, r2, r3) { \ register vec_u8 r4; \ register vec_u8 r5; \ register vec_u8 r6; \ register vec_u8 r7; \ \ r4 = vec_mergeh(r0, r2); /*0, 2 set 0*/ \ r5 = vec_mergel(r0, r2); /*0, 2 set 1*/ \ r6 = vec_mergeh(r1, r3); /*1, 3 set 0*/ \ r7 = vec_mergel(r1, r3); /*1, 3 set 1*/ \ \ r0 = vec_mergeh(r4, r6); /*all set 0*/ \ r1 = vec_mergel(r4, r6); /*all set 1*/ \ r2 = vec_mergeh(r5, r7); /*all set 2*/ \ r3 = vec_mergel(r5, r7); /*all set 3*/ \ } static inline void write16x4(uint8_t *dst, int dst_stride, register vec_u8 r0, register vec_u8 r1, register vec_u8 r2, register vec_u8 r3) { DECLARE_ALIGNED(16, unsigned char, result)[64]; uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst; int int_dst_stride = dst_stride/4; vec_st(r0, 0, result); vec_st(r1, 16, result); vec_st(r2, 32, result); vec_st(r3, 
48, result); /* FIXME: there has to be a better way!!!! */ *dst_int = *src_int; *(dst_int+ int_dst_stride) = *(src_int + 1); *(dst_int+ 2*int_dst_stride) = *(src_int + 2); *(dst_int+ 3*int_dst_stride) = *(src_int + 3); *(dst_int+ 4*int_dst_stride) = *(src_int + 4); *(dst_int+ 5*int_dst_stride) = *(src_int + 5); *(dst_int+ 6*int_dst_stride) = *(src_int + 6); *(dst_int+ 7*int_dst_stride) = *(src_int + 7); *(dst_int+ 8*int_dst_stride) = *(src_int + 8); *(dst_int+ 9*int_dst_stride) = *(src_int + 9); *(dst_int+10*int_dst_stride) = *(src_int + 10); *(dst_int+11*int_dst_stride) = *(src_int + 11); *(dst_int+12*int_dst_stride) = *(src_int + 12); *(dst_int+13*int_dst_stride) = *(src_int + 13); *(dst_int+14*int_dst_stride) = *(src_int + 14); *(dst_int+15*int_dst_stride) = *(src_int + 15); } /** \brief performs a 6x16 transpose of data in src, and stores it to dst \todo FIXME: see if we can't spare some vec_lvsl() by them factorizing out of unaligned_load() */ #define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\ register vec_u8 r0 = unaligned_load(0, src); \ register vec_u8 r1 = unaligned_load( src_stride, src); \ register vec_u8 r2 = unaligned_load(2* src_stride, src); \ register vec_u8 r3 = unaligned_load(3* src_stride, src); \ register vec_u8 r4 = unaligned_load(4* src_stride, src); \ register vec_u8 r5 = unaligned_load(5* src_stride, src); \ register vec_u8 r6 = unaligned_load(6* src_stride, src); \ register vec_u8 r7 = unaligned_load(7* src_stride, src); \ register vec_u8 r14 = unaligned_load(14*src_stride, src); \ register vec_u8 r15 = unaligned_load(15*src_stride, src); \ \ r8 = unaligned_load( 8*src_stride, src); \ r9 = unaligned_load( 9*src_stride, src); \ r10 = unaligned_load(10*src_stride, src); \ r11 = unaligned_load(11*src_stride, src); \ r12 = unaligned_load(12*src_stride, src); \ r13 = unaligned_load(13*src_stride, src); \ \ /*Merge first pairs*/ \ r0 = vec_mergeh(r0, r8); /*0, 8*/ \ r1 = vec_mergeh(r1, r9); /*1, 9*/ \ r2 = vec_mergeh(r2, 
r10); /*2,10*/ \ r3 = vec_mergeh(r3, r11); /*3,11*/ \ r4 = vec_mergeh(r4, r12); /*4,12*/ \ r5 = vec_mergeh(r5, r13); /*5,13*/ \ r6 = vec_mergeh(r6, r14); /*6,14*/ \ r7 = vec_mergeh(r7, r15); /*7,15*/ \ \ /*Merge second pairs*/ \ r8 = vec_mergeh(r0, r4); /*0,4, 8,12 set 0*/ \ r9 = vec_mergel(r0, r4); /*0,4, 8,12 set 1*/ \ r10 = vec_mergeh(r1, r5); /*1,5, 9,13 set 0*/ \ r11 = vec_mergel(r1, r5); /*1,5, 9,13 set 1*/ \ r12 = vec_mergeh(r2, r6); /*2,6,10,14 set 0*/ \ r13 = vec_mergel(r2, r6); /*2,6,10,14 set 1*/ \ r14 = vec_mergeh(r3, r7); /*3,7,11,15 set 0*/ \ r15 = vec_mergel(r3, r7); /*3,7,11,15 set 1*/ \ \ /*Third merge*/ \ r0 = vec_mergeh(r8, r12); /*0,2,4,6,8,10,12,14 set 0*/ \ r1 = vec_mergel(r8, r12); /*0,2,4,6,8,10,12,14 set 1*/ \ r2 = vec_mergeh(r9, r13); /*0,2,4,6,8,10,12,14 set 2*/ \ r4 = vec_mergeh(r10, r14); /*1,3,5,7,9,11,13,15 set 0*/ \ r5 = vec_mergel(r10, r14); /*1,3,5,7,9,11,13,15 set 1*/ \ r6 = vec_mergeh(r11, r15); /*1,3,5,7,9,11,13,15 set 2*/ \ /* Don't need to compute 3 and 7*/ \ \ /*Final merge*/ \ r8 = vec_mergeh(r0, r4); /*all set 0*/ \ r9 = vec_mergel(r0, r4); /*all set 1*/ \ r10 = vec_mergeh(r1, r5); /*all set 2*/ \ r11 = vec_mergel(r1, r5); /*all set 3*/ \ r12 = vec_mergeh(r2, r6); /*all set 4*/ \ r13 = vec_mergel(r2, r6); /*all set 5*/ \ /* Don't need to compute 14 and 15*/ \ \ } // out: o = |x-y| < a static inline vec_u8 diff_lt_altivec ( register vec_u8 x, register vec_u8 y, register vec_u8 a) { register vec_u8 diff = vec_subs(x, y); register vec_u8 diffneg = vec_subs(y, x); register vec_u8 o = vec_or(diff, diffneg); /* |x-y| */ o = (vec_u8)vec_cmplt(o, a); return o; } static inline vec_u8 h264_deblock_mask ( register vec_u8 p0, register vec_u8 p1, register vec_u8 q0, register vec_u8 q1, register vec_u8 alpha, register vec_u8 beta) { register vec_u8 mask; register vec_u8 tempmask; mask = diff_lt_altivec(p0, q0, alpha); tempmask = diff_lt_altivec(p1, p0, beta); mask = vec_and(mask, tempmask); tempmask = diff_lt_altivec(q1, q0, beta); mask 
= vec_and(mask, tempmask); return mask; } // out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0) static inline vec_u8 h264_deblock_q1(register vec_u8 p0, register vec_u8 p1, register vec_u8 p2, register vec_u8 q0, register vec_u8 tc0) { register vec_u8 average = vec_avg(p0, q0); register vec_u8 temp; register vec_u8 uncliped; register vec_u8 ones; register vec_u8 max; register vec_u8 min; register vec_u8 newp1; temp = vec_xor(average, p2); average = vec_avg(average, p2); /*avg(p2, avg(p0, q0)) */ ones = vec_splat_u8(1); temp = vec_and(temp, ones); /*(p2^avg(p0, q0)) & 1 */ uncliped = vec_subs(average, temp); /*(p2+((p0+q0+1)>>1))>>1 */ max = vec_adds(p1, tc0); min = vec_subs(p1, tc0); newp1 = vec_max(min, uncliped); newp1 = vec_min(max, newp1); return newp1; } #define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) { \ \ const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4)); \ \ register vec_u8 pq0bit = vec_xor(p0,q0); \ register vec_u8 q1minus; \ register vec_u8 p0minus; \ register vec_u8 stage1; \ register vec_u8 stage2; \ register vec_u8 vec160; \ register vec_u8 delta; \ register vec_u8 deltaneg; \ \ q1minus = vec_nor(q1, q1); /* 255 - q1 */ \ stage1 = vec_avg(p1, q1minus); /* (p1 - q1 + 256)>>1 */ \ stage2 = vec_sr(stage1, vec_splat_u8(1)); /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */ \ p0minus = vec_nor(p0, p0); /* 255 - p0 */ \ stage1 = vec_avg(q0, p0minus); /* (q0 - p0 + 256)>>1 */ \ pq0bit = vec_and(pq0bit, vec_splat_u8(1)); \ stage2 = vec_avg(stage2, pq0bit); /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */ \ stage2 = vec_adds(stage2, stage1); /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */ \ vec160 = vec_ld(0, &A0v); \ deltaneg = vec_subs(vec160, stage2); /* -d */ \ delta = vec_subs(stage2, vec160); /* d */ \ deltaneg = vec_min(tc0masked, deltaneg); \ delta = vec_min(tc0masked, delta); \ p0 = vec_subs(p0, deltaneg); \ q0 = vec_subs(q0, delta); \ p0 = vec_adds(p0, delta); \ q0 = vec_adds(q0, deltaneg); \ } #define 
h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) { \ DECLARE_ALIGNED(16, unsigned char, temp)[16]; \ register vec_u8 alphavec; \ register vec_u8 betavec; \ register vec_u8 mask; \ register vec_u8 p1mask; \ register vec_u8 q1mask; \ register vector signed char tc0vec; \ register vec_u8 finaltc0; \ register vec_u8 tc0masked; \ register vec_u8 newp1; \ register vec_u8 newq1; \ \ temp[0] = alpha; \ temp[1] = beta; \ alphavec = vec_ld(0, temp); \ betavec = vec_splat(alphavec, 0x1); \ alphavec = vec_splat(alphavec, 0x0); \ mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /*if in block */ \ \ *((int *)temp) = *((int *)tc0); \ tc0vec = vec_ld(0, (signed char*)temp); \ tc0vec = vec_mergeh(tc0vec, tc0vec); \ tc0vec = vec_mergeh(tc0vec, tc0vec); \ mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1))); /* if tc0[i] >= 0 */ \ finaltc0 = vec_and((vec_u8)tc0vec, mask); /* tc = tc0 */ \ \ p1mask = diff_lt_altivec(p2, p0, betavec); \ p1mask = vec_and(p1mask, mask); /* if ( |p2 - p0| < beta) */ \ tc0masked = vec_and(p1mask, (vec_u8)tc0vec); \ finaltc0 = vec_sub(finaltc0, p1mask); /* tc++ */ \ newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked); \ /*end if*/ \ \ q1mask = diff_lt_altivec(q2, q0, betavec); \ q1mask = vec_and(q1mask, mask); /* if ( |q2 - q0| < beta ) */\ tc0masked = vec_and(q1mask, (vec_u8)tc0vec); \ finaltc0 = vec_sub(finaltc0, q1mask); /* tc++ */ \ newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked); \ /*end if*/ \ \ h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0); \ p1 = newp1; \ q1 = newq1; \ } static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) { if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) { register vec_u8 p2 = vec_ld(-3*stride, pix); register vec_u8 p1 = vec_ld(-2*stride, pix); register vec_u8 p0 = vec_ld(-1*stride, pix); register vec_u8 q0 = vec_ld(0, pix); register vec_u8 q1 = vec_ld(stride, pix); register vec_u8 q2 = vec_ld(2*stride, pix); h264_loop_filter_luma_altivec(p2, 
p1, p0, q0, q1, q2, alpha, beta, tc0); vec_st(p1, -2*stride, pix); vec_st(p0, -1*stride, pix); vec_st(q0, 0, pix); vec_st(q1, stride, pix); } } static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) { register vec_u8 line0, line1, line2, line3, line4, line5; if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0) return; readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5); h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0); transpose4x16(line1, line2, line3, line4); write16x4(pix-2, stride, line1, line2, line3, line4); } static av_always_inline void weight_h264_WxH_altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset, int w, int h) { int y, aligned; vec_u8 vblock; vec_s16 vtemp, vweight, voffset, v0, v1; vec_u16 vlog2_denom; DECLARE_ALIGNED(16, int32_t, temp)[4]; LOAD_ZERO; offset <<= log2_denom; if(log2_denom) offset += 1<<(log2_denom-1); temp[0] = log2_denom; temp[1] = weight; temp[2] = offset; vtemp = (vec_s16)vec_ld(0, temp); vlog2_denom = (vec_u16)vec_splat(vtemp, 1); vweight = vec_splat(vtemp, 3); voffset = vec_splat(vtemp, 5); aligned = !((unsigned long)block & 0xf); for (y=0; y<h; y++) { vblock = vec_ld(0, block); v0 = (vec_s16)vec_mergeh(zero_u8v, vblock); v1 = (vec_s16)vec_mergel(zero_u8v, vblock); if (w == 16 || aligned) { v0 = vec_mladd(v0, vweight, zero_s16v); v0 = vec_adds(v0, voffset); v0 = vec_sra(v0, vlog2_denom); } if (w == 16 || !aligned) { v1 = vec_mladd(v1, vweight, zero_s16v); v1 = vec_adds(v1, voffset); v1 = vec_sra(v1, vlog2_denom); } vblock = vec_packsu(v0, v1); vec_st(vblock, 0, block); block += stride; } } static av_always_inline void biweight_h264_WxH_altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h) { int y, dst_aligned, src_aligned; vec_u8 vsrc, vdst; vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3; vec_u16 vlog2_denom; 
DECLARE_ALIGNED(16, int32_t, temp)[4]; LOAD_ZERO; offset = ((offset + 1) | 1) << log2_denom; temp[0] = log2_denom+1; temp[1] = weights; temp[2] = weightd; temp[3] = offset; vtemp = (vec_s16)vec_ld(0, temp); vlog2_denom = (vec_u16)vec_splat(vtemp, 1); vweights = vec_splat(vtemp, 3); vweightd = vec_splat(vtemp, 5); voffset = vec_splat(vtemp, 7); dst_aligned = !((unsigned long)dst & 0xf); src_aligned = !((unsigned long)src & 0xf); for (y=0; y<h; y++) { vdst = vec_ld(0, dst); vsrc = vec_ld(0, src); v0 = (vec_s16)vec_mergeh(zero_u8v, vdst); v1 = (vec_s16)vec_mergel(zero_u8v, vdst); v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc); v3 = (vec_s16)vec_mergel(zero_u8v, vsrc); if (w == 8) { if (src_aligned) v3 = v2; else v2 = v3; } if (w == 16 || dst_aligned) { v0 = vec_mladd(v0, vweightd, zero_s16v); v2 = vec_mladd(v2, vweights, zero_s16v); v0 = vec_adds(v0, voffset); v0 = vec_adds(v0, v2); v0 = vec_sra(v0, vlog2_denom); } if (w == 16 || !dst_aligned) { v1 = vec_mladd(v1, vweightd, zero_s16v); v3 = vec_mladd(v3, vweights, zero_s16v); v1 = vec_adds(v1, voffset); v1 = vec_adds(v1, v3); v1 = vec_sra(v1, vlog2_denom); } vdst = vec_packsu(v0, v1); vec_st(vdst, 0, dst); dst += stride; src += stride; } } #define H264_WEIGHT(W,H) \ static void ff_weight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \ weight_h264_WxH_altivec(block, stride, log2_denom, weight, offset, W, H); \ }\ static void ff_biweight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \ biweight_h264_WxH_altivec(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \ } H264_WEIGHT(16,16) H264_WEIGHT(16, 8) H264_WEIGHT( 8,16) H264_WEIGHT( 8, 8) H264_WEIGHT( 8, 4) void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) { if (has_altivec()) { c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec; c->avg_h264_chroma_pixels_tab[0] = 
avg_h264_chroma_mc8_altivec; c->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_altivec; c->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_altivec; #define dspfunc(PFX, IDX, NUM) \ c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \ c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \ c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \ c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \ c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \ c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \ c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \ c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \ c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \ c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \ c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \ c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \ c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \ c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \ c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \ c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec dspfunc(put_h264_qpel, 0, 16); dspfunc(avg_h264_qpel, 0, 16); #undef dspfunc } } void ff_h264dsp_init_ppc(H264DSPContext *c) { if (has_altivec()) { c->h264_idct_add = ff_h264_idct_add_altivec; c->h264_idct_add8 = ff_h264_idct_add8_altivec; c->h264_idct_add16 = ff_h264_idct_add16_altivec; c->h264_idct_add16intra = ff_h264_idct_add16intra_altivec; c->h264_idct_dc_add= h264_idct_dc_add_altivec; c->h264_idct8_dc_add = ff_h264_idct8_dc_add_altivec; c->h264_idct8_add = ff_h264_idct8_add_altivec; c->h264_idct8_add4 = ff_h264_idct8_add4_altivec; c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_altivec; c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_altivec; c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels16x16_altivec; c->weight_h264_pixels_tab[1] = 
ff_weight_h264_pixels16x8_altivec; c->weight_h264_pixels_tab[2] = ff_weight_h264_pixels8x16_altivec; c->weight_h264_pixels_tab[3] = ff_weight_h264_pixels8x8_altivec; c->weight_h264_pixels_tab[4] = ff_weight_h264_pixels8x4_altivec; c->biweight_h264_pixels_tab[0] = ff_biweight_h264_pixels16x16_altivec; c->biweight_h264_pixels_tab[1] = ff_biweight_h264_pixels16x8_altivec; c->biweight_h264_pixels_tab[2] = ff_biweight_h264_pixels8x16_altivec; c->biweight_h264_pixels_tab[3] = ff_biweight_h264_pixels8x8_altivec; c->biweight_h264_pixels_tab[4] = ff_biweight_h264_pixels8x4_altivec; } }
123linslouis-android-video-cutter
jni/libavcodec/ppc/h264_altivec.c
C
asf20
48,184
/* * Copyright (c) 2001 Michel Lespinasse * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* * NOTE: This code is based on GPL code from the libmpeg2 project. The * author, Michel Lespinasses, has given explicit permission to release * under LGPL as part of FFmpeg. */ /* * FFmpeg integration by Dieter Shirley * * This file is a direct copy of the AltiVec IDCT module from the libmpeg2 * project. I've deleted all of the libmpeg2-specific code, renamed the * functions and reordered the function parameters. The only change to the * IDCT function itself was to factor out the partial transposition, and to * perform a full transpose at the end of the function. 
*/ #include <stdlib.h> /* malloc(), free() */ #include <string.h> #include "config.h" #if HAVE_ALTIVEC_H #include <altivec.h> #endif #include "libavcodec/dsputil.h" #include "types_altivec.h" #include "dsputil_ppc.h" #include "dsputil_altivec.h" #define IDCT_HALF \ /* 1st stage */ \ t1 = vec_mradds (a1, vx7, vx1 ); \ t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7)); \ t7 = vec_mradds (a2, vx5, vx3); \ t3 = vec_mradds (ma2, vx3, vx5); \ \ /* 2nd stage */ \ t5 = vec_adds (vx0, vx4); \ t0 = vec_subs (vx0, vx4); \ t2 = vec_mradds (a0, vx6, vx2); \ t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6)); \ t6 = vec_adds (t8, t3); \ t3 = vec_subs (t8, t3); \ t8 = vec_subs (t1, t7); \ t1 = vec_adds (t1, t7); \ \ /* 3rd stage */ \ t7 = vec_adds (t5, t2); \ t2 = vec_subs (t5, t2); \ t5 = vec_adds (t0, t4); \ t0 = vec_subs (t0, t4); \ t4 = vec_subs (t8, t3); \ t3 = vec_adds (t8, t3); \ \ /* 4th stage */ \ vy0 = vec_adds (t7, t1); \ vy7 = vec_subs (t7, t1); \ vy1 = vec_mradds (c4, t3, t5); \ vy6 = vec_mradds (mc4, t3, t5); \ vy2 = vec_mradds (c4, t4, t0); \ vy5 = vec_mradds (mc4, t4, t0); \ vy3 = vec_adds (t2, t6); \ vy4 = vec_subs (t2, t6); #define IDCT \ vec_s16 vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7; \ vec_s16 vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7; \ vec_s16 a0, a1, a2, ma2, c4, mc4, zero, bias; \ vec_s16 t0, t1, t2, t3, t4, t5, t6, t7, t8; \ vec_u16 shift; \ \ c4 = vec_splat (constants[0], 0); \ a0 = vec_splat (constants[0], 1); \ a1 = vec_splat (constants[0], 2); \ a2 = vec_splat (constants[0], 3); \ mc4 = vec_splat (constants[0], 4); \ ma2 = vec_splat (constants[0], 5); \ bias = (vec_s16)vec_splat ((vec_s32)constants[0], 3); \ \ zero = vec_splat_s16 (0); \ shift = vec_splat_u16 (4); \ \ vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero); \ vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero); \ vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero); \ vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero); \ vx4 = vec_mradds (vec_sl 
(block[4], shift), constants[1], zero); \ vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero); \ vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero); \ vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero); \ \ IDCT_HALF \ \ vx0 = vec_mergeh (vy0, vy4); \ vx1 = vec_mergel (vy0, vy4); \ vx2 = vec_mergeh (vy1, vy5); \ vx3 = vec_mergel (vy1, vy5); \ vx4 = vec_mergeh (vy2, vy6); \ vx5 = vec_mergel (vy2, vy6); \ vx6 = vec_mergeh (vy3, vy7); \ vx7 = vec_mergel (vy3, vy7); \ \ vy0 = vec_mergeh (vx0, vx4); \ vy1 = vec_mergel (vx0, vx4); \ vy2 = vec_mergeh (vx1, vx5); \ vy3 = vec_mergel (vx1, vx5); \ vy4 = vec_mergeh (vx2, vx6); \ vy5 = vec_mergel (vx2, vx6); \ vy6 = vec_mergeh (vx3, vx7); \ vy7 = vec_mergel (vx3, vx7); \ \ vx0 = vec_adds (vec_mergeh (vy0, vy4), bias); \ vx1 = vec_mergel (vy0, vy4); \ vx2 = vec_mergeh (vy1, vy5); \ vx3 = vec_mergel (vy1, vy5); \ vx4 = vec_mergeh (vy2, vy6); \ vx5 = vec_mergel (vy2, vy6); \ vx6 = vec_mergeh (vy3, vy7); \ vx7 = vec_mergel (vy3, vy7); \ \ IDCT_HALF \ \ shift = vec_splat_u16 (6); \ vx0 = vec_sra (vy0, shift); \ vx1 = vec_sra (vy1, shift); \ vx2 = vec_sra (vy2, shift); \ vx3 = vec_sra (vy3, shift); \ vx4 = vec_sra (vy4, shift); \ vx5 = vec_sra (vy5, shift); \ vx6 = vec_sra (vy6, shift); \ vx7 = vec_sra (vy7, shift); static const vec_s16 constants[5] = { {23170, 13573, 6518, 21895, -23170, -21895, 32, 31}, {16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725}, {22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521}, {21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692}, {19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722} }; void idct_put_altivec(uint8_t* dest, int stride, int16_t *blk) { POWERPC_PERF_DECLARE(altivec_idct_put_num, 1); vec_s16 *block = (vec_s16*)blk; vec_u8 tmp; #if CONFIG_POWERPC_PERF POWERPC_PERF_START_COUNT(altivec_idct_put_num, 1); #endif IDCT #define COPY(dest,src) \ tmp = vec_packsu (src, src); \ vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest); \ vec_ste 
((vec_u32)tmp, 4, (unsigned int *)dest); COPY (dest, vx0) dest += stride; COPY (dest, vx1) dest += stride; COPY (dest, vx2) dest += stride; COPY (dest, vx3) dest += stride; COPY (dest, vx4) dest += stride; COPY (dest, vx5) dest += stride; COPY (dest, vx6) dest += stride; COPY (dest, vx7) POWERPC_PERF_STOP_COUNT(altivec_idct_put_num, 1); } void idct_add_altivec(uint8_t* dest, int stride, int16_t *blk) { POWERPC_PERF_DECLARE(altivec_idct_add_num, 1); vec_s16 *block = (vec_s16*)blk; vec_u8 tmp; vec_s16 tmp2, tmp3; vec_u8 perm0; vec_u8 perm1; vec_u8 p0, p1, p; #if CONFIG_POWERPC_PERF POWERPC_PERF_START_COUNT(altivec_idct_add_num, 1); #endif IDCT p0 = vec_lvsl (0, dest); p1 = vec_lvsl (stride, dest); p = vec_splat_u8 (-1); perm0 = vec_mergeh (p, p0); perm1 = vec_mergeh (p, p1); #define ADD(dest,src,perm) \ /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \ tmp = vec_ld (0, dest); \ tmp2 = (vec_s16)vec_perm (tmp, (vec_u8)zero, perm); \ tmp3 = vec_adds (tmp2, src); \ tmp = vec_packsu (tmp3, tmp3); \ vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest); \ vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest); ADD (dest, vx0, perm0) dest += stride; ADD (dest, vx1, perm1) dest += stride; ADD (dest, vx2, perm0) dest += stride; ADD (dest, vx3, perm1) dest += stride; ADD (dest, vx4, perm0) dest += stride; ADD (dest, vx5, perm1) dest += stride; ADD (dest, vx6, perm0) dest += stride; ADD (dest, vx7, perm1) POWERPC_PERF_STOP_COUNT(altivec_idct_add_num, 1); }
123linslouis-android-video-cutter
jni/libavcodec/ppc/idct_altivec.c
C
asf20
11,198
/* * Copyright (c) 2002 Brian Foley * Copyright (c) 2002 Dieter Shirley * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_PPC_DSPUTIL_ALTIVEC_H #define AVCODEC_PPC_DSPUTIL_ALTIVEC_H #include <stdint.h> #include "libavcodec/dsputil.h" void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h); void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h); int has_altivec(void); void fdct_altivec(int16_t *block); void gmc1_altivec(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder); void idct_put_altivec(uint8_t *dest, int line_size, int16_t *block); void idct_add_altivec(uint8_t *dest, int line_size, int16_t *block); void ff_vp3_idct_altivec(DCTELEM *block); void ff_vp3_idct_put_altivec(uint8_t *dest, int line_size, DCTELEM *block); void ff_vp3_idct_add_altivec(uint8_t *dest, int line_size, DCTELEM *block); void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx); void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx); void vc1dsp_init_altivec(DSPContext* c, AVCodecContext *avctx); void float_init_altivec(DSPContext* c, AVCodecContext *avctx); void int_init_altivec(DSPContext* c, AVCodecContext *avctx); #endif /* 
AVCODEC_PPC_DSPUTIL_ALTIVEC_H */
123linslouis-android-video-cutter
jni/libavcodec/ppc/dsputil_altivec.h
C
asf20
2,081
/* * Copyright (c) 2007 Luca Barbato <lu_zero@gentoo.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** ** @file ** integer misc ops. **/ #include "config.h" #if HAVE_ALTIVEC_H #include <altivec.h> #endif #include "libavcodec/dsputil.h" #include "dsputil_altivec.h" #include "types_altivec.h" static int ssd_int8_vs_int16_altivec(const int8_t *pix1, const int16_t *pix2, int size) { int i, size16; vector signed char vpix1; vector signed short vpix2, vdiff, vpix1l,vpix1h; union { vector signed int vscore; int32_t score[4]; } u; u.vscore = vec_splat_s32(0); // //XXX lazy way, fix it later #define vec_unaligned_load(b) \ vec_perm(vec_ld(0,b),vec_ld(15,b),vec_lvsl(0, b)); size16 = size >> 4; while(size16) { // score += (pix1[i]-pix2[i])*(pix1[i]-pix2[i]); //load pix1 and the first batch of pix2 vpix1 = vec_unaligned_load(pix1); vpix2 = vec_unaligned_load(pix2); pix2 += 8; //unpack vpix1h = vec_unpackh(vpix1); vdiff = vec_sub(vpix1h, vpix2); vpix1l = vec_unpackl(vpix1); // load another batch from pix2 vpix2 = vec_unaligned_load(pix2); u.vscore = vec_msum(vdiff, vdiff, u.vscore); vdiff = vec_sub(vpix1l, vpix2); u.vscore = vec_msum(vdiff, vdiff, u.vscore); pix1 += 16; pix2 += 8; size16--; } u.vscore = vec_sums(u.vscore, vec_splat_s32(0)); size %= 16; for (i = 0; i < size; i++) { 
u.score[3] += (pix1[i]-pix2[i])*(pix1[i]-pix2[i]); } return u.score[3]; } static int32_t scalarproduct_int16_altivec(int16_t * v1, int16_t * v2, int order, const int shift) { int i; LOAD_ZERO; register vec_s16 vec1, *pv; register vec_s32 res = vec_splat_s32(0), t; register vec_u32 shifts; int32_t ires; shifts = zero_u32v; if(shift & 0x10) shifts = vec_add(shifts, vec_sl(vec_splat_u32(0x08), vec_splat_u32(0x1))); if(shift & 0x08) shifts = vec_add(shifts, vec_splat_u32(0x08)); if(shift & 0x04) shifts = vec_add(shifts, vec_splat_u32(0x04)); if(shift & 0x02) shifts = vec_add(shifts, vec_splat_u32(0x02)); if(shift & 0x01) shifts = vec_add(shifts, vec_splat_u32(0x01)); for(i = 0; i < order; i += 8){ pv = (vec_s16*)v1; vec1 = vec_perm(pv[0], pv[1], vec_lvsl(0, v1)); t = vec_msum(vec1, vec_ld(0, v2), zero_s32v); t = vec_sr(t, shifts); res = vec_sums(t, res); v1 += 8; v2 += 8; } res = vec_splat(res, 3); vec_ste(res, 0, &ires); return ires; } static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul) { LOAD_ZERO; vec_s16 *pv1 = (vec_s16*)v1; vec_s16 *pv2 = (vec_s16*)v2; vec_s16 *pv3 = (vec_s16*)v3; register vec_s16 muls = {mul,mul,mul,mul,mul,mul,mul,mul}; register vec_s16 t0, t1, i0, i1; register vec_s16 i2 = pv2[0], i3 = pv3[0]; register vec_s32 res = zero_s32v; register vec_u8 align = vec_lvsl(0, v2); int32_t ires; order >>= 4; do { t0 = vec_perm(i2, pv2[1], align); i2 = pv2[2]; t1 = vec_perm(pv2[1], i2, align); i0 = pv1[0]; i1 = pv1[1]; res = vec_msum(t0, i0, res); res = vec_msum(t1, i1, res); t0 = vec_perm(i3, pv3[1], align); i3 = pv3[2]; t1 = vec_perm(pv3[1], i3, align); pv1[0] = vec_mladd(t0, muls, i0); pv1[1] = vec_mladd(t1, muls, i1); pv1 += 2; pv2 += 2; pv3 += 2; } while(--order); res = vec_splat(vec_sums(res, zero_s32v), 3); vec_ste(res, 0, &ires); return ires; } void int_init_altivec(DSPContext* c, AVCodecContext *avctx) { c->ssd_int8_vs_int16 = ssd_int8_vs_int16_altivec; c->scalarproduct_int16 = 
scalarproduct_int16_altivec; c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_altivec; }
123linslouis-android-video-cutter
jni/libavcodec/ppc/int_altivec.c
C
asf20
4,697
/* * Copyright (c) 2006 Guillaume Poirier <gpoirier@mplayerhq.hu> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_PPC_TYPES_ALTIVEC_H #define AVCODEC_PPC_TYPES_ALTIVEC_H /*********************************************************************** * Vector types **********************************************************************/ #define vec_u8 vector unsigned char #define vec_s8 vector signed char #define vec_u16 vector unsigned short #define vec_s16 vector signed short #define vec_u32 vector unsigned int #define vec_s32 vector signed int /*********************************************************************** * Null vector **********************************************************************/ #define LOAD_ZERO const vec_u8 zerov = vec_splat_u8( 0 ) #define zero_u8v (vec_u8) zerov #define zero_s8v (vec_s8) zerov #define zero_u16v (vec_u16) zerov #define zero_s16v (vec_s16) zerov #define zero_u32v (vec_u32) zerov #define zero_s32v (vec_s32) zerov #endif /* AVCODEC_PPC_TYPES_ALTIVEC_H */
123linslouis-android-video-cutter
jni/libavcodec/ppc/types_altivec.h
C
asf20
1,748
/* * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Contains misc utility macros and inline functions */ #ifndef AVCODEC_PPC_UTIL_ALTIVEC_H #define AVCODEC_PPC_UTIL_ALTIVEC_H #include <stdint.h> #include "config.h" #if HAVE_ALTIVEC_H #include <altivec.h> #endif // used to build registers permutation vectors (vcprm) // the 's' are for words in the _s_econd vector #define WORD_0 0x00,0x01,0x02,0x03 #define WORD_1 0x04,0x05,0x06,0x07 #define WORD_2 0x08,0x09,0x0a,0x0b #define WORD_3 0x0c,0x0d,0x0e,0x0f #define WORD_s0 0x10,0x11,0x12,0x13 #define WORD_s1 0x14,0x15,0x16,0x17 #define WORD_s2 0x18,0x19,0x1a,0x1b #define WORD_s3 0x1c,0x1d,0x1e,0x1f #define vcprm(a,b,c,d) (const vector unsigned char){WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d} #define vcii(a,b,c,d) (const vector float){FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d} // vcprmle is used to keep the same index as in the SSE version. // it's the same as vcprm, with the index inversed // ('le' is Little Endian) #define vcprmle(a,b,c,d) vcprm(d,c,b,a) // used to build inverse/identity vectors (vcii) // n is _n_egative, p is _p_ositive #define FLOAT_n -1. #define FLOAT_p 1. 
// Transpose 8x8 matrix of 16-bit elements (in-place) #define TRANSPOSE8(a,b,c,d,e,f,g,h) \ do { \ vector signed short A1, B1, C1, D1, E1, F1, G1, H1; \ vector signed short A2, B2, C2, D2, E2, F2, G2, H2; \ \ A1 = vec_mergeh (a, e); \ B1 = vec_mergel (a, e); \ C1 = vec_mergeh (b, f); \ D1 = vec_mergel (b, f); \ E1 = vec_mergeh (c, g); \ F1 = vec_mergel (c, g); \ G1 = vec_mergeh (d, h); \ H1 = vec_mergel (d, h); \ \ A2 = vec_mergeh (A1, E1); \ B2 = vec_mergel (A1, E1); \ C2 = vec_mergeh (B1, F1); \ D2 = vec_mergel (B1, F1); \ E2 = vec_mergeh (C1, G1); \ F2 = vec_mergel (C1, G1); \ G2 = vec_mergeh (D1, H1); \ H2 = vec_mergel (D1, H1); \ \ a = vec_mergeh (A2, E2); \ b = vec_mergel (A2, E2); \ c = vec_mergeh (B2, F2); \ d = vec_mergel (B2, F2); \ e = vec_mergeh (C2, G2); \ f = vec_mergel (C2, G2); \ g = vec_mergeh (D2, H2); \ h = vec_mergel (D2, H2); \ } while (0) /** \brief loads unaligned vector \a *src with offset \a offset and returns it */ static inline vector unsigned char unaligned_load(int offset, uint8_t *src) { register vector unsigned char first = vec_ld(offset, src); register vector unsigned char second = vec_ld(offset+15, src); register vector unsigned char mask = vec_lvsl(offset, src); return vec_perm(first, second, mask); } #endif /* AVCODEC_PPC_UTIL_ALTIVEC_H */
123linslouis-android-video-cutter
jni/libavcodec/ppc/util_altivec.h
C
asf20
3,315
/* * Copyright (C) 2003 James Klicman <james@klicman.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "config.h" #if HAVE_ALTIVEC_H #include <altivec.h> #endif #include "libavutil/common.h" #include "libavcodec/dsputil.h" #include "dsputil_ppc.h" #include "dsputil_altivec.h" #define vs16(v) ((vector signed short)(v)) #define vs32(v) ((vector signed int)(v)) #define vu8(v) ((vector unsigned char)(v)) #define vu16(v) ((vector unsigned short)(v)) #define vu32(v) ((vector unsigned int)(v)) #define C1 0.98078525066375732421875000 /* cos(1*PI/16) */ #define C2 0.92387950420379638671875000 /* cos(2*PI/16) */ #define C3 0.83146959543228149414062500 /* cos(3*PI/16) */ #define C4 0.70710676908493041992187500 /* cos(4*PI/16) */ #define C5 0.55557024478912353515625000 /* cos(5*PI/16) */ #define C6 0.38268342614173889160156250 /* cos(6*PI/16) */ #define C7 0.19509032368659973144531250 /* cos(7*PI/16) */ #define SQRT_2 1.41421353816986083984375000 /* sqrt(2) */ #define W0 -(2 * C2) #define W1 (2 * C6) #define W2 (SQRT_2 * C6) #define W3 (SQRT_2 * C3) #define W4 (SQRT_2 * (-C1 + C3 + C5 - C7)) #define W5 (SQRT_2 * ( C1 + C3 - C5 + C7)) #define W6 (SQRT_2 * ( C1 + C3 + C5 - C7)) #define W7 (SQRT_2 * ( C1 + C3 - C5 - C7)) #define W8 (SQRT_2 * ( C7 - C3)) #define W9 (SQRT_2 * (-C1 - 
C3)) #define WA (SQRT_2 * (-C3 - C5)) #define WB (SQRT_2 * ( C5 - C3)) static vector float fdctconsts[3] = { { W0, W1, W2, W3 }, { W4, W5, W6, W7 }, { W8, W9, WA, WB } }; #define LD_W0 vec_splat(cnsts0, 0) #define LD_W1 vec_splat(cnsts0, 1) #define LD_W2 vec_splat(cnsts0, 2) #define LD_W3 vec_splat(cnsts0, 3) #define LD_W4 vec_splat(cnsts1, 0) #define LD_W5 vec_splat(cnsts1, 1) #define LD_W6 vec_splat(cnsts1, 2) #define LD_W7 vec_splat(cnsts1, 3) #define LD_W8 vec_splat(cnsts2, 0) #define LD_W9 vec_splat(cnsts2, 1) #define LD_WA vec_splat(cnsts2, 2) #define LD_WB vec_splat(cnsts2, 3) #define FDCTROW(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */ \ x0 = vec_add(b0, b7); /* x0 = b0 + b7; */ \ x7 = vec_sub(b0, b7); /* x7 = b0 - b7; */ \ x1 = vec_add(b1, b6); /* x1 = b1 + b6; */ \ x6 = vec_sub(b1, b6); /* x6 = b1 - b6; */ \ x2 = vec_add(b2, b5); /* x2 = b2 + b5; */ \ x5 = vec_sub(b2, b5); /* x5 = b2 - b5; */ \ x3 = vec_add(b3, b4); /* x3 = b3 + b4; */ \ x4 = vec_sub(b3, b4); /* x4 = b3 - b4; */ \ \ b7 = vec_add(x0, x3); /* b7 = x0 + x3; */ \ b1 = vec_add(x1, x2); /* b1 = x1 + x2; */ \ b0 = vec_add(b7, b1); /* b0 = b7 + b1; */ \ b4 = vec_sub(b7, b1); /* b4 = b7 - b1; */ \ \ b2 = vec_sub(x0, x3); /* b2 = x0 - x3; */ \ b6 = vec_sub(x1, x2); /* b6 = x1 - x2; */ \ b5 = vec_add(b6, b2); /* b5 = b6 + b2; */ \ cnst = LD_W2; \ b5 = vec_madd(cnst, b5, mzero); /* b5 = b5 * W2; */ \ cnst = LD_W1; \ b2 = vec_madd(cnst, b2, b5); /* b2 = b5 + b2 * W1; */ \ cnst = LD_W0; \ b6 = vec_madd(cnst, b6, b5); /* b6 = b5 + b6 * W0; */ \ \ x0 = vec_add(x4, x7); /* x0 = x4 + x7; */ \ x1 = vec_add(x5, x6); /* x1 = x5 + x6; */ \ x2 = vec_add(x4, x6); /* x2 = x4 + x6; */ \ x3 = vec_add(x5, x7); /* x3 = x5 + x7; */ \ x8 = vec_add(x2, x3); /* x8 = x2 + x3; */ \ cnst = LD_W3; \ x8 = vec_madd(cnst, x8, mzero); /* x8 = x8 * W3; */ \ \ cnst = LD_W8; \ x0 = vec_madd(cnst, x0, mzero); /* x0 *= W8; */ \ cnst = LD_W9; \ x1 = vec_madd(cnst, x1, mzero); /* x1 *= W9; */ \ cnst = LD_WA; \ x2 = vec_madd(cnst, x2, x8); /* x2 
= x2 * WA + x8; */ \ cnst = LD_WB; \ x3 = vec_madd(cnst, x3, x8); /* x3 = x3 * WB + x8; */ \ \ cnst = LD_W4; \ b7 = vec_madd(cnst, x4, x0); /* b7 = x4 * W4 + x0; */ \ cnst = LD_W5; \ b5 = vec_madd(cnst, x5, x1); /* b5 = x5 * W5 + x1; */ \ cnst = LD_W6; \ b3 = vec_madd(cnst, x6, x1); /* b3 = x6 * W6 + x1; */ \ cnst = LD_W7; \ b1 = vec_madd(cnst, x7, x0); /* b1 = x7 * W7 + x0; */ \ \ b7 = vec_add(b7, x2); /* b7 = b7 + x2; */ \ b5 = vec_add(b5, x3); /* b5 = b5 + x3; */ \ b3 = vec_add(b3, x2); /* b3 = b3 + x2; */ \ b1 = vec_add(b1, x3); /* b1 = b1 + x3; */ \ /* }}} */ #define FDCTCOL(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */ \ x0 = vec_add(b0, b7); /* x0 = b0 + b7; */ \ x7 = vec_sub(b0, b7); /* x7 = b0 - b7; */ \ x1 = vec_add(b1, b6); /* x1 = b1 + b6; */ \ x6 = vec_sub(b1, b6); /* x6 = b1 - b6; */ \ x2 = vec_add(b2, b5); /* x2 = b2 + b5; */ \ x5 = vec_sub(b2, b5); /* x5 = b2 - b5; */ \ x3 = vec_add(b3, b4); /* x3 = b3 + b4; */ \ x4 = vec_sub(b3, b4); /* x4 = b3 - b4; */ \ \ b7 = vec_add(x0, x3); /* b7 = x0 + x3; */ \ b1 = vec_add(x1, x2); /* b1 = x1 + x2; */ \ b0 = vec_add(b7, b1); /* b0 = b7 + b1; */ \ b4 = vec_sub(b7, b1); /* b4 = b7 - b1; */ \ \ b2 = vec_sub(x0, x3); /* b2 = x0 - x3; */ \ b6 = vec_sub(x1, x2); /* b6 = x1 - x2; */ \ b5 = vec_add(b6, b2); /* b5 = b6 + b2; */ \ cnst = LD_W2; \ b5 = vec_madd(cnst, b5, mzero); /* b5 = b5 * W2; */ \ cnst = LD_W1; \ b2 = vec_madd(cnst, b2, b5); /* b2 = b5 + b2 * W1; */ \ cnst = LD_W0; \ b6 = vec_madd(cnst, b6, b5); /* b6 = b5 + b6 * W0; */ \ \ x0 = vec_add(x4, x7); /* x0 = x4 + x7; */ \ x1 = vec_add(x5, x6); /* x1 = x5 + x6; */ \ x2 = vec_add(x4, x6); /* x2 = x4 + x6; */ \ x3 = vec_add(x5, x7); /* x3 = x5 + x7; */ \ x8 = vec_add(x2, x3); /* x8 = x2 + x3; */ \ cnst = LD_W3; \ x8 = vec_madd(cnst, x8, mzero); /* x8 = x8 * W3; */ \ \ cnst = LD_W8; \ x0 = vec_madd(cnst, x0, mzero); /* x0 *= W8; */ \ cnst = LD_W9; \ x1 = vec_madd(cnst, x1, mzero); /* x1 *= W9; */ \ cnst = LD_WA; \ x2 = vec_madd(cnst, x2, x8); /* x2 = x2 * WA + x8; */ \ 
cnst = LD_WB; \ x3 = vec_madd(cnst, x3, x8); /* x3 = x3 * WB + x8; */ \ \ cnst = LD_W4; \ b7 = vec_madd(cnst, x4, x0); /* b7 = x4 * W4 + x0; */ \ cnst = LD_W5; \ b5 = vec_madd(cnst, x5, x1); /* b5 = x5 * W5 + x1; */ \ cnst = LD_W6; \ b3 = vec_madd(cnst, x6, x1); /* b3 = x6 * W6 + x1; */ \ cnst = LD_W7; \ b1 = vec_madd(cnst, x7, x0); /* b1 = x7 * W7 + x0; */ \ \ b7 = vec_add(b7, x2); /* b7 += x2; */ \ b5 = vec_add(b5, x3); /* b5 += x3; */ \ b3 = vec_add(b3, x2); /* b3 += x2; */ \ b1 = vec_add(b1, x3); /* b1 += x3; */ \ /* }}} */ /* two dimensional discrete cosine transform */ void fdct_altivec(int16_t *block) { POWERPC_PERF_DECLARE(altivec_fdct, 1); vector signed short *bp; vector float *cp; vector float b00, b10, b20, b30, b40, b50, b60, b70; vector float b01, b11, b21, b31, b41, b51, b61, b71; vector float mzero, cnst, cnsts0, cnsts1, cnsts2; vector float x0, x1, x2, x3, x4, x5, x6, x7, x8; POWERPC_PERF_START_COUNT(altivec_fdct, 1); /* setup constants {{{ */ /* mzero = -0.0 */ mzero = ((vector float)vec_splat_u32(-1)); mzero = ((vector float)vec_sl(vu32(mzero), vu32(mzero))); cp = fdctconsts; cnsts0 = vec_ld(0, cp); cp++; cnsts1 = vec_ld(0, cp); cp++; cnsts2 = vec_ld(0, cp); /* }}} */ /* 8x8 matrix transpose (vector short[8]) {{{ */ #define MERGE_S16(hl,a,b) vec_merge##hl(vs16(a), vs16(b)) bp = (vector signed short*)block; b00 = ((vector float)vec_ld(0, bp)); b40 = ((vector float)vec_ld(16*4, bp)); b01 = ((vector float)MERGE_S16(h, b00, b40)); b11 = ((vector float)MERGE_S16(l, b00, b40)); bp++; b10 = ((vector float)vec_ld(0, bp)); b50 = ((vector float)vec_ld(16*4, bp)); b21 = ((vector float)MERGE_S16(h, b10, b50)); b31 = ((vector float)MERGE_S16(l, b10, b50)); bp++; b20 = ((vector float)vec_ld(0, bp)); b60 = ((vector float)vec_ld(16*4, bp)); b41 = ((vector float)MERGE_S16(h, b20, b60)); b51 = ((vector float)MERGE_S16(l, b20, b60)); bp++; b30 = ((vector float)vec_ld(0, bp)); b70 = ((vector float)vec_ld(16*4, bp)); b61 = ((vector float)MERGE_S16(h, b30, b70)); b71 = 
((vector float)MERGE_S16(l, b30, b70)); x0 = ((vector float)MERGE_S16(h, b01, b41)); x1 = ((vector float)MERGE_S16(l, b01, b41)); x2 = ((vector float)MERGE_S16(h, b11, b51)); x3 = ((vector float)MERGE_S16(l, b11, b51)); x4 = ((vector float)MERGE_S16(h, b21, b61)); x5 = ((vector float)MERGE_S16(l, b21, b61)); x6 = ((vector float)MERGE_S16(h, b31, b71)); x7 = ((vector float)MERGE_S16(l, b31, b71)); b00 = ((vector float)MERGE_S16(h, x0, x4)); b10 = ((vector float)MERGE_S16(l, x0, x4)); b20 = ((vector float)MERGE_S16(h, x1, x5)); b30 = ((vector float)MERGE_S16(l, x1, x5)); b40 = ((vector float)MERGE_S16(h, x2, x6)); b50 = ((vector float)MERGE_S16(l, x2, x6)); b60 = ((vector float)MERGE_S16(h, x3, x7)); b70 = ((vector float)MERGE_S16(l, x3, x7)); #undef MERGE_S16 /* }}} */ /* Some of the initial calculations can be done as vector short before * conversion to vector float. The following code section takes advantage * of this. */ #if 1 /* fdct rows {{{ */ x0 = ((vector float)vec_add(vs16(b00), vs16(b70))); x7 = ((vector float)vec_sub(vs16(b00), vs16(b70))); x1 = ((vector float)vec_add(vs16(b10), vs16(b60))); x6 = ((vector float)vec_sub(vs16(b10), vs16(b60))); x2 = ((vector float)vec_add(vs16(b20), vs16(b50))); x5 = ((vector float)vec_sub(vs16(b20), vs16(b50))); x3 = ((vector float)vec_add(vs16(b30), vs16(b40))); x4 = ((vector float)vec_sub(vs16(b30), vs16(b40))); b70 = ((vector float)vec_add(vs16(x0), vs16(x3))); b10 = ((vector float)vec_add(vs16(x1), vs16(x2))); b00 = ((vector float)vec_add(vs16(b70), vs16(b10))); b40 = ((vector float)vec_sub(vs16(b70), vs16(b10))); #define CTF0(n) \ b##n##1 = ((vector float)vec_unpackl(vs16(b##n##0))); \ b##n##0 = ((vector float)vec_unpackh(vs16(b##n##0))); \ b##n##1 = vec_ctf(vs32(b##n##1), 0); \ b##n##0 = vec_ctf(vs32(b##n##0), 0); CTF0(0); CTF0(4); b20 = ((vector float)vec_sub(vs16(x0), vs16(x3))); b60 = ((vector float)vec_sub(vs16(x1), vs16(x2))); CTF0(2); CTF0(6); #undef CTF0 x0 = vec_add(b60, b20); x1 = vec_add(b61, b21); cnst = 
LD_W2; x0 = vec_madd(cnst, x0, mzero); x1 = vec_madd(cnst, x1, mzero); cnst = LD_W1; b20 = vec_madd(cnst, b20, x0); b21 = vec_madd(cnst, b21, x1); cnst = LD_W0; b60 = vec_madd(cnst, b60, x0); b61 = vec_madd(cnst, b61, x1); #define CTFX(x,b) \ b##0 = ((vector float)vec_unpackh(vs16(x))); \ b##1 = ((vector float)vec_unpackl(vs16(x))); \ b##0 = vec_ctf(vs32(b##0), 0); \ b##1 = vec_ctf(vs32(b##1), 0); \ CTFX(x4, b7); CTFX(x5, b5); CTFX(x6, b3); CTFX(x7, b1); #undef CTFX x0 = vec_add(b70, b10); x1 = vec_add(b50, b30); x2 = vec_add(b70, b30); x3 = vec_add(b50, b10); x8 = vec_add(x2, x3); cnst = LD_W3; x8 = vec_madd(cnst, x8, mzero); cnst = LD_W8; x0 = vec_madd(cnst, x0, mzero); cnst = LD_W9; x1 = vec_madd(cnst, x1, mzero); cnst = LD_WA; x2 = vec_madd(cnst, x2, x8); cnst = LD_WB; x3 = vec_madd(cnst, x3, x8); cnst = LD_W4; b70 = vec_madd(cnst, b70, x0); cnst = LD_W5; b50 = vec_madd(cnst, b50, x1); cnst = LD_W6; b30 = vec_madd(cnst, b30, x1); cnst = LD_W7; b10 = vec_madd(cnst, b10, x0); b70 = vec_add(b70, x2); b50 = vec_add(b50, x3); b30 = vec_add(b30, x2); b10 = vec_add(b10, x3); x0 = vec_add(b71, b11); x1 = vec_add(b51, b31); x2 = vec_add(b71, b31); x3 = vec_add(b51, b11); x8 = vec_add(x2, x3); cnst = LD_W3; x8 = vec_madd(cnst, x8, mzero); cnst = LD_W8; x0 = vec_madd(cnst, x0, mzero); cnst = LD_W9; x1 = vec_madd(cnst, x1, mzero); cnst = LD_WA; x2 = vec_madd(cnst, x2, x8); cnst = LD_WB; x3 = vec_madd(cnst, x3, x8); cnst = LD_W4; b71 = vec_madd(cnst, b71, x0); cnst = LD_W5; b51 = vec_madd(cnst, b51, x1); cnst = LD_W6; b31 = vec_madd(cnst, b31, x1); cnst = LD_W7; b11 = vec_madd(cnst, b11, x0); b71 = vec_add(b71, x2); b51 = vec_add(b51, x3); b31 = vec_add(b31, x2); b11 = vec_add(b11, x3); /* }}} */ #else /* convert to float {{{ */ #define CTF(n) \ vs32(b##n##1) = vec_unpackl(vs16(b##n##0)); \ vs32(b##n##0) = vec_unpackh(vs16(b##n##0)); \ b##n##1 = vec_ctf(vs32(b##n##1), 0); \ b##n##0 = vec_ctf(vs32(b##n##0), 0); \ CTF(0); CTF(1); CTF(2); CTF(3); CTF(4); CTF(5); CTF(6); 
CTF(7); #undef CTF /* }}} */ FDCTROW(b00, b10, b20, b30, b40, b50, b60, b70); FDCTROW(b01, b11, b21, b31, b41, b51, b61, b71); #endif /* 8x8 matrix transpose (vector float[8][2]) {{{ */ x0 = vec_mergel(b00, b20); x1 = vec_mergeh(b00, b20); x2 = vec_mergel(b10, b30); x3 = vec_mergeh(b10, b30); b00 = vec_mergeh(x1, x3); b10 = vec_mergel(x1, x3); b20 = vec_mergeh(x0, x2); b30 = vec_mergel(x0, x2); x4 = vec_mergel(b41, b61); x5 = vec_mergeh(b41, b61); x6 = vec_mergel(b51, b71); x7 = vec_mergeh(b51, b71); b41 = vec_mergeh(x5, x7); b51 = vec_mergel(x5, x7); b61 = vec_mergeh(x4, x6); b71 = vec_mergel(x4, x6); x0 = vec_mergel(b01, b21); x1 = vec_mergeh(b01, b21); x2 = vec_mergel(b11, b31); x3 = vec_mergeh(b11, b31); x4 = vec_mergel(b40, b60); x5 = vec_mergeh(b40, b60); x6 = vec_mergel(b50, b70); x7 = vec_mergeh(b50, b70); b40 = vec_mergeh(x1, x3); b50 = vec_mergel(x1, x3); b60 = vec_mergeh(x0, x2); b70 = vec_mergel(x0, x2); b01 = vec_mergeh(x5, x7); b11 = vec_mergel(x5, x7); b21 = vec_mergeh(x4, x6); b31 = vec_mergel(x4, x6); /* }}} */ FDCTCOL(b00, b10, b20, b30, b40, b50, b60, b70); FDCTCOL(b01, b11, b21, b31, b41, b51, b61, b71); /* round, convert back to short {{{ */ #define CTS(n) \ b##n##0 = vec_round(b##n##0); \ b##n##1 = vec_round(b##n##1); \ b##n##0 = ((vector float)vec_cts(b##n##0, 0)); \ b##n##1 = ((vector float)vec_cts(b##n##1, 0)); \ b##n##0 = ((vector float)vec_pack(vs32(b##n##0), vs32(b##n##1))); \ vec_st(vs16(b##n##0), 0, bp); bp = (vector signed short*)block; CTS(0); bp++; CTS(1); bp++; CTS(2); bp++; CTS(3); bp++; CTS(4); bp++; CTS(5); bp++; CTS(6); bp++; CTS(7); #undef CTS /* }}} */ POWERPC_PERF_STOP_COUNT(altivec_fdct, 1); } /* vim:set foldmethod=marker foldlevel=0: */
123linslouis-android-video-cutter
jni/libavcodec/ppc/fdct_altivec.c
C
asf20
18,511
/* * simple math operations * Copyright (c) 2001, 2002 Fabrice Bellard * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_MATHOPS_H #define AVCODEC_MATHOPS_H #include "libavutil/common.h" #if ARCH_ARM # include "arm/mathops.h" #elif ARCH_AVR32 # include "avr32/mathops.h" #elif ARCH_BFIN # include "bfin/mathops.h" #elif ARCH_MIPS # include "mips/mathops.h" #elif ARCH_PPC # include "ppc/mathops.h" #elif ARCH_X86 # include "x86/mathops.h" #endif /* generic implementation */ #ifndef MULL # define MULL(a,b,s) (((int64_t)(a) * (int64_t)(b)) >> (s)) #endif #ifndef MULH //gcc 3.4 creates an incredibly bloated mess out of this //# define MULH(a,b) (((int64_t)(a) * (int64_t)(b))>>32) static av_always_inline int MULH(int a, int b){ return ((int64_t)(a) * (int64_t)(b))>>32; } #endif #ifndef UMULH static av_always_inline unsigned UMULH(unsigned a, unsigned b){ return ((uint64_t)(a) * (uint64_t)(b))>>32; } #endif #ifndef MUL64 # define MUL64(a,b) ((int64_t)(a) * (int64_t)(b)) #endif #ifndef MAC64 # define MAC64(d, a, b) ((d) += MUL64(a, b)) #endif #ifndef MLS64 # define MLS64(d, a, b) ((d) -= MUL64(a, b)) #endif /* signed 16x16 -> 32 multiply add accumulate */ #ifndef MAC16 # define MAC16(rt, ra, rb) rt += (ra) * (rb) 
#endif /* signed 16x16 -> 32 multiply */ #ifndef MUL16 # define MUL16(ra, rb) ((ra) * (rb)) #endif #ifndef MLS16 # define MLS16(rt, ra, rb) ((rt) -= (ra) * (rb)) #endif /* median of 3 */ #ifndef mid_pred #define mid_pred mid_pred static inline av_const int mid_pred(int a, int b, int c) { #if 0 int t= (a-b)&((a-b)>>31); a-=t; b+=t; b-= (b-c)&((b-c)>>31); b+= (a-b)&((a-b)>>31); return b; #else if(a>b){ if(c>b){ if(c>a) b=a; else b=c; } }else{ if(b>c){ if(c>a) b=c; else b=a; } } return b; #endif } #endif #ifndef sign_extend static inline av_const int sign_extend(int val, unsigned bits) { return (val << (INT_BIT - bits)) >> (INT_BIT - bits); } #endif #ifndef zero_extend static inline av_const unsigned zero_extend(unsigned val, unsigned bits) { return (val << (INT_BIT - bits)) >> (INT_BIT - bits); } #endif #ifndef COPY3_IF_LT #define COPY3_IF_LT(x, y, a, b, c, d)\ if ((y) < (x)) {\ (x) = (y);\ (a) = (b);\ (c) = (d);\ } #endif #ifndef NEG_SSR32 # define NEG_SSR32(a,s) ((( int32_t)(a))>>(32-(s))) #endif #ifndef NEG_USR32 # define NEG_USR32(a,s) (((uint32_t)(a))>>(32-(s))) #endif #endif /* AVCODEC_MATHOPS_H */
123linslouis-android-video-cutter
jni/libavcodec/mathops.h
C
asf20
3,352
/* * Chinese AVS video (AVS1-P2, JiZhun profile) decoder. * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_CAVS_H #define AVCODEC_CAVS_H #include "dsputil.h" #include "mpegvideo.h" #define SLICE_MAX_START_CODE 0x000001af #define EXT_START_CODE 0x000001b5 #define USER_START_CODE 0x000001b2 #define CAVS_START_CODE 0x000001b0 #define PIC_I_START_CODE 0x000001b3 #define PIC_PB_START_CODE 0x000001b6 #define A_AVAIL 1 #define B_AVAIL 2 #define C_AVAIL 4 #define D_AVAIL 8 #define NOT_AVAIL -1 #define REF_INTRA -2 #define REF_DIR -3 #define ESCAPE_CODE 59 #define FWD0 0x01 #define FWD1 0x02 #define BWD0 0x04 #define BWD1 0x08 #define SYM0 0x10 #define SYM1 0x20 #define SPLITH 0x40 #define SPLITV 0x80 #define MV_BWD_OFFS 12 #define MV_STRIDE 4 enum cavs_mb { I_8X8 = 0, P_SKIP, P_16X16, P_16X8, P_8X16, P_8X8, B_SKIP, B_DIRECT, B_FWD_16X16, B_BWD_16X16, B_SYM_16X16, B_8X8 = 29 }; enum cavs_sub_mb { B_SUB_DIRECT, B_SUB_FWD, B_SUB_BWD, B_SUB_SYM }; enum cavs_intra_luma { INTRA_L_VERT, INTRA_L_HORIZ, INTRA_L_LP, INTRA_L_DOWN_LEFT, INTRA_L_DOWN_RIGHT, INTRA_L_LP_LEFT, INTRA_L_LP_TOP, INTRA_L_DC_128 }; enum cavs_intra_chroma { INTRA_C_LP, INTRA_C_HORIZ, INTRA_C_VERT, INTRA_C_PLANE, INTRA_C_LP_LEFT, INTRA_C_LP_TOP, 
INTRA_C_DC_128, }; enum cavs_mv_pred { MV_PRED_MEDIAN, MV_PRED_LEFT, MV_PRED_TOP, MV_PRED_TOPRIGHT, MV_PRED_PSKIP, MV_PRED_BSKIP }; enum cavs_block { BLK_16X16, BLK_16X8, BLK_8X16, BLK_8X8 }; enum cavs_mv_loc { MV_FWD_D3 = 0, MV_FWD_B2, MV_FWD_B3, MV_FWD_C2, MV_FWD_A1, MV_FWD_X0, MV_FWD_X1, MV_FWD_A3 = 8, MV_FWD_X2, MV_FWD_X3, MV_BWD_D3 = MV_BWD_OFFS, MV_BWD_B2, MV_BWD_B3, MV_BWD_C2, MV_BWD_A1, MV_BWD_X0, MV_BWD_X1, MV_BWD_A3 = MV_BWD_OFFS+8, MV_BWD_X2, MV_BWD_X3 }; DECLARE_ALIGNED(8, typedef, struct) { int16_t x; int16_t y; int16_t dist; int16_t ref; } cavs_vector; struct dec_2dvlc { int8_t rltab[59][3]; int8_t level_add[27]; int8_t golomb_order; int inc_limit; int8_t max_run; }; typedef struct { MpegEncContext s; Picture picture; ///< currently decoded frame Picture DPB[2]; ///< reference frames int dist[2]; ///< temporal distances from current frame to ref frames int profile, level; int aspect_ratio; int mb_width, mb_height; int pic_type; int stream_revision; ///<0 for samples from 2006, 1 for rm52j encoder int progressive; int pic_structure; int skip_mode_flag; ///< select between skip_count or one skip_flag per MB int loop_filter_disable; int alpha_offset, beta_offset; int ref_flag; int mbx, mby, mbidx; ///< macroblock coordinates int flags; ///< availability flags of neighbouring macroblocks int stc; ///< last start code uint8_t *cy, *cu, *cv; ///< current MB sample pointers int left_qp; uint8_t *top_qp; /** mv motion vector cache 0: D3 B2 B3 C2 4: A1 X0 X1 - 8: A3 X2 X3 - X are the vectors in the current macroblock (5,6,9,10) A is the macroblock to the left (4,8) B is the macroblock to the top (1,2) C is the macroblock to the top-right (3) D is the macroblock to the top-left (0) the same is repeated for backward motion vectors */ cavs_vector mv[2*4*3]; cavs_vector *top_mv[2]; cavs_vector *col_mv; /** luma pred mode cache 0: -- B2 B3 3: A1 X0 X1 6: A3 X2 X3 */ int pred_mode_Y[3*3]; int *top_pred_Y; int l_stride, c_stride; int luma_scan[4]; int qp; int 
qp_fixed; int cbp; ScanTable scantable; /** intra prediction is done with un-deblocked samples they are saved here before deblocking the MB */ uint8_t *top_border_y, *top_border_u, *top_border_v; uint8_t left_border_y[26], left_border_u[10], left_border_v[10]; uint8_t intern_border_y[26]; uint8_t topleft_border_y, topleft_border_u, topleft_border_v; void (*intra_pred_l[8])(uint8_t *d,uint8_t *top,uint8_t *left,int stride); void (*intra_pred_c[7])(uint8_t *d,uint8_t *top,uint8_t *left,int stride); uint8_t *col_type_base; /* scaling factors for MV prediction */ int sym_factor; ///< for scaling in symmetrical B block int direct_den[2]; ///< for scaling in direct B block int scale_den[2]; ///< for scaling neighbouring MVs int got_keyframe; DCTELEM *block; } AVSContext; extern const uint8_t ff_cavs_dequant_shift[64]; extern const uint16_t ff_cavs_dequant_mul[64]; extern const struct dec_2dvlc ff_cavs_intra_dec[7]; extern const struct dec_2dvlc ff_cavs_inter_dec[7]; extern const struct dec_2dvlc ff_cavs_chroma_dec[5]; extern const uint8_t ff_cavs_chroma_qp[64]; extern const uint8_t ff_cavs_scan3x3[4]; extern const uint8_t ff_cavs_partition_flags[30]; extern const int_fast8_t ff_left_modifier_l[8]; extern const int_fast8_t ff_top_modifier_l[8]; extern const int_fast8_t ff_left_modifier_c[7]; extern const int_fast8_t ff_top_modifier_c[7]; extern const cavs_vector ff_cavs_intra_mv; extern const cavs_vector ff_cavs_un_mv; extern const cavs_vector ff_cavs_dir_mv; static inline void modify_pred(const int_fast8_t *mod_table, int *mode) { *mode = mod_table[*mode]; if(*mode < 0) { av_log(NULL, AV_LOG_ERROR, "Illegal intra prediction mode\n"); *mode = 0; } } static inline void set_intra_mode_default(AVSContext *h) { if(h->stream_revision > 0) { h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL; h->top_pred_Y[h->mbx*2+0] = h->top_pred_Y[h->mbx*2+1] = NOT_AVAIL; } else { h->pred_mode_Y[3] = h->pred_mode_Y[6] = INTRA_L_LP; h->top_pred_Y[h->mbx*2+0] = h->top_pred_Y[h->mbx*2+1] = 
INTRA_L_LP; } } static inline void set_mvs(cavs_vector *mv, enum cavs_block size) { switch(size) { case BLK_16X16: mv[MV_STRIDE ] = mv[0]; mv[MV_STRIDE+1] = mv[0]; case BLK_16X8: mv[1] = mv[0]; break; case BLK_8X16: mv[MV_STRIDE] = mv[0]; break; } } static inline void set_mv_intra(AVSContext *h) { h->mv[MV_FWD_X0] = ff_cavs_intra_mv; set_mvs(&h->mv[MV_FWD_X0], BLK_16X16); h->mv[MV_BWD_X0] = ff_cavs_intra_mv; set_mvs(&h->mv[MV_BWD_X0], BLK_16X16); if(h->pic_type != FF_B_TYPE) h->col_type_base[h->mbidx] = I_8X8; } static inline int dequant(AVSContext *h, DCTELEM *level_buf, uint8_t *run_buf, DCTELEM *dst, int mul, int shift, int coeff_num) { int round = 1 << (shift - 1); int pos = -1; const uint8_t *scantab = h->scantable.permutated; /* inverse scan and dequantization */ while(--coeff_num >= 0){ pos += run_buf[coeff_num]; if(pos > 63) { av_log(h->s.avctx, AV_LOG_ERROR, "position out of block bounds at pic %d MB(%d,%d)\n", h->picture.poc, h->mbx, h->mby); return -1; } dst[scantab[pos]] = (level_buf[coeff_num]*mul + round) >> shift; } return 0; } void ff_cavs_filter(AVSContext *h, enum cavs_mb mb_type); void ff_cavs_load_intra_pred_luma(AVSContext *h, uint8_t *top, uint8_t **left, int block); void ff_cavs_load_intra_pred_chroma(AVSContext *h); void ff_cavs_modify_mb_i(AVSContext *h, int *pred_mode_uv); void ff_cavs_inter(AVSContext *h, enum cavs_mb mb_type); void ff_cavs_mv(AVSContext *h, enum cavs_mv_loc nP, enum cavs_mv_loc nC, enum cavs_mv_pred mode, enum cavs_block size, int ref); void ff_cavs_init_mb(AVSContext *h); int ff_cavs_next_mb(AVSContext *h); void ff_cavs_init_pic(AVSContext *h); void ff_cavs_init_top_lines(AVSContext *h); int ff_cavs_init(AVCodecContext *avctx); int ff_cavs_end (AVCodecContext *avctx); #endif /* AVCODEC_CAVS_H */
123linslouis-android-video-cutter
jni/libavcodec/cavs.h
C
asf20
9,145
/* * RealAudio 2.0 (28.8K) * Copyright (c) 2003 the ffmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_RA288_H #define AVCODEC_RA288_H #include <stdint.h> static const float amptable[8]={ 0.515625, 0.90234375, 1.57910156, 2.76342773, -0.515625, -0.90234375, -1.57910156, -2.76342773 }; static const int16_t codetable[128][5]={ { 668, -2950, -1254, -1790, -2553}, { -5032, -4577, -1045, 2908, 3318}, { -2819, -2677, -948, -2825, -4450}, { -6679, -340, 1482, -1276, 1262}, { -562, -6757, 1281, 179, -1274}, { -2512, -7130, -4925, 6913, 2411}, { -2478, -156, 4683, -3873, 0}, { -8208, 2140, -478, -2785, 533}, { 1889, 2759, 1381, -6955, -5913}, { 5082, -2460, -5778, 1797, 568}, { -2208, -3309, -4523, -6236, -7505}, { -2719, 4358, -2988, -1149, 2664}, { 1259, 995, 2711, -2464,-10390}, { 1722, -7569, -2742, 2171, -2329}, { 1032, 747, -858, -7946,-12843}, { 3106, 4856, -4193, -2541, 1035}, { 1862, -960, -6628, 410, 5882}, { -2493, -2628, -4000, -60, 7202}, { -2672, 1446, 1536, -3831, 1233}, { -5302, 6912, 1589, -4187, 3665}, { -3456, -8170, -7709, 1384, 4698}, { -4699, -6209,-11176, 8104, 16830}, { 930, 7004, 1269, -8977, 2567}, { 4649, 11804, 3441, -5657, 1199}, { 2542, -183, -8859, -7976, 3230}, { -2872, -2011, -9713, -8385, 12983}, { 3086, 2140, -3680, -9643, 
-2896}, { -7609, 6515, -2283, -2522, 6332}, { -3333, -5620, -9130,-11131, 5543}, { -407, -6721,-17466, -2889, 11568}, { 3692, 6796, -262,-10846, -1856}, { 7275, 13404, -2989,-10595, 4936}, { 244, -2219, 2656, 3776, -5412}, { -4043, -5934, 2131, 863, -2866}, { -3302, 1743, -2006, -128, -2052}, { -6361, 3342, -1583, -21, 1142}, { -3837, -1831, 6397, 2545, -2848}, { -9332, -6528, 5309, 1986, -2245}, { -4490, 748, 1935, -3027, -493}, { -9255, 5366, 3193, -4493, 1784}, { 4784, -370, 1866, 1057, -1889}, { 7342, -2690, -2577, 676, -611}, { -502, 2235, -1850, -1777, -2049}, { 1011, 3880, -2465, 2209, -152}, { 2592, 2829, 5588, 2839, -7306}, { -3049, -4918, 5955, 9201, -4447}, { 697, 3908, 5798, -4451, -4644}, { -2121, 5444, -2570, 321, -1202}, { 2846, -2086, 3532, 566, -708}, { -4279, 950, 4980, 3749, 452}, { -2484, 3502, 1719, -170, 238}, { -3435, 263, 2114, -2005, 2361}, { -7338, -1208, 9347, -1216, -4013}, {-13498, -439, 8028, -4232, 361}, { -3729, 5433, 2004, -4727, -1259}, { -3986, 7743, 8429, -3691, -987}, { 5198, -423, 1150, -1281, 816}, { 7409, 4109, -3949, 2690, 30}, { 1246, 3055, -35, -1370, -246}, { -1489, 5635, -678, -2627, 3170}, { 4830, -4585, 2008, -1062, 799}, { -129, 717, 4594, 14937, 10706}, { 417, 2759, 1850, -5057, -1153}, { -3887, 7361, -5768, 4285, 666}, { 1443, -938, 20, -2119, -1697}, { -3712, -3402, -2212, 110, 2136}, { -2952, 12, -1568, -3500, -1855}, { -1315, -1731, 1160, -558, 1709}, { 88, -4569, 194, -454, -2957}, { -2839, -1666, -273, 2084, -155}, { -189, -2376, 1663, -1040, -2449}, { -2842, -1369, 636, -248, -2677}, { 1517, 79, -3013, -3669, -973}, { 1913, -2493, -5312, -749, 1271}, { -2903, -3324, -3756, -3690, -1829}, { -2913, -1547, -2760, -1406, 1124}, { 1844, -1834, 456, 706, -4272}, { 467, -4256, -1909, 1521, 1134}, { -127, -994, -637, -1491, -6494}, { 873, -2045, -3828, -2792, -578}, { 2311, -1817, 2632, -3052, 1968}, { 641, 1194, 1893, 4107, 6342}, { -45, 1198, 2160, -1449, 2203}, { -2004, 1713, 3518, 2652, 4251}, { 2936, -3968, 1280, 
131, -1476}, { 2827, 8, -1928, 2658, 3513}, { 3199, -816, 2687, -1741, -1407}, { 2948, 4029, 394, -253, 1298}, { 4286, 51, -4507, -32, -659}, { 3903, 5646, -5588, -2592, 5707}, { -606, 1234, -1607, -5187, 664}, { -525, 3620, -2192, -2527, 1707}, { 4297, -3251, -2283, 812, -2264}, { 5765, 528, -3287, 1352, 1672}, { 2735, 1241, -1103, -3273, -3407}, { 4033, 1648, -2965, -1174, 1444}, { 74, 918, 1999, 915, -1026}, { -2496, -1605, 2034, 2950, 229}, { -2168, 2037, 15, -1264, -208}, { -3552, 1530, 581, 1491, 962}, { -2613, -2338, 3621, -1488, -2185}, { -1747, 81, 5538, 1432, -2257}, { -1019, 867, 214, -2284, -1510}, { -1684, 2816, -229, 2551, -1389}, { 2707, 504, 479, 2783, -1009}, { 2517, -1487, -1596, 621, 1929}, { -148, 2206, -4288, 1292, -1401}, { -527, 1243, -2731, 1909, 1280}, { 2149, -1501, 3688, 610, -4591}, { 3306, -3369, 1875, 3636, -1217}, { 2574, 2513, 1449, -3074, -4979}, { 814, 1826, -2497, 4234, -4077}, { 1664, -220, 3418, 1002, 1115}, { 781, 1658, 3919, 6130, 3140}, { 1148, 4065, 1516, 815, 199}, { 1191, 2489, 2561, 2421, 2443}, { 770, -5915, 5515, -368, -3199}, { 1190, 1047, 3742, 6927, -2089}, { 292, 3099, 4308, -758, -2455}, { 523, 3921, 4044, 1386, 85}, { 4367, 1006, -1252, -1466, -1383}, { 3852, 1579, -77, 2064, 868}, { 5109, 2919, -202, 359, -509}, { 3650, 3206, 2303, 1693, 1296}, { 2905, -3907, 229, -1196, -2332}, { 5977, -3585, 805, 3825, -3138}, { 3746, -606, 53, -269, -3301}, { 606, 2018, -1316, 4064, 398} }; static const float syn_window[111]={ 0.576690972, 0.580838025, 0.585013986, 0.589219987, 0.59345597, 0.597723007, 0.602020264, 0.606384277, 0.610748291, 0.615142822, 0.619598389, 0.624084473, 0.628570557, 0.633117676, 0.637695313, 0.642272949, 0.646911621, 0.651580811, 0.656280518, 0.66104126, 0.665802002, 0.670593262, 0.675445557, 0.680328369, 0.685241699, 0.690185547, 0.695159912, 0.700164795, 0.705230713, 0.710327148, 0.715454102, 0.720611572, 0.725830078, 0.731048584, 0.736328125, 0.741638184, 0.747009277, 0.752380371, 0.7578125, 
0.763305664, 0.768798828, 0.774353027, 0.779937744, 0.785583496, 0.791229248, 0.796936035, 0.802703857, 0.808502197, 0.814331055, 0.820220947, 0.826141357, 0.832092285, 0.838104248, 0.844146729, 0.850250244, 0.856384277, 0.862548828, 0.868774414, 0.875061035, 0.881378174, 0.88772583, 0.894134521, 0.900604248, 0.907104492, 0.913635254, 0.920227051, 0.926879883, 0.933563232, 0.940307617, 0.94708252, 0.953918457, 0.96081543, 0.96774292, 0.974731445, 0.981781006, 0.988861084, 0.994842529, 0.998565674, 0.999969482, 0.99911499, 0.996002197, 0.990600586, 0.982910156, 0.973022461, 0.960876465, 0.946533203, 0.930053711, 0.911437988, 0.89074707, 0.868041992, 0.843322754, 0.816680908, 0.788208008, 0.757904053, 0.725891113, 0.692199707, 0.656921387, 0.620178223, 0.582000732, 0.542480469, 0.501739502, 0.459838867, 0.416900635, 0.373016357, 0.328277588, 0.282775879, 0.236663818, 0.189971924, 0.142852783, 0.0954284668,0.0477600098 }; static const float gain_window[38]={ 0.505699992, 0.524200022, 0.54339999, 0.563300014, 0.583953857, 0.60534668, 0.627502441, 0.650482178, 0.674316406, 0.699005127, 0.724578857, 0.75112915, 0.778625488, 0.807128906, 0.836669922, 0.86730957, 0.899078369, 0.932006836, 0.961486816, 0.982757568, 0.995635986, 1, 0.995819092, 0.983154297, 0.96206665, 0.932769775, 0.895507813, 0.850585938, 0.798400879, 0.739379883, 0.674072266, 0.602996826, 0.526763916, 0.446014404, 0.361480713, 0.273834229, 0.183868408, 0.0923461914 }; /** synthesis bandwidth broadening table */ static const float syn_bw_tab[36]={ 0.98828125, 0.976699829, 0.965254128, 0.953942537, 0.942763507, 0.931715488, 0.920796931, 0.910006344, 0.899342179, 0.888803005, 0.878387332, 0.868093729, 0.857920766, 0.847867012, 0.837931097, 0.828111589, 0.818407178, 0.808816493, 0.799338162, 0.789970934, 0.780713439, 0.771564424, 0.762522638, 0.753586829, 0.744755745, 0.736028135, 0.727402806, 0.718878567, 0.710454226, 0.702128589, 0.693900526, 0.685768902, 0.677732527, 0.669790328, 0.66194123, 0.654184103 }; 
/** gain bandwidth broadening table */ static const float gain_bw_tab[10]={ 0.90625, 0.821289063, 0.74432373, 0.674499512, 0.61126709, 0.553955078, 0.50201416, 0.454956055, 0.41229248, 0.373657227 }; #endif /* AVCODEC_RA288_H */
123linslouis-android-video-cutter
jni/libavcodec/ra288.h
C
asf20
9,083
/* * PCM codecs * Copyright (c) 2001 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * PCM codecs */ #include "avcodec.h" #include "libavutil/common.h" /* for av_reverse */ #include "bytestream.h" #include "pcm_tablegen.h" #define MAX_CHANNELS 64 static av_cold int pcm_encode_init(AVCodecContext *avctx) { avctx->frame_size = 1; switch(avctx->codec->id) { case CODEC_ID_PCM_ALAW: pcm_alaw_tableinit(); break; case CODEC_ID_PCM_MULAW: pcm_ulaw_tableinit(); break; default: break; } avctx->bits_per_coded_sample = av_get_bits_per_sample(avctx->codec->id); avctx->block_align = avctx->channels * avctx->bits_per_coded_sample/8; avctx->coded_frame= avcodec_alloc_frame(); avctx->coded_frame->key_frame= 1; return 0; } static av_cold int pcm_encode_close(AVCodecContext *avctx) { av_freep(&avctx->coded_frame); return 0; } /** * Write PCM samples macro * @param type Datatype of native machine format * @param endian bytestream_put_xxx() suffix * @param src Source pointer (variable name) * @param dst Destination pointer (variable name) * @param n Total number of samples (variable name) * @param shift Bitshift (bits) * @param offset Sample value offset */ #define ENCODE(type, endian, src, dst, n, shift, offset) \ samples_##type = (type*)src; \ for(;n>0;n--) { \ register 
type v = (*samples_##type++ >> shift) + offset; \ bytestream_put_##endian(&dst, v); \ } static int pcm_encode_frame(AVCodecContext *avctx, unsigned char *frame, int buf_size, void *data) { int n, sample_size, v; short *samples; unsigned char *dst; uint8_t *srcu8; int16_t *samples_int16_t; int32_t *samples_int32_t; int64_t *samples_int64_t; uint16_t *samples_uint16_t; uint32_t *samples_uint32_t; sample_size = av_get_bits_per_sample(avctx->codec->id)/8; n = buf_size / sample_size; samples = data; dst = frame; if (avctx->sample_fmt!=avctx->codec->sample_fmts[0]) { av_log(avctx, AV_LOG_ERROR, "invalid sample_fmt\n"); return -1; } switch(avctx->codec->id) { case CODEC_ID_PCM_U32LE: ENCODE(uint32_t, le32, samples, dst, n, 0, 0x80000000) break; case CODEC_ID_PCM_U32BE: ENCODE(uint32_t, be32, samples, dst, n, 0, 0x80000000) break; case CODEC_ID_PCM_S24LE: ENCODE(int32_t, le24, samples, dst, n, 8, 0) break; case CODEC_ID_PCM_S24BE: ENCODE(int32_t, be24, samples, dst, n, 8, 0) break; case CODEC_ID_PCM_U24LE: ENCODE(uint32_t, le24, samples, dst, n, 8, 0x800000) break; case CODEC_ID_PCM_U24BE: ENCODE(uint32_t, be24, samples, dst, n, 8, 0x800000) break; case CODEC_ID_PCM_S24DAUD: for(;n>0;n--) { uint32_t tmp = av_reverse[(*samples >> 8) & 0xff] + (av_reverse[*samples & 0xff] << 8); tmp <<= 4; // sync flags would go here bytestream_put_be24(&dst, tmp); samples++; } break; case CODEC_ID_PCM_U16LE: ENCODE(uint16_t, le16, samples, dst, n, 0, 0x8000) break; case CODEC_ID_PCM_U16BE: ENCODE(uint16_t, be16, samples, dst, n, 0, 0x8000) break; case CODEC_ID_PCM_S8: srcu8= data; for(;n>0;n--) { v = *srcu8++; *dst++ = v - 128; } break; #if HAVE_BIGENDIAN case CODEC_ID_PCM_F64LE: ENCODE(int64_t, le64, samples, dst, n, 0, 0) break; case CODEC_ID_PCM_S32LE: case CODEC_ID_PCM_F32LE: ENCODE(int32_t, le32, samples, dst, n, 0, 0) break; case CODEC_ID_PCM_S16LE: ENCODE(int16_t, le16, samples, dst, n, 0, 0) break; case CODEC_ID_PCM_F64BE: case CODEC_ID_PCM_F32BE: case CODEC_ID_PCM_S32BE: case 
CODEC_ID_PCM_S16BE: #else case CODEC_ID_PCM_F64BE: ENCODE(int64_t, be64, samples, dst, n, 0, 0) break; case CODEC_ID_PCM_F32BE: case CODEC_ID_PCM_S32BE: ENCODE(int32_t, be32, samples, dst, n, 0, 0) break; case CODEC_ID_PCM_S16BE: ENCODE(int16_t, be16, samples, dst, n, 0, 0) break; case CODEC_ID_PCM_F64LE: case CODEC_ID_PCM_F32LE: case CODEC_ID_PCM_S32LE: case CODEC_ID_PCM_S16LE: #endif /* HAVE_BIGENDIAN */ case CODEC_ID_PCM_U8: memcpy(dst, samples, n*sample_size); dst += n*sample_size; break; case CODEC_ID_PCM_ZORK: for(;n>0;n--) { v= *samples++ >> 8; if(v<0) v = -v; else v+= 128; *dst++ = v; } break; case CODEC_ID_PCM_ALAW: for(;n>0;n--) { v = *samples++; *dst++ = linear_to_alaw[(v + 32768) >> 2]; } break; case CODEC_ID_PCM_MULAW: for(;n>0;n--) { v = *samples++; *dst++ = linear_to_ulaw[(v + 32768) >> 2]; } break; default: return -1; } //avctx->frame_size = (dst - frame) / (sample_size * avctx->channels); return dst - frame; } typedef struct PCMDecode { short table[256]; } PCMDecode; static av_cold int pcm_decode_init(AVCodecContext * avctx) { PCMDecode *s = avctx->priv_data; int i; switch(avctx->codec->id) { case CODEC_ID_PCM_ALAW: for(i=0;i<256;i++) s->table[i] = alaw2linear(i); break; case CODEC_ID_PCM_MULAW: for(i=0;i<256;i++) s->table[i] = ulaw2linear(i); break; default: break; } avctx->sample_fmt = avctx->codec->sample_fmts[0]; return 0; } /** * Read PCM samples macro * @param type Datatype of native machine format * @param endian bytestream_get_xxx() endian suffix * @param src Source pointer (variable name) * @param dst Destination pointer (variable name) * @param n Total number of samples (variable name) * @param shift Bitshift (bits) * @param offset Sample value offset */ #define DECODE(type, endian, src, dst, n, shift, offset) \ dst_##type = (type*)dst; \ for(;n>0;n--) { \ register type v = bytestream_get_##endian(&src); \ *dst_##type++ = (v - offset) << shift; \ } \ dst = (short*)dst_##type; static int pcm_decode_frame(AVCodecContext *avctx, void *data, 
int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; PCMDecode *s = avctx->priv_data; int sample_size, c, n; short *samples; const uint8_t *src, *src8, *src2[MAX_CHANNELS]; uint8_t *dstu8; int16_t *dst_int16_t; int32_t *dst_int32_t; int64_t *dst_int64_t; uint16_t *dst_uint16_t; uint32_t *dst_uint32_t; samples = data; src = buf; if (avctx->sample_fmt!=avctx->codec->sample_fmts[0]) { av_log(avctx, AV_LOG_ERROR, "invalid sample_fmt\n"); return -1; } if(avctx->channels <= 0 || avctx->channels > MAX_CHANNELS){ av_log(avctx, AV_LOG_ERROR, "PCM channels out of bounds\n"); return -1; } sample_size = av_get_bits_per_sample(avctx->codec_id)/8; /* av_get_bits_per_sample returns 0 for CODEC_ID_PCM_DVD */ if (CODEC_ID_PCM_DVD == avctx->codec_id) /* 2 samples are interleaved per block in PCM_DVD */ sample_size = avctx->bits_per_coded_sample * 2 / 8; n = avctx->channels * sample_size; if(n && buf_size % n){ if (buf_size < n) { av_log(avctx, AV_LOG_ERROR, "invalid PCM packet\n"); return -1; }else buf_size -= buf_size % n; } buf_size= FFMIN(buf_size, *data_size/2); *data_size=0; n = buf_size/sample_size; switch(avctx->codec->id) { case CODEC_ID_PCM_U32LE: DECODE(uint32_t, le32, src, samples, n, 0, 0x80000000) break; case CODEC_ID_PCM_U32BE: DECODE(uint32_t, be32, src, samples, n, 0, 0x80000000) break; case CODEC_ID_PCM_S24LE: DECODE(int32_t, le24, src, samples, n, 8, 0) break; case CODEC_ID_PCM_S24BE: DECODE(int32_t, be24, src, samples, n, 8, 0) break; case CODEC_ID_PCM_U24LE: DECODE(uint32_t, le24, src, samples, n, 8, 0x800000) break; case CODEC_ID_PCM_U24BE: DECODE(uint32_t, be24, src, samples, n, 8, 0x800000) break; case CODEC_ID_PCM_S24DAUD: for(;n>0;n--) { uint32_t v = bytestream_get_be24(&src); v >>= 4; // sync flags are here *samples++ = av_reverse[(v >> 8) & 0xff] + (av_reverse[v & 0xff] << 8); } break; case CODEC_ID_PCM_S16LE_PLANAR: n /= avctx->channels; for(c=0;c<avctx->channels;c++) src2[c] = &src[c*n*2]; for(;n>0;n--) 
for(c=0;c<avctx->channels;c++) *samples++ = bytestream_get_le16(&src2[c]); src = src2[avctx->channels-1]; break; case CODEC_ID_PCM_U16LE: DECODE(uint16_t, le16, src, samples, n, 0, 0x8000) break; case CODEC_ID_PCM_U16BE: DECODE(uint16_t, be16, src, samples, n, 0, 0x8000) break; case CODEC_ID_PCM_S8: dstu8= (uint8_t*)samples; for(;n>0;n--) { *dstu8++ = *src++ + 128; } samples= (short*)dstu8; break; #if HAVE_BIGENDIAN case CODEC_ID_PCM_F64LE: DECODE(int64_t, le64, src, samples, n, 0, 0) break; case CODEC_ID_PCM_S32LE: case CODEC_ID_PCM_F32LE: DECODE(int32_t, le32, src, samples, n, 0, 0) break; case CODEC_ID_PCM_S16LE: DECODE(int16_t, le16, src, samples, n, 0, 0) break; case CODEC_ID_PCM_F64BE: case CODEC_ID_PCM_F32BE: case CODEC_ID_PCM_S32BE: case CODEC_ID_PCM_S16BE: #else case CODEC_ID_PCM_F64BE: DECODE(int64_t, be64, src, samples, n, 0, 0) break; case CODEC_ID_PCM_F32BE: case CODEC_ID_PCM_S32BE: DECODE(int32_t, be32, src, samples, n, 0, 0) break; case CODEC_ID_PCM_S16BE: DECODE(int16_t, be16, src, samples, n, 0, 0) break; case CODEC_ID_PCM_F64LE: case CODEC_ID_PCM_F32LE: case CODEC_ID_PCM_S32LE: case CODEC_ID_PCM_S16LE: #endif /* HAVE_BIGENDIAN */ case CODEC_ID_PCM_U8: memcpy(samples, src, n*sample_size); src += n*sample_size; samples = (short*)((uint8_t*)data + n*sample_size); break; case CODEC_ID_PCM_ZORK: for(;n>0;n--) { int x= *src++; if(x&128) x-= 128; else x = -x; *samples++ = x << 8; } break; case CODEC_ID_PCM_ALAW: case CODEC_ID_PCM_MULAW: for(;n>0;n--) { *samples++ = s->table[*src++]; } break; case CODEC_ID_PCM_DVD: dst_int32_t = data; n /= avctx->channels; switch (avctx->bits_per_coded_sample) { case 20: while (n--) { c = avctx->channels; src8 = src + 4*c; while (c--) { *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8 &0xf0) << 8); *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8++ &0x0f) << 12); } src = src8; } break; case 24: while (n--) { c = avctx->channels; src8 = src + 4*c; while (c--) { *dst_int32_t++ = 
(bytestream_get_be16(&src) << 16) + ((*src8++) << 8); *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8++) << 8); } src = src8; } break; default: av_log(avctx, AV_LOG_ERROR, "PCM DVD unsupported sample depth\n"); return -1; break; } samples = (short *) dst_int32_t; break; default: return -1; } *data_size = (uint8_t *)samples - (uint8_t *)data; return src - buf; } #if CONFIG_ENCODERS #define PCM_ENCODER(id,sample_fmt_,name,long_name_) \ AVCodec name ## _encoder = { \ #name, \ AVMEDIA_TYPE_AUDIO, \ id, \ 0, \ pcm_encode_init, \ pcm_encode_frame, \ pcm_encode_close, \ NULL, \ .sample_fmts = (const enum SampleFormat[]){sample_fmt_,SAMPLE_FMT_NONE}, \ .long_name = NULL_IF_CONFIG_SMALL(long_name_), \ }; #else #define PCM_ENCODER(id,sample_fmt_,name,long_name_) #endif #if CONFIG_DECODERS #define PCM_DECODER(id,sample_fmt_,name,long_name_) \ AVCodec name ## _decoder = { \ #name, \ AVMEDIA_TYPE_AUDIO, \ id, \ sizeof(PCMDecode), \ pcm_decode_init, \ NULL, \ NULL, \ pcm_decode_frame, \ .sample_fmts = (const enum SampleFormat[]){sample_fmt_,SAMPLE_FMT_NONE}, \ .long_name = NULL_IF_CONFIG_SMALL(long_name_), \ }; #else #define PCM_DECODER(id,sample_fmt_,name,long_name_) #endif #define PCM_CODEC(id, sample_fmt_, name, long_name_) \ PCM_ENCODER(id,sample_fmt_,name,long_name_) PCM_DECODER(id,sample_fmt_,name,long_name_) /* Note: Do not forget to add new entries to the Makefile as well. 
*/ PCM_CODEC (CODEC_ID_PCM_ALAW, SAMPLE_FMT_S16, pcm_alaw, "PCM A-law"); PCM_CODEC (CODEC_ID_PCM_DVD, SAMPLE_FMT_S32, pcm_dvd, "PCM signed 20|24-bit big-endian"); PCM_CODEC (CODEC_ID_PCM_F32BE, SAMPLE_FMT_FLT, pcm_f32be, "PCM 32-bit floating point big-endian"); PCM_CODEC (CODEC_ID_PCM_F32LE, SAMPLE_FMT_FLT, pcm_f32le, "PCM 32-bit floating point little-endian"); PCM_CODEC (CODEC_ID_PCM_F64BE, SAMPLE_FMT_DBL, pcm_f64be, "PCM 64-bit floating point big-endian"); PCM_CODEC (CODEC_ID_PCM_F64LE, SAMPLE_FMT_DBL, pcm_f64le, "PCM 64-bit floating point little-endian"); PCM_CODEC (CODEC_ID_PCM_MULAW, SAMPLE_FMT_S16, pcm_mulaw, "PCM mu-law"); PCM_CODEC (CODEC_ID_PCM_S8, SAMPLE_FMT_U8, pcm_s8, "PCM signed 8-bit"); PCM_CODEC (CODEC_ID_PCM_S16BE, SAMPLE_FMT_S16, pcm_s16be, "PCM signed 16-bit big-endian"); PCM_CODEC (CODEC_ID_PCM_S16LE, SAMPLE_FMT_S16, pcm_s16le, "PCM signed 16-bit little-endian"); PCM_DECODER(CODEC_ID_PCM_S16LE_PLANAR, SAMPLE_FMT_S16, pcm_s16le_planar, "PCM 16-bit little-endian planar"); PCM_CODEC (CODEC_ID_PCM_S24BE, SAMPLE_FMT_S32, pcm_s24be, "PCM signed 24-bit big-endian"); PCM_CODEC (CODEC_ID_PCM_S24DAUD, SAMPLE_FMT_S16, pcm_s24daud, "PCM D-Cinema audio signed 24-bit"); PCM_CODEC (CODEC_ID_PCM_S24LE, SAMPLE_FMT_S32, pcm_s24le, "PCM signed 24-bit little-endian"); PCM_CODEC (CODEC_ID_PCM_S32BE, SAMPLE_FMT_S32, pcm_s32be, "PCM signed 32-bit big-endian"); PCM_CODEC (CODEC_ID_PCM_S32LE, SAMPLE_FMT_S32, pcm_s32le, "PCM signed 32-bit little-endian"); PCM_CODEC (CODEC_ID_PCM_U8, SAMPLE_FMT_U8, pcm_u8, "PCM unsigned 8-bit"); PCM_CODEC (CODEC_ID_PCM_U16BE, SAMPLE_FMT_S16, pcm_u16be, "PCM unsigned 16-bit big-endian"); PCM_CODEC (CODEC_ID_PCM_U16LE, SAMPLE_FMT_S16, pcm_u16le, "PCM unsigned 16-bit little-endian"); PCM_CODEC (CODEC_ID_PCM_U24BE, SAMPLE_FMT_S32, pcm_u24be, "PCM unsigned 24-bit big-endian"); PCM_CODEC (CODEC_ID_PCM_U24LE, SAMPLE_FMT_S32, pcm_u24le, "PCM unsigned 24-bit little-endian"); PCM_CODEC (CODEC_ID_PCM_U32BE, SAMPLE_FMT_S32, pcm_u32be, "PCM unsigned 
32-bit big-endian"); PCM_CODEC (CODEC_ID_PCM_U32LE, SAMPLE_FMT_S32, pcm_u32le, "PCM unsigned 32-bit little-endian"); PCM_CODEC (CODEC_ID_PCM_ZORK, SAMPLE_FMT_S16, pcm_zork, "PCM Zork");
123linslouis-android-video-cutter
jni/libavcodec/pcm.c
C
asf20
16,815