code
stringlengths
1
2.01M
repo_name
stringlengths
3
62
path
stringlengths
1
267
language
stringclasses
231 values
license
stringclasses
13 values
size
int64
1
2.01M
/* * ADPCM codecs * Copyright (c) 2001-2003 The ffmpeg Project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avcodec.h" #include "get_bits.h" #include "put_bits.h" #include "bytestream.h" /** * @file * ADPCM codecs. * First version by Francois Revol (revol@free.fr) * Fringe ADPCM codecs (e.g., DK3, DK4, Westwood) * by Mike Melanson (melanson@pcisys.net) * CD-ROM XA ADPCM codec by BERO * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com) * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org) * EA IMA EACS decoder by Peter Ross (pross@xvid.org) * EA IMA SEAD decoder by Peter Ross (pross@xvid.org) * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org) * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com) * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl) * * Features and limitations: * * Reference documents: * http://www.pcisys.net/~melanson/codecs/simpleaudio.html * http://www.geocities.com/SiliconValley/8682/aud3.txt * http://openquicktime.sourceforge.net/plugins.htm * XAnim sources (xa_codec.c) http://www.rasnaimaging.com/people/lapus/download.html * http://www.cs.ucla.edu/~leec/mediabench/applications.html * SoX source code http://home.sprynet.com/~cbagwell/sox.html * * CD-ROM XA: * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html * vagpack 
& depack http://homepages.compuserve.de/bITmASTER32/psx-index.html * readstr http://www.geocities.co.jp/Playtown/2004/ */ #define BLKSIZE 1024 /* step_table[] and index_table[] are from the ADPCM reference source */ /* This is the index table: */ static const int index_table[16] = { -1, -1, -1, -1, 2, 4, 6, 8, -1, -1, -1, -1, 2, 4, 6, 8, }; /** * This is the step table. Note that many programs use slight deviations from * this table, but such deviations are negligible: */ static const int step_table[89] = { 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 19, 21, 23, 25, 28, 31, 34, 37, 41, 45, 50, 55, 60, 66, 73, 80, 88, 97, 107, 118, 130, 143, 157, 173, 190, 209, 230, 253, 279, 307, 337, 371, 408, 449, 494, 544, 598, 658, 724, 796, 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066, 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358, 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899, 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767 }; /* These are for MS-ADPCM */ /* AdaptationTable[], AdaptCoeff1[], and AdaptCoeff2[] are from libsndfile */ static const int AdaptationTable[] = { 230, 230, 230, 230, 307, 409, 512, 614, 768, 614, 512, 409, 307, 230, 230, 230 }; /** Divided by 4 to fit in 8-bit integers */ static const uint8_t AdaptCoeff1[] = { 64, 128, 0, 48, 60, 115, 98 }; /** Divided by 4 to fit in 8-bit integers */ static const int8_t AdaptCoeff2[] = { 0, -64, 0, 16, 0, -52, -58 }; /* These are for CD-ROM XA ADPCM */ static const int xa_adpcm_table[5][2] = { { 0, 0 }, { 60, 0 }, { 115, -52 }, { 98, -55 }, { 122, -60 } }; static const int ea_adpcm_table[] = { 0, 240, 460, 392, 0, 0, -208, -220, 0, 1, 3, 4, 7, 8, 10, 11, 0, -1, -3, -4 }; // padded to zero where table size is less then 16 static const int swf_index_tables[4][16] = { /*2*/ { -1, 2 }, /*3*/ { -1, -1, 2, 4 }, /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 }, /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 } }; static const int yamaha_indexscale[] = { 230, 230, 230, 
230, 307, 409, 512, 614, 230, 230, 230, 230, 307, 409, 512, 614 }; static const int yamaha_difflookup[] = { 1, 3, 5, 7, 9, 11, 13, 15, -1, -3, -5, -7, -9, -11, -13, -15 }; /* end of tables */ typedef struct ADPCMChannelStatus { int predictor; short int step_index; int step; /* for encoding */ int prev_sample; /* MS version */ short sample1; short sample2; int coeff1; int coeff2; int idelta; } ADPCMChannelStatus; typedef struct ADPCMContext { ADPCMChannelStatus status[6]; } ADPCMContext; /* XXX: implement encoding */ #if CONFIG_ENCODERS static av_cold int adpcm_encode_init(AVCodecContext *avctx) { uint8_t *extradata; int i; if (avctx->channels > 2) return -1; /* only stereo or mono =) */ if(avctx->trellis && (unsigned)avctx->trellis > 16U){ av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n"); return -1; } switch(avctx->codec->id) { case CODEC_ID_ADPCM_IMA_WAV: avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1; /* each 16 bits sample gives one nibble */ /* and we have 4 bytes per channel overhead */ avctx->block_align = BLKSIZE; /* seems frame_size isn't taken into account... 
have to buffer the samples :-( */ break; case CODEC_ID_ADPCM_IMA_QT: avctx->frame_size = 64; avctx->block_align = 34 * avctx->channels; break; case CODEC_ID_ADPCM_MS: avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2; /* each 16 bits sample gives one nibble */ /* and we have 7 bytes per channel overhead */ avctx->block_align = BLKSIZE; avctx->extradata_size = 32; extradata = avctx->extradata = av_malloc(avctx->extradata_size); if (!extradata) return AVERROR(ENOMEM); bytestream_put_le16(&extradata, avctx->frame_size); bytestream_put_le16(&extradata, 7); /* wNumCoef */ for (i = 0; i < 7; i++) { bytestream_put_le16(&extradata, AdaptCoeff1[i] * 4); bytestream_put_le16(&extradata, AdaptCoeff2[i] * 4); } break; case CODEC_ID_ADPCM_YAMAHA: avctx->frame_size = BLKSIZE * avctx->channels; avctx->block_align = BLKSIZE; break; case CODEC_ID_ADPCM_SWF: if (avctx->sample_rate != 11025 && avctx->sample_rate != 22050 && avctx->sample_rate != 44100) { av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, 22050 or 44100\n"); return -1; } avctx->frame_size = 512 * (avctx->sample_rate / 11025); break; default: return -1; } avctx->coded_frame= avcodec_alloc_frame(); avctx->coded_frame->key_frame= 1; return 0; } static av_cold int adpcm_encode_close(AVCodecContext *avctx) { av_freep(&avctx->coded_frame); return 0; } static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, short sample) { int delta = sample - c->prev_sample; int nibble = FFMIN(7, abs(delta)*4/step_table[c->step_index]) + (delta<0)*8; c->prev_sample += ((step_table[c->step_index] * yamaha_difflookup[nibble]) / 8); c->prev_sample = av_clip_int16(c->prev_sample); c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88); return nibble; } static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample) { int predictor, nibble, bias; predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64; nibble= sample - predictor; 
if(nibble>=0) bias= c->idelta/2; else bias=-c->idelta/2; nibble= (nibble + bias) / c->idelta; nibble= av_clip(nibble, -8, 7)&0x0F; predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta; c->sample2 = c->sample1; c->sample1 = av_clip_int16(predictor); c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8; if (c->idelta < 16) c->idelta = 16; return nibble; } static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample) { int nibble, delta; if(!c->step) { c->predictor = 0; c->step = 127; } delta = sample - c->predictor; nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8; c->predictor += ((c->step * yamaha_difflookup[nibble]) / 8); c->predictor = av_clip_int16(c->predictor); c->step = (c->step * yamaha_indexscale[nibble]) >> 8; c->step = av_clip(c->step, 127, 24567); return nibble; } typedef struct TrellisPath { int nibble; int prev; } TrellisPath; typedef struct TrellisNode { uint32_t ssd; int path; int sample1; int sample2; int step; } TrellisNode; static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples, uint8_t *dst, ADPCMChannelStatus *c, int n) { #define FREEZE_INTERVAL 128 //FIXME 6% faster if frontier is a compile-time constant const int frontier = 1 << avctx->trellis; const int stride = avctx->channels; const int version = avctx->codec->id; const int max_paths = frontier*FREEZE_INTERVAL; TrellisPath paths[max_paths], *p; TrellisNode node_buf[2][frontier]; TrellisNode *nodep_buf[2][frontier]; TrellisNode **nodes = nodep_buf[0]; // nodes[] is always sorted by .ssd TrellisNode **nodes_next = nodep_buf[1]; int pathn = 0, froze = -1, i, j, k; assert(!(max_paths&(max_paths-1))); memset(nodep_buf, 0, sizeof(nodep_buf)); nodes[0] = &node_buf[1][0]; nodes[0]->ssd = 0; nodes[0]->path = 0; nodes[0]->step = c->step_index; nodes[0]->sample1 = c->sample1; nodes[0]->sample2 = c->sample2; if((version == CODEC_ID_ADPCM_IMA_WAV) || (version == CODEC_ID_ADPCM_IMA_QT) || (version == 
CODEC_ID_ADPCM_SWF)) nodes[0]->sample1 = c->prev_sample; if(version == CODEC_ID_ADPCM_MS) nodes[0]->step = c->idelta; if(version == CODEC_ID_ADPCM_YAMAHA) { if(c->step == 0) { nodes[0]->step = 127; nodes[0]->sample1 = 0; } else { nodes[0]->step = c->step; nodes[0]->sample1 = c->predictor; } } for(i=0; i<n; i++) { TrellisNode *t = node_buf[i&1]; TrellisNode **u; int sample = samples[i*stride]; memset(nodes_next, 0, frontier*sizeof(TrellisNode*)); for(j=0; j<frontier && nodes[j]; j++) { // higher j have higher ssd already, so they're unlikely to use a suboptimal next sample too const int range = (j < frontier/2) ? 1 : 0; const int step = nodes[j]->step; int nidx; if(version == CODEC_ID_ADPCM_MS) { const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 64; const int div = (sample - predictor) / step; const int nmin = av_clip(div-range, -8, 6); const int nmax = av_clip(div+range, -7, 7); for(nidx=nmin; nidx<=nmax; nidx++) { const int nibble = nidx & 0xf; int dec_sample = predictor + nidx * step; #define STORE_NODE(NAME, STEP_INDEX)\ int d;\ uint32_t ssd;\ dec_sample = av_clip_int16(dec_sample);\ d = sample - dec_sample;\ ssd = nodes[j]->ssd + d*d;\ if(nodes_next[frontier-1] && ssd >= nodes_next[frontier-1]->ssd)\ continue;\ /* Collapse any two states with the same previous sample value. \ * One could also distinguish states by step and by 2nd to last * sample, but the effects of that are negligible. 
*/\ for(k=0; k<frontier && nodes_next[k]; k++) {\ if(dec_sample == nodes_next[k]->sample1) {\ assert(ssd >= nodes_next[k]->ssd);\ goto next_##NAME;\ }\ }\ for(k=0; k<frontier; k++) {\ if(!nodes_next[k] || ssd < nodes_next[k]->ssd) {\ TrellisNode *u = nodes_next[frontier-1];\ if(!u) {\ assert(pathn < max_paths);\ u = t++;\ u->path = pathn++;\ }\ u->ssd = ssd;\ u->step = STEP_INDEX;\ u->sample2 = nodes[j]->sample1;\ u->sample1 = dec_sample;\ paths[u->path].nibble = nibble;\ paths[u->path].prev = nodes[j]->path;\ memmove(&nodes_next[k+1], &nodes_next[k], (frontier-k-1)*sizeof(TrellisNode*));\ nodes_next[k] = u;\ break;\ }\ }\ next_##NAME:; STORE_NODE(ms, FFMAX(16, (AdaptationTable[nibble] * step) >> 8)); } } else if((version == CODEC_ID_ADPCM_IMA_WAV)|| (version == CODEC_ID_ADPCM_IMA_QT)|| (version == CODEC_ID_ADPCM_SWF)) { #define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\ const int predictor = nodes[j]->sample1;\ const int div = (sample - predictor) * 4 / STEP_TABLE;\ int nmin = av_clip(div-range, -7, 6);\ int nmax = av_clip(div+range, -6, 7);\ if(nmin<=0) nmin--; /* distinguish -0 from +0 */\ if(nmax<0) nmax--;\ for(nidx=nmin; nidx<=nmax; nidx++) {\ const int nibble = nidx<0 ? 
7-nidx : nidx;\ int dec_sample = predictor + (STEP_TABLE * yamaha_difflookup[nibble]) / 8;\ STORE_NODE(NAME, STEP_INDEX);\ } LOOP_NODES(ima, step_table[step], av_clip(step + index_table[nibble], 0, 88)); } else { //CODEC_ID_ADPCM_YAMAHA LOOP_NODES(yamaha, step, av_clip((step * yamaha_indexscale[nibble]) >> 8, 127, 24567)); #undef LOOP_NODES #undef STORE_NODE } } u = nodes; nodes = nodes_next; nodes_next = u; // prevent overflow if(nodes[0]->ssd > (1<<28)) { for(j=1; j<frontier && nodes[j]; j++) nodes[j]->ssd -= nodes[0]->ssd; nodes[0]->ssd = 0; } // merge old paths to save memory if(i == froze + FREEZE_INTERVAL) { p = &paths[nodes[0]->path]; for(k=i; k>froze; k--) { dst[k] = p->nibble; p = &paths[p->prev]; } froze = i; pathn = 0; // other nodes might use paths that don't coincide with the frozen one. // checking which nodes do so is too slow, so just kill them all. // this also slightly improves quality, but I don't know why. memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*)); } } p = &paths[nodes[0]->path]; for(i=n-1; i>froze; i--) { dst[i] = p->nibble; p = &paths[p->prev]; } c->predictor = nodes[0]->sample1; c->sample1 = nodes[0]->sample1; c->sample2 = nodes[0]->sample2; c->step_index = nodes[0]->step; c->step = nodes[0]->step; c->idelta = nodes[0]->step; } static int adpcm_encode_frame(AVCodecContext *avctx, unsigned char *frame, int buf_size, void *data) { int n, i, st; short *samples; unsigned char *dst; ADPCMContext *c = avctx->priv_data; dst = frame; samples = (short *)data; st= avctx->channels == 2; /* n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */ switch(avctx->codec->id) { case CODEC_ID_ADPCM_IMA_WAV: n = avctx->frame_size / 8; c->status[0].prev_sample = (signed short)samples[0]; /* XXX */ /* c->status[0].step_index = 0; *//* XXX: not sure how to init the state machine */ bytestream_put_le16(&dst, c->status[0].prev_sample); *dst++ = (unsigned char)c->status[0].step_index; *dst++ = 0; /* unknown */ samples++; if (avctx->channels == 
2) { c->status[1].prev_sample = (signed short)samples[0]; /* c->status[1].step_index = 0; */ bytestream_put_le16(&dst, c->status[1].prev_sample); *dst++ = (unsigned char)c->status[1].step_index; *dst++ = 0; samples++; } /* stereo: 4 bytes (8 samples) for left, 4 bytes for right, 4 bytes left, ... */ if(avctx->trellis > 0) { uint8_t buf[2][n*8]; adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n*8); if(avctx->channels == 2) adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n*8); for(i=0; i<n; i++) { *dst++ = buf[0][8*i+0] | (buf[0][8*i+1] << 4); *dst++ = buf[0][8*i+2] | (buf[0][8*i+3] << 4); *dst++ = buf[0][8*i+4] | (buf[0][8*i+5] << 4); *dst++ = buf[0][8*i+6] | (buf[0][8*i+7] << 4); if (avctx->channels == 2) { *dst++ = buf[1][8*i+0] | (buf[1][8*i+1] << 4); *dst++ = buf[1][8*i+2] | (buf[1][8*i+3] << 4); *dst++ = buf[1][8*i+4] | (buf[1][8*i+5] << 4); *dst++ = buf[1][8*i+6] | (buf[1][8*i+7] << 4); } } } else for (; n>0; n--) { *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]); *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4; dst++; *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]); *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4; dst++; *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]); *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4; dst++; *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]); *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4; dst++; /* right channel */ if (avctx->channels == 2) { *dst = adpcm_ima_compress_sample(&c->status[1], samples[1]); *dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4; dst++; *dst = adpcm_ima_compress_sample(&c->status[1], samples[5]); *dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4; dst++; *dst = adpcm_ima_compress_sample(&c->status[1], 
samples[9]); *dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4; dst++; *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]); *dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4; dst++; } samples += 8 * avctx->channels; } break; case CODEC_ID_ADPCM_IMA_QT: { int ch, i; PutBitContext pb; init_put_bits(&pb, dst, buf_size*8); for(ch=0; ch<avctx->channels; ch++){ put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7); put_bits(&pb, 7, c->status[ch].step_index); if(avctx->trellis > 0) { uint8_t buf[64]; adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64); for(i=0; i<64; i++) put_bits(&pb, 4, buf[i^1]); c->status[ch].prev_sample = c->status[ch].predictor & ~0x7F; } else { for (i=0; i<64; i+=2){ int t1, t2; t1 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]); t2 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]); put_bits(&pb, 4, t2); put_bits(&pb, 4, t1); } c->status[ch].prev_sample &= ~0x7F; } } dst += put_bits_count(&pb)>>3; break; } case CODEC_ID_ADPCM_SWF: { int i; PutBitContext pb; init_put_bits(&pb, dst, buf_size*8); n = avctx->frame_size-1; //Store AdpcmCodeSize put_bits(&pb, 2, 2); //Set 4bits flash adpcm format //Init the encoder state for(i=0; i<avctx->channels; i++){ c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63); // clip step so it fits 6 bits put_sbits(&pb, 16, samples[i]); put_bits(&pb, 6, c->status[i].step_index); c->status[i].prev_sample = (signed short)samples[i]; } if(avctx->trellis > 0) { uint8_t buf[2][n]; adpcm_compress_trellis(avctx, samples+2, buf[0], &c->status[0], n); if (avctx->channels == 2) adpcm_compress_trellis(avctx, samples+3, buf[1], &c->status[1], n); for(i=0; i<n; i++) { put_bits(&pb, 4, buf[0][i]); if (avctx->channels == 2) put_bits(&pb, 4, buf[1][i]); } } else { for (i=1; i<avctx->frame_size; i++) { put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels*i])); if 
(avctx->channels == 2) put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2*i+1])); } } flush_put_bits(&pb); dst += put_bits_count(&pb)>>3; break; } case CODEC_ID_ADPCM_MS: for(i=0; i<avctx->channels; i++){ int predictor=0; *dst++ = predictor; c->status[i].coeff1 = AdaptCoeff1[predictor]; c->status[i].coeff2 = AdaptCoeff2[predictor]; } for(i=0; i<avctx->channels; i++){ if (c->status[i].idelta < 16) c->status[i].idelta = 16; bytestream_put_le16(&dst, c->status[i].idelta); } for(i=0; i<avctx->channels; i++){ c->status[i].sample2= *samples++; } for(i=0; i<avctx->channels; i++){ c->status[i].sample1= *samples++; bytestream_put_le16(&dst, c->status[i].sample1); } for(i=0; i<avctx->channels; i++) bytestream_put_le16(&dst, c->status[i].sample2); if(avctx->trellis > 0) { int n = avctx->block_align - 7*avctx->channels; uint8_t buf[2][n]; if(avctx->channels == 1) { n *= 2; adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n); for(i=0; i<n; i+=2) *dst++ = (buf[0][i] << 4) | buf[0][i+1]; } else { adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n); adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n); for(i=0; i<n; i++) *dst++ = (buf[0][i] << 4) | buf[1][i]; } } else for(i=7*avctx->channels; i<avctx->block_align; i++) { int nibble; nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4; nibble|= adpcm_ms_compress_sample(&c->status[st], *samples++); *dst++ = nibble; } break; case CODEC_ID_ADPCM_YAMAHA: n = avctx->frame_size / 2; if(avctx->trellis > 0) { uint8_t buf[2][n*2]; n *= 2; if(avctx->channels == 1) { adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n); for(i=0; i<n; i+=2) *dst++ = buf[0][i] | (buf[0][i+1] << 4); } else { adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n); adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n); for(i=0; i<n; i++) *dst++ = buf[0][i] | (buf[1][i] << 4); } } else for (n *= avctx->channels; n>0; n--) { int nibble; nibble = 
adpcm_yamaha_compress_sample(&c->status[ 0], *samples++); nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4; *dst++ = nibble; } break; default: return -1; } return dst - frame; } #endif //CONFIG_ENCODERS static av_cold int adpcm_decode_init(AVCodecContext * avctx) { ADPCMContext *c = avctx->priv_data; unsigned int max_channels = 2; switch(avctx->codec->id) { case CODEC_ID_ADPCM_EA_R1: case CODEC_ID_ADPCM_EA_R2: case CODEC_ID_ADPCM_EA_R3: max_channels = 6; break; } if(avctx->channels > max_channels){ return -1; } switch(avctx->codec->id) { case CODEC_ID_ADPCM_CT: c->status[0].step = c->status[1].step = 511; break; case CODEC_ID_ADPCM_IMA_WS: if (avctx->extradata && avctx->extradata_size == 2 * 4) { c->status[0].predictor = AV_RL32(avctx->extradata); c->status[1].predictor = AV_RL32(avctx->extradata + 4); } break; default: break; } avctx->sample_fmt = SAMPLE_FMT_S16; return 0; } static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int shift) { int step_index; int predictor; int sign, delta, diff, step; step = step_table[c->step_index]; step_index = c->step_index + index_table[(unsigned)nibble]; if (step_index < 0) step_index = 0; else if (step_index > 88) step_index = 88; sign = nibble & 8; delta = nibble & 7; /* perform direct multiplication instead of series of jumps proposed by * the reference ADPCM implementation since modern CPUs can do the mults * quickly enough */ diff = ((2 * delta + 1) * step) >> shift; predictor = c->predictor; if (sign) predictor -= diff; else predictor += diff; c->predictor = av_clip_int16(predictor); c->step_index = step_index; return (short)c->predictor; } static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble) { int predictor; predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64; predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta; c->sample2 = c->sample1; c->sample1 = av_clip_int16(predictor); c->idelta = 
(AdaptationTable[(int)nibble] * c->idelta) >> 8; if (c->idelta < 16) c->idelta = 16; return c->sample1; } static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble) { int sign, delta, diff; int new_step; sign = nibble & 8; delta = nibble & 7; /* perform direct multiplication instead of series of jumps proposed by * the reference ADPCM implementation since modern CPUs can do the mults * quickly enough */ diff = ((2 * delta + 1) * c->step) >> 3; /* predictor update is not so trivial: predictor is multiplied on 254/256 before updating */ c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff); c->predictor = av_clip_int16(c->predictor); /* calculate new step and clamp it to range 511..32767 */ new_step = (AdaptationTable[nibble & 7] * c->step) >> 8; c->step = av_clip(new_step, 511, 32767); return (short)c->predictor; } static inline short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift) { int sign, delta, diff; sign = nibble & (1<<(size-1)); delta = nibble & ((1<<(size-1))-1); diff = delta << (7 + c->step + shift); /* clamp result */ c->predictor = av_clip(c->predictor + (sign ? 
-diff : diff), -16384,16256); /* calculate new step */ if (delta >= (2*size - 3) && c->step < 3) c->step++; else if (delta == 0 && c->step > 0) c->step--; return (short) c->predictor; } static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned char nibble) { if(!c->step) { c->predictor = 0; c->step = 127; } c->predictor += (c->step * yamaha_difflookup[nibble]) / 8; c->predictor = av_clip_int16(c->predictor); c->step = (c->step * yamaha_indexscale[nibble]) >> 8; c->step = av_clip(c->step, 127, 24567); return c->predictor; } static void xa_decode(short *out, const unsigned char *in, ADPCMChannelStatus *left, ADPCMChannelStatus *right, int inc) { int i, j; int shift,filter,f0,f1; int s_1,s_2; int d,s,t; for(i=0;i<4;i++) { shift = 12 - (in[4+i*2] & 15); filter = in[4+i*2] >> 4; f0 = xa_adpcm_table[filter][0]; f1 = xa_adpcm_table[filter][1]; s_1 = left->sample1; s_2 = left->sample2; for(j=0;j<28;j++) { d = in[16+i+j*4]; t = (signed char)(d<<4)>>4; s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6); s_2 = s_1; s_1 = av_clip_int16(s); *out = s_1; out += inc; } if (inc==2) { /* stereo */ left->sample1 = s_1; left->sample2 = s_2; s_1 = right->sample1; s_2 = right->sample2; out = out + 1 - 28*2; } shift = 12 - (in[5+i*2] & 15); filter = in[5+i*2] >> 4; f0 = xa_adpcm_table[filter][0]; f1 = xa_adpcm_table[filter][1]; for(j=0;j<28;j++) { d = in[16+i+j*4]; t = (signed char)d >> 4; s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6); s_2 = s_1; s_1 = av_clip_int16(s); *out = s_1; out += inc; } if (inc==2) { /* stereo */ right->sample1 = s_1; right->sample2 = s_2; out -= 1; } else { left->sample1 = s_1; left->sample2 = s_2; } } } /* DK3 ADPCM support macro */ #define DK3_GET_NEXT_NIBBLE() \ if (decode_top_nibble_next) \ { \ nibble = last_byte >> 4; \ decode_top_nibble_next = 0; \ } \ else \ { \ last_byte = *src++; \ if (src >= buf + buf_size) break; \ nibble = last_byte & 0x0F; \ decode_top_nibble_next = 1; \ } static int adpcm_decode_frame(AVCodecContext *avctx, void 
*data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; ADPCMContext *c = avctx->priv_data; ADPCMChannelStatus *cs; int n, m, channel, i; int block_predictor[2]; short *samples; short *samples_end; const uint8_t *src; int st; /* stereo */ /* DK3 ADPCM accounting variables */ unsigned char last_byte = 0; unsigned char nibble; int decode_top_nibble_next = 0; int diff_channel; /* EA ADPCM state variables */ uint32_t samples_in_chunk; int32_t previous_left_sample, previous_right_sample; int32_t current_left_sample, current_right_sample; int32_t next_left_sample, next_right_sample; int32_t coeff1l, coeff2l, coeff1r, coeff2r; uint8_t shift_left, shift_right; int count1, count2; int coeff[2][2], shift[2];//used in EA MAXIS ADPCM if (!buf_size) return 0; //should protect all 4bit ADPCM variants //8 is needed for CODEC_ID_ADPCM_IMA_WAV with 2 channels // if(*data_size/4 < buf_size + 8) return -1; samples = data; samples_end= samples + *data_size/2; *data_size= 0; src = buf; st = avctx->channels == 2 ? 
1 : 0; switch(avctx->codec->id) { case CODEC_ID_ADPCM_IMA_QT: n = buf_size - 2*avctx->channels; for (channel = 0; channel < avctx->channels; channel++) { cs = &(c->status[channel]); /* (pppppp) (piiiiiii) */ /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */ cs->predictor = (*src++) << 8; cs->predictor |= (*src & 0x80); cs->predictor &= 0xFF80; /* sign extension */ if(cs->predictor & 0x8000) cs->predictor -= 0x10000; cs->predictor = av_clip_int16(cs->predictor); cs->step_index = (*src++) & 0x7F; if (cs->step_index > 88){ av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index); cs->step_index = 88; } cs->step = step_table[cs->step_index]; samples = (short*)data + channel; for(m=32; n>0 && m>0; n--, m--) { /* in QuickTime, IMA is encoded by chuncks of 34 bytes (=64 samples) */ *samples = adpcm_ima_expand_nibble(cs, src[0] & 0x0F, 3); samples += avctx->channels; *samples = adpcm_ima_expand_nibble(cs, src[0] >> 4 , 3); samples += avctx->channels; src ++; } } if (st) samples--; break; case CODEC_ID_ADPCM_IMA_WAV: if (avctx->block_align != 0 && buf_size > avctx->block_align) buf_size = avctx->block_align; // samples_per_block= (block_align-4*chanels)*8 / (bits_per_sample * chanels) + 1; for(i=0; i<avctx->channels; i++){ cs = &(c->status[i]); cs->predictor = *samples++ = (int16_t)bytestream_get_le16(&src); cs->step_index = *src++; if (cs->step_index > 88){ av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index); cs->step_index = 88; } if (*src++) av_log(avctx, AV_LOG_ERROR, "unused byte should be null but is %d!!\n", src[-1]); /* unused */ } while(src < buf + buf_size){ for(m=0; m<4; m++){ for(i=0; i<=st; i++) *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] & 0x0F, 3); for(i=0; i<=st; i++) *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] >> 4 , 3); src++; } src += 4*st; } break; case CODEC_ID_ADPCM_4XM: cs = &(c->status[0]); c->status[0].predictor= (int16_t)bytestream_get_le16(&src); if(st){ 
c->status[1].predictor= (int16_t)bytestream_get_le16(&src); } c->status[0].step_index= (int16_t)bytestream_get_le16(&src); if(st){ c->status[1].step_index= (int16_t)bytestream_get_le16(&src); } if (cs->step_index < 0) cs->step_index = 0; if (cs->step_index > 88) cs->step_index = 88; m= (buf_size - (src - buf))>>st; for(i=0; i<m; i++) { *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] & 0x0F, 4); if (st) *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] & 0x0F, 4); *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] >> 4, 4); if (st) *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] >> 4, 4); } src += m<<st; break; case CODEC_ID_ADPCM_MS: if (avctx->block_align != 0 && buf_size > avctx->block_align) buf_size = avctx->block_align; n = buf_size - 7 * avctx->channels; if (n < 0) return -1; block_predictor[0] = av_clip(*src++, 0, 6); block_predictor[1] = 0; if (st) block_predictor[1] = av_clip(*src++, 0, 6); c->status[0].idelta = (int16_t)bytestream_get_le16(&src); if (st){ c->status[1].idelta = (int16_t)bytestream_get_le16(&src); } c->status[0].coeff1 = AdaptCoeff1[block_predictor[0]]; c->status[0].coeff2 = AdaptCoeff2[block_predictor[0]]; c->status[1].coeff1 = AdaptCoeff1[block_predictor[1]]; c->status[1].coeff2 = AdaptCoeff2[block_predictor[1]]; c->status[0].sample1 = bytestream_get_le16(&src); if (st) c->status[1].sample1 = bytestream_get_le16(&src); c->status[0].sample2 = bytestream_get_le16(&src); if (st) c->status[1].sample2 = bytestream_get_le16(&src); *samples++ = c->status[0].sample2; if (st) *samples++ = c->status[1].sample2; *samples++ = c->status[0].sample1; if (st) *samples++ = c->status[1].sample1; for(;n>0;n--) { *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], src[0] >> 4 ); *samples++ = adpcm_ms_expand_nibble(&c->status[st], src[0] & 0x0F); src ++; } break; case CODEC_ID_ADPCM_IMA_DK4: if (avctx->block_align != 0 && buf_size > avctx->block_align) buf_size = avctx->block_align; c->status[0].predictor = 
(int16_t)bytestream_get_le16(&src); c->status[0].step_index = *src++; src++; *samples++ = c->status[0].predictor; if (st) { c->status[1].predictor = (int16_t)bytestream_get_le16(&src); c->status[1].step_index = *src++; src++; *samples++ = c->status[1].predictor; } while (src < buf + buf_size) { /* take care of the top nibble (always left or mono channel) */ *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 3); /* take care of the bottom nibble, which is right sample for * stereo, or another mono sample */ if (st) *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[0] & 0x0F, 3); else *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] & 0x0F, 3); src++; } break; case CODEC_ID_ADPCM_IMA_DK3: if (avctx->block_align != 0 && buf_size > avctx->block_align) buf_size = avctx->block_align; if(buf_size + 16 > (samples_end - samples)*3/8) return -1; c->status[0].predictor = (int16_t)AV_RL16(src + 10); c->status[1].predictor = (int16_t)AV_RL16(src + 12); c->status[0].step_index = src[14]; c->status[1].step_index = src[15]; /* sign extend the predictors */ src += 16; diff_channel = c->status[1].predictor; /* the DK3_GET_NEXT_NIBBLE macro issues the break statement when * the buffer is consumed */ while (1) { /* for this algorithm, c->status[0] is the sum channel and * c->status[1] is the diff channel */ /* process the first predictor of the sum channel */ DK3_GET_NEXT_NIBBLE(); adpcm_ima_expand_nibble(&c->status[0], nibble, 3); /* process the diff channel predictor */ DK3_GET_NEXT_NIBBLE(); adpcm_ima_expand_nibble(&c->status[1], nibble, 3); /* process the first pair of stereo PCM samples */ diff_channel = (diff_channel + c->status[1].predictor) / 2; *samples++ = c->status[0].predictor + c->status[1].predictor; *samples++ = c->status[0].predictor - c->status[1].predictor; /* process the second predictor of the sum channel */ DK3_GET_NEXT_NIBBLE(); adpcm_ima_expand_nibble(&c->status[0], nibble, 3); /* process the second pair of stereo PCM samples */ 
diff_channel = (diff_channel + c->status[1].predictor) / 2; *samples++ = c->status[0].predictor + c->status[1].predictor; *samples++ = c->status[0].predictor - c->status[1].predictor; } break; case CODEC_ID_ADPCM_IMA_ISS: c->status[0].predictor = (int16_t)AV_RL16(src + 0); c->status[0].step_index = src[2]; src += 4; if(st) { c->status[1].predictor = (int16_t)AV_RL16(src + 0); c->status[1].step_index = src[2]; src += 4; } while (src < buf + buf_size) { if (st) { *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4 , 3); *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[0] & 0x0F, 3); } else { *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] & 0x0F, 3); *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4 , 3); } src++; } break; case CODEC_ID_ADPCM_IMA_WS: /* no per-block initialization; just start decoding the data */ while (src < buf + buf_size) { if (st) { *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4 , 3); *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[0] & 0x0F, 3); } else { *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4 , 3); *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] & 0x0F, 3); } src++; } break; case CODEC_ID_ADPCM_XA: while (buf_size >= 128) { xa_decode(samples, src, &c->status[0], &c->status[1], avctx->channels); src += 128; samples += 28 * 8; buf_size -= 128; } break; case CODEC_ID_ADPCM_IMA_EA_EACS: samples_in_chunk = bytestream_get_le32(&src) >> (1-st); if (samples_in_chunk > buf_size-4-(8<<st)) { src += buf_size - 4; break; } for (i=0; i<=st; i++) c->status[i].step_index = bytestream_get_le32(&src); for (i=0; i<=st; i++) c->status[i].predictor = bytestream_get_le32(&src); for (; samples_in_chunk; samples_in_chunk--, src++) { *samples++ = adpcm_ima_expand_nibble(&c->status[0], *src>>4, 3); *samples++ = adpcm_ima_expand_nibble(&c->status[st], *src&0x0F, 3); } break; case CODEC_ID_ADPCM_IMA_EA_SEAD: for (; src < buf+buf_size; src++) { *samples++ = 
adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 6); *samples++ = adpcm_ima_expand_nibble(&c->status[st],src[0]&0x0F, 6); } break; case CODEC_ID_ADPCM_EA: if (buf_size < 4 || AV_RL32(src) >= ((buf_size - 12) * 2)) { src += buf_size; break; } samples_in_chunk = AV_RL32(src); src += 4; current_left_sample = (int16_t)bytestream_get_le16(&src); previous_left_sample = (int16_t)bytestream_get_le16(&src); current_right_sample = (int16_t)bytestream_get_le16(&src); previous_right_sample = (int16_t)bytestream_get_le16(&src); for (count1 = 0; count1 < samples_in_chunk/28;count1++) { coeff1l = ea_adpcm_table[ *src >> 4 ]; coeff2l = ea_adpcm_table[(*src >> 4 ) + 4]; coeff1r = ea_adpcm_table[*src & 0x0F]; coeff2r = ea_adpcm_table[(*src & 0x0F) + 4]; src++; shift_left = (*src >> 4 ) + 8; shift_right = (*src & 0x0F) + 8; src++; for (count2 = 0; count2 < 28; count2++) { next_left_sample = (int32_t)((*src & 0xF0) << 24) >> shift_left; next_right_sample = (int32_t)((*src & 0x0F) << 28) >> shift_right; src++; next_left_sample = (next_left_sample + (current_left_sample * coeff1l) + (previous_left_sample * coeff2l) + 0x80) >> 8; next_right_sample = (next_right_sample + (current_right_sample * coeff1r) + (previous_right_sample * coeff2r) + 0x80) >> 8; previous_left_sample = current_left_sample; current_left_sample = av_clip_int16(next_left_sample); previous_right_sample = current_right_sample; current_right_sample = av_clip_int16(next_right_sample); *samples++ = (unsigned short)current_left_sample; *samples++ = (unsigned short)current_right_sample; } } if (src - buf == buf_size - 2) src += 2; // Skip terminating 0x0000 break; case CODEC_ID_ADPCM_EA_MAXIS_XA: for(channel = 0; channel < avctx->channels; channel++) { for (i=0; i<2; i++) coeff[channel][i] = ea_adpcm_table[(*src >> 4) + 4*i]; shift[channel] = (*src & 0x0F) + 8; src++; } for (count1 = 0; count1 < (buf_size - avctx->channels) / avctx->channels; count1++) { for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL 
(mono) */ for(channel = 0; channel < avctx->channels; channel++) { int32_t sample = (int32_t)(((*(src+channel) >> i) & 0x0F) << 0x1C) >> shift[channel]; sample = (sample + c->status[channel].sample1 * coeff[channel][0] + c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8; c->status[channel].sample2 = c->status[channel].sample1; c->status[channel].sample1 = av_clip_int16(sample); *samples++ = c->status[channel].sample1; } } src+=avctx->channels; } break; case CODEC_ID_ADPCM_EA_R1: case CODEC_ID_ADPCM_EA_R2: case CODEC_ID_ADPCM_EA_R3: { /* channel numbering 2chan: 0=fl, 1=fr 4chan: 0=fl, 1=rl, 2=fr, 3=rr 6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */ const int big_endian = avctx->codec->id == CODEC_ID_ADPCM_EA_R3; int32_t previous_sample, current_sample, next_sample; int32_t coeff1, coeff2; uint8_t shift; unsigned int channel; uint16_t *samplesC; const uint8_t *srcC; const uint8_t *src_end = buf + buf_size; samples_in_chunk = (big_endian ? bytestream_get_be32(&src) : bytestream_get_le32(&src)) / 28; if (samples_in_chunk > UINT32_MAX/(28*avctx->channels) || 28*samples_in_chunk*avctx->channels > samples_end-samples) { src += buf_size - 4; break; } for (channel=0; channel<avctx->channels; channel++) { int32_t offset = (big_endian ? 
bytestream_get_be32(&src) : bytestream_get_le32(&src)) + (avctx->channels-channel-1) * 4; if ((offset < 0) || (offset >= src_end - src - 4)) break; srcC = src + offset; samplesC = samples + channel; if (avctx->codec->id == CODEC_ID_ADPCM_EA_R1) { current_sample = (int16_t)bytestream_get_le16(&srcC); previous_sample = (int16_t)bytestream_get_le16(&srcC); } else { current_sample = c->status[channel].predictor; previous_sample = c->status[channel].prev_sample; } for (count1=0; count1<samples_in_chunk; count1++) { if (*srcC == 0xEE) { /* only seen in R2 and R3 */ srcC++; if (srcC > src_end - 30*2) break; current_sample = (int16_t)bytestream_get_be16(&srcC); previous_sample = (int16_t)bytestream_get_be16(&srcC); for (count2=0; count2<28; count2++) { *samplesC = (int16_t)bytestream_get_be16(&srcC); samplesC += avctx->channels; } } else { coeff1 = ea_adpcm_table[ *srcC>>4 ]; coeff2 = ea_adpcm_table[(*srcC>>4) + 4]; shift = (*srcC++ & 0x0F) + 8; if (srcC > src_end - 14) break; for (count2=0; count2<28; count2++) { if (count2 & 1) next_sample = (int32_t)((*srcC++ & 0x0F) << 28) >> shift; else next_sample = (int32_t)((*srcC & 0xF0) << 24) >> shift; next_sample += (current_sample * coeff1) + (previous_sample * coeff2); next_sample = av_clip_int16(next_sample >> 8); previous_sample = current_sample; current_sample = next_sample; *samplesC = current_sample; samplesC += avctx->channels; } } } if (avctx->codec->id != CODEC_ID_ADPCM_EA_R1) { c->status[channel].predictor = current_sample; c->status[channel].prev_sample = previous_sample; } } src = src + buf_size - (4 + 4*avctx->channels); samples += 28 * samples_in_chunk * avctx->channels; break; } case CODEC_ID_ADPCM_EA_XAS: if (samples_end-samples < 32*4*avctx->channels || buf_size < (4+15)*4*avctx->channels) { src += buf_size; break; } for (channel=0; channel<avctx->channels; channel++) { int coeff[2][4], shift[4]; short *s2, *s = &samples[channel]; for (n=0; n<4; n++, s+=32*avctx->channels) { for (i=0; i<2; i++) coeff[i][n] = 
ea_adpcm_table[(src[0]&0x0F)+4*i]; shift[n] = (src[2]&0x0F) + 8; for (s2=s, i=0; i<2; i++, src+=2, s2+=avctx->channels) s2[0] = (src[0]&0xF0) + (src[1]<<8); } for (m=2; m<32; m+=2) { s = &samples[m*avctx->channels + channel]; for (n=0; n<4; n++, src++, s+=32*avctx->channels) { for (s2=s, i=0; i<8; i+=4, s2+=avctx->channels) { int level = (int32_t)((*src & (0xF0>>i)) << (24+i)) >> shift[n]; int pred = s2[-1*avctx->channels] * coeff[0][n] + s2[-2*avctx->channels] * coeff[1][n]; s2[0] = av_clip_int16((level + pred + 0x80) >> 8); } } } } samples += 32*4*avctx->channels; break; case CODEC_ID_ADPCM_IMA_AMV: case CODEC_ID_ADPCM_IMA_SMJPEG: c->status[0].predictor = (int16_t)bytestream_get_le16(&src); c->status[0].step_index = bytestream_get_le16(&src); if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV) src+=4; while (src < buf + buf_size) { char hi, lo; lo = *src & 0x0F; hi = *src >> 4; if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV) FFSWAP(char, hi, lo); *samples++ = adpcm_ima_expand_nibble(&c->status[0], lo, 3); *samples++ = adpcm_ima_expand_nibble(&c->status[0], hi, 3); src++; } break; case CODEC_ID_ADPCM_CT: while (src < buf + buf_size) { if (st) { *samples++ = adpcm_ct_expand_nibble(&c->status[0], src[0] >> 4); *samples++ = adpcm_ct_expand_nibble(&c->status[1], src[0] & 0x0F); } else { *samples++ = adpcm_ct_expand_nibble(&c->status[0], src[0] >> 4); *samples++ = adpcm_ct_expand_nibble(&c->status[0], src[0] & 0x0F); } src++; } break; case CODEC_ID_ADPCM_SBPRO_4: case CODEC_ID_ADPCM_SBPRO_3: case CODEC_ID_ADPCM_SBPRO_2: if (!c->status[0].step_index) { /* the first byte is a raw sample */ *samples++ = 128 * (*src++ - 0x80); if (st) *samples++ = 128 * (*src++ - 0x80); c->status[0].step_index = 1; } if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_4) { while (src < buf + buf_size) { *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], src[0] >> 4, 4, 0); *samples++ = adpcm_sbpro_expand_nibble(&c->status[st], src[0] & 0x0F, 4, 0); src++; } } else if (avctx->codec->id == 
CODEC_ID_ADPCM_SBPRO_3) { while (src < buf + buf_size && samples + 2 < samples_end) { *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], src[0] >> 5 , 3, 0); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], (src[0] >> 2) & 0x07, 3, 0); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], src[0] & 0x03, 2, 0); src++; } } else { while (src < buf + buf_size && samples + 3 < samples_end) { *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], src[0] >> 6 , 2, 2); *samples++ = adpcm_sbpro_expand_nibble(&c->status[st], (src[0] >> 4) & 0x03, 2, 2); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], (src[0] >> 2) & 0x03, 2, 2); *samples++ = adpcm_sbpro_expand_nibble(&c->status[st], src[0] & 0x03, 2, 2); src++; } } break; case CODEC_ID_ADPCM_SWF: { GetBitContext gb; const int *table; int k0, signmask, nb_bits, count; int size = buf_size*8; init_get_bits(&gb, buf, size); //read bits & initial values nb_bits = get_bits(&gb, 2)+2; //av_log(NULL,AV_LOG_INFO,"nb_bits: %d\n", nb_bits); table = swf_index_tables[nb_bits-2]; k0 = 1 << (nb_bits-2); signmask = 1 << (nb_bits-1); while (get_bits_count(&gb) <= size - 22*avctx->channels) { for (i = 0; i < avctx->channels; i++) { *samples++ = c->status[i].predictor = get_sbits(&gb, 16); c->status[i].step_index = get_bits(&gb, 6); } for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) { int i; for (i = 0; i < avctx->channels; i++) { // similar to IMA adpcm int delta = get_bits(&gb, nb_bits); int step = step_table[c->status[i].step_index]; long vpdiff = 0; // vpdiff = (delta+0.5)*step/4 int k = k0; do { if (delta & k) vpdiff += step; step >>= 1; k >>= 1; } while(k); vpdiff += step; if (delta & signmask) c->status[i].predictor -= vpdiff; else c->status[i].predictor += vpdiff; c->status[i].step_index += table[delta & (~signmask)]; c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88); c->status[i].predictor = av_clip_int16(c->status[i].predictor); *samples++ = 
c->status[i].predictor; if (samples >= samples_end) { av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n"); return -1; } } } } src += buf_size; break; } case CODEC_ID_ADPCM_YAMAHA: while (src < buf + buf_size) { if (st) { *samples++ = adpcm_yamaha_expand_nibble(&c->status[0], src[0] & 0x0F); *samples++ = adpcm_yamaha_expand_nibble(&c->status[1], src[0] >> 4 ); } else { *samples++ = adpcm_yamaha_expand_nibble(&c->status[0], src[0] & 0x0F); *samples++ = adpcm_yamaha_expand_nibble(&c->status[0], src[0] >> 4 ); } src++; } break; case CODEC_ID_ADPCM_THP: { int table[2][16]; unsigned int samplecnt; int prev[2][2]; int ch; if (buf_size < 80) { av_log(avctx, AV_LOG_ERROR, "frame too small\n"); return -1; } src+=4; samplecnt = bytestream_get_be32(&src); for (i = 0; i < 32; i++) table[0][i] = (int16_t)bytestream_get_be16(&src); /* Initialize the previous sample. */ for (i = 0; i < 4; i++) prev[0][i] = (int16_t)bytestream_get_be16(&src); if (samplecnt >= (samples_end - samples) / (st + 1)) { av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n"); return -1; } for (ch = 0; ch <= st; ch++) { samples = (unsigned short *) data + ch; /* Read in every sample for this channel. */ for (i = 0; i < samplecnt / 14; i++) { int index = (*src >> 4) & 7; unsigned int exp = 28 - (*src++ & 15); int factor1 = table[ch][index * 2]; int factor2 = table[ch][index * 2 + 1]; /* Decode 14 samples. */ for (n = 0; n < 14; n++) { int32_t sampledat; if(n&1) sampledat= *src++ <<28; else sampledat= (*src&0xF0)<<24; sampledat = ((prev[ch][0]*factor1 + prev[ch][1]*factor2) >> 11) + (sampledat>>exp); *samples = av_clip_int16(sampledat); prev[ch][1] = prev[ch][0]; prev[ch][0] = *samples++; /* In case of stereo, skip one sample, this sample is for the other channel. */ samples += st; } } } /* In the previous loop, in case stereo is used, samples is increased exactly one time too often. 
*/ samples -= st; break; } default: return -1; } *data_size = (uint8_t *)samples - (uint8_t *)data; return src - buf; } #if CONFIG_ENCODERS #define ADPCM_ENCODER(id,name,long_name_) \ AVCodec name ## _encoder = { \ #name, \ AVMEDIA_TYPE_AUDIO, \ id, \ sizeof(ADPCMContext), \ adpcm_encode_init, \ adpcm_encode_frame, \ adpcm_encode_close, \ NULL, \ .sample_fmts = (const enum SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE}, \ .long_name = NULL_IF_CONFIG_SMALL(long_name_), \ }; #else #define ADPCM_ENCODER(id,name,long_name_) #endif #if CONFIG_DECODERS #define ADPCM_DECODER(id,name,long_name_) \ AVCodec name ## _decoder = { \ #name, \ AVMEDIA_TYPE_AUDIO, \ id, \ sizeof(ADPCMContext), \ adpcm_decode_init, \ NULL, \ NULL, \ adpcm_decode_frame, \ .long_name = NULL_IF_CONFIG_SMALL(long_name_), \ }; #else #define ADPCM_DECODER(id,name,long_name_) #endif #define ADPCM_CODEC(id,name,long_name_) \ ADPCM_ENCODER(id,name,long_name_) ADPCM_DECODER(id,name,long_name_) /* Note: Do not forget to add new entries to the Makefile as well. 
*/ ADPCM_DECODER(CODEC_ID_ADPCM_4XM, adpcm_4xm, "ADPCM 4X Movie"); ADPCM_DECODER(CODEC_ID_ADPCM_CT, adpcm_ct, "ADPCM Creative Technology"); ADPCM_DECODER(CODEC_ID_ADPCM_EA, adpcm_ea, "ADPCM Electronic Arts"); ADPCM_DECODER(CODEC_ID_ADPCM_EA_MAXIS_XA, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA"); ADPCM_DECODER(CODEC_ID_ADPCM_EA_R1, adpcm_ea_r1, "ADPCM Electronic Arts R1"); ADPCM_DECODER(CODEC_ID_ADPCM_EA_R2, adpcm_ea_r2, "ADPCM Electronic Arts R2"); ADPCM_DECODER(CODEC_ID_ADPCM_EA_R3, adpcm_ea_r3, "ADPCM Electronic Arts R3"); ADPCM_DECODER(CODEC_ID_ADPCM_EA_XAS, adpcm_ea_xas, "ADPCM Electronic Arts XAS"); ADPCM_DECODER(CODEC_ID_ADPCM_IMA_AMV, adpcm_ima_amv, "ADPCM IMA AMV"); ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3, "ADPCM IMA Duck DK3"); ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4, "ADPCM IMA Duck DK4"); ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS"); ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_SEAD, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD"); ADPCM_DECODER(CODEC_ID_ADPCM_IMA_ISS, adpcm_ima_iss, "ADPCM IMA Funcom ISS"); ADPCM_CODEC (CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime"); ADPCM_DECODER(CODEC_ID_ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG"); ADPCM_CODEC (CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV"); ADPCM_DECODER(CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws, "ADPCM IMA Westwood"); ADPCM_CODEC (CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft"); ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_2, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit"); ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_3, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit"); ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_4, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit"); ADPCM_CODEC (CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash"); ADPCM_DECODER(CODEC_ID_ADPCM_THP, adpcm_thp, "ADPCM Nintendo Gamecube THP"); ADPCM_DECODER(CODEC_ID_ADPCM_XA, adpcm_xa, "ADPCM CDROM XA"); ADPCM_CODEC (CODEC_ID_ADPCM_YAMAHA, 
adpcm_yamaha, "ADPCM Yamaha");
123linslouis-android-video-cutter
jni/libavcodec/adpcm.c
C
asf20
63,525
/* * JPEG 2000 decoding support via OpenJPEG * Copyright (c) 2009 Jaikrishnan Menon <realityman@gmx.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * JPEG 2000 decoder using libopenjpeg */ #include "avcodec.h" #include "libavutil/intreadwrite.h" #define OPJ_STATIC #include <openjpeg.h> #define JP2_SIG_TYPE 0x6A502020 #define JP2_SIG_VALUE 0x0D0A870A typedef struct { opj_dparameters_t dec_params; AVFrame image; } LibOpenJPEGContext; static int check_image_attributes(opj_image_t *image) { return image->comps[0].dx == image->comps[1].dx && image->comps[1].dx == image->comps[2].dx && image->comps[0].dy == image->comps[1].dy && image->comps[1].dy == image->comps[2].dy && image->comps[0].prec == image->comps[1].prec && image->comps[1].prec == image->comps[2].prec; } static av_cold int libopenjpeg_decode_init(AVCodecContext *avctx) { LibOpenJPEGContext *ctx = avctx->priv_data; opj_set_default_decoder_parameters(&ctx->dec_params); avctx->coded_frame = &ctx->image; return 0; } static int libopenjpeg_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; LibOpenJPEGContext *ctx = avctx->priv_data; AVFrame *picture = &ctx->image, *output = data; opj_dinfo_t *dec; opj_cio_t *stream; 
opj_image_t *image; int width, height, has_alpha = 0, ret = -1; int x, y, index; uint8_t *img_ptr; int adjust[4]; *data_size = 0; // Check if input is a raw jpeg2k codestream or in jp2 wrapping if((AV_RB32(buf) == 12) && (AV_RB32(buf + 4) == JP2_SIG_TYPE) && (AV_RB32(buf + 8) == JP2_SIG_VALUE)) { dec = opj_create_decompress(CODEC_JP2); } else { // If the AVPacket contains a jp2c box, then skip to // the starting byte of the codestream. if (AV_RB32(buf + 4) == AV_RB32("jp2c")) buf += 8; dec = opj_create_decompress(CODEC_J2K); } if(!dec) { av_log(avctx, AV_LOG_ERROR, "Error initializing decoder.\n"); return -1; } opj_set_event_mgr((opj_common_ptr)dec, NULL, NULL); ctx->dec_params.cp_reduce = avctx->lowres; // Tie decoder with decoding parameters opj_setup_decoder(dec, &ctx->dec_params); stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size); if(!stream) { av_log(avctx, AV_LOG_ERROR, "Codestream could not be opened for reading.\n"); opj_destroy_decompress(dec); return -1; } // Decode the codestream image = opj_decode_with_info(dec, stream, NULL); opj_cio_close(stream); if(!image) { av_log(avctx, AV_LOG_ERROR, "Error decoding codestream.\n"); opj_destroy_decompress(dec); return -1; } width = image->comps[0].w << avctx->lowres; height = image->comps[0].h << avctx->lowres; if(avcodec_check_dimensions(avctx, width, height) < 0) { av_log(avctx, AV_LOG_ERROR, "%dx%d dimension invalid.\n", width, height); goto done; } avcodec_set_dimensions(avctx, width, height); switch(image->numcomps) { case 1: avctx->pix_fmt = PIX_FMT_GRAY8; break; case 3: if(check_image_attributes(image)) { avctx->pix_fmt = PIX_FMT_RGB24; } else { avctx->pix_fmt = PIX_FMT_GRAY8; av_log(avctx, AV_LOG_ERROR, "Only first component will be used.\n"); } break; case 4: has_alpha = 1; avctx->pix_fmt = PIX_FMT_RGBA; break; default: av_log(avctx, AV_LOG_ERROR, "%d components unsupported.\n", image->numcomps); goto done; } if(picture->data[0]) avctx->release_buffer(avctx, picture); if(avctx->get_buffer(avctx, 
picture) < 0) { av_log(avctx, AV_LOG_ERROR, "Couldn't allocate image buffer.\n"); return -1; } for(x = 0; x < image->numcomps; x++) { adjust[x] = FFMAX(image->comps[x].prec - 8, 0); } for(y = 0; y < avctx->height; y++) { index = y*avctx->width; img_ptr = picture->data[0] + y*picture->linesize[0]; for(x = 0; x < avctx->width; x++, index++) { *img_ptr++ = image->comps[0].data[index] >> adjust[0]; if(image->numcomps > 2 && check_image_attributes(image)) { *img_ptr++ = image->comps[1].data[index] >> adjust[1]; *img_ptr++ = image->comps[2].data[index] >> adjust[2]; if(has_alpha) *img_ptr++ = image->comps[3].data[index] >> adjust[3]; } } } *output = ctx->image; *data_size = sizeof(AVPicture); ret = buf_size; done: opj_image_destroy(image); opj_destroy_decompress(dec); return ret; } static av_cold int libopenjpeg_decode_close(AVCodecContext *avctx) { LibOpenJPEGContext *ctx = avctx->priv_data; if(ctx->image.data[0]) avctx->release_buffer(avctx, &ctx->image); return 0 ; } AVCodec libopenjpeg_decoder = { "libopenjpeg", AVMEDIA_TYPE_VIDEO, CODEC_ID_JPEG2000, sizeof(LibOpenJPEGContext), libopenjpeg_decode_init, NULL, libopenjpeg_decode_close, libopenjpeg_decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("OpenJPEG based JPEG 2000 decoder"), } ;
123linslouis-android-video-cutter
jni/libavcodec/libopenjpeg.c
C
asf20
6,262
/* * Miro VideoXL codec * Copyright (c) 2004 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Miro VideoXL codec. */ #include "libavutil/intreadwrite.h" #include "avcodec.h" typedef struct VideoXLContext{ AVCodecContext *avctx; AVFrame pic; } VideoXLContext; static const int xl_table[32] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 15, 20, 25, 34, 46, 64, 82, 94, 103, 108, 113, 116, 119, 120, 121, 122, 123, 124, 125, 126, 127}; static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; VideoXLContext * const a = avctx->priv_data; AVFrame * const p= (AVFrame*)&a->pic; uint8_t *Y, *U, *V; int i, j; int stride; uint32_t val; int y0, y1, y2, y3 = 0, c0 = 0, c1 = 0; if(p->data[0]) avctx->release_buffer(avctx, p); p->reference = 0; if(avctx->get_buffer(avctx, p) < 0){ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } p->pict_type= FF_I_TYPE; p->key_frame= 1; Y = a->pic.data[0]; U = a->pic.data[1]; V = a->pic.data[2]; stride = avctx->width - 4; for (i = 0; i < avctx->height; i++) { /* lines are stored in reversed order */ buf += stride; for (j = 0; j < avctx->width; j += 4) { /* value is stored in LE dword with word swapped */ val = AV_RL32(buf); buf 
-= 4; val = ((val >> 16) & 0xFFFF) | ((val & 0xFFFF) << 16); if(!j) y0 = (val & 0x1F) << 2; else y0 = y3 + xl_table[val & 0x1F]; val >>= 5; y1 = y0 + xl_table[val & 0x1F]; val >>= 5; y2 = y1 + xl_table[val & 0x1F]; val >>= 6; /* align to word */ y3 = y2 + xl_table[val & 0x1F]; val >>= 5; if(!j) c0 = (val & 0x1F) << 2; else c0 += xl_table[val & 0x1F]; val >>= 5; if(!j) c1 = (val & 0x1F) << 2; else c1 += xl_table[val & 0x1F]; Y[j + 0] = y0 << 1; Y[j + 1] = y1 << 1; Y[j + 2] = y2 << 1; Y[j + 3] = y3 << 1; U[j >> 2] = c0 << 1; V[j >> 2] = c1 << 1; } buf += avctx->width + 4; Y += a->pic.linesize[0]; U += a->pic.linesize[1]; V += a->pic.linesize[2]; } *data_size = sizeof(AVFrame); *(AVFrame*)data = a->pic; return buf_size; } static av_cold int decode_init(AVCodecContext *avctx){ // VideoXLContext * const a = avctx->priv_data; avctx->pix_fmt= PIX_FMT_YUV411P; return 0; } static av_cold int decode_end(AVCodecContext *avctx){ VideoXLContext * const a = avctx->priv_data; AVFrame *pic = &a->pic; if (pic->data[0]) avctx->release_buffer(avctx, pic); return 0; } AVCodec xl_decoder = { "xl", AVMEDIA_TYPE_VIDEO, CODEC_ID_VIXL, sizeof(VideoXLContext), decode_init, NULL, decode_end, decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Miro VideoXL"), };
123linslouis-android-video-cutter
jni/libavcodec/xl.c
C
asf20
4,044
/* * Sony PlayStation MDEC (Motion DECoder) * Copyright (c) 2003 Michael Niedermayer * * based upon code from Sebastian Jedruszkiewicz <elf@frogger.rules.pl> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Sony PlayStation MDEC (Motion DECoder) * This is very similar to intra-only MPEG-1. */ #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" #include "mpeg12.h" typedef struct MDECContext{ AVCodecContext *avctx; DSPContext dsp; AVFrame picture; GetBitContext gb; ScanTable scantable; int version; int qscale; int last_dc[3]; int mb_width; int mb_height; int mb_x, mb_y; DECLARE_ALIGNED(16, DCTELEM, block)[6][64]; uint8_t *bitstream_buffer; unsigned int bitstream_buffer_size; int block_last_index[6]; } MDECContext; //very similar to MPEG-1 static inline int mdec_decode_block_intra(MDECContext *a, DCTELEM *block, int n) { int level, diff, i, j, run; int component; RLTable *rl = &ff_rl_mpeg1; uint8_t * const scantable= a->scantable.permutated; const uint16_t *quant_matrix= ff_mpeg1_default_intra_matrix; const int qscale= a->qscale; /* DC coefficient */ if(a->version==2){ block[0]= 2*get_sbits(&a->gb, 10) + 1024; }else{ component = (n <= 3 ? 
0 : n - 4 + 1); diff = decode_dc(&a->gb, component); if (diff >= 0xffff) return -1; a->last_dc[component]+= diff; block[0] = a->last_dc[component]<<3; } i = 0; { OPEN_READER(re, &a->gb); /* now quantify & encode AC coefficients */ for(;;) { UPDATE_CACHE(re, &a->gb); GET_RL_VLC(level, run, re, &a->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); if(level == 127){ break; } else if(level != 0) { i += run; j = scantable[i]; level= (level*qscale*quant_matrix[j])>>3; level = (level ^ SHOW_SBITS(re, &a->gb, 1)) - SHOW_SBITS(re, &a->gb, 1); LAST_SKIP_BITS(re, &a->gb, 1); } else { /* escape */ run = SHOW_UBITS(re, &a->gb, 6)+1; LAST_SKIP_BITS(re, &a->gb, 6); UPDATE_CACHE(re, &a->gb); level = SHOW_SBITS(re, &a->gb, 10); SKIP_BITS(re, &a->gb, 10); i += run; j = scantable[i]; if(level<0){ level= -level; level= (level*qscale*quant_matrix[j])>>3; level= (level-1)|1; level= -level; }else{ level= (level*qscale*quant_matrix[j])>>3; level= (level-1)|1; } } if (i > 63){ av_log(a->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", a->mb_x, a->mb_y); return -1; } block[j] = level; } CLOSE_READER(re, &a->gb); } a->block_last_index[n] = i; return 0; } static inline int decode_mb(MDECContext *a, DCTELEM block[6][64]){ int i; const int block_index[6]= {5,4,0,1,2,3}; a->dsp.clear_blocks(block[0]); for(i=0; i<6; i++){ if( mdec_decode_block_intra(a, block[ block_index[i] ], block_index[i]) < 0) return -1; } return 0; } static inline void idct_put(MDECContext *a, int mb_x, int mb_y){ DCTELEM (*block)[64]= a->block; int linesize= a->picture.linesize[0]; uint8_t *dest_y = a->picture.data[0] + (mb_y * 16* linesize ) + mb_x * 16; uint8_t *dest_cb = a->picture.data[1] + (mb_y * 8 * a->picture.linesize[1]) + mb_x * 8; uint8_t *dest_cr = a->picture.data[2] + (mb_y * 8 * a->picture.linesize[2]) + mb_x * 8; a->dsp.idct_put(dest_y , linesize, block[0]); a->dsp.idct_put(dest_y + 8, linesize, block[1]); a->dsp.idct_put(dest_y + 8*linesize , linesize, block[2]); a->dsp.idct_put(dest_y + 8*linesize + 8, linesize, 
block[3]); if(!(a->avctx->flags&CODEC_FLAG_GRAY)){ a->dsp.idct_put(dest_cb, a->picture.linesize[1], block[4]); a->dsp.idct_put(dest_cr, a->picture.linesize[2], block[5]); } } static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MDECContext * const a = avctx->priv_data; AVFrame *picture = data; AVFrame * const p= &a->picture; int i; if(p->data[0]) avctx->release_buffer(avctx, p); p->reference= 0; if(avctx->get_buffer(avctx, p) < 0){ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } p->pict_type= FF_I_TYPE; p->key_frame= 1; av_fast_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!a->bitstream_buffer) return AVERROR(ENOMEM); for(i=0; i<buf_size; i+=2){ a->bitstream_buffer[i] = buf[i+1]; a->bitstream_buffer[i+1]= buf[i ]; } init_get_bits(&a->gb, a->bitstream_buffer, buf_size*8); /* skip over 4 preamble bytes in stream (typically 0xXX 0xXX 0x00 0x38) */ skip_bits(&a->gb, 32); a->qscale= get_bits(&a->gb, 16); a->version= get_bits(&a->gb, 16); a->last_dc[0]= a->last_dc[1]= a->last_dc[2]= 128; for(a->mb_x=0; a->mb_x<a->mb_width; a->mb_x++){ for(a->mb_y=0; a->mb_y<a->mb_height; a->mb_y++){ if( decode_mb(a, a->block) <0) return -1; idct_put(a, a->mb_x, a->mb_y); } } p->quality= a->qscale * FF_QP2LAMBDA; memset(p->qscale_table, a->qscale, a->mb_width); *picture = a->picture; *data_size = sizeof(AVPicture); return (get_bits_count(&a->gb)+31)/32*4; } static av_cold void mdec_common_init(AVCodecContext *avctx){ MDECContext * const a = avctx->priv_data; dsputil_init(&a->dsp, avctx); a->mb_width = (avctx->coded_width + 15) / 16; a->mb_height = (avctx->coded_height + 15) / 16; avctx->coded_frame= &a->picture; a->avctx= avctx; } static av_cold int decode_init(AVCodecContext *avctx){ MDECContext * const a = avctx->priv_data; AVFrame *p= &a->picture; mdec_common_init(avctx); ff_mpeg12_init_vlcs(); 
ff_init_scantable(a->dsp.idct_permutation, &a->scantable, ff_zigzag_direct); p->qstride= 0; p->qscale_table= av_mallocz(a->mb_width); avctx->pix_fmt= PIX_FMT_YUV420P; return 0; } static av_cold int decode_end(AVCodecContext *avctx){ MDECContext * const a = avctx->priv_data; if(a->picture.data[0]) avctx->release_buffer(avctx, &a->picture); av_freep(&a->bitstream_buffer); av_freep(&a->picture.qscale_table); a->bitstream_buffer_size=0; return 0; } AVCodec mdec_decoder = { "mdec", AVMEDIA_TYPE_VIDEO, CODEC_ID_MDEC, sizeof(MDECContext), decode_init, NULL, decode_end, decode_frame, CODEC_CAP_DR1, .long_name= NULL_IF_CONFIG_SMALL("Sony PlayStation MDEC (Motion DECoder)"), };
123linslouis-android-video-cutter
jni/libavcodec/mdec.c
C
asf20
7,886
/* * Copyright (c) 2000, 2001, 2002 Fabrice Bellard * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_FFT_H #define AVCODEC_FFT_H #include <stdint.h> #include "config.h" #include "libavutil/mem.h" #include "avfft.h" /* FFT computation */ struct FFTContext { int nbits; int inverse; uint16_t *revtab; FFTComplex *exptab; FFTComplex *exptab1; /* only used by SSE code */ FFTComplex *tmp_buf; int mdct_size; /* size of MDCT (i.e. 
number of input data * 2) */ int mdct_bits; /* n = 2^nbits */ /* pre/post rotation tables */ FFTSample *tcos; FFTSample *tsin; void (*fft_permute)(struct FFTContext *s, FFTComplex *z); void (*fft_calc)(struct FFTContext *s, FFTComplex *z); void (*imdct_calc)(struct FFTContext *s, FFTSample *output, const FFTSample *input); void (*imdct_half)(struct FFTContext *s, FFTSample *output, const FFTSample *input); void (*mdct_calc)(struct FFTContext *s, FFTSample *output, const FFTSample *input); int split_radix; int permutation; #define FF_MDCT_PERM_NONE 0 #define FF_MDCT_PERM_INTERLEAVE 1 }; #if CONFIG_HARDCODED_TABLES #define COSTABLE_CONST const #define SINTABLE_CONST const #define SINETABLE_CONST const #else #define COSTABLE_CONST #define SINTABLE_CONST #define SINETABLE_CONST #endif #define COSTABLE(size) \ COSTABLE_CONST DECLARE_ALIGNED(16, FFTSample, ff_cos_##size)[size/2] #define SINTABLE(size) \ SINTABLE_CONST DECLARE_ALIGNED(16, FFTSample, ff_sin_##size)[size/2] #define SINETABLE(size) \ SINETABLE_CONST DECLARE_ALIGNED(16, float, ff_sine_##size)[size] extern COSTABLE(16); extern COSTABLE(32); extern COSTABLE(64); extern COSTABLE(128); extern COSTABLE(256); extern COSTABLE(512); extern COSTABLE(1024); extern COSTABLE(2048); extern COSTABLE(4096); extern COSTABLE(8192); extern COSTABLE(16384); extern COSTABLE(32768); extern COSTABLE(65536); extern COSTABLE_CONST FFTSample* const ff_cos_tabs[17]; /** * Initializes the cosine table in ff_cos_tabs[index] * \param index index in ff_cos_tabs array of the table to initialize */ void ff_init_ff_cos_tabs(int index); extern SINTABLE(16); extern SINTABLE(32); extern SINTABLE(64); extern SINTABLE(128); extern SINTABLE(256); extern SINTABLE(512); extern SINTABLE(1024); extern SINTABLE(2048); extern SINTABLE(4096); extern SINTABLE(8192); extern SINTABLE(16384); extern SINTABLE(32768); extern SINTABLE(65536); /** * Sets up a complex FFT. 
* @param nbits log2 of the length of the input array * @param inverse if 0 perform the forward transform, if 1 perform the inverse */ int ff_fft_init(FFTContext *s, int nbits, int inverse); void ff_fft_permute_c(FFTContext *s, FFTComplex *z); void ff_fft_calc_c(FFTContext *s, FFTComplex *z); void ff_fft_init_altivec(FFTContext *s); void ff_fft_init_mmx(FFTContext *s); void ff_fft_init_arm(FFTContext *s); /** * Do the permutation needed BEFORE calling ff_fft_calc(). */ static inline void ff_fft_permute(FFTContext *s, FFTComplex *z) { s->fft_permute(s, z); } /** * Do a complex FFT with the parameters defined in ff_fft_init(). The * input data must be permuted before. No 1.0/sqrt(n) normalization is done. */ static inline void ff_fft_calc(FFTContext *s, FFTComplex *z) { s->fft_calc(s, z); } void ff_fft_end(FFTContext *s); /* MDCT computation */ static inline void ff_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input) { s->imdct_calc(s, output, input); } static inline void ff_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input) { s->imdct_half(s, output, input); } static inline void ff_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input) { s->mdct_calc(s, output, input); } /** * Generate a Kaiser-Bessel Derived Window. * @param window pointer to half window * @param alpha determines window shape * @param n size of half window */ void ff_kbd_window_init(float *window, float alpha, int n); /** * Generate a sine window. 
* @param window pointer to half window * @param n size of half window */ void ff_sine_window_init(float *window, int n); /** * initialize the specified entry of ff_sine_windows */ void ff_init_ff_sine_windows(int index); extern SINETABLE( 32); extern SINETABLE( 64); extern SINETABLE( 128); extern SINETABLE( 256); extern SINETABLE( 512); extern SINETABLE(1024); extern SINETABLE(2048); extern SINETABLE(4096); extern SINETABLE_CONST float * const ff_sine_windows[13]; int ff_mdct_init(FFTContext *s, int nbits, int inverse, double scale); void ff_imdct_calc_c(FFTContext *s, FFTSample *output, const FFTSample *input); void ff_imdct_half_c(FFTContext *s, FFTSample *output, const FFTSample *input); void ff_mdct_calc_c(FFTContext *s, FFTSample *output, const FFTSample *input); void ff_mdct_end(FFTContext *s); /* Real Discrete Fourier Transform */ struct RDFTContext { int nbits; int inverse; int sign_convention; /* pre/post rotation tables */ const FFTSample *tcos; SINTABLE_CONST FFTSample *tsin; FFTContext fft; void (*rdft_calc)(struct RDFTContext *s, FFTSample *z); }; /** * Sets up a real FFT. * @param nbits log2 of the length of the input array * @param trans the type of transform */ int ff_rdft_init(RDFTContext *s, int nbits, enum RDFTransformType trans); void ff_rdft_end(RDFTContext *s); void ff_rdft_init_arm(RDFTContext *s); static av_always_inline void ff_rdft_calc(RDFTContext *s, FFTSample *data) { s->rdft_calc(s, data); } /* Discrete Cosine Transform */ struct DCTContext { int nbits; int inverse; RDFTContext rdft; const float *costab; FFTSample *csc2; void (*dct_calc)(struct DCTContext *s, FFTSample *data); }; /** * Sets up DCT. 
* @param nbits size of the input array: * (1 << nbits) for DCT-II, DCT-III and DST-I * (1 << nbits) + 1 for DCT-I * * @note the first element of the input of DST-I is ignored */ int ff_dct_init(DCTContext *s, int nbits, enum DCTTransformType type); void ff_dct_calc(DCTContext *s, FFTSample *data); void ff_dct_end (DCTContext *s); #endif /* AVCODEC_FFT_H */
123linslouis-android-video-cutter
jni/libavcodec/fft.h
C
asf20
7,022
/* * various filters for CELP-based codecs * * Copyright (c) 2008 Vladimir Voroshilov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_CELP_FILTERS_H #define AVCODEC_CELP_FILTERS_H #include <stdint.h> /** * Circularly convolve fixed vector with a phase dispersion impulse * response filter (D.6.2 of G.729 and 6.1.5 of AMR). * @param fc_out vector with filter applied * @param fc_in source vector * @param filter phase filter coefficients * * fc_out[n] = sum(i,0,len-1){ fc_in[i] * filter[(len + n - i)%len] } * * \note fc_in and fc_out should not overlap! */ void ff_celp_convolve_circ(int16_t *fc_out, const int16_t *fc_in, const int16_t *filter, int len); /** * Add an array to a rotated array. * * out[k] = in[k] + fac * lagged[k-lag] with wrap-around * * @param out result vector * @param in samples to be added unfiltered * @param lagged samples to be rotated, multiplied and added * @param lag lagged vector delay in the range [0, n] * @param fac scalefactor for lagged samples * @param n number of samples */ void ff_celp_circ_addf(float *out, const float *in, const float *lagged, int lag, float fac, int n); /** * LP synthesis filter. 
* @param out [out] pointer to output buffer * @param filter_coeffs filter coefficients (-0x8000 <= (3.12) < 0x8000) * @param in input signal * @param buffer_length amount of data to process * @param filter_length filter length (10 for 10th order LP filter) * @param stop_on_overflow 1 - return immediately if overflow occurs * 0 - ignore overflows * @param rounder the amount to add for rounding (usually 0x800 or 0xfff) * * @return 1 if overflow occurred, 0 - otherwise * * @note Output buffer must contain filter_length samples of past * speech data before pointer. * * Routine applies 1/A(z) filter to given speech data. */ int ff_celp_lp_synthesis_filter(int16_t *out, const int16_t *filter_coeffs, const int16_t *in, int buffer_length, int filter_length, int stop_on_overflow, int rounder); /** * LP synthesis filter. * @param out [out] pointer to output buffer * - the array out[-filter_length, -1] must * contain the previous result of this filter * @param filter_coeffs filter coefficients. * @param in input signal * @param buffer_length amount of data to process * @param filter_length filter length (10 for 10th order LP filter). Must be * greater than 4 and even. * * @note Output buffer must contain filter_length samples of past * speech data before pointer. * * Routine applies 1/A(z) filter to given speech data. */ void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs, const float *in, int buffer_length, int filter_length); /** * LP zero synthesis filter. * @param out [out] pointer to output buffer * @param filter_coeffs filter coefficients. * @param in input signal * - the array in[-filter_length, -1] must * contain the previous input of this filter * @param buffer_length amount of data to process * @param filter_length filter length (10 for 10th order LP filter) * * @note Output buffer must contain filter_length samples of past * speech data before pointer. * * Routine applies A(z) filter to given speech data. 
*/ void ff_celp_lp_zero_synthesis_filterf(float *out, const float *filter_coeffs, const float *in, int buffer_length, int filter_length); #endif /* AVCODEC_CELP_FILTERS_H */
123linslouis-android-video-cutter
jni/libavcodec/celp_filters.h
C
asf20
4,482
/* * IBM Ultimotion Video Decoder * Copyright (C) 2004 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * IBM Ultimotion Video Decoder. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "avcodec.h" #include "bytestream.h" #include "ulti_cb.h" typedef struct UltimotionDecodeContext { AVCodecContext *avctx; int width, height, blocks; AVFrame frame; const uint8_t *ulti_codebook; } UltimotionDecodeContext; static av_cold int ulti_decode_init(AVCodecContext *avctx) { UltimotionDecodeContext *s = avctx->priv_data; s->avctx = avctx; s->width = avctx->width; s->height = avctx->height; s->blocks = (s->width / 8) * (s->height / 8); avctx->pix_fmt = PIX_FMT_YUV410P; avctx->coded_frame = (AVFrame*) &s->frame; s->ulti_codebook = ulti_codebook; return 0; } static av_cold int ulti_decode_end(AVCodecContext *avctx){ UltimotionDecodeContext *s = avctx->priv_data; AVFrame *pic = &s->frame; if (pic->data[0]) avctx->release_buffer(avctx, pic); return 0; } static const int block_coords[8] = // 4x4 block coords in 8x8 superblock { 0, 0, 0, 4, 4, 4, 4, 0}; static const int angle_by_index[4] = { 0, 2, 6, 12}; /* Lookup tables for luma and chroma - used by ulti_convert_yuv() */ static const uint8_t ulti_lumas[64] = { 0x10, 0x13, 0x17, 0x1A, 0x1E, 0x21, 
0x25, 0x28, 0x2C, 0x2F, 0x33, 0x36, 0x3A, 0x3D, 0x41, 0x44, 0x48, 0x4B, 0x4F, 0x52, 0x56, 0x59, 0x5C, 0x60, 0x63, 0x67, 0x6A, 0x6E, 0x71, 0x75, 0x78, 0x7C, 0x7F, 0x83, 0x86, 0x8A, 0x8D, 0x91, 0x94, 0x98, 0x9B, 0x9F, 0xA2, 0xA5, 0xA9, 0xAC, 0xB0, 0xB3, 0xB7, 0xBA, 0xBE, 0xC1, 0xC5, 0xC8, 0xCC, 0xCF, 0xD3, 0xD6, 0xDA, 0xDD, 0xE1, 0xE4, 0xE8, 0xEB}; static const uint8_t ulti_chromas[16] = { 0x60, 0x67, 0x6D, 0x73, 0x7A, 0x80, 0x86, 0x8D, 0x93, 0x99, 0xA0, 0xA6, 0xAC, 0xB3, 0xB9, 0xC0}; /* convert Ultimotion YUV block (sixteen 6-bit Y samples and two 4-bit chroma samples) into standard YUV and put it into frame */ static void ulti_convert_yuv(AVFrame *frame, int x, int y, uint8_t *luma,int chroma) { uint8_t *y_plane, *cr_plane, *cb_plane; int i; y_plane = frame->data[0] + x + y * frame->linesize[0]; cr_plane = frame->data[1] + (x / 4) + (y / 4) * frame->linesize[1]; cb_plane = frame->data[2] + (x / 4) + (y / 4) * frame->linesize[2]; cr_plane[0] = ulti_chromas[chroma >> 4]; cb_plane[0] = ulti_chromas[chroma & 0xF]; for(i = 0; i < 16; i++){ y_plane[i & 3] = ulti_lumas[luma[i]]; if((i & 3) == 3) { //next row y_plane += frame->linesize[0]; } } } /* generate block like in MS Video1 */ static void ulti_pattern(AVFrame *frame, int x, int y, int f0, int f1, int Y0, int Y1, int chroma) { uint8_t Luma[16]; int mask, i; for(mask = 0x80, i = 0; mask; mask >>= 1, i++) { if(f0 & mask) Luma[i] = Y1; else Luma[i] = Y0; } for(mask = 0x80, i = 8; mask; mask >>= 1, i++) { if(f1 & mask) Luma[i] = Y1; else Luma[i] = Y0; } ulti_convert_yuv(frame, x, y, Luma, chroma); } /* fill block with some gradient */ static void ulti_grad(AVFrame *frame, int x, int y, uint8_t *Y, int chroma, int angle) { uint8_t Luma[16]; if(angle & 8) { //reverse order int t; angle &= 0x7; t = Y[0]; Y[0] = Y[3]; Y[3] = t; t = Y[1]; Y[1] = Y[2]; Y[2] = t; } switch(angle){ case 0: Luma[0] = Y[0]; Luma[1] = Y[1]; Luma[2] = Y[2]; Luma[3] = Y[3]; Luma[4] = Y[0]; Luma[5] = Y[1]; Luma[6] = Y[2]; Luma[7] = Y[3]; Luma[8] = 
Y[0]; Luma[9] = Y[1]; Luma[10] = Y[2]; Luma[11] = Y[3]; Luma[12] = Y[0]; Luma[13] = Y[1]; Luma[14] = Y[2]; Luma[15] = Y[3]; break; case 1: Luma[0] = Y[1]; Luma[1] = Y[2]; Luma[2] = Y[3]; Luma[3] = Y[3]; Luma[4] = Y[0]; Luma[5] = Y[1]; Luma[6] = Y[2]; Luma[7] = Y[3]; Luma[8] = Y[0]; Luma[9] = Y[1]; Luma[10] = Y[2]; Luma[11] = Y[3]; Luma[12] = Y[0]; Luma[13] = Y[0]; Luma[14] = Y[1]; Luma[15] = Y[2]; break; case 2: Luma[0] = Y[1]; Luma[1] = Y[2]; Luma[2] = Y[3]; Luma[3] = Y[3]; Luma[4] = Y[1]; Luma[5] = Y[2]; Luma[6] = Y[2]; Luma[7] = Y[3]; Luma[8] = Y[0]; Luma[9] = Y[1]; Luma[10] = Y[1]; Luma[11] = Y[2]; Luma[12] = Y[0]; Luma[13] = Y[0]; Luma[14] = Y[1]; Luma[15] = Y[2]; break; case 3: Luma[0] = Y[2]; Luma[1] = Y[3]; Luma[2] = Y[3]; Luma[3] = Y[3]; Luma[4] = Y[1]; Luma[5] = Y[2]; Luma[6] = Y[2]; Luma[7] = Y[3]; Luma[8] = Y[0]; Luma[9] = Y[1]; Luma[10] = Y[1]; Luma[11] = Y[2]; Luma[12] = Y[0]; Luma[13] = Y[0]; Luma[14] = Y[0]; Luma[15] = Y[1]; break; case 4: Luma[0] = Y[3]; Luma[1] = Y[3]; Luma[2] = Y[3]; Luma[3] = Y[3]; Luma[4] = Y[2]; Luma[5] = Y[2]; Luma[6] = Y[2]; Luma[7] = Y[2]; Luma[8] = Y[1]; Luma[9] = Y[1]; Luma[10] = Y[1]; Luma[11] = Y[1]; Luma[12] = Y[0]; Luma[13] = Y[0]; Luma[14] = Y[0]; Luma[15] = Y[0]; break; case 5: Luma[0] = Y[3]; Luma[1] = Y[3]; Luma[2] = Y[3]; Luma[3] = Y[2]; Luma[4] = Y[3]; Luma[5] = Y[2]; Luma[6] = Y[2]; Luma[7] = Y[1]; Luma[8] = Y[2]; Luma[9] = Y[1]; Luma[10] = Y[1]; Luma[11] = Y[0]; Luma[12] = Y[1]; Luma[13] = Y[0]; Luma[14] = Y[0]; Luma[15] = Y[0]; break; case 6: Luma[0] = Y[3]; Luma[1] = Y[3]; Luma[2] = Y[2]; Luma[3] = Y[2]; Luma[4] = Y[3]; Luma[5] = Y[2]; Luma[6] = Y[1]; Luma[7] = Y[1]; Luma[8] = Y[2]; Luma[9] = Y[2]; Luma[10] = Y[1]; Luma[11] = Y[0]; Luma[12] = Y[1]; Luma[13] = Y[1]; Luma[14] = Y[0]; Luma[15] = Y[0]; break; case 7: Luma[0] = Y[3]; Luma[1] = Y[3]; Luma[2] = Y[2]; Luma[3] = Y[1]; Luma[4] = Y[3]; Luma[5] = Y[2]; Luma[6] = Y[1]; Luma[7] = Y[0]; Luma[8] = Y[3]; Luma[9] = Y[2]; Luma[10] = Y[1]; Luma[11] = Y[0]; 
Luma[12] = Y[2]; Luma[13] = Y[1]; Luma[14] = Y[0]; Luma[15] = Y[0]; break; default: Luma[0] = Y[0]; Luma[1] = Y[0]; Luma[2] = Y[1]; Luma[3] = Y[1]; Luma[4] = Y[0]; Luma[5] = Y[0]; Luma[6] = Y[1]; Luma[7] = Y[1]; Luma[8] = Y[2]; Luma[9] = Y[2]; Luma[10] = Y[3]; Luma[11] = Y[3]; Luma[12] = Y[2]; Luma[13] = Y[2]; Luma[14] = Y[3]; Luma[15] = Y[3]; break; } ulti_convert_yuv(frame, x, y, Luma, chroma); } static int ulti_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; UltimotionDecodeContext *s=avctx->priv_data; int modifier = 0; int uniq = 0; int mode = 0; int blocks = 0; int done = 0; int x = 0, y = 0; int i; int skip; int tmp; if(s->frame.data[0]) avctx->release_buffer(avctx, &s->frame); s->frame.reference = 1; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if(avctx->get_buffer(avctx, &s->frame) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } while(!done) { int idx; if(blocks >= s->blocks || y >= s->height) break;//all blocks decoded idx = *buf++; if((idx & 0xF8) == 0x70) { switch(idx) { case 0x70: //change modifier modifier = *buf++; if(modifier>1) av_log(avctx, AV_LOG_INFO, "warning: modifier must be 0 or 1, got %i\n", modifier); break; case 0x71: // set uniq flag uniq = 1; break; case 0x72: //toggle mode mode = !mode; break; case 0x73: //end-of-frame done = 1; break; case 0x74: //skip some blocks skip = *buf++; if ((blocks + skip) >= s->blocks) break; blocks += skip; x += skip * 8; while(x >= s->width) { x -= s->width; y += 8; } break; default: av_log(avctx, AV_LOG_INFO, "warning: unknown escape 0x%02X\n", idx); } } else { //handle one block int code; int cf; int angle = 0; uint8_t Y[4]; // luma samples of block int tx = 0, ty = 0; //coords of subblock int chroma = 0; if (mode || uniq) { uniq = 0; cf = 1; chroma = 0; } else { cf = 0; if (idx) chroma = *buf++; } for (i = 0; i < 4; i++) { // 
for every subblock code = (idx >> (6 - i*2)) & 3; //extract 2 bits if(!code) //skip subblock continue; if(cf) chroma = *buf++; tx = x + block_coords[i * 2]; ty = y + block_coords[(i * 2) + 1]; switch(code) { case 1: tmp = *buf++; angle = angle_by_index[(tmp >> 6) & 0x3]; Y[0] = tmp & 0x3F; Y[1] = Y[0]; if (angle) { Y[2] = Y[0]+1; if (Y[2] > 0x3F) Y[2] = 0x3F; Y[3] = Y[2]; } else { Y[2] = Y[0]; Y[3] = Y[0]; } break; case 2: if (modifier) { // unpack four luma samples tmp = bytestream_get_be24(&buf); Y[0] = (tmp >> 18) & 0x3F; Y[1] = (tmp >> 12) & 0x3F; Y[2] = (tmp >> 6) & 0x3F; Y[3] = tmp & 0x3F; angle = 16; } else { // retrieve luma samples from codebook tmp = bytestream_get_be16(&buf); angle = (tmp >> 12) & 0xF; tmp &= 0xFFF; tmp <<= 2; Y[0] = s->ulti_codebook[tmp]; Y[1] = s->ulti_codebook[tmp + 1]; Y[2] = s->ulti_codebook[tmp + 2]; Y[3] = s->ulti_codebook[tmp + 3]; } break; case 3: if (modifier) { // all 16 luma samples uint8_t Luma[16]; tmp = bytestream_get_be24(&buf); Luma[0] = (tmp >> 18) & 0x3F; Luma[1] = (tmp >> 12) & 0x3F; Luma[2] = (tmp >> 6) & 0x3F; Luma[3] = tmp & 0x3F; tmp = bytestream_get_be24(&buf); Luma[4] = (tmp >> 18) & 0x3F; Luma[5] = (tmp >> 12) & 0x3F; Luma[6] = (tmp >> 6) & 0x3F; Luma[7] = tmp & 0x3F; tmp = bytestream_get_be24(&buf); Luma[8] = (tmp >> 18) & 0x3F; Luma[9] = (tmp >> 12) & 0x3F; Luma[10] = (tmp >> 6) & 0x3F; Luma[11] = tmp & 0x3F; tmp = bytestream_get_be24(&buf); Luma[12] = (tmp >> 18) & 0x3F; Luma[13] = (tmp >> 12) & 0x3F; Luma[14] = (tmp >> 6) & 0x3F; Luma[15] = tmp & 0x3F; ulti_convert_yuv(&s->frame, tx, ty, Luma, chroma); } else { tmp = *buf++; if(tmp & 0x80) { angle = (tmp >> 4) & 0x7; tmp = (tmp << 8) + *buf++; Y[0] = (tmp >> 6) & 0x3F; Y[1] = tmp & 0x3F; Y[2] = (*buf++) & 0x3F; Y[3] = (*buf++) & 0x3F; ulti_grad(&s->frame, tx, ty, Y, chroma, angle); //draw block } else { // some patterns int f0, f1; f0 = *buf++; f1 = tmp; Y[0] = (*buf++) & 0x3F; Y[1] = (*buf++) & 0x3F; ulti_pattern(&s->frame, tx, ty, f1, f0, Y[0], Y[1], 
chroma); } } break; } if(code != 3) ulti_grad(&s->frame, tx, ty, Y, chroma, angle); // draw block } blocks++; x += 8; if(x >= s->width) { x = 0; y += 8; } } } *data_size=sizeof(AVFrame); *(AVFrame*)data= s->frame; return buf_size; } AVCodec ulti_decoder = { "ultimotion", AVMEDIA_TYPE_VIDEO, CODEC_ID_ULTI, sizeof(UltimotionDecodeContext), ulti_decode_init, NULL, ulti_decode_end, ulti_decode_frame, CODEC_CAP_DR1, NULL, .long_name = NULL_IF_CONFIG_SMALL("IBM UltiMotion"), };
123linslouis-android-video-cutter
jni/libavcodec/ulti.c
C
asf20
14,537
/* * MPEG4 encoder. * Copyright (c) 2000,2001 Fabrice Bellard * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "mpegvideo.h" #include "h263.h" #include "mpeg4video.h" //The uni_DCtab_* tables below contain unified bits+length tables to encode DC //differences in mpeg4. Unified in the sense that the specification specifies //this encoding in several steps. static uint8_t uni_DCtab_lum_len[512]; static uint8_t uni_DCtab_chrom_len[512]; static uint16_t uni_DCtab_lum_bits[512]; static uint16_t uni_DCtab_chrom_bits[512]; //unified encoding tables for run length encoding of coefficients //unified in the sense that the specification specifies the encoding in several steps. 
static uint32_t uni_mpeg4_intra_rl_bits[64*64*2*2]; static uint8_t uni_mpeg4_intra_rl_len [64*64*2*2]; static uint32_t uni_mpeg4_inter_rl_bits[64*64*2*2]; static uint8_t uni_mpeg4_inter_rl_len [64*64*2*2]; //#define UNI_MPEG4_ENC_INDEX(last,run,level) ((last)*128 + (run)*256 + (level)) //#define UNI_MPEG4_ENC_INDEX(last,run,level) ((last)*128*64 + (run) + (level)*64) #define UNI_MPEG4_ENC_INDEX(last,run,level) ((last)*128*64 + (run)*128 + (level)) /* mpeg4 inter max level: 24/6 max run: 53/63 intra max level: 53/16 max run: 29/41 */ /** * Returns the number of bits that encoding the 8x8 block in block would need. * @param[in] block_last_index last index in scantable order that refers to a non zero element in block. */ static inline int get_block_rate(MpegEncContext * s, DCTELEM block[64], int block_last_index, uint8_t scantable[64]){ int last=0; int j; int rate=0; for(j=1; j<=block_last_index; j++){ const int index= scantable[j]; int level= block[index]; if(level){ level+= 64; if((level&(~127)) == 0){ if(j<block_last_index) rate+= s->intra_ac_vlc_length [UNI_AC_ENC_INDEX(j-last-1, level)]; else rate+= s->intra_ac_vlc_last_length[UNI_AC_ENC_INDEX(j-last-1, level)]; }else rate += s->ac_esc_length; last= j; } } return rate; } /** * Restores the ac coefficients in block that have been changed by decide_ac_pred(). * This function also restores s->block_last_index. 
* @param[in,out] block MB coefficients, these will be restored * @param[in] dir ac prediction direction for each 8x8 block * @param[out] st scantable for each 8x8 block * @param[in] zigzag_last_index index refering to the last non zero coefficient in zigzag order */ static inline void restore_ac_coeffs(MpegEncContext * s, DCTELEM block[6][64], const int dir[6], uint8_t *st[6], const int zigzag_last_index[6]) { int i, n; memcpy(s->block_last_index, zigzag_last_index, sizeof(int)*6); for(n=0; n<6; n++){ int16_t *ac_val = s->ac_val[0][0] + s->block_index[n] * 16; st[n]= s->intra_scantable.permutated; if(dir[n]){ /* top prediction */ for(i=1; i<8; i++){ block[n][s->dsp.idct_permutation[i ]] = ac_val[i+8]; } }else{ /* left prediction */ for(i=1; i<8; i++){ block[n][s->dsp.idct_permutation[i<<3]]= ac_val[i ]; } } } } /** * Returns the optimal value (0 or 1) for the ac_pred element for the given MB in mpeg4. * This function will also update s->block_last_index and s->ac_val. * @param[in,out] block MB coefficients, these will be updated if 1 is returned * @param[in] dir ac prediction direction for each 8x8 block * @param[out] st scantable for each 8x8 block * @param[out] zigzag_last_index index refering to the last non zero coefficient in zigzag order */ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], const int dir[6], uint8_t *st[6], int zigzag_last_index[6]) { int score= 0; int i, n; int8_t * const qscale_table= s->current_picture.qscale_table; memcpy(zigzag_last_index, s->block_last_index, sizeof(int)*6); for(n=0; n<6; n++){ int16_t *ac_val, *ac_val1; score -= get_block_rate(s, block[n], s->block_last_index[n], s->intra_scantable.permutated); ac_val = s->ac_val[0][0] + s->block_index[n] * 16; ac_val1= ac_val; if(dir[n]){ const int xy= s->mb_x + s->mb_y*s->mb_stride - s->mb_stride; /* top prediction */ ac_val-= s->block_wrap[n]*16; if(s->mb_y==0 || s->qscale == qscale_table[xy] || n==2 || n==3){ /* same qscale */ for(i=1; i<8; i++){ const int 
level= block[n][s->dsp.idct_permutation[i ]]; block[n][s->dsp.idct_permutation[i ]] = level - ac_val[i+8]; ac_val1[i ]= block[n][s->dsp.idct_permutation[i<<3]]; ac_val1[i+8]= level; } }else{ /* different qscale, we must rescale */ for(i=1; i<8; i++){ const int level= block[n][s->dsp.idct_permutation[i ]]; block[n][s->dsp.idct_permutation[i ]] = level - ROUNDED_DIV(ac_val[i + 8]*qscale_table[xy], s->qscale); ac_val1[i ]= block[n][s->dsp.idct_permutation[i<<3]]; ac_val1[i+8]= level; } } st[n]= s->intra_h_scantable.permutated; }else{ const int xy= s->mb_x-1 + s->mb_y*s->mb_stride; /* left prediction */ ac_val-= 16; if(s->mb_x==0 || s->qscale == qscale_table[xy] || n==1 || n==3){ /* same qscale */ for(i=1; i<8; i++){ const int level= block[n][s->dsp.idct_permutation[i<<3]]; block[n][s->dsp.idct_permutation[i<<3]]= level - ac_val[i]; ac_val1[i ]= level; ac_val1[i+8]= block[n][s->dsp.idct_permutation[i ]]; } }else{ /* different qscale, we must rescale */ for(i=1; i<8; i++){ const int level= block[n][s->dsp.idct_permutation[i<<3]]; block[n][s->dsp.idct_permutation[i<<3]]= level - ROUNDED_DIV(ac_val[i]*qscale_table[xy], s->qscale); ac_val1[i ]= level; ac_val1[i+8]= block[n][s->dsp.idct_permutation[i ]]; } } st[n]= s->intra_v_scantable.permutated; } for(i=63; i>0; i--) //FIXME optimize if(block[n][ st[n][i] ]) break; s->block_last_index[n]= i; score += get_block_rate(s, block[n], s->block_last_index[n], st[n]); } if(score < 0){ return 1; }else{ restore_ac_coeffs(s, block, dir, st, zigzag_last_index); return 0; } } /** * modify mb_type & qscale so that encoding is acually possible in mpeg4 */ void ff_clean_mpeg4_qscales(MpegEncContext *s){ int i; int8_t * const qscale_table= s->current_picture.qscale_table; ff_clean_h263_qscales(s); if(s->pict_type== FF_B_TYPE){ int odd=0; /* ok, come on, this isn't funny anymore, there's more code for handling this mpeg4 mess than for the actual adaptive quantization */ for(i=0; i<s->mb_num; i++){ int mb_xy= s->mb_index2xy[i]; odd += 
qscale_table[mb_xy]&1; } if(2*odd > s->mb_num) odd=1; else odd=0; for(i=0; i<s->mb_num; i++){ int mb_xy= s->mb_index2xy[i]; if((qscale_table[mb_xy]&1) != odd) qscale_table[mb_xy]++; if(qscale_table[mb_xy] > 31) qscale_table[mb_xy]= 31; } for(i=1; i<s->mb_num; i++){ int mb_xy= s->mb_index2xy[i]; if(qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i-1]] && (s->mb_type[mb_xy]&CANDIDATE_MB_TYPE_DIRECT)){ s->mb_type[mb_xy]|= CANDIDATE_MB_TYPE_BIDIR; } } } } /** * encodes the dc value. * @param n block index (0-3 are luma, 4-5 are chroma) */ static inline void mpeg4_encode_dc(PutBitContext * s, int level, int n) { #if 1 /* DC will overflow if level is outside the [-255,255] range. */ level+=256; if (n < 4) { /* luminance */ put_bits(s, uni_DCtab_lum_len[level], uni_DCtab_lum_bits[level]); } else { /* chrominance */ put_bits(s, uni_DCtab_chrom_len[level], uni_DCtab_chrom_bits[level]); } #else int size, v; /* find number of bits */ size = 0; v = abs(level); while (v) { v >>= 1; size++; } if (n < 4) { /* luminance */ put_bits(&s->pb, ff_mpeg4_DCtab_lum[size][1], ff_mpeg4_DCtab_lum[size][0]); } else { /* chrominance */ put_bits(&s->pb, ff_mpeg4_DCtab_chrom[size][1], ff_mpeg4_DCtab_chrom[size][0]); } /* encode remaining bits */ if (size > 0) { if (level < 0) level = (-level) ^ ((1 << size) - 1); put_bits(&s->pb, size, level); if (size > 8) put_bits(&s->pb, 1, 1); } #endif } static inline int mpeg4_get_dc_length(int level, int n){ if (n < 4) { return uni_DCtab_lum_len[level + 256]; } else { return uni_DCtab_chrom_len[level + 256]; } } /** * encodes a 8x8 block * @param n block index (0-3 are luma, 4-5 are chroma) */ static inline void mpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n, int intra_dc, uint8_t *scan_table, PutBitContext *dc_pb, PutBitContext *ac_pb) { int i, last_non_zero; #if 0 //variables for the outcommented version int code, sign, last; #endif const RLTable *rl; uint32_t *bits_tab; uint8_t *len_tab; const int last_index = s->block_last_index[n]; 
if (s->mb_intra) { //Note gcc (3.2.1 at least) will optimize this away /* mpeg4 based DC predictor */ mpeg4_encode_dc(dc_pb, intra_dc, n); if(last_index<1) return; i = 1; rl = &ff_mpeg4_rl_intra; bits_tab= uni_mpeg4_intra_rl_bits; len_tab = uni_mpeg4_intra_rl_len; } else { if(last_index<0) return; i = 0; rl = &ff_h263_rl_inter; bits_tab= uni_mpeg4_inter_rl_bits; len_tab = uni_mpeg4_inter_rl_len; } /* AC coefs */ last_non_zero = i - 1; #if 1 for (; i < last_index; i++) { int level = block[ scan_table[i] ]; if (level) { int run = i - last_non_zero - 1; level+=64; if((level&(~127)) == 0){ const int index= UNI_MPEG4_ENC_INDEX(0, run, level); put_bits(ac_pb, len_tab[index], bits_tab[index]); }else{ //ESC3 put_bits(ac_pb, 7+2+1+6+1+12+1, (3<<23)+(3<<21)+(0<<20)+(run<<14)+(1<<13)+(((level-64)&0xfff)<<1)+1); } last_non_zero = i; } } /*if(i<=last_index)*/{ int level = block[ scan_table[i] ]; int run = i - last_non_zero - 1; level+=64; if((level&(~127)) == 0){ const int index= UNI_MPEG4_ENC_INDEX(1, run, level); put_bits(ac_pb, len_tab[index], bits_tab[index]); }else{ //ESC3 put_bits(ac_pb, 7+2+1+6+1+12+1, (3<<23)+(3<<21)+(1<<20)+(run<<14)+(1<<13)+(((level-64)&0xfff)<<1)+1); } } #else for (; i <= last_index; i++) { const int slevel = block[ scan_table[i] ]; if (slevel) { int level; int run = i - last_non_zero - 1; last = (i == last_index); sign = 0; level = slevel; if (level < 0) { sign = 1; level = -level; } code = get_rl_index(rl, last, run, level); put_bits(ac_pb, rl->table_vlc[code][1], rl->table_vlc[code][0]); if (code == rl->n) { int level1, run1; level1 = level - rl->max_level[last][run]; if (level1 < 1) goto esc2; code = get_rl_index(rl, last, run, level1); if (code == rl->n) { esc2: put_bits(ac_pb, 1, 1); if (level > MAX_LEVEL) goto esc3; run1 = run - rl->max_run[last][level] - 1; if (run1 < 0) goto esc3; code = get_rl_index(rl, last, run1, level); if (code == rl->n) { esc3: /* third escape */ put_bits(ac_pb, 1, 1); put_bits(ac_pb, 1, last); put_bits(ac_pb, 6, run); 
put_bits(ac_pb, 1, 1); put_sbits(ac_pb, 12, slevel); put_bits(ac_pb, 1, 1); } else { /* second escape */ put_bits(ac_pb, 1, 0); put_bits(ac_pb, rl->table_vlc[code][1], rl->table_vlc[code][0]); put_bits(ac_pb, 1, sign); } } else { /* first escape */ put_bits(ac_pb, 1, 0); put_bits(ac_pb, rl->table_vlc[code][1], rl->table_vlc[code][0]); put_bits(ac_pb, 1, sign); } } else { put_bits(ac_pb, 1, sign); } last_non_zero = i; } } #endif } static int mpeg4_get_block_length(MpegEncContext * s, DCTELEM * block, int n, int intra_dc, uint8_t *scan_table) { int i, last_non_zero; uint8_t *len_tab; const int last_index = s->block_last_index[n]; int len=0; if (s->mb_intra) { //Note gcc (3.2.1 at least) will optimize this away /* mpeg4 based DC predictor */ len += mpeg4_get_dc_length(intra_dc, n); if(last_index<1) return len; i = 1; len_tab = uni_mpeg4_intra_rl_len; } else { if(last_index<0) return 0; i = 0; len_tab = uni_mpeg4_inter_rl_len; } /* AC coefs */ last_non_zero = i - 1; for (; i < last_index; i++) { int level = block[ scan_table[i] ]; if (level) { int run = i - last_non_zero - 1; level+=64; if((level&(~127)) == 0){ const int index= UNI_MPEG4_ENC_INDEX(0, run, level); len += len_tab[index]; }else{ //ESC3 len += 7+2+1+6+1+12+1; } last_non_zero = i; } } /*if(i<=last_index)*/{ int level = block[ scan_table[i] ]; int run = i - last_non_zero - 1; level+=64; if((level&(~127)) == 0){ const int index= UNI_MPEG4_ENC_INDEX(1, run, level); len += len_tab[index]; }else{ //ESC3 len += 7+2+1+6+1+12+1; } } return len; } static inline void mpeg4_encode_blocks(MpegEncContext * s, DCTELEM block[6][64], int intra_dc[6], uint8_t **scan_table, PutBitContext *dc_pb, PutBitContext *ac_pb){ int i; if(scan_table){ if(s->flags2 & CODEC_FLAG2_NO_OUTPUT){ for (i = 0; i < 6; i++) { skip_put_bits(&s->pb, mpeg4_get_block_length(s, block[i], i, intra_dc[i], scan_table[i])); } }else{ /* encode each block */ for (i = 0; i < 6; i++) { mpeg4_encode_block(s, block[i], i, intra_dc[i], scan_table[i], dc_pb, 
ac_pb); } } }else{ if(s->flags2 & CODEC_FLAG2_NO_OUTPUT){ for (i = 0; i < 6; i++) { skip_put_bits(&s->pb, mpeg4_get_block_length(s, block[i], i, 0, s->intra_scantable.permutated)); } }else{ /* encode each block */ for (i = 0; i < 6; i++) { mpeg4_encode_block(s, block[i], i, 0, s->intra_scantable.permutated, dc_pb, ac_pb); } } } } //FIXME this is duplicated to h263.c static const int dquant_code[5]= {1,0,9,2,3}; void mpeg4_encode_mb(MpegEncContext * s, DCTELEM block[6][64], int motion_x, int motion_y) { int cbpc, cbpy, pred_x, pred_y; PutBitContext * const pb2 = s->data_partitioning ? &s->pb2 : &s->pb; PutBitContext * const tex_pb = s->data_partitioning && s->pict_type!=FF_B_TYPE ? &s->tex_pb : &s->pb; PutBitContext * const dc_pb = s->data_partitioning && s->pict_type!=FF_I_TYPE ? &s->pb2 : &s->pb; const int interleaved_stats= (s->flags&CODEC_FLAG_PASS1) && !s->data_partitioning ? 1 : 0; if (!s->mb_intra) { int i, cbp; if(s->pict_type==FF_B_TYPE){ static const int mb_type_table[8]= {-1, 3, 2, 1,-1,-1,-1, 0}; /* convert from mv_dir to type */ int mb_type= mb_type_table[s->mv_dir]; if(s->mb_x==0){ for(i=0; i<2; i++){ s->last_mv[i][0][0]= s->last_mv[i][0][1]= s->last_mv[i][1][0]= s->last_mv[i][1][1]= 0; } } assert(s->dquant>=-2 && s->dquant<=2); assert((s->dquant&1)==0); assert(mb_type>=0); /* nothing to do if this MB was skipped in the next P Frame */ if(s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]){ //FIXME avoid DCT & ... 
s->skip_count++; s->mv[0][0][0]= s->mv[0][0][1]= s->mv[1][0][0]= s->mv[1][0][1]= 0; s->mv_dir= MV_DIR_FORWARD; //doesn't matter s->qscale -= s->dquant; // s->mb_skipped=1; return; } cbp= get_b_cbp(s, block, motion_x, motion_y, mb_type); if ((cbp | motion_x | motion_y | mb_type) ==0) { /* direct MB with MV={0,0} */ assert(s->dquant==0); put_bits(&s->pb, 1, 1); /* mb not coded modb1=1 */ if(interleaved_stats){ s->misc_bits++; s->last_bits++; } s->skip_count++; return; } put_bits(&s->pb, 1, 0); /* mb coded modb1=0 */ put_bits(&s->pb, 1, cbp ? 0 : 1); /* modb2 */ //FIXME merge put_bits(&s->pb, mb_type+1, 1); // this table is so simple that we don't need it :) if(cbp) put_bits(&s->pb, 6, cbp); if(cbp && mb_type){ if(s->dquant) put_bits(&s->pb, 2, (s->dquant>>2)+3); else put_bits(&s->pb, 1, 0); }else s->qscale -= s->dquant; if(!s->progressive_sequence){ if(cbp) put_bits(&s->pb, 1, s->interlaced_dct); if(mb_type) // not direct mode put_bits(&s->pb, 1, s->mv_type == MV_TYPE_FIELD); } if(interleaved_stats){ s->misc_bits+= get_bits_diff(s); } if(mb_type == 0){ assert(s->mv_dir & MV_DIRECT); ff_h263_encode_motion_vector(s, motion_x, motion_y, 1); s->b_count++; s->f_count++; }else{ assert(mb_type > 0 && mb_type < 4); if(s->mv_type != MV_TYPE_FIELD){ if(s->mv_dir & MV_DIR_FORWARD){ ff_h263_encode_motion_vector(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code); s->last_mv[0][0][0]= s->last_mv[0][1][0]= s->mv[0][0][0]; s->last_mv[0][0][1]= s->last_mv[0][1][1]= s->mv[0][0][1]; s->f_count++; } if(s->mv_dir & MV_DIR_BACKWARD){ ff_h263_encode_motion_vector(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code); s->last_mv[1][0][0]= s->last_mv[1][1][0]= s->mv[1][0][0]; s->last_mv[1][0][1]= s->last_mv[1][1][1]= s->mv[1][0][1]; s->b_count++; } }else{ if(s->mv_dir & MV_DIR_FORWARD){ put_bits(&s->pb, 1, s->field_select[0][0]); put_bits(&s->pb, 1, s->field_select[0][1]); } if(s->mv_dir & MV_DIR_BACKWARD){ 
put_bits(&s->pb, 1, s->field_select[1][0]); put_bits(&s->pb, 1, s->field_select[1][1]); } if(s->mv_dir & MV_DIR_FORWARD){ for(i=0; i<2; i++){ ff_h263_encode_motion_vector(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->mv[0][i][1] - s->last_mv[0][i][1]/2, s->f_code); s->last_mv[0][i][0]= s->mv[0][i][0]; s->last_mv[0][i][1]= s->mv[0][i][1]*2; } s->f_count++; } if(s->mv_dir & MV_DIR_BACKWARD){ for(i=0; i<2; i++){ ff_h263_encode_motion_vector(s, s->mv[1][i][0] - s->last_mv[1][i][0] , s->mv[1][i][1] - s->last_mv[1][i][1]/2, s->b_code); s->last_mv[1][i][0]= s->mv[1][i][0]; s->last_mv[1][i][1]= s->mv[1][i][1]*2; } s->b_count++; } } } if(interleaved_stats){ s->mv_bits+= get_bits_diff(s); } mpeg4_encode_blocks(s, block, NULL, NULL, NULL, &s->pb); if(interleaved_stats){ s->p_tex_bits+= get_bits_diff(s); } }else{ /* s->pict_type==FF_B_TYPE */ cbp= get_p_cbp(s, block, motion_x, motion_y); if ((cbp | motion_x | motion_y | s->dquant) == 0 && s->mv_type==MV_TYPE_16X16) { /* check if the B frames can skip it too, as we must skip it if we skip here why didn't they just compress the skip-mb bits instead of reusing them ?! 
*/ if(s->max_b_frames>0){ int i; int x,y, offset; uint8_t *p_pic; x= s->mb_x*16; y= s->mb_y*16; if(x+16 > s->width) x= s->width-16; if(y+16 > s->height) y= s->height-16; offset= x + y*s->linesize; p_pic= s->new_picture.data[0] + offset; s->mb_skipped=1; for(i=0; i<s->max_b_frames; i++){ uint8_t *b_pic; int diff; Picture *pic= s->reordered_input_picture[i+1]; if(pic==NULL || pic->pict_type!=FF_B_TYPE) break; b_pic= pic->data[0] + offset; if(pic->type != FF_BUFFER_TYPE_SHARED) b_pic+= INPLACE_OFFSET; diff= s->dsp.sad[0](NULL, p_pic, b_pic, s->linesize, 16); if(diff>s->qscale*70){ //FIXME check that 70 is optimal s->mb_skipped=0; break; } } }else s->mb_skipped=1; if(s->mb_skipped==1){ /* skip macroblock */ put_bits(&s->pb, 1, 1); if(interleaved_stats){ s->misc_bits++; s->last_bits++; } s->skip_count++; return; } } put_bits(&s->pb, 1, 0); /* mb coded */ cbpc = cbp & 3; cbpy = cbp >> 2; cbpy ^= 0xf; if(s->mv_type==MV_TYPE_16X16){ if(s->dquant) cbpc+= 8; put_bits(&s->pb, ff_h263_inter_MCBPC_bits[cbpc], ff_h263_inter_MCBPC_code[cbpc]); put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]); if(s->dquant) put_bits(pb2, 2, dquant_code[s->dquant+2]); if(!s->progressive_sequence){ if(cbp) put_bits(pb2, 1, s->interlaced_dct); put_bits(pb2, 1, 0); } if(interleaved_stats){ s->misc_bits+= get_bits_diff(s); } /* motion vectors: 16x16 mode */ h263_pred_motion(s, 0, 0, &pred_x, &pred_y); ff_h263_encode_motion_vector(s, motion_x - pred_x, motion_y - pred_y, s->f_code); }else if(s->mv_type==MV_TYPE_FIELD){ if(s->dquant) cbpc+= 8; put_bits(&s->pb, ff_h263_inter_MCBPC_bits[cbpc], ff_h263_inter_MCBPC_code[cbpc]); put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]); if(s->dquant) put_bits(pb2, 2, dquant_code[s->dquant+2]); assert(!s->progressive_sequence); if(cbp) put_bits(pb2, 1, s->interlaced_dct); put_bits(pb2, 1, 1); if(interleaved_stats){ s->misc_bits+= get_bits_diff(s); } /* motion vectors: 16x8 interlaced mode */ h263_pred_motion(s, 0, 0, &pred_x, 
&pred_y); pred_y /=2; put_bits(&s->pb, 1, s->field_select[0][0]); put_bits(&s->pb, 1, s->field_select[0][1]); ff_h263_encode_motion_vector(s, s->mv[0][0][0] - pred_x, s->mv[0][0][1] - pred_y, s->f_code); ff_h263_encode_motion_vector(s, s->mv[0][1][0] - pred_x, s->mv[0][1][1] - pred_y, s->f_code); }else{ assert(s->mv_type==MV_TYPE_8X8); put_bits(&s->pb, ff_h263_inter_MCBPC_bits[cbpc+16], ff_h263_inter_MCBPC_code[cbpc+16]); put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]); if(!s->progressive_sequence){ if(cbp) put_bits(pb2, 1, s->interlaced_dct); } if(interleaved_stats){ s->misc_bits+= get_bits_diff(s); } for(i=0; i<4; i++){ /* motion vectors: 8x8 mode*/ h263_pred_motion(s, i, 0, &pred_x, &pred_y); ff_h263_encode_motion_vector(s, s->current_picture.motion_val[0][ s->block_index[i] ][0] - pred_x, s->current_picture.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code); } } if(interleaved_stats){ s->mv_bits+= get_bits_diff(s); } mpeg4_encode_blocks(s, block, NULL, NULL, NULL, tex_pb); if(interleaved_stats){ s->p_tex_bits+= get_bits_diff(s); } s->f_count++; } } else { int cbp; int dc_diff[6]; //dc values with the dc prediction subtracted int dir[6]; //prediction direction int zigzag_last_index[6]; uint8_t *scan_table[6]; int i; for(i=0; i<6; i++){ dc_diff[i]= ff_mpeg4_pred_dc(s, i, block[i][0], &dir[i], 1); } if(s->flags & CODEC_FLAG_AC_PRED){ s->ac_pred= decide_ac_pred(s, block, dir, scan_table, zigzag_last_index); }else{ for(i=0; i<6; i++) scan_table[i]= s->intra_scantable.permutated; } /* compute cbp */ cbp = 0; for (i = 0; i < 6; i++) { if (s->block_last_index[i] >= 1) cbp |= 1 << (5 - i); } cbpc = cbp & 3; if (s->pict_type == FF_I_TYPE) { if(s->dquant) cbpc+=4; put_bits(&s->pb, ff_h263_intra_MCBPC_bits[cbpc], ff_h263_intra_MCBPC_code[cbpc]); } else { if(s->dquant) cbpc+=8; put_bits(&s->pb, 1, 0); /* mb coded */ put_bits(&s->pb, ff_h263_inter_MCBPC_bits[cbpc + 4], ff_h263_inter_MCBPC_code[cbpc + 4]); } put_bits(pb2, 1, s->ac_pred); cbpy = cbp 
>> 2; put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]); if(s->dquant) put_bits(dc_pb, 2, dquant_code[s->dquant+2]); if(!s->progressive_sequence){ put_bits(dc_pb, 1, s->interlaced_dct); } if(interleaved_stats){ s->misc_bits+= get_bits_diff(s); } mpeg4_encode_blocks(s, block, dc_diff, scan_table, dc_pb, tex_pb); if(interleaved_stats){ s->i_tex_bits+= get_bits_diff(s); } s->i_count++; /* restore ac coeffs & last_index stuff if we messed them up with the prediction */ if(s->ac_pred) restore_ac_coeffs(s, block, dir, scan_table, zigzag_last_index); } } /** * add mpeg4 stuffing bits (01...1) */ void ff_mpeg4_stuffing(PutBitContext * pbc) { int length; put_bits(pbc, 1, 0); length= (-put_bits_count(pbc))&7; if(length) put_bits(pbc, length, (1<<length)-1); } /* must be called before writing the header */ void ff_set_mpeg4_time(MpegEncContext * s){ if(s->pict_type==FF_B_TYPE){ ff_mpeg4_init_direct_mv(s); }else{ s->last_time_base= s->time_base; s->time_base= s->time/s->avctx->time_base.den; } } static void mpeg4_encode_gop_header(MpegEncContext * s){ int hours, minutes, seconds; int64_t time; put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, GOP_STARTCODE); time= s->current_picture_ptr->pts; if(s->reordered_input_picture[1]) time= FFMIN(time, s->reordered_input_picture[1]->pts); time= time*s->avctx->time_base.num; seconds= time/s->avctx->time_base.den; minutes= seconds/60; seconds %= 60; hours= minutes/60; minutes %= 60; hours%=24; put_bits(&s->pb, 5, hours); put_bits(&s->pb, 6, minutes); put_bits(&s->pb, 1, 1); put_bits(&s->pb, 6, seconds); put_bits(&s->pb, 1, !!(s->flags&CODEC_FLAG_CLOSED_GOP)); put_bits(&s->pb, 1, 0); //broken link == NO s->last_time_base= time / s->avctx->time_base.den; ff_mpeg4_stuffing(&s->pb); } static void mpeg4_encode_visual_object_header(MpegEncContext * s){ int profile_and_level_indication; int vo_ver_id; if(s->avctx->profile != FF_PROFILE_UNKNOWN){ profile_and_level_indication = s->avctx->profile << 4; }else if(s->max_b_frames || 
s->quarter_sample){ profile_and_level_indication= 0xF0; // adv simple }else{ profile_and_level_indication= 0x00; // simple } if(s->avctx->level != FF_LEVEL_UNKNOWN){ profile_and_level_indication |= s->avctx->level; }else{ profile_and_level_indication |= 1; //level 1 } if(profile_and_level_indication>>4 == 0xF){ vo_ver_id= 5; }else{ vo_ver_id= 1; } //FIXME levels put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, VOS_STARTCODE); put_bits(&s->pb, 8, profile_and_level_indication); put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, VISUAL_OBJ_STARTCODE); put_bits(&s->pb, 1, 1); put_bits(&s->pb, 4, vo_ver_id); put_bits(&s->pb, 3, 1); //priority put_bits(&s->pb, 4, 1); //visual obj type== video obj put_bits(&s->pb, 1, 0); //video signal type == no clue //FIXME ff_mpeg4_stuffing(&s->pb); } static void mpeg4_encode_vol_header(MpegEncContext * s, int vo_number, int vol_number) { int vo_ver_id; if (!CONFIG_MPEG4_ENCODER) return; if(s->max_b_frames || s->quarter_sample){ vo_ver_id= 5; s->vo_type= ADV_SIMPLE_VO_TYPE; }else{ vo_ver_id= 1; s->vo_type= SIMPLE_VO_TYPE; } put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, 0x100 + vo_number); /* video obj */ put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, 0x120 + vol_number); /* video obj layer */ put_bits(&s->pb, 1, 0); /* random access vol */ put_bits(&s->pb, 8, s->vo_type); /* video obj type indication */ if(s->workaround_bugs & FF_BUG_MS) { put_bits(&s->pb, 1, 0); /* is obj layer id= no */ } else { put_bits(&s->pb, 1, 1); /* is obj layer id= yes */ put_bits(&s->pb, 4, vo_ver_id); /* is obj layer ver id */ put_bits(&s->pb, 3, 1); /* is obj layer priority */ } s->aspect_ratio_info= ff_h263_aspect_to_info(s->avctx->sample_aspect_ratio); put_bits(&s->pb, 4, s->aspect_ratio_info);/* aspect ratio info */ if (s->aspect_ratio_info == FF_ASPECT_EXTENDED){ put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num); put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.den); } if(s->workaround_bugs & FF_BUG_MS) { // put_bits(&s->pb, 1, 0); /* vol control 
parameters= no @@@ */ } else { put_bits(&s->pb, 1, 1); /* vol control parameters= yes */ put_bits(&s->pb, 2, 1); /* chroma format YUV 420/YV12 */ put_bits(&s->pb, 1, s->low_delay); put_bits(&s->pb, 1, 0); /* vbv parameters= no */ } put_bits(&s->pb, 2, RECT_SHAPE); /* vol shape= rectangle */ put_bits(&s->pb, 1, 1); /* marker bit */ put_bits(&s->pb, 16, s->avctx->time_base.den); if (s->time_increment_bits < 1) s->time_increment_bits = 1; put_bits(&s->pb, 1, 1); /* marker bit */ put_bits(&s->pb, 1, 0); /* fixed vop rate=no */ put_bits(&s->pb, 1, 1); /* marker bit */ put_bits(&s->pb, 13, s->width); /* vol width */ put_bits(&s->pb, 1, 1); /* marker bit */ put_bits(&s->pb, 13, s->height); /* vol height */ put_bits(&s->pb, 1, 1); /* marker bit */ put_bits(&s->pb, 1, s->progressive_sequence ? 0 : 1); put_bits(&s->pb, 1, 1); /* obmc disable */ if (vo_ver_id == 1) { put_bits(&s->pb, 1, s->vol_sprite_usage); /* sprite enable */ }else{ put_bits(&s->pb, 2, s->vol_sprite_usage); /* sprite enable */ } put_bits(&s->pb, 1, 0); /* not 8 bit == false */ put_bits(&s->pb, 1, s->mpeg_quant); /* quant type= (0=h263 style)*/ if(s->mpeg_quant){ ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix); ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix); } if (vo_ver_id != 1) put_bits(&s->pb, 1, s->quarter_sample); put_bits(&s->pb, 1, 1); /* complexity estimation disable */ s->resync_marker= s->rtp_mode; put_bits(&s->pb, 1, s->resync_marker ? 0 : 1);/* resync marker disable */ put_bits(&s->pb, 1, s->data_partitioning ? 
1 : 0); if(s->data_partitioning){ put_bits(&s->pb, 1, 0); /* no rvlc */ } if (vo_ver_id != 1){ put_bits(&s->pb, 1, 0); /* newpred */ put_bits(&s->pb, 1, 0); /* reduced res vop */ } put_bits(&s->pb, 1, 0); /* scalability */ ff_mpeg4_stuffing(&s->pb); /* user data */ if(!(s->flags & CODEC_FLAG_BITEXACT)){ put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, 0x1B2); /* user_data */ ff_put_string(&s->pb, LIBAVCODEC_IDENT, 0); } } /* write mpeg4 VOP header */ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number) { int time_incr; int time_div, time_mod; if(s->pict_type==FF_I_TYPE){ if(!(s->flags&CODEC_FLAG_GLOBAL_HEADER)){ if(s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) //HACK, the reference sw is buggy mpeg4_encode_visual_object_header(s); if(s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT || picture_number==0) //HACK, the reference sw is buggy mpeg4_encode_vol_header(s, 0, 0); } if(!(s->workaround_bugs & FF_BUG_MS)) mpeg4_encode_gop_header(s); } s->partitioned_frame= s->data_partitioning && s->pict_type!=FF_B_TYPE; put_bits(&s->pb, 16, 0); /* vop header */ put_bits(&s->pb, 16, VOP_STARTCODE); /* vop header */ put_bits(&s->pb, 2, s->pict_type - 1); /* pict type: I = 0 , P = 1 */ assert(s->time>=0); time_div= s->time/s->avctx->time_base.den; time_mod= s->time%s->avctx->time_base.den; time_incr= time_div - s->last_time_base; assert(time_incr >= 0); while(time_incr--) put_bits(&s->pb, 1, 1); put_bits(&s->pb, 1, 0); put_bits(&s->pb, 1, 1); /* marker */ put_bits(&s->pb, s->time_increment_bits, time_mod); /* time increment */ put_bits(&s->pb, 1, 1); /* marker */ put_bits(&s->pb, 1, 1); /* vop coded */ if ( s->pict_type == FF_P_TYPE || (s->pict_type == FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE)) { put_bits(&s->pb, 1, s->no_rounding); /* rounding type */ } put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */ if(!s->progressive_sequence){ put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first); put_bits(&s->pb, 1, s->alternate_scan); } //FIXME 
sprite stuff put_bits(&s->pb, 5, s->qscale); if (s->pict_type != FF_I_TYPE) put_bits(&s->pb, 3, s->f_code); /* fcode_for */ if (s->pict_type == FF_B_TYPE) put_bits(&s->pb, 3, s->b_code); /* fcode_back */ } static void init_uni_dc_tab(void) { int level, uni_code, uni_len; for(level=-256; level<256; level++){ int size, v, l; /* find number of bits */ size = 0; v = abs(level); while (v) { v >>= 1; size++; } if (level < 0) l= (-level) ^ ((1 << size) - 1); else l= level; /* luminance */ uni_code= ff_mpeg4_DCtab_lum[size][0]; uni_len = ff_mpeg4_DCtab_lum[size][1]; if (size > 0) { uni_code<<=size; uni_code|=l; uni_len+=size; if (size > 8){ uni_code<<=1; uni_code|=1; uni_len++; } } uni_DCtab_lum_bits[level+256]= uni_code; uni_DCtab_lum_len [level+256]= uni_len; /* chrominance */ uni_code= ff_mpeg4_DCtab_chrom[size][0]; uni_len = ff_mpeg4_DCtab_chrom[size][1]; if (size > 0) { uni_code<<=size; uni_code|=l; uni_len+=size; if (size > 8){ uni_code<<=1; uni_code|=1; uni_len++; } } uni_DCtab_chrom_bits[level+256]= uni_code; uni_DCtab_chrom_len [level+256]= uni_len; } } static void init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_tab){ int slevel, run, last; assert(MAX_LEVEL >= 64); assert(MAX_RUN >= 63); for(slevel=-64; slevel<64; slevel++){ if(slevel==0) continue; for(run=0; run<64; run++){ for(last=0; last<=1; last++){ const int index= UNI_MPEG4_ENC_INDEX(last, run, slevel+64); int level= slevel < 0 ? -slevel : slevel; int sign= slevel < 0 ? 
1 : 0; int bits, len, code; int level1, run1; len_tab[index]= 100; /* ESC0 */ code= get_rl_index(rl, last, run, level); bits= rl->table_vlc[code][0]; len= rl->table_vlc[code][1]; bits=bits*2+sign; len++; if(code!=rl->n && len < len_tab[index]){ bits_tab[index]= bits; len_tab [index]= len; } /* ESC1 */ bits= rl->table_vlc[rl->n][0]; len= rl->table_vlc[rl->n][1]; bits=bits*2; len++; //esc1 level1= level - rl->max_level[last][run]; if(level1>0){ code= get_rl_index(rl, last, run, level1); bits<<= rl->table_vlc[code][1]; len += rl->table_vlc[code][1]; bits += rl->table_vlc[code][0]; bits=bits*2+sign; len++; if(code!=rl->n && len < len_tab[index]){ bits_tab[index]= bits; len_tab [index]= len; } } /* ESC2 */ bits= rl->table_vlc[rl->n][0]; len= rl->table_vlc[rl->n][1]; bits=bits*4+2; len+=2; //esc2 run1 = run - rl->max_run[last][level] - 1; if(run1>=0){ code= get_rl_index(rl, last, run1, level); bits<<= rl->table_vlc[code][1]; len += rl->table_vlc[code][1]; bits += rl->table_vlc[code][0]; bits=bits*2+sign; len++; if(code!=rl->n && len < len_tab[index]){ bits_tab[index]= bits; len_tab [index]= len; } } /* ESC3 */ bits= rl->table_vlc[rl->n][0]; len = rl->table_vlc[rl->n][1]; bits=bits*4+3; len+=2; //esc3 bits=bits*2+last; len++; bits=bits*64+run; len+=6; bits=bits*2+1; len++; //marker bits=bits*4096+(slevel&0xfff); len+=12; bits=bits*2+1; len++; //marker if(len < len_tab[index]){ bits_tab[index]= bits; len_tab [index]= len; } } } } } static av_cold int encode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; int ret; static int done = 0; if((ret=MPV_encode_init(avctx)) < 0) return ret; if (!done) { done = 1; init_uni_dc_tab(); init_rl(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]); init_uni_mpeg4_rl_tab(&ff_mpeg4_rl_intra, uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len); init_uni_mpeg4_rl_tab(&ff_h263_rl_inter, uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len); } s->min_qcoeff= -2048; s->max_qcoeff= 2047; s->intra_ac_vlc_length = 
uni_mpeg4_intra_rl_len; s->intra_ac_vlc_last_length= uni_mpeg4_intra_rl_len + 128*64; s->inter_ac_vlc_length = uni_mpeg4_inter_rl_len; s->inter_ac_vlc_last_length= uni_mpeg4_inter_rl_len + 128*64; s->luma_dc_vlc_length= uni_DCtab_lum_len; s->chroma_dc_vlc_length= uni_DCtab_chrom_len; s->ac_esc_length= 7+2+1+6+1+12+1; s->y_dc_scale_table= ff_mpeg4_y_dc_scale_table; s->c_dc_scale_table= ff_mpeg4_c_dc_scale_table; if(s->flags & CODEC_FLAG_GLOBAL_HEADER){ s->avctx->extradata= av_malloc(1024); init_put_bits(&s->pb, s->avctx->extradata, 1024); if(!(s->workaround_bugs & FF_BUG_MS)) mpeg4_encode_visual_object_header(s); mpeg4_encode_vol_header(s, 0, 0); // ff_mpeg4_stuffing(&s->pb); ? flush_put_bits(&s->pb); s->avctx->extradata_size= (put_bits_count(&s->pb)+7)>>3; } return 0; } void ff_mpeg4_init_partitions(MpegEncContext *s) { uint8_t *start= put_bits_ptr(&s->pb); uint8_t *end= s->pb.buf_end; int size= end - start; int pb_size = (((intptr_t)start + size/3)&(~3)) - (intptr_t)start; int tex_size= (size - 2*pb_size)&(~3); set_put_bits_buffer_size(&s->pb, pb_size); init_put_bits(&s->tex_pb, start + pb_size , tex_size); init_put_bits(&s->pb2 , start + pb_size + tex_size, pb_size); } void ff_mpeg4_merge_partitions(MpegEncContext *s) { const int pb2_len = put_bits_count(&s->pb2 ); const int tex_pb_len= put_bits_count(&s->tex_pb); const int bits= put_bits_count(&s->pb); if(s->pict_type==FF_I_TYPE){ put_bits(&s->pb, 19, DC_MARKER); s->misc_bits+=19 + pb2_len + bits - s->last_bits; s->i_tex_bits+= tex_pb_len; }else{ put_bits(&s->pb, 17, MOTION_MARKER); s->misc_bits+=17 + pb2_len; s->mv_bits+= bits - s->last_bits; s->p_tex_bits+= tex_pb_len; } flush_put_bits(&s->pb2); flush_put_bits(&s->tex_pb); set_put_bits_buffer_size(&s->pb, s->pb2.buf_end - s->pb.buf); ff_copy_bits(&s->pb, s->pb2.buf , pb2_len); ff_copy_bits(&s->pb, s->tex_pb.buf, tex_pb_len); s->last_bits= put_bits_count(&s->pb); } void ff_mpeg4_encode_video_packet_header(MpegEncContext *s) { int mb_num_bits= av_log2(s->mb_num 
- 1) + 1; put_bits(&s->pb, ff_mpeg4_get_video_packet_prefix_length(s), 0); put_bits(&s->pb, 1, 1); put_bits(&s->pb, mb_num_bits, s->mb_x + s->mb_y*s->mb_width); put_bits(&s->pb, s->quant_precision, s->qscale); put_bits(&s->pb, 1, 0); /* no HEC */ } AVCodec mpeg4_encoder = { "mpeg4", AVMEDIA_TYPE_VIDEO, CODEC_ID_MPEG4, sizeof(MpegEncContext), encode_init, MPV_encode_picture, MPV_encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .capabilities= CODEC_CAP_DELAY, .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"), };
123linslouis-android-video-cutter
jni/libavcodec/mpeg4videoenc.c
C
asf20
47,308
/* * Copyright (c) 2008 BBC, Anuradha Suraparaju <asuraparaju at gmail dot com > * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * data structures common to libschroedingerdec.c and libschroedingerenc.c */ #ifndef AVCODEC_LIBSCHROEDINGER_H #define AVCODEC_LIBSCHROEDINGER_H #include <schroedinger/schrobitstream.h> #include <schroedinger/schroframe.h> #include "avcodec.h" static const struct { enum PixelFormat ff_pix_fmt; SchroChromaFormat schro_pix_fmt; SchroFrameFormat schro_frame_fmt; } ffmpeg_schro_pixel_format_map[] = { { PIX_FMT_YUV420P, SCHRO_CHROMA_420, SCHRO_FRAME_FORMAT_U8_420 }, { PIX_FMT_YUV422P, SCHRO_CHROMA_422, SCHRO_FRAME_FORMAT_U8_422 }, { PIX_FMT_YUV444P, SCHRO_CHROMA_444, SCHRO_FRAME_FORMAT_U8_444 }, }; /** * Returns the video format preset matching the input video dimensions and * time base. */ SchroVideoFormatEnum ff_get_schro_video_format_preset (AVCodecContext *avccontext); /** * Sets the Schroedinger frame format corresponding to the Schro chroma format * passed. Returns 0 on success, -1 on failure. */ int ff_get_schro_frame_format(SchroChromaFormat schro_chroma_fmt, SchroFrameFormat *schro_frame_fmt); /** * Create a Schro frame based on the dimensions and frame format * passed. Returns a pointer to a frame on success, NULL on failure. 
*/ SchroFrame *ff_create_schro_frame(AVCodecContext *avccontext, SchroFrameFormat schro_frame_fmt); #endif /* AVCODEC_LIBSCHROEDINGER_H */
123linslouis-android-video-cutter
jni/libavcodec/libschroedinger.h
C
asf20
2,244
/* * ALAC (Apple Lossless Audio Codec) decoder * Copyright (c) 2005 David Hammerton * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * ALAC (Apple Lossless Audio Codec) decoder * @author 2005 David Hammerton * * For more information on the ALAC format, visit: * http://crazney.net/programs/itunes/alac.html * * Note: This decoder expects a 36- (0x24-)byte QuickTime atom to be * passed through the extradata[_size] fields. This atom is tacked onto * the end of an 'alac' stsd atom and has the following format: * bytes 0-3 atom size (0x24), big-endian * bytes 4-7 atom type ('alac', not the 'alac' tag from start of stsd) * bytes 8-35 data bytes needed by decoder * * Extradata: * 32bit size * 32bit tag (=alac) * 32bit zero? * 32bit max sample per frame * 8bit ?? (zero?) * 8bit sample size * 8bit history mult * 8bit initial history * 8bit kmodifier * 8bit channels? * 16bit ?? * 32bit max coded frame size * 32bit bitrate? 
* 32bit samplerate */ #include "avcodec.h" #include "get_bits.h" #include "bytestream.h" #include "unary.h" #include "mathops.h" #define ALAC_EXTRADATA_SIZE 36 #define MAX_CHANNELS 2 typedef struct { AVCodecContext *avctx; GetBitContext gb; /* init to 0; first frame decode should initialize from extradata and * set this to 1 */ int context_initialized; int numchannels; int bytespersample; /* buffers */ int32_t *predicterror_buffer[MAX_CHANNELS]; int32_t *outputsamples_buffer[MAX_CHANNELS]; int32_t *wasted_bits_buffer[MAX_CHANNELS]; /* stuff from setinfo */ uint32_t setinfo_max_samples_per_frame; /* 0x1000 = 4096 */ /* max samples per frame? */ uint8_t setinfo_sample_size; /* 0x10 */ uint8_t setinfo_rice_historymult; /* 0x28 */ uint8_t setinfo_rice_initialhistory; /* 0x0a */ uint8_t setinfo_rice_kmodifier; /* 0x0e */ /* end setinfo stuff */ int wasted_bits; } ALACContext; static void allocate_buffers(ALACContext *alac) { int chan; for (chan = 0; chan < MAX_CHANNELS; chan++) { alac->predicterror_buffer[chan] = av_malloc(alac->setinfo_max_samples_per_frame * 4); alac->outputsamples_buffer[chan] = av_malloc(alac->setinfo_max_samples_per_frame * 4); alac->wasted_bits_buffer[chan] = av_malloc(alac->setinfo_max_samples_per_frame * 4); } } static int alac_set_info(ALACContext *alac) { const unsigned char *ptr = alac->avctx->extradata; ptr += 4; /* size */ ptr += 4; /* alac */ ptr += 4; /* 0 ? */ if(AV_RB32(ptr) >= UINT_MAX/4){ av_log(alac->avctx, AV_LOG_ERROR, "setinfo_max_samples_per_frame too large\n"); return -1; } /* buffer size / 2 ? */ alac->setinfo_max_samples_per_frame = bytestream_get_be32(&ptr); ptr++; /* ??? */ alac->setinfo_sample_size = *ptr++; if (alac->setinfo_sample_size > 32) { av_log(alac->avctx, AV_LOG_ERROR, "setinfo_sample_size too large\n"); return -1; } alac->setinfo_rice_historymult = *ptr++; alac->setinfo_rice_initialhistory = *ptr++; alac->setinfo_rice_kmodifier = *ptr++; ptr++; /* channels? */ bytestream_get_be16(&ptr); /* ??? 
*/ bytestream_get_be32(&ptr); /* max coded frame size */ bytestream_get_be32(&ptr); /* bitrate ? */ bytestream_get_be32(&ptr); /* samplerate */ allocate_buffers(alac); return 0; } static inline int decode_scalar(GetBitContext *gb, int k, int limit, int readsamplesize){ /* read x - number of 1s before 0 represent the rice */ int x = get_unary_0_9(gb); if (x > 8) { /* RICE THRESHOLD */ /* use alternative encoding */ x = get_bits(gb, readsamplesize); } else { if (k >= limit) k = limit; if (k != 1) { int extrabits = show_bits(gb, k); /* multiply x by 2^k - 1, as part of their strange algorithm */ x = (x << k) - x; if (extrabits > 1) { x += extrabits - 1; skip_bits(gb, k); } else skip_bits(gb, k - 1); } } return x; } static void bastardized_rice_decompress(ALACContext *alac, int32_t *output_buffer, int output_size, int readsamplesize, /* arg_10 */ int rice_initialhistory, /* arg424->b */ int rice_kmodifier, /* arg424->d */ int rice_historymult, /* arg424->c */ int rice_kmodifier_mask /* arg424->e */ ) { int output_count; unsigned int history = rice_initialhistory; int sign_modifier = 0; for (output_count = 0; output_count < output_size; output_count++) { int32_t x; int32_t x_modified; int32_t final_val; /* standard rice encoding */ int k; /* size of extra bits */ /* read k, that is bits as is */ k = av_log2((history >> 9) + 3); x= decode_scalar(&alac->gb, k, rice_kmodifier, readsamplesize); x_modified = sign_modifier + x; final_val = (x_modified + 1) / 2; if (x_modified & 1) final_val *= -1; output_buffer[output_count] = final_val; sign_modifier = 0; /* now update the history */ history += x_modified * rice_historymult - ((history * rice_historymult) >> 9); if (x_modified > 0xffff) history = 0xffff; /* special case: there may be compressed blocks of 0 */ if ((history < 128) && (output_count+1 < output_size)) { int k; unsigned int block_size; sign_modifier = 1; k = 7 - av_log2(history) + ((history + 16) >> 6 /* / 64 */); block_size= decode_scalar(&alac->gb, k, 
rice_kmodifier, 16); if (block_size > 0) { if(block_size >= output_size - output_count){ av_log(alac->avctx, AV_LOG_ERROR, "invalid zero block size of %d %d %d\n", block_size, output_size, output_count); block_size= output_size - output_count - 1; } memset(&output_buffer[output_count+1], 0, block_size * 4); output_count += block_size; } if (block_size > 0xffff) sign_modifier = 0; history = 0; } } } static inline int sign_only(int v) { return v ? FFSIGN(v) : 0; } static void predictor_decompress_fir_adapt(int32_t *error_buffer, int32_t *buffer_out, int output_size, int readsamplesize, int16_t *predictor_coef_table, int predictor_coef_num, int predictor_quantitization) { int i; /* first sample always copies */ *buffer_out = *error_buffer; if (!predictor_coef_num) { if (output_size <= 1) return; memcpy(buffer_out+1, error_buffer+1, (output_size-1) * 4); return; } if (predictor_coef_num == 0x1f) { /* 11111 - max value of predictor_coef_num */ /* second-best case scenario for fir decompression, * error describes a small difference from the previous sample only */ if (output_size <= 1) return; for (i = 0; i < output_size - 1; i++) { int32_t prev_value; int32_t error_value; prev_value = buffer_out[i]; error_value = error_buffer[i+1]; buffer_out[i+1] = sign_extend((prev_value + error_value), readsamplesize); } return; } /* read warm-up samples */ if (predictor_coef_num > 0) for (i = 0; i < predictor_coef_num; i++) { int32_t val; val = buffer_out[i] + error_buffer[i+1]; val = sign_extend(val, readsamplesize); buffer_out[i+1] = val; } #if 0 /* 4 and 8 are very common cases (the only ones i've seen). 
these * should be unrolled and optimized */ if (predictor_coef_num == 4) { /* FIXME: optimized general case */ return; } if (predictor_coef_table == 8) { /* FIXME: optimized general case */ return; } #endif /* general case */ if (predictor_coef_num > 0) { for (i = predictor_coef_num + 1; i < output_size; i++) { int j; int sum = 0; int outval; int error_val = error_buffer[i]; for (j = 0; j < predictor_coef_num; j++) { sum += (buffer_out[predictor_coef_num-j] - buffer_out[0]) * predictor_coef_table[j]; } outval = (1 << (predictor_quantitization-1)) + sum; outval = outval >> predictor_quantitization; outval = outval + buffer_out[0] + error_val; outval = sign_extend(outval, readsamplesize); buffer_out[predictor_coef_num+1] = outval; if (error_val > 0) { int predictor_num = predictor_coef_num - 1; while (predictor_num >= 0 && error_val > 0) { int val = buffer_out[0] - buffer_out[predictor_coef_num - predictor_num]; int sign = sign_only(val); predictor_coef_table[predictor_num] -= sign; val *= sign; /* absolute value */ error_val -= ((val >> predictor_quantitization) * (predictor_coef_num - predictor_num)); predictor_num--; } } else if (error_val < 0) { int predictor_num = predictor_coef_num - 1; while (predictor_num >= 0 && error_val < 0) { int val = buffer_out[0] - buffer_out[predictor_coef_num - predictor_num]; int sign = - sign_only(val); predictor_coef_table[predictor_num] -= sign; val *= sign; /* neg value */ error_val -= ((val >> predictor_quantitization) * (predictor_coef_num - predictor_num)); predictor_num--; } } buffer_out++; } } } static void reconstruct_stereo_16(int32_t *buffer[MAX_CHANNELS], int16_t *buffer_out, int numchannels, int numsamples, uint8_t interlacing_shift, uint8_t interlacing_leftweight) { int i; if (numsamples <= 0) return; /* weighted interlacing */ if (interlacing_leftweight) { for (i = 0; i < numsamples; i++) { int32_t a, b; a = buffer[0][i]; b = buffer[1][i]; a -= (b * interlacing_leftweight) >> interlacing_shift; b += a; 
buffer_out[i*numchannels] = b; buffer_out[i*numchannels + 1] = a; } return; } /* otherwise basic interlacing took place */ for (i = 0; i < numsamples; i++) { int16_t left, right; left = buffer[0][i]; right = buffer[1][i]; buffer_out[i*numchannels] = left; buffer_out[i*numchannels + 1] = right; } } static void decorrelate_stereo_24(int32_t *buffer[MAX_CHANNELS], int32_t *buffer_out, int32_t *wasted_bits_buffer[MAX_CHANNELS], int wasted_bits, int numchannels, int numsamples, uint8_t interlacing_shift, uint8_t interlacing_leftweight) { int i; if (numsamples <= 0) return; /* weighted interlacing */ if (interlacing_leftweight) { for (i = 0; i < numsamples; i++) { int32_t a, b; a = buffer[0][i]; b = buffer[1][i]; a -= (b * interlacing_leftweight) >> interlacing_shift; b += a; if (wasted_bits) { b = (b << wasted_bits) | wasted_bits_buffer[0][i]; a = (a << wasted_bits) | wasted_bits_buffer[1][i]; } buffer_out[i * numchannels] = b << 8; buffer_out[i * numchannels + 1] = a << 8; } } else { for (i = 0; i < numsamples; i++) { int32_t left, right; left = buffer[0][i]; right = buffer[1][i]; if (wasted_bits) { left = (left << wasted_bits) | wasted_bits_buffer[0][i]; right = (right << wasted_bits) | wasted_bits_buffer[1][i]; } buffer_out[i * numchannels] = left << 8; buffer_out[i * numchannels + 1] = right << 8; } } } static int alac_decode_frame(AVCodecContext *avctx, void *outbuffer, int *outputsize, AVPacket *avpkt) { const uint8_t *inbuffer = avpkt->data; int input_buffer_size = avpkt->size; ALACContext *alac = avctx->priv_data; int channels; unsigned int outputsamples; int hassize; unsigned int readsamplesize; int isnotcompressed; uint8_t interlacing_shift; uint8_t interlacing_leftweight; /* short-circuit null buffers */ if (!inbuffer || !input_buffer_size) return input_buffer_size; /* initialize from the extradata */ if (!alac->context_initialized) { if (alac->avctx->extradata_size != ALAC_EXTRADATA_SIZE) { av_log(avctx, AV_LOG_ERROR, "alac: expected %d extradata bytes\n", 
ALAC_EXTRADATA_SIZE); return input_buffer_size; } if (alac_set_info(alac)) { av_log(avctx, AV_LOG_ERROR, "alac: set_info failed\n"); return input_buffer_size; } alac->context_initialized = 1; } init_get_bits(&alac->gb, inbuffer, input_buffer_size * 8); channels = get_bits(&alac->gb, 3) + 1; if (channels > MAX_CHANNELS) { av_log(avctx, AV_LOG_ERROR, "channels > %d not supported\n", MAX_CHANNELS); return input_buffer_size; } /* 2^result = something to do with output waiting. * perhaps matters if we read > 1 frame in a pass? */ skip_bits(&alac->gb, 4); skip_bits(&alac->gb, 12); /* unknown, skip 12 bits */ /* the output sample size is stored soon */ hassize = get_bits1(&alac->gb); alac->wasted_bits = get_bits(&alac->gb, 2) << 3; /* whether the frame is compressed */ isnotcompressed = get_bits1(&alac->gb); if (hassize) { /* now read the number of samples as a 32bit integer */ outputsamples = get_bits_long(&alac->gb, 32); if(outputsamples > alac->setinfo_max_samples_per_frame){ av_log(avctx, AV_LOG_ERROR, "outputsamples %d > %d\n", outputsamples, alac->setinfo_max_samples_per_frame); return -1; } } else outputsamples = alac->setinfo_max_samples_per_frame; switch (alac->setinfo_sample_size) { case 16: avctx->sample_fmt = SAMPLE_FMT_S16; alac->bytespersample = channels << 1; break; case 24: avctx->sample_fmt = SAMPLE_FMT_S32; alac->bytespersample = channels << 2; break; default: av_log(avctx, AV_LOG_ERROR, "Sample depth %d is not supported.\n", alac->setinfo_sample_size); return -1; } if(outputsamples > *outputsize / alac->bytespersample){ av_log(avctx, AV_LOG_ERROR, "sample buffer too small\n"); return -1; } *outputsize = outputsamples * alac->bytespersample; readsamplesize = alac->setinfo_sample_size - (alac->wasted_bits) + channels - 1; if (readsamplesize > MIN_CACHE_BITS) { av_log(avctx, AV_LOG_ERROR, "readsamplesize too big (%d)\n", readsamplesize); return -1; } if (!isnotcompressed) { /* so it is compressed */ int16_t predictor_coef_table[channels][32]; int 
predictor_coef_num[channels]; int prediction_type[channels]; int prediction_quantitization[channels]; int ricemodifier[channels]; int i, chan; interlacing_shift = get_bits(&alac->gb, 8); interlacing_leftweight = get_bits(&alac->gb, 8); for (chan = 0; chan < channels; chan++) { prediction_type[chan] = get_bits(&alac->gb, 4); prediction_quantitization[chan] = get_bits(&alac->gb, 4); ricemodifier[chan] = get_bits(&alac->gb, 3); predictor_coef_num[chan] = get_bits(&alac->gb, 5); /* read the predictor table */ for (i = 0; i < predictor_coef_num[chan]; i++) predictor_coef_table[chan][i] = (int16_t)get_bits(&alac->gb, 16); } if (alac->wasted_bits) { int i, ch; for (i = 0; i < outputsamples; i++) { for (ch = 0; ch < channels; ch++) alac->wasted_bits_buffer[ch][i] = get_bits(&alac->gb, alac->wasted_bits); } } for (chan = 0; chan < channels; chan++) { bastardized_rice_decompress(alac, alac->predicterror_buffer[chan], outputsamples, readsamplesize, alac->setinfo_rice_initialhistory, alac->setinfo_rice_kmodifier, ricemodifier[chan] * alac->setinfo_rice_historymult / 4, (1 << alac->setinfo_rice_kmodifier) - 1); if (prediction_type[chan] == 0) { /* adaptive fir */ predictor_decompress_fir_adapt(alac->predicterror_buffer[chan], alac->outputsamples_buffer[chan], outputsamples, readsamplesize, predictor_coef_table[chan], predictor_coef_num[chan], prediction_quantitization[chan]); } else { av_log(avctx, AV_LOG_ERROR, "FIXME: unhandled prediction type: %i\n", prediction_type[chan]); /* I think the only other prediction type (or perhaps this is * just a boolean?) runs adaptive fir twice.. like: * predictor_decompress_fir_adapt(predictor_error, tempout, ...) * predictor_decompress_fir_adapt(predictor_error, outputsamples ...) * little strange.. 
*/ } } } else { /* not compressed, easy case */ int i, chan; if (alac->setinfo_sample_size <= 16) { for (i = 0; i < outputsamples; i++) for (chan = 0; chan < channels; chan++) { int32_t audiobits; audiobits = get_sbits_long(&alac->gb, alac->setinfo_sample_size); alac->outputsamples_buffer[chan][i] = audiobits; } } else { for (i = 0; i < outputsamples; i++) { for (chan = 0; chan < channels; chan++) { alac->outputsamples_buffer[chan][i] = get_bits(&alac->gb, alac->setinfo_sample_size); alac->outputsamples_buffer[chan][i] = sign_extend(alac->outputsamples_buffer[chan][i], alac->setinfo_sample_size); } } } alac->wasted_bits = 0; interlacing_shift = 0; interlacing_leftweight = 0; } if (get_bits(&alac->gb, 3) != 7) av_log(avctx, AV_LOG_ERROR, "Error : Wrong End Of Frame\n"); switch(alac->setinfo_sample_size) { case 16: if (channels == 2) { reconstruct_stereo_16(alac->outputsamples_buffer, (int16_t*)outbuffer, alac->numchannels, outputsamples, interlacing_shift, interlacing_leftweight); } else { int i; for (i = 0; i < outputsamples; i++) { ((int16_t*)outbuffer)[i] = alac->outputsamples_buffer[0][i]; } } break; case 24: if (channels == 2) { decorrelate_stereo_24(alac->outputsamples_buffer, outbuffer, alac->wasted_bits_buffer, alac->wasted_bits, alac->numchannels, outputsamples, interlacing_shift, interlacing_leftweight); } else { int i; for (i = 0; i < outputsamples; i++) ((int32_t *)outbuffer)[i] = alac->outputsamples_buffer[0][i] << 8; } break; } if (input_buffer_size * 8 - get_bits_count(&alac->gb) > 8) av_log(avctx, AV_LOG_ERROR, "Error : %d bits left\n", input_buffer_size * 8 - get_bits_count(&alac->gb)); return input_buffer_size; } static av_cold int alac_decode_init(AVCodecContext * avctx) { ALACContext *alac = avctx->priv_data; alac->avctx = avctx; alac->context_initialized = 0; alac->numchannels = alac->avctx->channels; return 0; } static av_cold int alac_decode_close(AVCodecContext *avctx) { ALACContext *alac = avctx->priv_data; int chan; for (chan = 0; chan < 
MAX_CHANNELS; chan++) { av_freep(&alac->predicterror_buffer[chan]); av_freep(&alac->outputsamples_buffer[chan]); av_freep(&alac->wasted_bits_buffer[chan]); } return 0; } AVCodec alac_decoder = { "alac", AVMEDIA_TYPE_AUDIO, CODEC_ID_ALAC, sizeof(ALACContext), alac_decode_init, NULL, alac_decode_close, alac_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("ALAC (Apple Lossless Audio Codec)"), };
123linslouis-android-video-cutter
jni/libavcodec/alac.c
C
asf20
23,576
/* * Generate a header file for hardcoded QDM2 tables * * Copyright (c) 2010 Reimar Döffinger <Reimar.Doeffinger@gmx.de> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdlib.h> #define CONFIG_HARDCODED_TABLES 0 #include "qdm2_tablegen.h" #include "tableprint.h" int main(void) { softclip_table_init(); rnd_table_init(); init_noise_samples(); write_fileheader(); printf("static const uint16_t softclip_table[HARDCLIP_THRESHOLD - SOFTCLIP_THRESHOLD + 1] = {\n"); write_uint16_array(softclip_table, HARDCLIP_THRESHOLD - SOFTCLIP_THRESHOLD + 1); printf("};\n"); printf("static const float noise_table[4096] = {\n"); write_float_array(noise_table, 4096); printf("};\n"); printf("static const uint8_t random_dequant_index[256][5] = {\n"); write_uint8_2d_array(random_dequant_index, 256, 5); printf("};\n"); printf("static const uint8_t random_dequant_type24[128][3] = {\n"); write_uint8_2d_array(random_dequant_type24, 128, 3); printf("};\n"); printf("static const float noise_samples[128] = {\n"); write_float_array(noise_samples, 128); printf("};\n"); return 0; }
123linslouis-android-video-cutter
jni/libavcodec/qdm2_tablegen.c
C
asf20
1,880
/* * MPEG4 video parser prototypes * Copyright (c) 2003 Fabrice Bellard * Copyright (c) 2003 Michael Niedermayer * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_MPEG4VIDEO_PARSER_H #define AVCODEC_MPEG4VIDEO_PARSER_H #include "parser.h" /** * finds the end of the current frame in the bitstream. * @return the position of the first byte of the next frame, or -1 */ int ff_mpeg4_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size); #endif /* AVCODEC_MPEG4VIDEO_PARSER_H */
123linslouis-android-video-cutter
jni/libavcodec/mpeg4video_parser.h
C
asf20
1,228
/* * AAC encoder psychoacoustic model * Copyright (C) 2008 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_AACPSY_H #define AVCODEC_AACPSY_H #include "avcodec.h" #include "aac.h" //#include "lowpass.h" enum AACPsyModelType{ AAC_PSY_TEST, ///< a sample model to exercise encoder AAC_PSY_3GPP, ///< model following recommendations from 3GPP TS 26.403 AAC_NB_PSY_MODELS ///< total number of psychoacoustic models, since it's not a part of the ABI new models can be added freely }; /** * context used by psychoacoustic model */ typedef struct AACPsyContext { AVCodecContext *avctx; ///< encoder context }AACPsyContext; /** * Cleanup model context at the end. * * @param ctx model context */ void ff_aac_psy_end(AACPsyContext *ctx); #endif /* AVCODEC_AACPSY_H */
123linslouis-android-video-cutter
jni/libavcodec/aacpsy.h
C
asf20
1,586
/* * Musepack SV8 decoder * Copyright (c) 2007 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_MPC8HUFF_H #define AVCODEC_MPC8HUFF_H #include <stdint.h> #define MPC8_BANDS_SIZE 33 #define MPC8_BANDS_BITS 9 static const uint8_t mpc8_bands_codes[MPC8_BANDS_SIZE] = { 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, 0x04, 0x04, 0x05, 0x06, 0x01, 0x02, 0x03, 0x00, 0x04, 0x05, 0x06, 0x07, 0x08, 0x01, 0x09, 0x0A, 0x0B, 0x07, 0x08, 0x09, 0x06, 0x07, 0x05, 0x05, 0x03, 0x03, 0x01, }; static const int8_t mpc8_bands_bits[MPC8_BANDS_SIZE] = { 1, 3, 5, 6, 7, 8, 8, 9, 10, 11, 12, 12, 12, 13, 12, 12, 12, 12, 12, 13, 12, 12, 12, 11, 11, 11, 10, 10, 9, 8, 6, 5, 2, }; #define MPC8_SCFI0_SIZE 4 #define MPC8_SCFI0_BITS 3 static const uint8_t mpc8_scfi0_codes[MPC8_SCFI0_SIZE] = { 0x00, 0x01, 0x01, 0x01, }; static const int8_t mpc8_scfi0_bits[MPC8_SCFI0_SIZE] = { 3, 3, 1, 2, }; #define MPC8_SCFI1_SIZE 16 #define MPC8_SCFI1_BITS 7 static const uint8_t mpc8_scfi1_codes[MPC8_SCFI1_SIZE] = { 0x01, 0x00, 0x02, 0x03, 0x01, 0x03, 0x04, 0x05, 0x04, 0x06, 0x02, 0x02, 0x05, 0x07, 0x03, 0x03, }; static const int8_t mpc8_scfi1_bits[MPC8_SCFI1_SIZE] = { 6, 7, 6, 6, 7, 5, 5, 5, 6, 5, 2, 3, 6, 5, 3, 2, }; #define MPC8_DSCF0_SIZE 64 #define MPC8_DSCF0_BITS 9 static const uint8_t 
mpc8_dscf0_codes[MPC8_DSCF0_SIZE] = { 0x03, 0x04, 0x05, 0x04, 0x05, 0x06, 0x05, 0x06, 0x07, 0x08, 0x09, 0x07, 0x08, 0x09, 0x0A, 0x07, 0x08, 0x09, 0x0A, 0x07, 0x08, 0x09, 0x0A, 0x06, 0x07, 0x05, 0x04, 0x05, 0x06, 0x06, 0x07, 0x0A, 0x08, 0x05, 0x06, 0x07, 0x09, 0x07, 0x08, 0x09, 0x0B, 0x0B, 0x0C, 0x0D, 0x0B, 0x0C, 0x0D, 0x0B, 0x0C, 0x0D, 0x07, 0x08, 0x09, 0x06, 0x07, 0x03, 0x04, 0x05, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, }; static const int8_t mpc8_dscf0_bits[MPC8_DSCF0_SIZE] = { 12, 12, 12, 11, 11, 11, 10, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 5, 4, 4, 5, 4, 4, 10, 4, 3, 3, 3, 4, 5, 6, 6, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11, 12, 12, 13, 13, 13, 14, 14, 14, 14, 14, 14, }; #define MPC8_DSCF1_SIZE 65 #define MPC8_DSCF1_BITS 9 static const uint8_t mpc8_dscf1_codes[MPC8_DSCF1_SIZE] = { 0x00, 0x03, 0x04, 0x04, 0x05, 0x06, 0x05, 0x06, 0x07, 0x08, 0x07, 0x08, 0x09, 0x0A, 0x07, 0x08, 0x09, 0x0A, 0x07, 0x08, 0x09, 0x06, 0x07, 0x05, 0x06, 0x04, 0x03, 0x03, 0x04, 0x03, 0x04, 0x05, 0x06, 0x07, 0x05, 0x04, 0x05, 0x05, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0B, 0x0C, 0x0D, 0x0B, 0x0C, 0x0D, 0x09, 0x0A, 0x0B, 0x0C, 0x07, 0x08, 0x09, 0x05, 0x06, 0x07, 0x01, 0x02, 0x03, 0x04, 0x05, 0x0D, }; static const int8_t mpc8_dscf1_bits[MPC8_DSCF1_SIZE] = { 15, 14, 14, 13, 13, 13, 12, 12, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9, 8, 8, 7, 7, 6, 5, 4, 4, 3, 3, 3, 3, 3, 4, 5, 5, 6, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 15, 15, 15, 12, }; #define MPC8_RES_SIZE 17 #define MPC8_RES_BITS 9 static const uint8_t mpc8_res_codes[2][MPC8_RES_SIZE] = { { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x01, 0x01, }, { 0x01, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x02, 0x03, 0x01, 0x01, 0x01, 0x01, 0x03, } }; static const int8_t mpc8_res_bits[2][MPC8_RES_SIZE] = { { 1, 2, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 16, 8, 3, }, { 2, 2, 3, 5, 7, 8, 10, 12, 14, 14, 14, 14, 11, 9, 
6, 4, 2, } }; #define MPC8_Q1_SIZE 19 #define MPC8_Q1_BITS 9 static const uint8_t mpc8_q1_codes[MPC8_Q1_SIZE] = { 0x01, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x03, 0x04, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, }; static const int8_t mpc8_q1_bits[MPC8_Q1_SIZE] = { 6, 4, 4, 3, 3, 3, 3, 3, 4, 4, 4, 5, 7, 8, 9, 10, 11, 12, 12, }; #define MPC8_Q9UP_SIZE 256 #define MPC8_Q9UP_BITS 9 static const uint8_t mpc8_q9up_codes[MPC8_Q9UP_SIZE] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x26, 0x27, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x28, 0x26, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x3E, 0x3F, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x6B, 0x7B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x06, 0x07, 0x08, 0x09, 0x00, 0x01, }; 
static const int8_t mpc8_q9up_bits[MPC8_Q9UP_SIZE] = { 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, }; #define MPC8_Q2_SIZE 125 #define MPC8_Q2_BITS 9 static const uint8_t mpc8_q2_codes[2][MPC8_Q2_SIZE] = { { 0x02, 0x03, 0x0F, 0x04, 0x00, 0x05, 0x0C, 0x12, 0x0D, 0x06, 0x07, 0x13, 0x15, 0x14, 0x08, 0x09, 0x0E, 0x15, 0x0F, 0x0A, 0x03, 0x0B, 0x10, 0x0C, 0x01, 0x0D, 0x10, 0x16, 0x11, 0x0E, 0x12, 0x0F, 0x10, 0x16, 0x13, 0x17, 0x11, 0x08, 0x12, 0x18, 0x14, 0x13, 0x14, 0x17, 0x15, 0x0F, 0x16, 0x19, 0x17, 0x10, 0x11, 0x1A, 0x18, 0x1B, 0x12, 0x1C, 0x15, 0x09, 0x16, 0x1D, 0x19, 0x0A, 0x07, 0x0B, 0x1A, 0x1E, 0x17, 0x0C, 0x18, 0x1F, 0x13, 0x20, 0x1B, 0x21, 0x14, 0x11, 0x18, 0x22, 0x19, 0x12, 0x1A, 0x19, 0x1A, 0x1B, 0x1B, 0x23, 0x1C, 0x0D, 0x1D, 0x24, 0x1C, 0x1C, 0x1E, 0x1F, 0x1D, 0x13, 0x1E, 0x25, 0x1F, 0x14, 0x02, 0x15, 0x15, 0x16, 0x04, 0x17, 0x20, 0x26, 0x21, 0x18, 0x16, 0x27, 0x1D, 0x28, 0x19, 0x1A, 0x22, 0x29, 0x23, 0x1B, 0x03, 0x1C, 0x17, 0x1D, 0x05, }, { 0x02, 0x03, 0x0F, 0x04, 0x00, 0x05, 0x0C, 0x0D, 0x0E, 0x06, 0x07, 0x0F, 0x1E, 0x10, 0x10, 0x08, 0x11, 0x12, 0x13, 0x09, 0x03, 0x0A, 0x11, 0x0B, 0x01, 0x0C, 0x14, 0x15, 0x16, 0x0D, 0x17, 0x12, 0x0E, 0x13, 0x18, 0x19, 0x14, 0x0F, 0x10, 0x1A, 0x1B, 0x15, 0x11, 0x16, 0x1C, 0x0E, 0x1D, 0x1E, 0x1F, 0x0F, 
0x12, 0x20, 0x1F, 0x21, 0x13, 0x22, 0x12, 0x13, 0x14, 0x23, 0x20, 0x15, 0x0F, 0x16, 0x21, 0x24, 0x17, 0x18, 0x19, 0x25, 0x14, 0x26, 0x22, 0x27, 0x15, 0x10, 0x28, 0x29, 0x2A, 0x11, 0x2B, 0x17, 0x1A, 0x18, 0x2C, 0x2D, 0x1B, 0x1C, 0x19, 0x2E, 0x2F, 0x1A, 0x1D, 0x1B, 0x30, 0x12, 0x31, 0x32, 0x33, 0x13, 0x02, 0x14, 0x15, 0x16, 0x04, 0x17, 0x34, 0x35, 0x36, 0x18, 0x16, 0x37, 0x23, 0x38, 0x19, 0x1A, 0x39, 0x3A, 0x3B, 0x1B, 0x03, 0x1C, 0x17, 0x1D, 0x05, } }; static const int8_t mpc8_q2_bits[2][MPC8_Q2_SIZE] = { { 12, 11, 10, 11, 13, 11, 9, 8, 9, 11, 11, 8, 7, 8, 11, 11, 9, 8, 9, 11, 12, 11, 10, 11, 13, 11, 9, 8, 9, 11, 9, 6, 6, 7, 9, 8, 6, 4, 6, 8, 9, 6, 6, 7, 9, 11, 9, 8, 9, 11, 10, 8, 7, 8, 10, 8, 6, 4, 6, 8, 7, 4, 3, 4, 7, 8, 6, 4, 6, 8, 10, 8, 7, 8, 10, 11, 9, 8, 9, 11, 9, 6, 6, 6, 9, 8, 6, 4, 6, 8, 9, 7, 6, 6, 9, 11, 9, 8, 9, 11, 13, 11, 10, 11, 12, 11, 9, 8, 9, 11, 10, 8, 7, 8, 11, 11, 9, 8, 9, 11, 13, 11, 10, 11, 12, }, { 11, 10, 9, 10, 12, 10, 8, 8, 8, 10, 10, 8, 7, 8, 9, 10, 8, 8, 8, 10, 11, 10, 9, 10, 12, 10, 8, 8, 8, 10, 8, 6, 5, 6, 8, 8, 6, 5, 5, 8, 8, 6, 5, 6, 8, 10, 8, 8, 8, 10, 9, 8, 7, 8, 9, 8, 5, 5, 5, 8, 7, 5, 4, 5, 7, 8, 5, 5, 5, 8, 9, 8, 7, 8, 9, 10, 8, 8, 8, 10, 8, 6, 5, 6, 8, 8, 5, 5, 6, 8, 8, 6, 5, 6, 8, 10, 8, 8, 8, 10, 12, 10, 10, 10, 11, 10, 8, 8, 8, 10, 9, 8, 7, 8, 10, 10, 8, 8, 8, 10, 12, 10, 9, 10, 11, } }; #define MPC8_Q3_SIZE 49 #define MPC8_Q3_BITS 9 #define MPC8_Q3_OFFSET -48 static const uint8_t mpc8_q3_codes[MPC8_Q3_SIZE] = { 0x07, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x13, 0x12, 0x11, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x11, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x03, 0x02, 0x01, 0x00, }; static const int8_t mpc8_q3_bits[MPC8_Q3_SIZE] = { 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, }; static const int8_t 
mpc8_q3_syms[MPC8_Q3_SIZE] = { 48, 65, 64, 49, 63, 32, 47, 80, 79, 50, 62, 33, 16, 82, 81, 95, 94, 66, 78, 34, 46, 17, 31, 30, 97, 96, 111, 67, 77, 51, 61, 35, 45, 18, 1, 0, 15, 98, 110, 83, 93, 19, 29, 2, 14, 99, 109, 3, 13, }; #define MPC8_Q4_SIZE 81 #define MPC8_Q4_BITS 9 #define MPC8_Q4_OFFSET -64 static const uint8_t mpc8_q4_codes[MPC8_Q4_SIZE] = { 0x0F, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x23, 0x22, 0x21, 0x20, 0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0F, 0x0E, 0x0D, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x03, 0x02, 0x01, 0x00, }; static const int8_t mpc8_q4_bits[MPC8_Q4_SIZE] = { 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, }; static const int8_t mpc8_q4_syms[MPC8_Q4_SIZE] = { 64, 96, 81, 80, 95, 66, 65, 79, 78, 49, 48, 63, 32, 113, 112, 98, 97, 111, 110, 83, 82, 94, 93, 67, 77, 51, 50, 62, 61, 34, 33, 47, 46, 17, 16, 31, 128, 114, 127, 126, 99, 109, 68, 76, 35, 45, 18, 30, 0, 15, 130, 129, 143, 142, 115, 125, 100, 108, 84, 92, 52, 60, 36, 44, 19, 29, 2, 1, 14, 131, 141, 116, 124, 20, 28, 3, 13, 132, 140, 4, 12, }; #define MPC8_Q5_SIZE 15 #define MPC8_Q5_BITS 7 #define MPC8_Q5_OFFSET -7 static const uint8_t mpc8_q5_codes[2][MPC8_Q5_SIZE] = { { 0x00, 0x01, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x04, 0x05, 0x03, 0x03, 0x03, 0x02, 0x03, }, { 0x00, 0x01, 0x02, 0x02, 0x03, 0x03, 0x04, 0x05, 0x06, 0x07, 0x04, 0x05, 0x03, 0x02, 0x03, } }; static const int8_t mpc8_q5_bits[2][MPC8_Q5_SIZE] = { { 7, 7, 6, 5, 4, 3, 3, 2, 3, 3, 4, 5, 6, 7, 7, }, { 6, 6, 5, 4, 4, 3, 3, 3, 3, 3, 
4, 4, 5, 6, 6, } }; #define MPC8_Q6_SIZE 31 #define MPC8_Q6_BITS 9 #define MPC8_Q6_OFFSET -15 static const uint8_t mpc8_q6_codes[2][MPC8_Q6_SIZE] = { { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x04, 0x03, 0x04, 0x05, 0x05, 0x06, 0x04, 0x05, 0x04, 0x03, 0x05, 0x06, 0x07, 0x07, 0x06, 0x07, 0x08, 0x09, 0x05, 0x06, 0x07, 0x04, 0x05, 0x06, 0x07, }, { 0x00, 0x01, 0x02, 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x06, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x07, 0x08, 0x09, 0x06, 0x07, 0x05, 0x06, 0x07, 0x02, 0x03, } }; static const int8_t mpc8_q6_bits[2][MPC8_Q6_SIZE] = { { 9, 9, 9, 9, 8, 8, 7, 6, 6, 6, 5, 5, 4, 4, 3, 2, 3, 4, 4, 5, 6, 6, 6, 6, 7, 8, 8, 9, 9, 9, 9, }, { 8, 8, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 6, 6, 7, 7, 7, 8, 8, } }; #define MPC8_Q7_SIZE 63 #define MPC8_Q7_BITS 9 #define MPC8_Q7_OFFSET -31 static const uint8_t mpc8_q7_codes[2][MPC8_Q7_SIZE] = { { 0x00, 0x01, 0x02, 0x08, 0x09, 0x03, 0x04, 0x05, 0x06, 0x07, 0x0A, 0x0B, 0x0C, 0x0D, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0A, 0x0B, 0x0C, 0x08, 0x09, 0x06, 0x04, 0x03, 0x05, 0x07, 0x0A, 0x0B, 0x0D, 0x0E, 0x0F, 0x0F, 0x10, 0x11, 0x12, 0x0F, 0x13, 0x10, 0x11, 0x12, 0x13, 0x0E, 0x0F, 0x10, 0x11, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x12, 0x13, 0x0D, 0x0E, 0x0F, }, { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x02, 0x03, } }; static const int8_t mpc8_q7_bits[2][MPC8_Q7_SIZE] = { { 10, 10, 10, 9, 9, 10, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 4, 3, 2, 3, 4, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 9, 9, 10, 10, 10, }, { 9, 9, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 
6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 9, 9, } }; #define MPC8_Q8_SIZE 127 #define MPC8_Q8_BITS 9 #define MPC8_Q8_OFFSET -63 static const uint8_t mpc8_q8_codes[2][MPC8_Q8_SIZE] = { { 0x03, 0x04, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x1A, 0x0F, 0x1B, 0x10, 0x00, 0x01, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x11, 0x0C, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1C, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, 0x19, 0x25, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x14, 0x15, 0x16, 0x17, 0x0E, 0x0F, 0x10, 0x11, 0x0B, 0x07, 0x04, 0x03, 0x05, 0x0C, 0x0D, 0x12, 0x13, 0x14, 0x15, 0x18, 0x19, 0x1A, 0x1B, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x1F, 0x20, 0x2F, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x0D, 0x0E, 0x2A, 0x0F, 0x10, 0x11, 0x12, 0x02, 0x13, 0x03, 0x04, 0x05, 0x2B, 0x2C, 0x30, 0x31, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, }, { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x31, 0x3F, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x04, 0x05, 0x06, 0x07, } }; static const int8_t mpc8_q8_bits[2][MPC8_Q8_SIZE] = { { 11, 11, 10, 10, 10, 10, 10, 9, 10, 9, 10, 12, 12, 11, 11, 11, 11, 11, 11, 11, 10, 11, 10, 10, 10, 10, 10, 10, 10, 10, 9, 10, 10, 
10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 9, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 4, 3, 2, 3, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 10, 11, 11, 11, 11, 12, 11, 12, 12, 12, 10, 10, 9, 9, 10, 10, 10, 10, 10, 10, 10, }, { 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, } }; #endif /* AVCODEC_MPC8HUFF_H */
123linslouis-android-video-cutter
jni/libavcodec/mpc8huff.h
C
asf20
18,596
/** * @file * VP5 and VP6 compatible video decoder (common data) * * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "vp56data.h" const uint8_t vp56_b2p[] = { 0, 0, 0, 0, 1, 2, 3, 3, 3, 3 }; const uint8_t vp56_b6to4[] = { 0, 0, 1, 1, 2, 3 }; const uint8_t vp56_coeff_parse_table[6][11] = { { 159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 145, 165, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 140, 148, 173, 0, 0, 0, 0, 0, 0, 0, 0 }, { 135, 140, 155, 176, 0, 0, 0, 0, 0, 0, 0 }, { 130, 134, 141, 157, 180, 0, 0, 0, 0, 0, 0 }, { 129, 130, 133, 140, 153, 177, 196, 230, 243, 254, 254 }, }; const uint8_t vp56_def_mb_types_stats[3][10][2] = { { { 69, 42 }, { 1, 2 }, { 1, 7 }, { 44, 42 }, { 6, 22 }, { 1, 3 }, { 0, 2 }, { 1, 5 }, { 0, 1 }, { 0, 0 }, }, { { 229, 8 }, { 1, 1 }, { 0, 8 }, { 0, 0 }, { 0, 0 }, { 1, 2 }, { 0, 1 }, { 0, 0 }, { 1, 1 }, { 0, 0 }, }, { { 122, 35 }, { 1, 1 }, { 1, 6 }, { 46, 34 }, { 0, 0 }, { 1, 2 }, { 0, 1 }, { 0, 1 }, { 1, 1 }, { 0, 0 }, }, }; const VP56Tree vp56_pva_tree[] = { { 8, 0}, { 4, 1}, { 2, 2}, {-0}, {-1}, { 2, 3}, {-2}, {-3}, { 4, 4}, { 2, 5}, {-4}, {-5}, { 2, 6}, {-6}, {-7}, }; const VP56Tree vp56_pc_tree[] = { { 4, 6}, { 2, 7}, {-0}, {-1}, { 4, 8}, { 2, 9}, {-2}, {-3}, { 2,10}, {-4}, {-5}, }; const uint8_t 
vp56_coeff_bias[] = { 0, 1, 2, 3, 4, 5, 7, 11, 19, 35, 67 }; const uint8_t vp56_coeff_bit_length[] = { 0, 1, 2, 3, 4, 10 };
123linslouis-android-video-cutter
jni/libavcodec/vp56data.c
C
asf20
2,400
/* * MPEG Audio decoder * Copyright (c) 2001, 2002 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * MPEG Audio decoder. */ #include "avcodec.h" #include "get_bits.h" #include "dsputil.h" /* * TODO: * - in low precision mode, use more 16 bit multiplies in synth filter * - test lsf / mpeg25 extensively. 
*/ #include "mpegaudio.h" #include "mpegaudiodecheader.h" #include "mathops.h" /* WARNING: only correct for posititive numbers */ #define FIXR(a) ((int)((a) * FRAC_ONE + 0.5)) #define FRAC_RND(a) (((a) + (FRAC_ONE/2)) >> FRAC_BITS) #define FIXHR(a) ((int)((a) * (1LL<<32) + 0.5)) /****************/ #define HEADER_SIZE 4 #include "mpegaudiodata.h" #include "mpegaudiodectab.h" static void compute_antialias_integer(MPADecodeContext *s, GranuleDef *g); static void compute_antialias_float(MPADecodeContext *s, GranuleDef *g); /* vlc structure for decoding layer 3 huffman tables */ static VLC huff_vlc[16]; static VLC_TYPE huff_vlc_tables[ 0+128+128+128+130+128+154+166+ 142+204+190+170+542+460+662+414 ][2]; static const int huff_vlc_tables_sizes[16] = { 0, 128, 128, 128, 130, 128, 154, 166, 142, 204, 190, 170, 542, 460, 662, 414 }; static VLC huff_quad_vlc[2]; static VLC_TYPE huff_quad_vlc_tables[128+16][2]; static const int huff_quad_vlc_tables_sizes[2] = { 128, 16 }; /* computed from band_size_long */ static uint16_t band_index_long[9][23]; #include "mpegaudio_tablegen.h" /* intensity stereo coef table */ static int32_t is_table[2][16]; static int32_t is_table_lsf[2][2][16]; static int32_t csa_table[8][4]; static float csa_table_float[8][4]; static int32_t mdct_win[8][36]; /* lower 2 bits: modulo 3, higher bits: shift */ static uint16_t scale_factor_modshift[64]; /* [i][j]: 2^(-j/3) * FRAC_ONE * 2^(i+2) / (2^(i+2) - 1) */ static int32_t scale_factor_mult[15][3]; /* mult table for layer 2 group quantization */ #define SCALE_GEN(v) \ { FIXR(1.0 * (v)), FIXR(0.7937005259 * (v)), FIXR(0.6299605249 * (v)) } static const int32_t scale_factor_mult2[3][3] = { SCALE_GEN(4.0 / 3.0), /* 3 steps */ SCALE_GEN(4.0 / 5.0), /* 5 steps */ SCALE_GEN(4.0 / 9.0), /* 9 steps */ }; DECLARE_ALIGNED(16, MPA_INT, ff_mpa_synth_window)[512]; /** * Convert region offsets to region sizes and truncate * size to big_values. 
*/ static void ff_region_offset2size(GranuleDef *g){ int i, k, j=0; g->region_size[2] = (576 / 2); for(i=0;i<3;i++) { k = FFMIN(g->region_size[i], g->big_values); g->region_size[i] = k - j; j = k; } } static void ff_init_short_region(MPADecodeContext *s, GranuleDef *g){ if (g->block_type == 2) g->region_size[0] = (36 / 2); else { if (s->sample_rate_index <= 2) g->region_size[0] = (36 / 2); else if (s->sample_rate_index != 8) g->region_size[0] = (54 / 2); else g->region_size[0] = (108 / 2); } g->region_size[1] = (576 / 2); } static void ff_init_long_region(MPADecodeContext *s, GranuleDef *g, int ra1, int ra2){ int l; g->region_size[0] = band_index_long[s->sample_rate_index][ra1 + 1] >> 1; /* should not overflow */ l = FFMIN(ra1 + ra2 + 2, 22); g->region_size[1] = band_index_long[s->sample_rate_index][l] >> 1; } static void ff_compute_band_indexes(MPADecodeContext *s, GranuleDef *g){ if (g->block_type == 2) { if (g->switch_point) { /* if switched mode, we handle the 36 first samples as long blocks. For 8000Hz, we handle the 48 first exponents as long blocks (XXX: check this!) 
*/ if (s->sample_rate_index <= 2) g->long_end = 8; else if (s->sample_rate_index != 8) g->long_end = 6; else g->long_end = 4; /* 8000 Hz */ g->short_start = 2 + (s->sample_rate_index != 8); } else { g->long_end = 0; g->short_start = 0; } } else { g->short_start = 13; g->long_end = 22; } } /* layer 1 unscaling */ /* n = number of bits of the mantissa minus 1 */ static inline int l1_unscale(int n, int mant, int scale_factor) { int shift, mod; int64_t val; shift = scale_factor_modshift[scale_factor]; mod = shift & 3; shift >>= 2; val = MUL64(mant + (-1 << n) + 1, scale_factor_mult[n-1][mod]); shift += n; /* NOTE: at this point, 1 <= shift >= 21 + 15 */ return (int)((val + (1LL << (shift - 1))) >> shift); } static inline int l2_unscale_group(int steps, int mant, int scale_factor) { int shift, mod, val; shift = scale_factor_modshift[scale_factor]; mod = shift & 3; shift >>= 2; val = (mant - (steps >> 1)) * scale_factor_mult2[steps >> 2][mod]; /* NOTE: at this point, 0 <= shift <= 21 */ if (shift > 0) val = (val + (1 << (shift - 1))) >> shift; return val; } /* compute value^(4/3) * 2^(exponent/4). 
It normalized to FRAC_BITS */ static inline int l3_unscale(int value, int exponent) { unsigned int m; int e; e = table_4_3_exp [4*value + (exponent&3)]; m = table_4_3_value[4*value + (exponent&3)]; e -= (exponent >> 2); assert(e>=1); if (e > 31) return 0; m = (m + (1 << (e-1))) >> e; return m; } /* all integer n^(4/3) computation code */ #define DEV_ORDER 13 #define POW_FRAC_BITS 24 #define POW_FRAC_ONE (1 << POW_FRAC_BITS) #define POW_FIX(a) ((int)((a) * POW_FRAC_ONE)) #define POW_MULL(a,b) (((int64_t)(a) * (int64_t)(b)) >> POW_FRAC_BITS) static int dev_4_3_coefs[DEV_ORDER]; #if 0 /* unused */ static int pow_mult3[3] = { POW_FIX(1.0), POW_FIX(1.25992104989487316476), POW_FIX(1.58740105196819947474), }; #endif static av_cold void int_pow_init(void) { int i, a; a = POW_FIX(1.0); for(i=0;i<DEV_ORDER;i++) { a = POW_MULL(a, POW_FIX(4.0 / 3.0) - i * POW_FIX(1.0)) / (i + 1); dev_4_3_coefs[i] = a; } } #if 0 /* unused, remove? */ /* return the mantissa and the binary exponent */ static int int_pow(int i, int *exp_ptr) { int e, er, eq, j; int a, a1; /* renormalize */ a = i; e = POW_FRAC_BITS; while (a < (1 << (POW_FRAC_BITS - 1))) { a = a << 1; e--; } a -= (1 << POW_FRAC_BITS); a1 = 0; for(j = DEV_ORDER - 1; j >= 0; j--) a1 = POW_MULL(a, dev_4_3_coefs[j] + a1); a = (1 << POW_FRAC_BITS) + a1; /* exponent compute (exact) */ e = e * 4; er = e % 3; eq = e / 3; a = POW_MULL(a, pow_mult3[er]); while (a >= 2 * POW_FRAC_ONE) { a = a >> 1; eq++; } /* convert to float */ while (a < POW_FRAC_ONE) { a = a << 1; eq--; } /* now POW_FRAC_ONE <= a < 2 * POW_FRAC_ONE */ #if POW_FRAC_BITS > FRAC_BITS a = (a + (1 << (POW_FRAC_BITS - FRAC_BITS - 1))) >> (POW_FRAC_BITS - FRAC_BITS); /* correct overflow */ if (a >= 2 * (1 << FRAC_BITS)) { a = a >> 1; eq++; } #endif *exp_ptr = eq; return a; } #endif static av_cold int decode_init(AVCodecContext * avctx) { MPADecodeContext *s = avctx->priv_data; static int init=0; int i, j, k; s->avctx = avctx; avctx->sample_fmt= OUT_FMT; s->error_recognition= 
avctx->error_recognition; if(avctx->antialias_algo != FF_AA_FLOAT) s->compute_antialias= compute_antialias_integer; else s->compute_antialias= compute_antialias_float; if (!init && !avctx->parse_only) { int offset; /* scale factors table for layer 1/2 */ for(i=0;i<64;i++) { int shift, mod; /* 1.0 (i = 3) is normalized to 2 ^ FRAC_BITS */ shift = (i / 3); mod = i % 3; scale_factor_modshift[i] = mod | (shift << 2); } /* scale factor multiply for layer 1 */ for(i=0;i<15;i++) { int n, norm; n = i + 2; norm = ((INT64_C(1) << n) * FRAC_ONE) / ((1 << n) - 1); scale_factor_mult[i][0] = MULL(FIXR(1.0 * 2.0), norm, FRAC_BITS); scale_factor_mult[i][1] = MULL(FIXR(0.7937005259 * 2.0), norm, FRAC_BITS); scale_factor_mult[i][2] = MULL(FIXR(0.6299605249 * 2.0), norm, FRAC_BITS); dprintf(avctx, "%d: norm=%x s=%x %x %x\n", i, norm, scale_factor_mult[i][0], scale_factor_mult[i][1], scale_factor_mult[i][2]); } ff_mpa_synth_init(ff_mpa_synth_window); /* huffman decode tables */ offset = 0; for(i=1;i<16;i++) { const HuffTable *h = &mpa_huff_tables[i]; int xsize, x, y; uint8_t tmp_bits [512]; uint16_t tmp_codes[512]; memset(tmp_bits , 0, sizeof(tmp_bits )); memset(tmp_codes, 0, sizeof(tmp_codes)); xsize = h->xsize; j = 0; for(x=0;x<xsize;x++) { for(y=0;y<xsize;y++){ tmp_bits [(x << 5) | y | ((x&&y)<<4)]= h->bits [j ]; tmp_codes[(x << 5) | y | ((x&&y)<<4)]= h->codes[j++]; } } /* XXX: fail test */ huff_vlc[i].table = huff_vlc_tables+offset; huff_vlc[i].table_allocated = huff_vlc_tables_sizes[i]; init_vlc(&huff_vlc[i], 7, 512, tmp_bits, 1, 1, tmp_codes, 2, 2, INIT_VLC_USE_NEW_STATIC); offset += huff_vlc_tables_sizes[i]; } assert(offset == FF_ARRAY_ELEMS(huff_vlc_tables)); offset = 0; for(i=0;i<2;i++) { huff_quad_vlc[i].table = huff_quad_vlc_tables+offset; huff_quad_vlc[i].table_allocated = huff_quad_vlc_tables_sizes[i]; init_vlc(&huff_quad_vlc[i], i == 0 ? 
7 : 4, 16, mpa_quad_bits[i], 1, 1, mpa_quad_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); offset += huff_quad_vlc_tables_sizes[i]; } assert(offset == FF_ARRAY_ELEMS(huff_quad_vlc_tables)); for(i=0;i<9;i++) { k = 0; for(j=0;j<22;j++) { band_index_long[i][j] = k; k += band_size_long[i][j]; } band_index_long[i][22] = k; } /* compute n ^ (4/3) and store it in mantissa/exp format */ int_pow_init(); mpegaudio_tableinit(); for(i=0;i<7;i++) { float f; int v; if (i != 6) { f = tan((double)i * M_PI / 12.0); v = FIXR(f / (1.0 + f)); } else { v = FIXR(1.0); } is_table[0][i] = v; is_table[1][6 - i] = v; } /* invalid values */ for(i=7;i<16;i++) is_table[0][i] = is_table[1][i] = 0.0; for(i=0;i<16;i++) { double f; int e, k; for(j=0;j<2;j++) { e = -(j + 1) * ((i + 1) >> 1); f = pow(2.0, e / 4.0); k = i & 1; is_table_lsf[j][k ^ 1][i] = FIXR(f); is_table_lsf[j][k][i] = FIXR(1.0); dprintf(avctx, "is_table_lsf %d %d: %x %x\n", i, j, is_table_lsf[j][0][i], is_table_lsf[j][1][i]); } } for(i=0;i<8;i++) { float ci, cs, ca; ci = ci_table[i]; cs = 1.0 / sqrt(1.0 + ci * ci); ca = cs * ci; csa_table[i][0] = FIXHR(cs/4); csa_table[i][1] = FIXHR(ca/4); csa_table[i][2] = FIXHR(ca/4) + FIXHR(cs/4); csa_table[i][3] = FIXHR(ca/4) - FIXHR(cs/4); csa_table_float[i][0] = cs; csa_table_float[i][1] = ca; csa_table_float[i][2] = ca + cs; csa_table_float[i][3] = ca - cs; } /* compute mdct windows */ for(i=0;i<36;i++) { for(j=0; j<4; j++){ double d; if(j==2 && i%3 != 1) continue; d= sin(M_PI * (i + 0.5) / 36.0); if(j==1){ if (i>=30) d= 0; else if(i>=24) d= sin(M_PI * (i - 18 + 0.5) / 12.0); else if(i>=18) d= 1; }else if(j==3){ if (i< 6) d= 0; else if(i< 12) d= sin(M_PI * (i - 6 + 0.5) / 12.0); else if(i< 18) d= 1; } //merge last stage of imdct into the window coefficients d*= 0.5 / cos(M_PI*(2*i + 19)/72); if(j==2) mdct_win[j][i/3] = FIXHR((d / (1<<5))); else mdct_win[j][i ] = FIXHR((d / (1<<5))); } } /* NOTE: we do frequency inversion adter the MDCT by changing the sign of the right window coefs */ 
/* decode_init() tail: derive windows 4..7 from windows 0..3 by negating
   every odd coefficient (frequency inversion for odd subbands), then mark
   the static tables as built.  ADU (MP3-on-the-fly) streams are flagged so
   the layer-3 path skips the bit-reservoir back pointer later on. */
for(j=0;j<4;j++) {
    for(i=0;i<36;i+=2) {
        mdct_win[j + 4][i] = mdct_win[j][i];
        mdct_win[j + 4][i + 1] = -mdct_win[j][i + 1];
    }
}
init = 1;
}

if (avctx->codec_id == CODEC_ID_MP3ADU)
    s->adu_mode = 1;
return 0;
}

/* tab[i][j] = 1.0 / (2.0 * cos(pi*(2*k+1) / 2^(6 - j))) */
/* cos(i*pi/64) */
/* NOTE(review): each constant is pre-divided (/2, /4, ... /32) so it fits
   the FIXHR fixed-point range; the lost scale is restored by the explicit
   shift argument handed to the BF() butterfly below. */
#define COS0_0  FIXHR(0.50060299823519630134/2)
#define COS0_1  FIXHR(0.50547095989754365998/2)
#define COS0_2  FIXHR(0.51544730992262454697/2)
#define COS0_3  FIXHR(0.53104259108978417447/2)
#define COS0_4  FIXHR(0.55310389603444452782/2)
#define COS0_5  FIXHR(0.58293496820613387367/2)
#define COS0_6  FIXHR(0.62250412303566481615/2)
#define COS0_7  FIXHR(0.67480834145500574602/2)
#define COS0_8  FIXHR(0.74453627100229844977/2)
#define COS0_9  FIXHR(0.83934964541552703873/2)
#define COS0_10 FIXHR(0.97256823786196069369/2)
#define COS0_11 FIXHR(1.16943993343288495515/4)
#define COS0_12 FIXHR(1.48416461631416627724/4)
#define COS0_13 FIXHR(2.05778100995341155085/8)
#define COS0_14 FIXHR(3.40760841846871878570/8)
#define COS0_15 FIXHR(10.19000812354805681150/32)

#define COS1_0 FIXHR(0.50241928618815570551/2)
#define COS1_1 FIXHR(0.52249861493968888062/2)
#define COS1_2 FIXHR(0.56694403481635770368/2)
#define COS1_3 FIXHR(0.64682178335999012954/2)
#define COS1_4 FIXHR(0.78815462345125022473/2)
#define COS1_5 FIXHR(1.06067768599034747134/4)
#define COS1_6 FIXHR(1.72244709823833392782/4)
#define COS1_7 FIXHR(5.10114861868916385802/16)

#define COS2_0 FIXHR(0.50979557910415916894/2)
#define COS2_1 FIXHR(0.60134488693504528054/2)
#define COS2_2 FIXHR(0.89997622313641570463/2)
#define COS2_3 FIXHR(2.56291544774150617881/8)

#define COS3_0 FIXHR(0.54119610014619698439/2)
#define COS3_1 FIXHR(1.30656296487637652785/4)

#define COS4_0 FIXHR(0.70710678118654752439/2)

/* butterfly operator: tab[a] <- a+b; tab[b] <- (a-b)*c, where the <<s
   shift undoes the pre-division of the coefficient above */
#define BF(a, b, c, s)\
{\
    tmp0 = tab[a] + tab[b];\
    tmp1 = tab[a] - tab[b];\
    tab[a] = tmp0;\
    tab[b] = MULH(tmp1<<(s), c);\
}

/* final butterfly stage plus partial output accumulation (pass 5) */
#define BF1(a, b, c, d)\
{\
    BF(a, b, COS4_0, 1);\
    BF(c, d,-COS4_0, 1);\
    tab[c] += tab[d];\
}

#define BF2(a, b, c, d)\
{\
    BF(a, b, COS4_0, 1);\
    BF(c, d,-COS4_0, 1);\
    tab[c] += tab[d];\
    tab[a] += tab[c];\
    tab[c] += tab[b];\
    tab[b] += tab[d];\
}

#define ADD(a, b) tab[a] += tab[b]

/* DCT32 without 1/sqrt(2) coef zero scaling. */
/* 32-point DCT computed in place on tab[]; results go to out[] in the
   interleaved order expected by the polyphase synthesis filter. */
static void dct32(int32_t *out, int32_t *tab)
{
    int tmp0, tmp1;

    /* pass 1 */
    BF( 0, 31, COS0_0 , 1);
    BF(15, 16, COS0_15, 5);
    /* pass 2 */
    BF( 0, 15, COS1_0 , 1);
    BF(16, 31,-COS1_0 , 1);
    /* pass 1 */
    BF( 7, 24, COS0_7 , 1);
    BF( 8, 23, COS0_8 , 1);
    /* pass 2 */
    BF( 7, 8, COS1_7 , 4);
    BF(23, 24,-COS1_7 , 4);
    /* pass 3 */
    BF( 0, 7, COS2_0 , 1);
    BF( 8, 15,-COS2_0 , 1);
    BF(16, 23, COS2_0 , 1);
    BF(24, 31,-COS2_0 , 1);
    /* pass 1 */
    BF( 3, 28, COS0_3 , 1);
    BF(12, 19, COS0_12, 2);
    /* pass 2 */
    BF( 3, 12, COS1_3 , 1);
    BF(19, 28,-COS1_3 , 1);
    /* pass 1 */
    BF( 4, 27, COS0_4 , 1);
    BF(11, 20, COS0_11, 2);
    /* pass 2 */
    BF( 4, 11, COS1_4 , 1);
    BF(20, 27,-COS1_4 , 1);
    /* pass 3 */
    BF( 3, 4, COS2_3 , 3);
    BF(11, 12,-COS2_3 , 3);
    BF(19, 20, COS2_3 , 3);
    BF(27, 28,-COS2_3 , 3);
    /* pass 4 */
    BF( 0, 3, COS3_0 , 1);
    BF( 4, 7,-COS3_0 , 1);
    BF( 8, 11, COS3_0 , 1);
    BF(12, 15,-COS3_0 , 1);
    BF(16, 19, COS3_0 , 1);
    BF(20, 23,-COS3_0 , 1);
    BF(24, 27, COS3_0 , 1);
    BF(28, 31,-COS3_0 , 1);
    /* pass 1 */
    BF( 1, 30, COS0_1 , 1);
    BF(14, 17, COS0_14, 3);
    /* pass 2 */
    BF( 1, 14, COS1_1 , 1);
    BF(17, 30,-COS1_1 , 1);
    /* pass 1 */
    BF( 6, 25, COS0_6 , 1);
    BF( 9, 22, COS0_9 , 1);
    /* pass 2 */
    BF( 6, 9, COS1_6 , 2);
    BF(22, 25,-COS1_6 , 2);
    /* pass 3 */
    BF( 1, 6, COS2_1 , 1);
    BF( 9, 14,-COS2_1 , 1);
    BF(17, 22, COS2_1 , 1);
    BF(25, 30,-COS2_1 , 1);
    /* pass 1 */
    BF( 2, 29, COS0_2 , 1);
    BF(13, 18, COS0_13, 3);
    /* pass 2 */
    BF( 2, 13, COS1_2 , 1);
    BF(18, 29,-COS1_2 , 1);
    /* pass 1 */
    BF( 5, 26, COS0_5 , 1);
    BF(10, 21, COS0_10, 1);
    /* pass 2 */
    BF( 5, 10, COS1_5 , 2);
    BF(21, 26,-COS1_5 , 2);
    /* pass 3 */
    BF( 2, 5, COS2_2 , 1);
    BF(10, 13,-COS2_2 , 1);
    BF(18, 21, COS2_2 , 1);
    BF(26, 29,-COS2_2 , 1);
    /* pass 4 */
    BF( 1, 2, COS3_1 , 2);
    BF( 5, 6,-COS3_1 , 2);
    BF( 9, 10, COS3_1 , 2);
    BF(13, 14,-COS3_1 , 2);
    BF(17, 18, COS3_1 , 2);
    BF(21, 22,-COS3_1 , 2);
    BF(25, 26, COS3_1 , 2);
    BF(29, 30,-COS3_1 , 2);

    /* pass 5 */
    BF1( 0, 1, 2, 3);
    BF2( 4, 5, 6, 7);
    BF1( 8, 9, 10, 11);
    BF2(12, 13, 14, 15);
    BF1(16, 17, 18, 19);
    BF2(20, 21, 22, 23);
    BF1(24, 25, 26, 27);
    BF2(28, 29, 30, 31);

    /* pass 6 */
    ADD( 8, 12);
    ADD(12, 10);
    ADD(10, 14);
    ADD(14, 9);
    ADD( 9, 13);
    ADD(13, 11);
    ADD(11, 15);

    out[ 0] = tab[0];
    out[16] = tab[1];
    out[ 8] = tab[2];
    out[24] = tab[3];
    out[ 4] = tab[4];
    out[20] = tab[5];
    out[12] = tab[6];
    out[28] = tab[7];
    out[ 2] = tab[8];
    out[18] = tab[9];
    out[10] = tab[10];
    out[26] = tab[11];
    out[ 6] = tab[12];
    out[22] = tab[13];
    out[14] = tab[14];
    out[30] = tab[15];

    ADD(24, 28);
    ADD(28, 26);
    ADD(26, 30);
    ADD(30, 25);
    ADD(25, 29);
    ADD(29, 27);
    ADD(27, 31);

    out[ 1] = tab[16] + tab[24];
    out[17] = tab[17] + tab[25];
    out[ 9] = tab[18] + tab[26];
    out[25] = tab[19] + tab[27];
    out[ 5] = tab[20] + tab[28];
    out[21] = tab[21] + tab[29];
    out[13] = tab[22] + tab[30];
    out[29] = tab[23] + tab[31];
    out[ 3] = tab[24] + tab[20];
    out[19] = tab[25] + tab[21];
    out[11] = tab[26] + tab[22];
    out[27] = tab[27] + tab[23];
    out[ 7] = tab[28] + tab[18];
    out[23] = tab[29] + tab[19];
    out[15] = tab[30] + tab[17];
    out[31] = tab[31];
}

#if FRAC_BITS <= 15

/* Convert the accumulator to a clipped output sample; the fractional
   remainder below OUT_SHIFT stays in *sum as dither for the next sample. */
static inline int round_sample(int *sum)
{
    int sum1;
    sum1 = (*sum) >> OUT_SHIFT;
    *sum &= (1<<OUT_SHIFT)-1;
    return av_clip(sum1, OUT_MIN, OUT_MAX);
}

/* signed 16x16 -> 32 multiply add accumulate */
#define MACS(rt, ra, rb) MAC16(rt, ra, rb)

/* signed 16x16 -> 32 multiply */
#define MULS(ra, rb) MUL16(ra, rb)

#define MLSS(rt, ra, rb) MLS16(rt, ra, rb)

#else

/* 64-bit accumulator variant for FRAC_BITS > 15 */
static inline int round_sample(int64_t *sum)
{
    int sum1;
    sum1 = (int)((*sum) >> OUT_SHIFT);
    *sum &= (1<<OUT_SHIFT)-1;
    return av_clip(sum1, OUT_MIN, OUT_MAX);
}

# define MULS(ra, rb) MUL64(ra, rb)
# define MACS(rt, ra, rb) MAC64(rt, ra, rb)
# define MLSS(rt, ra, rb) MLS64(rt, ra, rb)
#endif

/* accumulate 8 window taps spaced 64 entries apart:
   sum op= w[i*64] * p[i*64] for i = 0..7 */
#define SUM8(op, sum, w, p) \
{ \
    op(sum, (w)[0 * 64], (p)[0 * 64]); \
    op(sum, (w)[1
* 64]); \
    op(sum, (w)[2 * 64], (p)[2 * 64]); \
    op(sum, (w)[3 * 64], (p)[3 * 64]); \
    op(sum, (w)[4 * 64], (p)[4 * 64]); \
    op(sum, (w)[5 * 64], (p)[5 * 64]); \
    op(sum, (w)[6 * 64], (p)[6 * 64]); \
    op(sum, (w)[7 * 64], (p)[7 * 64]); \
}

/* like SUM8, but feeds each sample p[i*64] into two accumulators with two
   separate window pointers, producing two output samples per pass */
#define SUM8P2(sum1, op1, sum2, op2, w1, w2, p) \
{ \
    int tmp;\
    tmp = p[0 * 64];\
    op1(sum1, (w1)[0 * 64], tmp);\
    op2(sum2, (w2)[0 * 64], tmp);\
    tmp = p[1 * 64];\
    op1(sum1, (w1)[1 * 64], tmp);\
    op2(sum2, (w2)[1 * 64], tmp);\
    tmp = p[2 * 64];\
    op1(sum1, (w1)[2 * 64], tmp);\
    op2(sum2, (w2)[2 * 64], tmp);\
    tmp = p[3 * 64];\
    op1(sum1, (w1)[3 * 64], tmp);\
    op2(sum2, (w2)[3 * 64], tmp);\
    tmp = p[4 * 64];\
    op1(sum1, (w1)[4 * 64], tmp);\
    op2(sum2, (w2)[4 * 64], tmp);\
    tmp = p[5 * 64];\
    op1(sum1, (w1)[5 * 64], tmp);\
    op2(sum2, (w2)[5 * 64], tmp);\
    tmp = p[6 * 64];\
    op1(sum1, (w1)[6 * 64], tmp);\
    op2(sum2, (w2)[6 * 64], tmp);\
    tmp = p[7 * 64];\
    op1(sum1, (w1)[7 * 64], tmp);\
    op2(sum2, (w2)[7 * 64], tmp);\
}

/* Expand the 257-entry half window ff_mpa_enwindow[] into the full
   512-entry synthesis window: rescale to WFRAC_BITS if needed, then mirror
   with a sign flip on all entries except the multiples of 64. */
void av_cold ff_mpa_synth_init(MPA_INT *window)
{
    int i;

    /* max = 18760, max sum over all 16 coefs : 44736 */
    for(i=0;i<257;i++) {
        int v;
        v = ff_mpa_enwindow[i];
#if WFRAC_BITS < 16
        v = (v + (1 << (16 - WFRAC_BITS - 1))) >> (16 - WFRAC_BITS);
#endif
        window[i] = v;
        if ((i & 63) != 0)
            v = -v;
        if (i != 0)
            window[512 - i] = v;
    }
}

/* 32 sub band synthesis filter. Input: 32 sub band samples, Output: 32
   samples. */
/* XXX: optimize by avoiding ring buffer usage */
void ff_mpa_synth_filter(MPA_INT *synth_buf_ptr, int *synth_buf_offset,
                         MPA_INT *window, int *dither_state,
                         OUT_INT *samples, int incr,
                         int32_t sb_samples[SBLIMIT])
{
    register MPA_INT *synth_buf;
    register const MPA_INT *w, *w2, *p;
    int j, offset;
    OUT_INT *samples2;
#if FRAC_BITS <= 15
    int32_t tmp[32];
    int sum, sum2;
#else
    int64_t sum, sum2;
#endif

    offset = *synth_buf_offset;
    synth_buf = synth_buf_ptr + offset;

#if FRAC_BITS <= 15
    dct32(tmp, sb_samples);
    for(j=0;j<32;j++) {
        /* NOTE: can cause a loss in precision if very high amplitude sound */
        synth_buf[j] = av_clip_int16(tmp[j]);
    }
#else
    dct32(synth_buf, sb_samples);
#endif

    /* copy to avoid wrap */
    memcpy(synth_buf + 512, synth_buf, 32 * sizeof(MPA_INT));

    samples2 = samples + 31 * incr;
    w = window;
    w2 = window + 31;

    /* first sample: single-sided sum; *dither_state carries the rounding
       remainder kept by round_sample() across calls */
    sum = *dither_state;
    p = synth_buf + 16;
    SUM8(MACS, sum, w, p);
    p = synth_buf + 48;
    SUM8(MLSS, sum, w + 32, p);
    *samples = round_sample(&sum);
    samples += incr;
    w++;

    /* we calculate two samples at the same time to avoid one memory
       access per two sample */
    for(j=1;j<16;j++) {
        sum2 = 0;
        p = synth_buf + 16 + j;
        SUM8P2(sum, MACS, sum2, MLSS, w, w2, p);
        p = synth_buf + 48 - j;
        SUM8P2(sum, MLSS, sum2, MLSS, w + 32, w2 + 32, p);

        *samples = round_sample(&sum);
        samples += incr;
        sum += sum2;
        *samples2 = round_sample(&sum);
        samples2 -= incr;
        w++;
        w2--;
    }

    /* middle sample */
    p = synth_buf + 32;
    SUM8(MLSS, sum, w + 32, p);
    *samples = round_sample(&sum);
    *dither_state= sum;

    /* advance the ring buffer */
    offset = (offset - 32) & 511;
    *synth_buf_offset = offset;
}

#define C3 FIXHR(0.86602540378443864676/2)

/* 0.5 / cos(pi*(2*i+1)/36) */
static const int icos36[9] = {
    FIXR(0.50190991877167369479),
    FIXR(0.51763809020504152469), //0
    FIXR(0.55168895948124587824),
    FIXR(0.61038729438072803416),
    FIXR(0.70710678118654752439), //1
    FIXR(0.87172339781054900991),
    FIXR(1.18310079157624925896),
    FIXR(1.93185165257813657349), //2
    FIXR(5.73685662283492756461),
};

/* 0.5 / cos(pi*(2*i+1)/36) */
/* NOTE(review): FIXHR variant of icos36[]; larger entries are pre-divided
   further (/4) to stay in range, compensated by the 2* factors at the call
   sites in imdct12()/imdct36(). */
static const int icos36h[9] = {
    FIXHR(0.50190991877167369479/2),
    FIXHR(0.51763809020504152469/2), //0
    FIXHR(0.55168895948124587824/2),
    FIXHR(0.61038729438072803416/2),
    FIXHR(0.70710678118654752439/2), //1
    FIXHR(0.87172339781054900991/2),
    FIXHR(1.18310079157624925896/4),
    FIXHR(1.93185165257813657349/4), //2
//    FIXHR(5.73685662283492756461),
};

/* 12 points IMDCT. We compute it "by hand" by factorizing obvious cases. */
static void imdct12(int *out, int *in)
{
    int in0, in1, in2, in3, in4, in5, t1, t2;

    /* inputs are read with stride 3 (the three interleaved short windows) */
    in0= in[0*3];
    in1= in[1*3] + in[0*3];
    in2= in[2*3] + in[1*3];
    in3= in[3*3] + in[2*3];
    in4= in[4*3] + in[3*3];
    in5= in[5*3] + in[4*3];
    in5 += in3;
    in3 += in1;

    in2= MULH(2*in2, C3);
    in3= MULH(4*in3, C3);

    t1 = in0 - in4;
    t2 = MULH(2*(in1 - in5), icos36h[4]);

    out[ 7]= out[10]= t1 + t2;
    out[ 1]= out[ 4]= t1 - t2;

    in0 += in4>>1;
    in4 = in0 + in2;
    in5 += 2*in1;
    in1 = MULH(in5 + in3, icos36h[1]);
    out[ 8]= out[ 9]= in4 + in1;
    out[ 2]= out[ 3]= in4 - in1;

    in0 -= in2;
    in5 = MULH(2*(in5 - in3), icos36h[7]);
    out[ 0]= out[ 5]= in0 - in5;
    out[ 6]= out[11]= in0 + in5;
}

/* cos(pi*i/18) */
/* NOTE(review): C3 below re-#defines the earlier C3 with an identical
   value, so the redefinition is benign. */
#define C1 FIXHR(0.98480775301220805936/2)
#define C2 FIXHR(0.93969262078590838405/2)
#define C3 FIXHR(0.86602540378443864676/2)
#define C4 FIXHR(0.76604444311897803520/2)
#define C5 FIXHR(0.64278760968653932632/2)
#define C6 FIXHR(0.5/2)
#define C7 FIXHR(0.34202014332566873304/2)
#define C8 FIXHR(0.17364817766693034885/2)

/* using Lee like decomposition followed by hand coded 9 points DCT */
/* 36-point IMDCT with windowing and overlap: 'out' receives the windowed
   first half added to the previous overlap in 'buf'; the second half is
   stored back into 'buf' for the next frame. */
static void imdct36(int *out, int *buf, int *in, int *win)
{
    int i, j, t0, t1, t2, t3, s0, s1, s2, s3;
    int tmp[18], *tmp1, *in1;

    for(i=17;i>=1;i--)
        in[i] += in[i-1];
    for(i=17;i>=3;i-=2)
        in[i] += in[i-2];

    for(j=0;j<2;j++) {
        tmp1 = tmp + j;
        in1 = in + j;
#if 0
//more accurate but slower
        int64_t t0, t1, t2, t3;
        t2 = in1[2*4] + in1[2*8] - in1[2*2];

        t3 = (in1[2*0] + (int64_t)(in1[2*6]>>1))<<32;
        t1 = in1[2*0] - in1[2*6];
        tmp1[ 6] = t1 - (t2>>1);
        tmp1[16] = t1 + t2;

        t0 = MUL64(2*(in1[2*2] + in1[2*4]),    C2);
        t1 = MUL64(   in1[2*4] - in1[2*8] , -2*C8);
        t2 = MUL64(2*(in1[2*2] + in1[2*8]),   -C4);

        tmp1[10] = (t3 - t0 - t2) >> 32;
        tmp1[ 2] = (t3 + t0 + t1) >> 32;
        tmp1[14] = (t3 + t2 - t1) >> 32;

        tmp1[ 4] = MULH(2*(in1[2*5] + in1[2*7] - in1[2*1]), -C3);
        t2 = MUL64(2*(in1[2*1] + in1[2*5]),    C1);
        t3 = MUL64(   in1[2*5] - in1[2*7] , -2*C7);
        t0 = MUL64(2*in1[2*3], C3);

        t1 = MUL64(2*(in1[2*1] + in1[2*7]),   -C5);

        tmp1[ 0] = (t2 + t3 + t0) >> 32;
        tmp1[12] = (t2 + t1 - t0) >> 32;
        tmp1[ 8] = (t3 - t1 - t0) >> 32;
#else
        t2 = in1[2*4] + in1[2*8] - in1[2*2];

        t3 = in1[2*0] + (in1[2*6]>>1);
        t1 = in1[2*0] - in1[2*6];
        tmp1[ 6] = t1 - (t2>>1);
        tmp1[16] = t1 + t2;

        t0 = MULH(2*(in1[2*2] + in1[2*4]),    C2);
        t1 = MULH(   in1[2*4] - in1[2*8] , -2*C8);
        t2 = MULH(2*(in1[2*2] + in1[2*8]),   -C4);

        tmp1[10] = t3 - t0 - t2;
        tmp1[ 2] = t3 + t0 + t1;
        tmp1[14] = t3 + t2 - t1;

        tmp1[ 4] = MULH(2*(in1[2*5] + in1[2*7] - in1[2*1]), -C3);
        t2 = MULH(2*(in1[2*1] + in1[2*5]),    C1);
        t3 = MULH(   in1[2*5] - in1[2*7] , -2*C7);
        t0 = MULH(2*in1[2*3], C3);

        t1 = MULH(2*(in1[2*1] + in1[2*7]),   -C5);

        tmp1[ 0] = t2 + t3 + t0;
        tmp1[12] = t2 + t1 - t0;
        tmp1[ 8] = t3 - t1 - t0;
#endif
    }

    /* recombine the 9-point DCT outputs, window, and overlap-add */
    i = 0;
    for(j=0;j<4;j++) {
        t0 = tmp[i];
        t1 = tmp[i + 2];
        s0 = t1 + t0;
        s2 = t1 - t0;

        t2 = tmp[i + 1];
        t3 = tmp[i + 3];
        s1 = MULH(2*(t3 + t2), icos36h[j]);
        s3 = MULL(t3 - t2, icos36[8 - j], FRAC_BITS);

        t0 = s0 + s1;
        t1 = s0 - s1;
        out[(9 + j)*SBLIMIT] = MULH(t1, win[9 + j]) + buf[9 + j];
        out[(8 - j)*SBLIMIT] = MULH(t1, win[8 - j]) + buf[8 - j];
        buf[9 + j] = MULH(t0, win[18 + 9 + j]);
        buf[8 - j] = MULH(t0, win[18 + 8 - j]);

        t0 = s2 + s3;
        t1 = s2 - s3;
        out[(9 + 8 - j)*SBLIMIT] = MULH(t1, win[9 + 8 - j]) + buf[9 + 8 - j];
        out[(        j)*SBLIMIT] = MULH(t1, win[        j]) + buf[        j];
        buf[9 + 8 - j] = MULH(t0, win[18 + 9 + 8 - j]);
        buf[ + j] = MULH(t0, win[18 + j]);
        i += 4;
    }

    s0 = tmp[16];
    s1 = MULH(2*tmp[17], icos36h[4]);
    t0 = s0 + s1;
    t1 = s0 - s1;
    out[(9 + 4)*SBLIMIT] = MULH(t1, win[9 + 4]) + buf[9 + 4];
    out[(8 - 4)*SBLIMIT] = MULH(t1, win[8 - 4]) + buf[8 - 4];
    buf[9 + 4] = MULH(t0, win[18 + 9 + 4]);
    buf[8 - 4] = MULH(t0,
win[18 + 8 - 4]); }

/* return the number of decoded frames */
/* Layer I decoder: 4-bit allocation per subband, 6-bit scale factors,
   12 samples per subband.  In joint stereo only subbands below 'bound'
   carry two channels; above it a single mantissa is shared and unscaled
   once per channel.  Returns 12 (samples decoded per subband). */
static int mp_decode_layer1(MPADecodeContext *s)
{
    int bound, i, v, n, ch, j, mant;
    uint8_t allocation[MPA_MAX_CHANNELS][SBLIMIT];
    uint8_t scale_factors[MPA_MAX_CHANNELS][SBLIMIT];

    if (s->mode == MPA_JSTEREO)
        bound = (s->mode_ext + 1) * 4;
    else
        bound = SBLIMIT;

    /* allocation bits */
    for(i=0;i<bound;i++) {
        for(ch=0;ch<s->nb_channels;ch++) {
            allocation[ch][i] = get_bits(&s->gb, 4);
        }
    }
    for(i=bound;i<SBLIMIT;i++) {
        allocation[0][i] = get_bits(&s->gb, 4);
    }

    /* scale factors */
    for(i=0;i<bound;i++) {
        for(ch=0;ch<s->nb_channels;ch++) {
            if (allocation[ch][i])
                scale_factors[ch][i] = get_bits(&s->gb, 6);
        }
    }
    for(i=bound;i<SBLIMIT;i++) {
        if (allocation[0][i]) {
            scale_factors[0][i] = get_bits(&s->gb, 6);
            scale_factors[1][i] = get_bits(&s->gb, 6);
        }
    }

    /* compute samples */
    for(j=0;j<12;j++) {
        for(i=0;i<bound;i++) {
            for(ch=0;ch<s->nb_channels;ch++) {
                n = allocation[ch][i];
                if (n) {
                    mant = get_bits(&s->gb, n + 1);
                    v = l1_unscale(n, mant, scale_factors[ch][i]);
                } else {
                    v = 0;
                }
                s->sb_samples[ch][j][i] = v;
            }
        }
        for(i=bound;i<SBLIMIT;i++) {
            n = allocation[0][i];
            if (n) {
                /* one shared mantissa, per-channel scale factors */
                mant = get_bits(&s->gb, n + 1);
                v = l1_unscale(n, mant, scale_factors[0][i]);
                s->sb_samples[0][j][i] = v;
                v = l1_unscale(n, mant, scale_factors[1][i]);
                s->sb_samples[1][j][i] = v;
            } else {
                s->sb_samples[0][j][i] = 0;
                s->sb_samples[1][j][i] = 0;
            }
        }
    }
    return 12;
}

/* Layer II decoder: allocation field widths come from the selected alloc
   table, scale factors may be shared between granules via a 2-bit scale
   code, and small quantizers pack three samples into a single grouped
   code word (negative entries in ff_mpa_quant_bits).  Returns 36. */
static int mp_decode_layer2(MPADecodeContext *s)
{
    int sblimit; /* number of used subbands */
    const unsigned char *alloc_table;
    int table, bit_alloc_bits, i, j, ch, bound, v;
    unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT];
    unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT];
    unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3], *sf;
    int scale, qindex, bits, steps, k, l, m, b;

    /* select decoding table */
    table = ff_mpa_l2_select_table(s->bit_rate / 1000, s->nb_channels,
                                   s->sample_rate, s->lsf);
    sblimit = ff_mpa_sblimit_table[table];
    alloc_table = ff_mpa_alloc_tables[table];

    if (s->mode == MPA_JSTEREO)
        bound = (s->mode_ext + 1) * 4;
    else
        bound = sblimit;

    dprintf(s->avctx, "bound=%d sblimit=%d\n", bound, sblimit);

    /* sanity check */
    if( bound > sblimit ) bound = sblimit;

    /* parse bit allocation */
    j = 0;
    for(i=0;i<bound;i++) {
        bit_alloc_bits = alloc_table[j];
        for(ch=0;ch<s->nb_channels;ch++) {
            bit_alloc[ch][i] = get_bits(&s->gb, bit_alloc_bits);
        }
        j += 1 << bit_alloc_bits;
    }
    for(i=bound;i<sblimit;i++) {
        bit_alloc_bits = alloc_table[j];
        v = get_bits(&s->gb, bit_alloc_bits);
        bit_alloc[0][i] = v;
        bit_alloc[1][i] = v;
        j += 1 << bit_alloc_bits;
    }

    /* scale codes */
    for(i=0;i<sblimit;i++) {
        for(ch=0;ch<s->nb_channels;ch++) {
            if (bit_alloc[ch][i])
                scale_code[ch][i] = get_bits(&s->gb, 2);
        }
    }

    /* scale factors: the scale code selects which of the three granule
       thirds share a transmitted value */
    for(i=0;i<sblimit;i++) {
        for(ch=0;ch<s->nb_channels;ch++) {
            if (bit_alloc[ch][i]) {
                sf = scale_factors[ch][i];
                switch(scale_code[ch][i]) {
                default:
                case 0:
                    sf[0] = get_bits(&s->gb, 6);
                    sf[1] = get_bits(&s->gb, 6);
                    sf[2] = get_bits(&s->gb, 6);
                    break;
                case 2:
                    sf[0] = get_bits(&s->gb, 6);
                    sf[1] = sf[0];
                    sf[2] = sf[0];
                    break;
                case 1:
                    sf[0] = get_bits(&s->gb, 6);
                    sf[2] = get_bits(&s->gb, 6);
                    sf[1] = sf[0];
                    break;
                case 3:
                    sf[0] = get_bits(&s->gb, 6);
                    sf[2] = get_bits(&s->gb, 6);
                    sf[1] = sf[2];
                    break;
                }
            }
        }
    }

    /* samples */
    for(k=0;k<3;k++) {
        for(l=0;l<12;l+=3) {
            j = 0;
            for(i=0;i<bound;i++) {
                bit_alloc_bits = alloc_table[j];
                for(ch=0;ch<s->nb_channels;ch++) {
                    b = bit_alloc[ch][i];
                    if (b) {
                        scale = scale_factors[ch][i][k];
                        qindex = alloc_table[j+b];
                        bits = ff_mpa_quant_bits[qindex];
                        if (bits < 0) {
                            /* 3 values at the same time */
                            v = get_bits(&s->gb, -bits);
                            steps = ff_mpa_quant_steps[qindex];
                            s->sb_samples[ch][k * 12 + l + 0][i] =
                                l2_unscale_group(steps, v % steps, scale);
                            v = v / steps;
                            s->sb_samples[ch][k * 12 + l + 1][i] =
                                l2_unscale_group(steps, v % steps, scale);
                            v = v / steps;
                            s->sb_samples[ch][k * 12 + l + 2][i] =
                                l2_unscale_group(steps, v, scale);
                        } else {
                            for(m=0;m<3;m++) {
                                v = get_bits(&s->gb, bits);
                                v = l1_unscale(bits - 1, v, scale);
                                s->sb_samples[ch][k * 12 + l + m][i] = v;
                            }
                        }
                    } else {
                        s->sb_samples[ch][k * 12 + l + 0][i] = 0;
                        s->sb_samples[ch][k * 12 + l + 1][i] = 0;
                        s->sb_samples[ch][k * 12 + l + 2][i] = 0;
                    }
                }
                /* next subband in alloc table */
                j += 1 << bit_alloc_bits;
            }
            /* XXX: find a way to avoid this duplication of code */
            for(i=bound;i<sblimit;i++) {
                bit_alloc_bits = alloc_table[j];
                b = bit_alloc[0][i];
                if (b) {
                    int mant, scale0, scale1;
                    scale0 = scale_factors[0][i][k];
                    scale1 = scale_factors[1][i][k];
                    qindex = alloc_table[j+b];
                    bits = ff_mpa_quant_bits[qindex];
                    if (bits < 0) {
                        /* 3 values at the same time */
                        v = get_bits(&s->gb, -bits);
                        steps = ff_mpa_quant_steps[qindex];
                        mant = v % steps;
                        v = v / steps;
                        s->sb_samples[0][k * 12 + l + 0][i] =
                            l2_unscale_group(steps, mant, scale0);
                        s->sb_samples[1][k * 12 + l + 0][i] =
                            l2_unscale_group(steps, mant, scale1);
                        mant = v % steps;
                        v = v / steps;
                        s->sb_samples[0][k * 12 + l + 1][i] =
                            l2_unscale_group(steps, mant, scale0);
                        s->sb_samples[1][k * 12 + l + 1][i] =
                            l2_unscale_group(steps, mant, scale1);
                        s->sb_samples[0][k * 12 + l + 2][i] =
                            l2_unscale_group(steps, v, scale0);
                        s->sb_samples[1][k * 12 + l + 2][i] =
                            l2_unscale_group(steps, v, scale1);
                    } else {
                        for(m=0;m<3;m++) {
                            mant = get_bits(&s->gb, bits);
                            s->sb_samples[0][k * 12 + l + m][i] =
                                l1_unscale(bits - 1, mant, scale0);
                            s->sb_samples[1][k * 12 + l + m][i] =
                                l1_unscale(bits - 1, mant, scale1);
                        }
                    }
                } else {
                    s->sb_samples[0][k * 12 + l + 0][i] = 0;
                    s->sb_samples[0][k * 12 + l + 1][i] = 0;
                    s->sb_samples[0][k * 12 + l + 2][i] = 0;
                    s->sb_samples[1][k * 12 + l + 0][i] = 0;
                    s->sb_samples[1][k * 12 + l + 1][i] = 0;
                    s->sb_samples[1][k * 12 + l + 2][i] = 0;
                }
                /* next subband in alloc table */
                j += 1 << bit_alloc_bits;
            }
            /* fill remaining samples to zero */
            for(i=sblimit;i<SBLIMIT;i++) {
                for(ch=0;ch<s->nb_channels;ch++) {
                    s->sb_samples[ch][k * 12 + l + 0][i] = 0;
                    s->sb_samples[ch][k * 12 + l + 1][i] = 0;
                    s->sb_samples[ch][k * 12 + l + 2][i] = 0;
                }
            }
        }
    }
    return 3 * 12;
}

/* split 'sf' into base-n digits; the multiply/shift forms are exact
   integer divisions by 3, 5 and 6 for the LSF scalefac_compress ranges */
#define SPLIT(dst,sf,n)\
if(n==3){\
    int m= (sf*171)>>9;\
    dst= sf - 3*m;\
    sf=m;\
}else if(n==4){\
    dst= sf&3;\
    sf>>=2;\
}else if(n==5){\
    int m= (sf*205)>>10;\
    dst= sf - 5*m;\
    sf=m;\
}else if(n==6){\
    int m= (sf*171)>>10;\
    dst= sf - 6*m;\
    sf=m;\
}else{\
    dst=0;\
}

/* unpack an LSF scalefac_compress field into up to four slen values */
static av_always_inline void lsf_sf_expand(int *slen,
                                           int sf, int n1, int n2, int n3)
{
    SPLIT(slen[3], sf, n3)
    SPLIT(slen[2], sf, n2)
    SPLIT(slen[1], sf, n1)
    slen[0] = sf;
}

/* Turn the decoded scale factors into one exponent per spectral
   coefficient: global gain, preflag/pretab, scalefac_scale and (for short
   blocks) the subblock gains are folded together with a +400 bias. */
static void exponents_from_scale_factors(MPADecodeContext *s,
                                         GranuleDef *g,
                                         int16_t *exponents)
{
    const uint8_t *bstab, *pretab;
    int len, i, j, k, l, v0, shift, gain, gains[3];
    int16_t *exp_ptr;

    exp_ptr = exponents;
    gain = g->global_gain - 210;
    shift = g->scalefac_scale + 1;

    bstab = band_size_long[s->sample_rate_index];
    pretab = mpa_pretab[g->preflag];
    for(i=0;i<g->long_end;i++) {
        v0 = gain - ((g->scale_factors[i] + pretab[i]) << shift) + 400;
        len = bstab[i];
        for(j=len;j>0;j--)
            *exp_ptr++ = v0;
    }

    if (g->short_start < 13) {
        bstab = band_size_short[s->sample_rate_index];
        gains[0] = gain - (g->subblock_gain[0] << 3);
        gains[1] = gain - (g->subblock_gain[1] << 3);
        gains[2] = gain - (g->subblock_gain[2] << 3);
        k = g->long_end;
        for(i=g->short_start;i<13;i++) {
            len = bstab[i];
            for(l=0;l<3;l++) {
                v0 = gains[l] - (g->scale_factors[k++] << shift) + 400;
                for(j=len;j>0;j--)
                    *exp_ptr++ = v0;
            }
        }
    }
}

/* handle n = 0 too */
static inline int get_bitsz(GetBitContext *s, int n)
{
    if (n == 0)
        return 0;
    else
        return get_bits(s, n);
}

/* When the reader runs past the end of the bit-reservoir buffer, switch
   back to the main frame bitstream and rebase pos/end_pos/end_pos2. */
static void switch_buffer(MPADecodeContext *s,
                          int *pos, int *end_pos, int *end_pos2){
    if(s->in_gb.buffer && *pos >= s->gb.size_in_bits){
        s->gb= s->in_gb;
        s->in_gb.buffer=NULL;
        assert((get_bits_count(&s->gb) & 7) == 0);
        skip_bits_long(&s->gb, *pos - *end_pos);
        *end_pos2= *end_pos= *end_pos2 + get_bits_count(&s->gb) - *pos;
        *pos= get_bits_count(&s->gb);
    }
}

/* Decode the layer-3 spectral data for one granule into g->sb_hybrid:
   big-values region (pair codes with linbits escapes), then count1 region
   (quadruple codes), then zero the remainder.  Handles the transition out
   of the bit reservoir and several encoder size bugs. */
static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
                          int16_t *exponents, int end_pos2)
{
    int s_index;
    int i;
    int last_pos, bits_left;
    VLC *vlc;
    int end_pos= FFMIN(end_pos2, s->gb.size_in_bits);

    /* low frequencies (called big values) */
    s_index = 0;
    for(i=0;i<3;i++) {
        int j, k, l, linbits;
        j = g->region_size[i];
        if (j == 0)
            continue;
        /* select vlc table */
        k = g->table_select[i];
        l = mpa_huff_data[k][0];
        linbits = mpa_huff_data[k][1];
        vlc = &huff_vlc[l];

        if(!l){
            /* table 0: the whole region decodes to zeros */
            memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*2*j);
            s_index += 2*j;
            continue;
        }

        /* read huffcode and compute each couple */
        for(;j>0;j--) {
            int exponent, x, y, v;
            int pos= get_bits_count(&s->gb);

            if (pos >= end_pos){
//                av_log(NULL, AV_LOG_ERROR, "pos: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
                switch_buffer(s, &pos, &end_pos, &end_pos2);
//                av_log(NULL, AV_LOG_ERROR, "new pos: %d %d\n", pos, end_pos);
                if(pos >= end_pos)
                    break;
            }
            y = get_vlc2(&s->gb, vlc->table, 7, 3);

            if(!y){
                g->sb_hybrid[s_index  ] =
                g->sb_hybrid[s_index+1] = 0;
                s_index += 2;
                continue;
            }

            exponent= exponents[s_index];

            /* NOTE(review): x is printed here before its first assignment
               below; only visible when DEBUG logging is compiled in. */
            dprintf(s->avctx, "region=%d n=%d x=%d y=%d exp=%d\n",
                    i, g->region_size[i] - j, x, y, exponent);
            if(y&16){
                /* bit 4 set: both of the pair may be non-zero */
                x = y >> 5;
                y = y & 0x0f;
                if (x < 15){
                    v = expval_table[ exponent ][ x ];
//                      v = expval_table[ (exponent&3) ][ x ] >> FFMIN(0 - (exponent>>2), 31);
                }else{
                    x += get_bitsz(&s->gb, linbits);
                    v = l3_unscale(x, exponent);
                }
                if (get_bits1(&s->gb))
                    v = -v;
                g->sb_hybrid[s_index] = v;
                if (y < 15){
                    v = expval_table[ exponent ][ y ];
                }else{
                    y += get_bitsz(&s->gb, linbits);
                    v = l3_unscale(y, exponent);
                }
                if (get_bits1(&s->gb))
                    v = -v;
                g->sb_hybrid[s_index+1] = v;
            }else{
                /* exactly one of the pair is non-zero */
                x = y >> 5;
                y = y & 0x0f;
                x += y;
                if (x < 15){
                    v = expval_table[ exponent ][ x ];
                }else{
                    x += get_bitsz(&s->gb, linbits);
                    v = l3_unscale(x, exponent);
                }
                if (get_bits1(&s->gb))
                    v = -v;
                g->sb_hybrid[s_index+!!y] = v;
                g->sb_hybrid[s_index+ !y] = 0;
            }
            s_index+=2;
        }
    }

    /* high frequencies */
    vlc = &huff_quad_vlc[g->count1table_select];
    last_pos=0;
    while (s_index <= 572) {
        int pos, code;
        pos = get_bits_count(&s->gb);
        if (pos >= end_pos) {
            if (pos > end_pos2 && last_pos){
                /* some encoders generate an incorrect size for this part.
                   We must go back into the data */
                s_index -= 4;
                skip_bits_long(&s->gb, last_pos - pos);
                av_log(s->avctx, AV_LOG_INFO, "overread, skip %d enddists: %d %d\n", last_pos - pos, end_pos-pos, end_pos2-pos);
                if(s->error_recognition >= FF_ER_COMPLIANT)
                    s_index=0;
                break;
            }
//            av_log(NULL, AV_LOG_ERROR, "pos2: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
            switch_buffer(s, &pos, &end_pos, &end_pos2);
//            av_log(NULL, AV_LOG_ERROR, "new pos2: %d %d %d\n", pos, end_pos, s_index);
            if(pos >= end_pos)
                break;
        }
        last_pos= pos;

        code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1);
        dprintf(s->avctx, "t=%d code=%d\n", g->count1table_select, code);
        g->sb_hybrid[s_index+0]=
        g->sb_hybrid[s_index+1]=
        g->sb_hybrid[s_index+2]=
        g->sb_hybrid[s_index+3]= 0;
        /* each set bit of 'code' selects one of the 4 positions to fill
           with +/- the exponent-scaled unit value */
        while(code){
            static const int idxtab[16]={3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0};
            int v;
            int pos= s_index+idxtab[code];
            code ^= 8>>idxtab[code];
            v = exp_table[ exponents[pos] ];
//            v = exp_table[ (exponents[pos]&3) ] >> FFMIN(0 - (exponents[pos]>>2), 31);
            if(get_bits1(&s->gb))
                v = -v;
            g->sb_hybrid[pos] = v;
        }
        s_index+=4;
    }
    /* skip extension bits */
    bits_left = end_pos2 - get_bits_count(&s->gb);
//av_log(NULL, AV_LOG_ERROR, "left:%d buf:%p\n", bits_left, s->in_gb.buffer);
    if (bits_left < 0 && s->error_recognition >= FF_ER_COMPLIANT) {
        av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
        s_index=0;
    }else if(bits_left > 0 && s->error_recognition >= FF_ER_AGGRESSIVE){
        av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
        s_index=0;
    }
    memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*(576 - s_index));
    skip_bits_long(&s->gb, bits_left);

    i= get_bits_count(&s->gb);
    switch_buffer(s, &i, &end_pos, &end_pos2);

    return 0;
}

/* Reorder short blocks from bitstream order to interleaved order.
It would be faster to do it in parsing, but the code would be far more
   complicated */
static void reorder_block(MPADecodeContext *s, GranuleDef *g)
{
    int i, j, len;
    int32_t *ptr, *dst, *ptr1;
    int32_t tmp[576];

    if (g->block_type != 2)
        return;

    if (g->switch_point) {
        /* mixed block: the leading long subbands are not reordered
           (presumably 3 subbands for sample_rate_index 8 — TODO confirm) */
        if (s->sample_rate_index != 8) {
            ptr = g->sb_hybrid + 36;
        } else {
            ptr = g->sb_hybrid + 48;
        }
    } else {
        ptr = g->sb_hybrid;
    }

    for(i=g->short_start;i<13;i++) {
        len = band_size_short[s->sample_rate_index][i];
        ptr1 = ptr;
        dst = tmp;
        /* interleave the three windows of each short band */
        for(j=len;j>0;j--) {
            *dst++ = ptr[0*len];
            *dst++ = ptr[1*len];
            *dst++ = ptr[2*len];
            ptr++;
        }
        ptr+=2*len;
        memcpy(ptr1, tmp, len * 3 * sizeof(*ptr1));
    }
}

#define ISQRT2 FIXR(0.70710678118654752440)

/* Layer-3 joint stereo: working downwards from the top of the spectrum,
   intensity stereo is applied until the first band with non-zero right
   channel data; below that point MS stereo is applied if enabled. */
static void compute_stereo(MPADecodeContext *s,
                           GranuleDef *g0, GranuleDef *g1)
{
    int i, j, k, l;
    int32_t v1, v2;
    int sf_max, tmp0, tmp1, sf, len, non_zero_found;
    int32_t (*is_tab)[16];
    int32_t *tab0, *tab1;
    int non_zero_found_short[3];

    /* intensity stereo */
    if (s->mode_ext & MODE_EXT_I_STEREO) {
        if (!s->lsf) {
            is_tab = is_table;
            sf_max = 7;
        } else {
            is_tab = is_table_lsf[g1->scalefac_compress & 1];
            sf_max = 16;
        }

        tab0 = g0->sb_hybrid + 576;
        tab1 = g1->sb_hybrid + 576;

        non_zero_found_short[0] = 0;
        non_zero_found_short[1] = 0;
        non_zero_found_short[2] = 0;
        k = (13 - g1->short_start) * 3 + g1->long_end - 3;
        for(i = 12;i >= g1->short_start;i--) {
            /* for last band, use previous scale factor */
            if (i != 11)
                k -= 3;
            len = band_size_short[s->sample_rate_index][i];
            for(l=2;l>=0;l--) {
                tab0 -= len;
                tab1 -= len;
                if (!non_zero_found_short[l]) {
                    /* test if non zero band. if so, stop doing i-stereo */
                    for(j=0;j<len;j++) {
                        if (tab1[j] != 0) {
                            non_zero_found_short[l] = 1;
                            goto found1;
                        }
                    }
                    sf = g1->scale_factors[k + l];
                    if (sf >= sf_max)
                        goto found1;

                    v1 = is_tab[0][sf];
                    v2 = is_tab[1][sf];
                    for(j=0;j<len;j++) {
                        tmp0 = tab0[j];
                        tab0[j] = MULL(tmp0, v1, FRAC_BITS);
                        tab1[j] = MULL(tmp0, v2, FRAC_BITS);
                    }
                } else {
found1:
                    if (s->mode_ext & MODE_EXT_MS_STEREO) {
                        /* lower part of the spectrum : do ms stereo
                           if enabled */
                        for(j=0;j<len;j++) {
                            tmp0 = tab0[j];
                            tmp1 = tab1[j];
                            tab0[j] = MULL(tmp0 + tmp1, ISQRT2, FRAC_BITS);
                            tab1[j] = MULL(tmp0 - tmp1, ISQRT2, FRAC_BITS);
                        }
                    }
                }
            }
        }

        non_zero_found = non_zero_found_short[0] |
            non_zero_found_short[1] |
            non_zero_found_short[2];

        for(i = g1->long_end - 1;i >= 0;i--) {
            len = band_size_long[s->sample_rate_index][i];
            tab0 -= len;
            tab1 -= len;
            /* test if non zero band. if so, stop doing i-stereo */
            if (!non_zero_found) {
                for(j=0;j<len;j++) {
                    if (tab1[j] != 0) {
                        non_zero_found = 1;
                        goto found2;
                    }
                }
                /* for last band, use previous scale factor */
                k = (i == 21) ? 20 : i;
                sf = g1->scale_factors[k];
                if (sf >= sf_max)
                    goto found2;
                v1 = is_tab[0][sf];
                v2 = is_tab[1][sf];
                for(j=0;j<len;j++) {
                    tmp0 = tab0[j];
                    tab0[j] = MULL(tmp0, v1, FRAC_BITS);
                    tab1[j] = MULL(tmp0, v2, FRAC_BITS);
                }
            } else {
found2:
                if (s->mode_ext & MODE_EXT_MS_STEREO) {
                    /* lower part of the spectrum : do ms stereo
                       if enabled */
                    for(j=0;j<len;j++) {
                        tmp0 = tab0[j];
                        tmp1 = tab1[j];
                        tab0[j] = MULL(tmp0 + tmp1, ISQRT2, FRAC_BITS);
                        tab1[j] = MULL(tmp0 - tmp1, ISQRT2, FRAC_BITS);
                    }
                }
            }
        }
    } else if (s->mode_ext & MODE_EXT_MS_STEREO) {
        /* ms stereo ONLY */
        /* NOTE: the 1/sqrt(2) normalization factor is included in the
           global gain */
        tab0 = g0->sb_hybrid;
        tab1 = g1->sb_hybrid;
        for(i=0;i<576;i++) {
            tmp0 = tab0[i];
            tmp1 = tab1[i];
            tab0[i] = tmp0 + tmp1;
            tab1[i] = tmp0 - tmp1;
        }
    }
}

/* Antialias butterflies on the 8 coefficient pairs straddling each long
   subband border (fixed-point csa_table version). */
static void compute_antialias_integer(MPADecodeContext *s, GranuleDef *g)
{
    int32_t *ptr, *csa;
    int n, i;

    /* we antialias only "long" bands */
    if (g->block_type == 2) {
        if (!g->switch_point)
            return;
        /* XXX: check this for 8000Hz case */
        n = 1;
    } else {
        n = SBLIMIT - 1;
    }

    ptr = g->sb_hybrid + 18;
    for(i = n;i > 0;i--) {
        int tmp0, tmp1, tmp2;
        csa = &csa_table[0][0];
#define INT_AA(j) \
        tmp0 = ptr[-1-j];\
        tmp1 = ptr[   j];\
        tmp2= MULH(tmp0 + tmp1, csa[0+4*j]);\
        ptr[-1-j] = 4*(tmp2 - MULH(tmp1, csa[2+4*j]));\
        ptr[   j] = 4*(tmp2 + MULH(tmp0, csa[3+4*j]));

        INT_AA(0)
        INT_AA(1)
        INT_AA(2)
        INT_AA(3)
        INT_AA(4)
        INT_AA(5)
        INT_AA(6)
        INT_AA(7)

        ptr += 18;
    }
}

/* same antialias butterflies using the float table, rounded back to int */
static void compute_antialias_float(MPADecodeContext *s, GranuleDef *g)
{
    int32_t *ptr;
    int n, i;

    /* we antialias only "long" bands */
    if (g->block_type == 2) {
        if (!g->switch_point)
            return;
        /* XXX: check this for 8000Hz case */
        n = 1;
    } else {
        n = SBLIMIT - 1;
    }

    ptr = g->sb_hybrid + 18;
    for(i = n;i > 0;i--) {
        float tmp0, tmp1;
        float *csa = &csa_table_float[0][0];
#define FLOAT_AA(j)\
        tmp0= ptr[-1-j];\
        tmp1= ptr[   j];\
        ptr[-1-j] = lrintf(tmp0 * csa[0+4*j] - tmp1 * csa[1+4*j]);\
        ptr[   j] = lrintf(tmp0 * csa[1+4*j] + tmp1 * csa[0+4*j]);

        FLOAT_AA(0)
        FLOAT_AA(1)
        FLOAT_AA(2)
        FLOAT_AA(3)
        FLOAT_AA(4)
        FLOAT_AA(5)
        FLOAT_AA(6)
        FLOAT_AA(7)

        ptr += 18;
    }
}

/* Hybrid synthesis: per subband run the 36-point (long) or 3x12-point
   (short) IMDCT with windowing and overlap-add through mdct_buf, skipping
   subbands that are entirely zero. */
static void compute_imdct(MPADecodeContext *s, GranuleDef *g,
                          int32_t *sb_samples, int32_t *mdct_buf)
{
    int32_t *ptr, *win, *win1, *buf, *out_ptr, *ptr1;
    int32_t out2[12];
    int i, j, mdct_long_end, v, sblimit;

    /* find last non zero block */
    ptr = g->sb_hybrid + 576;
    ptr1 = g->sb_hybrid + 2 * 18;
    while (ptr >= ptr1) {
        ptr -= 6;
        v = ptr[0] | ptr[1] | ptr[2] | ptr[3] | ptr[4] | ptr[5];
        if (v != 0)
            break;
    }
    sblimit = ((ptr - g->sb_hybrid) / 18) + 1;

    if (g->block_type == 2) {
        /* XXX: check for 8000 Hz */
        if (g->switch_point)
            mdct_long_end = 2;
        else
            mdct_long_end = 0;
    } else {
        mdct_long_end = sblimit;
    }

    buf = mdct_buf;
    ptr = g->sb_hybrid;
    for(j=0;j<mdct_long_end;j++) {
        /* apply window & overlap with previous buffer */
        out_ptr = sb_samples + j;
        /* select window */
        if (g->switch_point && j < 2)
            win1 = mdct_win[0];
        else
            win1 = mdct_win[g->block_type];
        /* select frequency inversion */
        win = win1 + ((4 * 36) & -(j & 1));
        imdct36(out_ptr, buf, ptr, win);
        out_ptr += 18*SBLIMIT;
        ptr += 18;
        buf += 18;
    }
    for(j=mdct_long_end;j<sblimit;j++) {
        /* select frequency inversion */
        win = mdct_win[2] + ((4 * 36) & -(j & 1));
        out_ptr = sb_samples + j;

        for(i=0; i<6; i++){
            *out_ptr = buf[i];
            out_ptr += SBLIMIT;
        }
        imdct12(out2, ptr + 0);
        for(i=0;i<6;i++) {
            *out_ptr = MULH(out2[i], win[i]) + buf[i + 6*1];
            buf[i + 6*2] = MULH(out2[i + 6], win[i + 6]);
            out_ptr += SBLIMIT;
        }
        imdct12(out2, ptr + 1);
        for(i=0;i<6;i++) {
            *out_ptr = MULH(out2[i], win[i]) + buf[i + 6*2];
            buf[i + 6*0] = MULH(out2[i + 6], win[i + 6]);
            out_ptr += SBLIMIT;
        }
        imdct12(out2, ptr + 2);
        for(i=0;i<6;i++) {
            buf[i + 6*0] = MULH(out2[i], win[i]) + buf[i + 6*0];
            buf[i + 6*1] = MULH(out2[i + 6], win[i + 6]);
            buf[i + 6*2] = 0;
        }
        ptr += 18;
        buf += 18;
    }
    /* zero bands */
    for(j=sblimit;j<SBLIMIT;j++) {
        /* overlap */
        out_ptr = sb_samples + j;
        for(i=0;i<18;i++) {
            *out_ptr = buf[i];
            buf[i] = 0;
            out_ptr += SBLIMIT;
        }
        buf += 18;
    }
}

/* main layer3 decoding
function */ static int mp_decode_layer3(MPADecodeContext *s) { int nb_granules, main_data_begin, private_bits; int gr, ch, blocksplit_flag, i, j, k, n, bits_pos; GranuleDef *g; int16_t exponents[576]; /* read side info */ if (s->lsf) { main_data_begin = get_bits(&s->gb, 8); private_bits = get_bits(&s->gb, s->nb_channels); nb_granules = 1; } else { main_data_begin = get_bits(&s->gb, 9); if (s->nb_channels == 2) private_bits = get_bits(&s->gb, 3); else private_bits = get_bits(&s->gb, 5); nb_granules = 2; for(ch=0;ch<s->nb_channels;ch++) { s->granules[ch][0].scfsi = 0;/* all scale factors are transmitted */ s->granules[ch][1].scfsi = get_bits(&s->gb, 4); } } for(gr=0;gr<nb_granules;gr++) { for(ch=0;ch<s->nb_channels;ch++) { dprintf(s->avctx, "gr=%d ch=%d: side_info\n", gr, ch); g = &s->granules[ch][gr]; g->part2_3_length = get_bits(&s->gb, 12); g->big_values = get_bits(&s->gb, 9); if(g->big_values > 288){ av_log(s->avctx, AV_LOG_ERROR, "big_values too big\n"); return -1; } g->global_gain = get_bits(&s->gb, 8); /* if MS stereo only is selected, we precompute the 1/sqrt(2) renormalization factor */ if ((s->mode_ext & (MODE_EXT_MS_STEREO | MODE_EXT_I_STEREO)) == MODE_EXT_MS_STEREO) g->global_gain -= 2; if (s->lsf) g->scalefac_compress = get_bits(&s->gb, 9); else g->scalefac_compress = get_bits(&s->gb, 4); blocksplit_flag = get_bits1(&s->gb); if (blocksplit_flag) { g->block_type = get_bits(&s->gb, 2); if (g->block_type == 0){ av_log(s->avctx, AV_LOG_ERROR, "invalid block type\n"); return -1; } g->switch_point = get_bits1(&s->gb); for(i=0;i<2;i++) g->table_select[i] = get_bits(&s->gb, 5); for(i=0;i<3;i++) g->subblock_gain[i] = get_bits(&s->gb, 3); ff_init_short_region(s, g); } else { int region_address1, region_address2; g->block_type = 0; g->switch_point = 0; for(i=0;i<3;i++) g->table_select[i] = get_bits(&s->gb, 5); /* compute huffman coded region sizes */ region_address1 = get_bits(&s->gb, 4); region_address2 = get_bits(&s->gb, 3); dprintf(s->avctx, "region1=%d 
region2=%d\n", region_address1, region_address2); ff_init_long_region(s, g, region_address1, region_address2); } ff_region_offset2size(g); ff_compute_band_indexes(s, g); g->preflag = 0; if (!s->lsf) g->preflag = get_bits1(&s->gb); g->scalefac_scale = get_bits1(&s->gb); g->count1table_select = get_bits1(&s->gb); dprintf(s->avctx, "block_type=%d switch_point=%d\n", g->block_type, g->switch_point); } } if (!s->adu_mode) { const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3); assert((get_bits_count(&s->gb) & 7) == 0); /* now we get bits from the main_data_begin offset */ dprintf(s->avctx, "seekback: %d\n", main_data_begin); //av_log(NULL, AV_LOG_ERROR, "backstep:%d, lastbuf:%d\n", main_data_begin, s->last_buf_size); memcpy(s->last_buf + s->last_buf_size, ptr, EXTRABYTES); s->in_gb= s->gb; init_get_bits(&s->gb, s->last_buf, s->last_buf_size*8); skip_bits_long(&s->gb, 8*(s->last_buf_size - main_data_begin)); } for(gr=0;gr<nb_granules;gr++) { for(ch=0;ch<s->nb_channels;ch++) { g = &s->granules[ch][gr]; if(get_bits_count(&s->gb)<0){ av_log(s->avctx, AV_LOG_DEBUG, "mdb:%d, lastbuf:%d skipping granule %d\n", main_data_begin, s->last_buf_size, gr); skip_bits_long(&s->gb, g->part2_3_length); memset(g->sb_hybrid, 0, sizeof(g->sb_hybrid)); if(get_bits_count(&s->gb) >= s->gb.size_in_bits && s->in_gb.buffer){ skip_bits_long(&s->in_gb, get_bits_count(&s->gb) - s->gb.size_in_bits); s->gb= s->in_gb; s->in_gb.buffer=NULL; } continue; } bits_pos = get_bits_count(&s->gb); if (!s->lsf) { uint8_t *sc; int slen, slen1, slen2; /* MPEG1 scale factors */ slen1 = slen_table[0][g->scalefac_compress]; slen2 = slen_table[1][g->scalefac_compress]; dprintf(s->avctx, "slen1=%d slen2=%d\n", slen1, slen2); if (g->block_type == 2) { n = g->switch_point ? 
17 : 18; j = 0; if(slen1){ for(i=0;i<n;i++) g->scale_factors[j++] = get_bits(&s->gb, slen1); }else{ for(i=0;i<n;i++) g->scale_factors[j++] = 0; } if(slen2){ for(i=0;i<18;i++) g->scale_factors[j++] = get_bits(&s->gb, slen2); for(i=0;i<3;i++) g->scale_factors[j++] = 0; }else{ for(i=0;i<21;i++) g->scale_factors[j++] = 0; } } else { sc = s->granules[ch][0].scale_factors; j = 0; for(k=0;k<4;k++) { n = (k == 0 ? 6 : 5); if ((g->scfsi & (0x8 >> k)) == 0) { slen = (k < 2) ? slen1 : slen2; if(slen){ for(i=0;i<n;i++) g->scale_factors[j++] = get_bits(&s->gb, slen); }else{ for(i=0;i<n;i++) g->scale_factors[j++] = 0; } } else { /* simply copy from last granule */ for(i=0;i<n;i++) { g->scale_factors[j] = sc[j]; j++; } } } g->scale_factors[j++] = 0; } } else { int tindex, tindex2, slen[4], sl, sf; /* LSF scale factors */ if (g->block_type == 2) { tindex = g->switch_point ? 2 : 1; } else { tindex = 0; } sf = g->scalefac_compress; if ((s->mode_ext & MODE_EXT_I_STEREO) && ch == 1) { /* intensity stereo case */ sf >>= 1; if (sf < 180) { lsf_sf_expand(slen, sf, 6, 6, 0); tindex2 = 3; } else if (sf < 244) { lsf_sf_expand(slen, sf - 180, 4, 4, 0); tindex2 = 4; } else { lsf_sf_expand(slen, sf - 244, 3, 0, 0); tindex2 = 5; } } else { /* normal case */ if (sf < 400) { lsf_sf_expand(slen, sf, 5, 4, 4); tindex2 = 0; } else if (sf < 500) { lsf_sf_expand(slen, sf - 400, 5, 4, 0); tindex2 = 1; } else { lsf_sf_expand(slen, sf - 500, 3, 0, 0); tindex2 = 2; g->preflag = 1; } } j = 0; for(k=0;k<4;k++) { n = lsf_nsf_table[tindex2][tindex][k]; sl = slen[k]; if(sl){ for(i=0;i<n;i++) g->scale_factors[j++] = get_bits(&s->gb, sl); }else{ for(i=0;i<n;i++) g->scale_factors[j++] = 0; } } /* XXX: should compute exact size */ for(;j<40;j++) g->scale_factors[j] = 0; } exponents_from_scale_factors(s, g, exponents); /* read Huffman coded residue */ huffman_decode(s, g, exponents, bits_pos + g->part2_3_length); } /* ch */ if (s->nb_channels == 2) compute_stereo(s, &s->granules[0][gr], &s->granules[1][gr]); 
for(ch=0;ch<s->nb_channels;ch++) { g = &s->granules[ch][gr]; reorder_block(s, g); s->compute_antialias(s, g); compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]); } } /* gr */ if(get_bits_count(&s->gb)<0) skip_bits_long(&s->gb, -get_bits_count(&s->gb)); return nb_granules * 18; } static int mp_decode_frame(MPADecodeContext *s, OUT_INT *samples, const uint8_t *buf, int buf_size) { int i, nb_frames, ch; OUT_INT *samples_ptr; init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE)*8); /* skip error protection field */ if (s->error_protection) skip_bits(&s->gb, 16); dprintf(s->avctx, "frame %d:\n", s->frame_count); switch(s->layer) { case 1: s->avctx->frame_size = 384; nb_frames = mp_decode_layer1(s); break; case 2: s->avctx->frame_size = 1152; nb_frames = mp_decode_layer2(s); break; case 3: s->avctx->frame_size = s->lsf ? 576 : 1152; default: nb_frames = mp_decode_layer3(s); s->last_buf_size=0; if(s->in_gb.buffer){ align_get_bits(&s->gb); i= get_bits_left(&s->gb)>>3; if(i >= 0 && i <= BACKSTEP_SIZE){ memmove(s->last_buf, s->gb.buffer + (get_bits_count(&s->gb)>>3), i); s->last_buf_size=i; }else av_log(s->avctx, AV_LOG_ERROR, "invalid old backstep %d\n", i); s->gb= s->in_gb; s->in_gb.buffer= NULL; } align_get_bits(&s->gb); assert((get_bits_count(&s->gb) & 7) == 0); i= get_bits_left(&s->gb)>>3; if(i<0 || i > BACKSTEP_SIZE || nb_frames<0){ if(i<0) av_log(s->avctx, AV_LOG_ERROR, "invalid new backstep %d\n", i); i= FFMIN(BACKSTEP_SIZE, buf_size - HEADER_SIZE); } assert(i <= buf_size - HEADER_SIZE && i>= 0); memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i); s->last_buf_size += i; break; } /* apply the synthesis filter */ for(ch=0;ch<s->nb_channels;ch++) { samples_ptr = samples + ch; for(i=0;i<nb_frames;i++) { ff_mpa_synth_filter(s->synth_buf[ch], &(s->synth_buf_offset[ch]), ff_mpa_synth_window, &s->dither_state, samples_ptr, s->nb_channels, s->sb_samples[ch][i]); samples_ptr += 32 * s->nb_channels; } } return 
nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels; } static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MPADecodeContext *s = avctx->priv_data; uint32_t header; int out_size; OUT_INT *out_samples = data; if(buf_size < HEADER_SIZE) return -1; header = AV_RB32(buf); if(ff_mpa_check_header(header) < 0){ av_log(avctx, AV_LOG_ERROR, "Header missing\n"); return -1; } if (ff_mpegaudio_decode_header((MPADecodeHeader *)s, header) == 1) { /* free format: prepare to compute frame size */ s->frame_size = -1; return -1; } /* update codec info */ avctx->channels = s->nb_channels; avctx->bit_rate = s->bit_rate; avctx->sub_id = s->layer; if(*data_size < 1152*avctx->channels*sizeof(OUT_INT)) return -1; *data_size = 0; if(s->frame_size<=0 || s->frame_size > buf_size){ av_log(avctx, AV_LOG_ERROR, "incomplete frame\n"); return -1; }else if(s->frame_size < buf_size){ av_log(avctx, AV_LOG_ERROR, "incorrect frame size\n"); buf_size= s->frame_size; } out_size = mp_decode_frame(s, out_samples, buf, buf_size); if(out_size>=0){ *data_size = out_size; avctx->sample_rate = s->sample_rate; //FIXME maybe move the other codec info stuff from above here too }else av_log(avctx, AV_LOG_DEBUG, "Error while decoding MPEG audio frame.\n"); //FIXME return -1 / but also return the number of bytes consumed s->frame_size = 0; return buf_size; } static void flush(AVCodecContext *avctx){ MPADecodeContext *s = avctx->priv_data; memset(s->synth_buf, 0, sizeof(s->synth_buf)); s->last_buf_size= 0; } #if CONFIG_MP3ADU_DECODER static int decode_frame_adu(AVCodecContext * avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MPADecodeContext *s = avctx->priv_data; uint32_t header; int len, out_size; OUT_INT *out_samples = data; len = buf_size; // Discard too short frames if (buf_size < HEADER_SIZE) { *data_size = 0; return buf_size; } if (len > 
MPA_MAX_CODED_FRAME_SIZE) len = MPA_MAX_CODED_FRAME_SIZE; // Get header and restore sync word header = AV_RB32(buf) | 0xffe00000; if (ff_mpa_check_header(header) < 0) { // Bad header, discard frame *data_size = 0; return buf_size; } ff_mpegaudio_decode_header((MPADecodeHeader *)s, header); /* update codec info */ avctx->sample_rate = s->sample_rate; avctx->channels = s->nb_channels; avctx->bit_rate = s->bit_rate; avctx->sub_id = s->layer; s->frame_size = len; if (avctx->parse_only) { out_size = buf_size; } else { out_size = mp_decode_frame(s, out_samples, buf, buf_size); } *data_size = out_size; return buf_size; } #endif /* CONFIG_MP3ADU_DECODER */ #if CONFIG_MP3ON4_DECODER /** * Context for MP3On4 decoder */ typedef struct MP3On4DecodeContext { int frames; ///< number of mp3 frames per block (number of mp3 decoder instances) int syncword; ///< syncword patch const uint8_t *coff; ///< channels offsets in output buffer MPADecodeContext *mp3decctx[5]; ///< MPADecodeContext for every decoder instance } MP3On4DecodeContext; #include "mpeg4audio.h" /* Next 3 arrays are indexed by channel config number (passed via codecdata) */ static const uint8_t mp3Frames[8] = {0,1,1,2,3,3,4,5}; /* number of mp3 decoder instances */ /* offsets into output buffer, assume output order is FL FR BL BR C LFE */ static const uint8_t chan_offset[8][5] = { {0}, {0}, // C {0}, // FLR {2,0}, // C FLR {2,0,3}, // C FLR BS {4,0,2}, // C FLR BLRS {4,0,2,5}, // C FLR BLRS LFE {4,0,2,6,5}, // C FLR BLRS BLR LFE }; static int decode_init_mp3on4(AVCodecContext * avctx) { MP3On4DecodeContext *s = avctx->priv_data; MPEG4AudioConfig cfg; int i; if ((avctx->extradata_size < 2) || (avctx->extradata == NULL)) { av_log(avctx, AV_LOG_ERROR, "Codec extradata missing or too short.\n"); return -1; } ff_mpeg4audio_get_config(&cfg, avctx->extradata, avctx->extradata_size); if (!cfg.chan_config || cfg.chan_config > 7) { av_log(avctx, AV_LOG_ERROR, "Invalid channel config number.\n"); return -1; } s->frames = 
mp3Frames[cfg.chan_config]; s->coff = chan_offset[cfg.chan_config]; avctx->channels = ff_mpeg4audio_channels[cfg.chan_config]; if (cfg.sample_rate < 16000) s->syncword = 0xffe00000; else s->syncword = 0xfff00000; /* Init the first mp3 decoder in standard way, so that all tables get builded * We replace avctx->priv_data with the context of the first decoder so that * decode_init() does not have to be changed. * Other decoders will be initialized here copying data from the first context */ // Allocate zeroed memory for the first decoder context s->mp3decctx[0] = av_mallocz(sizeof(MPADecodeContext)); // Put decoder context in place to make init_decode() happy avctx->priv_data = s->mp3decctx[0]; decode_init(avctx); // Restore mp3on4 context pointer avctx->priv_data = s; s->mp3decctx[0]->adu_mode = 1; // Set adu mode /* Create a separate codec/context for each frame (first is already ok). * Each frame is 1 or 2 channels - up to 5 frames allowed */ for (i = 1; i < s->frames; i++) { s->mp3decctx[i] = av_mallocz(sizeof(MPADecodeContext)); s->mp3decctx[i]->compute_antialias = s->mp3decctx[0]->compute_antialias; s->mp3decctx[i]->adu_mode = 1; s->mp3decctx[i]->avctx = avctx; } return 0; } static av_cold int decode_close_mp3on4(AVCodecContext * avctx) { MP3On4DecodeContext *s = avctx->priv_data; int i; for (i = 0; i < s->frames; i++) if (s->mp3decctx[i]) av_free(s->mp3decctx[i]); return 0; } static int decode_frame_mp3on4(AVCodecContext * avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MP3On4DecodeContext *s = avctx->priv_data; MPADecodeContext *m; int fsize, len = buf_size, out_size = 0; uint32_t header; OUT_INT *out_samples = data; OUT_INT decoded_buf[MPA_FRAME_SIZE * MPA_MAX_CHANNELS]; OUT_INT *outptr, *bp; int fr, j, n; if(*data_size < MPA_FRAME_SIZE * MPA_MAX_CHANNELS * s->frames * sizeof(OUT_INT)) return -1; *data_size = 0; // Discard too short frames if (buf_size < HEADER_SIZE) return -1; // If only one 
decoder interleave is not needed outptr = s->frames == 1 ? out_samples : decoded_buf; avctx->bit_rate = 0; for (fr = 0; fr < s->frames; fr++) { fsize = AV_RB16(buf) >> 4; fsize = FFMIN3(fsize, len, MPA_MAX_CODED_FRAME_SIZE); m = s->mp3decctx[fr]; assert (m != NULL); header = (AV_RB32(buf) & 0x000fffff) | s->syncword; // patch header if (ff_mpa_check_header(header) < 0) // Bad header, discard block break; ff_mpegaudio_decode_header((MPADecodeHeader *)m, header); out_size += mp_decode_frame(m, outptr, buf, fsize); buf += fsize; len -= fsize; if(s->frames > 1) { n = m->avctx->frame_size*m->nb_channels; /* interleave output data */ bp = out_samples + s->coff[fr]; if(m->nb_channels == 1) { for(j = 0; j < n; j++) { *bp = decoded_buf[j]; bp += avctx->channels; } } else { for(j = 0; j < n; j++) { bp[0] = decoded_buf[j++]; bp[1] = decoded_buf[j]; bp += avctx->channels; } } } avctx->bit_rate += m->bit_rate; } /* update codec info */ avctx->sample_rate = s->mp3decctx[0]->sample_rate; *data_size = out_size; return buf_size; } #endif /* CONFIG_MP3ON4_DECODER */ #if CONFIG_MP1_DECODER AVCodec mp1_decoder = { "mp1", AVMEDIA_TYPE_AUDIO, CODEC_ID_MP1, sizeof(MPADecodeContext), decode_init, NULL, NULL, decode_frame, CODEC_CAP_PARSE_ONLY, .flush= flush, .long_name= NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"), }; #endif #if CONFIG_MP2_DECODER AVCodec mp2_decoder = { "mp2", AVMEDIA_TYPE_AUDIO, CODEC_ID_MP2, sizeof(MPADecodeContext), decode_init, NULL, NULL, decode_frame, CODEC_CAP_PARSE_ONLY, .flush= flush, .long_name= NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"), }; #endif #if CONFIG_MP3_DECODER AVCodec mp3_decoder = { "mp3", AVMEDIA_TYPE_AUDIO, CODEC_ID_MP3, sizeof(MPADecodeContext), decode_init, NULL, NULL, decode_frame, CODEC_CAP_PARSE_ONLY, .flush= flush, .long_name= NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"), }; #endif #if CONFIG_MP3ADU_DECODER AVCodec mp3adu_decoder = { "mp3adu", AVMEDIA_TYPE_AUDIO, CODEC_ID_MP3ADU, sizeof(MPADecodeContext), decode_init, NULL, 
NULL, decode_frame_adu, CODEC_CAP_PARSE_ONLY, .flush= flush, .long_name= NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"), }; #endif #if CONFIG_MP3ON4_DECODER AVCodec mp3on4_decoder = { "mp3on4", AVMEDIA_TYPE_AUDIO, CODEC_ID_MP3ON4, sizeof(MP3On4DecodeContext), decode_init_mp3on4, NULL, decode_close_mp3on4, decode_frame_mp3on4, .flush= flush, .long_name= NULL_IF_CONFIG_SMALL("MP3onMP4"), }; #endif
123linslouis-android-video-cutter
jni/libavcodec/mpegaudiodec.c
C
asf20
78,708
/* * Atrac common data * Copyright (c) 2009 Maxim Poliakovski * Copyright (c) 2009 Benjamin Larsson * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Atrac common header */ #ifndef AVCODEC_ATRAC_H #define AVCODEC_ATRAC_H extern float sf_table[64]; extern float qmf_window[48]; void atrac_generate_tables(void); void atrac_iqmf (float *inlo, float *inhi, unsigned int nIn, float *pOut, float *delayBuf, float *temp); #endif /* AVCODEC_ATRAC_H */
123linslouis-android-video-cutter
jni/libavcodec/atrac.h
C
asf20
1,184
/* * DV decoder * Copyright (c) 2002 Fabrice Bellard * Copyright (c) 2004 Roman Shaposhnik * * DV encoder * Copyright (c) 2003 Roman Shaposhnik * * 50 Mbps (DVCPRO50) support * Copyright (c) 2006 Daniel Maas <dmaas@maasdigital.com> * * 100 Mbps (DVCPRO HD) support * Initial code by Daniel Maas <dmaas@maasdigital.com> (funded by BBC R&D) * Final code by Roman Shaposhnik * * Many thanks to Dan Dennedy <dan@dennedy.org> for providing wealth * of DV technical info. * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * DV codec. 
*/ #define ALT_BITSTREAM_READER #include "avcodec.h" #include "dsputil.h" #include "get_bits.h" #include "put_bits.h" #include "simple_idct.h" #include "dvdata.h" #include "dv_tablegen.h" //#undef NDEBUG //#include <assert.h> typedef struct DVVideoContext { const DVprofile *sys; AVFrame picture; AVCodecContext *avctx; uint8_t *buf; uint8_t dv_zigzag[2][64]; void (*get_pixels)(DCTELEM *block, const uint8_t *pixels, int line_size); void (*fdct[2])(DCTELEM *block); void (*idct_put[2])(uint8_t *dest, int line_size, DCTELEM *block); me_cmp_func ildct_cmp; } DVVideoContext; #define TEX_VLC_BITS 9 /* XXX: also include quantization */ static RL_VLC_ELEM dv_rl_vlc[1184]; static inline int dv_work_pool_size(const DVprofile *d) { int size = d->n_difchan*d->difseg_size*27; if (DV_PROFILE_IS_1080i50(d)) size -= 3*27; if (DV_PROFILE_IS_720p50(d)) size -= 4*27; return size; } static inline void dv_calc_mb_coordinates(const DVprofile *d, int chan, int seq, int slot, uint16_t *tbl) { static const uint8_t off[] = { 2, 6, 8, 0, 4 }; static const uint8_t shuf1[] = { 36, 18, 54, 0, 72 }; static const uint8_t shuf2[] = { 24, 12, 36, 0, 48 }; static const uint8_t shuf3[] = { 18, 9, 27, 0, 36 }; static const uint8_t l_start[] = {0, 4, 9, 13, 18, 22, 27, 31, 36, 40}; static const uint8_t l_start_shuffled[] = { 9, 4, 13, 0, 18 }; static const uint8_t serpent1[] = {0, 1, 2, 2, 1, 0, 0, 1, 2, 2, 1, 0, 0, 1, 2, 2, 1, 0, 0, 1, 2, 2, 1, 0, 0, 1, 2}; static const uint8_t serpent2[] = {0, 1, 2, 3, 4, 5, 5, 4, 3, 2, 1, 0, 0, 1, 2, 3, 4, 5, 5, 4, 3, 2, 1, 0, 0, 1, 2, 3, 4, 5}; static const uint8_t remap[][2] = {{ 0, 0}, { 0, 0}, { 0, 0}, { 0, 0}, /* dummy */ { 0, 0}, { 0, 1}, { 0, 2}, { 0, 3}, {10, 0}, {10, 1}, {10, 2}, {10, 3}, {20, 0}, {20, 1}, {20, 2}, {20, 3}, {30, 0}, {30, 1}, {30, 2}, {30, 3}, {40, 0}, {40, 1}, {40, 2}, {40, 3}, {50, 0}, {50, 1}, {50, 2}, {50, 3}, {60, 0}, {60, 1}, {60, 2}, {60, 3}, {70, 0}, {70, 1}, {70, 2}, {70, 3}, { 0,64}, { 0,65}, { 0,66}, {10,64}, {10,65}, {10,66}, 
{20,64}, {20,65}, {20,66}, {30,64}, {30,65}, {30,66}, {40,64}, {40,65}, {40,66}, {50,64}, {50,65}, {50,66}, {60,64}, {60,65}, {60,66}, {70,64}, {70,65}, {70,66}, { 0,67}, {20,67}, {40,67}, {60,67}}; int i, k, m; int x, y, blk; for (m=0; m<5; m++) { switch (d->width) { case 1440: blk = (chan*11+seq)*27+slot; if (chan == 0 && seq == 11) { x = m*27+slot; if (x<90) { y = 0; } else { x = (x - 90)*2; y = 67; } } else { i = (4*chan + blk + off[m])%11; k = (blk/11)%27; x = shuf1[m] + (chan&1)*9 + k%9; y = (i*3+k/9)*2 + (chan>>1) + 1; } tbl[m] = (x<<1)|(y<<9); break; case 1280: blk = (chan*10+seq)*27+slot; i = (4*chan + (seq/5) + 2*blk + off[m])%10; k = (blk/5)%27; x = shuf1[m]+(chan&1)*9 + k%9; y = (i*3+k/9)*2 + (chan>>1) + 4; if (x >= 80) { x = remap[y][0]+((x-80)<<(y>59)); y = remap[y][1]; } tbl[m] = (x<<1)|(y<<9); break; case 960: blk = (chan*10+seq)*27+slot; i = (4*chan + (seq/5) + 2*blk + off[m])%10; k = (blk/5)%27 + (i&1)*3; x = shuf2[m] + k%6 + 6*(chan&1); y = l_start[i] + k/6 + 45*(chan>>1); tbl[m] = (x<<1)|(y<<9); break; case 720: switch (d->pix_fmt) { case PIX_FMT_YUV422P: x = shuf3[m] + slot/3; y = serpent1[slot] + ((((seq + off[m]) % d->difseg_size)<<1) + chan)*3; tbl[m] = (x<<1)|(y<<8); break; case PIX_FMT_YUV420P: x = shuf3[m] + slot/3; y = serpent1[slot] + ((seq + off[m]) % d->difseg_size)*3; tbl[m] = (x<<1)|(y<<9); break; case PIX_FMT_YUV411P: i = (seq + off[m]) % d->difseg_size; k = slot + ((m==1||m==2)?3:0); x = l_start_shuffled[m] + k/6; y = serpent2[k] + i*6; if (x>21) y = y*2 - i*6; tbl[m] = (x<<2)|(y<<8); break; } default: break; } } } static int dv_init_dynamic_tables(const DVprofile *d) { int j,i,c,s,p; uint32_t *factor1, *factor2; const int *iweight1, *iweight2; if (!d->work_chunks[dv_work_pool_size(d)-1].buf_offset) { p = i = 0; for (c=0; c<d->n_difchan; c++) { for (s=0; s<d->difseg_size; s++) { p += 6; for (j=0; j<27; j++) { p += !(j%3); if (!(DV_PROFILE_IS_1080i50(d) && c != 0 && s == 11) && !(DV_PROFILE_IS_720p50(d) && s > 9)) { 
dv_calc_mb_coordinates(d, c, s, j, &d->work_chunks[i].mb_coordinates[0]); d->work_chunks[i++].buf_offset = p; } p += 5; } } } } if (!d->idct_factor[DV_PROFILE_IS_HD(d)?8191:5631]) { factor1 = &d->idct_factor[0]; factor2 = &d->idct_factor[DV_PROFILE_IS_HD(d)?4096:2816]; if (d->height == 720) { iweight1 = &dv_iweight_720_y[0]; iweight2 = &dv_iweight_720_c[0]; } else { iweight1 = &dv_iweight_1080_y[0]; iweight2 = &dv_iweight_1080_c[0]; } if (DV_PROFILE_IS_HD(d)) { for (c = 0; c < 4; c++) { for (s = 0; s < 16; s++) { for (i = 0; i < 64; i++) { *factor1++ = (dv100_qstep[s] << (c + 9)) * iweight1[i]; *factor2++ = (dv100_qstep[s] << (c + 9)) * iweight2[i]; } } } } else { iweight1 = &dv_iweight_88[0]; for (j = 0; j < 2; j++, iweight1 = &dv_iweight_248[0]) { for (s = 0; s < 22; s++) { for (i = c = 0; c < 4; c++) { for (; i < dv_quant_areas[c]; i++) { *factor1 = iweight1[i] << (dv_quant_shifts[s][c] + 1); *factor2++ = (*factor1++) << 1; } } } } } } return 0; } static av_cold int dvvideo_init(AVCodecContext *avctx) { DVVideoContext *s = avctx->priv_data; DSPContext dsp; static int done = 0; int i, j; if (!done) { VLC dv_vlc; uint16_t new_dv_vlc_bits[NB_DV_VLC*2]; uint8_t new_dv_vlc_len[NB_DV_VLC*2]; uint8_t new_dv_vlc_run[NB_DV_VLC*2]; int16_t new_dv_vlc_level[NB_DV_VLC*2]; done = 1; /* it's faster to include sign bit in a generic VLC parsing scheme */ for (i = 0, j = 0; i < NB_DV_VLC; i++, j++) { new_dv_vlc_bits[j] = dv_vlc_bits[i]; new_dv_vlc_len[j] = dv_vlc_len[i]; new_dv_vlc_run[j] = dv_vlc_run[i]; new_dv_vlc_level[j] = dv_vlc_level[i]; if (dv_vlc_level[i]) { new_dv_vlc_bits[j] <<= 1; new_dv_vlc_len[j]++; j++; new_dv_vlc_bits[j] = (dv_vlc_bits[i] << 1) | 1; new_dv_vlc_len[j] = dv_vlc_len[i] + 1; new_dv_vlc_run[j] = dv_vlc_run[i]; new_dv_vlc_level[j] = -dv_vlc_level[i]; } } /* NOTE: as a trick, we use the fact the no codes are unused to accelerate the parsing of partial codes */ init_vlc(&dv_vlc, TEX_VLC_BITS, j, new_dv_vlc_len, 1, 1, new_dv_vlc_bits, 2, 2, 0); 
assert(dv_vlc.table_size == 1184); for (i = 0; i < dv_vlc.table_size; i++){ int code = dv_vlc.table[i][0]; int len = dv_vlc.table[i][1]; int level, run; if (len < 0){ //more bits needed run = 0; level = code; } else { run = new_dv_vlc_run [code] + 1; level = new_dv_vlc_level[code]; } dv_rl_vlc[i].len = len; dv_rl_vlc[i].level = level; dv_rl_vlc[i].run = run; } free_vlc(&dv_vlc); dv_vlc_map_tableinit(); } /* Generic DSP setup */ dsputil_init(&dsp, avctx); ff_set_cmp(&dsp, dsp.ildct_cmp, avctx->ildct_cmp); s->get_pixels = dsp.get_pixels; s->ildct_cmp = dsp.ildct_cmp[5]; /* 88DCT setup */ s->fdct[0] = dsp.fdct; s->idct_put[0] = dsp.idct_put; for (i = 0; i < 64; i++) s->dv_zigzag[0][i] = dsp.idct_permutation[ff_zigzag_direct[i]]; /* 248DCT setup */ s->fdct[1] = dsp.fdct248; s->idct_put[1] = ff_simple_idct248_put; // FIXME: need to add it to DSP if (avctx->lowres){ for (i = 0; i < 64; i++){ int j = ff_zigzag248_direct[i]; s->dv_zigzag[1][i] = dsp.idct_permutation[(j & 7) + (j & 8) * 4 + (j & 48) / 2]; } }else memcpy(s->dv_zigzag[1], ff_zigzag248_direct, 64); avctx->coded_frame = &s->picture; s->avctx = avctx; avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; return 0; } static av_cold int dvvideo_init_encoder(AVCodecContext *avctx) { if (!ff_dv_codec_profile(avctx)) { av_log(avctx, AV_LOG_ERROR, "Found no DV profile for %ix%i %s video\n", avctx->width, avctx->height, avcodec_get_pix_fmt_name(avctx->pix_fmt)); return -1; } return dvvideo_init(avctx); } // #define VLC_DEBUG // #define printf(...) 
av_log(NULL, AV_LOG_ERROR, __VA_ARGS__) typedef struct BlockInfo { const uint32_t *factor_table; const uint8_t *scan_table; uint8_t pos; /* position in block */ void (*idct_put)(uint8_t *dest, int line_size, DCTELEM *block); uint8_t partial_bit_count; uint16_t partial_bit_buffer; int shift_offset; } BlockInfo; /* bit budget for AC only in 5 MBs */ static const int vs_total_ac_bits = (100 * 4 + 68*2) * 5; /* see dv_88_areas and dv_248_areas for details */ static const int mb_area_start[5] = { 1, 6, 21, 43, 64 }; static inline int put_bits_left(PutBitContext* s) { return (s->buf_end - s->buf) * 8 - put_bits_count(s); } /* decode ac coefficients */ static void dv_decode_ac(GetBitContext *gb, BlockInfo *mb, DCTELEM *block) { int last_index = gb->size_in_bits; const uint8_t *scan_table = mb->scan_table; const uint32_t *factor_table = mb->factor_table; int pos = mb->pos; int partial_bit_count = mb->partial_bit_count; int level, run, vlc_len, index; OPEN_READER(re, gb); UPDATE_CACHE(re, gb); /* if we must parse a partial vlc, we do it here */ if (partial_bit_count > 0) { re_cache = ((unsigned)re_cache >> partial_bit_count) | (mb->partial_bit_buffer << (sizeof(re_cache) * 8 - partial_bit_count)); re_index -= partial_bit_count; mb->partial_bit_count = 0; } /* get the AC coefficients until last_index is reached */ for (;;) { #ifdef VLC_DEBUG printf("%2d: bits=%04x index=%d\n", pos, SHOW_UBITS(re, gb, 16), re_index); #endif /* our own optimized GET_RL_VLC */ index = NEG_USR32(re_cache, TEX_VLC_BITS); vlc_len = dv_rl_vlc[index].len; if (vlc_len < 0) { index = NEG_USR32((unsigned)re_cache << TEX_VLC_BITS, -vlc_len) + dv_rl_vlc[index].level; vlc_len = TEX_VLC_BITS - vlc_len; } level = dv_rl_vlc[index].level; run = dv_rl_vlc[index].run; /* gotta check if we're still within gb boundaries */ if (re_index + vlc_len > last_index) { /* should be < 16 bits otherwise a codeword could have been parsed */ mb->partial_bit_count = last_index - re_index; mb->partial_bit_buffer = 
NEG_USR32(re_cache, mb->partial_bit_count); re_index = last_index; break; } re_index += vlc_len; #ifdef VLC_DEBUG printf("run=%d level=%d\n", run, level); #endif pos += run; if (pos >= 64) break; level = (level * factor_table[pos] + (1 << (dv_iweight_bits - 1))) >> dv_iweight_bits; block[scan_table[pos]] = level; UPDATE_CACHE(re, gb); } CLOSE_READER(re, gb); mb->pos = pos; } static inline void bit_copy(PutBitContext *pb, GetBitContext *gb) { int bits_left = get_bits_left(gb); while (bits_left >= MIN_CACHE_BITS) { put_bits(pb, MIN_CACHE_BITS, get_bits(gb, MIN_CACHE_BITS)); bits_left -= MIN_CACHE_BITS; } if (bits_left > 0) { put_bits(pb, bits_left, get_bits(gb, bits_left)); } } static inline void dv_calculate_mb_xy(DVVideoContext *s, DVwork_chunk *work_chunk, int m, int *mb_x, int *mb_y) { *mb_x = work_chunk->mb_coordinates[m] & 0xff; *mb_y = work_chunk->mb_coordinates[m] >> 8; /* We work with 720p frames split in half. The odd half-frame (chan==2,3) is displaced :-( */ if (s->sys->height == 720 && !(s->buf[1]&0x0C)) { *mb_y -= (*mb_y>17)?18:-72; /* shifting the Y coordinate down by 72/2 macro blocks */ } } /* mb_x and mb_y are in units of 8 pixels */ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg) { DVVideoContext *s = avctx->priv_data; DVwork_chunk *work_chunk = arg; int quant, dc, dct_mode, class1, j; int mb_index, mb_x, mb_y, last_index; int y_stride, linesize; DCTELEM *block, *block1; int c_offset; uint8_t *y_ptr; const uint8_t *buf_ptr; PutBitContext pb, vs_pb; GetBitContext gb; BlockInfo mb_data[5 * DV_MAX_BPM], *mb, *mb1; LOCAL_ALIGNED_16(DCTELEM, sblock, [5*DV_MAX_BPM], [64]); LOCAL_ALIGNED_16(uint8_t, mb_bit_buffer, [80 + 4]); /* allow some slack */ LOCAL_ALIGNED_16(uint8_t, vs_bit_buffer, [5 * 80 + 4]); /* allow some slack */ const int log2_blocksize = 3-s->avctx->lowres; int is_field_mode[5]; assert((((int)mb_bit_buffer) & 7) == 0); assert((((int)vs_bit_buffer) & 7) == 0); memset(sblock, 0, 5*DV_MAX_BPM*sizeof(*sblock)); /* pass 1 : 
read DC and AC coefficients in blocks */ buf_ptr = &s->buf[work_chunk->buf_offset*80]; block1 = &sblock[0][0]; mb1 = mb_data; init_put_bits(&vs_pb, vs_bit_buffer, 5 * 80); for (mb_index = 0; mb_index < 5; mb_index++, mb1 += s->sys->bpm, block1 += s->sys->bpm * 64) { /* skip header */ quant = buf_ptr[3] & 0x0f; buf_ptr += 4; init_put_bits(&pb, mb_bit_buffer, 80); mb = mb1; block = block1; is_field_mode[mb_index] = 0; for (j = 0; j < s->sys->bpm; j++) { last_index = s->sys->block_sizes[j]; init_get_bits(&gb, buf_ptr, last_index); /* get the dc */ dc = get_sbits(&gb, 9); dct_mode = get_bits1(&gb); class1 = get_bits(&gb, 2); if (DV_PROFILE_IS_HD(s->sys)) { mb->idct_put = s->idct_put[0]; mb->scan_table = s->dv_zigzag[0]; mb->factor_table = &s->sys->idct_factor[(j >= 4)*4*16*64 + class1*16*64 + quant*64]; is_field_mode[mb_index] |= !j && dct_mode; } else { mb->idct_put = s->idct_put[dct_mode && log2_blocksize == 3]; mb->scan_table = s->dv_zigzag[dct_mode]; mb->factor_table = &s->sys->idct_factor[(class1 == 3)*2*22*64 + dct_mode*22*64 + (quant + dv_quant_offset[class1])*64]; } dc = dc << 2; /* convert to unsigned because 128 is not added in the standard IDCT */ dc += 1024; block[0] = dc; buf_ptr += last_index >> 3; mb->pos = 0; mb->partial_bit_count = 0; #ifdef VLC_DEBUG printf("MB block: %d, %d ", mb_index, j); #endif dv_decode_ac(&gb, mb, block); /* write the remaining bits in a new buffer only if the block is finished */ if (mb->pos >= 64) bit_copy(&pb, &gb); block += 64; mb++; } /* pass 2 : we can do it just after */ #ifdef VLC_DEBUG printf("***pass 2 size=%d MB#=%d\n", put_bits_count(&pb), mb_index); #endif block = block1; mb = mb1; init_get_bits(&gb, mb_bit_buffer, put_bits_count(&pb)); flush_put_bits(&pb); for (j = 0; j < s->sys->bpm; j++, block += 64, mb++) { if (mb->pos < 64 && get_bits_left(&gb) > 0) { dv_decode_ac(&gb, mb, block); /* if still not finished, no need to parse other blocks */ if (mb->pos < 64) break; } } /* all blocks are finished, so the extra 
bytes can be used at the video segment level */ if (j >= s->sys->bpm) bit_copy(&vs_pb, &gb); } /* we need a pass other the whole video segment */ #ifdef VLC_DEBUG printf("***pass 3 size=%d\n", put_bits_count(&vs_pb)); #endif block = &sblock[0][0]; mb = mb_data; init_get_bits(&gb, vs_bit_buffer, put_bits_count(&vs_pb)); flush_put_bits(&vs_pb); for (mb_index = 0; mb_index < 5; mb_index++) { for (j = 0; j < s->sys->bpm; j++) { if (mb->pos < 64) { #ifdef VLC_DEBUG printf("start %d:%d\n", mb_index, j); #endif dv_decode_ac(&gb, mb, block); } if (mb->pos >= 64 && mb->pos < 127) av_log(avctx, AV_LOG_ERROR, "AC EOB marker is absent pos=%d\n", mb->pos); block += 64; mb++; } } /* compute idct and place blocks */ block = &sblock[0][0]; mb = mb_data; for (mb_index = 0; mb_index < 5; mb_index++) { dv_calculate_mb_xy(s, work_chunk, mb_index, &mb_x, &mb_y); /* idct_put'ting luminance */ if ((s->sys->pix_fmt == PIX_FMT_YUV420P) || (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) || (s->sys->height >= 720 && mb_y != 134)) { y_stride = (s->picture.linesize[0] << ((!is_field_mode[mb_index]) * log2_blocksize)); } else { y_stride = (2 << log2_blocksize); } y_ptr = s->picture.data[0] + ((mb_y * s->picture.linesize[0] + mb_x) << log2_blocksize); linesize = s->picture.linesize[0] << is_field_mode[mb_index]; mb[0] .idct_put(y_ptr , linesize, block + 0*64); if (s->sys->video_stype == 4) { /* SD 422 */ mb[2].idct_put(y_ptr + (1 << log2_blocksize) , linesize, block + 2*64); } else { mb[1].idct_put(y_ptr + (1 << log2_blocksize) , linesize, block + 1*64); mb[2].idct_put(y_ptr + y_stride, linesize, block + 2*64); mb[3].idct_put(y_ptr + (1 << log2_blocksize) + y_stride, linesize, block + 3*64); } mb += 4; block += 4*64; /* idct_put'ting chrominance */ c_offset = (((mb_y >> (s->sys->pix_fmt == PIX_FMT_YUV420P)) * s->picture.linesize[1] + (mb_x >> ((s->sys->pix_fmt == PIX_FMT_YUV411P) ? 
2 : 1))) << log2_blocksize); for (j = 2; j; j--) { uint8_t *c_ptr = s->picture.data[j] + c_offset; if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) { uint64_t aligned_pixels[64/8]; uint8_t *pixels = (uint8_t*)aligned_pixels; uint8_t *c_ptr1, *ptr1; int x, y; mb->idct_put(pixels, 8, block); for (y = 0; y < (1 << log2_blocksize); y++, c_ptr += s->picture.linesize[j], pixels += 8) { ptr1 = pixels + (1 << (log2_blocksize - 1)); c_ptr1 = c_ptr + (s->picture.linesize[j] << log2_blocksize); for (x = 0; x < (1 << (log2_blocksize - 1)); x++) { c_ptr[x] = pixels[x]; c_ptr1[x] = ptr1[x]; } } block += 64; mb++; } else { y_stride = (mb_y == 134) ? (1 << log2_blocksize) : s->picture.linesize[j] << ((!is_field_mode[mb_index]) * log2_blocksize); linesize = s->picture.linesize[j] << is_field_mode[mb_index]; (mb++)-> idct_put(c_ptr , linesize, block); block += 64; if (s->sys->bpm == 8) { (mb++)->idct_put(c_ptr + y_stride, linesize, block); block += 64; } } } } return 0; } #if CONFIG_SMALL /* Converts run and level (where level != 0) pair into vlc, returning bit size */ static av_always_inline int dv_rl2vlc(int run, int level, int sign, uint32_t* vlc) { int size; if (run < DV_VLC_MAP_RUN_SIZE && level < DV_VLC_MAP_LEV_SIZE) { *vlc = dv_vlc_map[run][level].vlc | sign; size = dv_vlc_map[run][level].size; } else { if (level < DV_VLC_MAP_LEV_SIZE) { *vlc = dv_vlc_map[0][level].vlc | sign; size = dv_vlc_map[0][level].size; } else { *vlc = 0xfe00 | (level << 1) | sign; size = 16; } if (run) { *vlc |= ((run < 16) ? dv_vlc_map[run-1][0].vlc : (0x1f80 | (run - 1))) << size; size += (run < 16) ? dv_vlc_map[run-1][0].size : 13; } } return size; } static av_always_inline int dv_rl2vlc_size(int run, int level) { int size; if (run < DV_VLC_MAP_RUN_SIZE && level < DV_VLC_MAP_LEV_SIZE) { size = dv_vlc_map[run][level].size; } else { size = (level < DV_VLC_MAP_LEV_SIZE) ? dv_vlc_map[0][level].size : 16; if (run) { size += (run < 16) ? 
dv_vlc_map[run-1][0].size : 13; } } return size; } #else static av_always_inline int dv_rl2vlc(int run, int l, int sign, uint32_t* vlc) { *vlc = dv_vlc_map[run][l].vlc | sign; return dv_vlc_map[run][l].size; } static av_always_inline int dv_rl2vlc_size(int run, int l) { return dv_vlc_map[run][l].size; } #endif typedef struct EncBlockInfo { int area_q[4]; int bit_size[4]; int prev[5]; int cur_ac; int cno; int dct_mode; DCTELEM mb[64]; uint8_t next[64]; uint8_t sign[64]; uint8_t partial_bit_count; uint32_t partial_bit_buffer; /* we can't use uint16_t here */ } EncBlockInfo; static av_always_inline PutBitContext* dv_encode_ac(EncBlockInfo* bi, PutBitContext* pb_pool, PutBitContext* pb_end) { int prev, bits_left; PutBitContext* pb = pb_pool; int size = bi->partial_bit_count; uint32_t vlc = bi->partial_bit_buffer; bi->partial_bit_count = bi->partial_bit_buffer = 0; for (;;){ /* Find suitable storage space */ for (; size > (bits_left = put_bits_left(pb)); pb++) { if (bits_left) { size -= bits_left; put_bits(pb, bits_left, vlc >> size); vlc = vlc & ((1 << size) - 1); } if (pb + 1 >= pb_end) { bi->partial_bit_count = size; bi->partial_bit_buffer = vlc; return pb; } } /* Store VLC */ put_bits(pb, size, vlc); if (bi->cur_ac >= 64) break; /* Construct the next VLC */ prev = bi->cur_ac; bi->cur_ac = bi->next[prev]; if (bi->cur_ac < 64){ size = dv_rl2vlc(bi->cur_ac - prev - 1, bi->mb[bi->cur_ac], bi->sign[bi->cur_ac], &vlc); } else { size = 4; vlc = 6; /* End Of Block stamp */ } } return pb; } static av_always_inline int dv_guess_dct_mode(DVVideoContext *s, uint8_t *data, int linesize) { if (s->avctx->flags & CODEC_FLAG_INTERLACED_DCT) { int ps = s->ildct_cmp(NULL, data, NULL, linesize, 8) - 400; if (ps > 0) { int is = s->ildct_cmp(NULL, data , NULL, linesize<<1, 4) + s->ildct_cmp(NULL, data + linesize, NULL, linesize<<1, 4); return (ps > is); } } return 0; } static av_always_inline int dv_init_enc_block(EncBlockInfo* bi, uint8_t *data, int linesize, DVVideoContext *s, int 
bias) { const int *weight; const uint8_t* zigzag_scan; LOCAL_ALIGNED_16(DCTELEM, blk, [64]); int i, area; /* We offer two different methods for class number assignment: the method suggested in SMPTE 314M Table 22, and an improved method. The SMPTE method is very conservative; it assigns class 3 (i.e. severe quantization) to any block where the largest AC component is greater than 36. FFmpeg's DV encoder tracks AC bit consumption precisely, so there is no need to bias most blocks towards strongly lossy compression. Instead, we assign class 2 to most blocks, and use class 3 only when strictly necessary (for blocks whose largest AC component exceeds 255). */ #if 0 /* SMPTE spec method */ static const int classes[] = {12, 24, 36, 0xffff}; #else /* improved FFmpeg method */ static const int classes[] = {-1, -1, 255, 0xffff}; #endif int max = classes[0]; int prev = 0; assert((((int)blk) & 15) == 0); bi->area_q[0] = bi->area_q[1] = bi->area_q[2] = bi->area_q[3] = 0; bi->partial_bit_count = 0; bi->partial_bit_buffer = 0; bi->cur_ac = 0; if (data) { bi->dct_mode = dv_guess_dct_mode(s, data, linesize); s->get_pixels(blk, data, linesize); s->fdct[bi->dct_mode](blk); } else { /* We rely on the fact that encoding all zeros leads to an immediate EOB, which is precisely what the spec calls for in the "dummy" blocks. */ memset(blk, 0, 64*sizeof(*blk)); bi->dct_mode = 0; } bi->mb[0] = blk[0]; zigzag_scan = bi->dct_mode ? ff_zigzag248_direct : ff_zigzag_direct; weight = bi->dct_mode ? 
dv_weight_248 : dv_weight_88; for (area = 0; area < 4; area++) { bi->prev[area] = prev; bi->bit_size[area] = 1; // 4 areas 4 bits for EOB :) for (i = mb_area_start[area]; i < mb_area_start[area+1]; i++) { int level = blk[zigzag_scan[i]]; if (level + 15 > 30U) { bi->sign[i] = (level >> 31) & 1; /* weigh it and and shift down into range, adding for rounding */ /* the extra division by a factor of 2^4 reverses the 8x expansion of the DCT AND the 2x doubling of the weights */ level = (FFABS(level) * weight[i] + (1 << (dv_weight_bits+3))) >> (dv_weight_bits+4); bi->mb[i] = level; if (level > max) max = level; bi->bit_size[area] += dv_rl2vlc_size(i - prev - 1, level); bi->next[prev]= i; prev = i; } } } bi->next[prev]= i; for (bi->cno = 0; max > classes[bi->cno]; bi->cno++); bi->cno += bias; if (bi->cno >= 3) { bi->cno = 3; prev = 0; i = bi->next[prev]; for (area = 0; area < 4; area++) { bi->prev[area] = prev; bi->bit_size[area] = 1; // 4 areas 4 bits for EOB :) for (; i < mb_area_start[area+1]; i = bi->next[i]) { bi->mb[i] >>= 1; if (bi->mb[i]) { bi->bit_size[area] += dv_rl2vlc_size(i - prev - 1, bi->mb[i]); bi->next[prev]= i; prev = i; } } } bi->next[prev]= i; } return bi->bit_size[0] + bi->bit_size[1] + bi->bit_size[2] + bi->bit_size[3]; } static inline void dv_guess_qnos(EncBlockInfo* blks, int* qnos) { int size[5]; int i, j, k, a, prev, a2; EncBlockInfo* b; size[0] = size[1] = size[2] = size[3] = size[4] = 1 << 24; do { b = blks; for (i = 0; i < 5; i++) { if (!qnos[i]) continue; qnos[i]--; size[i] = 0; for (j = 0; j < 6; j++, b++) { for (a = 0; a < 4; a++) { if (b->area_q[a] != dv_quant_shifts[qnos[i] + dv_quant_offset[b->cno]][a]) { b->bit_size[a] = 1; // 4 areas 4 bits for EOB :) b->area_q[a]++; prev = b->prev[a]; assert(b->next[prev] >= mb_area_start[a+1] || b->mb[prev]); for (k = b->next[prev] ; k < mb_area_start[a+1]; k = b->next[k]) { b->mb[k] >>= 1; if (b->mb[k]) { b->bit_size[a] += dv_rl2vlc_size(k - prev - 1, b->mb[k]); prev = k; } else { if (b->next[k] >= 
mb_area_start[a+1] && b->next[k]<64){ for (a2 = a + 1; b->next[k] >= mb_area_start[a2+1]; a2++) b->prev[a2] = prev; assert(a2 < 4); assert(b->mb[b->next[k]]); b->bit_size[a2] += dv_rl2vlc_size(b->next[k] - prev - 1, b->mb[b->next[k]]) -dv_rl2vlc_size(b->next[k] - k - 1, b->mb[b->next[k]]); assert(b->prev[a2] == k && (a2 + 1 >= 4 || b->prev[a2+1] != k)); b->prev[a2] = prev; } b->next[prev] = b->next[k]; } } b->prev[a+1]= prev; } size[i] += b->bit_size[a]; } } if (vs_total_ac_bits >= size[0] + size[1] + size[2] + size[3] + size[4]) return; } } while (qnos[0]|qnos[1]|qnos[2]|qnos[3]|qnos[4]); for (a = 2; a == 2 || vs_total_ac_bits < size[0]; a += a){ b = blks; size[0] = 5 * 6 * 4; //EOB for (j = 0; j < 6 *5; j++, b++) { prev = b->prev[0]; for (k = b->next[prev]; k < 64; k = b->next[k]) { if (b->mb[k] < a && b->mb[k] > -a){ b->next[prev] = b->next[k]; }else{ size[0] += dv_rl2vlc_size(k - prev - 1, b->mb[k]); prev = k; } } } } } static int dv_encode_video_segment(AVCodecContext *avctx, void *arg) { DVVideoContext *s = avctx->priv_data; DVwork_chunk *work_chunk = arg; int mb_index, i, j; int mb_x, mb_y, c_offset, linesize, y_stride; uint8_t* y_ptr; uint8_t* dif; uint8_t scratch[64]; EncBlockInfo enc_blks[5*DV_MAX_BPM]; PutBitContext pbs[5*DV_MAX_BPM]; PutBitContext* pb; EncBlockInfo* enc_blk; int vs_bit_size = 0; int qnos[5] = {15, 15, 15, 15, 15}; /* No quantization */ int* qnosp = &qnos[0]; dif = &s->buf[work_chunk->buf_offset*80]; enc_blk = &enc_blks[0]; for (mb_index = 0; mb_index < 5; mb_index++) { dv_calculate_mb_xy(s, work_chunk, mb_index, &mb_x, &mb_y); /* initializing luminance blocks */ if ((s->sys->pix_fmt == PIX_FMT_YUV420P) || (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) || (s->sys->height >= 720 && mb_y != 134)) { y_stride = s->picture.linesize[0] << 3; } else { y_stride = 16; } y_ptr = s->picture.data[0] + ((mb_y * s->picture.linesize[0] + mb_x) << 3); linesize = s->picture.linesize[0]; if (s->sys->video_stype == 4) { /* SD 422 */ vs_bit_size 
+= dv_init_enc_block(enc_blk+0, y_ptr , linesize, s, 0) + dv_init_enc_block(enc_blk+1, NULL , linesize, s, 0) + dv_init_enc_block(enc_blk+2, y_ptr + 8 , linesize, s, 0) + dv_init_enc_block(enc_blk+3, NULL , linesize, s, 0); } else { vs_bit_size += dv_init_enc_block(enc_blk+0, y_ptr , linesize, s, 0) + dv_init_enc_block(enc_blk+1, y_ptr + 8 , linesize, s, 0) + dv_init_enc_block(enc_blk+2, y_ptr + y_stride, linesize, s, 0) + dv_init_enc_block(enc_blk+3, y_ptr + 8 + y_stride, linesize, s, 0); } enc_blk += 4; /* initializing chrominance blocks */ c_offset = (((mb_y >> (s->sys->pix_fmt == PIX_FMT_YUV420P)) * s->picture.linesize[1] + (mb_x >> ((s->sys->pix_fmt == PIX_FMT_YUV411P) ? 2 : 1))) << 3); for (j = 2; j; j--) { uint8_t *c_ptr = s->picture.data[j] + c_offset; linesize = s->picture.linesize[j]; y_stride = (mb_y == 134) ? 8 : (s->picture.linesize[j] << 3); if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) { uint8_t* d; uint8_t* b = scratch; for (i = 0; i < 8; i++) { d = c_ptr + (linesize << 3); b[0] = c_ptr[0]; b[1] = c_ptr[1]; b[2] = c_ptr[2]; b[3] = c_ptr[3]; b[4] = d[0]; b[5] = d[1]; b[6] = d[2]; b[7] = d[3]; c_ptr += linesize; b += 8; } c_ptr = scratch; linesize = 8; } vs_bit_size += dv_init_enc_block( enc_blk++, c_ptr , linesize, s, 1); if (s->sys->bpm == 8) { vs_bit_size += dv_init_enc_block(enc_blk++, c_ptr + y_stride, linesize, s, 1); } } } if (vs_total_ac_bits < vs_bit_size) dv_guess_qnos(&enc_blks[0], qnosp); /* DIF encoding process */ for (j=0; j<5*s->sys->bpm;) { int start_mb = j; dif[3] = *qnosp++; dif += 4; /* First pass over individual cells only */ for (i=0; i<s->sys->bpm; i++, j++) { int sz = s->sys->block_sizes[i]>>3; init_put_bits(&pbs[j], dif, sz); put_sbits(&pbs[j], 9, ((enc_blks[j].mb[0] >> 3) - 1024 + 2) >> 2); put_bits(&pbs[j], 1, enc_blks[j].dct_mode); put_bits(&pbs[j], 2, enc_blks[j].cno); dv_encode_ac(&enc_blks[j], &pbs[j], &pbs[j+1]); dif += sz; } /* Second pass over each MB space */ pb = &pbs[start_mb]; for (i=0; 
i<s->sys->bpm; i++) { if (enc_blks[start_mb+i].partial_bit_count) pb = dv_encode_ac(&enc_blks[start_mb+i], pb, &pbs[start_mb+s->sys->bpm]); } } /* Third and final pass over the whole video segment space */ pb = &pbs[0]; for (j=0; j<5*s->sys->bpm; j++) { if (enc_blks[j].partial_bit_count) pb = dv_encode_ac(&enc_blks[j], pb, &pbs[s->sys->bpm*5]); if (enc_blks[j].partial_bit_count) av_log(avctx, AV_LOG_ERROR, "ac bitstream overflow\n"); } for (j=0; j<5*s->sys->bpm; j++) { int pos; int size = pbs[j].size_in_bits >> 3; flush_put_bits(&pbs[j]); pos = put_bits_count(&pbs[j]) >> 3; if (pos > size) { av_log(avctx, AV_LOG_ERROR, "bitstream written beyond buffer size\n"); return -1; } memset(pbs[j].buf + pos, 0xff, size - pos); } return 0; } #if CONFIG_DVVIDEO_DECODER /* NOTE: exactly one frame must be given (120000 bytes for NTSC, 144000 bytes for PAL - or twice those for 50Mbps) */ static int dvvideo_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; DVVideoContext *s = avctx->priv_data; s->sys = ff_dv_frame_profile(s->sys, buf, buf_size); if (!s->sys || buf_size < s->sys->frame_size || dv_init_dynamic_tables(s->sys)) { av_log(avctx, AV_LOG_ERROR, "could not find dv frame profile\n"); return -1; /* NOTE: we only accept several full frames */ } if (s->picture.data[0]) avctx->release_buffer(avctx, &s->picture); s->picture.reference = 0; s->picture.key_frame = 1; s->picture.pict_type = FF_I_TYPE; avctx->pix_fmt = s->sys->pix_fmt; avctx->time_base = s->sys->time_base; avcodec_set_dimensions(avctx, s->sys->width, s->sys->height); if (avctx->get_buffer(avctx, &s->picture) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } s->picture.interlaced_frame = 1; s->picture.top_field_first = 0; s->buf = buf; avctx->execute(avctx, dv_decode_video_segment, s->sys->work_chunks, NULL, dv_work_pool_size(s->sys), sizeof(DVwork_chunk)); emms_c(); /* return image */ *data_size = 
sizeof(AVFrame); *(AVFrame*)data = s->picture; return s->sys->frame_size; } #endif /* CONFIG_DVVIDEO_DECODER */ static inline int dv_write_pack(enum dv_pack_type pack_id, DVVideoContext *c, uint8_t* buf) { /* * Here's what SMPTE314M says about these two: * (page 6) APTn, AP1n, AP2n, AP3n: These data shall be identical * as track application IDs (APTn = 001, AP1n = * 001, AP2n = 001, AP3n = 001), if the source signal * comes from a digital VCR. If the signal source is * unknown, all bits for these data shall be set to 1. * (page 12) STYPE: STYPE defines a signal type of video signal * 00000b = 4:1:1 compression * 00100b = 4:2:2 compression * XXXXXX = Reserved * Now, I've got two problems with these statements: * 1. it looks like APT == 111b should be a safe bet, but it isn't. * It seems that for PAL as defined in IEC 61834 we have to set * APT to 000 and for SMPTE314M to 001. * 2. It is not at all clear what STYPE is used for 4:2:0 PAL * compression scheme (if any). */ int apt = (c->sys->pix_fmt == PIX_FMT_YUV420P ? 
0 : 1); uint8_t aspect = 0; if ((int)(av_q2d(c->avctx->sample_aspect_ratio) * c->avctx->width / c->avctx->height * 10) >= 17) /* 16:9 */ aspect = 0x02; buf[0] = (uint8_t)pack_id; switch (pack_id) { case dv_header525: /* I can't imagine why these two weren't defined as real */ case dv_header625: /* packs in SMPTE314M -- they definitely look like ones */ buf[1] = 0xf8 | /* reserved -- always 1 */ (apt & 0x07); /* APT: Track application ID */ buf[2] = (0 << 7) | /* TF1: audio data is 0 - valid; 1 - invalid */ (0x0f << 3) | /* reserved -- always 1 */ (apt & 0x07); /* AP1: Audio application ID */ buf[3] = (0 << 7) | /* TF2: video data is 0 - valid; 1 - invalid */ (0x0f << 3) | /* reserved -- always 1 */ (apt & 0x07); /* AP2: Video application ID */ buf[4] = (0 << 7) | /* TF3: subcode(SSYB) is 0 - valid; 1 - invalid */ (0x0f << 3) | /* reserved -- always 1 */ (apt & 0x07); /* AP3: Subcode application ID */ break; case dv_video_source: buf[1] = 0xff; /* reserved -- always 1 */ buf[2] = (1 << 7) | /* B/W: 0 - b/w, 1 - color */ (1 << 6) | /* following CLF is valid - 0, invalid - 1 */ (3 << 4) | /* CLF: color frames ID (see ITU-R BT.470-4) */ 0xf; /* reserved -- always 1 */ buf[3] = (3 << 6) | /* reserved -- always 1 */ (c->sys->dsf << 5) | /* system: 60fields/50fields */ c->sys->video_stype; /* signal type video compression */ buf[4] = 0xff; /* VISC: 0xff -- no information */ break; case dv_video_control: buf[1] = (0 << 6) | /* Copy generation management (CGMS) 0 -- free */ 0x3f; /* reserved -- always 1 */ buf[2] = 0xc8 | /* reserved -- always b11001xxx */ aspect; buf[3] = (1 << 7) | /* frame/field flag 1 -- frame, 0 -- field */ (1 << 6) | /* first/second field flag 0 -- field 2, 1 -- field 1 */ (1 << 5) | /* frame change flag 0 -- same picture as before, 1 -- different */ (1 << 4) | /* 1 - interlaced, 0 - noninterlaced */ 0xc; /* reserved -- always b1100 */ buf[4] = 0xff; /* reserved -- always 1 */ break; default: buf[1] = buf[2] = buf[3] = buf[4] = 0xff; } return 5; } #if 
CONFIG_DVVIDEO_ENCODER static void dv_format_frame(DVVideoContext* c, uint8_t* buf) { int chan, i, j, k; for (chan = 0; chan < c->sys->n_difchan; chan++) { for (i = 0; i < c->sys->difseg_size; i++) { memset(buf, 0xff, 80 * 6); /* first 6 DIF blocks are for control data */ /* DV header: 1DIF */ buf += dv_write_dif_id(dv_sect_header, chan, i, 0, buf); buf += dv_write_pack((c->sys->dsf ? dv_header625 : dv_header525), c, buf); buf += 72; /* unused bytes */ /* DV subcode: 2DIFs */ for (j = 0; j < 2; j++) { buf += dv_write_dif_id(dv_sect_subcode, chan, i, j, buf); for (k = 0; k < 6; k++) buf += dv_write_ssyb_id(k, (i < c->sys->difseg_size/2), buf) + 5; buf += 29; /* unused bytes */ } /* DV VAUX: 3DIFS */ for (j = 0; j < 3; j++) { buf += dv_write_dif_id(dv_sect_vaux, chan, i, j, buf); buf += dv_write_pack(dv_video_source, c, buf); buf += dv_write_pack(dv_video_control, c, buf); buf += 7*5; buf += dv_write_pack(dv_video_source, c, buf); buf += dv_write_pack(dv_video_control, c, buf); buf += 4*5 + 2; /* unused bytes */ } /* DV Audio/Video: 135 Video DIFs + 9 Audio DIFs */ for (j = 0; j < 135; j++) { if (j%15 == 0) { memset(buf, 0xff, 80); buf += dv_write_dif_id(dv_sect_audio, chan, i, j/15, buf); buf += 77; /* audio control & shuffled PCM audio */ } buf += dv_write_dif_id(dv_sect_video, chan, i, j, buf); buf += 77; /* 1 video macroblock: 1 bytes control 4 * 14 bytes Y 8x8 data 10 bytes Cr 8x8 data 10 bytes Cb 8x8 data */ } } } } static int dvvideo_encode_frame(AVCodecContext *c, uint8_t *buf, int buf_size, void *data) { DVVideoContext *s = c->priv_data; s->sys = ff_dv_codec_profile(c); if (!s->sys || buf_size < s->sys->frame_size || dv_init_dynamic_tables(s->sys)) return -1; c->pix_fmt = s->sys->pix_fmt; s->picture = *((AVFrame *)data); s->picture.key_frame = 1; s->picture.pict_type = FF_I_TYPE; s->buf = buf; c->execute(c, dv_encode_video_segment, s->sys->work_chunks, NULL, dv_work_pool_size(s->sys), sizeof(DVwork_chunk)); emms_c(); dv_format_frame(s, buf); return 
s->sys->frame_size; } #endif static int dvvideo_close(AVCodecContext *c) { DVVideoContext *s = c->priv_data; if (s->picture.data[0]) c->release_buffer(c, &s->picture); return 0; } #if CONFIG_DVVIDEO_ENCODER AVCodec dvvideo_encoder = { "dvvideo", AVMEDIA_TYPE_VIDEO, CODEC_ID_DVVIDEO, sizeof(DVVideoContext), dvvideo_init_encoder, dvvideo_encode_frame, .pix_fmts = (const enum PixelFormat[]) {PIX_FMT_YUV411P, PIX_FMT_YUV422P, PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"), }; #endif // CONFIG_DVVIDEO_ENCODER #if CONFIG_DVVIDEO_DECODER AVCodec dvvideo_decoder = { "dvvideo", AVMEDIA_TYPE_VIDEO, CODEC_ID_DVVIDEO, sizeof(DVVideoContext), dvvideo_init, NULL, dvvideo_close, dvvideo_decode_frame, CODEC_CAP_DR1, NULL, .long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"), }; #endif
123linslouis-android-video-cutter
jni/libavcodec/dv.c
C
asf20
47,481
/*
 * MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration bitstream filter
 * Copyright (c) 2009 Alex Converse <alex.converse@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "aac_parser.h"
#include "put_bits.h"
#include "get_bits.h"
#include "mpeg4audio.h"
#include "internal.h"

typedef struct AACBSFContext {
    int first_frame_done;   /* set once extradata has been generated */
} AACBSFContext;

/**
 * This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
 * ADTS header and removes the ADTS header.
 *
 * On the first ADTS frame it parses the header, builds the two-byte
 * AudioSpecificConfig (plus an optional PCE when the header carries no
 * channel configuration) into avctx->extradata, then strips the ADTS
 * header (and CRC, when present) from this and every following frame.
 *
 * @param bsfc         filter context (priv_data is an AACBSFContext)
 * @param avctx        codec context; extradata is (re)written here once
 * @param args         unused
 * @param poutbuf      receives a pointer into buf past the ADTS header
 * @param poutbuf_size receives the remaining payload size
 * @param buf          input packet starting with an ADTS header
 * @param buf_size     input packet size in bytes
 * @param keyframe     unused
 * @return 0 on success, -1 on a malformed header, an unsupported
 *         stream layout, or allocation failure
 */
static int aac_adtstoasc_filter(AVBitStreamFilterContext *bsfc,
                                AVCodecContext *avctx, const char *args,
                                uint8_t  **poutbuf, int *poutbuf_size,
                                const uint8_t *buf, int buf_size,
                                int keyframe)
{
    GetBitContext gb;
    PutBitContext pb;
    AACADTSHeaderInfo hdr;

    AACBSFContext *ctx = bsfc->priv_data;

    init_get_bits(&gb, buf, AAC_ADTS_HEADER_SIZE*8);

    /* Default: pass the packet through untouched. */
    *poutbuf      = (uint8_t*) buf;
    *poutbuf_size = buf_size;

    /* Once extradata exists, packets lacking the 0xfff ADTS syncword are
       assumed to be raw AAC already and are passed through unchanged. */
    if (avctx->extradata)
        if (show_bits(&gb, 12) != 0xfff)
            return 0;

    if (ff_aac_parse_header(&gb, &hdr) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error parsing ADTS frame header!\n");
        return -1;
    }

    if (!hdr.crc_absent && hdr.num_aac_frames > 1) {
        av_log_missing_feature(avctx, "Multiple RDBs per frame with CRC is", 0);
        return -1;
    }

    /* Strip the fixed header plus the 16-bit CRC when one is present. */
    buf      += AAC_ADTS_HEADER_SIZE + 2*!hdr.crc_absent;
    buf_size -= AAC_ADTS_HEADER_SIZE + 2*!hdr.crc_absent;

    if (!ctx->first_frame_done) {
        int     pce_size = 0;
        uint8_t pce_data[MAX_PCE_SIZE];

        if (!hdr.chan_config) {
            /* chan_config == 0 means the channel layout comes from an
               in-band PCE, which must be the first syntax element
               (ID_PCE == 5) for us to relocate it into extradata. */
            init_get_bits(&gb, buf, buf_size);
            if (get_bits(&gb, 3) != 5) {
                av_log_missing_feature(avctx, "PCE based channel configuration, where the PCE is not the first syntax element is", 0);
                return -1;
            }
            init_put_bits(&pb, pce_data, MAX_PCE_SIZE);
            pce_size = ff_copy_pce_data(&pb, &gb)/8;
            flush_put_bits(&pb);
            /* The PCE has been consumed from the payload. */
            buf_size -= get_bits_count(&gb)/8;
            buf      += get_bits_count(&gb)/8;
        }

        avctx->extradata_size = 2 + pce_size;
        avctx->extradata = av_mallocz(avctx->extradata_size +
                                      FF_INPUT_BUFFER_PADDING_SIZE);
        /* Fix: the allocation was previously used unchecked; on OOM
           init_put_bits() would have written through a NULL pointer. */
        if (!avctx->extradata) {
            avctx->extradata_size = 0;
            return -1;
        }

        /* AudioSpecificConfig: 5-bit object type, 4-bit sampling index,
           4-bit channel config, three flag bits. */
        init_put_bits(&pb, avctx->extradata, avctx->extradata_size);
        put_bits(&pb, 5, hdr.object_type);
        put_bits(&pb, 4, hdr.sampling_index);
        put_bits(&pb, 4, hdr.chan_config);
        put_bits(&pb, 1, 0); //frame length - 1024 samples
        put_bits(&pb, 1, 0); //does not depend on core coder
        put_bits(&pb, 1, 0); //is not extension
        flush_put_bits(&pb);
        if (pce_size) {
            memcpy(avctx->extradata + 2, pce_data, pce_size);
        }

        ctx->first_frame_done = 1;
    }

    /* Return the payload with the ADTS header (and any PCE) removed. */
    *poutbuf      = (uint8_t*) buf;
    *poutbuf_size = buf_size;

    return 0;
}

AVBitStreamFilter aac_adtstoasc_bsf = {
    "aac_adtstoasc",
    sizeof(AACBSFContext),
    aac_adtstoasc_filter,
};
123linslouis-android-video-cutter
jni/libavcodec/aac_adtstoasc_bsf.c
C
asf20
3,844
/* * Chinese AVS video (AVS1-P2, JiZhun profile) decoder. * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Chinese AVS video (AVS1-P2, JiZhun profile) decoder * @author Stefan Gehrer <stefan.gehrer@gmx.de> */ #include "avcodec.h" #include "get_bits.h" #include "golomb.h" #include "cavs.h" static const uint8_t mv_scan[4] = { MV_FWD_X0,MV_FWD_X1, MV_FWD_X2,MV_FWD_X3 }; static const uint8_t cbp_tab[64][2] = { {63, 0},{15,15},{31,63},{47,31},{ 0,16},{14,32},{13,47},{11,13}, { 7,14},{ 5,11},{10,12},{ 8, 5},{12,10},{61, 7},{ 4,48},{55, 3}, { 1, 2},{ 2, 8},{59, 4},{ 3, 1},{62,61},{ 9,55},{ 6,59},{29,62}, {45,29},{51,27},{23,23},{39,19},{27,30},{46,28},{53, 9},{30, 6}, {43,60},{37,21},{60,44},{16,26},{21,51},{28,35},{19,18},{35,20}, {42,24},{26,53},{44,17},{32,37},{58,39},{24,45},{20,58},{17,43}, {18,42},{48,46},{22,36},{33,33},{25,34},{49,40},{40,52},{36,49}, {34,50},{50,56},{52,25},{54,22},{41,54},{56,57},{38,41},{57,38} }; /***************************************************************************** * * motion vector prediction * ****************************************************************************/ static inline void store_mvs(AVSContext *h) { h->col_mv[h->mbidx*4 + 0] = h->mv[MV_FWD_X0]; 
h->col_mv[h->mbidx*4 + 1] = h->mv[MV_FWD_X1]; h->col_mv[h->mbidx*4 + 2] = h->mv[MV_FWD_X2]; h->col_mv[h->mbidx*4 + 3] = h->mv[MV_FWD_X3]; } static inline void mv_pred_direct(AVSContext *h, cavs_vector *pmv_fw, cavs_vector *col_mv) { cavs_vector *pmv_bw = pmv_fw + MV_BWD_OFFS; int den = h->direct_den[col_mv->ref]; int m = col_mv->x >> 31; pmv_fw->dist = h->dist[1]; pmv_bw->dist = h->dist[0]; pmv_fw->ref = 1; pmv_bw->ref = 0; /* scale the co-located motion vector according to its temporal span */ pmv_fw->x = (((den+(den*col_mv->x*pmv_fw->dist^m)-m-1)>>14)^m)-m; pmv_bw->x = m-(((den+(den*col_mv->x*pmv_bw->dist^m)-m-1)>>14)^m); m = col_mv->y >> 31; pmv_fw->y = (((den+(den*col_mv->y*pmv_fw->dist^m)-m-1)>>14)^m)-m; pmv_bw->y = m-(((den+(den*col_mv->y*pmv_bw->dist^m)-m-1)>>14)^m); } static inline void mv_pred_sym(AVSContext *h, cavs_vector *src, enum cavs_block size) { cavs_vector *dst = src + MV_BWD_OFFS; /* backward mv is the scaled and negated forward mv */ dst->x = -((src->x * h->sym_factor + 256) >> 9); dst->y = -((src->y * h->sym_factor + 256) >> 9); dst->ref = 0; dst->dist = h->dist[0]; set_mvs(dst, size); } /***************************************************************************** * * residual data decoding * ****************************************************************************/ /** kth-order exponential golomb code */ static inline int get_ue_code(GetBitContext *gb, int order) { if(order) { int ret = get_ue_golomb(gb) << order; return ret + get_bits(gb,order); } return get_ue_golomb(gb); } /** * decode coefficients from one 8x8 block, dequantize, inverse transform * and add them to sample block * @param r pointer to 2D VLC table * @param esc_golomb_order escape codes are k-golomb with this order k * @param qp quantizer * @param dst location of sample block * @param stride line stride in frame buffer */ static int decode_residual_block(AVSContext *h, GetBitContext *gb, const struct dec_2dvlc *r, int esc_golomb_order, int qp, uint8_t *dst, int stride) { 
int i, level_code, esc_code, level, run, mask; DCTELEM level_buf[65]; uint8_t run_buf[65]; DCTELEM *block = h->block; for(i=0;i<65;i++) { level_code = get_ue_code(gb,r->golomb_order); if(level_code >= ESCAPE_CODE) { run = ((level_code - ESCAPE_CODE) >> 1) + 1; esc_code = get_ue_code(gb,esc_golomb_order); level = esc_code + (run > r->max_run ? 1 : r->level_add[run]); while(level > r->inc_limit) r++; mask = -(level_code & 1); level = (level^mask) - mask; } else { level = r->rltab[level_code][0]; if(!level) //end of block signal break; run = r->rltab[level_code][1]; r += r->rltab[level_code][2]; } level_buf[i] = level; run_buf[i] = run; } if(dequant(h,level_buf, run_buf, block, ff_cavs_dequant_mul[qp], ff_cavs_dequant_shift[qp], i)) return -1; h->s.dsp.cavs_idct8_add(dst,block,stride); h->s.dsp.clear_block(block); return 0; } static inline void decode_residual_chroma(AVSContext *h) { if(h->cbp & (1<<4)) decode_residual_block(h,&h->s.gb,ff_cavs_chroma_dec,0, ff_cavs_chroma_qp[h->qp],h->cu,h->c_stride); if(h->cbp & (1<<5)) decode_residual_block(h,&h->s.gb,ff_cavs_chroma_dec,0, ff_cavs_chroma_qp[h->qp],h->cv,h->c_stride); } static inline int decode_residual_inter(AVSContext *h) { int block; /* get coded block pattern */ int cbp= get_ue_golomb(&h->s.gb); if(cbp > 63){ av_log(h->s.avctx, AV_LOG_ERROR, "illegal inter cbp\n"); return -1; } h->cbp = cbp_tab[cbp][1]; /* get quantizer */ if(h->cbp && !h->qp_fixed) h->qp = (h->qp + get_se_golomb(&h->s.gb)) & 63; for(block=0;block<4;block++) if(h->cbp & (1<<block)) decode_residual_block(h,&h->s.gb,ff_cavs_inter_dec,0,h->qp, h->cy + h->luma_scan[block], h->l_stride); decode_residual_chroma(h); return 0; } /***************************************************************************** * * macroblock level * ****************************************************************************/ static int decode_mb_i(AVSContext *h, int cbp_code) { GetBitContext *gb = &h->s.gb; int block, pred_mode_uv; uint8_t top[18]; uint8_t *left = NULL; 
uint8_t *d; ff_cavs_init_mb(h); /* get intra prediction modes from stream */ for(block=0;block<4;block++) { int nA,nB,predpred; int pos = ff_cavs_scan3x3[block]; nA = h->pred_mode_Y[pos-1]; nB = h->pred_mode_Y[pos-3]; predpred = FFMIN(nA,nB); if(predpred == NOT_AVAIL) // if either is not available predpred = INTRA_L_LP; if(!get_bits1(gb)){ int rem_mode= get_bits(gb, 2); predpred = rem_mode + (rem_mode >= predpred); } h->pred_mode_Y[pos] = predpred; } pred_mode_uv = get_ue_golomb(gb); if(pred_mode_uv > 6) { av_log(h->s.avctx, AV_LOG_ERROR, "illegal intra chroma pred mode\n"); return -1; } ff_cavs_modify_mb_i(h, &pred_mode_uv); /* get coded block pattern */ if(h->pic_type == FF_I_TYPE) cbp_code = get_ue_golomb(gb); if(cbp_code > 63){ av_log(h->s.avctx, AV_LOG_ERROR, "illegal intra cbp\n"); return -1; } h->cbp = cbp_tab[cbp_code][0]; if(h->cbp && !h->qp_fixed) h->qp = (h->qp + get_se_golomb(gb)) & 63; //qp_delta /* luma intra prediction interleaved with residual decode/transform/add */ for(block=0;block<4;block++) { d = h->cy + h->luma_scan[block]; ff_cavs_load_intra_pred_luma(h, top, &left, block); h->intra_pred_l[h->pred_mode_Y[ff_cavs_scan3x3[block]]] (d, top, left, h->l_stride); if(h->cbp & (1<<block)) decode_residual_block(h,gb,ff_cavs_intra_dec,1,h->qp,d,h->l_stride); } /* chroma intra prediction */ ff_cavs_load_intra_pred_chroma(h); h->intra_pred_c[pred_mode_uv](h->cu, &h->top_border_u[h->mbx*10], h->left_border_u, h->c_stride); h->intra_pred_c[pred_mode_uv](h->cv, &h->top_border_v[h->mbx*10], h->left_border_v, h->c_stride); decode_residual_chroma(h); ff_cavs_filter(h,I_8X8); set_mv_intra(h); return 0; } static void decode_mb_p(AVSContext *h, enum cavs_mb mb_type) { GetBitContext *gb = &h->s.gb; int ref[4]; ff_cavs_init_mb(h); switch(mb_type) { case P_SKIP: ff_cavs_mv(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_PSKIP, BLK_16X16, 0); break; case P_16X16: ref[0] = h->ref_flag ? 
0 : get_bits1(gb); ff_cavs_mv(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16,ref[0]); break; case P_16X8: ref[0] = h->ref_flag ? 0 : get_bits1(gb); ref[2] = h->ref_flag ? 0 : get_bits1(gb); ff_cavs_mv(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, ref[0]); ff_cavs_mv(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, ref[2]); break; case P_8X16: ref[0] = h->ref_flag ? 0 : get_bits1(gb); ref[1] = h->ref_flag ? 0 : get_bits1(gb); ff_cavs_mv(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, ref[0]); ff_cavs_mv(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT,BLK_8X16, ref[1]); break; case P_8X8: ref[0] = h->ref_flag ? 0 : get_bits1(gb); ref[1] = h->ref_flag ? 0 : get_bits1(gb); ref[2] = h->ref_flag ? 0 : get_bits1(gb); ref[3] = h->ref_flag ? 0 : get_bits1(gb); ff_cavs_mv(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_MEDIAN, BLK_8X8, ref[0]); ff_cavs_mv(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_MEDIAN, BLK_8X8, ref[1]); ff_cavs_mv(h, MV_FWD_X2, MV_FWD_X1, MV_PRED_MEDIAN, BLK_8X8, ref[2]); ff_cavs_mv(h, MV_FWD_X3, MV_FWD_X0, MV_PRED_MEDIAN, BLK_8X8, ref[3]); } ff_cavs_inter(h, mb_type); set_intra_mode_default(h); store_mvs(h); if(mb_type != P_SKIP) decode_residual_inter(h); ff_cavs_filter(h,mb_type); h->col_type_base[h->mbidx] = mb_type; } static void decode_mb_b(AVSContext *h, enum cavs_mb mb_type) { int block; enum cavs_sub_mb sub_type[4]; int flags; ff_cavs_init_mb(h); /* reset all MVs */ h->mv[MV_FWD_X0] = ff_cavs_dir_mv; set_mvs(&h->mv[MV_FWD_X0], BLK_16X16); h->mv[MV_BWD_X0] = ff_cavs_dir_mv; set_mvs(&h->mv[MV_BWD_X0], BLK_16X16); switch(mb_type) { case B_SKIP: case B_DIRECT: if(!h->col_type_base[h->mbidx]) { /* intra MB at co-location, do in-plane prediction */ ff_cavs_mv(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_BSKIP, BLK_16X16, 1); ff_cavs_mv(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_BSKIP, BLK_16X16, 0); } else /* direct prediction from co-located P MB, block-wise */ for(block=0;block<4;block++) mv_pred_direct(h,&h->mv[mv_scan[block]], &h->col_mv[h->mbidx*4 + block]); break; case B_FWD_16X16: 
ff_cavs_mv(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16, 1); break; case B_SYM_16X16: ff_cavs_mv(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16, 1); mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_16X16); break; case B_BWD_16X16: ff_cavs_mv(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_MEDIAN, BLK_16X16, 0); break; case B_8X8: for(block=0;block<4;block++) sub_type[block] = get_bits(&h->s.gb,2); for(block=0;block<4;block++) { switch(sub_type[block]) { case B_SUB_DIRECT: if(!h->col_type_base[h->mbidx]) { /* intra MB at co-location, do in-plane prediction */ ff_cavs_mv(h, mv_scan[block], mv_scan[block]-3, MV_PRED_BSKIP, BLK_8X8, 1); ff_cavs_mv(h, mv_scan[block]+MV_BWD_OFFS, mv_scan[block]-3+MV_BWD_OFFS, MV_PRED_BSKIP, BLK_8X8, 0); } else mv_pred_direct(h,&h->mv[mv_scan[block]], &h->col_mv[h->mbidx*4 + block]); break; case B_SUB_FWD: ff_cavs_mv(h, mv_scan[block], mv_scan[block]-3, MV_PRED_MEDIAN, BLK_8X8, 1); break; case B_SUB_SYM: ff_cavs_mv(h, mv_scan[block], mv_scan[block]-3, MV_PRED_MEDIAN, BLK_8X8, 1); mv_pred_sym(h, &h->mv[mv_scan[block]], BLK_8X8); break; } } for(block=0;block<4;block++) { if(sub_type[block] == B_SUB_BWD) ff_cavs_mv(h, mv_scan[block]+MV_BWD_OFFS, mv_scan[block]+MV_BWD_OFFS-3, MV_PRED_MEDIAN, BLK_8X8, 0); } break; default: assert((mb_type > B_SYM_16X16) && (mb_type < B_8X8)); flags = ff_cavs_partition_flags[mb_type]; if(mb_type & 1) { /* 16x8 macroblock types */ if(flags & FWD0) ff_cavs_mv(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, 1); if(flags & SYM0) mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_16X8); if(flags & FWD1) ff_cavs_mv(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, 1); if(flags & SYM1) mv_pred_sym(h, &h->mv[MV_FWD_X2], BLK_16X8); if(flags & BWD0) ff_cavs_mv(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_TOP, BLK_16X8, 0); if(flags & BWD1) ff_cavs_mv(h, MV_BWD_X2, MV_BWD_A1, MV_PRED_LEFT, BLK_16X8, 0); } else { /* 8x16 macroblock types */ if(flags & FWD0) ff_cavs_mv(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, 1); if(flags & SYM0) mv_pred_sym(h, 
&h->mv[MV_FWD_X0], BLK_8X16); if(flags & FWD1) ff_cavs_mv(h,MV_FWD_X1,MV_FWD_C2,MV_PRED_TOPRIGHT,BLK_8X16,1); if(flags & SYM1) mv_pred_sym(h, &h->mv[MV_FWD_X1], BLK_8X16); if(flags & BWD0) ff_cavs_mv(h, MV_BWD_X0, MV_BWD_B3, MV_PRED_LEFT, BLK_8X16, 0); if(flags & BWD1) ff_cavs_mv(h,MV_BWD_X1,MV_BWD_C2,MV_PRED_TOPRIGHT,BLK_8X16,0); } } ff_cavs_inter(h, mb_type); set_intra_mode_default(h); if(mb_type != B_SKIP) decode_residual_inter(h); ff_cavs_filter(h,mb_type); } /***************************************************************************** * * slice level * ****************************************************************************/ static inline int decode_slice_header(AVSContext *h, GetBitContext *gb) { if(h->stc > 0xAF) av_log(h->s.avctx, AV_LOG_ERROR, "unexpected start code 0x%02x\n", h->stc); h->mby = h->stc; h->mbidx = h->mby*h->mb_width; /* mark top macroblocks as unavailable */ h->flags &= ~(B_AVAIL|C_AVAIL); if((h->mby == 0) && (!h->qp_fixed)){ h->qp_fixed = get_bits1(gb); h->qp = get_bits(gb,6); } /* inter frame or second slice can have weighting params */ if((h->pic_type != FF_I_TYPE) || (!h->pic_structure && h->mby >= h->mb_width/2)) if(get_bits1(gb)) { //slice_weighting_flag av_log(h->s.avctx, AV_LOG_ERROR, "weighted prediction not yet supported\n"); } return 0; } static inline int check_for_slice(AVSContext *h) { GetBitContext *gb = &h->s.gb; int align; if(h->mbx) return 0; align = (-get_bits_count(gb)) & 7; /* check for stuffing byte */ if(!align && (show_bits(gb,8) == 0x80)) align = 8; if((show_bits_long(gb,24+align) & 0xFFFFFF) == 0x000001) { skip_bits_long(gb,24+align); h->stc = get_bits(gb,8); decode_slice_header(h,gb); return 1; } return 0; } /***************************************************************************** * * frame level * ****************************************************************************/ static int decode_pic(AVSContext *h) { MpegEncContext *s = &h->s; int skip_count = -1; enum cavs_mb mb_type; if 
(!s->context_initialized) { s->avctx->idct_algo = FF_IDCT_CAVS; if (MPV_common_init(s) < 0) return -1; ff_init_scantable(s->dsp.idct_permutation,&h->scantable,ff_zigzag_direct); } skip_bits(&s->gb,16);//bbv_dwlay if(h->stc == PIC_PB_START_CODE) { h->pic_type = get_bits(&s->gb,2) + FF_I_TYPE; if(h->pic_type > FF_B_TYPE) { av_log(s->avctx, AV_LOG_ERROR, "illegal picture type\n"); return -1; } /* make sure we have the reference frames we need */ if(!h->DPB[0].data[0] || (!h->DPB[1].data[0] && h->pic_type == FF_B_TYPE)) return -1; } else { h->pic_type = FF_I_TYPE; if(get_bits1(&s->gb)) skip_bits(&s->gb,24);//time_code /* old sample clips were all progressive and no low_delay, bump stream revision if detected otherwise */ if((s->low_delay) || !(show_bits(&s->gb,9) & 1)) h->stream_revision = 1; /* similarly test top_field_first and repeat_first_field */ else if(show_bits(&s->gb,11) & 3) h->stream_revision = 1; if(h->stream_revision > 0) skip_bits(&s->gb,1); //marker_bit } /* release last B frame */ if(h->picture.data[0]) s->avctx->release_buffer(s->avctx, (AVFrame *)&h->picture); s->avctx->get_buffer(s->avctx, (AVFrame *)&h->picture); ff_cavs_init_pic(h); h->picture.poc = get_bits(&s->gb,8)*2; /* get temporal distances and MV scaling factors */ if(h->pic_type != FF_B_TYPE) { h->dist[0] = (h->picture.poc - h->DPB[0].poc + 512) % 512; } else { h->dist[0] = (h->DPB[0].poc - h->picture.poc + 512) % 512; } h->dist[1] = (h->picture.poc - h->DPB[1].poc + 512) % 512; h->scale_den[0] = h->dist[0] ? 512/h->dist[0] : 0; h->scale_den[1] = h->dist[1] ? 512/h->dist[1] : 0; if(h->pic_type == FF_B_TYPE) { h->sym_factor = h->dist[0]*h->scale_den[1]; } else { h->direct_den[0] = h->dist[0] ? 16384/h->dist[0] : 0; h->direct_den[1] = h->dist[1] ? 
16384/h->dist[1] : 0; } if(s->low_delay) get_ue_golomb(&s->gb); //bbv_check_times h->progressive = get_bits1(&s->gb); h->pic_structure = 1; if(!h->progressive) h->pic_structure = get_bits1(&s->gb); if(!h->pic_structure && h->stc == PIC_PB_START_CODE) skip_bits1(&s->gb); //advanced_pred_mode_disable skip_bits1(&s->gb); //top_field_first skip_bits1(&s->gb); //repeat_first_field h->qp_fixed = get_bits1(&s->gb); h->qp = get_bits(&s->gb,6); if(h->pic_type == FF_I_TYPE) { if(!h->progressive && !h->pic_structure) skip_bits1(&s->gb);//what is this? skip_bits(&s->gb,4); //reserved bits } else { if(!(h->pic_type == FF_B_TYPE && h->pic_structure == 1)) h->ref_flag = get_bits1(&s->gb); skip_bits(&s->gb,4); //reserved bits h->skip_mode_flag = get_bits1(&s->gb); } h->loop_filter_disable = get_bits1(&s->gb); if(!h->loop_filter_disable && get_bits1(&s->gb)) { h->alpha_offset = get_se_golomb(&s->gb); h->beta_offset = get_se_golomb(&s->gb); } else { h->alpha_offset = h->beta_offset = 0; } if(h->pic_type == FF_I_TYPE) { do { check_for_slice(h); decode_mb_i(h, 0); } while(ff_cavs_next_mb(h)); } else if(h->pic_type == FF_P_TYPE) { do { if(check_for_slice(h)) skip_count = -1; if(h->skip_mode_flag && (skip_count < 0)) skip_count = get_ue_golomb(&s->gb); if(h->skip_mode_flag && skip_count--) { decode_mb_p(h,P_SKIP); } else { mb_type = get_ue_golomb(&s->gb) + P_SKIP + h->skip_mode_flag; if(mb_type > P_8X8) decode_mb_i(h, mb_type - P_8X8 - 1); else decode_mb_p(h,mb_type); } } while(ff_cavs_next_mb(h)); } else { /* FF_B_TYPE */ do { if(check_for_slice(h)) skip_count = -1; if(h->skip_mode_flag && (skip_count < 0)) skip_count = get_ue_golomb(&s->gb); if(h->skip_mode_flag && skip_count--) { decode_mb_b(h,B_SKIP); } else { mb_type = get_ue_golomb(&s->gb) + B_SKIP + h->skip_mode_flag; if(mb_type > B_8X8) decode_mb_i(h, mb_type - B_8X8 - 1); else decode_mb_b(h,mb_type); } } while(ff_cavs_next_mb(h)); } if(h->pic_type != FF_B_TYPE) { if(h->DPB[1].data[0]) s->avctx->release_buffer(s->avctx, (AVFrame 
*)&h->DPB[1]); h->DPB[1] = h->DPB[0]; h->DPB[0] = h->picture; memset(&h->picture,0,sizeof(Picture)); } return 0; } /***************************************************************************** * * headers and interface * ****************************************************************************/ static int decode_seq_header(AVSContext *h) { MpegEncContext *s = &h->s; int frame_rate_code; h->profile = get_bits(&s->gb,8); h->level = get_bits(&s->gb,8); skip_bits1(&s->gb); //progressive sequence s->width = get_bits(&s->gb,14); s->height = get_bits(&s->gb,14); skip_bits(&s->gb,2); //chroma format skip_bits(&s->gb,3); //sample_precision h->aspect_ratio = get_bits(&s->gb,4); frame_rate_code = get_bits(&s->gb,4); skip_bits(&s->gb,18);//bit_rate_lower skip_bits1(&s->gb); //marker_bit skip_bits(&s->gb,12);//bit_rate_upper s->low_delay = get_bits1(&s->gb); h->mb_width = (s->width + 15) >> 4; h->mb_height = (s->height + 15) >> 4; h->s.avctx->time_base.den = ff_frame_rate_tab[frame_rate_code].num; h->s.avctx->time_base.num = ff_frame_rate_tab[frame_rate_code].den; h->s.avctx->width = s->width; h->s.avctx->height = s->height; if(!h->top_qp) ff_cavs_init_top_lines(h); return 0; } static void cavs_flush(AVCodecContext * avctx) { AVSContext *h = avctx->priv_data; h->got_keyframe = 0; } static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; AVSContext *h = avctx->priv_data; MpegEncContext *s = &h->s; int input_size; const uint8_t *buf_end; const uint8_t *buf_ptr; AVFrame *picture = data; uint32_t stc = -1; s->avctx = avctx; if (buf_size == 0) { if(!s->low_delay && h->DPB[0].data[0]) { *data_size = sizeof(AVPicture); *picture = *(AVFrame *) &h->DPB[0]; } return 0; } buf_ptr = buf; buf_end = buf + buf_size; for(;;) { buf_ptr = ff_find_start_code(buf_ptr,buf_end, &stc); if(stc & 0xFFFFFE00) return FFMAX(0, buf_ptr - buf - s->parse_context.last_index); input_size = (buf_end - 
buf_ptr)*8; switch(stc) { case CAVS_START_CODE: init_get_bits(&s->gb, buf_ptr, input_size); decode_seq_header(h); break; case PIC_I_START_CODE: if(!h->got_keyframe) { if(h->DPB[0].data[0]) avctx->release_buffer(avctx, (AVFrame *)&h->DPB[0]); if(h->DPB[1].data[0]) avctx->release_buffer(avctx, (AVFrame *)&h->DPB[1]); h->got_keyframe = 1; } case PIC_PB_START_CODE: *data_size = 0; if(!h->got_keyframe) break; init_get_bits(&s->gb, buf_ptr, input_size); h->stc = stc; if(decode_pic(h)) break; *data_size = sizeof(AVPicture); if(h->pic_type != FF_B_TYPE) { if(h->DPB[1].data[0]) { *picture = *(AVFrame *) &h->DPB[1]; } else { *data_size = 0; } } else *picture = *(AVFrame *) &h->picture; break; case EXT_START_CODE: //mpeg_decode_extension(avctx,buf_ptr, input_size); break; case USER_START_CODE: //mpeg_decode_user_data(avctx,buf_ptr, input_size); break; default: if (stc <= SLICE_MAX_START_CODE) { init_get_bits(&s->gb, buf_ptr, input_size); decode_slice_header(h, &s->gb); } break; } } } AVCodec cavs_decoder = { "cavs", AVMEDIA_TYPE_VIDEO, CODEC_ID_CAVS, sizeof(AVSContext), ff_cavs_init, NULL, ff_cavs_end, cavs_decode_frame, CODEC_CAP_DR1 | CODEC_CAP_DELAY, .flush= cavs_flush, .long_name= NULL_IF_CONFIG_SMALL("Chinese AVS video (AVS1-P2, JiZhun profile)"), };
123linslouis-android-video-cutter
jni/libavcodec/cavsdec.c
C
asf20
25,938
/* * Copyright (C) 2003 Ivan Kalvachev * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_XVMC_H #define AVCODEC_XVMC_H #include <X11/extensions/XvMC.h> #include "avcodec.h" #if LIBAVCODEC_VERSION_MAJOR < 53 #define AV_XVMC_STATE_DISPLAY_PENDING 1 /** the surface should be shown, the video driver manipulates this */ #define AV_XVMC_STATE_PREDICTION 2 /** the surface is needed for prediction, the codec manipulates this */ #define AV_XVMC_STATE_OSD_SOURCE 4 /** the surface is needed for subpicture rendering */ #endif #define AV_XVMC_ID 0x1DC711C0 /**< special value to ensure that regular pixel routines haven't corrupted the struct the number is 1337 speak for the letters IDCT MCo (motion compensation) */ struct xvmc_pix_fmt { /** The field contains the special constant value AV_XVMC_ID. It is used as a test that the application correctly uses the API, and that there is no corruption caused by pixel routines. - application - set during initialization - libavcodec - unchanged */ int xvmc_id; /** Pointer to the block array allocated by XvMCCreateBlocks(). The array has to be freed by XvMCDestroyBlocks(). Each group of 64 values represents one data block of differential pixel information (in MoCo mode) or coefficients for IDCT. 
- application - set the pointer during initialization - libavcodec - fills coefficients/pixel data into the array */ short* data_blocks; /** Pointer to the macroblock description array allocated by XvMCCreateMacroBlocks() and freed by XvMCDestroyMacroBlocks(). - application - set the pointer during initialization - libavcodec - fills description data into the array */ XvMCMacroBlock* mv_blocks; /** Number of macroblock descriptions that can be stored in the mv_blocks array. - application - set during initialization - libavcodec - unchanged */ int allocated_mv_blocks; /** Number of blocks that can be stored at once in the data_blocks array. - application - set during initialization - libavcodec - unchanged */ int allocated_data_blocks; /** Indicates that the hardware would interpret data_blocks as IDCT coefficients and perform IDCT on them. - application - set during initialization - libavcodec - unchanged */ int idct; /** In MoCo mode it indicates that intra macroblocks are assumed to be in unsigned format; same as the XVMC_INTRA_UNSIGNED flag. - application - set during initialization - libavcodec - unchanged */ int unsigned_intra; /** Pointer to the surface allocated by XvMCCreateSurface(). It has to be freed by XvMCDestroySurface() on application exit. It identifies the frame and its state on the video hardware. - application - set during initialization - libavcodec - unchanged */ XvMCSurface* p_surface; /** Set by the decoder before calling ff_draw_horiz_band(), needed by the XvMCRenderSurface function. 
*/ //@{ /** Pointer to the surface used as past reference - application - unchanged - libavcodec - set */ XvMCSurface* p_past_surface; /** Pointer to the surface used as future reference - application - unchanged - libavcodec - set */ XvMCSurface* p_future_surface; /** top/bottom field or frame - application - unchanged - libavcodec - set */ unsigned int picture_structure; /** XVMC_SECOND_FIELD - 1st or 2nd field in the sequence - application - unchanged - libavcodec - set */ unsigned int flags; //}@ /** Number of macroblock descriptions in the mv_blocks array that have already been passed to the hardware. - application - zeroes it on get_buffer(). A successful ff_draw_horiz_band() may increment it with filled_mb_block_num or zero both. - libavcodec - unchanged */ int start_mv_blocks_num; /** Number of new macroblock descriptions in the mv_blocks array (after start_mv_blocks_num) that are filled by libavcodec and have to be passed to the hardware. - application - zeroes it on get_buffer() or after successful ff_draw_horiz_band(). - libavcodec - increment with one of each stored MB */ int filled_mv_blocks_num; /** Number of the the next free data block; one data block consists of 64 short values in the data_blocks array. All blocks before this one have already been claimed by placing their position into the corresponding block description structure field, that are part of the mv_blocks array. - application - zeroes it on get_buffer(). A successful ff_draw_horiz_band() may zero it together with start_mb_blocks_num. - libavcodec - each decoded macroblock increases it by the number of coded blocks it contains. */ int next_free_data_block_num; /** extensions may be placed here */ #if LIBAVCODEC_VERSION_MAJOR < 53 //@{ /** State flags used to work around limitations in the MPlayer video system. 0 - Surface is not used. 1 - Surface is still held in application to be displayed or is still visible. 2 - Surface is still held in libavcodec buffer for prediction. 
*/ int state; /** pointer to the surface where the subpicture is rendered */ void* p_osd_target_surface_render; //}@ #endif }; #endif /* AVCODEC_XVMC_H */
123linslouis-android-video-cutter
jni/libavcodec/xvmc.h
C
asf20
6,718
/* * Raw Video Codec * Copyright (c) 2001 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Raw Video Codec */ #ifndef AVCODEC_RAW_H #define AVCODEC_RAW_H #include "avcodec.h" typedef struct PixelFormatTag { enum PixelFormat pix_fmt; unsigned int fourcc; } PixelFormatTag; extern const PixelFormatTag ff_raw_pixelFormatTags[]; #endif /* AVCODEC_RAW_H */
123linslouis-android-video-cutter
jni/libavcodec/raw.h
C
asf20
1,116
/* * FFV1 codec for libavcodec * * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * FF Video Codec 1 (a lossless codec) */ #include "avcodec.h" #include "get_bits.h" #include "put_bits.h" #include "dsputil.h" #include "rangecoder.h" #include "golomb.h" #include "mathops.h" #define MAX_PLANES 4 #define CONTEXT_SIZE 32 extern const uint8_t ff_log2_run[32]; static const int8_t quant3[256]={ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, }; static const int8_t quant5_10bit[256]={ 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-0,-0,-0,-0,-0,-0,-0,-0,-0,-0, }; static const int8_t quant5[256]={ 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1,-1, }; static const int8_t quant7[256]={ 0, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3, -3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3, -3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3, -3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3, -3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3, -3,-3,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-2,-2,-2, -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1, }; static const int8_t quant9[256]={ 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3, -3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-1,-1, }; static const int8_t quant9_10bit[256]={ 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, -4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3, -3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3, -3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2, 
-2,-2,-2,-2,-1,-1,-1,-1,-1,-1,-1,-1,-0,-0,-0,-0, }; static const int8_t quant11[256]={ 0, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-4,-4, -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, -4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-1, }; static const int8_t quant13[256]={ 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, -6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6, -6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6, -6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6, -6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6, -6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-5, -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, -4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3,-2,-2,-1, }; static const uint8_t ver2_state[256]= { 0, 10, 10, 10, 10, 16, 16, 16, 28, 16, 16, 29, 42, 49, 20, 49, 59, 25, 26, 26, 27, 31, 33, 33, 33, 34, 34, 37, 67, 38, 39, 39, 40, 40, 41, 79, 43, 44, 45, 45, 48, 48, 64, 50, 51, 52, 88, 52, 53, 74, 55, 57, 58, 58, 74, 60, 101, 61, 62, 84, 66, 66, 68, 69, 87, 82, 71, 97, 73, 73, 
82, 75, 111, 77, 94, 78, 87, 81, 83, 97, 85, 83, 94, 86, 99, 89, 90, 99, 111, 92, 93, 134, 95, 98, 105, 98, 105, 110, 102, 108, 102, 118, 103, 106, 106, 113, 109, 112, 114, 112, 116, 125, 115, 116, 117, 117, 126, 119, 125, 121, 121, 123, 145, 124, 126, 131, 127, 129, 165, 130, 132, 138, 133, 135, 145, 136, 137, 139, 146, 141, 143, 142, 144, 148, 147, 155, 151, 149, 151, 150, 152, 157, 153, 154, 156, 168, 158, 162, 161, 160, 172, 163, 169, 164, 166, 184, 167, 170, 177, 174, 171, 173, 182, 176, 180, 178, 175, 189, 179, 181, 186, 183, 192, 185, 200, 187, 191, 188, 190, 197, 193, 196, 197, 194, 195, 196, 198, 202, 199, 201, 210, 203, 207, 204, 205, 206, 208, 214, 209, 211, 221, 212, 213, 215, 224, 216, 217, 218, 219, 220, 222, 228, 223, 225, 226, 224, 227, 229, 240, 230, 231, 232, 233, 234, 235, 236, 238, 239, 237, 242, 241, 243, 242, 244, 245, 246, 247, 248, 249, 250, 251, 252, 252, 253, 254, 255, }; typedef struct VlcState{ int16_t drift; uint16_t error_sum; int8_t bias; uint8_t count; } VlcState; typedef struct PlaneContext{ int context_count; uint8_t (*state)[CONTEXT_SIZE]; VlcState *vlc_state; uint8_t interlace_bit_state[2]; } PlaneContext; typedef struct FFV1Context{ AVCodecContext *avctx; RangeCoder c; GetBitContext gb; PutBitContext pb; int version; int width, height; int chroma_h_shift, chroma_v_shift; int flags; int picture_number; AVFrame picture; int plane_count; int ac; ///< 1=range coder <-> 0=golomb rice PlaneContext plane[MAX_PLANES]; int16_t quant_table[5][256]; uint8_t state_transition[256]; int run_index; int colorspace; DSPContext dsp; }FFV1Context; static av_always_inline int fold(int diff, int bits){ if(bits==8) diff= (int8_t)diff; else{ diff+= 1<<(bits-1); diff&=(1<<bits)-1; diff-= 1<<(bits-1); } return diff; } static inline int predict(int_fast16_t *src, int_fast16_t *last){ const int LT= last[-1]; const int T= last[ 0]; const int L = src[-1]; return mid_pred(L, L + T - LT, T); } static inline int get_context(FFV1Context *f, int_fast16_t *src, 
int_fast16_t *last, int_fast16_t *last2){ const int LT= last[-1]; const int T= last[ 0]; const int RT= last[ 1]; const int L = src[-1]; if(f->quant_table[3][127]){ const int TT= last2[0]; const int LL= src[-2]; return f->quant_table[0][(L-LT) & 0xFF] + f->quant_table[1][(LT-T) & 0xFF] + f->quant_table[2][(T-RT) & 0xFF] +f->quant_table[3][(LL-L) & 0xFF] + f->quant_table[4][(TT-T) & 0xFF]; }else return f->quant_table[0][(L-LT) & 0xFF] + f->quant_table[1][(LT-T) & 0xFF] + f->quant_table[2][(T-RT) & 0xFF]; } static inline void put_symbol_inline(RangeCoder *c, uint8_t *state, int v, int is_signed){ int i; if(v){ const int a= FFABS(v); const int e= av_log2(a); put_rac(c, state+0, 0); if(e<=9){ for(i=0; i<e; i++){ put_rac(c, state+1+i, 1); //1..10 } put_rac(c, state+1+i, 0); for(i=e-1; i>=0; i--){ put_rac(c, state+22+i, (a>>i)&1); //22..31 } if(is_signed) put_rac(c, state+11 + e, v < 0); //11..21 }else{ for(i=0; i<e; i++){ put_rac(c, state+1+FFMIN(i,9), 1); //1..10 } put_rac(c, state+1+9, 0); for(i=e-1; i>=0; i--){ put_rac(c, state+22+FFMIN(i,9), (a>>i)&1); //22..31 } if(is_signed) put_rac(c, state+11 + 10, v < 0); //11..21 } }else{ put_rac(c, state+0, 1); } } static void av_noinline put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed){ put_symbol_inline(c, state, v, is_signed); } static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed){ if(get_rac(c, state+0)) return 0; else{ int i, e, a; e= 0; while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10 e++; } a= 1; for(i=e-1; i>=0; i--){ a += a + get_rac(c, state+22 + FFMIN(i,9)); //22..31 } e= -(is_signed && get_rac(c, state+11 + FFMIN(e, 10))); //11..21 return (a^e)-e; } } static int av_noinline get_symbol(RangeCoder *c, uint8_t *state, int is_signed){ return get_symbol_inline(c, state, is_signed); } static inline void update_vlc_state(VlcState * const state, const int v){ int drift= state->drift; int count= state->count; state->error_sum += FFABS(v); drift += v; if(count == 128){ 
//FIXME variable count >>= 1; drift >>= 1; state->error_sum >>= 1; } count++; if(drift <= -count){ if(state->bias > -128) state->bias--; drift += count; if(drift <= -count) drift= -count + 1; }else if(drift > 0){ if(state->bias < 127) state->bias++; drift -= count; if(drift > 0) drift= 0; } state->drift= drift; state->count= count; } static inline void put_vlc_symbol(PutBitContext *pb, VlcState * const state, int v, int bits){ int i, k, code; //printf("final: %d ", v); v = fold(v - state->bias, bits); i= state->count; k=0; while(i < state->error_sum){ //FIXME optimize k++; i += i; } assert(k<=8); #if 0 // JPEG LS if(k==0 && 2*state->drift <= - state->count) code= v ^ (-1); else code= v; #else code= v ^ ((2*state->drift + state->count)>>31); #endif //printf("v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code, state->bias, state->error_sum, state->drift, state->count, k); set_sr_golomb(pb, code, k, 12, bits); update_vlc_state(state, v); } static inline int get_vlc_symbol(GetBitContext *gb, VlcState * const state, int bits){ int k, i, v, ret; i= state->count; k=0; while(i < state->error_sum){ //FIXME optimize k++; i += i; } assert(k<=8); v= get_sr_golomb(gb, k, 12, bits); //printf("v:%d bias:%d error:%d drift:%d count:%d k:%d", v, state->bias, state->error_sum, state->drift, state->count, k); #if 0 // JPEG LS if(k==0 && 2*state->drift <= - state->count) v ^= (-1); #else v ^= ((2*state->drift + state->count)>>31); #endif ret= fold(v + state->bias, bits); update_vlc_state(state, v); //printf("final: %d\n", ret); return ret; } #if CONFIG_FFV1_ENCODER static inline int encode_line(FFV1Context *s, int w, int_fast16_t *sample[2], int plane_index, int bits){ PlaneContext * const p= &s->plane[plane_index]; RangeCoder * const c= &s->c; int x; int run_index= s->run_index; int run_count=0; int run_mode=0; if(s->ac){ if(c->bytestream_end - c->bytestream < w*20){ av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n"); return -1; } }else{ if(s->pb.buf_end - s->pb.buf 
- (put_bits_count(&s->pb)>>3) < w*4){ av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n"); return -1; } } for(x=0; x<w; x++){ int diff, context; context= get_context(s, sample[0]+x, sample[1]+x, sample[2]+x); diff= sample[0][x] - predict(sample[0]+x, sample[1]+x); if(context < 0){ context = -context; diff= -diff; } diff= fold(diff, bits); if(s->ac){ put_symbol_inline(c, p->state[context], diff, 1); }else{ if(context == 0) run_mode=1; if(run_mode){ if(diff){ while(run_count >= 1<<ff_log2_run[run_index]){ run_count -= 1<<ff_log2_run[run_index]; run_index++; put_bits(&s->pb, 1, 1); } put_bits(&s->pb, 1 + ff_log2_run[run_index], run_count); if(run_index) run_index--; run_count=0; run_mode=0; if(diff>0) diff--; }else{ run_count++; } } // printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, (int)put_bits_count(&s->pb)); if(run_mode == 0) put_vlc_symbol(&s->pb, &p->vlc_state[context], diff, bits); } } if(run_mode){ while(run_count >= 1<<ff_log2_run[run_index]){ run_count -= 1<<ff_log2_run[run_index]; run_index++; put_bits(&s->pb, 1, 1); } if(run_count) put_bits(&s->pb, 1, 1); } s->run_index= run_index; return 0; } static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){ int x,y,i; const int ring_size= s->avctx->context_model ? 
3 : 2; int_fast16_t sample_buffer[ring_size][w+6], *sample[ring_size]; s->run_index=0; memset(sample_buffer, 0, sizeof(sample_buffer)); for(y=0; y<h; y++){ for(i=0; i<ring_size; i++) sample[i]= sample_buffer[(h+i-y)%ring_size]+3; sample[0][-1]= sample[1][0 ]; sample[1][ w]= sample[1][w-1]; //{START_TIMER if(s->avctx->bits_per_raw_sample<=8){ for(x=0; x<w; x++){ sample[0][x]= src[x + stride*y]; } encode_line(s, w, sample, plane_index, 8); }else{ for(x=0; x<w; x++){ sample[0][x]= ((uint16_t*)(src + stride*y))[x] >> (16 - s->avctx->bits_per_raw_sample); } encode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample); } //STOP_TIMER("encode line")} } } static void encode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){ int x, y, p, i; const int ring_size= s->avctx->context_model ? 3 : 2; int_fast16_t sample_buffer[3][ring_size][w+6], *sample[3][ring_size]; s->run_index=0; memset(sample_buffer, 0, sizeof(sample_buffer)); for(y=0; y<h; y++){ for(i=0; i<ring_size; i++) for(p=0; p<3; p++) sample[p][i]= sample_buffer[p][(h+i-y)%ring_size]+3; for(x=0; x<w; x++){ int v= src[x + stride*y]; int b= v&0xFF; int g= (v>>8)&0xFF; int r= (v>>16)&0xFF; b -= g; r -= g; g += (b + r)>>2; b += 0x100; r += 0x100; // assert(g>=0 && b>=0 && r>=0); // assert(g<256 && b<512 && r<512); sample[0][0][x]= g; sample[1][0][x]= b; sample[2][0][x]= r; } for(p=0; p<3; p++){ sample[p][0][-1]= sample[p][1][0 ]; sample[p][1][ w]= sample[p][1][w-1]; encode_line(s, w, sample[p], FFMIN(p, 1), 9); } } } static void write_quant_table(RangeCoder *c, int16_t *quant_table){ int last=0; int i; uint8_t state[CONTEXT_SIZE]; memset(state, 128, sizeof(state)); for(i=1; i<128 ; i++){ if(quant_table[i] != quant_table[i-1]){ put_symbol(c, state, i-last-1, 0); last= i; } } put_symbol(c, state, i-last-1, 0); } static void write_header(FFV1Context *f){ uint8_t state[CONTEXT_SIZE]; int i; RangeCoder * const c= &f->c; memset(state, 128, sizeof(state)); put_symbol(c, state, f->version, 0); 
put_symbol(c, state, f->ac, 0); if(f->ac>1){ for(i=1; i<256; i++){ f->state_transition[i]=ver2_state[i]; put_symbol(c, state, ver2_state[i] - c->one_state[i], 1); } } put_symbol(c, state, f->colorspace, 0); //YUV cs type if(f->version>0) put_symbol(c, state, f->avctx->bits_per_raw_sample, 0); put_rac(c, state, 1); //chroma planes put_symbol(c, state, f->chroma_h_shift, 0); put_symbol(c, state, f->chroma_v_shift, 0); put_rac(c, state, 0); //no transparency plane for(i=0; i<5; i++) write_quant_table(c, f->quant_table[i]); } #endif /* CONFIG_FFV1_ENCODER */ static av_cold int common_init(AVCodecContext *avctx){ FFV1Context *s = avctx->priv_data; s->avctx= avctx; s->flags= avctx->flags; dsputil_init(&s->dsp, avctx); s->width = avctx->width; s->height= avctx->height; assert(s->width && s->height); return 0; } #if CONFIG_FFV1_ENCODER static av_cold int encode_init(AVCodecContext *avctx) { FFV1Context *s = avctx->priv_data; int i; common_init(avctx); s->version=0; s->ac= avctx->coder_type ? 2:0; s->plane_count=2; for(i=0; i<256; i++){ if(avctx->bits_per_raw_sample <=8){ s->quant_table[0][i]= quant11[i]; s->quant_table[1][i]= 11*quant11[i]; if(avctx->context_model==0){ s->quant_table[2][i]= 11*11*quant11[i]; s->quant_table[3][i]= s->quant_table[4][i]=0; }else{ s->quant_table[2][i]= 11*11*quant5 [i]; s->quant_table[3][i]= 5*11*11*quant5 [i]; s->quant_table[4][i]= 5*5*11*11*quant5 [i]; } }else{ s->quant_table[0][i]= quant9_10bit[i]; s->quant_table[1][i]= 11*quant9_10bit[i]; if(avctx->context_model==0){ s->quant_table[2][i]= 11*11*quant9_10bit[i]; s->quant_table[3][i]= s->quant_table[4][i]=0; }else{ s->quant_table[2][i]= 11*11*quant5_10bit[i]; s->quant_table[3][i]= 5*11*11*quant5_10bit[i]; s->quant_table[4][i]= 5*5*11*11*quant5_10bit[i]; } } } for(i=0; i<s->plane_count; i++){ PlaneContext * const p= &s->plane[i]; if(avctx->context_model==0){ p->context_count= (11*11*11+1)/2; }else{ p->context_count= (11*11*5*5*5+1)/2; } if(s->ac){ if(!p->state) p->state= 
av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t)); }else{ if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeof(VlcState)); } } avctx->coded_frame= &s->picture; switch(avctx->pix_fmt){ case PIX_FMT_YUV444P16: case PIX_FMT_YUV422P16: case PIX_FMT_YUV420P16: if(avctx->bits_per_raw_sample <=8){ av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n"); return -1; } if(!s->ac){ av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample of more than 8 needs -coder 1 currently\n"); return -1; } s->version= 1; case PIX_FMT_YUV444P: case PIX_FMT_YUV422P: case PIX_FMT_YUV420P: case PIX_FMT_YUV411P: case PIX_FMT_YUV410P: s->colorspace= 0; break; case PIX_FMT_RGB32: s->colorspace= 1; break; default: av_log(avctx, AV_LOG_ERROR, "format not supported\n"); return -1; } avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift); s->picture_number=0; return 0; } #endif /* CONFIG_FFV1_ENCODER */ static void clear_state(FFV1Context *f){ int i, j; for(i=0; i<f->plane_count; i++){ PlaneContext *p= &f->plane[i]; p->interlace_bit_state[0]= 128; p->interlace_bit_state[1]= 128; for(j=0; j<p->context_count; j++){ if(f->ac){ memset(p->state[j], 128, sizeof(uint8_t)*CONTEXT_SIZE); }else{ p->vlc_state[j].drift= 0; p->vlc_state[j].error_sum= 4; //FFMAX((RANGE + 32)/64, 2); p->vlc_state[j].bias= 0; p->vlc_state[j].count= 1; } } } } #if CONFIG_FFV1_ENCODER static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ FFV1Context *f = avctx->priv_data; RangeCoder * const c= &f->c; AVFrame *pict = data; const int width= f->width; const int height= f->height; AVFrame * const p= &f->picture; int used_count= 0; uint8_t keystate=128; ff_init_range_encoder(c, buf, buf_size); ff_build_rac_states(c, 0.05*(1LL<<32), 256-8); *p = *pict; p->pict_type= FF_I_TYPE; if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){ put_rac(c, &keystate, 1); p->key_frame= 1; write_header(f); clear_state(f); }else{ put_rac(c, &keystate, 0); 
p->key_frame= 0; } if(!f->ac){ used_count += ff_rac_terminate(c); //printf("pos=%d\n", used_count); init_put_bits(&f->pb, buf + used_count, buf_size - used_count); }else if (f->ac>1){ int i; for(i=1; i<256; i++){ c->one_state[i]= f->state_transition[i]; c->zero_state[256-i]= 256-c->one_state[i]; } } if(f->colorspace==0){ const int chroma_width = -((-width )>>f->chroma_h_shift); const int chroma_height= -((-height)>>f->chroma_v_shift); encode_plane(f, p->data[0], width, height, p->linesize[0], 0); encode_plane(f, p->data[1], chroma_width, chroma_height, p->linesize[1], 1); encode_plane(f, p->data[2], chroma_width, chroma_height, p->linesize[2], 1); }else{ encode_rgb_frame(f, (uint32_t*)(p->data[0]), width, height, p->linesize[0]/4); } emms_c(); f->picture_number++; if(f->ac){ return ff_rac_terminate(c); }else{ flush_put_bits(&f->pb); //nicer padding FIXME return used_count + (put_bits_count(&f->pb)+7)/8; } } #endif /* CONFIG_FFV1_ENCODER */ static av_cold int common_end(AVCodecContext *avctx){ FFV1Context *s = avctx->priv_data; int i; for(i=0; i<s->plane_count; i++){ PlaneContext *p= &s->plane[i]; av_freep(&p->state); av_freep(&p->vlc_state); } return 0; } static av_always_inline void decode_line(FFV1Context *s, int w, int_fast16_t *sample[2], int plane_index, int bits){ PlaneContext * const p= &s->plane[plane_index]; RangeCoder * const c= &s->c; int x; int run_count=0; int run_mode=0; int run_index= s->run_index; for(x=0; x<w; x++){ int diff, context, sign; context= get_context(s, sample[1] + x, sample[0] + x, sample[1] + x); if(context < 0){ context= -context; sign=1; }else sign=0; if(s->ac){ diff= get_symbol_inline(c, p->state[context], 1); }else{ if(context == 0 && run_mode==0) run_mode=1; if(run_mode){ if(run_count==0 && run_mode==1){ if(get_bits1(&s->gb)){ run_count = 1<<ff_log2_run[run_index]; if(x + run_count <= w) run_index++; }else{ if(ff_log2_run[run_index]) run_count = get_bits(&s->gb, ff_log2_run[run_index]); else run_count=0; if(run_index) run_index--; 
run_mode=2; } } run_count--; if(run_count < 0){ run_mode=0; run_count=0; diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits); if(diff>=0) diff++; }else diff=0; }else diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits); // printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, get_bits_count(&s->gb)); } if(sign) diff= -diff; sample[1][x]= (predict(sample[1] + x, sample[0] + x) + diff) & ((1<<bits)-1); } s->run_index= run_index; } static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){ int x, y; int_fast16_t sample_buffer[2][w+6]; int_fast16_t *sample[2]; sample[0]=sample_buffer[0]+3; sample[1]=sample_buffer[1]+3; s->run_index=0; memset(sample_buffer, 0, sizeof(sample_buffer)); for(y=0; y<h; y++){ int_fast16_t *temp= sample[0]; //FIXME try a normal buffer sample[0]= sample[1]; sample[1]= temp; sample[1][-1]= sample[0][0 ]; sample[0][ w]= sample[0][w-1]; //{START_TIMER if(s->avctx->bits_per_raw_sample <= 8){ decode_line(s, w, sample, plane_index, 8); for(x=0; x<w; x++){ src[x + stride*y]= sample[1][x]; } }else{ decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample); for(x=0; x<w; x++){ ((uint16_t*)(src + stride*y))[x]= sample[1][x] << (16 - s->avctx->bits_per_raw_sample); } } //STOP_TIMER("decode-line")} } } static void decode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){ int x, y, p; int_fast16_t sample_buffer[3][2][w+6]; int_fast16_t *sample[3][2]; for(x=0; x<3; x++){ sample[x][0] = sample_buffer[x][0]+3; sample[x][1] = sample_buffer[x][1]+3; } s->run_index=0; memset(sample_buffer, 0, sizeof(sample_buffer)); for(y=0; y<h; y++){ for(p=0; p<3; p++){ int_fast16_t *temp= sample[p][0]; //FIXME try a normal buffer sample[p][0]= sample[p][1]; sample[p][1]= temp; sample[p][1][-1]= sample[p][0][0 ]; sample[p][0][ w]= sample[p][0][w-1]; decode_line(s, w, sample[p], FFMIN(p, 1), 9); } for(x=0; x<w; x++){ int g= sample[0][1][x]; int b= 
sample[1][1][x]; int r= sample[2][1][x]; // assert(g>=0 && b>=0 && r>=0); // assert(g<256 && b<512 && r<512); b -= 0x100; r -= 0x100; g -= (b + r)>>2; b += g; r += g; src[x + stride*y]= b + (g<<8) + (r<<16); } } } static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale){ int v; int i=0; uint8_t state[CONTEXT_SIZE]; memset(state, 128, sizeof(state)); for(v=0; i<128 ; v++){ int len= get_symbol(c, state, 0) + 1; if(len + i > 128) return -1; while(len--){ quant_table[i] = scale*v; i++; //printf("%2d ",v); //if(i%16==0) printf("\n"); } } for(i=1; i<128; i++){ quant_table[256-i]= -quant_table[i]; } quant_table[128]= -quant_table[127]; return 2*v - 1; } static int read_header(FFV1Context *f){ uint8_t state[CONTEXT_SIZE]; int i, context_count; RangeCoder * const c= &f->c; memset(state, 128, sizeof(state)); f->version= get_symbol(c, state, 0); f->ac= f->avctx->coder_type= get_symbol(c, state, 0); if(f->ac>1){ for(i=1; i<256; i++){ f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i]; } } f->colorspace= get_symbol(c, state, 0); //YUV cs type if(f->version>0) f->avctx->bits_per_raw_sample= get_symbol(c, state, 0); get_rac(c, state); //no chroma = false f->chroma_h_shift= get_symbol(c, state, 0); f->chroma_v_shift= get_symbol(c, state, 0); get_rac(c, state); //transparency plane f->plane_count= 2; if(f->colorspace==0){ if(f->avctx->bits_per_raw_sample<=8){ switch(16*f->chroma_h_shift + f->chroma_v_shift){ case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P; break; case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P; break; case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P; break; case 0x20: f->avctx->pix_fmt= PIX_FMT_YUV411P; break; case 0x22: f->avctx->pix_fmt= PIX_FMT_YUV410P; break; default: av_log(f->avctx, AV_LOG_ERROR, "format not supported\n"); return -1; } }else{ switch(16*f->chroma_h_shift + f->chroma_v_shift){ case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break; case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P16; break; case 0x11: f->avctx->pix_fmt= 
PIX_FMT_YUV420P16; break; default: av_log(f->avctx, AV_LOG_ERROR, "format not supported\n"); return -1; } } }else if(f->colorspace==1){ if(f->chroma_h_shift || f->chroma_v_shift){ av_log(f->avctx, AV_LOG_ERROR, "chroma subsampling not supported in this colorspace\n"); return -1; } f->avctx->pix_fmt= PIX_FMT_RGB32; }else{ av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n"); return -1; } //printf("%d %d %d\n", f->chroma_h_shift, f->chroma_v_shift,f->avctx->pix_fmt); context_count=1; for(i=0; i<5; i++){ context_count*= read_quant_table(c, f->quant_table[i], context_count); if(context_count < 0 || context_count > 32768){ av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n"); return -1; } } context_count= (context_count+1)/2; for(i=0; i<f->plane_count; i++){ PlaneContext * const p= &f->plane[i]; p->context_count= context_count; if(f->ac){ if(!p->state) p->state= av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t)); }else{ if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeof(VlcState)); } } return 0; } static av_cold int decode_init(AVCodecContext *avctx) { // FFV1Context *s = avctx->priv_data; common_init(avctx); return 0; } static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){ const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; FFV1Context *f = avctx->priv_data; RangeCoder * const c= &f->c; const int width= f->width; const int height= f->height; AVFrame * const p= &f->picture; int bytes_read; uint8_t keystate= 128; AVFrame *picture = data; ff_init_range_decoder(c, buf, buf_size); ff_build_rac_states(c, 0.05*(1LL<<32), 256-8); p->pict_type= FF_I_TYPE; //FIXME I vs. 
P if(get_rac(c, &keystate)){ p->key_frame= 1; if(read_header(f) < 0) return -1; clear_state(f); }else{ p->key_frame= 0; } if(f->ac>1){ int i; for(i=1; i<256; i++){ c->one_state[i]= f->state_transition[i]; c->zero_state[256-i]= 256-c->one_state[i]; } } if(!f->plane[0].state && !f->plane[0].vlc_state) return -1; p->reference= 0; if(avctx->get_buffer(avctx, p) < 0){ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } if(avctx->debug&FF_DEBUG_PICT_INFO) av_log(avctx, AV_LOG_ERROR, "keyframe:%d coder:%d\n", p->key_frame, f->ac); if(!f->ac){ bytes_read = c->bytestream - c->bytestream_start - 1; if(bytes_read ==0) av_log(avctx, AV_LOG_ERROR, "error at end of AC stream\n"); //FIXME //printf("pos=%d\n", bytes_read); init_get_bits(&f->gb, buf + bytes_read, buf_size - bytes_read); } else { bytes_read = 0; /* avoid warning */ } if(f->colorspace==0){ const int chroma_width = -((-width )>>f->chroma_h_shift); const int chroma_height= -((-height)>>f->chroma_v_shift); decode_plane(f, p->data[0], width, height, p->linesize[0], 0); decode_plane(f, p->data[1], chroma_width, chroma_height, p->linesize[1], 1); decode_plane(f, p->data[2], chroma_width, chroma_height, p->linesize[2], 1); }else{ decode_rgb_frame(f, (uint32_t*)p->data[0], width, height, p->linesize[0]/4); } emms_c(); f->picture_number++; *picture= *p; avctx->release_buffer(avctx, p); //FIXME *data_size = sizeof(AVFrame); if(f->ac){ bytes_read= c->bytestream - c->bytestream_start - 1; if(bytes_read ==0) av_log(f->avctx, AV_LOG_ERROR, "error at end of frame\n"); }else{ bytes_read+= (get_bits_count(&f->gb)+7)/8; } return bytes_read; } AVCodec ffv1_decoder = { "ffv1", AVMEDIA_TYPE_VIDEO, CODEC_ID_FFV1, sizeof(FFV1Context), decode_init, NULL, common_end, decode_frame, CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/, NULL, .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"), }; #if CONFIG_FFV1_ENCODER AVCodec ffv1_encoder = { "ffv1", AVMEDIA_TYPE_VIDEO, CODEC_ID_FFV1, sizeof(FFV1Context), encode_init, 
encode_frame, common_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"), }; #endif
123linslouis-android-video-cutter
jni/libavcodec/ffv1.c
C
asf20
37,471
/* * MPEG Audio decoder * copyright (c) 2002 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * mpeg audio layer decoder tables. */ #ifndef AVCODEC_MPEGAUDIODECTAB_H #define AVCODEC_MPEGAUDIODECTAB_H #include <stdint.h> #include "mpegaudio.h" /*******************************************************/ /* layer 3 tables */ /* layer3 scale factor size */ static const uint8_t slen_table[2][16] = { { 0, 0, 0, 0, 3, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4 }, { 0, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 2, 3 }, }; /* number of lsf scale factors for a given size */ static const uint8_t lsf_nsf_table[6][3][4] = { { { 6, 5, 5, 5 }, { 9, 9, 9, 9 }, { 6, 9, 9, 9 } }, { { 6, 5, 7, 3 }, { 9, 9, 12, 6 }, { 6, 9, 12, 6 } }, { { 11, 10, 0, 0 }, { 18, 18, 0, 0 }, { 15, 18, 0, 0 } }, { { 7, 7, 7, 0 }, { 12, 12, 12, 0 }, { 6, 15, 12, 0 } }, { { 6, 6, 6, 3 }, { 12, 9, 9, 6 }, { 6, 12, 9, 6 } }, { { 8, 8, 5, 0 }, { 15, 12, 9, 0 }, { 6, 18, 9, 0 } }, }; /* mpegaudio layer 3 huffman tables */ static const uint16_t mpa_huffcodes_1[4] = { 0x0001, 0x0001, 0x0001, 0x0000, }; static const uint8_t mpa_huffbits_1[4] = { 1, 3, 2, 3, }; static const uint16_t mpa_huffcodes_2[9] = { 0x0001, 0x0002, 0x0001, 0x0003, 0x0001, 0x0001, 0x0003, 0x0002, 0x0000, }; static const uint8_t mpa_huffbits_2[9] = { 
1, 3, 6, 3, 3, 5, 5, 5, 6, }; static const uint16_t mpa_huffcodes_3[9] = { 0x0003, 0x0002, 0x0001, 0x0001, 0x0001, 0x0001, 0x0003, 0x0002, 0x0000, }; static const uint8_t mpa_huffbits_3[9] = { 2, 2, 6, 3, 2, 5, 5, 5, 6, }; static const uint16_t mpa_huffcodes_5[16] = { 0x0001, 0x0002, 0x0006, 0x0005, 0x0003, 0x0001, 0x0004, 0x0004, 0x0007, 0x0005, 0x0007, 0x0001, 0x0006, 0x0001, 0x0001, 0x0000, }; static const uint8_t mpa_huffbits_5[16] = { 1, 3, 6, 7, 3, 3, 6, 7, 6, 6, 7, 8, 7, 6, 7, 8, }; static const uint16_t mpa_huffcodes_6[16] = { 0x0007, 0x0003, 0x0005, 0x0001, 0x0006, 0x0002, 0x0003, 0x0002, 0x0005, 0x0004, 0x0004, 0x0001, 0x0003, 0x0003, 0x0002, 0x0000, }; static const uint8_t mpa_huffbits_6[16] = { 3, 3, 5, 7, 3, 2, 4, 5, 4, 4, 5, 6, 6, 5, 6, 7, }; static const uint16_t mpa_huffcodes_7[36] = { 0x0001, 0x0002, 0x000a, 0x0013, 0x0010, 0x000a, 0x0003, 0x0003, 0x0007, 0x000a, 0x0005, 0x0003, 0x000b, 0x0004, 0x000d, 0x0011, 0x0008, 0x0004, 0x000c, 0x000b, 0x0012, 0x000f, 0x000b, 0x0002, 0x0007, 0x0006, 0x0009, 0x000e, 0x0003, 0x0001, 0x0006, 0x0004, 0x0005, 0x0003, 0x0002, 0x0000, }; static const uint8_t mpa_huffbits_7[36] = { 1, 3, 6, 8, 8, 9, 3, 4, 6, 7, 7, 8, 6, 5, 7, 8, 8, 9, 7, 7, 8, 9, 9, 9, 7, 7, 8, 9, 9, 10, 8, 8, 9, 10, 10, 10, }; static const uint16_t mpa_huffcodes_8[36] = { 0x0003, 0x0004, 0x0006, 0x0012, 0x000c, 0x0005, 0x0005, 0x0001, 0x0002, 0x0010, 0x0009, 0x0003, 0x0007, 0x0003, 0x0005, 0x000e, 0x0007, 0x0003, 0x0013, 0x0011, 0x000f, 0x000d, 0x000a, 0x0004, 0x000d, 0x0005, 0x0008, 0x000b, 0x0005, 0x0001, 0x000c, 0x0004, 0x0004, 0x0001, 0x0001, 0x0000, }; static const uint8_t mpa_huffbits_8[36] = { 2, 3, 6, 8, 8, 9, 3, 2, 4, 8, 8, 8, 6, 4, 6, 8, 8, 9, 8, 8, 8, 9, 9, 10, 8, 7, 8, 9, 10, 10, 9, 8, 9, 9, 11, 11, }; static const uint16_t mpa_huffcodes_9[36] = { 0x0007, 0x0005, 0x0009, 0x000e, 0x000f, 0x0007, 0x0006, 0x0004, 0x0005, 0x0005, 0x0006, 0x0007, 0x0007, 0x0006, 0x0008, 0x0008, 0x0008, 0x0005, 0x000f, 0x0006, 0x0009, 0x000a, 0x0005, 0x0001, 
0x000b, 0x0007, 0x0009, 0x0006, 0x0004, 0x0001, 0x000e, 0x0004, 0x0006, 0x0002, 0x0006, 0x0000, }; static const uint8_t mpa_huffbits_9[36] = { 3, 3, 5, 6, 8, 9, 3, 3, 4, 5, 6, 8, 4, 4, 5, 6, 7, 8, 6, 5, 6, 7, 7, 8, 7, 6, 7, 7, 8, 9, 8, 7, 8, 8, 9, 9, }; static const uint16_t mpa_huffcodes_10[64] = { 0x0001, 0x0002, 0x000a, 0x0017, 0x0023, 0x001e, 0x000c, 0x0011, 0x0003, 0x0003, 0x0008, 0x000c, 0x0012, 0x0015, 0x000c, 0x0007, 0x000b, 0x0009, 0x000f, 0x0015, 0x0020, 0x0028, 0x0013, 0x0006, 0x000e, 0x000d, 0x0016, 0x0022, 0x002e, 0x0017, 0x0012, 0x0007, 0x0014, 0x0013, 0x0021, 0x002f, 0x001b, 0x0016, 0x0009, 0x0003, 0x001f, 0x0016, 0x0029, 0x001a, 0x0015, 0x0014, 0x0005, 0x0003, 0x000e, 0x000d, 0x000a, 0x000b, 0x0010, 0x0006, 0x0005, 0x0001, 0x0009, 0x0008, 0x0007, 0x0008, 0x0004, 0x0004, 0x0002, 0x0000, }; static const uint8_t mpa_huffbits_10[64] = { 1, 3, 6, 8, 9, 9, 9, 10, 3, 4, 6, 7, 8, 9, 8, 8, 6, 6, 7, 8, 9, 10, 9, 9, 7, 7, 8, 9, 10, 10, 9, 10, 8, 8, 9, 10, 10, 10, 10, 10, 9, 9, 10, 10, 11, 11, 10, 11, 8, 8, 9, 10, 10, 10, 11, 11, 9, 8, 9, 10, 10, 11, 11, 11, }; static const uint16_t mpa_huffcodes_11[64] = { 0x0003, 0x0004, 0x000a, 0x0018, 0x0022, 0x0021, 0x0015, 0x000f, 0x0005, 0x0003, 0x0004, 0x000a, 0x0020, 0x0011, 0x000b, 0x000a, 0x000b, 0x0007, 0x000d, 0x0012, 0x001e, 0x001f, 0x0014, 0x0005, 0x0019, 0x000b, 0x0013, 0x003b, 0x001b, 0x0012, 0x000c, 0x0005, 0x0023, 0x0021, 0x001f, 0x003a, 0x001e, 0x0010, 0x0007, 0x0005, 0x001c, 0x001a, 0x0020, 0x0013, 0x0011, 0x000f, 0x0008, 0x000e, 0x000e, 0x000c, 0x0009, 0x000d, 0x000e, 0x0009, 0x0004, 0x0001, 0x000b, 0x0004, 0x0006, 0x0006, 0x0006, 0x0003, 0x0002, 0x0000, }; static const uint8_t mpa_huffbits_11[64] = { 2, 3, 5, 7, 8, 9, 8, 9, 3, 3, 4, 6, 8, 8, 7, 8, 5, 5, 6, 7, 8, 9, 8, 8, 7, 6, 7, 9, 8, 10, 8, 9, 8, 8, 8, 9, 9, 10, 9, 10, 8, 8, 9, 10, 10, 11, 10, 11, 8, 7, 7, 8, 9, 10, 10, 10, 8, 7, 8, 9, 10, 10, 10, 10, }; static const uint16_t mpa_huffcodes_12[64] = { 0x0009, 0x0006, 0x0010, 0x0021, 0x0029, 0x0027, 
0x0026, 0x001a, 0x0007, 0x0005, 0x0006, 0x0009, 0x0017, 0x0010, 0x001a, 0x000b, 0x0011, 0x0007, 0x000b, 0x000e, 0x0015, 0x001e, 0x000a, 0x0007, 0x0011, 0x000a, 0x000f, 0x000c, 0x0012, 0x001c, 0x000e, 0x0005, 0x0020, 0x000d, 0x0016, 0x0013, 0x0012, 0x0010, 0x0009, 0x0005, 0x0028, 0x0011, 0x001f, 0x001d, 0x0011, 0x000d, 0x0004, 0x0002, 0x001b, 0x000c, 0x000b, 0x000f, 0x000a, 0x0007, 0x0004, 0x0001, 0x001b, 0x000c, 0x0008, 0x000c, 0x0006, 0x0003, 0x0001, 0x0000, }; static const uint8_t mpa_huffbits_12[64] = { 4, 3, 5, 7, 8, 9, 9, 9, 3, 3, 4, 5, 7, 7, 8, 8, 5, 4, 5, 6, 7, 8, 7, 8, 6, 5, 6, 6, 7, 8, 8, 8, 7, 6, 7, 7, 8, 8, 8, 9, 8, 7, 8, 8, 8, 9, 8, 9, 8, 7, 7, 8, 8, 9, 9, 10, 9, 8, 8, 9, 9, 9, 9, 10, }; static const uint16_t mpa_huffcodes_13[256] = { 0x0001, 0x0005, 0x000e, 0x0015, 0x0022, 0x0033, 0x002e, 0x0047, 0x002a, 0x0034, 0x0044, 0x0034, 0x0043, 0x002c, 0x002b, 0x0013, 0x0003, 0x0004, 0x000c, 0x0013, 0x001f, 0x001a, 0x002c, 0x0021, 0x001f, 0x0018, 0x0020, 0x0018, 0x001f, 0x0023, 0x0016, 0x000e, 0x000f, 0x000d, 0x0017, 0x0024, 0x003b, 0x0031, 0x004d, 0x0041, 0x001d, 0x0028, 0x001e, 0x0028, 0x001b, 0x0021, 0x002a, 0x0010, 0x0016, 0x0014, 0x0025, 0x003d, 0x0038, 0x004f, 0x0049, 0x0040, 0x002b, 0x004c, 0x0038, 0x0025, 0x001a, 0x001f, 0x0019, 0x000e, 0x0023, 0x0010, 0x003c, 0x0039, 0x0061, 0x004b, 0x0072, 0x005b, 0x0036, 0x0049, 0x0037, 0x0029, 0x0030, 0x0035, 0x0017, 0x0018, 0x003a, 0x001b, 0x0032, 0x0060, 0x004c, 0x0046, 0x005d, 0x0054, 0x004d, 0x003a, 0x004f, 0x001d, 0x004a, 0x0031, 0x0029, 0x0011, 0x002f, 0x002d, 0x004e, 0x004a, 0x0073, 0x005e, 0x005a, 0x004f, 0x0045, 0x0053, 0x0047, 0x0032, 0x003b, 0x0026, 0x0024, 0x000f, 0x0048, 0x0022, 0x0038, 0x005f, 0x005c, 0x0055, 0x005b, 0x005a, 0x0056, 0x0049, 0x004d, 0x0041, 0x0033, 0x002c, 0x002b, 0x002a, 0x002b, 0x0014, 0x001e, 0x002c, 0x0037, 0x004e, 0x0048, 0x0057, 0x004e, 0x003d, 0x002e, 0x0036, 0x0025, 0x001e, 0x0014, 0x0010, 0x0035, 0x0019, 0x0029, 0x0025, 0x002c, 0x003b, 0x0036, 0x0051, 0x0042, 0x004c, 0x0039, 
0x0036, 0x0025, 0x0012, 0x0027, 0x000b, 0x0023, 0x0021, 0x001f, 0x0039, 0x002a, 0x0052, 0x0048, 0x0050, 0x002f, 0x003a, 0x0037, 0x0015, 0x0016, 0x001a, 0x0026, 0x0016, 0x0035, 0x0019, 0x0017, 0x0026, 0x0046, 0x003c, 0x0033, 0x0024, 0x0037, 0x001a, 0x0022, 0x0017, 0x001b, 0x000e, 0x0009, 0x0007, 0x0022, 0x0020, 0x001c, 0x0027, 0x0031, 0x004b, 0x001e, 0x0034, 0x0030, 0x0028, 0x0034, 0x001c, 0x0012, 0x0011, 0x0009, 0x0005, 0x002d, 0x0015, 0x0022, 0x0040, 0x0038, 0x0032, 0x0031, 0x002d, 0x001f, 0x0013, 0x000c, 0x000f, 0x000a, 0x0007, 0x0006, 0x0003, 0x0030, 0x0017, 0x0014, 0x0027, 0x0024, 0x0023, 0x0035, 0x0015, 0x0010, 0x0017, 0x000d, 0x000a, 0x0006, 0x0001, 0x0004, 0x0002, 0x0010, 0x000f, 0x0011, 0x001b, 0x0019, 0x0014, 0x001d, 0x000b, 0x0011, 0x000c, 0x0010, 0x0008, 0x0001, 0x0001, 0x0000, 0x0001, }; static const uint8_t mpa_huffbits_13[256] = { 1, 4, 6, 7, 8, 9, 9, 10, 9, 10, 11, 11, 12, 12, 13, 13, 3, 4, 6, 7, 8, 8, 9, 9, 9, 9, 10, 10, 11, 12, 12, 12, 6, 6, 7, 8, 9, 9, 10, 10, 9, 10, 10, 11, 11, 12, 13, 13, 7, 7, 8, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 13, 13, 8, 7, 9, 9, 10, 10, 11, 11, 10, 11, 11, 12, 12, 13, 13, 14, 9, 8, 9, 10, 10, 10, 11, 11, 11, 11, 12, 11, 13, 13, 14, 14, 9, 9, 10, 10, 11, 11, 11, 11, 11, 12, 12, 12, 13, 13, 14, 14, 10, 9, 10, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 16, 16, 9, 8, 9, 10, 10, 11, 11, 12, 12, 12, 12, 13, 13, 14, 15, 15, 10, 9, 10, 10, 11, 11, 11, 13, 12, 13, 13, 14, 14, 14, 16, 15, 10, 10, 10, 11, 11, 12, 12, 13, 12, 13, 14, 13, 14, 15, 16, 17, 11, 10, 10, 11, 12, 12, 12, 12, 13, 13, 13, 14, 15, 15, 15, 16, 11, 11, 11, 12, 12, 13, 12, 13, 14, 14, 15, 15, 15, 16, 16, 16, 12, 11, 12, 13, 13, 13, 14, 14, 14, 14, 14, 15, 16, 15, 16, 16, 13, 12, 12, 13, 13, 13, 15, 14, 14, 17, 15, 15, 15, 17, 16, 16, 12, 12, 13, 14, 14, 14, 15, 14, 15, 15, 16, 16, 19, 18, 19, 16, }; static const uint16_t mpa_huffcodes_15[256] = { 0x0007, 0x000c, 0x0012, 0x0035, 0x002f, 0x004c, 0x007c, 0x006c, 0x0059, 0x007b, 0x006c, 0x0077, 0x006b, 0x0051, 
0x007a, 0x003f, 0x000d, 0x0005, 0x0010, 0x001b, 0x002e, 0x0024, 0x003d, 0x0033, 0x002a, 0x0046, 0x0034, 0x0053, 0x0041, 0x0029, 0x003b, 0x0024, 0x0013, 0x0011, 0x000f, 0x0018, 0x0029, 0x0022, 0x003b, 0x0030, 0x0028, 0x0040, 0x0032, 0x004e, 0x003e, 0x0050, 0x0038, 0x0021, 0x001d, 0x001c, 0x0019, 0x002b, 0x0027, 0x003f, 0x0037, 0x005d, 0x004c, 0x003b, 0x005d, 0x0048, 0x0036, 0x004b, 0x0032, 0x001d, 0x0034, 0x0016, 0x002a, 0x0028, 0x0043, 0x0039, 0x005f, 0x004f, 0x0048, 0x0039, 0x0059, 0x0045, 0x0031, 0x0042, 0x002e, 0x001b, 0x004d, 0x0025, 0x0023, 0x0042, 0x003a, 0x0034, 0x005b, 0x004a, 0x003e, 0x0030, 0x004f, 0x003f, 0x005a, 0x003e, 0x0028, 0x0026, 0x007d, 0x0020, 0x003c, 0x0038, 0x0032, 0x005c, 0x004e, 0x0041, 0x0037, 0x0057, 0x0047, 0x0033, 0x0049, 0x0033, 0x0046, 0x001e, 0x006d, 0x0035, 0x0031, 0x005e, 0x0058, 0x004b, 0x0042, 0x007a, 0x005b, 0x0049, 0x0038, 0x002a, 0x0040, 0x002c, 0x0015, 0x0019, 0x005a, 0x002b, 0x0029, 0x004d, 0x0049, 0x003f, 0x0038, 0x005c, 0x004d, 0x0042, 0x002f, 0x0043, 0x0030, 0x0035, 0x0024, 0x0014, 0x0047, 0x0022, 0x0043, 0x003c, 0x003a, 0x0031, 0x0058, 0x004c, 0x0043, 0x006a, 0x0047, 0x0036, 0x0026, 0x0027, 0x0017, 0x000f, 0x006d, 0x0035, 0x0033, 0x002f, 0x005a, 0x0052, 0x003a, 0x0039, 0x0030, 0x0048, 0x0039, 0x0029, 0x0017, 0x001b, 0x003e, 0x0009, 0x0056, 0x002a, 0x0028, 0x0025, 0x0046, 0x0040, 0x0034, 0x002b, 0x0046, 0x0037, 0x002a, 0x0019, 0x001d, 0x0012, 0x000b, 0x000b, 0x0076, 0x0044, 0x001e, 0x0037, 0x0032, 0x002e, 0x004a, 0x0041, 0x0031, 0x0027, 0x0018, 0x0010, 0x0016, 0x000d, 0x000e, 0x0007, 0x005b, 0x002c, 0x0027, 0x0026, 0x0022, 0x003f, 0x0034, 0x002d, 0x001f, 0x0034, 0x001c, 0x0013, 0x000e, 0x0008, 0x0009, 0x0003, 0x007b, 0x003c, 0x003a, 0x0035, 0x002f, 0x002b, 0x0020, 0x0016, 0x0025, 0x0018, 0x0011, 0x000c, 0x000f, 0x000a, 0x0002, 0x0001, 0x0047, 0x0025, 0x0022, 0x001e, 0x001c, 0x0014, 0x0011, 0x001a, 0x0015, 0x0010, 0x000a, 0x0006, 0x0008, 0x0006, 0x0002, 0x0000, }; static const uint8_t mpa_huffbits_15[256] = { 3, 4, 5, 7, 7, 
8, 9, 9, 9, 10, 10, 11, 11, 11, 12, 13, 4, 3, 5, 6, 7, 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11, 5, 5, 5, 6, 7, 7, 8, 8, 8, 9, 9, 10, 10, 11, 11, 11, 6, 6, 6, 7, 7, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11, 7, 6, 7, 7, 8, 8, 9, 9, 9, 9, 10, 10, 10, 11, 11, 11, 8, 7, 7, 8, 8, 8, 9, 9, 9, 9, 10, 10, 11, 11, 11, 12, 9, 7, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 11, 11, 12, 12, 9, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 11, 11, 11, 12, 9, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 12, 12, 12, 9, 8, 9, 9, 9, 9, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 10, 9, 9, 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, 12, 13, 12, 10, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 13, 11, 10, 9, 10, 10, 10, 11, 11, 11, 11, 11, 11, 12, 12, 13, 13, 11, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 12, 13, 13, 12, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 12, 13, 12, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 13, 13, 13, 13, }; static const uint16_t mpa_huffcodes_16[256] = { 0x0001, 0x0005, 0x000e, 0x002c, 0x004a, 0x003f, 0x006e, 0x005d, 0x00ac, 0x0095, 0x008a, 0x00f2, 0x00e1, 0x00c3, 0x0178, 0x0011, 0x0003, 0x0004, 0x000c, 0x0014, 0x0023, 0x003e, 0x0035, 0x002f, 0x0053, 0x004b, 0x0044, 0x0077, 0x00c9, 0x006b, 0x00cf, 0x0009, 0x000f, 0x000d, 0x0017, 0x0026, 0x0043, 0x003a, 0x0067, 0x005a, 0x00a1, 0x0048, 0x007f, 0x0075, 0x006e, 0x00d1, 0x00ce, 0x0010, 0x002d, 0x0015, 0x0027, 0x0045, 0x0040, 0x0072, 0x0063, 0x0057, 0x009e, 0x008c, 0x00fc, 0x00d4, 0x00c7, 0x0183, 0x016d, 0x001a, 0x004b, 0x0024, 0x0044, 0x0041, 0x0073, 0x0065, 0x00b3, 0x00a4, 0x009b, 0x0108, 0x00f6, 0x00e2, 0x018b, 0x017e, 0x016a, 0x0009, 0x0042, 0x001e, 0x003b, 0x0038, 0x0066, 0x00b9, 0x00ad, 0x0109, 0x008e, 0x00fd, 0x00e8, 0x0190, 0x0184, 0x017a, 0x01bd, 0x0010, 0x006f, 0x0036, 0x0034, 0x0064, 0x00b8, 0x00b2, 0x00a0, 0x0085, 0x0101, 0x00f4, 0x00e4, 0x00d9, 0x0181, 0x016e, 0x02cb, 0x000a, 0x0062, 0x0030, 0x005b, 0x0058, 0x00a5, 0x009d, 0x0094, 0x0105, 0x00f8, 0x0197, 0x018d, 0x0174, 0x017c, 0x0379, 0x0374, 0x0008, 0x0055, 
0x0054, 0x0051, 0x009f, 0x009c, 0x008f, 0x0104, 0x00f9, 0x01ab, 0x0191, 0x0188, 0x017f, 0x02d7, 0x02c9, 0x02c4, 0x0007, 0x009a, 0x004c, 0x0049, 0x008d, 0x0083, 0x0100, 0x00f5, 0x01aa, 0x0196, 0x018a, 0x0180, 0x02df, 0x0167, 0x02c6, 0x0160, 0x000b, 0x008b, 0x0081, 0x0043, 0x007d, 0x00f7, 0x00e9, 0x00e5, 0x00db, 0x0189, 0x02e7, 0x02e1, 0x02d0, 0x0375, 0x0372, 0x01b7, 0x0004, 0x00f3, 0x0078, 0x0076, 0x0073, 0x00e3, 0x00df, 0x018c, 0x02ea, 0x02e6, 0x02e0, 0x02d1, 0x02c8, 0x02c2, 0x00df, 0x01b4, 0x0006, 0x00ca, 0x00e0, 0x00de, 0x00da, 0x00d8, 0x0185, 0x0182, 0x017d, 0x016c, 0x0378, 0x01bb, 0x02c3, 0x01b8, 0x01b5, 0x06c0, 0x0004, 0x02eb, 0x00d3, 0x00d2, 0x00d0, 0x0172, 0x017b, 0x02de, 0x02d3, 0x02ca, 0x06c7, 0x0373, 0x036d, 0x036c, 0x0d83, 0x0361, 0x0002, 0x0179, 0x0171, 0x0066, 0x00bb, 0x02d6, 0x02d2, 0x0166, 0x02c7, 0x02c5, 0x0362, 0x06c6, 0x0367, 0x0d82, 0x0366, 0x01b2, 0x0000, 0x000c, 0x000a, 0x0007, 0x000b, 0x000a, 0x0011, 0x000b, 0x0009, 0x000d, 0x000c, 0x000a, 0x0007, 0x0005, 0x0003, 0x0001, 0x0003, }; static const uint8_t mpa_huffbits_16[256] = { 1, 4, 6, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12, 12, 13, 9, 3, 4, 6, 7, 8, 9, 9, 9, 10, 10, 10, 11, 12, 11, 12, 8, 6, 6, 7, 8, 9, 9, 10, 10, 11, 10, 11, 11, 11, 12, 12, 9, 8, 7, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12, 12, 13, 13, 10, 9, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 9, 9, 8, 9, 9, 10, 11, 11, 12, 11, 12, 12, 13, 13, 13, 14, 10, 10, 9, 9, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 14, 10, 10, 9, 10, 10, 11, 11, 11, 12, 12, 13, 13, 13, 13, 15, 15, 10, 10, 10, 10, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 14, 10, 11, 10, 10, 11, 11, 12, 12, 13, 13, 13, 13, 14, 13, 14, 13, 11, 11, 11, 10, 11, 12, 12, 12, 12, 13, 14, 14, 14, 15, 15, 14, 10, 12, 11, 11, 11, 12, 12, 13, 14, 14, 14, 14, 14, 14, 13, 14, 11, 12, 12, 12, 12, 12, 13, 13, 13, 13, 15, 14, 14, 14, 14, 16, 11, 14, 12, 12, 12, 13, 13, 14, 14, 14, 16, 15, 15, 15, 17, 15, 11, 13, 13, 11, 12, 14, 14, 13, 14, 14, 15, 16, 15, 17, 15, 14, 11, 9, 8, 8, 9, 9, 
10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 8, }; static const uint16_t mpa_huffcodes_24[256] = { 0x000f, 0x000d, 0x002e, 0x0050, 0x0092, 0x0106, 0x00f8, 0x01b2, 0x01aa, 0x029d, 0x028d, 0x0289, 0x026d, 0x0205, 0x0408, 0x0058, 0x000e, 0x000c, 0x0015, 0x0026, 0x0047, 0x0082, 0x007a, 0x00d8, 0x00d1, 0x00c6, 0x0147, 0x0159, 0x013f, 0x0129, 0x0117, 0x002a, 0x002f, 0x0016, 0x0029, 0x004a, 0x0044, 0x0080, 0x0078, 0x00dd, 0x00cf, 0x00c2, 0x00b6, 0x0154, 0x013b, 0x0127, 0x021d, 0x0012, 0x0051, 0x0027, 0x004b, 0x0046, 0x0086, 0x007d, 0x0074, 0x00dc, 0x00cc, 0x00be, 0x00b2, 0x0145, 0x0137, 0x0125, 0x010f, 0x0010, 0x0093, 0x0048, 0x0045, 0x0087, 0x007f, 0x0076, 0x0070, 0x00d2, 0x00c8, 0x00bc, 0x0160, 0x0143, 0x0132, 0x011d, 0x021c, 0x000e, 0x0107, 0x0042, 0x0081, 0x007e, 0x0077, 0x0072, 0x00d6, 0x00ca, 0x00c0, 0x00b4, 0x0155, 0x013d, 0x012d, 0x0119, 0x0106, 0x000c, 0x00f9, 0x007b, 0x0079, 0x0075, 0x0071, 0x00d7, 0x00ce, 0x00c3, 0x00b9, 0x015b, 0x014a, 0x0134, 0x0123, 0x0110, 0x0208, 0x000a, 0x01b3, 0x0073, 0x006f, 0x006d, 0x00d3, 0x00cb, 0x00c4, 0x00bb, 0x0161, 0x014c, 0x0139, 0x012a, 0x011b, 0x0213, 0x017d, 0x0011, 0x01ab, 0x00d4, 0x00d0, 0x00cd, 0x00c9, 0x00c1, 0x00ba, 0x00b1, 0x00a9, 0x0140, 0x012f, 0x011e, 0x010c, 0x0202, 0x0179, 0x0010, 0x014f, 0x00c7, 0x00c5, 0x00bf, 0x00bd, 0x00b5, 0x00ae, 0x014d, 0x0141, 0x0131, 0x0121, 0x0113, 0x0209, 0x017b, 0x0173, 0x000b, 0x029c, 0x00b8, 0x00b7, 0x00b3, 0x00af, 0x0158, 0x014b, 0x013a, 0x0130, 0x0122, 0x0115, 0x0212, 0x017f, 0x0175, 0x016e, 0x000a, 0x028c, 0x015a, 0x00ab, 0x00a8, 0x00a4, 0x013e, 0x0135, 0x012b, 0x011f, 0x0114, 0x0107, 0x0201, 0x0177, 0x0170, 0x016a, 0x0006, 0x0288, 0x0142, 0x013c, 0x0138, 0x0133, 0x012e, 0x0124, 0x011c, 0x010d, 0x0105, 0x0200, 0x0178, 0x0172, 0x016c, 0x0167, 0x0004, 0x026c, 0x012c, 0x0128, 0x0126, 0x0120, 0x011a, 0x0111, 0x010a, 0x0203, 0x017c, 0x0176, 0x0171, 0x016d, 0x0169, 0x0165, 0x0002, 0x0409, 0x0118, 0x0116, 0x0112, 0x010b, 0x0108, 0x0103, 0x017e, 0x017a, 0x0174, 0x016f, 0x016b, 0x0168, 0x0166, 
0x0164, 0x0000, 0x002b, 0x0014, 0x0013, 0x0011, 0x000f, 0x000d, 0x000b, 0x0009, 0x0007, 0x0006, 0x0004, 0x0007, 0x0005, 0x0003, 0x0001, 0x0003, }; static const uint8_t mpa_huffbits_24[256] = { 4, 4, 6, 7, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 12, 9, 4, 4, 5, 6, 7, 8, 8, 9, 9, 9, 10, 10, 10, 10, 10, 8, 6, 5, 6, 7, 7, 8, 8, 9, 9, 9, 9, 10, 10, 10, 11, 7, 7, 6, 7, 7, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 7, 8, 7, 7, 8, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 7, 9, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 7, 9, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 11, 7, 10, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 11, 11, 8, 10, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 8, 10, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 11, 11, 11, 8, 11, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 8, 11, 10, 9, 9, 9, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 8, 11, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 8, 11, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 8, 12, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 4, }; static const HuffTable mpa_huff_tables[16] = { { 1, NULL, NULL }, { 2, mpa_huffbits_1, mpa_huffcodes_1 }, { 3, mpa_huffbits_2, mpa_huffcodes_2 }, { 3, mpa_huffbits_3, mpa_huffcodes_3 }, { 4, mpa_huffbits_5, mpa_huffcodes_5 }, { 4, mpa_huffbits_6, mpa_huffcodes_6 }, { 6, mpa_huffbits_7, mpa_huffcodes_7 }, { 6, mpa_huffbits_8, mpa_huffcodes_8 }, { 6, mpa_huffbits_9, mpa_huffcodes_9 }, { 8, mpa_huffbits_10, mpa_huffcodes_10 }, { 8, mpa_huffbits_11, mpa_huffcodes_11 }, { 8, mpa_huffbits_12, mpa_huffcodes_12 }, { 16, mpa_huffbits_13, mpa_huffcodes_13 }, { 16, mpa_huffbits_15, mpa_huffcodes_15 }, { 16, mpa_huffbits_16, mpa_huffcodes_16 }, { 16, mpa_huffbits_24, mpa_huffcodes_24 }, }; static const uint8_t mpa_huff_data[32][2] = { { 0, 0 }, { 1, 0 }, { 2, 0 }, { 3, 0 }, { 0, 0 }, { 4, 0 }, { 5, 0 }, { 6, 0 }, { 7, 0 }, { 8, 0 }, { 9, 0 }, { 10, 0 }, { 11, 0 }, { 12, 0 }, { 0, 0 }, { 13, 0 }, { 14, 1 
}, { 14, 2 }, { 14, 3 }, { 14, 4 }, { 14, 6 }, { 14, 8 }, { 14, 10 }, { 14, 13 }, { 15, 4 }, { 15, 5 }, { 15, 6 }, { 15, 7 }, { 15, 8 }, { 15, 9 }, { 15, 11 }, { 15, 13 }, }; /* huffman tables for quadrules */ static const uint8_t mpa_quad_codes[2][16] = { { 1, 5, 4, 5, 6, 5, 4, 4, 7, 3, 6, 0, 7, 2, 3, 1, }, { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, }, }; static const uint8_t mpa_quad_bits[2][16] = { { 1, 4, 4, 5, 4, 6, 5, 6, 4, 5, 5, 6, 5, 6, 6, 6, }, { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, }, }; /* band size tables */ static const uint8_t band_size_long[9][22] = { { 4, 4, 4, 4, 4, 4, 6, 6, 8, 8, 10, 12, 16, 20, 24, 28, 34, 42, 50, 54, 76, 158, }, /* 44100 */ { 4, 4, 4, 4, 4, 4, 6, 6, 6, 8, 10, 12, 16, 18, 22, 28, 34, 40, 46, 54, 54, 192, }, /* 48000 */ { 4, 4, 4, 4, 4, 4, 6, 6, 8, 10, 12, 16, 20, 24, 30, 38, 46, 56, 68, 84, 102, 26, }, /* 32000 */ { 6, 6, 6, 6, 6, 6, 8, 10, 12, 14, 16, 20, 24, 28, 32, 38, 46, 52, 60, 68, 58, 54, }, /* 22050 */ { 6, 6, 6, 6, 6, 6, 8, 10, 12, 14, 16, 18, 22, 26, 32, 38, 46, 52, 64, 70, 76, 36, }, /* 24000 */ { 6, 6, 6, 6, 6, 6, 8, 10, 12, 14, 16, 20, 24, 28, 32, 38, 46, 52, 60, 68, 58, 54, }, /* 16000 */ { 6, 6, 6, 6, 6, 6, 8, 10, 12, 14, 16, 20, 24, 28, 32, 38, 46, 52, 60, 68, 58, 54, }, /* 11025 */ { 6, 6, 6, 6, 6, 6, 8, 10, 12, 14, 16, 20, 24, 28, 32, 38, 46, 52, 60, 68, 58, 54, }, /* 12000 */ { 12, 12, 12, 12, 12, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 76, 90, 2, 2, 2, 2, 2, }, /* 8000 */ }; static const uint8_t band_size_short[9][13] = { { 4, 4, 4, 4, 6, 8, 10, 12, 14, 18, 22, 30, 56, }, /* 44100 */ { 4, 4, 4, 4, 6, 6, 10, 12, 14, 16, 20, 26, 66, }, /* 48000 */ { 4, 4, 4, 4, 6, 8, 12, 16, 20, 26, 34, 42, 12, }, /* 32000 */ { 4, 4, 4, 6, 6, 8, 10, 14, 18, 26, 32, 42, 18, }, /* 22050 */ { 4, 4, 4, 6, 8, 10, 12, 14, 18, 24, 32, 44, 12, }, /* 24000 */ { 4, 4, 4, 6, 8, 10, 12, 14, 18, 24, 30, 40, 18, }, /* 16000 */ { 4, 4, 4, 6, 8, 10, 12, 14, 18, 24, 30, 40, 18, }, /* 11025 */ { 4, 4, 4, 6, 8, 10, 12, 14, 
18, 24, 30, 40, 18, }, /* 12000 */ { 8, 8, 8, 12, 16, 20, 24, 28, 36, 2, 2, 2, 26, }, /* 8000 */ }; static const uint8_t mpa_pretab[2][22] = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 3, 2, 0 }, }; /* table for alias reduction (XXX: store it as integer !) */ static const float ci_table[8] = { -0.6, -0.535, -0.33, -0.185, -0.095, -0.041, -0.0142, -0.0037, }; #endif /* AVCODEC_MPEGAUDIODECTAB_H */
123linslouis-android-video-cutter
jni/libavcodec/mpegaudiodectab.h
C
asf20
23,641
/* * copyright (c) 2002 Mark Hills <mark@pogo.org.uk> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Ogg Vorbis codec support via libvorbisenc. * @author Mark Hills <mark@pogo.org.uk> */ #include <vorbis/vorbisenc.h> #include "avcodec.h" #include "bytestream.h" #undef NDEBUG #include <assert.h> #define OGGVORBIS_FRAME_SIZE 64 #define BUFFER_SIZE (1024*64) typedef struct OggVorbisContext { vorbis_info vi ; vorbis_dsp_state vd ; vorbis_block vb ; uint8_t buffer[BUFFER_SIZE]; int buffer_index; int eof; /* decoder */ vorbis_comment vc ; ogg_packet op; } OggVorbisContext ; static av_cold int oggvorbis_init_encoder(vorbis_info *vi, AVCodecContext *avccontext) { double cfreq; if(avccontext->flags & CODEC_FLAG_QSCALE) { /* variable bitrate */ if(vorbis_encode_setup_vbr(vi, avccontext->channels, avccontext->sample_rate, avccontext->global_quality / (float)FF_QP2LAMBDA / 10.0)) return -1; } else { /* constant bitrate */ if(vorbis_encode_setup_managed(vi, avccontext->channels, avccontext->sample_rate, -1, avccontext->bit_rate, -1)) return -1; #ifdef OGGVORBIS_VBR_BY_ESTIMATE /* variable bitrate by estimate */ if(vorbis_encode_ctl(vi, OV_ECTL_RATEMANAGE_AVG, NULL)) return -1; #endif } /* cutoff frequency */ if(avccontext->cutoff > 0) { cfreq = avccontext->cutoff / 1000.0; 
if(vorbis_encode_ctl(vi, OV_ECTL_LOWPASS_SET, &cfreq)) return -1; } return vorbis_encode_setup_init(vi); } static av_cold int oggvorbis_encode_init(AVCodecContext *avccontext) { OggVorbisContext *context = avccontext->priv_data ; ogg_packet header, header_comm, header_code; uint8_t *p; unsigned int offset, len; vorbis_info_init(&context->vi) ; if(oggvorbis_init_encoder(&context->vi, avccontext) < 0) { av_log(avccontext, AV_LOG_ERROR, "oggvorbis_encode_init: init_encoder failed\n") ; return -1 ; } vorbis_analysis_init(&context->vd, &context->vi) ; vorbis_block_init(&context->vd, &context->vb) ; vorbis_comment_init(&context->vc); vorbis_comment_add_tag(&context->vc, "encoder", LIBAVCODEC_IDENT) ; vorbis_analysis_headerout(&context->vd, &context->vc, &header, &header_comm, &header_code); len = header.bytes + header_comm.bytes + header_code.bytes; avccontext->extradata_size= 64 + len + len/255; p = avccontext->extradata= av_mallocz(avccontext->extradata_size); p[0] = 2; offset = 1; offset += av_xiphlacing(&p[offset], header.bytes); offset += av_xiphlacing(&p[offset], header_comm.bytes); memcpy(&p[offset], header.packet, header.bytes); offset += header.bytes; memcpy(&p[offset], header_comm.packet, header_comm.bytes); offset += header_comm.bytes; memcpy(&p[offset], header_code.packet, header_code.bytes); offset += header_code.bytes; avccontext->extradata_size = offset; avccontext->extradata= av_realloc(avccontext->extradata, avccontext->extradata_size); /* vorbis_block_clear(&context->vb); vorbis_dsp_clear(&context->vd); vorbis_info_clear(&context->vi);*/ vorbis_comment_clear(&context->vc); avccontext->frame_size = OGGVORBIS_FRAME_SIZE ; avccontext->coded_frame= avcodec_alloc_frame(); avccontext->coded_frame->key_frame= 1; return 0 ; } static int oggvorbis_encode_frame(AVCodecContext *avccontext, unsigned char *packets, int buf_size, void *data) { OggVorbisContext *context = avccontext->priv_data ; ogg_packet op ; signed short *audio = data ; int l; if(data) { int 
samples = OGGVORBIS_FRAME_SIZE; float **buffer ; buffer = vorbis_analysis_buffer(&context->vd, samples) ; if(context->vi.channels == 1) { for(l = 0 ; l < samples ; l++) buffer[0][l]=audio[l]/32768.f; } else { for(l = 0 ; l < samples ; l++){ buffer[0][l]=audio[l*2]/32768.f; buffer[1][l]=audio[l*2+1]/32768.f; } } vorbis_analysis_wrote(&context->vd, samples) ; } else { if(!context->eof) vorbis_analysis_wrote(&context->vd, 0) ; context->eof = 1; } while(vorbis_analysis_blockout(&context->vd, &context->vb) == 1) { vorbis_analysis(&context->vb, NULL); vorbis_bitrate_addblock(&context->vb) ; while(vorbis_bitrate_flushpacket(&context->vd, &op)) { /* i'd love to say the following line is a hack, but sadly it's * not, apparently the end of stream decision is in libogg. */ if(op.bytes==1) continue; memcpy(context->buffer + context->buffer_index, &op, sizeof(ogg_packet)); context->buffer_index += sizeof(ogg_packet); memcpy(context->buffer + context->buffer_index, op.packet, op.bytes); context->buffer_index += op.bytes; // av_log(avccontext, AV_LOG_DEBUG, "e%d / %d\n", context->buffer_index, op.bytes); } } l=0; if(context->buffer_index){ ogg_packet *op2= (ogg_packet*)context->buffer; op2->packet = context->buffer + sizeof(ogg_packet); l= op2->bytes; avccontext->coded_frame->pts= av_rescale_q(op2->granulepos, (AVRational){1, avccontext->sample_rate}, avccontext->time_base); //FIXME we should reorder the user supplied pts and not assume that they are spaced by 1/sample_rate memcpy(packets, op2->packet, l); context->buffer_index -= l + sizeof(ogg_packet); memcpy(context->buffer, context->buffer + l + sizeof(ogg_packet), context->buffer_index); // av_log(avccontext, AV_LOG_DEBUG, "E%d\n", l); } return l; } static av_cold int oggvorbis_encode_close(AVCodecContext *avccontext) { OggVorbisContext *context = avccontext->priv_data ; /* ogg_packet op ; */ vorbis_analysis_wrote(&context->vd, 0) ; /* notify vorbisenc this is EOF */ vorbis_block_clear(&context->vb); 
vorbis_dsp_clear(&context->vd); vorbis_info_clear(&context->vi); av_freep(&avccontext->coded_frame); av_freep(&avccontext->extradata); return 0 ; } AVCodec libvorbis_encoder = { "libvorbis", AVMEDIA_TYPE_AUDIO, CODEC_ID_VORBIS, sizeof(OggVorbisContext), oggvorbis_encode_init, oggvorbis_encode_frame, oggvorbis_encode_close, .capabilities= CODEC_CAP_DELAY, .sample_fmts = (const enum SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("libvorbis Vorbis"), } ;
123linslouis-android-video-cutter
jni/libavcodec/libvorbis.c
C
asf20
7,502
/* * LSP computing for ACELP-based codecs * * Copyright (c) 2008 Vladimir Voroshilov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_LSP_H #define AVCODEC_LSP_H #include <stdint.h> /** (I.F) means fixed-point value with F fractional and I integer bits */ /** * \brief ensure a minimum distance between LSFs * \param lsfq [in/out] LSF to check and adjust * \param lsfq_min_distance minimum distance between LSFs * \param lsfq_min minimum allowed LSF value * \param lsfq_max maximum allowed LSF value * \param lp_order LP filter order */ void ff_acelp_reorder_lsf(int16_t* lsfq, int lsfq_min_distance, int lsfq_min, int lsfq_max, int lp_order); /** * Adjust the quantized LSFs so they are increasing and not too close. * * This step is not mentioned in the AMR spec but is in the reference C decoder. * Omitting this step creates audible distortion on the sinusoidal sweep * test vectors in 3GPP TS 26.074. 
* * @param[in,out] lsf LSFs in Hertz * @param min_spacing minimum distance between two consecutive lsf values * @param size size of the lsf vector */ void ff_set_min_dist_lsf(float *lsf, double min_spacing, int order); /** * \brief Convert LSF to LSP * \param lsp [out] LSP coefficients (-0x8000 <= (0.15) < 0x8000) * \param lsf normalized LSF coefficients (0 <= (2.13) < 0x2000 * PI) * \param lp_order LP filter order * * \remark It is safe to pass the same array into the lsf and lsp parameters. */ void ff_acelp_lsf2lsp(int16_t *lsp, const int16_t *lsf, int lp_order); /** * \brief LSP to LP conversion (3.2.6 of G.729) * \param lp [out] decoded LP coefficients (-0x8000 <= (3.12) < 0x8000) * \param lsp LSP coefficients (-0x8000 <= (0.15) < 0x8000) * \param lp_half_order LP filter order, divided by 2 */ void ff_acelp_lsp2lpc(int16_t* lp, const int16_t* lsp, int lp_half_order); /** * \brief Interpolate LSP for the first subframe and convert LSP -> LP for both subframes (3.2.5 and 3.2.6 of G.729) * \param lp_1st [out] decoded LP coefficients for first subframe (-0x8000 <= (3.12) < 0x8000) * \param lp_2nd [out] decoded LP coefficients for second subframe (-0x8000 <= (3.12) < 0x8000) * \param lsp_2nd LSP coefficients of the second subframe (-0x8000 <= (0.15) < 0x8000) * \param lsp_prev LSP coefficients from the second subframe of the previous frame (-0x8000 <= (0.15) < 0x8000) * \param lp_order LP filter order */ void ff_acelp_lp_decode(int16_t* lp_1st, int16_t* lp_2nd, const int16_t* lsp_2nd, const int16_t* lsp_prev, int lp_order); #define MAX_LP_HALF_ORDER 8 /** * Reconstructs LPC coefficients from the line spectral pair frequencies. * * @param lsp line spectral pairs in cosine domain * @param lpc linear predictive coding coefficients * @param lp_half_order half the number of the amount of LPCs to be * reconstructed, need to be smaller or equal to MAX_LP_HALF_ORDER * * @note buffers should have a minimux size of 2*lp_half_order elements. 
* * TIA/EIA/IS-733 2.4.3.3.5 */ void ff_acelp_lspd2lpc(const double *lsp, float *lpc, int lp_half_order); /** * Sort values in ascending order. * * @note O(n) if data already sorted, O(n^2) - otherwise */ void ff_sort_nearly_sorted_floats(float *vals, int len); /** * Computes the Pa / (1 + z(-1)) or Qa / (1 - z(-1)) coefficients * needed for LSP to LPC conversion. * We only need to calculate the 6 first elements of the polynomial. * * @param lsp line spectral pairs in cosine domain * @param f [out] polynomial input/output as a vector * * TIA/EIA/IS-733 2.4.3.3.5-1/2 */ void ff_lsp2polyf(const double *lsp, double *f, int lp_half_order); #endif /* AVCODEC_LSP_H */
123linslouis-android-video-cutter
jni/libavcodec/lsp.h
C
asf20
4,378
/* * VC3/DNxHD SIMD functions * Copyright (c) 2007 Baptiste Coudurier <baptiste dot coudurier at smartjog dot com> * * VC-3 encoder funded by the British Broadcasting Corporation * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/x86_cpu.h" #include "libavcodec/dnxhdenc.h" static void get_pixels_8x4_sym_sse2(DCTELEM *block, const uint8_t *pixels, int line_size) { __asm__ volatile( "pxor %%xmm7, %%xmm7 \n\t" "movq (%0), %%xmm0 \n\t" "add %2, %0 \n\t" "movq (%0), %%xmm1 \n\t" "movq (%0, %2), %%xmm2 \n\t" "movq (%0, %2,2), %%xmm3 \n\t" "punpcklbw %%xmm7, %%xmm0 \n\t" "punpcklbw %%xmm7, %%xmm1 \n\t" "punpcklbw %%xmm7, %%xmm2 \n\t" "punpcklbw %%xmm7, %%xmm3 \n\t" "movdqa %%xmm0, (%1) \n\t" "movdqa %%xmm1, 16(%1) \n\t" "movdqa %%xmm2, 32(%1) \n\t" "movdqa %%xmm3, 48(%1) \n\t" "movdqa %%xmm3 , 64(%1) \n\t" "movdqa %%xmm2 , 80(%1) \n\t" "movdqa %%xmm1 , 96(%1) \n\t" "movdqa %%xmm0, 112(%1) \n\t" : "+r" (pixels) : "r" (block), "r" ((x86_reg)line_size) ); } void ff_dnxhd_init_mmx(DNXHDEncContext *ctx) { if (mm_flags & FF_MM_SSE2) { ctx->get_pixels_8x4_sym = get_pixels_8x4_sym_sse2; } }
123linslouis-android-video-cutter
jni/libavcodec/x86/dnxhd_mmx.c
C
asf20
2,183
/* * Copyright (C) 2004 the ffmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * SSE2-optimized functions cribbed from the original VP3 source code. */ #include "libavcodec/dsputil.h" #include "dsputil_mmx.h" #include "vp3dsp_sse2.h" DECLARE_ALIGNED(16, const uint16_t, ff_vp3_idct_data)[7 * 8] = { 64277,64277,64277,64277,64277,64277,64277,64277, 60547,60547,60547,60547,60547,60547,60547,60547, 54491,54491,54491,54491,54491,54491,54491,54491, 46341,46341,46341,46341,46341,46341,46341,46341, 36410,36410,36410,36410,36410,36410,36410,36410, 25080,25080,25080,25080,25080,25080,25080,25080, 12785,12785,12785,12785,12785,12785,12785,12785 }; #define VP3_1D_IDCT_SSE2(ADD, SHIFT) \ "movdqa "I(3)", %%xmm2 \n\t" /* xmm2 = i3 */ \ "movdqa "C(3)", %%xmm6 \n\t" /* xmm6 = c3 */ \ "movdqa %%xmm2, %%xmm4 \n\t" /* xmm4 = i3 */ \ "movdqa "I(5)", %%xmm7 \n\t" /* xmm7 = i5 */ \ "pmulhw %%xmm6, %%xmm4 \n\t" /* xmm4 = c3 * i3 - i3 */ \ "movdqa "C(5)", %%xmm1 \n\t" /* xmm1 = c5 */ \ "pmulhw %%xmm7, %%xmm6 \n\t" /* xmm6 = c3 * i5 - i5 */ \ "movdqa %%xmm1, %%xmm5 \n\t" /* xmm5 = c5 */ \ "pmulhw %%xmm2, %%xmm1 \n\t" /* xmm1 = c5 * i3 - i3 */ \ "movdqa "I(1)", %%xmm3 \n\t" /* xmm3 = i1 */ \ "pmulhw %%xmm7, %%xmm5 \n\t" /* xmm5 = c5 * i5 - i5 */ \ "movdqa "C(1)", %%xmm0 \n\t" /* xmm0 = 
c1 */ \ "paddw %%xmm2, %%xmm4 \n\t" /* xmm4 = c3 * i3 */ \ "paddw %%xmm7, %%xmm6 \n\t" /* xmm6 = c3 * i5 */ \ "paddw %%xmm1, %%xmm2 \n\t" /* xmm2 = c5 * i3 */ \ "movdqa "I(7)", %%xmm1 \n\t" /* xmm1 = i7 */ \ "paddw %%xmm5, %%xmm7 \n\t" /* xmm7 = c5 * i5 */ \ "movdqa %%xmm0, %%xmm5 \n\t" /* xmm5 = c1 */ \ "pmulhw %%xmm3, %%xmm0 \n\t" /* xmm0 = c1 * i1 - i1 */ \ "paddsw %%xmm7, %%xmm4 \n\t" /* xmm4 = c3 * i3 + c5 * i5 = C */ \ "pmulhw %%xmm1, %%xmm5 \n\t" /* xmm5 = c1 * i7 - i7 */ \ "movdqa "C(7)", %%xmm7 \n\t" /* xmm7 = c7 */ \ "psubsw %%xmm2, %%xmm6 \n\t" /* xmm6 = c3 * i5 - c5 * i3 = D */ \ "paddw %%xmm3, %%xmm0 \n\t" /* xmm0 = c1 * i1 */ \ "pmulhw %%xmm7, %%xmm3 \n\t" /* xmm3 = c7 * i1 */ \ "movdqa "I(2)", %%xmm2 \n\t" /* xmm2 = i2 */ \ "pmulhw %%xmm1, %%xmm7 \n\t" /* xmm7 = c7 * i7 */ \ "paddw %%xmm1, %%xmm5 \n\t" /* xmm5 = c1 * i7 */ \ "movdqa %%xmm2, %%xmm1 \n\t" /* xmm1 = i2 */ \ "pmulhw "C(2)", %%xmm2 \n\t" /* xmm2 = i2 * c2 -i2 */ \ "psubsw %%xmm5, %%xmm3 \n\t" /* xmm3 = c7 * i1 - c1 * i7 = B */ \ "movdqa "I(6)", %%xmm5 \n\t" /* xmm5 = i6 */ \ "paddsw %%xmm7, %%xmm0 \n\t" /* xmm0 = c1 * i1 + c7 * i7 = A */ \ "movdqa %%xmm5, %%xmm7 \n\t" /* xmm7 = i6 */ \ "psubsw %%xmm4, %%xmm0 \n\t" /* xmm0 = A - C */ \ "pmulhw "C(2)", %%xmm5 \n\t" /* xmm5 = c2 * i6 - i6 */ \ "paddw %%xmm1, %%xmm2 \n\t" /* xmm2 = i2 * c2 */ \ "pmulhw "C(6)", %%xmm1 \n\t" /* xmm1 = c6 * i2 */ \ "paddsw %%xmm4, %%xmm4 \n\t" /* xmm4 = C + C */ \ "paddsw %%xmm0, %%xmm4 \n\t" /* xmm4 = A + C = C. */ \ "psubsw %%xmm6, %%xmm3 \n\t" /* xmm3 = B - D */ \ "paddw %%xmm7, %%xmm5 \n\t" /* xmm5 = c2 * i6 */ \ "paddsw %%xmm6, %%xmm6 \n\t" /* xmm6 = D + D */ \ "pmulhw "C(6)", %%xmm7 \n\t" /* xmm7 = c6 * i6 */ \ "paddsw %%xmm3, %%xmm6 \n\t" /* xmm6 = B + D = D. */ \ "movdqa %%xmm4, "I(1)" \n\t" /* Save C. 
at I(1) */ \ "psubsw %%xmm5, %%xmm1 \n\t" /* xmm1 = c6 * i2 - c2 * i6 = H */ \ "movdqa "C(4)", %%xmm4 \n\t" /* xmm4 = c4 */ \ "movdqa %%xmm3, %%xmm5 \n\t" /* xmm5 = B - D */ \ "pmulhw %%xmm4, %%xmm3 \n\t" /* xmm3 = ( c4 -1 ) * ( B - D ) */ \ "paddsw %%xmm2, %%xmm7 \n\t" /* xmm7 = c2 * i2 + c6 * i6 = G */ \ "movdqa %%xmm6, "I(2)" \n\t" /* Save D. at I(2) */ \ "movdqa %%xmm0, %%xmm2 \n\t" /* xmm2 = A - C */ \ "movdqa "I(0)", %%xmm6 \n\t" /* xmm6 = i0 */ \ "pmulhw %%xmm4, %%xmm0 \n\t" /* xmm0 = ( c4 - 1 ) * ( A - C ) = A. */ \ "paddw %%xmm3, %%xmm5 \n\t" /* xmm5 = c4 * ( B - D ) = B. */ \ "movdqa "I(4)", %%xmm3 \n\t" /* xmm3 = i4 */ \ "psubsw %%xmm1, %%xmm5 \n\t" /* xmm5 = B. - H = B.. */ \ "paddw %%xmm0, %%xmm2 \n\t" /* xmm2 = c4 * ( A - C) = A. */ \ "psubsw %%xmm3, %%xmm6 \n\t" /* xmm6 = i0 - i4 */ \ "movdqa %%xmm6, %%xmm0 \n\t" /* xmm0 = i0 - i4 */ \ "pmulhw %%xmm4, %%xmm6 \n\t" /* xmm6 = (c4 - 1) * (i0 - i4) = F */ \ "paddsw %%xmm3, %%xmm3 \n\t" /* xmm3 = i4 + i4 */ \ "paddsw %%xmm1, %%xmm1 \n\t" /* xmm1 = H + H */ \ "paddsw %%xmm0, %%xmm3 \n\t" /* xmm3 = i0 + i4 */ \ "paddsw %%xmm5, %%xmm1 \n\t" /* xmm1 = B. + H = H. */ \ "pmulhw %%xmm3, %%xmm4 \n\t" /* xmm4 = ( c4 - 1 ) * ( i0 + i4 ) */ \ "paddw %%xmm0, %%xmm6 \n\t" /* xmm6 = c4 * ( i0 - i4 ) */ \ "psubsw %%xmm2, %%xmm6 \n\t" /* xmm6 = F - A. = F. */ \ "paddsw %%xmm2, %%xmm2 \n\t" /* xmm2 = A. + A. */ \ "movdqa "I(1)", %%xmm0 \n\t" /* Load C. from I(1) */ \ "paddsw %%xmm6, %%xmm2 \n\t" /* xmm2 = F + A. = A.. */ \ "paddw %%xmm3, %%xmm4 \n\t" /* xmm4 = c4 * ( i0 + i4 ) = 3 */ \ "psubsw %%xmm1, %%xmm2 \n\t" /* xmm2 = A.. - H. = R2 */ \ ADD(%%xmm2) /* Adjust R2 and R1 before shifting */ \ "paddsw %%xmm1, %%xmm1 \n\t" /* xmm1 = H. + H. */ \ "paddsw %%xmm2, %%xmm1 \n\t" /* xmm1 = A.. + H. = R1 */ \ SHIFT(%%xmm2) /* xmm2 = op2 */ \ "psubsw %%xmm7, %%xmm4 \n\t" /* xmm4 = E - G = E. */ \ SHIFT(%%xmm1) /* xmm1 = op1 */ \ "movdqa "I(2)", %%xmm3 \n\t" /* Load D. 
from I(2) */ \ "paddsw %%xmm7, %%xmm7 \n\t" /* xmm7 = G + G */ \ "paddsw %%xmm4, %%xmm7 \n\t" /* xmm7 = E + G = G. */ \ "psubsw %%xmm3, %%xmm4 \n\t" /* xmm4 = E. - D. = R4 */ \ ADD(%%xmm4) /* Adjust R4 and R3 before shifting */ \ "paddsw %%xmm3, %%xmm3 \n\t" /* xmm3 = D. + D. */ \ "paddsw %%xmm4, %%xmm3 \n\t" /* xmm3 = E. + D. = R3 */ \ SHIFT(%%xmm4) /* xmm4 = op4 */ \ "psubsw %%xmm5, %%xmm6 \n\t" /* xmm6 = F. - B..= R6 */ \ SHIFT(%%xmm3) /* xmm3 = op3 */ \ ADD(%%xmm6) /* Adjust R6 and R5 before shifting */ \ "paddsw %%xmm5, %%xmm5 \n\t" /* xmm5 = B.. + B.. */ \ "paddsw %%xmm6, %%xmm5 \n\t" /* xmm5 = F. + B.. = R5 */ \ SHIFT(%%xmm6) /* xmm6 = op6 */ \ SHIFT(%%xmm5) /* xmm5 = op5 */ \ "psubsw %%xmm0, %%xmm7 \n\t" /* xmm7 = G. - C. = R7 */ \ ADD(%%xmm7) /* Adjust R7 and R0 before shifting */ \ "paddsw %%xmm0, %%xmm0 \n\t" /* xmm0 = C. + C. */ \ "paddsw %%xmm7, %%xmm0 \n\t" /* xmm0 = G. + C. */ \ SHIFT(%%xmm7) /* xmm7 = op7 */ \ SHIFT(%%xmm0) /* xmm0 = op0 */ #define PUT_BLOCK(r0, r1, r2, r3, r4, r5, r6, r7) \ "movdqa " #r0 ", " O(0) "\n\t" \ "movdqa " #r1 ", " O(1) "\n\t" \ "movdqa " #r2 ", " O(2) "\n\t" \ "movdqa " #r3 ", " O(3) "\n\t" \ "movdqa " #r4 ", " O(4) "\n\t" \ "movdqa " #r5 ", " O(5) "\n\t" \ "movdqa " #r6 ", " O(6) "\n\t" \ "movdqa " #r7 ", " O(7) "\n\t" #define NOP(xmm) #define SHIFT4(xmm) "psraw $4, "#xmm"\n\t" #define ADD8(xmm) "paddsw %2, "#xmm"\n\t" void ff_vp3_idct_sse2(int16_t *input_data) { #define I(x) AV_STRINGIFY(16*x)"(%0)" #define O(x) I(x) #define C(x) AV_STRINGIFY(16*(x-1))"(%1)" __asm__ volatile ( VP3_1D_IDCT_SSE2(NOP, NOP) TRANSPOSE8(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7, (%0)) PUT_BLOCK(%%xmm0, %%xmm5, %%xmm7, %%xmm3, %%xmm6, %%xmm4, %%xmm2, %%xmm1) VP3_1D_IDCT_SSE2(ADD8, SHIFT4) PUT_BLOCK(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7) :: "r"(input_data), "r"(ff_vp3_idct_data), "m"(ff_pw_8) ); } void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block) { ff_vp3_idct_sse2(block); 
put_signed_pixels_clamped_mmx(block, dest, line_size); } void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block) { ff_vp3_idct_sse2(block); add_pixels_clamped_mmx(block, dest, line_size); }
123linslouis-android-video-cutter
jni/libavcodec/x86/vp3dsp_sse2.c
C
asf20
9,119
/* * The simplest mpeg encoder (well, it was the simplest!) * Copyright (c) 2000,2001 Fabrice Bellard * * Optimized for ia32 CPUs by Nick Kurshev <nickols_k@mail.ru> * h263, mpeg1, mpeg2 dequantizer & draw_edges by Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/x86_cpu.h" #include "libavcodec/avcodec.h" #include "libavcodec/dsputil.h" #include "libavcodec/mpegvideo.h" #include "dsputil_mmx.h" extern uint16_t inv_zigzag_direct16[64]; static void dct_unquantize_h263_intra_mmx(MpegEncContext *s, DCTELEM *block, int n, int qscale) { x86_reg level, qmul, qadd, nCoeffs; qmul = qscale << 1; assert(s->block_last_index[n]>=0 || s->h263_aic); if (!s->h263_aic) { if (n < 4) level = block[0] * s->y_dc_scale; else level = block[0] * s->c_dc_scale; qadd = (qscale - 1) | 1; }else{ qadd = 0; level= block[0]; } if(s->ac_pred) nCoeffs=63; else nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; //printf("%d %d ", qmul, qadd); __asm__ volatile( "movd %1, %%mm6 \n\t" //qmul "packssdw %%mm6, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" "movd %2, %%mm5 \n\t" //qadd "pxor %%mm7, %%mm7 \n\t" "packssdw %%mm5, %%mm5 \n\t" "packssdw %%mm5, %%mm5 \n\t" "psubw %%mm5, %%mm7 \n\t" "pxor %%mm4, %%mm4 \n\t" ASMALIGN(4) "1: \n\t" "movq (%0, %3), 
%%mm0 \n\t" "movq 8(%0, %3), %%mm1 \n\t" "pmullw %%mm6, %%mm0 \n\t" "pmullw %%mm6, %%mm1 \n\t" "movq (%0, %3), %%mm2 \n\t" "movq 8(%0, %3), %%mm3 \n\t" "pcmpgtw %%mm4, %%mm2 \n\t" // block[i] < 0 ? -1 : 0 "pcmpgtw %%mm4, %%mm3 \n\t" // block[i] < 0 ? -1 : 0 "pxor %%mm2, %%mm0 \n\t" "pxor %%mm3, %%mm1 \n\t" "paddw %%mm7, %%mm0 \n\t" "paddw %%mm7, %%mm1 \n\t" "pxor %%mm0, %%mm2 \n\t" "pxor %%mm1, %%mm3 \n\t" "pcmpeqw %%mm7, %%mm0 \n\t" // block[i] == 0 ? -1 : 0 "pcmpeqw %%mm7, %%mm1 \n\t" // block[i] == 0 ? -1 : 0 "pandn %%mm2, %%mm0 \n\t" "pandn %%mm3, %%mm1 \n\t" "movq %%mm0, (%0, %3) \n\t" "movq %%mm1, 8(%0, %3) \n\t" "add $16, %3 \n\t" "jng 1b \n\t" ::"r" (block+nCoeffs), "rm"(qmul), "rm" (qadd), "r" (2*(-nCoeffs)) : "memory" ); block[0]= level; } static void dct_unquantize_h263_inter_mmx(MpegEncContext *s, DCTELEM *block, int n, int qscale) { x86_reg qmul, qadd, nCoeffs; qmul = qscale << 1; qadd = (qscale - 1) | 1; assert(s->block_last_index[n]>=0 || s->h263_aic); nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; //printf("%d %d ", qmul, qadd); __asm__ volatile( "movd %1, %%mm6 \n\t" //qmul "packssdw %%mm6, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" "movd %2, %%mm5 \n\t" //qadd "pxor %%mm7, %%mm7 \n\t" "packssdw %%mm5, %%mm5 \n\t" "packssdw %%mm5, %%mm5 \n\t" "psubw %%mm5, %%mm7 \n\t" "pxor %%mm4, %%mm4 \n\t" ASMALIGN(4) "1: \n\t" "movq (%0, %3), %%mm0 \n\t" "movq 8(%0, %3), %%mm1 \n\t" "pmullw %%mm6, %%mm0 \n\t" "pmullw %%mm6, %%mm1 \n\t" "movq (%0, %3), %%mm2 \n\t" "movq 8(%0, %3), %%mm3 \n\t" "pcmpgtw %%mm4, %%mm2 \n\t" // block[i] < 0 ? -1 : 0 "pcmpgtw %%mm4, %%mm3 \n\t" // block[i] < 0 ? -1 : 0 "pxor %%mm2, %%mm0 \n\t" "pxor %%mm3, %%mm1 \n\t" "paddw %%mm7, %%mm0 \n\t" "paddw %%mm7, %%mm1 \n\t" "pxor %%mm0, %%mm2 \n\t" "pxor %%mm1, %%mm3 \n\t" "pcmpeqw %%mm7, %%mm0 \n\t" // block[i] == 0 ? -1 : 0 "pcmpeqw %%mm7, %%mm1 \n\t" // block[i] == 0 ? 
-1 : 0 "pandn %%mm2, %%mm0 \n\t" "pandn %%mm3, %%mm1 \n\t" "movq %%mm0, (%0, %3) \n\t" "movq %%mm1, 8(%0, %3) \n\t" "add $16, %3 \n\t" "jng 1b \n\t" ::"r" (block+nCoeffs), "rm"(qmul), "rm" (qadd), "r" (2*(-nCoeffs)) : "memory" ); } /* NK: Note: looking at PARANOID: "enable all paranoid tests for rounding, overflows, etc..." #ifdef PARANOID if (level < -2048 || level > 2047) fprintf(stderr, "unquant error %d %d\n", i, level); #endif We can suppose that result of two multiplications can't be greater than 0xFFFF i.e. is 16-bit, so we use here only PMULLW instruction and can avoid a complex multiplication. ===================================================== Full formula for multiplication of 2 integer numbers which are represent as high:low words: input: value1 = high1:low1 value2 = high2:low2 output: value3 = value1*value2 value3=high3:low3 (on overflow: modulus 2^32 wrap-around) this mean that for 0x123456 * 0x123456 correct result is 0x766cb0ce4 but this algorithm will compute only 0x66cb0ce4 this limited by 16-bit size of operands --------------------------------- tlow1 = high1*low2 tlow2 = high2*low1 tlow1 = tlow1 + tlow2 high3:low3 = low1*low2 high3 += tlow1 */ static void dct_unquantize_mpeg1_intra_mmx(MpegEncContext *s, DCTELEM *block, int n, int qscale) { x86_reg nCoeffs; const uint16_t *quant_matrix; int block0; assert(s->block_last_index[n]>=0); nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]+1; if (n < 4) block0 = block[0] * s->y_dc_scale; else block0 = block[0] * s->c_dc_scale; /* XXX: only mpeg1 */ quant_matrix = s->intra_matrix; __asm__ volatile( "pcmpeqw %%mm7, %%mm7 \n\t" "psrlw $15, %%mm7 \n\t" "movd %2, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" "mov %3, %%"REG_a" \n\t" ASMALIGN(4) "1: \n\t" "movq (%0, %%"REG_a"), %%mm0 \n\t" "movq 8(%0, %%"REG_a"), %%mm1 \n\t" "movq (%1, %%"REG_a"), %%mm4 \n\t" "movq 8(%1, %%"REG_a"), %%mm5 \n\t" "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i] "pmullw %%mm6, %%mm5 
\n\t" // q=qscale*quant_matrix[i] "pxor %%mm2, %%mm2 \n\t" "pxor %%mm3, %%mm3 \n\t" "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0 "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0 "pxor %%mm2, %%mm0 \n\t" "pxor %%mm3, %%mm1 \n\t" "psubw %%mm2, %%mm0 \n\t" // abs(block[i]) "psubw %%mm3, %%mm1 \n\t" // abs(block[i]) "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*q "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*q "pxor %%mm4, %%mm4 \n\t" "pxor %%mm5, %%mm5 \n\t" // FIXME slow "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0 "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0 "psraw $3, %%mm0 \n\t" "psraw $3, %%mm1 \n\t" "psubw %%mm7, %%mm0 \n\t" "psubw %%mm7, %%mm1 \n\t" "por %%mm7, %%mm0 \n\t" "por %%mm7, %%mm1 \n\t" "pxor %%mm2, %%mm0 \n\t" "pxor %%mm3, %%mm1 \n\t" "psubw %%mm2, %%mm0 \n\t" "psubw %%mm3, %%mm1 \n\t" "pandn %%mm0, %%mm4 \n\t" "pandn %%mm1, %%mm5 \n\t" "movq %%mm4, (%0, %%"REG_a") \n\t" "movq %%mm5, 8(%0, %%"REG_a") \n\t" "add $16, %%"REG_a" \n\t" "js 1b \n\t" ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs) : "%"REG_a, "memory" ); block[0]= block0; } static void dct_unquantize_mpeg1_inter_mmx(MpegEncContext *s, DCTELEM *block, int n, int qscale) { x86_reg nCoeffs; const uint16_t *quant_matrix; assert(s->block_last_index[n]>=0); nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]+1; quant_matrix = s->inter_matrix; __asm__ volatile( "pcmpeqw %%mm7, %%mm7 \n\t" "psrlw $15, %%mm7 \n\t" "movd %2, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" "mov %3, %%"REG_a" \n\t" ASMALIGN(4) "1: \n\t" "movq (%0, %%"REG_a"), %%mm0 \n\t" "movq 8(%0, %%"REG_a"), %%mm1 \n\t" "movq (%1, %%"REG_a"), %%mm4 \n\t" "movq 8(%1, %%"REG_a"), %%mm5 \n\t" "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i] "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i] "pxor %%mm2, %%mm2 \n\t" "pxor %%mm3, %%mm3 \n\t" "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? 
-1 : 0 "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0 "pxor %%mm2, %%mm0 \n\t" "pxor %%mm3, %%mm1 \n\t" "psubw %%mm2, %%mm0 \n\t" // abs(block[i]) "psubw %%mm3, %%mm1 \n\t" // abs(block[i]) "paddw %%mm0, %%mm0 \n\t" // abs(block[i])*2 "paddw %%mm1, %%mm1 \n\t" // abs(block[i])*2 "paddw %%mm7, %%mm0 \n\t" // abs(block[i])*2 + 1 "paddw %%mm7, %%mm1 \n\t" // abs(block[i])*2 + 1 "pmullw %%mm4, %%mm0 \n\t" // (abs(block[i])*2 + 1)*q "pmullw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q "pxor %%mm4, %%mm4 \n\t" "pxor %%mm5, %%mm5 \n\t" // FIXME slow "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0 "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0 "psraw $4, %%mm0 \n\t" "psraw $4, %%mm1 \n\t" "psubw %%mm7, %%mm0 \n\t" "psubw %%mm7, %%mm1 \n\t" "por %%mm7, %%mm0 \n\t" "por %%mm7, %%mm1 \n\t" "pxor %%mm2, %%mm0 \n\t" "pxor %%mm3, %%mm1 \n\t" "psubw %%mm2, %%mm0 \n\t" "psubw %%mm3, %%mm1 \n\t" "pandn %%mm0, %%mm4 \n\t" "pandn %%mm1, %%mm5 \n\t" "movq %%mm4, (%0, %%"REG_a") \n\t" "movq %%mm5, 8(%0, %%"REG_a") \n\t" "add $16, %%"REG_a" \n\t" "js 1b \n\t" ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs) : "%"REG_a, "memory" ); } static void dct_unquantize_mpeg2_intra_mmx(MpegEncContext *s, DCTELEM *block, int n, int qscale) { x86_reg nCoeffs; const uint16_t *quant_matrix; int block0; assert(s->block_last_index[n]>=0); if(s->alternate_scan) nCoeffs= 63; //FIXME else nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]; if (n < 4) block0 = block[0] * s->y_dc_scale; else block0 = block[0] * s->c_dc_scale; quant_matrix = s->intra_matrix; __asm__ volatile( "pcmpeqw %%mm7, %%mm7 \n\t" "psrlw $15, %%mm7 \n\t" "movd %2, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" "mov %3, %%"REG_a" \n\t" ASMALIGN(4) "1: \n\t" "movq (%0, %%"REG_a"), %%mm0 \n\t" "movq 8(%0, %%"REG_a"), %%mm1 \n\t" "movq (%1, %%"REG_a"), %%mm4 \n\t" "movq 8(%1, %%"REG_a"), %%mm5 \n\t" "pmullw %%mm6, %%mm4 \n\t" // 
q=qscale*quant_matrix[i] "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i] "pxor %%mm2, %%mm2 \n\t" "pxor %%mm3, %%mm3 \n\t" "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0 "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0 "pxor %%mm2, %%mm0 \n\t" "pxor %%mm3, %%mm1 \n\t" "psubw %%mm2, %%mm0 \n\t" // abs(block[i]) "psubw %%mm3, %%mm1 \n\t" // abs(block[i]) "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*q "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*q "pxor %%mm4, %%mm4 \n\t" "pxor %%mm5, %%mm5 \n\t" // FIXME slow "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0 "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0 "psraw $3, %%mm0 \n\t" "psraw $3, %%mm1 \n\t" "pxor %%mm2, %%mm0 \n\t" "pxor %%mm3, %%mm1 \n\t" "psubw %%mm2, %%mm0 \n\t" "psubw %%mm3, %%mm1 \n\t" "pandn %%mm0, %%mm4 \n\t" "pandn %%mm1, %%mm5 \n\t" "movq %%mm4, (%0, %%"REG_a") \n\t" "movq %%mm5, 8(%0, %%"REG_a") \n\t" "add $16, %%"REG_a" \n\t" "jng 1b \n\t" ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs) : "%"REG_a, "memory" ); block[0]= block0; //Note, we do not do mismatch control for intra as errors cannot accumulate } static void dct_unquantize_mpeg2_inter_mmx(MpegEncContext *s, DCTELEM *block, int n, int qscale) { x86_reg nCoeffs; const uint16_t *quant_matrix; assert(s->block_last_index[n]>=0); if(s->alternate_scan) nCoeffs= 63; //FIXME else nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]; quant_matrix = s->inter_matrix; __asm__ volatile( "pcmpeqw %%mm7, %%mm7 \n\t" "psrlq $48, %%mm7 \n\t" "movd %2, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" "mov %3, %%"REG_a" \n\t" ASMALIGN(4) "1: \n\t" "movq (%0, %%"REG_a"), %%mm0 \n\t" "movq 8(%0, %%"REG_a"), %%mm1 \n\t" "movq (%1, %%"REG_a"), %%mm4 \n\t" "movq 8(%1, %%"REG_a"), %%mm5 \n\t" "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i] "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i] "pxor %%mm2, %%mm2 \n\t" "pxor %%mm3, %%mm3 \n\t" 
"pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0 "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0 "pxor %%mm2, %%mm0 \n\t" "pxor %%mm3, %%mm1 \n\t" "psubw %%mm2, %%mm0 \n\t" // abs(block[i]) "psubw %%mm3, %%mm1 \n\t" // abs(block[i]) "paddw %%mm0, %%mm0 \n\t" // abs(block[i])*2 "paddw %%mm1, %%mm1 \n\t" // abs(block[i])*2 "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*2*q "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*2*q "paddw %%mm4, %%mm0 \n\t" // (abs(block[i])*2 + 1)*q "paddw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q "pxor %%mm4, %%mm4 \n\t" "pxor %%mm5, %%mm5 \n\t" // FIXME slow "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0 "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0 "psrlw $4, %%mm0 \n\t" "psrlw $4, %%mm1 \n\t" "pxor %%mm2, %%mm0 \n\t" "pxor %%mm3, %%mm1 \n\t" "psubw %%mm2, %%mm0 \n\t" "psubw %%mm3, %%mm1 \n\t" "pandn %%mm0, %%mm4 \n\t" "pandn %%mm1, %%mm5 \n\t" "pxor %%mm4, %%mm7 \n\t" "pxor %%mm5, %%mm7 \n\t" "movq %%mm4, (%0, %%"REG_a") \n\t" "movq %%mm5, 8(%0, %%"REG_a") \n\t" "add $16, %%"REG_a" \n\t" "jng 1b \n\t" "movd 124(%0, %3), %%mm0 \n\t" "movq %%mm7, %%mm6 \n\t" "psrlq $32, %%mm7 \n\t" "pxor %%mm6, %%mm7 \n\t" "movq %%mm7, %%mm6 \n\t" "psrlq $16, %%mm7 \n\t" "pxor %%mm6, %%mm7 \n\t" "pslld $31, %%mm7 \n\t" "psrlq $15, %%mm7 \n\t" "pxor %%mm7, %%mm0 \n\t" "movd %%mm0, 124(%0, %3) \n\t" ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "r" (-2*nCoeffs) : "%"REG_a, "memory" ); } static void denoise_dct_mmx(MpegEncContext *s, DCTELEM *block){ const int intra= s->mb_intra; int *sum= s->dct_error_sum[intra]; uint16_t *offset= s->dct_offset[intra]; s->dct_count[intra]++; __asm__ volatile( "pxor %%mm7, %%mm7 \n\t" "1: \n\t" "pxor %%mm0, %%mm0 \n\t" "pxor %%mm1, %%mm1 \n\t" "movq (%0), %%mm2 \n\t" "movq 8(%0), %%mm3 \n\t" "pcmpgtw %%mm2, %%mm0 \n\t" "pcmpgtw %%mm3, %%mm1 \n\t" "pxor %%mm0, %%mm2 \n\t" "pxor %%mm1, %%mm3 \n\t" "psubw %%mm0, %%mm2 \n\t" "psubw %%mm1, %%mm3 \n\t" "movq %%mm2, %%mm4 
\n\t" "movq %%mm3, %%mm5 \n\t" "psubusw (%2), %%mm2 \n\t" "psubusw 8(%2), %%mm3 \n\t" "pxor %%mm0, %%mm2 \n\t" "pxor %%mm1, %%mm3 \n\t" "psubw %%mm0, %%mm2 \n\t" "psubw %%mm1, %%mm3 \n\t" "movq %%mm2, (%0) \n\t" "movq %%mm3, 8(%0) \n\t" "movq %%mm4, %%mm2 \n\t" "movq %%mm5, %%mm3 \n\t" "punpcklwd %%mm7, %%mm4 \n\t" "punpckhwd %%mm7, %%mm2 \n\t" "punpcklwd %%mm7, %%mm5 \n\t" "punpckhwd %%mm7, %%mm3 \n\t" "paddd (%1), %%mm4 \n\t" "paddd 8(%1), %%mm2 \n\t" "paddd 16(%1), %%mm5 \n\t" "paddd 24(%1), %%mm3 \n\t" "movq %%mm4, (%1) \n\t" "movq %%mm2, 8(%1) \n\t" "movq %%mm5, 16(%1) \n\t" "movq %%mm3, 24(%1) \n\t" "add $16, %0 \n\t" "add $32, %1 \n\t" "add $16, %2 \n\t" "cmp %3, %0 \n\t" " jb 1b \n\t" : "+r" (block), "+r" (sum), "+r" (offset) : "r"(block+64) ); } static void denoise_dct_sse2(MpegEncContext *s, DCTELEM *block){ const int intra= s->mb_intra; int *sum= s->dct_error_sum[intra]; uint16_t *offset= s->dct_offset[intra]; s->dct_count[intra]++; __asm__ volatile( "pxor %%xmm7, %%xmm7 \n\t" "1: \n\t" "pxor %%xmm0, %%xmm0 \n\t" "pxor %%xmm1, %%xmm1 \n\t" "movdqa (%0), %%xmm2 \n\t" "movdqa 16(%0), %%xmm3 \n\t" "pcmpgtw %%xmm2, %%xmm0 \n\t" "pcmpgtw %%xmm3, %%xmm1 \n\t" "pxor %%xmm0, %%xmm2 \n\t" "pxor %%xmm1, %%xmm3 \n\t" "psubw %%xmm0, %%xmm2 \n\t" "psubw %%xmm1, %%xmm3 \n\t" "movdqa %%xmm2, %%xmm4 \n\t" "movdqa %%xmm3, %%xmm5 \n\t" "psubusw (%2), %%xmm2 \n\t" "psubusw 16(%2), %%xmm3 \n\t" "pxor %%xmm0, %%xmm2 \n\t" "pxor %%xmm1, %%xmm3 \n\t" "psubw %%xmm0, %%xmm2 \n\t" "psubw %%xmm1, %%xmm3 \n\t" "movdqa %%xmm2, (%0) \n\t" "movdqa %%xmm3, 16(%0) \n\t" "movdqa %%xmm4, %%xmm6 \n\t" "movdqa %%xmm5, %%xmm0 \n\t" "punpcklwd %%xmm7, %%xmm4 \n\t" "punpckhwd %%xmm7, %%xmm6 \n\t" "punpcklwd %%xmm7, %%xmm5 \n\t" "punpckhwd %%xmm7, %%xmm0 \n\t" "paddd (%1), %%xmm4 \n\t" "paddd 16(%1), %%xmm6 \n\t" "paddd 32(%1), %%xmm5 \n\t" "paddd 48(%1), %%xmm0 \n\t" "movdqa %%xmm4, (%1) \n\t" "movdqa %%xmm6, 16(%1) \n\t" "movdqa %%xmm5, 32(%1) \n\t" "movdqa %%xmm0, 48(%1) \n\t" "add $32, %0 
\n\t" "add $64, %1 \n\t" "add $32, %2 \n\t" "cmp %3, %0 \n\t" " jb 1b \n\t" : "+r" (block), "+r" (sum), "+r" (offset) : "r"(block+64) ); } #if HAVE_SSSE3 #define HAVE_SSSE3_BAK #endif #undef HAVE_SSSE3 #define HAVE_SSSE3 0 #undef HAVE_SSE2 #undef HAVE_MMX2 #define HAVE_SSE2 0 #define HAVE_MMX2 0 #define RENAME(a) a ## _MMX #define RENAMEl(a) a ## _mmx #include "mpegvideo_mmx_template.c" #undef HAVE_MMX2 #define HAVE_MMX2 1 #undef RENAME #undef RENAMEl #define RENAME(a) a ## _MMX2 #define RENAMEl(a) a ## _mmx2 #include "mpegvideo_mmx_template.c" #undef HAVE_SSE2 #define HAVE_SSE2 1 #undef RENAME #undef RENAMEl #define RENAME(a) a ## _SSE2 #define RENAMEl(a) a ## _sse2 #include "mpegvideo_mmx_template.c" #ifdef HAVE_SSSE3_BAK #undef HAVE_SSSE3 #define HAVE_SSSE3 1 #undef RENAME #undef RENAMEl #define RENAME(a) a ## _SSSE3 #define RENAMEl(a) a ## _sse2 #include "mpegvideo_mmx_template.c" #endif void MPV_common_init_mmx(MpegEncContext *s) { if (mm_flags & FF_MM_MMX) { const int dct_algo = s->avctx->dct_algo; s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_mmx; s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_mmx; s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_mmx; s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_mmx; if(!(s->flags & CODEC_FLAG_BITEXACT)) s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_mmx; s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_mmx; if (mm_flags & FF_MM_SSE2) { s->denoise_dct= denoise_dct_sse2; } else { s->denoise_dct= denoise_dct_mmx; } if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){ #if HAVE_SSSE3 if(mm_flags & FF_MM_SSSE3){ s->dct_quantize= dct_quantize_SSSE3; } else #endif if(mm_flags & FF_MM_SSE2){ s->dct_quantize= dct_quantize_SSE2; } else if(mm_flags & FF_MM_MMX2){ s->dct_quantize= dct_quantize_MMX2; } else { s->dct_quantize= dct_quantize_MMX; } } } }
123linslouis-android-video-cutter
jni/libavcodec/x86/mpegvideo_mmx.c
C
asf20
28,258
/* * MMX optimized forward DCT * The gcc porting is Copyright (c) 2001 Fabrice Bellard. * cleanup/optimizations are Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * SSE2 optimization is Copyright (c) 2004 Denes Balatoni. * * from fdctam32.c - AP922 MMX(3D-Now) forward-DCT * * Intel Application Note AP-922 - fast, precise implementation of DCT * http://developer.intel.com/vtune/cbts/appnotes.htm * * Also of inspiration: * a page about fdct at http://www.geocities.com/ssavekar/dct.htm * Skal's fdct at http://skal.planet-d.net/coding/dct.html * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/common.h" #include "libavcodec/dsputil.h" ////////////////////////////////////////////////////////////////////// // // constants for the forward DCT // ----------------------------- // // Be sure to check that your compiler is aligning all constants to QWORD // (8-byte) memory boundaries! Otherwise the unaligned memory access will // severely stall MMX execution. 
// ////////////////////////////////////////////////////////////////////// #define BITS_FRW_ACC 3 //; 2 or 3 for accuracy #define SHIFT_FRW_COL BITS_FRW_ACC #define SHIFT_FRW_ROW (BITS_FRW_ACC + 17 - 3) #define RND_FRW_ROW (1 << (SHIFT_FRW_ROW-1)) //#define RND_FRW_COL (1 << (SHIFT_FRW_COL-1)) #define X8(x) x,x,x,x,x,x,x,x //concatenated table, for forward DCT transformation DECLARE_ALIGNED(16, static const int16_t, fdct_tg_all_16)[24] = { X8(13036), // tg * (2<<16) + 0.5 X8(27146), // tg * (2<<16) + 0.5 X8(-21746) // tg * (2<<16) + 0.5 }; DECLARE_ALIGNED(16, static const int16_t, ocos_4_16)[8] = { X8(23170) //cos * (2<<15) + 0.5 }; DECLARE_ALIGNED(16, static const int16_t, fdct_one_corr)[8] = { X8(1) }; DECLARE_ALIGNED(8, static const int32_t, fdct_r_row)[2] = {RND_FRW_ROW, RND_FRW_ROW }; static struct { DECLARE_ALIGNED(16, const int32_t, fdct_r_row_sse2)[4]; } fdct_r_row_sse2 = {{ RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW }}; //DECLARE_ALIGNED(16, static const long, fdct_r_row_sse2)[4] = {RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW}; DECLARE_ALIGNED(8, static const int16_t, tab_frw_01234567)[] = { // forward_dct coeff table 16384, 16384, 22725, 19266, 16384, 16384, 12873, 4520, 21407, 8867, 19266, -4520, -8867, -21407, -22725, -12873, 16384, -16384, 12873, -22725, -16384, 16384, 4520, 19266, 8867, -21407, 4520, -12873, 21407, -8867, 19266, -22725, 22725, 22725, 31521, 26722, 22725, 22725, 17855, 6270, 29692, 12299, 26722, -6270, -12299, -29692, -31521, -17855, 22725, -22725, 17855, -31521, -22725, 22725, 6270, 26722, 12299, -29692, 6270, -17855, 29692, -12299, 26722, -31521, 21407, 21407, 29692, 25172, 21407, 21407, 16819, 5906, 27969, 11585, 25172, -5906, -11585, -27969, -29692, -16819, 21407, -21407, 16819, -29692, -21407, 21407, 5906, 25172, 11585, -27969, 5906, -16819, 27969, -11585, 25172, -29692, 19266, 19266, 26722, 22654, 19266, 19266, 15137, 5315, 25172, 10426, 22654, -5315, -10426, -25172, -26722, -15137, 19266, -19266, 15137, -26722, 
-19266, 19266, 5315, 22654, 10426, -25172, 5315, -15137, 25172, -10426, 22654, -26722, 16384, 16384, 22725, 19266, 16384, 16384, 12873, 4520, 21407, 8867, 19266, -4520, -8867, -21407, -22725, -12873, 16384, -16384, 12873, -22725, -16384, 16384, 4520, 19266, 8867, -21407, 4520, -12873, 21407, -8867, 19266, -22725, 19266, 19266, 26722, 22654, 19266, 19266, 15137, 5315, 25172, 10426, 22654, -5315, -10426, -25172, -26722, -15137, 19266, -19266, 15137, -26722, -19266, 19266, 5315, 22654, 10426, -25172, 5315, -15137, 25172, -10426, 22654, -26722, 21407, 21407, 29692, 25172, 21407, 21407, 16819, 5906, 27969, 11585, 25172, -5906, -11585, -27969, -29692, -16819, 21407, -21407, 16819, -29692, -21407, 21407, 5906, 25172, 11585, -27969, 5906, -16819, 27969, -11585, 25172, -29692, 22725, 22725, 31521, 26722, 22725, 22725, 17855, 6270, 29692, 12299, 26722, -6270, -12299, -29692, -31521, -17855, 22725, -22725, 17855, -31521, -22725, 22725, 6270, 26722, 12299, -29692, 6270, -17855, 29692, -12299, 26722, -31521, }; static struct { DECLARE_ALIGNED(16, const int16_t, tab_frw_01234567_sse2)[256]; } tab_frw_01234567_sse2 = {{ //DECLARE_ALIGNED(16, static const int16_t, tab_frw_01234567_sse2)[] = { // forward_dct coeff table #define TABLE_SSE2 C4, C4, C1, C3, -C6, -C2, -C1, -C5, \ C4, C4, C5, C7, C2, C6, C3, -C7, \ -C4, C4, C7, C3, C6, -C2, C7, -C5, \ C4, -C4, C5, -C1, C2, -C6, C3, -C1, // c1..c7 * cos(pi/4) * 2^15 #define C1 22725 #define C2 21407 #define C3 19266 #define C4 16384 #define C5 12873 #define C6 8867 #define C7 4520 TABLE_SSE2 #undef C1 #undef C2 #undef C3 #undef C4 #undef C5 #undef C6 #undef C7 #define C1 31521 #define C2 29692 #define C3 26722 #define C4 22725 #define C5 17855 #define C6 12299 #define C7 6270 TABLE_SSE2 #undef C1 #undef C2 #undef C3 #undef C4 #undef C5 #undef C6 #undef C7 #define C1 29692 #define C2 27969 #define C3 25172 #define C4 21407 #define C5 16819 #define C6 11585 #define C7 5906 TABLE_SSE2 #undef C1 #undef C2 #undef C3 #undef C4 #undef C5 #undef 
C6 #undef C7 #define C1 26722 #define C2 25172 #define C3 22654 #define C4 19266 #define C5 15137 #define C6 10426 #define C7 5315 TABLE_SSE2 #undef C1 #undef C2 #undef C3 #undef C4 #undef C5 #undef C6 #undef C7 #define C1 22725 #define C2 21407 #define C3 19266 #define C4 16384 #define C5 12873 #define C6 8867 #define C7 4520 TABLE_SSE2 #undef C1 #undef C2 #undef C3 #undef C4 #undef C5 #undef C6 #undef C7 #define C1 26722 #define C2 25172 #define C3 22654 #define C4 19266 #define C5 15137 #define C6 10426 #define C7 5315 TABLE_SSE2 #undef C1 #undef C2 #undef C3 #undef C4 #undef C5 #undef C6 #undef C7 #define C1 29692 #define C2 27969 #define C3 25172 #define C4 21407 #define C5 16819 #define C6 11585 #define C7 5906 TABLE_SSE2 #undef C1 #undef C2 #undef C3 #undef C4 #undef C5 #undef C6 #undef C7 #define C1 31521 #define C2 29692 #define C3 26722 #define C4 22725 #define C5 17855 #define C6 12299 #define C7 6270 TABLE_SSE2 }}; #define S(s) AV_TOSTRING(s) //AV_STRINGIFY is too long #define FDCT_COL(cpu, mm, mov)\ static av_always_inline void fdct_col_##cpu(const int16_t *in, int16_t *out, int offset)\ {\ __asm__ volatile (\ #mov" 16(%0), %%"#mm"0 \n\t" \ #mov" 96(%0), %%"#mm"1 \n\t" \ #mov" %%"#mm"0, %%"#mm"2 \n\t" \ #mov" 32(%0), %%"#mm"3 \n\t" \ "paddsw %%"#mm"1, %%"#mm"0 \n\t" \ #mov" 80(%0), %%"#mm"4 \n\t" \ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"0 \n\t" \ #mov" (%0), %%"#mm"5 \n\t" \ "paddsw %%"#mm"3, %%"#mm"4 \n\t" \ "paddsw 112(%0), %%"#mm"5 \n\t" \ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"4 \n\t" \ #mov" %%"#mm"0, %%"#mm"6 \n\t" \ "psubsw %%"#mm"1, %%"#mm"2 \n\t" \ #mov" 16(%1), %%"#mm"1 \n\t" \ "psubsw %%"#mm"4, %%"#mm"0 \n\t" \ #mov" 48(%0), %%"#mm"7 \n\t" \ "pmulhw %%"#mm"0, %%"#mm"1 \n\t" \ "paddsw 64(%0), %%"#mm"7 \n\t" \ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"5 \n\t" \ "paddsw %%"#mm"4, %%"#mm"6 \n\t" \ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"7 \n\t" \ #mov" %%"#mm"5, %%"#mm"4 \n\t" \ "psubsw %%"#mm"7, %%"#mm"5 \n\t" \ "paddsw %%"#mm"5, %%"#mm"1 \n\t" \ "paddsw %%"#mm"7, 
%%"#mm"4 \n\t" \ "por (%2), %%"#mm"1 \n\t" \ "psllw $"S(SHIFT_FRW_COL)"+1, %%"#mm"2 \n\t" \ "pmulhw 16(%1), %%"#mm"5 \n\t" \ #mov" %%"#mm"4, %%"#mm"7 \n\t" \ "psubsw 80(%0), %%"#mm"3 \n\t" \ "psubsw %%"#mm"6, %%"#mm"4 \n\t" \ #mov" %%"#mm"1, 32(%3) \n\t" \ "paddsw %%"#mm"6, %%"#mm"7 \n\t" \ #mov" 48(%0), %%"#mm"1 \n\t" \ "psllw $"S(SHIFT_FRW_COL)"+1, %%"#mm"3 \n\t" \ "psubsw 64(%0), %%"#mm"1 \n\t" \ #mov" %%"#mm"2, %%"#mm"6 \n\t" \ #mov" %%"#mm"4, 64(%3) \n\t" \ "paddsw %%"#mm"3, %%"#mm"2 \n\t" \ "pmulhw (%4), %%"#mm"2 \n\t" \ "psubsw %%"#mm"3, %%"#mm"6 \n\t" \ "pmulhw (%4), %%"#mm"6 \n\t" \ "psubsw %%"#mm"0, %%"#mm"5 \n\t" \ "por (%2), %%"#mm"5 \n\t" \ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"1 \n\t" \ "por (%2), %%"#mm"2 \n\t" \ #mov" %%"#mm"1, %%"#mm"4 \n\t" \ #mov" (%0), %%"#mm"3 \n\t" \ "paddsw %%"#mm"6, %%"#mm"1 \n\t" \ "psubsw 112(%0), %%"#mm"3 \n\t" \ "psubsw %%"#mm"6, %%"#mm"4 \n\t" \ #mov" (%1), %%"#mm"0 \n\t" \ "psllw $"S(SHIFT_FRW_COL)", %%"#mm"3 \n\t" \ #mov" 32(%1), %%"#mm"6 \n\t" \ "pmulhw %%"#mm"1, %%"#mm"0 \n\t" \ #mov" %%"#mm"7, (%3) \n\t" \ "pmulhw %%"#mm"4, %%"#mm"6 \n\t" \ #mov" %%"#mm"5, 96(%3) \n\t" \ #mov" %%"#mm"3, %%"#mm"7 \n\t" \ #mov" 32(%1), %%"#mm"5 \n\t" \ "psubsw %%"#mm"2, %%"#mm"7 \n\t" \ "paddsw %%"#mm"2, %%"#mm"3 \n\t" \ "pmulhw %%"#mm"7, %%"#mm"5 \n\t" \ "paddsw %%"#mm"3, %%"#mm"0 \n\t" \ "paddsw %%"#mm"4, %%"#mm"6 \n\t" \ "pmulhw (%1), %%"#mm"3 \n\t" \ "por (%2), %%"#mm"0 \n\t" \ "paddsw %%"#mm"7, %%"#mm"5 \n\t" \ "psubsw %%"#mm"6, %%"#mm"7 \n\t" \ #mov" %%"#mm"0, 16(%3) \n\t" \ "paddsw %%"#mm"4, %%"#mm"5 \n\t" \ #mov" %%"#mm"7, 48(%3) \n\t" \ "psubsw %%"#mm"1, %%"#mm"3 \n\t" \ #mov" %%"#mm"5, 80(%3) \n\t" \ #mov" %%"#mm"3, 112(%3) \n\t" \ : \ : "r" (in + offset), "r" (fdct_tg_all_16), "r" (fdct_one_corr), \ "r" (out + offset), "r" (ocos_4_16)); \ } FDCT_COL(mmx, mm, movq) FDCT_COL(sse2, xmm, movdqa) static av_always_inline void fdct_row_sse2(const int16_t *in, int16_t *out) { __asm__ volatile( #define FDCT_ROW_SSE2_H1(i,t) \ "movq " 
#i "(%0), %%xmm2 \n\t" \ "movq " #i "+8(%0), %%xmm0 \n\t" \ "movdqa " #t "+32(%1), %%xmm3 \n\t" \ "movdqa " #t "+48(%1), %%xmm7 \n\t" \ "movdqa " #t "(%1), %%xmm4 \n\t" \ "movdqa " #t "+16(%1), %%xmm5 \n\t" #define FDCT_ROW_SSE2_H2(i,t) \ "movq " #i "(%0), %%xmm2 \n\t" \ "movq " #i "+8(%0), %%xmm0 \n\t" \ "movdqa " #t "+32(%1), %%xmm3 \n\t" \ "movdqa " #t "+48(%1), %%xmm7 \n\t" #define FDCT_ROW_SSE2(i) \ "movq %%xmm2, %%xmm1 \n\t" \ "pshuflw $27, %%xmm0, %%xmm0 \n\t" \ "paddsw %%xmm0, %%xmm1 \n\t" \ "psubsw %%xmm0, %%xmm2 \n\t" \ "punpckldq %%xmm2, %%xmm1 \n\t" \ "pshufd $78, %%xmm1, %%xmm2 \n\t" \ "pmaddwd %%xmm2, %%xmm3 \n\t" \ "pmaddwd %%xmm1, %%xmm7 \n\t" \ "pmaddwd %%xmm5, %%xmm2 \n\t" \ "pmaddwd %%xmm4, %%xmm1 \n\t" \ "paddd %%xmm7, %%xmm3 \n\t" \ "paddd %%xmm2, %%xmm1 \n\t" \ "paddd %%xmm6, %%xmm3 \n\t" \ "paddd %%xmm6, %%xmm1 \n\t" \ "psrad %3, %%xmm3 \n\t" \ "psrad %3, %%xmm1 \n\t" \ "packssdw %%xmm3, %%xmm1 \n\t" \ "movdqa %%xmm1, " #i "(%4) \n\t" "movdqa (%2), %%xmm6 \n\t" FDCT_ROW_SSE2_H1(0,0) FDCT_ROW_SSE2(0) FDCT_ROW_SSE2_H2(64,0) FDCT_ROW_SSE2(64) FDCT_ROW_SSE2_H1(16,64) FDCT_ROW_SSE2(16) FDCT_ROW_SSE2_H2(112,64) FDCT_ROW_SSE2(112) FDCT_ROW_SSE2_H1(32,128) FDCT_ROW_SSE2(32) FDCT_ROW_SSE2_H2(96,128) FDCT_ROW_SSE2(96) FDCT_ROW_SSE2_H1(48,192) FDCT_ROW_SSE2(48) FDCT_ROW_SSE2_H2(80,192) FDCT_ROW_SSE2(80) : : "r" (in), "r" (tab_frw_01234567_sse2.tab_frw_01234567_sse2), "r" (fdct_r_row_sse2.fdct_r_row_sse2), "i" (SHIFT_FRW_ROW), "r" (out) ); } static av_always_inline void fdct_row_mmx2(const int16_t *in, int16_t *out, const int16_t *table) { __asm__ volatile ( "pshufw $0x1B, 8(%0), %%mm5 \n\t" "movq (%0), %%mm0 \n\t" "movq %%mm0, %%mm1 \n\t" "paddsw %%mm5, %%mm0 \n\t" "psubsw %%mm5, %%mm1 \n\t" "movq %%mm0, %%mm2 \n\t" "punpckldq %%mm1, %%mm0 \n\t" "punpckhdq %%mm1, %%mm2 \n\t" "movq (%1), %%mm1 \n\t" "movq 8(%1), %%mm3 \n\t" "movq 16(%1), %%mm4 \n\t" "movq 24(%1), %%mm5 \n\t" "movq 32(%1), %%mm6 \n\t" "movq 40(%1), %%mm7 \n\t" "pmaddwd %%mm0, %%mm1 \n\t" 
"pmaddwd %%mm2, %%mm3 \n\t" "pmaddwd %%mm0, %%mm4 \n\t" "pmaddwd %%mm2, %%mm5 \n\t" "pmaddwd %%mm0, %%mm6 \n\t" "pmaddwd %%mm2, %%mm7 \n\t" "pmaddwd 48(%1), %%mm0 \n\t" "pmaddwd 56(%1), %%mm2 \n\t" "paddd %%mm1, %%mm3 \n\t" "paddd %%mm4, %%mm5 \n\t" "paddd %%mm6, %%mm7 \n\t" "paddd %%mm0, %%mm2 \n\t" "movq (%2), %%mm0 \n\t" "paddd %%mm0, %%mm3 \n\t" "paddd %%mm0, %%mm5 \n\t" "paddd %%mm0, %%mm7 \n\t" "paddd %%mm0, %%mm2 \n\t" "psrad $"S(SHIFT_FRW_ROW)", %%mm3 \n\t" "psrad $"S(SHIFT_FRW_ROW)", %%mm5 \n\t" "psrad $"S(SHIFT_FRW_ROW)", %%mm7 \n\t" "psrad $"S(SHIFT_FRW_ROW)", %%mm2 \n\t" "packssdw %%mm5, %%mm3 \n\t" "packssdw %%mm2, %%mm7 \n\t" "movq %%mm3, (%3) \n\t" "movq %%mm7, 8(%3) \n\t" : : "r" (in), "r" (table), "r" (fdct_r_row), "r" (out)); } static av_always_inline void fdct_row_mmx(const int16_t *in, int16_t *out, const int16_t *table) { //FIXME reorder (I do not have an old MMX-only CPU here to benchmark ...) __asm__ volatile( "movd 12(%0), %%mm1 \n\t" "punpcklwd 8(%0), %%mm1 \n\t" "movq %%mm1, %%mm2 \n\t" "psrlq $0x20, %%mm1 \n\t" "movq 0(%0), %%mm0 \n\t" "punpcklwd %%mm2, %%mm1 \n\t" "movq %%mm0, %%mm5 \n\t" "paddsw %%mm1, %%mm0 \n\t" "psubsw %%mm1, %%mm5 \n\t" "movq %%mm0, %%mm2 \n\t" "punpckldq %%mm5, %%mm0 \n\t" "punpckhdq %%mm5, %%mm2 \n\t" "movq 0(%1), %%mm1 \n\t" "movq 8(%1), %%mm3 \n\t" "movq 16(%1), %%mm4 \n\t" "movq 24(%1), %%mm5 \n\t" "movq 32(%1), %%mm6 \n\t" "movq 40(%1), %%mm7 \n\t" "pmaddwd %%mm0, %%mm1 \n\t" "pmaddwd %%mm2, %%mm3 \n\t" "pmaddwd %%mm0, %%mm4 \n\t" "pmaddwd %%mm2, %%mm5 \n\t" "pmaddwd %%mm0, %%mm6 \n\t" "pmaddwd %%mm2, %%mm7 \n\t" "pmaddwd 48(%1), %%mm0 \n\t" "pmaddwd 56(%1), %%mm2 \n\t" "paddd %%mm1, %%mm3 \n\t" "paddd %%mm4, %%mm5 \n\t" "paddd %%mm6, %%mm7 \n\t" "paddd %%mm0, %%mm2 \n\t" "movq (%2), %%mm0 \n\t" "paddd %%mm0, %%mm3 \n\t" "paddd %%mm0, %%mm5 \n\t" "paddd %%mm0, %%mm7 \n\t" "paddd %%mm0, %%mm2 \n\t" "psrad $"S(SHIFT_FRW_ROW)", %%mm3 \n\t" "psrad $"S(SHIFT_FRW_ROW)", %%mm5 \n\t" "psrad $"S(SHIFT_FRW_ROW)", %%mm7 
\n\t" "psrad $"S(SHIFT_FRW_ROW)", %%mm2 \n\t" "packssdw %%mm5, %%mm3 \n\t" "packssdw %%mm2, %%mm7 \n\t" "movq %%mm3, 0(%3) \n\t" "movq %%mm7, 8(%3) \n\t" : : "r" (in), "r" (table), "r" (fdct_r_row), "r" (out)); } void ff_fdct_mmx(int16_t *block) { DECLARE_ALIGNED(8, int64_t, align_tmp)[16]; int16_t * block1= (int16_t*)align_tmp; const int16_t *table= tab_frw_01234567; int i; fdct_col_mmx(block, block1, 0); fdct_col_mmx(block, block1, 4); for(i=8;i>0;i--) { fdct_row_mmx(block1, block, table); block1 += 8; table += 32; block += 8; } } void ff_fdct_mmx2(int16_t *block) { DECLARE_ALIGNED(8, int64_t, align_tmp)[16]; int16_t *block1= (int16_t*)align_tmp; const int16_t *table= tab_frw_01234567; int i; fdct_col_mmx(block, block1, 0); fdct_col_mmx(block, block1, 4); for(i=8;i>0;i--) { fdct_row_mmx2(block1, block, table); block1 += 8; table += 32; block += 8; } } void ff_fdct_sse2(int16_t *block) { DECLARE_ALIGNED(16, int64_t, align_tmp)[16]; int16_t * const block1= (int16_t*)align_tmp; fdct_col_sse2(block, block1, 0); fdct_row_sse2(block1, block); }
123linslouis-android-video-cutter
jni/libavcodec/x86/fdct_mmx.c
C
asf20
18,081
/* * FFT/MDCT transform with SSE optimizations * Copyright (c) 2008 Loren Merritt * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/x86_cpu.h" #include "libavcodec/dsputil.h" #include "fft.h" DECLARE_ALIGNED(16, static const int, m1m1m1m1)[4] = { 1 << 31, 1 << 31, 1 << 31, 1 << 31 }; void ff_fft_dispatch_sse(FFTComplex *z, int nbits); void ff_fft_dispatch_interleave_sse(FFTComplex *z, int nbits); void ff_fft_calc_sse(FFTContext *s, FFTComplex *z) { int n = 1 << s->nbits; ff_fft_dispatch_interleave_sse(z, s->nbits); if(n <= 16) { x86_reg i = -8*n; __asm__ volatile( "1: \n" "movaps (%0,%1), %%xmm0 \n" "movaps %%xmm0, %%xmm1 \n" "unpcklps 16(%0,%1), %%xmm0 \n" "unpckhps 16(%0,%1), %%xmm1 \n" "movaps %%xmm0, (%0,%1) \n" "movaps %%xmm1, 16(%0,%1) \n" "add $32, %0 \n" "jl 1b \n" :"+r"(i) :"r"(z+n) :"memory" ); } } void ff_fft_permute_sse(FFTContext *s, FFTComplex *z) { int n = 1 << s->nbits; int i; for(i=0; i<n; i+=2) { __asm__ volatile( "movaps %2, %%xmm0 \n" "movlps %%xmm0, %0 \n" "movhps %%xmm0, %1 \n" :"=m"(s->tmp_buf[s->revtab[i]]), "=m"(s->tmp_buf[s->revtab[i+1]]) :"m"(z[i]) ); } memcpy(z, s->tmp_buf, n*sizeof(FFTComplex)); } void ff_imdct_half_sse(FFTContext *s, FFTSample *output, const FFTSample *input) { av_unused x86_reg i, j, k, l; long n = 1 << s->mdct_bits; 
long n2 = n >> 1; long n4 = n >> 2; long n8 = n >> 3; const uint16_t *revtab = s->revtab + n8; const FFTSample *tcos = s->tcos; const FFTSample *tsin = s->tsin; FFTComplex *z = (FFTComplex *)output; /* pre rotation */ for(k=n8-2; k>=0; k-=2) { __asm__ volatile( "movaps (%2,%1,2), %%xmm0 \n" // { z[k].re, z[k].im, z[k+1].re, z[k+1].im } "movaps -16(%2,%0,2), %%xmm1 \n" // { z[-k-2].re, z[-k-2].im, z[-k-1].re, z[-k-1].im } "movaps %%xmm0, %%xmm2 \n" "shufps $0x88, %%xmm1, %%xmm0 \n" // { z[k].re, z[k+1].re, z[-k-2].re, z[-k-1].re } "shufps $0x77, %%xmm2, %%xmm1 \n" // { z[-k-1].im, z[-k-2].im, z[k+1].im, z[k].im } "movlps (%3,%1), %%xmm4 \n" "movlps (%4,%1), %%xmm5 \n" "movhps -8(%3,%0), %%xmm4 \n" // { cos[k], cos[k+1], cos[-k-2], cos[-k-1] } "movhps -8(%4,%0), %%xmm5 \n" // { sin[k], sin[k+1], sin[-k-2], sin[-k-1] } "movaps %%xmm0, %%xmm2 \n" "movaps %%xmm1, %%xmm3 \n" "mulps %%xmm5, %%xmm0 \n" // re*sin "mulps %%xmm4, %%xmm1 \n" // im*cos "mulps %%xmm4, %%xmm2 \n" // re*cos "mulps %%xmm5, %%xmm3 \n" // im*sin "subps %%xmm0, %%xmm1 \n" // -> re "addps %%xmm3, %%xmm2 \n" // -> im "movaps %%xmm1, %%xmm0 \n" "unpcklps %%xmm2, %%xmm1 \n" // { z[k], z[k+1] } "unpckhps %%xmm2, %%xmm0 \n" // { z[-k-2], z[-k-1] } ::"r"(-4*k), "r"(4*k), "r"(input+n4), "r"(tcos+n8), "r"(tsin+n8) ); #if ARCH_X86_64 // if we have enough regs, don't let gcc make the luts latency-bound // but if not, latency is faster than spilling __asm__("movlps %%xmm0, %0 \n" "movhps %%xmm0, %1 \n" "movlps %%xmm1, %2 \n" "movhps %%xmm1, %3 \n" :"=m"(z[revtab[-k-2]]), "=m"(z[revtab[-k-1]]), "=m"(z[revtab[ k ]]), "=m"(z[revtab[ k+1]]) ); #else __asm__("movlps %%xmm0, %0" :"=m"(z[revtab[-k-2]])); __asm__("movhps %%xmm0, %0" :"=m"(z[revtab[-k-1]])); __asm__("movlps %%xmm1, %0" :"=m"(z[revtab[ k ]])); __asm__("movhps %%xmm1, %0" :"=m"(z[revtab[ k+1]])); #endif } ff_fft_dispatch_sse(z, s->nbits); /* post rotation + reinterleave + reorder */ #define CMUL(j,xmm0,xmm1)\ "movaps (%2,"#j",2), %%xmm6 \n"\ "movaps 
16(%2,"#j",2), "#xmm0"\n"\ "movaps %%xmm6, "#xmm1"\n"\ "movaps "#xmm0",%%xmm7 \n"\ "mulps (%3,"#j"), %%xmm6 \n"\ "mulps (%4,"#j"), "#xmm0"\n"\ "mulps (%4,"#j"), "#xmm1"\n"\ "mulps (%3,"#j"), %%xmm7 \n"\ "subps %%xmm6, "#xmm0"\n"\ "addps %%xmm7, "#xmm1"\n" j = -n2; k = n2-16; __asm__ volatile( "1: \n" CMUL(%0, %%xmm0, %%xmm1) CMUL(%1, %%xmm4, %%xmm5) "shufps $0x1b, %%xmm1, %%xmm1 \n" "shufps $0x1b, %%xmm5, %%xmm5 \n" "movaps %%xmm4, %%xmm6 \n" "unpckhps %%xmm1, %%xmm4 \n" "unpcklps %%xmm1, %%xmm6 \n" "movaps %%xmm0, %%xmm2 \n" "unpcklps %%xmm5, %%xmm0 \n" "unpckhps %%xmm5, %%xmm2 \n" "movaps %%xmm6, (%2,%1,2) \n" "movaps %%xmm4, 16(%2,%1,2) \n" "movaps %%xmm0, (%2,%0,2) \n" "movaps %%xmm2, 16(%2,%0,2) \n" "sub $16, %1 \n" "add $16, %0 \n" "jl 1b \n" :"+&r"(j), "+&r"(k) :"r"(z+n8), "r"(tcos+n8), "r"(tsin+n8) :"memory" ); } void ff_imdct_calc_sse(FFTContext *s, FFTSample *output, const FFTSample *input) { x86_reg j, k; long n = 1 << s->mdct_bits; long n4 = n >> 2; ff_imdct_half_sse(s, output+n4, input); j = -n; k = n-16; __asm__ volatile( "movaps %4, %%xmm7 \n" "1: \n" "movaps (%2,%1), %%xmm0 \n" "movaps (%3,%0), %%xmm1 \n" "shufps $0x1b, %%xmm0, %%xmm0 \n" "shufps $0x1b, %%xmm1, %%xmm1 \n" "xorps %%xmm7, %%xmm0 \n" "movaps %%xmm1, (%3,%1) \n" "movaps %%xmm0, (%2,%0) \n" "sub $16, %1 \n" "add $16, %0 \n" "jl 1b \n" :"+r"(j), "+r"(k) :"r"(output+n4), "r"(output+n4*3), "m"(*m1m1m1m1) ); }
123linslouis-android-video-cutter
jni/libavcodec/x86/fft_sse.c
C
asf20
6,917
/* * vp3dsp MMX function declarations * Copyright (c) 2007 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_X86_VP3DSP_MMX_H #define AVCODEC_X86_VP3DSP_MMX_H #include <stdint.h> #include "libavcodec/dsputil.h" void ff_vp3_idct_mmx(int16_t *data); void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block); void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block); void ff_vp3_idct_dc_add_mmx2(uint8_t *dest, int line_size, const DCTELEM *block); void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values); void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values); #endif /* AVCODEC_X86_VP3DSP_MMX_H */
123linslouis-android-video-cutter
jni/libavcodec/x86/vp3dsp_mmx.h
C
asf20
1,443
;***************************************************************************** ;* MMX/SSE2-optimized H.264 deblocking code ;***************************************************************************** ;* Copyright (C) 2005-2008 x264 project ;* ;* Authors: Loren Merritt <lorenm@u.washington.edu> ;* ;* This program is free software; you can redistribute it and/or modify ;* it under the terms of the GNU General Public License as published by ;* the Free Software Foundation; either version 2 of the License, or ;* (at your option) any later version. ;* ;* This program is distributed in the hope that it will be useful, ;* but WITHOUT ANY WARRANTY; without even the implied warranty of ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ;* GNU General Public License for more details. ;* ;* You should have received a copy of the GNU General Public License ;* along with this program; if not, write to the Free Software ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA. 
;***************************************************************************** %include "x86inc.asm" SECTION_RODATA pb_00: times 16 db 0x00 pb_01: times 16 db 0x01 pb_03: times 16 db 0x03 pb_a1: times 16 db 0xa1 SECTION .text ; expands to [base],...,[base+7*stride] %define PASS8ROWS(base, base3, stride, stride3) \ [base], [base+stride], [base+stride*2], [base3], \ [base3+stride], [base3+stride*2], [base3+stride3], [base3+stride*4] ; in: 8 rows of 4 bytes in %1..%8 ; out: 4 rows of 8 bytes in m0..m3 %macro TRANSPOSE4x8_LOAD 8 movd m0, %1 movd m2, %2 movd m1, %3 movd m3, %4 punpcklbw m0, m2 punpcklbw m1, m3 movq m2, m0 punpcklwd m0, m1 punpckhwd m2, m1 movd m4, %5 movd m6, %6 movd m5, %7 movd m7, %8 punpcklbw m4, m6 punpcklbw m5, m7 movq m6, m4 punpcklwd m4, m5 punpckhwd m6, m5 movq m1, m0 movq m3, m2 punpckldq m0, m4 punpckhdq m1, m4 punpckldq m2, m6 punpckhdq m3, m6 %endmacro ; in: 4 rows of 8 bytes in m0..m3 ; out: 8 rows of 4 bytes in %1..%8 %macro TRANSPOSE8x4_STORE 8 movq m4, m0 movq m5, m1 movq m6, m2 punpckhdq m4, m4 punpckhdq m5, m5 punpckhdq m6, m6 punpcklbw m0, m1 punpcklbw m2, m3 movq m1, m0 punpcklwd m0, m2 punpckhwd m1, m2 movd %1, m0 punpckhdq m0, m0 movd %2, m0 movd %3, m1 punpckhdq m1, m1 movd %4, m1 punpckhdq m3, m3 punpcklbw m4, m5 punpcklbw m6, m3 movq m5, m4 punpcklwd m4, m6 punpckhwd m5, m6 movd %5, m4 punpckhdq m4, m4 movd %6, m4 movd %7, m5 punpckhdq m5, m5 movd %8, m5 %endmacro %macro SBUTTERFLY 4 movq %4, %2 punpckl%1 %2, %3 punpckh%1 %4, %3 %endmacro ; in: 8 rows of 8 (only the middle 6 pels are used) in %1..%8 ; out: 6 rows of 8 in [%9+0*16] .. 
[%9+5*16] %macro TRANSPOSE6x8_MEM 9 movq m0, %1 movq m1, %2 movq m2, %3 movq m3, %4 movq m4, %5 movq m5, %6 movq m6, %7 SBUTTERFLY bw, m0, m1, m7 SBUTTERFLY bw, m2, m3, m1 SBUTTERFLY bw, m4, m5, m3 movq [%9+0x10], m1 SBUTTERFLY bw, m6, %8, m5 SBUTTERFLY wd, m0, m2, m1 SBUTTERFLY wd, m4, m6, m2 punpckhdq m0, m4 movq [%9+0x00], m0 SBUTTERFLY wd, m7, [%9+0x10], m6 SBUTTERFLY wd, m3, m5, m4 SBUTTERFLY dq, m7, m3, m0 SBUTTERFLY dq, m1, m2, m5 punpckldq m6, m4 movq [%9+0x10], m1 movq [%9+0x20], m5 movq [%9+0x30], m7 movq [%9+0x40], m0 movq [%9+0x50], m6 %endmacro ; in: 8 rows of 8 in %1..%8 ; out: 8 rows of 8 in %9..%16 %macro TRANSPOSE8x8_MEM 16 movq m0, %1 movq m1, %2 movq m2, %3 movq m3, %4 movq m4, %5 movq m5, %6 movq m6, %7 SBUTTERFLY bw, m0, m1, m7 SBUTTERFLY bw, m2, m3, m1 SBUTTERFLY bw, m4, m5, m3 SBUTTERFLY bw, m6, %8, m5 movq %9, m3 SBUTTERFLY wd, m0, m2, m3 SBUTTERFLY wd, m4, m6, m2 SBUTTERFLY wd, m7, m1, m6 movq %11, m2 movq m2, %9 SBUTTERFLY wd, m2, m5, m1 SBUTTERFLY dq, m0, m4, m5 SBUTTERFLY dq, m7, m2, m4 movq %9, m0 movq %10, m5 movq %13, m7 movq %14, m4 SBUTTERFLY dq, m3, %11, m0 SBUTTERFLY dq, m6, m1, m5 movq %11, m3 movq %12, m0 movq %15, m6 movq %16, m5 %endmacro ; out: %4 = |%1-%2|>%3 ; clobbers: %5 %macro DIFF_GT 5 mova %5, %2 mova %4, %1 psubusb %5, %1 psubusb %4, %2 por %4, %5 psubusb %4, %3 %endmacro ; out: %4 = |%1-%2|>%3 ; clobbers: %5 %macro DIFF_GT2 5 mova %5, %2 mova %4, %1 psubusb %5, %1 psubusb %4, %2 psubusb %5, %3 psubusb %4, %3 pcmpeqb %4, %5 %endmacro %macro SPLATW 1 %ifidn m0, xmm0 pshuflw %1, %1, 0 punpcklqdq %1, %1 %else pshufw %1, %1, 0 %endif %endmacro ; in: m0=p1 m1=p0 m2=q0 m3=q1 %1=alpha-1 %2=beta-1 ; out: m5=beta-1, m7=mask, %3=alpha-1 ; clobbers: m4,m6 %macro LOAD_MASK 2-3 movd m4, %1 movd m5, %2 SPLATW m4 SPLATW m5 packuswb m4, m4 ; 16x alpha-1 packuswb m5, m5 ; 16x beta-1 %if %0>2 mova %3, m4 %endif DIFF_GT m1, m2, m4, m7, m6 ; |p0-q0| > alpha-1 DIFF_GT m0, m1, m5, m4, m6 ; |p1-p0| > beta-1 por m7, m4 DIFF_GT m3, m2, m5, 
m4, m6 ; |q1-q0| > beta-1 por m7, m4 pxor m6, m6 pcmpeqb m7, m6 %endmacro ; in: m0=p1 m1=p0 m2=q0 m3=q1 m7=(tc&mask) ; out: m1=p0' m2=q0' ; clobbers: m0,3-6 %macro DEBLOCK_P0_Q0 0 mova m5, m1 pxor m5, m2 ; p0^q0 pand m5, [pb_01 GLOBAL] ; (p0^q0)&1 pcmpeqb m4, m4 pxor m3, m4 pavgb m3, m0 ; (p1 - q1 + 256)>>1 pavgb m3, [pb_03 GLOBAL] ; (((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2 pxor m4, m1 pavgb m4, m2 ; (q0 - p0 + 256)>>1 pavgb m3, m5 paddusb m3, m4 ; d+128+33 mova m6, [pb_a1 GLOBAL] psubusb m6, m3 psubusb m3, [pb_a1 GLOBAL] pminub m6, m7 pminub m3, m7 psubusb m1, m6 psubusb m2, m3 paddusb m1, m3 paddusb m2, m6 %endmacro ; in: m1=p0 m2=q0 ; %1=p1 %2=q2 %3=[q2] %4=[q1] %5=tc0 %6=tmp ; out: [q1] = clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 ) ; clobbers: q2, tmp, tc0 %macro LUMA_Q1 6 mova %6, m1 pavgb %6, m2 pavgb %2, %6 ; avg(p2,avg(p0,q0)) pxor %6, %3 pand %6, [pb_01 GLOBAL] ; (p2^avg(p0,q0))&1 psubusb %2, %6 ; (p2+((p0+q0+1)>>1))>>1 mova %6, %1 psubusb %6, %5 paddusb %5, %1 pmaxub %2, %6 pminub %2, %5 mova %4, %2 %endmacro %ifdef ARCH_X86_64 ;----------------------------------------------------------------------------- ; void x264_deblock_v_luma_sse2( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 ) ;----------------------------------------------------------------------------- INIT_XMM cglobal x264_deblock_v_luma_sse2, 5,5,10 movd m8, [r4] ; tc0 lea r4, [r1*3] dec r2d ; alpha-1 neg r4 dec r3d ; beta-1 add r4, r0 ; pix-3*stride mova m0, [r4+r1] ; p1 mova m1, [r4+2*r1] ; p0 mova m2, [r0] ; q0 mova m3, [r0+r1] ; q1 LOAD_MASK r2d, r3d punpcklbw m8, m8 punpcklbw m8, m8 ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0] pcmpeqb m9, m9 pcmpeqb m9, m8 pandn m9, m7 pand m8, m9 movdqa m3, [r4] ; p2 DIFF_GT2 m1, m3, m5, m6, m7 ; |p2-p0| > beta-1 pand m6, m9 mova m7, m8 psubb m7, m6 pand m6, m8 LUMA_Q1 m0, m3, [r4], [r4+r1], m6, m4 movdqa m4, [r0+2*r1] ; q2 DIFF_GT2 m2, m4, m5, m6, m3 ; |q2-q0| > beta-1 pand m6, m9 pand m8, m6 psubb m7, m6 mova m3, [r0+r1] 
LUMA_Q1 m3, m4, [r0+2*r1], [r0+r1], m8, m6 DEBLOCK_P0_Q0 mova [r4+2*r1], m1 mova [r0], m2 RET ;----------------------------------------------------------------------------- ; void x264_deblock_h_luma_sse2( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 ) ;----------------------------------------------------------------------------- INIT_MMX cglobal x264_deblock_h_luma_sse2, 5,7 movsxd r10, r1d lea r11, [r10+r10*2] lea r6, [r0-4] lea r5, [r0-4+r11] %ifdef WIN64 sub rsp, 0x98 %define pix_tmp rsp+0x30 %else sub rsp, 0x68 %define pix_tmp rsp %endif ; transpose 6x16 -> tmp space TRANSPOSE6x8_MEM PASS8ROWS(r6, r5, r10, r11), pix_tmp lea r6, [r6+r10*8] lea r5, [r5+r10*8] TRANSPOSE6x8_MEM PASS8ROWS(r6, r5, r10, r11), pix_tmp+8 ; vertical filter ; alpha, beta, tc0 are still in r2d, r3d, r4 ; don't backup r6, r5, r10, r11 because x264_deblock_v_luma_sse2 doesn't use them lea r0, [pix_tmp+0x30] mov r1d, 0x10 %ifdef WIN64 mov [rsp+0x20], r4 %endif call x264_deblock_v_luma_sse2 ; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter) add r6, 2 add r5, 2 movq m0, [pix_tmp+0x18] movq m1, [pix_tmp+0x28] movq m2, [pix_tmp+0x38] movq m3, [pix_tmp+0x48] TRANSPOSE8x4_STORE PASS8ROWS(r6, r5, r10, r11) shl r10, 3 sub r6, r10 sub r5, r10 shr r10, 3 movq m0, [pix_tmp+0x10] movq m1, [pix_tmp+0x20] movq m2, [pix_tmp+0x30] movq m3, [pix_tmp+0x40] TRANSPOSE8x4_STORE PASS8ROWS(r6, r5, r10, r11) %ifdef WIN64 add rsp, 0x98 %else add rsp, 0x68 %endif RET %else %macro DEBLOCK_LUMA 3 ;----------------------------------------------------------------------------- ; void x264_deblock_v8_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 ) ;----------------------------------------------------------------------------- cglobal x264_deblock_%2_luma_%1, 5,5 lea r4, [r1*3] dec r2 ; alpha-1 neg r4 dec r3 ; beta-1 add r4, r0 ; pix-3*stride %assign pad 2*%3+12-(stack_offset&15) SUB esp, pad mova m0, [r4+r1] ; p1 mova m1, [r4+2*r1] ; p0 mova m2, 
[r0] ; q0 mova m3, [r0+r1] ; q1 LOAD_MASK r2, r3 mov r3, r4mp movd m4, [r3] ; tc0 punpcklbw m4, m4 punpcklbw m4, m4 ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0] mova [esp+%3], m4 ; tc pcmpeqb m3, m3 pcmpgtb m4, m3 pand m4, m7 mova [esp], m4 ; mask mova m3, [r4] ; p2 DIFF_GT2 m1, m3, m5, m6, m7 ; |p2-p0| > beta-1 pand m6, m4 pand m4, [esp+%3] ; tc mova m7, m4 psubb m7, m6 pand m6, m4 LUMA_Q1 m0, m3, [r4], [r4+r1], m6, m4 mova m4, [r0+2*r1] ; q2 DIFF_GT2 m2, m4, m5, m6, m3 ; |q2-q0| > beta-1 mova m5, [esp] ; mask pand m6, m5 mova m5, [esp+%3] ; tc pand m5, m6 psubb m7, m6 mova m3, [r0+r1] LUMA_Q1 m3, m4, [r0+2*r1], [r0+r1], m5, m6 DEBLOCK_P0_Q0 mova [r4+2*r1], m1 mova [r0], m2 ADD esp, pad RET ;----------------------------------------------------------------------------- ; void x264_deblock_h_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 ) ;----------------------------------------------------------------------------- INIT_MMX cglobal x264_deblock_h_luma_%1, 0,5 mov r0, r0mp mov r3, r1m lea r4, [r3*3] sub r0, 4 lea r1, [r0+r4] %assign pad 0x78-(stack_offset&15) SUB esp, pad %define pix_tmp esp+12 ; transpose 6x16 -> tmp space TRANSPOSE6x8_MEM PASS8ROWS(r0, r1, r3, r4), pix_tmp lea r0, [r0+r3*8] lea r1, [r1+r3*8] TRANSPOSE6x8_MEM PASS8ROWS(r0, r1, r3, r4), pix_tmp+8 ; vertical filter lea r0, [pix_tmp+0x30] PUSH dword r4m PUSH dword r3m PUSH dword r2m PUSH dword 16 PUSH dword r0 call x264_deblock_%2_luma_%1 %ifidn %2, v8 add dword [esp ], 8 ; pix_tmp+0x38 add dword [esp+16], 2 ; tc0+2 call x264_deblock_%2_luma_%1 %endif ADD esp, 20 ; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter) mov r0, r0mp sub r0, 2 lea r1, [r0+r4] movq m0, [pix_tmp+0x10] movq m1, [pix_tmp+0x20] movq m2, [pix_tmp+0x30] movq m3, [pix_tmp+0x40] TRANSPOSE8x4_STORE PASS8ROWS(r0, r1, r3, r4) lea r0, [r0+r3*8] lea r1, [r1+r3*8] movq m0, [pix_tmp+0x18] movq m1, [pix_tmp+0x28] movq m2, [pix_tmp+0x38] movq m3, [pix_tmp+0x48] 
TRANSPOSE8x4_STORE PASS8ROWS(r0, r1, r3, r4) ADD esp, pad RET %endmacro ; DEBLOCK_LUMA INIT_XMM DEBLOCK_LUMA sse2, v, 16 %endif ; ARCH %macro LUMA_INTRA_P012 4 ; p0..p3 in memory mova t0, p2 mova t1, p0 pavgb t0, p1 pavgb t1, q0 pavgb t0, t1 ; ((p2+p1+1)/2 + (p0+q0+1)/2 + 1)/2 mova t5, t1 mova t2, p2 mova t3, p0 paddb t2, p1 paddb t3, q0 paddb t2, t3 mova t3, t2 mova t4, t2 psrlw t2, 1 pavgb t2, mpb_00 pxor t2, t0 pand t2, mpb_01 psubb t0, t2 ; p1' = (p2+p1+p0+q0+2)/4; mova t1, p2 mova t2, p2 pavgb t1, q1 psubb t2, q1 paddb t3, t3 psubb t3, t2 ; p2+2*p1+2*p0+2*q0+q1 pand t2, mpb_01 psubb t1, t2 pavgb t1, p1 pavgb t1, t5 ; (((p2+q1)/2 + p1+1)/2 + (p0+q0+1)/2 + 1)/2 psrlw t3, 2 pavgb t3, mpb_00 pxor t3, t1 pand t3, mpb_01 psubb t1, t3 ; p0'a = (p2+2*p1+2*p0+2*q0+q1+4)/8 mova t3, p0 mova t2, p0 pxor t3, q1 pavgb t2, q1 pand t3, mpb_01 psubb t2, t3 pavgb t2, p1 ; p0'b = (2*p1+p0+q0+2)/4 pxor t1, t2 pxor t2, p0 pand t1, mask1p pand t2, mask0 pxor t1, t2 pxor t1, p0 mova %1, t1 ; store p0 mova t1, %4 ; p3 mova t2, t1 pavgb t1, p2 paddb t2, p2 pavgb t1, t0 ; (p3+p2+1)/2 + (p2+p1+p0+q0+2)/4 paddb t2, t2 paddb t2, t4 ; 2*p3+3*p2+p1+p0+q0 psrlw t2, 2 pavgb t2, mpb_00 pxor t2, t1 pand t2, mpb_01 psubb t1, t2 ; p2' = (2*p3+3*p2+p1+p0+q0+4)/8 pxor t0, p1 pxor t1, p2 pand t0, mask1p pand t1, mask1p pxor t0, p1 pxor t1, p2 mova %2, t0 ; store p1 mova %3, t1 ; store p2 %endmacro %macro LUMA_INTRA_SWAP_PQ 0 %define q1 m0 %define q0 m1 %define p0 m2 %define p1 m3 %define p2 q2 %define mask1p mask1q %endmacro %macro DEBLOCK_LUMA_INTRA 2 %define p1 m0 %define p0 m1 %define q0 m2 %define q1 m3 %define t0 m4 %define t1 m5 %define t2 m6 %define t3 m7 %ifdef ARCH_X86_64 %define p2 m8 %define q2 m9 %define t4 m10 %define t5 m11 %define mask0 m12 %define mask1p m13 %define mask1q [rsp-24] %define mpb_00 m14 %define mpb_01 m15 %else %define spill(x) [esp+16*x+((stack_offset+4)&15)] %define p2 [r4+r1] %define q2 [r0+2*r1] %define t4 spill(0) %define t5 spill(1) %define mask0 spill(2) %define 
mask1p spill(3) %define mask1q spill(4) %define mpb_00 [pb_00 GLOBAL] %define mpb_01 [pb_01 GLOBAL] %endif ;----------------------------------------------------------------------------- ; void x264_deblock_v_luma_intra_sse2( uint8_t *pix, int stride, int alpha, int beta ) ;----------------------------------------------------------------------------- cglobal x264_deblock_%2_luma_intra_%1, 4,6,16 %ifndef ARCH_X86_64 sub esp, 0x60 %endif lea r4, [r1*4] lea r5, [r1*3] ; 3*stride dec r2d ; alpha-1 jl .end neg r4 dec r3d ; beta-1 jl .end add r4, r0 ; pix-4*stride mova p1, [r4+2*r1] mova p0, [r4+r5] mova q0, [r0] mova q1, [r0+r1] %ifdef ARCH_X86_64 pxor mpb_00, mpb_00 mova mpb_01, [pb_01 GLOBAL] LOAD_MASK r2d, r3d, t5 ; m5=beta-1, t5=alpha-1, m7=mask0 SWAP 7, 12 ; m12=mask0 pavgb t5, mpb_00 pavgb t5, mpb_01 ; alpha/4+1 movdqa p2, [r4+r1] movdqa q2, [r0+2*r1] DIFF_GT2 p0, q0, t5, t0, t3 ; t0 = |p0-q0| > alpha/4+1 DIFF_GT2 p0, p2, m5, t2, t5 ; mask1 = |p2-p0| > beta-1 DIFF_GT2 q0, q2, m5, t4, t5 ; t4 = |q2-q0| > beta-1 pand t0, mask0 pand t4, t0 pand t2, t0 mova mask1q, t4 mova mask1p, t2 %else LOAD_MASK r2d, r3d, t5 ; m5=beta-1, t5=alpha-1, m7=mask0 mova m4, t5 mova mask0, m7 pavgb m4, [pb_00 GLOBAL] pavgb m4, [pb_01 GLOBAL] ; alpha/4+1 DIFF_GT2 p0, q0, m4, m6, m7 ; m6 = |p0-q0| > alpha/4+1 pand m6, mask0 DIFF_GT2 p0, p2, m5, m4, m7 ; m4 = |p2-p0| > beta-1 pand m4, m6 mova mask1p, m4 DIFF_GT2 q0, q2, m5, m4, m7 ; m4 = |q2-q0| > beta-1 pand m4, m6 mova mask1q, m4 %endif LUMA_INTRA_P012 [r4+r5], [r4+2*r1], [r4+r1], [r4] LUMA_INTRA_SWAP_PQ LUMA_INTRA_P012 [r0], [r0+r1], [r0+2*r1], [r0+r5] .end: %ifndef ARCH_X86_64 add esp, 0x60 %endif RET INIT_MMX %ifdef ARCH_X86_64 ;----------------------------------------------------------------------------- ; void x264_deblock_h_luma_intra_sse2( uint8_t *pix, int stride, int alpha, int beta ) ;----------------------------------------------------------------------------- cglobal x264_deblock_h_luma_intra_%1, 4,7 movsxd r10, r1d lea r11, 
[r10*3] lea r6, [r0-4] lea r5, [r0-4+r11] sub rsp, 0x88 %define pix_tmp rsp ; transpose 8x16 -> tmp space TRANSPOSE8x8_MEM PASS8ROWS(r6, r5, r10, r11), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30) lea r6, [r6+r10*8] lea r5, [r5+r10*8] TRANSPOSE8x8_MEM PASS8ROWS(r6, r5, r10, r11), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30) lea r0, [pix_tmp+0x40] mov r1, 0x10 call x264_deblock_v_luma_intra_%1 ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8) lea r5, [r6+r11] TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r6, r5, r10, r11) shl r10, 3 sub r6, r10 sub r5, r10 shr r10, 3 TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r6, r5, r10, r11) add rsp, 0x88 RET %else cglobal x264_deblock_h_luma_intra_%1, 2,4 lea r3, [r1*3] sub r0, 4 lea r2, [r0+r3] %assign pad 0x8c-(stack_offset&15) SUB rsp, pad %define pix_tmp rsp ; transpose 8x16 -> tmp space TRANSPOSE8x8_MEM PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30) lea r0, [r0+r1*8] lea r2, [r2+r1*8] TRANSPOSE8x8_MEM PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30) lea r0, [pix_tmp+0x40] PUSH dword r3m PUSH dword r2m PUSH dword 16 PUSH r0 call x264_deblock_%2_luma_intra_%1 %ifidn %2, v8 add dword [rsp], 8 ; pix_tmp+8 call x264_deblock_%2_luma_intra_%1 %endif ADD esp, 16 mov r1, r1m mov r0, r0mp lea r3, [r1*3] sub r0, 4 lea r2, [r0+r3] ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8) TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3) lea r0, [r0+r1*8] lea r2, [r2+r1*8] TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3) ADD rsp, pad RET %endif ; ARCH_X86_64 %endmacro ; DEBLOCK_LUMA_INTRA INIT_XMM DEBLOCK_LUMA_INTRA sse2, v %ifndef ARCH_X86_64 INIT_MMX DEBLOCK_LUMA_INTRA mmxext, v8 %endif
123linslouis-android-video-cutter
jni/libavcodec/x86/h264_deblock_sse2.asm
Assembly
asf20
19,758
/* * FFT/MDCT transform with 3DNow! optimizations * Copyright (c) 2008 Loren Merritt * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #define EMULATE_3DNOWEXT #include "fft_3dn2.c"
123linslouis-android-video-cutter
jni/libavcodec/x86/fft_3dn.c
C
asf20
898
/* * FFT/MDCT transform with Extended 3DNow! optimizations * Copyright (c) 2006-2008 Zuxy MENG Jie, Loren Merritt * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/x86_cpu.h" #include "libavcodec/dsputil.h" #include "fft.h" DECLARE_ALIGNED(8, static const int, m1m1)[2] = { 1<<31, 1<<31 }; #ifdef EMULATE_3DNOWEXT #define PSWAPD(s,d)\ "movq "#s","#d"\n"\ "psrlq $32,"#d"\n"\ "punpckldq "#s","#d"\n" #define ff_fft_calc_3dn2 ff_fft_calc_3dn #define ff_fft_dispatch_3dn2 ff_fft_dispatch_3dn #define ff_fft_dispatch_interleave_3dn2 ff_fft_dispatch_interleave_3dn #define ff_imdct_calc_3dn2 ff_imdct_calc_3dn #define ff_imdct_half_3dn2 ff_imdct_half_3dn #else #define PSWAPD(s,d) "pswapd "#s","#d"\n" #endif void ff_fft_dispatch_3dn2(FFTComplex *z, int nbits); void ff_fft_dispatch_interleave_3dn2(FFTComplex *z, int nbits); void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z) { int n = 1<<s->nbits; int i; ff_fft_dispatch_interleave_3dn2(z, s->nbits); __asm__ volatile("femms"); if(n <= 8) for(i=0; i<n; i+=2) FFSWAP(FFTSample, z[i].im, z[i+1].re); } void ff_imdct_half_3dn2(FFTContext *s, FFTSample *output, const FFTSample *input) { x86_reg j, k; long n = 1 << s->mdct_bits; long n2 = n >> 1; long n4 = n >> 2; long n8 = n >> 3; const uint16_t *revtab = s->revtab; const FFTSample 
*tcos = s->tcos; const FFTSample *tsin = s->tsin; const FFTSample *in1, *in2; FFTComplex *z = (FFTComplex *)output; /* pre rotation */ in1 = input; in2 = input + n2 - 1; #ifdef EMULATE_3DNOWEXT __asm__ volatile("movd %0, %%mm7" ::"r"(1<<31)); #endif for(k = 0; k < n4; k++) { // FIXME a single block is faster, but gcc 2.95 and 3.4.x on 32bit can't compile it __asm__ volatile( "movd %0, %%mm0 \n" "movd %2, %%mm1 \n" "punpckldq %1, %%mm0 \n" "punpckldq %3, %%mm1 \n" "movq %%mm0, %%mm2 \n" PSWAPD( %%mm1, %%mm3 ) "pfmul %%mm1, %%mm0 \n" "pfmul %%mm3, %%mm2 \n" #ifdef EMULATE_3DNOWEXT "movq %%mm0, %%mm1 \n" "punpckhdq %%mm2, %%mm0 \n" "punpckldq %%mm2, %%mm1 \n" "pxor %%mm7, %%mm0 \n" "pfadd %%mm1, %%mm0 \n" #else "pfpnacc %%mm2, %%mm0 \n" #endif ::"m"(in2[-2*k]), "m"(in1[2*k]), "m"(tcos[k]), "m"(tsin[k]) ); __asm__ volatile( "movq %%mm0, %0 \n\t" :"=m"(z[revtab[k]]) ); } ff_fft_dispatch_3dn2(z, s->nbits); #define CMUL(j,mm0,mm1)\ "movq (%2,"#j",2), %%mm6 \n"\ "movq 8(%2,"#j",2), "#mm0"\n"\ "movq %%mm6, "#mm1"\n"\ "movq "#mm0",%%mm7 \n"\ "pfmul (%3,"#j"), %%mm6 \n"\ "pfmul (%4,"#j"), "#mm0"\n"\ "pfmul (%4,"#j"), "#mm1"\n"\ "pfmul (%3,"#j"), %%mm7 \n"\ "pfsub %%mm6, "#mm0"\n"\ "pfadd %%mm7, "#mm1"\n" /* post rotation */ j = -n2; k = n2-8; __asm__ volatile( "1: \n" CMUL(%0, %%mm0, %%mm1) CMUL(%1, %%mm2, %%mm3) "movd %%mm0, (%2,%0,2) \n" "movd %%mm1,12(%2,%1,2) \n" "movd %%mm2, (%2,%1,2) \n" "movd %%mm3,12(%2,%0,2) \n" "psrlq $32, %%mm0 \n" "psrlq $32, %%mm1 \n" "psrlq $32, %%mm2 \n" "psrlq $32, %%mm3 \n" "movd %%mm0, 8(%2,%0,2) \n" "movd %%mm1, 4(%2,%1,2) \n" "movd %%mm2, 8(%2,%1,2) \n" "movd %%mm3, 4(%2,%0,2) \n" "sub $8, %1 \n" "add $8, %0 \n" "jl 1b \n" :"+r"(j), "+r"(k) :"r"(z+n8), "r"(tcos+n8), "r"(tsin+n8) :"memory" ); __asm__ volatile("femms"); } void ff_imdct_calc_3dn2(FFTContext *s, FFTSample *output, const FFTSample *input) { x86_reg j, k; long n = 1 << s->mdct_bits; long n4 = n >> 2; ff_imdct_half_3dn2(s, output+n4, input); j = -n; k = n-8; __asm__ volatile( 
"movq %4, %%mm7 \n" "1: \n" PSWAPD((%2,%1), %%mm0) PSWAPD((%3,%0), %%mm1) "pxor %%mm7, %%mm0 \n" "movq %%mm1, (%3,%1) \n" "movq %%mm0, (%2,%0) \n" "sub $8, %1 \n" "add $8, %0 \n" "jl 1b \n" :"+r"(j), "+r"(k) :"r"(output+n4), "r"(output+n4*3), "m"(*m1m1) ); __asm__ volatile("femms"); }
123linslouis-android-video-cutter
jni/libavcodec/x86/fft_3dn2.c
C
asf20
5,236
/* * XVID MPEG-4 VIDEO CODEC * - MMX and XMM forward discrete cosine transform - * * Copyright(C) 2001 Peter Ross <pross@xvid.org> * * Originally provided by Intel at AP-922 * http://developer.intel.com/vtune/cbts/strmsimd/922down.htm * (See more app notes at http://developer.intel.com/vtune/cbts/strmsimd/appnotes.htm) * but in a limited edition. * New macro implements a column part for precise iDCT * The routine precision now satisfies IEEE standard 1180-1990. * * Copyright(C) 2000-2001 Peter Gubanov <peter@elecard.net.ru> * Rounding trick Copyright(C) 2000 Michel Lespinasse <walken@zoy.org> * * http://www.elecard.com/peter/idct.html * http://www.linuxvideo.org/mpeg2dec/ * * These examples contain code fragments for first stage iDCT 8x8 * (for rows) and first stage DCT 8x8 (for columns) * * conversion to gcc syntax by Michael Niedermayer * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public License * along with FFmpeg; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <inttypes.h> #include "libavcodec/avcodec.h" #include "idct_xvid.h" //============================================================================= // Macros and other preprocessor constants //============================================================================= #define BITS_INV_ACC 5 // 4 or 5 for IEEE #define SHIFT_INV_ROW (16 - BITS_INV_ACC) //11 #define SHIFT_INV_COL (1 + BITS_INV_ACC) //6 #define RND_INV_ROW (1024 * (6 - BITS_INV_ACC)) #define RND_INV_COL (16 * (BITS_INV_ACC - 3)) #define RND_INV_CORR (RND_INV_COL - 1) #define BITS_FRW_ACC 3 // 2 or 3 for accuracy #define SHIFT_FRW_COL BITS_FRW_ACC #define SHIFT_FRW_ROW (BITS_FRW_ACC + 17) #define RND_FRW_ROW (262144*(BITS_FRW_ACC - 1)) //----------------------------------------------------------------------------- // Various memory constants (trigonometric values or rounding values) //----------------------------------------------------------------------------- DECLARE_ALIGNED(8, static const int16_t, tg_1_16)[4*4] = { 13036,13036,13036,13036, // tg * (2<<16) + 0.5 27146,27146,27146,27146, // tg * (2<<16) + 0.5 -21746,-21746,-21746,-21746, // tg * (2<<16) + 0.5 23170,23170,23170,23170}; // cos * (2<<15) + 0.5 DECLARE_ALIGNED(8, static const int32_t, rounder_0)[2*8] = { 65536,65536, 3597,3597, 2260,2260, 1203,1203, 0,0, 120,120, 512,512, 512,512}; //----------------------------------------------------------------------------- // // The first stage iDCT 8x8 - inverse DCTs of rows // //----------------------------------------------------------------------------- // The 8-point inverse DCT direct algorithm //----------------------------------------------------------------------------- // // static const short w[32] = { // FIX(cos_4_16), FIX(cos_2_16), FIX(cos_4_16), FIX(cos_6_16), // 
FIX(cos_4_16), FIX(cos_6_16), -FIX(cos_4_16), -FIX(cos_2_16), // FIX(cos_4_16), -FIX(cos_6_16), -FIX(cos_4_16), FIX(cos_2_16), // FIX(cos_4_16), -FIX(cos_2_16), FIX(cos_4_16), -FIX(cos_6_16), // FIX(cos_1_16), FIX(cos_3_16), FIX(cos_5_16), FIX(cos_7_16), // FIX(cos_3_16), -FIX(cos_7_16), -FIX(cos_1_16), -FIX(cos_5_16), // FIX(cos_5_16), -FIX(cos_1_16), FIX(cos_7_16), FIX(cos_3_16), // FIX(cos_7_16), -FIX(cos_5_16), FIX(cos_3_16), -FIX(cos_1_16) }; // // #define DCT_8_INV_ROW(x, y) // { // int a0, a1, a2, a3, b0, b1, b2, b3; // // a0 =x[0]*w[0]+x[2]*w[1]+x[4]*w[2]+x[6]*w[3]; // a1 =x[0]*w[4]+x[2]*w[5]+x[4]*w[6]+x[6]*w[7]; // a2 = x[0] * w[ 8] + x[2] * w[ 9] + x[4] * w[10] + x[6] * w[11]; // a3 = x[0] * w[12] + x[2] * w[13] + x[4] * w[14] + x[6] * w[15]; // b0 = x[1] * w[16] + x[3] * w[17] + x[5] * w[18] + x[7] * w[19]; // b1 = x[1] * w[20] + x[3] * w[21] + x[5] * w[22] + x[7] * w[23]; // b2 = x[1] * w[24] + x[3] * w[25] + x[5] * w[26] + x[7] * w[27]; // b3 = x[1] * w[28] + x[3] * w[29] + x[5] * w[30] + x[7] * w[31]; // // y[0] = SHIFT_ROUND ( a0 + b0 ); // y[1] = SHIFT_ROUND ( a1 + b1 ); // y[2] = SHIFT_ROUND ( a2 + b2 ); // y[3] = SHIFT_ROUND ( a3 + b3 ); // y[4] = SHIFT_ROUND ( a3 - b3 ); // y[5] = SHIFT_ROUND ( a2 - b2 ); // y[6] = SHIFT_ROUND ( a1 - b1 ); // y[7] = SHIFT_ROUND ( a0 - b0 ); // } // //----------------------------------------------------------------------------- // // In this implementation the outputs of the iDCT-1D are multiplied // for rows 0,4 - by cos_4_16, // for rows 1,7 - by cos_1_16, // for rows 2,6 - by cos_2_16, // for rows 3,5 - by cos_3_16 // and are shifted to the left for better accuracy // // For the constants used, // FIX(float_const) = (short) (float_const * (1<<15) + 0.5) // //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- // Tables for mmx processors 
//----------------------------------------------------------------------------- // Table for rows 0,4 - constants are multiplied by cos_4_16 DECLARE_ALIGNED(8, static const int16_t, tab_i_04_mmx)[32*4] = { 16384,16384,16384,-16384, // movq-> w06 w04 w02 w00 21407,8867,8867,-21407, // w07 w05 w03 w01 16384,-16384,16384,16384, // w14 w12 w10 w08 -8867,21407,-21407,-8867, // w15 w13 w11 w09 22725,12873,19266,-22725, // w22 w20 w18 w16 19266,4520,-4520,-12873, // w23 w21 w19 w17 12873,4520,4520,19266, // w30 w28 w26 w24 -22725,19266,-12873,-22725, // w31 w29 w27 w25 // Table for rows 1,7 - constants are multiplied by cos_1_16 22725,22725,22725,-22725, // movq-> w06 w04 w02 w00 29692,12299,12299,-29692, // w07 w05 w03 w01 22725,-22725,22725,22725, // w14 w12 w10 w08 -12299,29692,-29692,-12299, // w15 w13 w11 w09 31521,17855,26722,-31521, // w22 w20 w18 w16 26722,6270,-6270,-17855, // w23 w21 w19 w17 17855,6270,6270,26722, // w30 w28 w26 w24 -31521,26722,-17855,-31521, // w31 w29 w27 w25 // Table for rows 2,6 - constants are multiplied by cos_2_16 21407,21407,21407,-21407, // movq-> w06 w04 w02 w00 27969,11585,11585,-27969, // w07 w05 w03 w01 21407,-21407,21407,21407, // w14 w12 w10 w08 -11585,27969,-27969,-11585, // w15 w13 w11 w09 29692,16819,25172,-29692, // w22 w20 w18 w16 25172,5906,-5906,-16819, // w23 w21 w19 w17 16819,5906,5906,25172, // w30 w28 w26 w24 -29692,25172,-16819,-29692, // w31 w29 w27 w25 // Table for rows 3,5 - constants are multiplied by cos_3_16 19266,19266,19266,-19266, // movq-> w06 w04 w02 w00 25172,10426,10426,-25172, // w07 w05 w03 w01 19266,-19266,19266,19266, // w14 w12 w10 w08 -10426,25172,-25172,-10426, // w15 w13 w11 w09 26722,15137,22654,-26722, // w22 w20 w18 w16 22654,5315,-5315,-15137, // w23 w21 w19 w17 15137,5315,5315,22654, // w30 w28 w26 w24 -26722,22654,-15137,-26722, // w31 w29 w27 w25 }; //----------------------------------------------------------------------------- // Tables for xmm processors 
//----------------------------------------------------------------------------- // %3 for rows 0,4 - constants are multiplied by cos_4_16 DECLARE_ALIGNED(8, static const int16_t, tab_i_04_xmm)[32*4] = { 16384,21407,16384,8867, // movq-> w05 w04 w01 w00 16384,8867,-16384,-21407, // w07 w06 w03 w02 16384,-8867,16384,-21407, // w13 w12 w09 w08 -16384,21407,16384,-8867, // w15 w14 w11 w10 22725,19266,19266,-4520, // w21 w20 w17 w16 12873,4520,-22725,-12873, // w23 w22 w19 w18 12873,-22725,4520,-12873, // w29 w28 w25 w24 4520,19266,19266,-22725, // w31 w30 w27 w26 // %3 for rows 1,7 - constants are multiplied by cos_1_16 22725,29692,22725,12299, // movq-> w05 w04 w01 w00 22725,12299,-22725,-29692, // w07 w06 w03 w02 22725,-12299,22725,-29692, // w13 w12 w09 w08 -22725,29692,22725,-12299, // w15 w14 w11 w10 31521,26722,26722,-6270, // w21 w20 w17 w16 17855,6270,-31521,-17855, // w23 w22 w19 w18 17855,-31521,6270,-17855, // w29 w28 w25 w24 6270,26722,26722,-31521, // w31 w30 w27 w26 // %3 for rows 2,6 - constants are multiplied by cos_2_16 21407,27969,21407,11585, // movq-> w05 w04 w01 w00 21407,11585,-21407,-27969, // w07 w06 w03 w02 21407,-11585,21407,-27969, // w13 w12 w09 w08 -21407,27969,21407,-11585, // w15 w14 w11 w10 29692,25172,25172,-5906, // w21 w20 w17 w16 16819,5906,-29692,-16819, // w23 w22 w19 w18 16819,-29692,5906,-16819, // w29 w28 w25 w24 5906,25172,25172,-29692, // w31 w30 w27 w26 // %3 for rows 3,5 - constants are multiplied by cos_3_16 19266,25172,19266,10426, // movq-> w05 w04 w01 w00 19266,10426,-19266,-25172, // w07 w06 w03 w02 19266,-10426,19266,-25172, // w13 w12 w09 w08 -19266,25172,19266,-10426, // w15 w14 w11 w10 26722,22654,22654,-5315, // w21 w20 w17 w16 15137,5315,-26722,-15137, // w23 w22 w19 w18 15137,-26722,5315,-15137, // w29 w28 w25 w24 5315,22654,22654,-26722, // w31 w30 w27 w26 }; //============================================================================= // Helper macros for the code 
//=============================================================================
//-----------------------------------------------------------------------------
// DCT_8_INV_ROW_MMX( INP, OUT, TABLE, ROUNDER )
//
// One 8-point inverse-DCT row pass, plain MMX:
//   A1 = input row (8 x int16), A2 = output row (callers pass A2 == A1),
//   A3 = 32-quadruple weight table (tab_i_04_mmx), A4 = rounding constant.
// Even/odd sums are formed with pmaddwd, rounded, shifted right by 11 and
// repacked to 8 int16 (see SHIFT_ROUND in the reference code above).
//-----------------------------------------------------------------------------
#define DCT_8_INV_ROW_MMX(A1,A2,A3,A4)\
  "movq " #A1 ",%%mm0 \n\t"/* 0 ; x3 x2 x1 x0*/\
  "movq 8+" #A1 ",%%mm1 \n\t"/* 1 ; x7 x6 x5 x4*/\
  "movq %%mm0,%%mm2 \n\t"/* 2 ; x3 x2 x1 x0*/\
  "movq " #A3 ",%%mm3 \n\t"/* 3 ; w06 w04 w02 w00*/\
  "punpcklwd %%mm1,%%mm0 \n\t"/* x5 x1 x4 x0*/\
  "movq %%mm0,%%mm5 \n\t"/* 5 ; x5 x1 x4 x0*/\
  "punpckldq %%mm0,%%mm0 \n\t"/* x4 x0 x4 x0*/\
  "movq 8+" #A3 ",%%mm4 \n\t"/* 4 ; w07 w05 w03 w01*/\
  "punpckhwd %%mm1,%%mm2 \n\t"/* 1 ; x7 x3 x6 x2*/\
  "pmaddwd %%mm0,%%mm3 \n\t"/* x4*w06+x0*w04 x4*w02+x0*w00*/\
  "movq %%mm2,%%mm6 \n\t"/* 6 ; x7 x3 x6 x2*/\
  "movq 32+" #A3 ",%%mm1 \n\t"/* 1 ; w22 w20 w18 w16*/\
  "punpckldq %%mm2,%%mm2 \n\t"/* x6 x2 x6 x2*/\
  "pmaddwd %%mm2,%%mm4 \n\t"/* x6*w07+x2*w05 x6*w03+x2*w01*/\
  "punpckhdq %%mm5,%%mm5 \n\t"/* x5 x1 x5 x1*/\
  "pmaddwd 16+" #A3 ",%%mm0 \n\t"/* x4*w14+x0*w12 x4*w10+x0*w08*/\
  "punpckhdq %%mm6,%%mm6 \n\t"/* x7 x3 x7 x3*/\
  "movq 40+" #A3 ",%%mm7 \n\t"/* 7 ; w23 w21 w19 w17*/\
  "pmaddwd %%mm5,%%mm1 \n\t"/* x5*w22+x1*w20 x5*w18+x1*w16*/\
  "paddd " #A4 ",%%mm3 \n\t"/* +%4*/\
  "pmaddwd %%mm6,%%mm7 \n\t"/* x7*w23+x3*w21 x7*w19+x3*w17*/\
  "pmaddwd 24+" #A3 ",%%mm2 \n\t"/* x6*w15+x2*w13 x6*w11+x2*w09*/\
  "paddd %%mm4,%%mm3 \n\t"/* 4 ; a1=sum(even1) a0=sum(even0)*/\
  "pmaddwd 48+" #A3 ",%%mm5 \n\t"/* x5*w30+x1*w28 x5*w26+x1*w24*/\
  "movq %%mm3,%%mm4 \n\t"/* 4 ; a1 a0*/\
  "pmaddwd 56+" #A3 ",%%mm6 \n\t"/* x7*w31+x3*w29 x7*w27+x3*w25*/\
  "paddd %%mm7,%%mm1 \n\t"/* 7 ; b1=sum(odd1) b0=sum(odd0)*/\
  "paddd " #A4 ",%%mm0 \n\t"/* +%4*/\
  "psubd %%mm1,%%mm3 \n\t"/* a1-b1 a0-b0*/\
  "psrad $11,%%mm3 \n\t"/* y6=a1-b1 y7=a0-b0*/\
  "paddd %%mm4,%%mm1 \n\t"/* 4 ; a1+b1 a0+b0*/\
  "paddd %%mm2,%%mm0 \n\t"/* 2 ; a3=sum(even3) a2=sum(even2)*/\
  "psrad $11,%%mm1 \n\t"/* y1=a1+b1 y0=a0+b0*/\
  "paddd %%mm6,%%mm5 \n\t"/* 6 ; b3=sum(odd3) b2=sum(odd2)*/\
  "movq %%mm0,%%mm4 \n\t"/* 4 ; a3 a2*/\
  "paddd %%mm5,%%mm0 \n\t"/* a3+b3 a2+b2*/\
  "psubd %%mm5,%%mm4 \n\t"/* 5 ; a3-b3 a2-b2*/\
  "psrad $11,%%mm0 \n\t"/* y3=a3+b3 y2=a2+b2*/\
  "psrad $11,%%mm4 \n\t"/* y4=a3-b3 y5=a2-b2*/\
  "packssdw %%mm0,%%mm1 \n\t"/* 0 ; y3 y2 y1 y0*/\
  "packssdw %%mm3,%%mm4 \n\t"/* 3 ; y6 y7 y4 y5*/\
  "movq %%mm4,%%mm7 \n\t"/* 7 ; y6 y7 y4 y5*/\
  "psrld $16,%%mm4 \n\t"/* 0 y6 0 y4*/\
  "pslld $16,%%mm7 \n\t"/* y7 0 y5 0*/\
  "movq %%mm1," #A2 " \n\t"/* 1 ; save y3 y2 y1 y0*/\
  "por %%mm4,%%mm7 \n\t"/* 4 ; y7 y6 y5 y4*/\
  "movq %%mm7,8 +" #A2 "\n\t"/* 7 ; save y7 y6 y5 y4*/\

//-----------------------------------------------------------------------------
// DCT_8_INV_ROW_XMM( INP, OUT, TABLE, ROUNDER )
//
// Same row pass as DCT_8_INV_ROW_MMX, but uses pshufw (MMX2) instead of the
// punpck* shuffles, and therefore the differently-permuted tab_i_04_xmm
// weight table.  Arguments as in DCT_8_INV_ROW_MMX.
//-----------------------------------------------------------------------------
#define DCT_8_INV_ROW_XMM(A1,A2,A3,A4)\
  "movq " #A1 ",%%mm0 \n\t"/* 0 ; x3 x2 x1 x0*/\
  "movq 8+" #A1 ",%%mm1 \n\t"/* 1 ; x7 x6 x5 x4*/\
  "movq %%mm0,%%mm2 \n\t"/* 2 ; x3 x2 x1 x0*/\
  "movq " #A3 ",%%mm3 \n\t"/* 3 ; w05 w04 w01 w00*/\
  "pshufw $0x88,%%mm0,%%mm0 \n\t"/* x2 x0 x2 x0*/\
  "movq 8+" #A3 ",%%mm4 \n\t"/* 4 ; w07 w06 w03 w02*/\
  "movq %%mm1,%%mm5 \n\t"/* 5 ; x7 x6 x5 x4*/\
  "pmaddwd %%mm0,%%mm3 \n\t"/* x2*w05+x0*w04 x2*w01+x0*w00*/\
  "movq 32+" #A3 ",%%mm6 \n\t"/* 6 ; w21 w20 w17 w16*/\
  "pshufw $0x88,%%mm1,%%mm1 \n\t"/* x6 x4 x6 x4*/\
  "pmaddwd %%mm1,%%mm4 \n\t"/* x6*w07+x4*w06 x6*w03+x4*w02*/\
  "movq 40+" #A3 ",%%mm7 \n\t"/* 7 ; w23 w22 w19 w18*/\
  "pshufw $0xdd,%%mm2,%%mm2 \n\t"/* x3 x1 x3 x1*/\
  "pmaddwd %%mm2,%%mm6 \n\t"/* x3*w21+x1*w20 x3*w17+x1*w16*/\
  "pshufw $0xdd,%%mm5,%%mm5 \n\t"/* x7 x5 x7 x5*/\
  "pmaddwd %%mm5,%%mm7 \n\t"/* x7*w23+x5*w22 x7*w19+x5*w18*/\
  "paddd " #A4 ",%%mm3 \n\t"/* +%4*/\
  "pmaddwd 16+" #A3 ",%%mm0 \n\t"/* x2*w13+x0*w12 x2*w09+x0*w08*/\
  "paddd %%mm4,%%mm3 \n\t"/* 4 ; a1=sum(even1) a0=sum(even0)*/\
  "pmaddwd 24+" #A3 ",%%mm1 \n\t"/* x6*w15+x4*w14 x6*w11+x4*w10*/\
  "movq %%mm3,%%mm4 \n\t"/* 4 ; a1 a0*/\
  "pmaddwd 48+" #A3 ",%%mm2 \n\t"/* x3*w29+x1*w28 x3*w25+x1*w24*/\
  "paddd %%mm7,%%mm6 \n\t"/* 7 ; b1=sum(odd1) b0=sum(odd0)*/\
  "pmaddwd 56+" #A3 ",%%mm5 \n\t"/* x7*w31+x5*w30 x7*w27+x5*w26*/\
  "paddd %%mm6,%%mm3 \n\t"/* a1+b1 a0+b0*/\
  "paddd " #A4 ",%%mm0 \n\t"/* +%4*/\
  "psrad $11,%%mm3 \n\t"/* y1=a1+b1 y0=a0+b0*/\
  "paddd %%mm1,%%mm0 \n\t"/* 1 ; a3=sum(even3) a2=sum(even2)*/\
  "psubd %%mm6,%%mm4 \n\t"/* 6 ; a1-b1 a0-b0*/\
  "movq %%mm0,%%mm7 \n\t"/* 7 ; a3 a2*/\
  "paddd %%mm5,%%mm2 \n\t"/* 5 ; b3=sum(odd3) b2=sum(odd2)*/\
  "paddd %%mm2,%%mm0 \n\t"/* a3+b3 a2+b2*/\
  "psrad $11,%%mm4 \n\t"/* y6=a1-b1 y7=a0-b0*/\
  "psubd %%mm2,%%mm7 \n\t"/* 2 ; a3-b3 a2-b2*/\
  "psrad $11,%%mm0 \n\t"/* y3=a3+b3 y2=a2+b2*/\
  "psrad $11,%%mm7 \n\t"/* y4=a3-b3 y5=a2-b2*/\
  "packssdw %%mm0,%%mm3 \n\t"/* 0 ; y3 y2 y1 y0*/\
  "packssdw %%mm4,%%mm7 \n\t"/* 4 ; y6 y7 y4 y5*/\
  "movq %%mm3, " #A2 " \n\t"/* 3 ; save y3 y2 y1 y0*/\
  "pshufw $0xb1,%%mm7,%%mm7 \n\t"/* y7 y6 y5 y4*/\
  "movq %%mm7,8 +" #A2 "\n\t"/* 7 ; save y7 y6 y5 y4*/\

//-----------------------------------------------------------------------------
//
// The first stage DCT 8x8 - forward DCTs of columns
//
// The inputs are multiplied
// for rows 0,4 - on cos_4_16,
// for rows 1,7 - on cos_1_16,
// for rows 2,6 - on cos_2_16,
// for rows 3,5 - on cos_3_16
// and are shifted to the left for rise of accuracy
//
//-----------------------------------------------------------------------------
//
// The 8-point scaled forward DCT algorithm (26a8m)
//
//-----------------------------------------------------------------------------
//
// #define DCT_8_FRW_COL(x, y)
//{
// short t0, t1, t2, t3, t4, t5, t6, t7;
// short tp03, tm03, tp12, tm12, tp65, tm65;
// short tp465, tm465, tp765, tm765;
//
// t0 = LEFT_SHIFT ( x[0] + x[7] );
// t1 = LEFT_SHIFT ( x[1] + x[6] );
// t2 = LEFT_SHIFT ( x[2] + x[5] );
// t3 = LEFT_SHIFT ( x[3] + x[4] );
// t4 = LEFT_SHIFT ( x[3] - x[4] );
// t5 = LEFT_SHIFT ( x[2] - x[5] );
// t6 = LEFT_SHIFT ( x[1] - x[6] );
// t7 = LEFT_SHIFT ( x[0] - x[7] );
//
// tp03 = t0 + t3;
// tm03 = t0 - t3;
// tp12 = t1 + t2;
// tm12 = t1 - t2;
//
// y[0] = tp03 + tp12;
// y[4] = tp03 - tp12;
//
// y[2] = tm03 + tm12 * tg_2_16;
// y[6] = tm03 * tg_2_16 - tm12;
//
// tp65 =(t6 +t5 )*cos_4_16;
// tm65 =(t6 -t5 )*cos_4_16;
//
// tp765 = t7 + tp65;
// tm765 = t7 - tp65;
// tp465 = t4 + tm65;
// tm465 = t4 - tm65;
//
// y[1] = tp765 + tp465 * tg_1_16;
// y[7] = tp765 * tg_1_16 - tp465;
// y[5] = tm765 * tg_3_16 + tm465;
// y[3] = tm765 - tm465 * tg_3_16;
//}
//
//-----------------------------------------------------------------------------

//-----------------------------------------------------------------------------
// DCT_8_INV_COL_4  INP,OUT
//
// Inverse-DCT column pass on 4 columns at a time (callers invoke it twice,
// at offsets 0 and 8, to cover all 8 columns).  The tangent/cosine constants
// are addressed relative to %3 (tg_1_16 in the callers); the final outputs
// are arithmetic-shifted right by 6.  A2 may alias A1 (callers pass the
// block in place).
//-----------------------------------------------------------------------------
#define DCT_8_INV_COL(A1,A2)\
  "movq 2*8(%3),%%mm0\n\t"\
  "movq 16*3+" #A1 ",%%mm3\n\t"\
  "movq %%mm0,%%mm1 \n\t"/* tg_3_16*/\
  "movq 16*5+" #A1 ",%%mm5\n\t"\
  "pmulhw %%mm3,%%mm0 \n\t"/* x3*(tg_3_16-1)*/\
  "movq (%3),%%mm4\n\t"\
  "pmulhw %%mm5,%%mm1 \n\t"/* x5*(tg_3_16-1)*/\
  "movq 16*7+" #A1 ",%%mm7\n\t"\
  "movq %%mm4,%%mm2 \n\t"/* tg_1_16*/\
  "movq 16*1+" #A1 ",%%mm6\n\t"\
  "pmulhw %%mm7,%%mm4 \n\t"/* x7*tg_1_16*/\
  "paddsw %%mm3,%%mm0 \n\t"/* x3*tg_3_16*/\
  "pmulhw %%mm6,%%mm2 \n\t"/* x1*tg_1_16*/\
  "paddsw %%mm3,%%mm1 \n\t"/* x3+x5*(tg_3_16-1)*/\
  "psubsw %%mm5,%%mm0 \n\t"/* x3*tg_3_16-x5 = tm35*/\
  "movq 3*8(%3),%%mm3\n\t"\
  "paddsw %%mm5,%%mm1 \n\t"/* x3+x5*tg_3_16 = tp35*/\
  "paddsw %%mm6,%%mm4 \n\t"/* x1+tg_1_16*x7 = tp17*/\
  "psubsw %%mm7,%%mm2 \n\t"/* x1*tg_1_16-x7 = tm17*/\
  "movq %%mm4,%%mm5 \n\t"/* tp17*/\
  "movq %%mm2,%%mm6 \n\t"/* tm17*/\
  "paddsw %%mm1,%%mm5 \n\t"/* tp17+tp35 = b0*/\
  "psubsw %%mm0,%%mm6 \n\t"/* tm17-tm35 = b3*/\
  "psubsw %%mm1,%%mm4 \n\t"/* tp17-tp35 = t1*/\
  "paddsw %%mm0,%%mm2 \n\t"/* tm17+tm35 = t2*/\
  "movq 1*8(%3),%%mm7\n\t"\
  "movq %%mm4,%%mm1 \n\t"/* t1*/\
  "movq %%mm5,3*16 +" #A2 "\n\t"/* save b0*/\
  "paddsw %%mm2,%%mm1 \n\t"/* t1+t2*/\
  "movq %%mm6,5*16 +" #A2 "\n\t"/* save b3*/\
  "psubsw %%mm2,%%mm4 \n\t"/* t1-t2*/\
  "movq 2*16+" #A1 ",%%mm5\n\t"\
  "movq %%mm7,%%mm0 \n\t"/* tg_2_16*/\
  "movq 6*16+" #A1 ",%%mm6\n\t"\
  "pmulhw %%mm5,%%mm0 \n\t"/* x2*tg_2_16*/\
  "pmulhw %%mm6,%%mm7 \n\t"/* x6*tg_2_16*/\
  "pmulhw %%mm3,%%mm1 \n\t"/* ocos_4_16*(t1+t2) = b1/2*/\
  "movq 0*16+" #A1 ",%%mm2\n\t"\
  "pmulhw %%mm3,%%mm4 \n\t"/* ocos_4_16*(t1-t2) = b2/2*/\
  "psubsw %%mm6,%%mm0 \n\t"/* t2*tg_2_16-x6 = tm26*/\
  "movq %%mm2,%%mm3 \n\t"/* x0*/\
  "movq 4*16+" #A1 ",%%mm6\n\t"\
  "paddsw %%mm5,%%mm7 \n\t"/* x2+x6*tg_2_16 = tp26*/\
  "paddsw %%mm6,%%mm2 \n\t"/* x0+x4 = tp04*/\
  "psubsw %%mm6,%%mm3 \n\t"/* x0-x4 = tm04*/\
  "movq %%mm2,%%mm5 \n\t"/* tp04*/\
  "movq %%mm3,%%mm6 \n\t"/* tm04*/\
  "psubsw %%mm7,%%mm2 \n\t"/* tp04-tp26 = a3*/\
  "paddsw %%mm0,%%mm3 \n\t"/* tm04+tm26 = a1*/\
  "paddsw %%mm1,%%mm1 \n\t"/* b1*/\
  "paddsw %%mm4,%%mm4 \n\t"/* b2*/\
  "paddsw %%mm7,%%mm5 \n\t"/* tp04+tp26 = a0*/\
  "psubsw %%mm0,%%mm6 \n\t"/* tm04-tm26 = a2*/\
  "movq %%mm3,%%mm7 \n\t"/* a1*/\
  "movq %%mm6,%%mm0 \n\t"/* a2*/\
  "paddsw %%mm1,%%mm3 \n\t"/* a1+b1*/\
  "paddsw %%mm4,%%mm6 \n\t"/* a2+b2*/\
  "psraw $6,%%mm3 \n\t"/* dst1*/\
  "psubsw %%mm1,%%mm7 \n\t"/* a1-b1*/\
  "psraw $6,%%mm6 \n\t"/* dst2*/\
  "psubsw %%mm4,%%mm0 \n\t"/* a2-b2*/\
  "movq 3*16+" #A2 ",%%mm1 \n\t"/* load b0*/\
  "psraw $6,%%mm7 \n\t"/* dst6*/\
  "movq %%mm5,%%mm4 \n\t"/* a0*/\
  "psraw $6,%%mm0 \n\t"/* dst5*/\
  "movq %%mm3,1*16+" #A2 "\n\t"\
  "paddsw %%mm1,%%mm5 \n\t"/* a0+b0*/\
  "movq %%mm6,2*16+" #A2 "\n\t"\
  "psubsw %%mm1,%%mm4 \n\t"/* a0-b0*/\
  "movq 5*16+" #A2 ",%%mm3 \n\t"/* load b3*/\
  "psraw $6,%%mm5 \n\t"/* dst0*/\
  "movq %%mm2,%%mm6 \n\t"/* a3*/\
  "psraw $6,%%mm4 \n\t"/* dst7*/\
  "movq %%mm0,5*16+" #A2 "\n\t"\
  "paddsw %%mm3,%%mm2 \n\t"/* a3+b3*/\
  "movq %%mm7,6*16+" #A2 "\n\t"\
  "psubsw %%mm3,%%mm6 \n\t"/* a3-b3*/\
  "movq %%mm5,0*16+" #A2 "\n\t"\
  "psraw $6,%%mm2 \n\t"/* dst3*/\
  "movq %%mm4,7*16+" #A2 "\n\t"\
  "psraw $6,%%mm6 \n\t"/* dst4*/\
  "movq %%mm2,3*16+" #A2 "\n\t"\
  "movq %%mm6,4*16+" #A2 "\n\t"
//============================================================================= // Code //============================================================================= //----------------------------------------------------------------------------- // void idct_mmx(uint16_t block[64]); //----------------------------------------------------------------------------- void ff_idct_xvid_mmx(short *block){ __asm__ volatile( //# Process each row DCT_8_INV_ROW_MMX(0*16(%0), 0*16(%0), 64*0(%2), 8*0(%1)) DCT_8_INV_ROW_MMX(1*16(%0), 1*16(%0), 64*1(%2), 8*1(%1)) DCT_8_INV_ROW_MMX(2*16(%0), 2*16(%0), 64*2(%2), 8*2(%1)) DCT_8_INV_ROW_MMX(3*16(%0), 3*16(%0), 64*3(%2), 8*3(%1)) DCT_8_INV_ROW_MMX(4*16(%0), 4*16(%0), 64*0(%2), 8*4(%1)) DCT_8_INV_ROW_MMX(5*16(%0), 5*16(%0), 64*3(%2), 8*5(%1)) DCT_8_INV_ROW_MMX(6*16(%0), 6*16(%0), 64*2(%2), 8*6(%1)) DCT_8_INV_ROW_MMX(7*16(%0), 7*16(%0), 64*1(%2), 8*7(%1)) //# Process the columns (4 at a time) DCT_8_INV_COL(0(%0), 0(%0)) DCT_8_INV_COL(8(%0), 8(%0)) :: "r"(block), "r"(rounder_0), "r"(tab_i_04_mmx), "r"(tg_1_16)); } //----------------------------------------------------------------------------- // void idct_xmm(uint16_t block[64]); //----------------------------------------------------------------------------- void ff_idct_xvid_mmx2(short *block){ __asm__ volatile( //# Process each row DCT_8_INV_ROW_XMM(0*16(%0), 0*16(%0), 64*0(%2), 8*0(%1)) DCT_8_INV_ROW_XMM(1*16(%0), 1*16(%0), 64*1(%2), 8*1(%1)) DCT_8_INV_ROW_XMM(2*16(%0), 2*16(%0), 64*2(%2), 8*2(%1)) DCT_8_INV_ROW_XMM(3*16(%0), 3*16(%0), 64*3(%2), 8*3(%1)) DCT_8_INV_ROW_XMM(4*16(%0), 4*16(%0), 64*0(%2), 8*4(%1)) DCT_8_INV_ROW_XMM(5*16(%0), 5*16(%0), 64*3(%2), 8*5(%1)) DCT_8_INV_ROW_XMM(6*16(%0), 6*16(%0), 64*2(%2), 8*6(%1)) DCT_8_INV_ROW_XMM(7*16(%0), 7*16(%0), 64*1(%2), 8*7(%1)) //# Process the columns (4 at a time) DCT_8_INV_COL(0(%0), 0(%0)) DCT_8_INV_COL(8(%0), 8(%0)) :: "r"(block), "r"(rounder_0), "r"(tab_i_04_xmm), "r"(tg_1_16)); }
123linslouis-android-video-cutter
jni/libavcodec/x86/idct_mmx_xvid.c
C
asf20
23,945
/*
 * MPEG video MMX templates
 *
 * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

// Template helpers: the #undef/#define pairs below rebind the SIMD helper
// macros for whichever flavour (SSE2 / MMX2 / plain MMX) this template is
// being compiled as (see the RENAME() use further down).
#undef MMREG_WIDTH
#undef MM
#undef MOVQ
#undef SPREADW
#undef PMAXW
#undef PMAX
#undef SAVE_SIGN
#undef RESTORE_SIGN

#if HAVE_SSE2
#define MMREG_WIDTH "16"
#define MM "%%xmm"
#define MOVQ "movdqa"
// SPREADW: broadcast the low word of a register to all word lanes.
#define SPREADW(a) \
            "pshuflw $0, "a", "a" \n\t"\
            "punpcklwd "a", "a" \n\t"
#define PMAXW(a,b) "pmaxsw "a", "b" \n\t"
// PMAX: horizontal max of the word lanes, result in the low word of a.
#define PMAX(a,b) \
            "movhlps "a", "b" \n\t"\
            PMAXW(b, a)\
            "pshuflw $0x0E, "a", "b" \n\t"\
            PMAXW(b, a)\
            "pshuflw $0x01, "a", "b" \n\t"\
            PMAXW(b, a)
#else
#define MMREG_WIDTH "8"
#define MM "%%mm"
#define MOVQ "movq"
#if HAVE_MMX2
#define SPREADW(a) "pshufw $0, "a", "a" \n\t"
#define PMAXW(a,b) "pmaxsw "a", "b" \n\t"
#define PMAX(a,b) \
            "pshufw $0x0E, "a", "b" \n\t"\
            PMAXW(b, a)\
            "pshufw $0x01, "a", "b" \n\t"\
            PMAXW(b, a)
#else
#define SPREADW(a) \
            "punpcklwd "a", "a" \n\t"\
            "punpcklwd "a", "a" \n\t"
// Plain-MMX pmaxsw substitute: max(a,b) == (b - a saturated) + a for words.
#define PMAXW(a,b) \
            "psubusw "a", "b" \n\t"\
            "paddw "a", "b" \n\t"
#define PMAX(a,b)  \
            "movq "a", "b" \n\t"\
            "psrlq $32, "a" \n\t"\
            PMAXW(b, a)\
            "movq "a", "b" \n\t"\
            "psrlq $16, "a" \n\t"\
            PMAXW(b, a)
#endif
#endif

#if HAVE_SSSE3
// SAVE_SIGN: a <- sign mask of b, b <- |b|.  RESTORE_SIGN reapplies it.
#define SAVE_SIGN(a,b) \
            "movdqa "b", "a" \n\t"\
            "pabsw "b", "b" \n\t"
#define RESTORE_SIGN(a,b) \
            "psignw "a", "b" \n\t"
#else
#define SAVE_SIGN(a,b) \
            "pxor "a", "a" \n\t"\
            "pcmpgtw "b", "a" \n\t" /* block[i] <= 0 ? 0xFF : 0x00 */\
            "pxor "a", "b" \n\t"\
            "psubw "a", "b" \n\t" /* ABS(block[i]) */
#define RESTORE_SIGN(a,b) \
            "pxor "a", "b" \n\t"\
            "psubw "a", "b" \n\t" // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i])
#endif

/**
 * Forward-DCT and quantize one 8x8 block (SIMD template; RENAME() selects
 * the MMX/MMX2/SSE2 flavour at inclusion time).
 *
 * Runs RENAMEl(ff_fdct) on block, quantizes into temp_block (in zigzag
 * order), sets *overflow if any coefficient exceeds s->max_qcoeff, then
 * scatters the quantized values back into block in the order required by
 * s->dsp.idct_permutation_type.
 *
 * @param s        encoder context (quant matrices, DC scales, flags)
 * @param block    coefficients; input to the FDCT and final output
 * @param n        block index; n < 4 selects luma DC scale, else chroma
 * @param qscale   quantizer scale used to index the q_*_matrix16 tables
 * @param overflow set to non-zero if clipping to max_qcoeff is needed
 * @return index of the last non-zero coefficient (last_non_zero_p1 - 1)
 */
static int RENAME(dct_quantize)(MpegEncContext *s,
                            DCTELEM *block, int n,
                            int qscale, int *overflow)
{
    x86_reg last_non_zero_p1;
    int level=0, q; //=0 is because gcc says uninitialized ...
    const uint16_t *qmat, *bias;
    DECLARE_ALIGNED(16, int16_t, temp_block)[64];

    assert((7&(int)(&temp_block[0])) == 0); //did gcc align it correctly?

    //s->fdct (block);
    RENAMEl(ff_fdct) (block); //cannot be anything else ...

    if(s->dct_error_sum)
        s->denoise_dct(s, block);

    if (s->mb_intra) {
        int dummy;
        if (n < 4)
            q = s->y_dc_scale;
        else
            q = s->c_dc_scale;
        /* note: block[0] is assumed to be positive */
        if (!s->h263_aic) {
#if 1
        // DC division by reciprocal multiplication: high half of
        // (block[0]>>2 + q) * ff_inverse[2q] lands in edx ("=d").
        __asm__ volatile (
                "mul %%ecx \n\t"
                : "=d" (level), "=a"(dummy)
                : "a" ((block[0]>>2) + q), "c" (ff_inverse[q<<1])
        );
#else
        __asm__ volatile (
                "xorl %%edx, %%edx \n\t"
                "divw %%cx \n\t"
                "movzwl %%ax, %%eax \n\t"
                : "=a" (level)
                : "a" ((block[0]>>2) + q), "c" (q<<1)
                : "%edx"
        );
#endif
        } else
            /* For AIC we skip quant/dequant of INTRADC */
            level = (block[0] + 4)>>3;

        block[0]=0; //avoid fake overflow
//        temp_block[0] = (block[0] + (q >> 1)) / q;
        last_non_zero_p1 = 1;
        bias = s->q_intra_matrix16[qscale][1];
        qmat = s->q_intra_matrix16[qscale][0];
    } else {
        last_non_zero_p1 = 0;
        bias = s->q_inter_matrix16[qscale][1];
        qmat = s->q_inter_matrix16[qscale][0];
    }

    if((s->out_format == FMT_H263 || s->out_format == FMT_H261) && s->mpeg_quant==0){
        // Uniform quantizer: a single qmat[0]/bias[0] pair for all
        // coefficients; the loop walks block[] backwards via a negative
        // index in REG_a, zeroing block[] and filling temp_block[].
        __asm__ volatile(
            "movd %%"REG_a", "MM"3 \n\t" // last_non_zero_p1
            SPREADW(MM"3")
            "pxor "MM"7, "MM"7 \n\t" // 0
            "pxor "MM"4, "MM"4 \n\t" // 0
            MOVQ" (%2), "MM"5 \n\t" // qmat[0]
            "pxor "MM"6, "MM"6 \n\t"
            "psubw (%3), "MM"6 \n\t" // -bias[0]
            "mov $-128, %%"REG_a" \n\t"
            ASMALIGN(4)
            "1: \n\t"
            MOVQ" (%1, %%"REG_a"), "MM"0 \n\t" // block[i]
            SAVE_SIGN(MM"1", MM"0") // ABS(block[i])
            "psubusw "MM"6, "MM"0 \n\t" // ABS(block[i]) + bias[0]
            "pmulhw "MM"5, "MM"0 \n\t" // (ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16
            "por "MM"0, "MM"4 \n\t"
            RESTORE_SIGN(MM"1", MM"0") // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i])
            MOVQ" "MM"0, (%5, %%"REG_a") \n\t"
            "pcmpeqw "MM"7, "MM"0 \n\t" // out==0 ? 0xFF : 0x00
            MOVQ" (%4, %%"REG_a"), "MM"1 \n\t"
            MOVQ" "MM"7, (%1, %%"REG_a") \n\t" // 0
            "pandn "MM"1, "MM"0 \n\t"
            PMAXW(MM"0", MM"3")
            "add $"MMREG_WIDTH", %%"REG_a" \n\t"
            " js 1b \n\t"
            PMAX(MM"3", MM"0")
            "movd "MM"3, %%"REG_a" \n\t"
            "movzb %%al, %%"REG_a" \n\t" // last_non_zero_p1
            : "+a" (last_non_zero_p1)
            : "r" (block+64), "r" (qmat), "r" (bias),
              "r" (inv_zigzag_direct16+64), "r" (temp_block+64)
        );
    }else{ // per-coefficient qmat/bias matrices (MPEG-style quant)
        __asm__ volatile(
            "movd %%"REG_a", "MM"3 \n\t" // last_non_zero_p1
            SPREADW(MM"3")
            "pxor "MM"7, "MM"7 \n\t" // 0
            "pxor "MM"4, "MM"4 \n\t" // 0
            "mov $-128, %%"REG_a" \n\t"
            ASMALIGN(4)
            "1: \n\t"
            MOVQ" (%1, %%"REG_a"), "MM"0 \n\t" // block[i]
            SAVE_SIGN(MM"1", MM"0") // ABS(block[i])
            MOVQ" (%3, %%"REG_a"), "MM"6 \n\t" // bias[0]
            "paddusw "MM"6, "MM"0 \n\t" // ABS(block[i]) + bias[0]
            MOVQ" (%2, %%"REG_a"), "MM"5 \n\t" // qmat[i]
            "pmulhw "MM"5, "MM"0 \n\t" // (ABS(block[i])*qmat[0] + bias[0]*qmat[0])>>16
            "por "MM"0, "MM"4 \n\t"
            RESTORE_SIGN(MM"1", MM"0") // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i])
            MOVQ" "MM"0, (%5, %%"REG_a") \n\t"
            "pcmpeqw "MM"7, "MM"0 \n\t" // out==0 ? 0xFF : 0x00
            MOVQ" (%4, %%"REG_a"), "MM"1 \n\t"
            MOVQ" "MM"7, (%1, %%"REG_a") \n\t" // 0
            "pandn "MM"1, "MM"0 \n\t"
            PMAXW(MM"0", MM"3")
            "add $"MMREG_WIDTH", %%"REG_a" \n\t"
            " js 1b \n\t"
            PMAX(MM"3", MM"0")
            "movd "MM"3, %%"REG_a" \n\t"
            "movzb %%al, %%"REG_a" \n\t" // last_non_zero_p1
            : "+a" (last_non_zero_p1)
            : "r" (block+64), "r" (qmat+64), "r" (bias+64),
              "r" (inv_zigzag_direct16+64), "r" (temp_block+64)
        );
    }
    // MM4 accumulated the OR of all quantized magnitudes; anything left
    // after subtracting max_qcoeff (saturated) signals an overflow.
    __asm__ volatile(
        "movd %1, "MM"1 \n\t" // max_qcoeff
        SPREADW(MM"1")
        "psubusw "MM"1, "MM"4 \n\t"
        "packuswb "MM"4, "MM"4 \n\t"
#if HAVE_SSE2
        "packuswb "MM"4, "MM"4 \n\t"
#endif
        "movd "MM"4, %0 \n\t" // *overflow
        : "=g" (*overflow)
        : "g" (s->max_qcoeff)
    );

    if(s->mb_intra) block[0]= level;
    else            block[0]= temp_block[0];

    // Scatter temp_block (zigzag order) into block in the order expected by
    // the selected IDCT; the unrolled runs below are gated by
    // last_non_zero_p1 so trailing zero coefficients are never copied.
    if(s->dsp.idct_permutation_type == FF_SIMPLE_IDCT_PERM){
        if(last_non_zero_p1 <= 1) goto end;
        block[0x08] = temp_block[0x01]; block[0x10] = temp_block[0x08];
        block[0x20] = temp_block[0x10];
        if(last_non_zero_p1 <= 4) goto end;
        block[0x18] = temp_block[0x09]; block[0x04] = temp_block[0x02];
        block[0x09] = temp_block[0x03];
        if(last_non_zero_p1 <= 7) goto end;
        block[0x14] = temp_block[0x0A]; block[0x28] = temp_block[0x11];
        block[0x12] = temp_block[0x18]; block[0x02] = temp_block[0x20];
        if(last_non_zero_p1 <= 11) goto end;
        block[0x1A] = temp_block[0x19]; block[0x24] = temp_block[0x12];
        block[0x19] = temp_block[0x0B]; block[0x01] = temp_block[0x04];
        block[0x0C] = temp_block[0x05];
        if(last_non_zero_p1 <= 16) goto end;
        block[0x11] = temp_block[0x0C]; block[0x29] = temp_block[0x13];
        block[0x16] = temp_block[0x1A]; block[0x0A] = temp_block[0x21];
        block[0x30] = temp_block[0x28]; block[0x22] = temp_block[0x30];
        block[0x38] = temp_block[0x29]; block[0x06] = temp_block[0x22];
        if(last_non_zero_p1 <= 24) goto end;
        block[0x1B] = temp_block[0x1B]; block[0x21] = temp_block[0x14];
        block[0x1C] = temp_block[0x0D]; block[0x05] = temp_block[0x06];
        block[0x0D] = temp_block[0x07]; block[0x15] = temp_block[0x0E];
        block[0x2C] = temp_block[0x15]; block[0x13] = temp_block[0x1C];
        if(last_non_zero_p1 <= 32) goto end;
        block[0x0B] = temp_block[0x23]; block[0x34] = temp_block[0x2A];
        block[0x2A] = temp_block[0x31]; block[0x32] = temp_block[0x38];
        block[0x3A] = temp_block[0x39]; block[0x26] = temp_block[0x32];
        block[0x39] = temp_block[0x2B]; block[0x03] = temp_block[0x24];
        if(last_non_zero_p1 <= 40) goto end;
        block[0x1E] = temp_block[0x1D]; block[0x25] = temp_block[0x16];
        block[0x1D] = temp_block[0x0F]; block[0x2D] = temp_block[0x17];
        block[0x17] = temp_block[0x1E]; block[0x0E] = temp_block[0x25];
        block[0x31] = temp_block[0x2C]; block[0x2B] = temp_block[0x33];
        if(last_non_zero_p1 <= 48) goto end;
        block[0x36] = temp_block[0x3A]; block[0x3B] = temp_block[0x3B];
        block[0x23] = temp_block[0x34]; block[0x3C] = temp_block[0x2D];
        block[0x07] = temp_block[0x26]; block[0x1F] = temp_block[0x1F];
        block[0x0F] = temp_block[0x27]; block[0x35] = temp_block[0x2E];
        if(last_non_zero_p1 <= 56) goto end;
        block[0x2E] = temp_block[0x35]; block[0x33] = temp_block[0x3C];
        block[0x3E] = temp_block[0x3D]; block[0x27] = temp_block[0x36];
        block[0x3D] = temp_block[0x2F]; block[0x2F] = temp_block[0x37];
        block[0x37] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F];
    }else if(s->dsp.idct_permutation_type == FF_LIBMPEG2_IDCT_PERM){
        if(last_non_zero_p1 <= 1) goto end;
        block[0x04] = temp_block[0x01];
        block[0x08] = temp_block[0x08]; block[0x10] = temp_block[0x10];
        if(last_non_zero_p1 <= 4) goto end;
        block[0x0C] = temp_block[0x09]; block[0x01] = temp_block[0x02];
        block[0x05] = temp_block[0x03];
        if(last_non_zero_p1 <= 7) goto end;
        block[0x09] = temp_block[0x0A]; block[0x14] = temp_block[0x11];
        block[0x18] = temp_block[0x18]; block[0x20] = temp_block[0x20];
        if(last_non_zero_p1 <= 11) goto end;
        block[0x1C] = temp_block[0x19];
        block[0x11] = temp_block[0x12]; block[0x0D] = temp_block[0x0B];
        block[0x02] = temp_block[0x04]; block[0x06] = temp_block[0x05];
        if(last_non_zero_p1 <= 16) goto end;
        block[0x0A] = temp_block[0x0C]; block[0x15] = temp_block[0x13];
        block[0x19] = temp_block[0x1A]; block[0x24] = temp_block[0x21];
        block[0x28] = temp_block[0x28]; block[0x30] = temp_block[0x30];
        block[0x2C] = temp_block[0x29]; block[0x21] = temp_block[0x22];
        if(last_non_zero_p1 <= 24) goto end;
        block[0x1D] = temp_block[0x1B]; block[0x12] = temp_block[0x14];
        block[0x0E] = temp_block[0x0D]; block[0x03] = temp_block[0x06];
        block[0x07] = temp_block[0x07]; block[0x0B] = temp_block[0x0E];
        block[0x16] = temp_block[0x15]; block[0x1A] = temp_block[0x1C];
        if(last_non_zero_p1 <= 32) goto end;
        block[0x25] = temp_block[0x23]; block[0x29] = temp_block[0x2A];
        block[0x34] = temp_block[0x31]; block[0x38] = temp_block[0x38];
        block[0x3C] = temp_block[0x39]; block[0x31] = temp_block[0x32];
        block[0x2D] = temp_block[0x2B]; block[0x22] = temp_block[0x24];
        if(last_non_zero_p1 <= 40) goto end;
        block[0x1E] = temp_block[0x1D]; block[0x13] = temp_block[0x16];
        block[0x0F] = temp_block[0x0F]; block[0x17] = temp_block[0x17];
        block[0x1B] = temp_block[0x1E]; block[0x26] = temp_block[0x25];
        block[0x2A] = temp_block[0x2C]; block[0x35] = temp_block[0x33];
        if(last_non_zero_p1 <= 48) goto end;
        block[0x39] = temp_block[0x3A]; block[0x3D] = temp_block[0x3B];
        block[0x32] = temp_block[0x34]; block[0x2E] = temp_block[0x2D];
        block[0x23] = temp_block[0x26]; block[0x1F] = temp_block[0x1F];
        block[0x27] = temp_block[0x27]; block[0x2B] = temp_block[0x2E];
        if(last_non_zero_p1 <= 56) goto end;
        block[0x36] = temp_block[0x35]; block[0x3A] = temp_block[0x3C];
        block[0x3E] = temp_block[0x3D]; block[0x33] = temp_block[0x36];
        block[0x2F] = temp_block[0x2F]; block[0x37] = temp_block[0x37];
        block[0x3B] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F];
    }else{
        // identity-permutation IDCTs: plain zigzag -> raster copy
        if(last_non_zero_p1 <= 1) goto end;
        block[0x01] = temp_block[0x01];
        block[0x08] = temp_block[0x08]; block[0x10] = temp_block[0x10];
        if(last_non_zero_p1 <= 4) goto end;
        block[0x09] = temp_block[0x09]; block[0x02] = temp_block[0x02];
        block[0x03] = temp_block[0x03];
        if(last_non_zero_p1 <= 7) goto end;
        block[0x0A] = temp_block[0x0A]; block[0x11] = temp_block[0x11];
        block[0x18] = temp_block[0x18]; block[0x20] = temp_block[0x20];
        if(last_non_zero_p1 <= 11) goto end;
        block[0x19] = temp_block[0x19];
        block[0x12] = temp_block[0x12]; block[0x0B] = temp_block[0x0B];
        block[0x04] = temp_block[0x04]; block[0x05] = temp_block[0x05];
        if(last_non_zero_p1 <= 16) goto end;
        block[0x0C] = temp_block[0x0C]; block[0x13] = temp_block[0x13];
        block[0x1A] = temp_block[0x1A]; block[0x21] = temp_block[0x21];
        block[0x28] = temp_block[0x28]; block[0x30] = temp_block[0x30];
        block[0x29] = temp_block[0x29]; block[0x22] = temp_block[0x22];
        if(last_non_zero_p1 <= 24) goto end;
        block[0x1B] = temp_block[0x1B]; block[0x14] = temp_block[0x14];
        block[0x0D] = temp_block[0x0D]; block[0x06] = temp_block[0x06];
        block[0x07] = temp_block[0x07]; block[0x0E] = temp_block[0x0E];
        block[0x15] = temp_block[0x15]; block[0x1C] = temp_block[0x1C];
        if(last_non_zero_p1 <= 32) goto end;
        block[0x23] = temp_block[0x23]; block[0x2A] = temp_block[0x2A];
        block[0x31] = temp_block[0x31]; block[0x38] = temp_block[0x38];
        block[0x39] = temp_block[0x39]; block[0x32] = temp_block[0x32];
        block[0x2B] = temp_block[0x2B]; block[0x24] = temp_block[0x24];
        if(last_non_zero_p1 <= 40) goto end;
        block[0x1D] = temp_block[0x1D]; block[0x16] = temp_block[0x16];
        block[0x0F] = temp_block[0x0F]; block[0x17] = temp_block[0x17];
        block[0x1E] = temp_block[0x1E]; block[0x25] = temp_block[0x25];
        block[0x2C] = temp_block[0x2C]; block[0x33] = temp_block[0x33];
        if(last_non_zero_p1 <= 48) goto end;
        block[0x3A] = temp_block[0x3A]; block[0x3B] = temp_block[0x3B];
        block[0x34] = temp_block[0x34]; block[0x2D] = temp_block[0x2D];
        block[0x26] = temp_block[0x26]; block[0x1F] = temp_block[0x1F];
        block[0x27] = temp_block[0x27]; block[0x2E] = temp_block[0x2E];
        if(last_non_zero_p1 <= 56) goto end;
        block[0x35] = temp_block[0x35]; block[0x3C] = temp_block[0x3C];
        block[0x3D] = temp_block[0x3D]; block[0x36] = temp_block[0x36];
        block[0x2F] = temp_block[0x2F]; block[0x37] = temp_block[0x37];
        block[0x3E] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F];
    }
    end:
/*
    for(i=0; i<last_non_zero_p1; i++)
    {
       int j= zigzag_direct_noperm[i];
       block[block_permute_op(j)]= temp_block[j];
    }
*/
    return last_non_zero_p1 - 1;
}
123linslouis-android-video-cutter
jni/libavcodec/x86/mpegvideo_mmx_template.c
C
asf20
17,466
/* * Copyright (C) 2004 the ffmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * MMX-optimized functions cribbed from the original VP3 source code. */ #include "libavutil/x86_cpu.h" #include "libavcodec/dsputil.h" #include "dsputil_mmx.h" #include "vp3dsp_mmx.h" extern const uint16_t ff_vp3_idct_data[]; // this is off by one or two for some cases when filter_limit is greater than 63 // in: p0 in mm6, p1 in mm4, p2 in mm2, p3 in mm1 // out: p1 in mm4, p2 in mm3 #define VP3_LOOP_FILTER(flim) \ "movq %%mm6, %%mm7 \n\t" \ "pand "MANGLE(ff_pb_7 )", %%mm6 \n\t" /* p0&7 */ \ "psrlw $3, %%mm7 \n\t" \ "pand "MANGLE(ff_pb_1F)", %%mm7 \n\t" /* p0>>3 */ \ "movq %%mm2, %%mm3 \n\t" /* mm3 = p2 */ \ "pxor %%mm4, %%mm2 \n\t" \ "pand "MANGLE(ff_pb_1 )", %%mm2 \n\t" /* (p2^p1)&1 */ \ "movq %%mm2, %%mm5 \n\t" \ "paddb %%mm2, %%mm2 \n\t" \ "paddb %%mm5, %%mm2 \n\t" /* 3*(p2^p1)&1 */ \ "paddb %%mm6, %%mm2 \n\t" /* extra bits lost in shifts */ \ "pcmpeqb %%mm0, %%mm0 \n\t" \ "pxor %%mm0, %%mm1 \n\t" /* 255 - p3 */ \ "pavgb %%mm2, %%mm1 \n\t" /* (256 - p3 + extrabits) >> 1 */ \ "pxor %%mm4, %%mm0 \n\t" /* 255 - p1 */ \ "pavgb %%mm3, %%mm0 \n\t" /* (256 + p2-p1) >> 1 */ \ "paddb "MANGLE(ff_pb_3 )", %%mm1 \n\t" \ "pavgb %%mm0, %%mm1 \n\t" /* 128+2+( p2-p1 - p3) >> 2 */ \ "pavgb 
%%mm0, %%mm1 \n\t" /* 128+1+(3*(p2-p1) - p3) >> 3 */ \ "paddusb %%mm1, %%mm7 \n\t" /* d+128+1 */ \ "movq "MANGLE(ff_pb_81)", %%mm6 \n\t" \ "psubusb %%mm7, %%mm6 \n\t" \ "psubusb "MANGLE(ff_pb_81)", %%mm7 \n\t" \ \ "movq "#flim", %%mm5 \n\t" \ "pminub %%mm5, %%mm6 \n\t" \ "pminub %%mm5, %%mm7 \n\t" \ "movq %%mm6, %%mm0 \n\t" \ "movq %%mm7, %%mm1 \n\t" \ "paddb %%mm6, %%mm6 \n\t" \ "paddb %%mm7, %%mm7 \n\t" \ "pminub %%mm5, %%mm6 \n\t" \ "pminub %%mm5, %%mm7 \n\t" \ "psubb %%mm0, %%mm6 \n\t" \ "psubb %%mm1, %%mm7 \n\t" \ "paddusb %%mm7, %%mm4 \n\t" \ "psubusb %%mm6, %%mm4 \n\t" \ "psubusb %%mm7, %%mm3 \n\t" \ "paddusb %%mm6, %%mm3 \n\t" #define STORE_4_WORDS(dst0, dst1, dst2, dst3, mm) \ "movd "#mm", %0 \n\t" \ "movw %w0, -1"#dst0" \n\t" \ "psrlq $32, "#mm" \n\t" \ "shr $16, %0 \n\t" \ "movw %w0, -1"#dst1" \n\t" \ "movd "#mm", %0 \n\t" \ "movw %w0, -1"#dst2" \n\t" \ "shr $16, %0 \n\t" \ "movw %w0, -1"#dst3" \n\t" void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values) { __asm__ volatile( "movq %0, %%mm6 \n\t" "movq %1, %%mm4 \n\t" "movq %2, %%mm2 \n\t" "movq %3, %%mm1 \n\t" VP3_LOOP_FILTER(%4) "movq %%mm4, %1 \n\t" "movq %%mm3, %2 \n\t" : "+m" (*(uint64_t*)(src - 2*stride)), "+m" (*(uint64_t*)(src - 1*stride)), "+m" (*(uint64_t*)(src + 0*stride)), "+m" (*(uint64_t*)(src + 1*stride)) : "m"(*(uint64_t*)(bounding_values+129)) ); } void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values) { x86_reg tmp; __asm__ volatile( "movd -2(%1), %%mm6 \n\t" "movd -2(%1,%3), %%mm0 \n\t" "movd -2(%1,%3,2), %%mm1 \n\t" "movd -2(%1,%4), %%mm4 \n\t" TRANSPOSE8x4(%%mm6, %%mm0, %%mm1, %%mm4, -2(%2), -2(%2,%3), -2(%2,%3,2), -2(%2,%4), %%mm2) VP3_LOOP_FILTER(%5) SBUTTERFLY(%%mm4, %%mm3, %%mm5, bw, q) STORE_4_WORDS((%1), (%1,%3), (%1,%3,2), (%1,%4), %%mm4) STORE_4_WORDS((%2), (%2,%3), (%2,%3,2), (%2,%4), %%mm5) : "=&r"(tmp) : "r"(src), "r"(src+4*stride), "r"((x86_reg)stride), "r"((x86_reg)3*stride), "m"(*(uint64_t*)(bounding_values+129)) : "memory" 
); } /* from original comments: The Macro does IDct on 4 1-D Dcts */ #define BeginIDCT() \ "movq "I(3)", %%mm2 \n\t" \ "movq "C(3)", %%mm6 \n\t" \ "movq %%mm2, %%mm4 \n\t" \ "movq "J(5)", %%mm7 \n\t" \ "pmulhw %%mm6, %%mm4 \n\t" /* r4 = c3*i3 - i3 */ \ "movq "C(5)", %%mm1 \n\t" \ "pmulhw %%mm7, %%mm6 \n\t" /* r6 = c3*i5 - i5 */ \ "movq %%mm1, %%mm5 \n\t" \ "pmulhw %%mm2, %%mm1 \n\t" /* r1 = c5*i3 - i3 */ \ "movq "I(1)", %%mm3 \n\t" \ "pmulhw %%mm7, %%mm5 \n\t" /* r5 = c5*i5 - i5 */ \ "movq "C(1)", %%mm0 \n\t" \ "paddw %%mm2, %%mm4 \n\t" /* r4 = c3*i3 */ \ "paddw %%mm7, %%mm6 \n\t" /* r6 = c3*i5 */ \ "paddw %%mm1, %%mm2 \n\t" /* r2 = c5*i3 */ \ "movq "J(7)", %%mm1 \n\t" \ "paddw %%mm5, %%mm7 \n\t" /* r7 = c5*i5 */ \ "movq %%mm0, %%mm5 \n\t" /* r5 = c1 */ \ "pmulhw %%mm3, %%mm0 \n\t" /* r0 = c1*i1 - i1 */ \ "paddsw %%mm7, %%mm4 \n\t" /* r4 = C = c3*i3 + c5*i5 */ \ "pmulhw %%mm1, %%mm5 \n\t" /* r5 = c1*i7 - i7 */ \ "movq "C(7)", %%mm7 \n\t" \ "psubsw %%mm2, %%mm6 \n\t" /* r6 = D = c3*i5 - c5*i3 */ \ "paddw %%mm3, %%mm0 \n\t" /* r0 = c1*i1 */ \ "pmulhw %%mm7, %%mm3 \n\t" /* r3 = c7*i1 */ \ "movq "I(2)", %%mm2 \n\t" \ "pmulhw %%mm1, %%mm7 \n\t" /* r7 = c7*i7 */ \ "paddw %%mm1, %%mm5 \n\t" /* r5 = c1*i7 */ \ "movq %%mm2, %%mm1 \n\t" /* r1 = i2 */ \ "pmulhw "C(2)", %%mm2 \n\t" /* r2 = c2*i2 - i2 */ \ "psubsw %%mm5, %%mm3 \n\t" /* r3 = B = c7*i1 - c1*i7 */ \ "movq "J(6)", %%mm5 \n\t" \ "paddsw %%mm7, %%mm0 \n\t" /* r0 = A = c1*i1 + c7*i7 */ \ "movq %%mm5, %%mm7 \n\t" /* r7 = i6 */ \ "psubsw %%mm4, %%mm0 \n\t" /* r0 = A - C */ \ "pmulhw "C(2)", %%mm5 \n\t" /* r5 = c2*i6 - i6 */ \ "paddw %%mm1, %%mm2 \n\t" /* r2 = c2*i2 */ \ "pmulhw "C(6)", %%mm1 \n\t" /* r1 = c6*i2 */ \ "paddsw %%mm4, %%mm4 \n\t" /* r4 = C + C */ \ "paddsw %%mm0, %%mm4 \n\t" /* r4 = C. 
= A + C */ \ "psubsw %%mm6, %%mm3 \n\t" /* r3 = B - D */ \ "paddw %%mm7, %%mm5 \n\t" /* r5 = c2*i6 */ \ "paddsw %%mm6, %%mm6 \n\t" /* r6 = D + D */ \ "pmulhw "C(6)", %%mm7 \n\t" /* r7 = c6*i6 */ \ "paddsw %%mm3, %%mm6 \n\t" /* r6 = D. = B + D */ \ "movq %%mm4, "I(1)"\n\t" /* save C. at I(1) */ \ "psubsw %%mm5, %%mm1 \n\t" /* r1 = H = c6*i2 - c2*i6 */ \ "movq "C(4)", %%mm4 \n\t" \ "movq %%mm3, %%mm5 \n\t" /* r5 = B - D */ \ "pmulhw %%mm4, %%mm3 \n\t" /* r3 = (c4 - 1) * (B - D) */ \ "paddsw %%mm2, %%mm7 \n\t" /* r3 = (c4 - 1) * (B - D) */ \ "movq %%mm6, "I(2)"\n\t" /* save D. at I(2) */ \ "movq %%mm0, %%mm2 \n\t" /* r2 = A - C */ \ "movq "I(0)", %%mm6 \n\t" \ "pmulhw %%mm4, %%mm0 \n\t" /* r0 = (c4 - 1) * (A - C) */ \ "paddw %%mm3, %%mm5 \n\t" /* r5 = B. = c4 * (B - D) */ \ "movq "J(4)", %%mm3 \n\t" \ "psubsw %%mm1, %%mm5 \n\t" /* r5 = B.. = B. - H */ \ "paddw %%mm0, %%mm2 \n\t" /* r0 = A. = c4 * (A - C) */ \ "psubsw %%mm3, %%mm6 \n\t" /* r6 = i0 - i4 */ \ "movq %%mm6, %%mm0 \n\t" \ "pmulhw %%mm4, %%mm6 \n\t" /* r6 = (c4 - 1) * (i0 - i4) */ \ "paddsw %%mm3, %%mm3 \n\t" /* r3 = i4 + i4 */ \ "paddsw %%mm1, %%mm1 \n\t" /* r1 = H + H */ \ "paddsw %%mm0, %%mm3 \n\t" /* r3 = i0 + i4 */ \ "paddsw %%mm5, %%mm1 \n\t" /* r1 = H. = B + H */ \ "pmulhw %%mm3, %%mm4 \n\t" /* r4 = (c4 - 1) * (i0 + i4) */ \ "paddsw %%mm0, %%mm6 \n\t" /* r6 = F = c4 * (i0 - i4) */ \ "psubsw %%mm2, %%mm6 \n\t" /* r6 = F. = F - A. */ \ "paddsw %%mm2, %%mm2 \n\t" /* r2 = A. + A. */ \ "movq "I(1)", %%mm0 \n\t" /* r0 = C. */ \ "paddsw %%mm6, %%mm2 \n\t" /* r2 = A.. = F + A. */ \ "paddw %%mm3, %%mm4 \n\t" /* r4 = E = c4 * (i0 + i4) */ \ "psubsw %%mm1, %%mm2 \n\t" /* r2 = R2 = A.. - H. */ /* RowIDCT gets ready to transpose */ #define RowIDCT() \ BeginIDCT() \ "movq "I(2)", %%mm3 \n\t" /* r3 = D. */ \ "psubsw %%mm7, %%mm4 \n\t" /* r4 = E. = E - G */ \ "paddsw %%mm1, %%mm1 \n\t" /* r1 = H. + H. */ \ "paddsw %%mm7, %%mm7 \n\t" /* r7 = G + G */ \ "paddsw %%mm2, %%mm1 \n\t" /* r1 = R1 = A.. + H. 
*/ \ "paddsw %%mm4, %%mm7 \n\t" /* r1 = R1 = A.. + H. */ \ "psubsw %%mm3, %%mm4 \n\t" /* r4 = R4 = E. - D. */ \ "paddsw %%mm3, %%mm3 \n\t" \ "psubsw %%mm5, %%mm6 \n\t" /* r6 = R6 = F. - B.. */ \ "paddsw %%mm5, %%mm5 \n\t" \ "paddsw %%mm4, %%mm3 \n\t" /* r3 = R3 = E. + D. */ \ "paddsw %%mm6, %%mm5 \n\t" /* r5 = R5 = F. + B.. */ \ "psubsw %%mm0, %%mm7 \n\t" /* r7 = R7 = G. - C. */ \ "paddsw %%mm0, %%mm0 \n\t" \ "movq %%mm1, "I(1)"\n\t" /* save R1 */ \ "paddsw %%mm7, %%mm0 \n\t" /* r0 = R0 = G. + C. */ /* Column IDCT normalizes and stores final results */ #define ColumnIDCT() \ BeginIDCT() \ "paddsw "OC_8", %%mm2 \n\t" /* adjust R2 (and R1) for shift */ \ "paddsw %%mm1, %%mm1 \n\t" /* r1 = H. + H. */ \ "paddsw %%mm2, %%mm1 \n\t" /* r1 = R1 = A.. + H. */ \ "psraw $4, %%mm2 \n\t" /* r2 = NR2 */ \ "psubsw %%mm7, %%mm4 \n\t" /* r4 = E. = E - G */ \ "psraw $4, %%mm1 \n\t" /* r1 = NR1 */ \ "movq "I(2)", %%mm3 \n\t" /* r3 = D. */ \ "paddsw %%mm7, %%mm7 \n\t" /* r7 = G + G */ \ "movq %%mm2, "I(2)"\n\t" /* store NR2 at I2 */ \ "paddsw %%mm4, %%mm7 \n\t" /* r7 = G. = E + G */ \ "movq %%mm1, "I(1)"\n\t" /* store NR1 at I1 */ \ "psubsw %%mm3, %%mm4 \n\t" /* r4 = R4 = E. - D. */ \ "paddsw "OC_8", %%mm4 \n\t" /* adjust R4 (and R3) for shift */ \ "paddsw %%mm3, %%mm3 \n\t" /* r3 = D. + D. */ \ "paddsw %%mm4, %%mm3 \n\t" /* r3 = R3 = E. + D. */ \ "psraw $4, %%mm4 \n\t" /* r4 = NR4 */ \ "psubsw %%mm5, %%mm6 \n\t" /* r6 = R6 = F. - B.. */ \ "psraw $4, %%mm3 \n\t" /* r3 = NR3 */ \ "paddsw "OC_8", %%mm6 \n\t" /* adjust R6 (and R5) for shift */ \ "paddsw %%mm5, %%mm5 \n\t" /* r5 = B.. + B.. */ \ "paddsw %%mm6, %%mm5 \n\t" /* r5 = R5 = F. + B.. */ \ "psraw $4, %%mm6 \n\t" /* r6 = NR6 */ \ "movq %%mm4, "J(4)"\n\t" /* store NR4 at J4 */ \ "psraw $4, %%mm5 \n\t" /* r5 = NR5 */ \ "movq %%mm3, "I(3)"\n\t" /* store NR3 at I3 */ \ "psubsw %%mm0, %%mm7 \n\t" /* r7 = R7 = G. - C. */ \ "paddsw "OC_8", %%mm7 \n\t" /* adjust R7 (and R0) for shift */ \ "paddsw %%mm0, %%mm0 \n\t" /* r0 = C. + C. 
*/ \ "paddsw %%mm7, %%mm0 \n\t" /* r0 = R0 = G. + C. */ \ "psraw $4, %%mm7 \n\t" /* r7 = NR7 */ \ "movq %%mm6, "J(6)"\n\t" /* store NR6 at J6 */ \ "psraw $4, %%mm0 \n\t" /* r0 = NR0 */ \ "movq %%mm5, "J(5)"\n\t" /* store NR5 at J5 */ \ "movq %%mm7, "J(7)"\n\t" /* store NR7 at J7 */ \ "movq %%mm0, "I(0)"\n\t" /* store NR0 at I0 */ /* Following macro does two 4x4 transposes in place. At entry (we assume): r0 = a3 a2 a1 a0 I(1) = b3 b2 b1 b0 r2 = c3 c2 c1 c0 r3 = d3 d2 d1 d0 r4 = e3 e2 e1 e0 r5 = f3 f2 f1 f0 r6 = g3 g2 g1 g0 r7 = h3 h2 h1 h0 At exit, we have: I(0) = d0 c0 b0 a0 I(1) = d1 c1 b1 a1 I(2) = d2 c2 b2 a2 I(3) = d3 c3 b3 a3 J(4) = h0 g0 f0 e0 J(5) = h1 g1 f1 e1 J(6) = h2 g2 f2 e2 J(7) = h3 g3 f3 e3 I(0) I(1) I(2) I(3) is the transpose of r0 I(1) r2 r3. J(4) J(5) J(6) J(7) is the transpose of r4 r5 r6 r7. Since r1 is free at entry, we calculate the Js first. */ #define Transpose() \ "movq %%mm4, %%mm1 \n\t" /* r1 = e3 e2 e1 e0 */ \ "punpcklwd %%mm5, %%mm4 \n\t" /* r4 = f1 e1 f0 e0 */ \ "movq %%mm0, "I(0)"\n\t" /* save a3 a2 a1 a0 */ \ "punpckhwd %%mm5, %%mm1 \n\t" /* r1 = f3 e3 f2 e2 */ \ "movq %%mm6, %%mm0 \n\t" /* r0 = g3 g2 g1 g0 */ \ "punpcklwd %%mm7, %%mm6 \n\t" /* r6 = h1 g1 h0 g0 */ \ "movq %%mm4, %%mm5 \n\t" /* r5 = f1 e1 f0 e0 */ \ "punpckldq %%mm6, %%mm4 \n\t" /* r4 = h0 g0 f0 e0 = R4 */ \ "punpckhdq %%mm6, %%mm5 \n\t" /* r5 = h1 g1 f1 e1 = R5 */ \ "movq %%mm1, %%mm6 \n\t" /* r6 = f3 e3 f2 e2 */ \ "movq %%mm4, "J(4)"\n\t" \ "punpckhwd %%mm7, %%mm0 \n\t" /* r0 = h3 g3 h2 g2 */ \ "movq %%mm5, "J(5)"\n\t" \ "punpckhdq %%mm0, %%mm6 \n\t" /* r6 = h3 g3 f3 e3 = R7 */ \ "movq "I(0)", %%mm4 \n\t" /* r4 = a3 a2 a1 a0 */ \ "punpckldq %%mm0, %%mm1 \n\t" /* r1 = h2 g2 f2 e2 = R6 */ \ "movq "I(1)", %%mm5 \n\t" /* r5 = b3 b2 b1 b0 */ \ "movq %%mm4, %%mm0 \n\t" /* r0 = a3 a2 a1 a0 */ \ "movq %%mm6, "J(7)"\n\t" \ "punpcklwd %%mm5, %%mm0 \n\t" /* r0 = b1 a1 b0 a0 */ \ "movq %%mm1, "J(6)"\n\t" \ "punpckhwd %%mm5, %%mm4 \n\t" /* r4 = b3 a3 b2 a2 */ \ "movq %%mm2, 
%%mm5 \n\t" /* r5 = c3 c2 c1 c0 */ \ "punpcklwd %%mm3, %%mm2 \n\t" /* r2 = d1 c1 d0 c0 */ \ "movq %%mm0, %%mm1 \n\t" /* r1 = b1 a1 b0 a0 */ \ "punpckldq %%mm2, %%mm0 \n\t" /* r0 = d0 c0 b0 a0 = R0 */ \ "punpckhdq %%mm2, %%mm1 \n\t" /* r1 = d1 c1 b1 a1 = R1 */ \ "movq %%mm4, %%mm2 \n\t" /* r2 = b3 a3 b2 a2 */ \ "movq %%mm0, "I(0)"\n\t" \ "punpckhwd %%mm3, %%mm5 \n\t" /* r5 = d3 c3 d2 c2 */ \ "movq %%mm1, "I(1)"\n\t" \ "punpckhdq %%mm5, %%mm4 \n\t" /* r4 = d3 c3 b3 a3 = R3 */ \ "punpckldq %%mm5, %%mm2 \n\t" /* r2 = d2 c2 b2 a2 = R2 */ \ "movq %%mm4, "I(3)"\n\t" \ "movq %%mm2, "I(2)"\n\t" void ff_vp3_idct_mmx(int16_t *output_data) { /* eax = quantized input * ebx = dequantizer matrix * ecx = IDCT constants * M(I) = ecx + MaskOffset(0) + I * 8 * C(I) = ecx + CosineOffset(32) + (I-1) * 8 * edx = output * r0..r7 = mm0..mm7 */ #define C(x) AV_STRINGIFY(16*(x-1))"(%1)" #define OC_8 "%2" /* at this point, function has completed dequantization + dezigzag + * partial transposition; now do the idct itself */ #define I(x) AV_STRINGIFY(16* x )"(%0)" #define J(x) AV_STRINGIFY(16*(x-4) + 8)"(%0)" __asm__ volatile ( RowIDCT() Transpose() #undef I #undef J #define I(x) AV_STRINGIFY(16* x + 64)"(%0)" #define J(x) AV_STRINGIFY(16*(x-4) + 72)"(%0)" RowIDCT() Transpose() #undef I #undef J #define I(x) AV_STRINGIFY(16*x)"(%0)" #define J(x) AV_STRINGIFY(16*x)"(%0)" ColumnIDCT() #undef I #undef J #define I(x) AV_STRINGIFY(16*x + 8)"(%0)" #define J(x) AV_STRINGIFY(16*x + 8)"(%0)" ColumnIDCT() :: "r"(output_data), "r"(ff_vp3_idct_data), "m"(ff_pw_8) ); #undef I #undef J } void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block) { ff_vp3_idct_mmx(block); put_signed_pixels_clamped_mmx(block, dest, line_size); } void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block) { ff_vp3_idct_mmx(block); add_pixels_clamped_mmx(block, dest, line_size); } void ff_vp3_idct_dc_add_mmx2(uint8_t *dest, int linesize, const DCTELEM *block) { int dc = block[0]; dc = (46341*dc)>>16; dc = 
(46341*dc + (8<<16))>>20; __asm__ volatile( "movd %3, %%mm0 \n\t" "pshufw $0, %%mm0, %%mm0 \n\t" "pxor %%mm1, %%mm1 \n\t" "psubw %%mm0, %%mm1 \n\t" "packuswb %%mm0, %%mm0 \n\t" "packuswb %%mm1, %%mm1 \n\t" #define DC_ADD \ "movq (%0), %%mm2 \n\t" \ "movq (%0,%1), %%mm3 \n\t" \ "paddusb %%mm0, %%mm2 \n\t" \ "movq (%0,%1,2), %%mm4 \n\t" \ "paddusb %%mm0, %%mm3 \n\t" \ "movq (%0,%2), %%mm5 \n\t" \ "paddusb %%mm0, %%mm4 \n\t" \ "paddusb %%mm0, %%mm5 \n\t" \ "psubusb %%mm1, %%mm2 \n\t" \ "psubusb %%mm1, %%mm3 \n\t" \ "movq %%mm2, (%0) \n\t" \ "psubusb %%mm1, %%mm4 \n\t" \ "movq %%mm3, (%0,%1) \n\t" \ "psubusb %%mm1, %%mm5 \n\t" \ "movq %%mm4, (%0,%1,2) \n\t" \ "movq %%mm5, (%0,%2) \n\t" DC_ADD "lea (%0,%1,4), %0 \n\t" DC_ADD : "+r"(dest) : "r"((x86_reg)linesize), "r"((x86_reg)3*linesize), "r"(dc) ); }
123linslouis-android-video-cutter
jni/libavcodec/x86/vp3dsp_mmx.c
C
asf20
17,244
/*
 * XVID MPEG-4 VIDEO CODEC
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*!
 * @file
 * header for Xvid IDCT functions
 *
 * Prototypes for the x86 SIMD implementations of the Xvid-compatible
 * inverse DCT; the bodies live in the corresponding .c/.asm files.
 */

#ifndef AVCODEC_X86_IDCT_XVID_H
#define AVCODEC_X86_IDCT_XVID_H

#include <stdint.h>

/** In-place Xvid inverse DCT of an 8x8 coefficient block, MMX version. */
void ff_idct_xvid_mmx(short *block);

/** In-place Xvid inverse DCT of an 8x8 coefficient block, MMX2 version. */
void ff_idct_xvid_mmx2(short *block);

/** In-place Xvid inverse DCT of an 8x8 coefficient block, SSE2 version. */
void ff_idct_xvid_sse2(short *block);

/* NOTE(review): by FFmpeg naming convention, _put presumably stores the
 * IDCT result into dest and _add sums it onto dest (stride line_size);
 * confirm against the SSE2 implementation file. */
void ff_idct_xvid_sse2_put(uint8_t *dest, int line_size, short *block);
void ff_idct_xvid_sse2_add(uint8_t *dest, int line_size, short *block);

#endif /* AVCODEC_X86_IDCT_XVID_H */
123linslouis-android-video-cutter
jni/libavcodec/x86/idct_xvid.h
C
asf20
1,227
;****************************************************************************** ;* MMX optimized DSP utils ;* Copyright (c) 2008 Loren Merritt ;* ;* This file is part of FFmpeg. ;* ;* FFmpeg is free software; you can redistribute it and/or ;* modify it under the terms of the GNU Lesser General Public ;* License as published by the Free Software Foundation; either ;* version 2.1 of the License, or (at your option) any later version. ;* ;* FFmpeg is distributed in the hope that it will be useful, ;* but WITHOUT ANY WARRANTY; without even the implied warranty of ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ;* Lesser General Public License for more details. ;* ;* You should have received a copy of the GNU Lesser General Public ;* License along with FFmpeg; if not, write to the Free Software ;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** %include "x86inc.asm" SECTION_RODATA pb_f: times 16 db 15 pb_zzzzzzzz77777777: times 8 db -1 pb_7: times 8 db 7 pb_zzzz3333zzzzbbbb: db -1,-1,-1,-1,3,3,3,3,-1,-1,-1,-1,11,11,11,11 pb_zz11zz55zz99zzdd: db -1,-1,1,1,-1,-1,5,5,-1,-1,9,9,-1,-1,13,13 section .text align=16 %macro PSWAPD_SSE 2 pshufw %1, %2, 0x4e %endmacro %macro PSWAPD_3DN1 2 movq %1, %2 psrlq %1, 32 punpckldq %1, %2 %endmacro %macro FLOAT_TO_INT16_INTERLEAVE6 1 ; void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len) cglobal float_to_int16_interleave6_%1, 2,7,0, dst, src, src1, src2, src3, src4, src5 %ifdef ARCH_X86_64 %define lend r10d mov lend, r2d %else %define lend dword r2m %endif mov src1q, [srcq+1*gprsize] mov src2q, [srcq+2*gprsize] mov src3q, [srcq+3*gprsize] mov src4q, [srcq+4*gprsize] mov src5q, [srcq+5*gprsize] mov srcq, [srcq] sub src1q, srcq sub src2q, srcq sub src3q, srcq sub src4q, srcq sub src5q, srcq .loop: cvtps2pi mm0, [srcq] cvtps2pi mm1, [srcq+src1q] cvtps2pi mm2, [srcq+src2q] cvtps2pi mm3, 
[srcq+src3q] cvtps2pi mm4, [srcq+src4q] cvtps2pi mm5, [srcq+src5q] packssdw mm0, mm3 packssdw mm1, mm4 packssdw mm2, mm5 pswapd mm3, mm0 punpcklwd mm0, mm1 punpckhwd mm1, mm2 punpcklwd mm2, mm3 pswapd mm3, mm0 punpckldq mm0, mm2 punpckhdq mm2, mm1 punpckldq mm1, mm3 movq [dstq ], mm0 movq [dstq+16], mm2 movq [dstq+ 8], mm1 add srcq, 8 add dstq, 24 sub lend, 2 jg .loop emms RET %endmacro ; FLOAT_TO_INT16_INTERLEAVE6 %define pswapd PSWAPD_SSE FLOAT_TO_INT16_INTERLEAVE6 sse %define cvtps2pi pf2id %define pswapd PSWAPD_3DN1 FLOAT_TO_INT16_INTERLEAVE6 3dnow %undef pswapd FLOAT_TO_INT16_INTERLEAVE6 3dn2 %undef cvtps2pi %macro SCALARPRODUCT 1 ; int scalarproduct_int16(int16_t *v1, int16_t *v2, int order, int shift) cglobal scalarproduct_int16_%1, 3,3,4, v1, v2, order, shift shl orderq, 1 add v1q, orderq add v2q, orderq neg orderq movd m3, shiftm pxor m2, m2 .loop: movu m0, [v1q + orderq] movu m1, [v1q + orderq + mmsize] pmaddwd m0, [v2q + orderq] pmaddwd m1, [v2q + orderq + mmsize] paddd m2, m0 paddd m2, m1 add orderq, mmsize*2 jl .loop %if mmsize == 16 movhlps m0, m2 paddd m2, m0 psrad m2, m3 pshuflw m0, m2, 0x4e %else psrad m2, m3 pshufw m0, m2, 0x4e %endif paddd m2, m0 movd eax, m2 RET ; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul) cglobal scalarproduct_and_madd_int16_%1, 4,4,8, v1, v2, v3, order, mul shl orderq, 1 movd m7, mulm %if mmsize == 16 pshuflw m7, m7, 0 punpcklqdq m7, m7 %else pshufw m7, m7, 0 %endif pxor m6, m6 add v1q, orderq add v2q, orderq add v3q, orderq neg orderq .loop: movu m0, [v2q + orderq] movu m1, [v2q + orderq + mmsize] mova m4, [v1q + orderq] mova m5, [v1q + orderq + mmsize] movu m2, [v3q + orderq] movu m3, [v3q + orderq + mmsize] pmaddwd m0, m4 pmaddwd m1, m5 pmullw m2, m7 pmullw m3, m7 paddd m6, m0 paddd m6, m1 paddw m2, m4 paddw m3, m5 mova [v1q + orderq], m2 mova [v1q + orderq + mmsize], m3 add orderq, mmsize*2 jl .loop %if mmsize == 16 movhlps m0, m6 paddd m6, m0 pshuflw m0, m6, 0x4e %else 
pshufw m0, m6, 0x4e %endif paddd m6, m0 movd eax, m6 RET %endmacro INIT_MMX SCALARPRODUCT mmx2 INIT_XMM SCALARPRODUCT sse2 %macro SCALARPRODUCT_LOOP 1 align 16 .loop%1: sub orderq, mmsize*2 %if %1 mova m1, m4 mova m4, [v2q + orderq] mova m0, [v2q + orderq + mmsize] palignr m1, m0, %1 palignr m0, m4, %1 mova m3, m5 mova m5, [v3q + orderq] mova m2, [v3q + orderq + mmsize] palignr m3, m2, %1 palignr m2, m5, %1 %else mova m0, [v2q + orderq] mova m1, [v2q + orderq + mmsize] mova m2, [v3q + orderq] mova m3, [v3q + orderq + mmsize] %endif %define t0 [v1q + orderq] %define t1 [v1q + orderq + mmsize] %ifdef ARCH_X86_64 mova m8, t0 mova m9, t1 %define t0 m8 %define t1 m9 %endif pmaddwd m0, t0 pmaddwd m1, t1 pmullw m2, m7 pmullw m3, m7 paddw m2, t0 paddw m3, t1 paddd m6, m0 paddd m6, m1 mova [v1q + orderq], m2 mova [v1q + orderq + mmsize], m3 jg .loop%1 %if %1 jmp .end %endif %endmacro ; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul) cglobal scalarproduct_and_madd_int16_ssse3, 4,5,10, v1, v2, v3, order, mul shl orderq, 1 movd m7, mulm pshuflw m7, m7, 0 punpcklqdq m7, m7 pxor m6, m6 mov r4d, v2d and r4d, 15 and v2q, ~15 and v3q, ~15 mova m4, [v2q + orderq] mova m5, [v3q + orderq] ; linear is faster than branch tree or jump table, because the branches taken are cyclic (i.e. 
predictable) cmp r4d, 0 je .loop0 cmp r4d, 2 je .loop2 cmp r4d, 4 je .loop4 cmp r4d, 6 je .loop6 cmp r4d, 8 je .loop8 cmp r4d, 10 je .loop10 cmp r4d, 12 je .loop12 SCALARPRODUCT_LOOP 14 SCALARPRODUCT_LOOP 12 SCALARPRODUCT_LOOP 10 SCALARPRODUCT_LOOP 8 SCALARPRODUCT_LOOP 6 SCALARPRODUCT_LOOP 4 SCALARPRODUCT_LOOP 2 SCALARPRODUCT_LOOP 0 .end: movhlps m0, m6 paddd m6, m0 pshuflw m0, m6, 0x4e paddd m6, m0 movd eax, m6 RET ; void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top) cglobal add_hfyu_median_prediction_mmx2, 6,6,0, dst, top, diff, w, left, left_top movq mm0, [topq] movq mm2, mm0 movd mm4, [left_topq] psllq mm2, 8 movq mm1, mm0 por mm4, mm2 movd mm3, [leftq] psubb mm0, mm4 ; t-tl add dstq, wq add topq, wq add diffq, wq neg wq jmp .skip .loop: movq mm4, [topq+wq] movq mm0, mm4 psllq mm4, 8 por mm4, mm1 movq mm1, mm0 ; t psubb mm0, mm4 ; t-tl .skip: movq mm2, [diffq+wq] %assign i 0 %rep 8 movq mm4, mm0 paddb mm4, mm3 ; t-tl+l movq mm5, mm3 pmaxub mm3, mm1 pminub mm5, mm1 pminub mm3, mm4 pmaxub mm3, mm5 ; median paddb mm3, mm2 ; +residual %if i==0 movq mm7, mm3 psllq mm7, 56 %else movq mm6, mm3 psrlq mm7, 8 psllq mm6, 56 por mm7, mm6 %endif %if i<7 psrlq mm0, 8 psrlq mm1, 8 psrlq mm2, 8 %endif %assign i i+1 %endrep movq [dstq+wq], mm7 add wq, 8 jl .loop movzx r2d, byte [dstq-1] mov [leftq], r2d movzx r2d, byte [topq-1] mov [left_topq], r2d RET %macro ADD_HFYU_LEFT_LOOP 1 ; %1 = is_aligned add srcq, wq add dstq, wq neg wq %%.loop: mova m1, [srcq+wq] mova m2, m1 psllw m1, 8 paddb m1, m2 mova m2, m1 pshufb m1, m3 paddb m1, m2 pshufb m0, m5 mova m2, m1 pshufb m1, m4 paddb m1, m2 %if mmsize == 16 mova m2, m1 pshufb m1, m6 paddb m1, m2 %endif paddb m0, m1 %if %1 mova [dstq+wq], m0 %else movq [dstq+wq], m0 movhps [dstq+wq+8], m0 %endif add wq, mmsize jl %%.loop mov eax, mmsize-1 sub eax, wd movd m1, eax pshufb m0, m1 movd eax, m0 RET %endmacro ; int ff_add_hfyu_left_prediction(uint8_t *dst, const 
uint8_t *src, int w, int left) INIT_MMX cglobal add_hfyu_left_prediction_ssse3, 3,3,7, dst, src, w, left .skip_prologue: mova m5, [pb_7 GLOBAL] mova m4, [pb_zzzz3333zzzzbbbb GLOBAL] mova m3, [pb_zz11zz55zz99zzdd GLOBAL] movd m0, leftm psllq m0, 56 ADD_HFYU_LEFT_LOOP 1 INIT_XMM cglobal add_hfyu_left_prediction_sse4, 3,3,7, dst, src, w, left mova m5, [pb_f GLOBAL] mova m6, [pb_zzzzzzzz77777777 GLOBAL] mova m4, [pb_zzzz3333zzzzbbbb GLOBAL] mova m3, [pb_zz11zz55zz99zzdd GLOBAL] movd m0, leftm pslldq m0, 15 test srcq, 15 jnz add_hfyu_left_prediction_ssse3.skip_prologue test dstq, 15 jnz .unaligned ADD_HFYU_LEFT_LOOP 1 .unaligned: ADD_HFYU_LEFT_LOOP 0 ; float ff_scalarproduct_float_sse(const float *v1, const float *v2, int len) cglobal scalarproduct_float_sse, 3,3,2, v1, v2, offset neg offsetq shl offsetq, 2 sub v1q, offsetq sub v2q, offsetq xorps xmm0, xmm0 .loop: movaps xmm1, [v1q+offsetq] mulps xmm1, [v2q+offsetq] addps xmm0, xmm1 add offsetq, 16 js .loop movhlps xmm1, xmm0 addps xmm0, xmm1 movss xmm1, xmm0 shufps xmm0, xmm0, 1 addss xmm0, xmm1 %ifndef ARCH_X86_64 movd r0m, xmm0 fld dword r0m %endif RET
123linslouis-android-video-cutter
jni/libavcodec/x86/dsputil_yasm.asm
Assembly
asf20
10,062
/*
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * x86 inline-asm overrides for FFmpeg's scalar math helpers: 32x32->64
 * multiplies, a branch-free median, conditional-move copies and
 * negated-count shifts. Each macro/function name is #defined to itself
 * so the generic C fallback elsewhere is not also defined.
 */

#ifndef AVCODEC_X86_MATHOPS_H
#define AVCODEC_X86_MATHOPS_H

#include "config.h"
#include "libavutil/common.h"

#if ARCH_X86_32

/* MULL(ra, rb, shift): bits [shift, shift+31] of the signed 64-bit product.
 * imull leaves the product in edx:eax; shrdl shifts that pair right into
 * eax. shift must be a compile-time constant ("i" constraint). */
#define MULL(ra, rb, shift) \
({ int rt, dummy; __asm__ (\
    "imull %3 \n\t"\
    "shrdl %4, %%edx, %%eax \n\t"\
    : "=a"(rt), "=d"(dummy)\
    : "a" ((int)(ra)), "rm" ((int)(rb)), "i"(shift));\
 rt; })

/* MULH(ra, rb): high 32 bits of the signed 64-bit product (taken from edx). */
#define MULH(ra, rb) \
({ int rt, dummy;\
 __asm__ ("imull %3\n\t" : "=d"(rt), "=a"(dummy): "a" ((int)(ra)), "rm" ((int)(rb)));\
 rt; })

/* MUL64(ra, rb): full signed 64-bit product, returned via the edx:eax
 * register pair ("=A" constraint). */
#define MUL64(ra, rb) \
({ int64_t rt;\
 __asm__ ("imull %2\n\t" : "=A"(rt) : "a" ((int)(ra)), "g" ((int)(rb)));\
 rt; })
#endif

#if HAVE_CMOV
/* median of 3 */
#define mid_pred mid_pred
/* Branch-free median of a, b, c using conditional moves:
 * i = max(a,b); a = min(a,b); a = max(a,c); return min(i, a). */
static inline av_const int mid_pred(int a, int b, int c)
{
    int i=b;
    __asm__ volatile(
        "cmp %2, %1 \n\t"
        "cmovg %1, %0 \n\t"
        "cmovg %2, %1 \n\t"
        "cmp %3, %1 \n\t"
        "cmovl %3, %1 \n\t"
        "cmp %1, %0 \n\t"
        "cmovg %1, %0 \n\t"
        :"+&r"(i), "+&r"(a)
        :"r"(b), "r"(c)
    );
    return i;
}
#endif

#if HAVE_CMOV
/* COPY3_IF_LT(x, y, a, b, c, d): if (y < x) { x = y; a = b; c = d; }
 * done with one compare and three cmovs, i.e. without branches. */
#define COPY3_IF_LT(x, y, a, b, c, d)\
__asm__ volatile(\
    "cmpl %0, %3 \n\t"\
    "cmovl %3, %0 \n\t"\
    "cmovl %4, %1 \n\t"\
    "cmovl %5, %2 \n\t"\
    : "+&r" (x), "+&r" (a), "+r" (c)\
    : "r" (y), "r" (b), "r" (d)\
);
#endif

// avoid +32 for shift optimization (gcc should do that ...)
/* NEG_SSR32 / NEG_USR32: arithmetic / logical shift right by (32 - s).
 * x86 takes the shift count modulo 32, so passing -s is equivalent to
 * 32 - s without materializing the addition. */
#define NEG_SSR32 NEG_SSR32
static inline int32_t NEG_SSR32( int32_t a, int8_t s){
    __asm__ ("sarl %1, %0\n\t"
         : "+r" (a)
         : "ic" ((uint8_t)(-s))
    );
    return a;
}

#define NEG_USR32 NEG_USR32
static inline uint32_t NEG_USR32(uint32_t a, int8_t s){
    __asm__ ("shrl %1, %0\n\t"
         : "+r" (a)
         : "ic" ((uint8_t)(-s))
    );
    return a;
}

#endif /* AVCODEC_X86_MATHOPS_H */
123linslouis-android-video-cutter
jni/libavcodec/x86/mathops.h
C
asf20
2,744
/* * Copyright (c) 2008 Loren Merritt * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * SSSE3 optimized version of (put|avg)_h264_chroma_mc8. * H264_CHROMA_MC8_TMPL must be defined to the desired function name * H264_CHROMA_MC8_MV0 must be defined to a (put|avg)_pixels8 function * AVG_OP must be defined to empty for put and the identify for avg */ static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y, int rnd) { if(y==0 && x==0) { /* no filter needed */ H264_CHROMA_MC8_MV0(dst, src, stride, h); return; } assert(x<8 && y<8 && x>=0 && y>=0); if(y==0 || x==0) { /* 1 dimensional filter only */ __asm__ volatile( "movd %0, %%xmm7 \n\t" "movq %1, %%xmm6 \n\t" "pshuflw $0, %%xmm7, %%xmm7 \n\t" "movlhps %%xmm6, %%xmm6 \n\t" "movlhps %%xmm7, %%xmm7 \n\t" :: "r"(255*(x+y)+8), "m"(*(rnd?&ff_pw_4:&ff_pw_3)) ); if(x) { __asm__ volatile( "1: \n\t" "movq (%1), %%xmm0 \n\t" "movq 1(%1), %%xmm1 \n\t" "movq (%1,%3), %%xmm2 \n\t" "movq 1(%1,%3), %%xmm3 \n\t" "punpcklbw %%xmm1, %%xmm0 \n\t" "punpcklbw %%xmm3, %%xmm2 \n\t" "pmaddubsw %%xmm7, %%xmm0 \n\t" "pmaddubsw %%xmm7, %%xmm2 \n\t" AVG_OP("movq (%0), %%xmm4 \n\t") AVG_OP("movhps (%0,%3), %%xmm4 \n\t") "paddw %%xmm6, %%xmm0 \n\t" "paddw %%xmm6, %%xmm2 \n\t" "psrlw $3, %%xmm0 \n\t" 
"psrlw $3, %%xmm2 \n\t" "packuswb %%xmm2, %%xmm0 \n\t" AVG_OP("pavgb %%xmm4, %%xmm0 \n\t") "movq %%xmm0, (%0) \n\t" "movhps %%xmm0, (%0,%3) \n\t" "sub $2, %2 \n\t" "lea (%1,%3,2), %1 \n\t" "lea (%0,%3,2), %0 \n\t" "jg 1b \n\t" :"+r"(dst), "+r"(src), "+r"(h) :"r"((x86_reg)stride) ); } else { __asm__ volatile( "1: \n\t" "movq (%1), %%xmm0 \n\t" "movq (%1,%3), %%xmm1 \n\t" "movdqa %%xmm1, %%xmm2 \n\t" "movq (%1,%3,2), %%xmm3 \n\t" "punpcklbw %%xmm1, %%xmm0 \n\t" "punpcklbw %%xmm3, %%xmm2 \n\t" "pmaddubsw %%xmm7, %%xmm0 \n\t" "pmaddubsw %%xmm7, %%xmm2 \n\t" AVG_OP("movq (%0), %%xmm4 \n\t") AVG_OP("movhps (%0,%3), %%xmm4 \n\t") "paddw %%xmm6, %%xmm0 \n\t" "paddw %%xmm6, %%xmm2 \n\t" "psrlw $3, %%xmm0 \n\t" "psrlw $3, %%xmm2 \n\t" "packuswb %%xmm2, %%xmm0 \n\t" AVG_OP("pavgb %%xmm4, %%xmm0 \n\t") "movq %%xmm0, (%0) \n\t" "movhps %%xmm0, (%0,%3) \n\t" "sub $2, %2 \n\t" "lea (%1,%3,2), %1 \n\t" "lea (%0,%3,2), %0 \n\t" "jg 1b \n\t" :"+r"(dst), "+r"(src), "+r"(h) :"r"((x86_reg)stride) ); } return; } /* general case, bilinear */ __asm__ volatile( "movd %0, %%xmm7 \n\t" "movd %1, %%xmm6 \n\t" "movdqa %2, %%xmm5 \n\t" "pshuflw $0, %%xmm7, %%xmm7 \n\t" "pshuflw $0, %%xmm6, %%xmm6 \n\t" "movlhps %%xmm7, %%xmm7 \n\t" "movlhps %%xmm6, %%xmm6 \n\t" :: "r"((x*255+8)*(8-y)), "r"((x*255+8)*y), "m"(*(rnd?&ff_pw_32:&ff_pw_28)) ); __asm__ volatile( "movq (%1), %%xmm0 \n\t" "movq 1(%1), %%xmm1 \n\t" "punpcklbw %%xmm1, %%xmm0 \n\t" "add %3, %1 \n\t" "1: \n\t" "movq (%1), %%xmm1 \n\t" "movq 1(%1), %%xmm2 \n\t" "movq (%1,%3), %%xmm3 \n\t" "movq 1(%1,%3), %%xmm4 \n\t" "lea (%1,%3,2), %1 \n\t" "punpcklbw %%xmm2, %%xmm1 \n\t" "punpcklbw %%xmm4, %%xmm3 \n\t" "movdqa %%xmm1, %%xmm2 \n\t" "movdqa %%xmm3, %%xmm4 \n\t" "pmaddubsw %%xmm7, %%xmm0 \n\t" "pmaddubsw %%xmm6, %%xmm1 \n\t" "pmaddubsw %%xmm7, %%xmm2 \n\t" "pmaddubsw %%xmm6, %%xmm3 \n\t" "paddw %%xmm5, %%xmm0 \n\t" "paddw %%xmm5, %%xmm2 \n\t" "paddw %%xmm0, %%xmm1 \n\t" "paddw %%xmm2, %%xmm3 \n\t" "movdqa %%xmm4, %%xmm0 \n\t" "psrlw $6, 
%%xmm1 \n\t" "psrlw $6, %%xmm3 \n\t" AVG_OP("movq (%0), %%xmm2 \n\t") AVG_OP("movhps (%0,%3), %%xmm2 \n\t") "packuswb %%xmm3, %%xmm1 \n\t" AVG_OP("pavgb %%xmm2, %%xmm1 \n\t") "movq %%xmm1, (%0)\n\t" "movhps %%xmm1, (%0,%3)\n\t" "sub $2, %2 \n\t" "lea (%0,%3,2), %0 \n\t" "jg 1b \n\t" :"+r"(dst), "+r"(src), "+r"(h) :"r"((x86_reg)stride) ); } static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { __asm__ volatile( "movd %0, %%mm7 \n\t" "movd %1, %%mm6 \n\t" "movq %2, %%mm5 \n\t" "pshufw $0, %%mm7, %%mm7 \n\t" "pshufw $0, %%mm6, %%mm6 \n\t" :: "r"((x*255+8)*(8-y)), "r"((x*255+8)*y), "m"(ff_pw_32) ); __asm__ volatile( "movd (%1), %%mm0 \n\t" "punpcklbw 1(%1), %%mm0 \n\t" "add %3, %1 \n\t" "1: \n\t" "movd (%1), %%mm1 \n\t" "movd (%1,%3), %%mm3 \n\t" "punpcklbw 1(%1), %%mm1 \n\t" "punpcklbw 1(%1,%3), %%mm3 \n\t" "lea (%1,%3,2), %1 \n\t" "movq %%mm1, %%mm2 \n\t" "movq %%mm3, %%mm4 \n\t" "pmaddubsw %%mm7, %%mm0 \n\t" "pmaddubsw %%mm6, %%mm1 \n\t" "pmaddubsw %%mm7, %%mm2 \n\t" "pmaddubsw %%mm6, %%mm3 \n\t" "paddw %%mm5, %%mm0 \n\t" "paddw %%mm5, %%mm2 \n\t" "paddw %%mm0, %%mm1 \n\t" "paddw %%mm2, %%mm3 \n\t" "movq %%mm4, %%mm0 \n\t" "psrlw $6, %%mm1 \n\t" "psrlw $6, %%mm3 \n\t" "packuswb %%mm1, %%mm1 \n\t" "packuswb %%mm3, %%mm3 \n\t" AVG_OP("pavgb (%0), %%mm1 \n\t") AVG_OP("pavgb (%0,%3), %%mm3 \n\t") "movd %%mm1, (%0)\n\t" "movd %%mm3, (%0,%3)\n\t" "sub $2, %2 \n\t" "lea (%0,%3,2), %0 \n\t" "jg 1b \n\t" :"+r"(dst), "+r"(src), "+r"(h) :"r"((x86_reg)stride) ); }
123linslouis-android-video-cutter
jni/libavcodec/x86/dsputil_h264_template_ssse3.c
C
asf20
7,208
/* * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "dsputil_mmx.h" DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1 ) = 0x0103010301030103ULL; DECLARE_ALIGNED(8, static const uint64_t, ff_pb_7_3 ) = 0x0307030703070307ULL; /***********************************/ /* IDCT */ #define SUMSUB_BADC( a, b, c, d ) \ "paddw "#b", "#a" \n\t"\ "paddw "#d", "#c" \n\t"\ "paddw "#b", "#b" \n\t"\ "paddw "#d", "#d" \n\t"\ "psubw "#a", "#b" \n\t"\ "psubw "#c", "#d" \n\t" #define SUMSUBD2_AB( a, b, t ) \ "movq "#b", "#t" \n\t"\ "psraw $1 , "#b" \n\t"\ "paddw "#a", "#b" \n\t"\ "psraw $1 , "#a" \n\t"\ "psubw "#t", "#a" \n\t" #define IDCT4_1D( s02, s13, d02, d13, t ) \ SUMSUB_BA ( s02, d02 )\ SUMSUBD2_AB( s13, d13, t )\ SUMSUB_BADC( d13, s02, s13, d02 ) #define STORE_DIFF_4P( p, t, z ) \ "psraw $6, "#p" \n\t"\ "movd (%0), "#t" \n\t"\ "punpcklbw "#z", "#t" \n\t"\ "paddsw "#t", "#p" \n\t"\ "packuswb "#z", "#p" \n\t"\ "movd "#p", (%0) \n\t" static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride) { /* Load dct coeffs */ __asm__ volatile( "movq (%0), %%mm0 \n\t" "movq 8(%0), %%mm1 \n\t" "movq 16(%0), %%mm2 \n\t" "movq 24(%0), %%mm3 \n\t" :: "r"(block) ); __asm__ volatile( /* mm1=s02+s13 mm2=s02-s13 mm4=d02+d13 
mm0=d02-d13 */ IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 ) "movq %0, %%mm6 \n\t" /* in: 1,4,0,2 out: 1,2,3,0 */ TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 ) "paddw %%mm6, %%mm3 \n\t" /* mm2=s02+s13 mm3=s02-s13 mm4=d02+d13 mm1=d02-d13 */ IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 ) "pxor %%mm7, %%mm7 \n\t" :: "m"(ff_pw_32)); __asm__ volatile( STORE_DIFF_4P( %%mm0, %%mm1, %%mm7) "add %1, %0 \n\t" STORE_DIFF_4P( %%mm2, %%mm1, %%mm7) "add %1, %0 \n\t" STORE_DIFF_4P( %%mm3, %%mm1, %%mm7) "add %1, %0 \n\t" STORE_DIFF_4P( %%mm4, %%mm1, %%mm7) : "+r"(dst) : "r" ((x86_reg)stride) ); } static inline void h264_idct8_1d(int16_t *block) { __asm__ volatile( "movq 112(%0), %%mm7 \n\t" "movq 80(%0), %%mm0 \n\t" "movq 48(%0), %%mm3 \n\t" "movq 16(%0), %%mm5 \n\t" "movq %%mm0, %%mm4 \n\t" "movq %%mm5, %%mm1 \n\t" "psraw $1, %%mm4 \n\t" "psraw $1, %%mm1 \n\t" "paddw %%mm0, %%mm4 \n\t" "paddw %%mm5, %%mm1 \n\t" "paddw %%mm7, %%mm4 \n\t" "paddw %%mm0, %%mm1 \n\t" "psubw %%mm5, %%mm4 \n\t" "paddw %%mm3, %%mm1 \n\t" "psubw %%mm3, %%mm5 \n\t" "psubw %%mm3, %%mm0 \n\t" "paddw %%mm7, %%mm5 \n\t" "psubw %%mm7, %%mm0 \n\t" "psraw $1, %%mm3 \n\t" "psraw $1, %%mm7 \n\t" "psubw %%mm3, %%mm5 \n\t" "psubw %%mm7, %%mm0 \n\t" "movq %%mm4, %%mm3 \n\t" "movq %%mm1, %%mm7 \n\t" "psraw $2, %%mm1 \n\t" "psraw $2, %%mm3 \n\t" "paddw %%mm5, %%mm3 \n\t" "psraw $2, %%mm5 \n\t" "paddw %%mm0, %%mm1 \n\t" "psraw $2, %%mm0 \n\t" "psubw %%mm4, %%mm5 \n\t" "psubw %%mm0, %%mm7 \n\t" "movq 32(%0), %%mm2 \n\t" "movq 96(%0), %%mm6 \n\t" "movq %%mm2, %%mm4 \n\t" "movq %%mm6, %%mm0 \n\t" "psraw $1, %%mm4 \n\t" "psraw $1, %%mm6 \n\t" "psubw %%mm0, %%mm4 \n\t" "paddw %%mm2, %%mm6 \n\t" "movq (%0), %%mm2 \n\t" "movq 64(%0), %%mm0 \n\t" SUMSUB_BA( %%mm0, %%mm2 ) SUMSUB_BA( %%mm6, %%mm0 ) SUMSUB_BA( %%mm4, %%mm2 ) SUMSUB_BA( %%mm7, %%mm6 ) SUMSUB_BA( %%mm5, %%mm4 ) SUMSUB_BA( %%mm3, %%mm2 ) SUMSUB_BA( %%mm1, %%mm0 ) :: "r"(block) ); } static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride) { int i; 
    /* Temp buffer holding the half-transformed 8x8 block between passes. */
    DECLARE_ALIGNED(8, int16_t, b2)[64];
    /* Fold the final +32 rounding term into the DC coefficient up front. */
    block[0] += 32;
    /* Column pass: run the 1-D IDCT on each 8x4 half (result lives in mm0-mm7),
     * then transpose it out to b2. */
    for(i=0; i<2; i++){
        DECLARE_ALIGNED(8, uint64_t, tmp);
        h264_idct8_1d(block+4*i);
        __asm__ volatile(
            "movq %%mm7, %0 \n\t"   /* spill mm7 — TRANSPOSE4 needs a scratch reg */
            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
            "movq %%mm0, 8(%1) \n\t"
            "movq %%mm6, 24(%1) \n\t"
            "movq %%mm7, 40(%1) \n\t"
            "movq %%mm4, 56(%1) \n\t"
            "movq %0, %%mm7 \n\t"   /* reload the spilled row */
            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
            "movq %%mm7, (%1) \n\t"
            "movq %%mm1, 16(%1) \n\t"
            "movq %%mm0, 32(%1) \n\t"
            "movq %%mm3, 48(%1) \n\t"
            : "=m"(tmp)
            : "r"(b2+32*i)
            : "memory"
        );
    }
    /* Row pass: 1-D IDCT on b2, then >>6 final scaling, stored back to b2. */
    for(i=0; i<2; i++){
        h264_idct8_1d(b2+4*i);
        __asm__ volatile(
            "psraw $6, %%mm7 \n\t"
            "psraw $6, %%mm6 \n\t"
            "psraw $6, %%mm5 \n\t"
            "psraw $6, %%mm4 \n\t"
            "psraw $6, %%mm3 \n\t"
            "psraw $6, %%mm2 \n\t"
            "psraw $6, %%mm1 \n\t"
            "psraw $6, %%mm0 \n\t"
            "movq %%mm7, (%0) \n\t"
            "movq %%mm5, 16(%0) \n\t"
            "movq %%mm3, 32(%0) \n\t"
            "movq %%mm1, 48(%0) \n\t"
            "movq %%mm0, 64(%0) \n\t"
            "movq %%mm2, 80(%0) \n\t"
            "movq %%mm4, 96(%0) \n\t"
            "movq %%mm6, 112(%0) \n\t"
            :: "r"(b2+4*i)
            : "memory"
        );
    }
    /* Add the reconstructed residual to the destination with clamping. */
    add_pixels_clamped_mmx(b2, dst, stride);
}

/* Scale p by >>6, add the 8 bytes at d (zero-extended through z into t),
 * saturate to u8 and store back to d.  t is clobbered as scratch. */
#define STORE_DIFF_8P( p, d, t, z )\
    "movq "#d", "#t" \n"\
    "psraw $6, "#p" \n"\
    "punpcklbw "#z", "#t" \n"\
    "paddsw "#t", "#p" \n"\
    "packuswb "#p", "#p" \n"\
    "movq "#p", "#d" \n"

/* One 8-wide 1-D H.264 IDCT stage in SSE2 registers a..h.
 * Rows 0 and 4 are loaded from memory operand %1 at the end; the other six
 * rows are expected in b,c,d,e(unused on input),f,g,h by the caller. */
#define H264_IDCT8_1D_SSE2(a,b,c,d,e,f,g,h)\
    "movdqa "#c", "#a" \n"\
    "movdqa "#g", "#e" \n"\
    "psraw $1, "#c" \n"\
    "psraw $1, "#g" \n"\
    "psubw "#e", "#c" \n"\
    "paddw "#a", "#g" \n"\
    "movdqa "#b", "#e" \n"\
    "psraw $1, "#e" \n"\
    "paddw "#b", "#e" \n"\
    "paddw "#d", "#e" \n"\
    "paddw "#f", "#e" \n"\
    "movdqa "#f", "#a" \n"\
    "psraw $1, "#a" \n"\
    "paddw "#f", "#a" \n"\
    "paddw "#h", "#a" \n"\
    "psubw "#b", "#a" \n"\
    "psubw "#d", "#b" \n"\
    "psubw "#d", "#f" \n"\
    "paddw "#h", "#b" \n"\
    "psubw "#h", "#f" \n"\
    "psraw $1, "#d" \n"\
    "psraw $1, "#h" \n"\
    "psubw "#d", "#b" \n"\
    "psubw "#h", "#f" \n"\
    "movdqa "#e", "#d" \n"\
    "movdqa "#a", "#h" \n"\
    "psraw $2, "#d" \n"\
    "psraw $2, "#h" \n"\
    "paddw "#f", "#d" \n"\
    "paddw "#b", "#h" \n"\
    "psraw $2, "#f" \n"\
    "psraw $2, "#b" \n"\
    "psubw "#f", "#e" \n"\
    "psubw "#a", "#b" \n"\
    "movdqa 0x00(%1), "#a" \n"\
    "movdqa 0x40(%1), "#f" \n"\
    SUMSUB_BA(f, a)\
    SUMSUB_BA(g, f)\
    SUMSUB_BA(c, a)\
    SUMSUB_BA(e, g)\
    SUMSUB_BA(b, c)\
    SUMSUB_BA(h, a)\
    SUMSUB_BA(d, f)

/* Full 8x8 IDCT-and-add in SSE2: two 1-D passes with a transpose in between,
 * then >>6 scale and clamped add to dst (see STORE_DIFF_8P).
 * %1 = block, %2 = stride, %3 = 3*stride, %4 = ff_pw_32 rounding constant. */
static void ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride)
{
    __asm__ volatile(
        "movdqa 0x10(%1), %%xmm1 \n"
        "movdqa 0x20(%1), %%xmm2 \n"
        "movdqa 0x30(%1), %%xmm3 \n"
        "movdqa 0x50(%1), %%xmm5 \n"
        "movdqa 0x60(%1), %%xmm6 \n"
        "movdqa 0x70(%1), %%xmm7 \n"
        H264_IDCT8_1D_SSE2(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7)
        TRANSPOSE8(%%xmm4, %%xmm1, %%xmm7, %%xmm3, %%xmm5, %%xmm0, %%xmm2, %%xmm6, (%1))
        "paddw %4, %%xmm4 \n"      /* add the rounding bias to row 0 */
        "movdqa %%xmm4, 0x00(%1) \n"
        "movdqa %%xmm2, 0x40(%1) \n"
        H264_IDCT8_1D_SSE2(%%xmm4, %%xmm0, %%xmm6, %%xmm3, %%xmm2, %%xmm5, %%xmm7, %%xmm1)
        "movdqa %%xmm6, 0x60(%1) \n"  /* park two rows: not enough xmm regs for all 8 */
        "movdqa %%xmm7, 0x70(%1) \n"
        "pxor %%xmm7, %%xmm7 \n"
        STORE_DIFF_8P(%%xmm2, (%0), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm0, (%0,%2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm1, (%0,%2,2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm3, (%0,%3), %%xmm6, %%xmm7)
        "lea (%0,%2,4), %0 \n"
        STORE_DIFF_8P(%%xmm5, (%0), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm4, (%0,%2), %%xmm6, %%xmm7)
        "movdqa 0x60(%1), %%xmm0 \n"
        "movdqa 0x70(%1), %%xmm1 \n"
        STORE_DIFF_8P(%%xmm0, (%0,%2,2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm1, (%0,%3), %%xmm6, %%xmm7)
        :"+r"(dst)
        :"r"(block), "r"((x86_reg)stride), "r"((x86_reg)3L*stride), "m"(ff_pw_32)
    );
}

/* DC-only 4x4 IDCT-and-add: broadcast the (rounded, >>6) DC value and add it
 * to a 4x4 pixel area with unsigned saturation.  mm0 holds +dc bytes, mm1
 * holds -dc bytes, so paddusb/psubusb implements a signed clamped add. */
static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    __asm__ volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"   /* mm0 = max(dc,0) replicated */
        "packuswb %%mm1, %%mm1 \n\t"   /* mm1 = max(-dc,0) replicated */
        ::"r"(dc)
    );
    __asm__ volatile(
        "movd %0, %%mm2 \n\t"
        "movd %1, %%mm3 \n\t"
        "movd %2, %%mm4 \n\t"
        "movd %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movd %%mm2, %0 \n\t"
        "movd %%mm3, %1 \n\t"
        "movd %%mm4, %2 \n\t"
        "movd %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dst+0*stride)),
         "+m"(*(uint32_t*)(dst+1*stride)),
         "+m"(*(uint32_t*)(dst+2*stride)),
         "+m"(*(uint32_t*)(dst+3*stride))
    );
}

/* DC-only 8x8 IDCT-and-add: same +dc/-dc saturating trick as the 4x4
 * version, applied to an 8x8 area in two 8x4 passes. */
static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    int y;
    __asm__ volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    for(y=2; y--; dst += 4*stride){
    __asm__ volatile(
        "movq %0, %%mm2 \n\t"
        "movq %1, %%mm3 \n\t"
        "movq %2, %%mm4 \n\t"
        "movq %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movq %%mm2, %0 \n\t"
        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %3 \n\t"
        :"+m"(*(uint64_t*)(dst+0*stride)),
         "+m"(*(uint64_t*)(dst+1*stride)),
         "+m"(*(uint64_t*)(dst+2*stride)),
         "+m"(*(uint64_t*)(dst+3*stride))
    );
    }
}

//FIXME this table is a duplicate from h264data.h, and will be removed once the tables from h264 have been split
/* Maps block index 0..23 to its position in the 8-wide nnz cache layout. */
static const uint8_t scan8[16 + 2*4]={
 4+1*8, 5+1*8, 4+2*8, 5+2*8,
 6+1*8, 7+1*8, 6+2*8, 7+2*8,
 4+3*8, 5+3*8, 4+4*8, 5+4*8,
 6+3*8, 7+3*8, 6+4*8, 7+4*8,
 1+1*8, 2+1*8,
 1+2*8, 2+2*8,
 1+4*8, 2+4*8,
 1+5*8, 2+5*8,
};

/* IDCT-add every 4x4 block (0..15) that has nonzero coefficients. */
static void ff_h264_idct_add16_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ])
            ff_h264_idct_add_mmx(dst + block_offset[i], block + i*16, stride);
    }
}

/* IDCT-add every 8x8 block (step 4 over the 4x4 index space) with nonzero
 * coefficients. */
static void ff_h264_idct8_add4_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        if(nnzc[ scan8[i] ])
            ff_h264_idct8_add_mmx(dst + block_offset[i], block + i*16, stride);
    }
}

/* IDCT-add all 16 4x4 blocks; a block whose only nonzero coefficient is the
 * DC (nnz==1 and block[i*16]!=0) takes the cheap DC-only path. */
static void ff_h264_idct_add16_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct_add_mmx    (dst + block_offset[i], block + i*16, stride);
        }
    }
}

/* Intra variant: also process blocks with only a DC coefficient
 * (nnzc may be 0 while block[i*16] is set). */
static void ff_h264_idct_add16intra_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ] || block[i*16])
            ff_h264_idct_add_mmx(dst + block_offset[i], block + i*16, stride);
    }
}

/* Intra variant with the fast DC-only path for blocks where only the DC
 * coefficient survives. */
static void ff_h264_idct_add16intra_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ])      ff_h264_idct_add_mmx    (dst + block_offset[i], block + i*16, stride);
        else if(block[i*16])      ff_h264_idct_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
    }
}

/* 8x8-transform variant: four 8x8 blocks, DC-only fast path when a block has
 * exactly one nonzero coefficient and it is the DC. */
static void ff_h264_idct8_add4_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_mmx    (dst + block_offset[i], block + i*16, stride);
        }
    }
}

/* Same dispatch as above but using the SSE2 full-IDCT kernel. */
static void ff_h264_idct8_add4_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_sse2   (dst + block_offset[i], block + i*16, stride);
        }
    }
}

/* IDCT-add blocks 16..23; dest[0] or dest[1] is selected by bit 2 of the
 * block index ((i&4)>>2), one pointer per plane. */
static void ff_h264_idct_add8_mmx(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=16; i<16+8; i++){
        if(nnzc[ scan8[i] ] || block[i*16])
            ff_h264_idct_add_mmx    (dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
    }
}

/* Blocks 16..23 with the DC-only fast path; plane chosen by (i&4)>>2. */
static void ff_h264_idct_add8_mmx2(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=16; i<16+8; i++){
        if(nnzc[ scan8[i] ])      ff_h264_idct_add_mmx    (dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
        else if(block[i*16])      ff_h264_idct_dc_add_mmx2(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
    }
}

#if CONFIG_GPL && HAVE_YASM
/* DC-only add for an 8x4 area covering TWO adjacent 4x4 blocks at once:
 * block[0] and block[16] each supply a DC; the low/high halves of mm0/mm1
 * carry the +dc/-dc bytes for the left and right 4x4 half respectively. */
static void ff_h264_idct_dc_add8_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    __asm__ volatile(
        "movd %0, %%mm0 \n\t"           //  0 0 X D
        "punpcklwd %1, %%mm0 \n\t"      //  x X d D
        "paddsw %2, %%mm0 \n\t"         /* +32 rounding */
        "psraw $6, %%mm0 \n\t"
        "punpcklwd %%mm0, %%mm0 \n\t"   //  d d D D
        "pxor %%mm1, %%mm1 \n\t"        //  0 0 0 0
        "psubw %%mm0, %%mm1 \n\t"       //  -d-d-D-D
        "packuswb %%mm1, %%mm0 \n\t"    //  -d-d-D-D d d D D
        "pshufw $0xFA, %%mm0, %%mm1 \n\t" // -d-d-d-d-D-D-D-D
        "punpcklwd %%mm0, %%mm0 \n\t"   //  d d d d D D D D
        ::"m"(block[ 0]),
          "m"(block[16]),
          "m"(ff_pw_32)
    );
    __asm__ volatile(
        "movq %0, %%mm2 \n\t"
        "movq %1, %%mm3 \n\t"
        "movq %2, %%mm4 \n\t"
        "movq %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movq %%mm2, %0 \n\t"
        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %3 \n\t"
        :"+m"(*(uint64_t*)(dst+0*stride)),
         "+m"(*(uint64_t*)(dst+1*stride)),
         "+m"(*(uint64_t*)(dst+2*stride)),
         "+m"(*(uint64_t*)(dst+3*stride))
    );
}

/* External x264 SSE2 kernel: IDCT-add of two horizontally adjacent 4x4
 * blocks (an 8x4 area) in one call. */
extern void ff_x264_add8x4_idct_sse2(uint8_t *dst, int16_t *block, int stride);

/* Process the 16 4x4 blocks in pairs through the 8x4 SSE2 kernel whenever
 * either block of the pair has nonzero coefficients. */
static void ff_h264_idct_add16_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=2)
        if(nnzc[ scan8[i+0] ]|nnzc[ scan8[i+1] ])
            ff_x264_add8x4_idct_sse2 (dst + block_offset[i], block + i*16, stride);
}

static void ff_h264_idct_add16intra_sse2(uint8_t *dst, const int
 *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    /* Intra pair-wise variant: full 8x4 kernel if either block of the pair
     * has coefficients, otherwise the paired DC-only path if either DC is set. */
    int i;
    for(i=0; i<16; i+=2){
        if(nnzc[ scan8[i+0] ]|nnzc[ scan8[i+1] ])
            ff_x264_add8x4_idct_sse2 (dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]|block[i*16+16])
            ff_h264_idct_dc_add8_mmx2(dst + block_offset[i], block + i*16, stride);
    }
}

/* Pair-wise processing of blocks 16..23; plane selected by (i&4)>>2. */
static void ff_h264_idct_add8_sse2(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=16; i<16+8; i+=2){
        if(nnzc[ scan8[i+0] ]|nnzc[ scan8[i+1] ])
            ff_x264_add8x4_idct_sse2 (dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
        else if(block[i*16]|block[i*16+16])
            ff_h264_idct_dc_add8_mmx2(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
    }
}
#endif

/***********************************/
/* deblocking */

// out: o = |x-y|>a
// clobbers: t
#define DIFF_GT_MMX(x,y,a,o,t)\
    "movq "#y", "#t" \n\t"\
    "movq "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t"\
    "psubusb "#y", "#o" \n\t"\
    "por "#t", "#o" \n\t"\
    "psubusb "#a", "#o" \n\t"

// out: o = |x-y|>a  (result as a pcmpeqb byte mask)
// clobbers: t
#define DIFF_GT2_MMX(x,y,a,o,t)\
    "movq "#y", "#t" \n\t"\
    "movq "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t"\
    "psubusb "#y", "#o" \n\t"\
    "psubusb "#a", "#t" \n\t"\
    "psubusb "#a", "#o" \n\t"\
    "pcmpeqb "#t", "#o" \n\t"\

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
// out: mm5=beta-1, mm7=mask
// clobbers: mm4,mm6
#define H264_DEBLOCK_MASK(alpha1, beta1) \
    "pshufw $0, "#alpha1", %%mm4 \n\t"\
    "pshufw $0, "#beta1 ", %%mm5 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
    DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpeqb %%mm6, %%mm7 \n\t"

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
// out: mm1=p0' mm2=q0'
// clobbers: mm0,3-6
#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
    "movq %%mm1 , %%mm5
                           %%mm6, %%mm4) // |p2-p0|>beta-1
        "pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta
        "pand %1, %%mm7 \n\t" // mask & tc0
        "movq %%mm7, %%mm4 \n\t"
        "psubb %%mm6, %%mm7 \n\t"
        "pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0
        H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%2)", "(%2,%4)", %%mm6, %%mm4)

        /* filter q1 */
        "movq (%3,%4,2), %%mm4 \n\t" //q2
        DIFF_GT2_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
        "pand %0, %%mm6 \n\t"
        "movq %1, %%mm5 \n\t" // can be merged with the and below but is slower than
        "pand %%mm6, %%mm5 \n\t"
        "psubb %%mm6, %%mm7 \n\t"
        "movq (%3,%4), %%mm3 \n\t"
        H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%3,%4,2)", "(%3,%4)", %%mm5, %%mm6)

        /* filter p0, q0 */
        H264_DEBLOCK_P0_Q0(%9, unused)
        "movq %%mm1, (%2,%4,2) \n\t"
        "movq %%mm2, (%3) \n\t"

        : "=m"(tmp0[0]), "=m"(tmp0[1])
        : "r"(pix-3*stride), "r"(pix), "r"((x86_reg)stride),
          "m"(*tmp0/*unused*/), "m"(*(uint32_t*)tc0), "m"(alpha1), "m"(beta1),
          "m"(ff_bone)
    );
}

/* Vertical luma edge filter: the 8-pixel-wide kernel is run twice (left and
 * right half), each half skipped when both its tc0 values are negative. */
static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
}

/* Horizontal luma edge filter: transpose an 8x8 neighbourhood into a temp
 * buffer, run the vertical kernel on it, transpose the two changed pixel
 * columns back. */
static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    // also, it only needs to transpose 6x8
    DECLARE_ALIGNED(8, uint8_t, trans)[8*8];
    int i;
    for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
        if((tc0[0] & tc0[1]) < 0)
            continue;
        transpose4x4(trans,       pix-4,          8, stride);
        transpose4x4(trans  +4*8, pix,            8, stride);
        transpose4x4(trans+4,     pix-4+4*stride, 8, stride);
        transpose4x4(trans+4+4*8, pix  +4*stride, 8, stride);
        h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
        transpose4x4(pix-2,          trans  +2*8, stride, 8);
        transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
    }
}

/* Chroma edge filter kernel: builds the alpha/beta mask, masks tc, and
 * filters only p0/q0 (no p1/q1 update for chroma). */
static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1,
                                                int8_t *tc0)
{
    __asm__ volatile(
        "movq (%0), %%mm0 \n\t" //p1
        "movq (%0,%2), %%mm1 \n\t" //p0
        "movq (%1), %%mm2 \n\t" //q0
        "movq (%1,%2), %%mm3 \n\t" //q1
        H264_DEBLOCK_MASK(%4, %5)
        "movd %3, %%mm6 \n\t"
        "punpcklbw %%mm6, %%mm6 \n\t"
        "pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask
        H264_DEBLOCK_P0_Q0(%6, %7)
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"

        :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride),
           "r"(*(uint32_t*)tc0),
           "m"(alpha1), "m"(beta1), "m"(ff_bone), "m"(ff_pb_3F)
    );
}

/* Vertical chroma edge: kernel applied directly. */
static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
}

/* Horizontal chroma edge: transpose a 4-wide strip, filter vertically,
 * transpose back. */
static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    DECLARE_ALIGNED(8, uint8_t, trans)[8*4];
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}

// p0 = (p0 + q1 + 2*p1 + 2) >> 2
#define H264_FILTER_CHROMA4(p0, p1, q1, one) \
    "movq "#p0", %%mm4 \n\t"\
    "pxor "#q1", %%mm4 \n\t"\
    "pand "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\
    "pavgb "#q1", "#p0" \n\t"\
    "psubusb %%mm4, "#p0" \n\t"\
    "pavgb "#p1", "#p0" \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\

/* Intra chroma filter kernel: compute candidate p0'/q0' with
 * H264_FILTER_CHROMA4, then blend them in only where the mask (mm7) allows. */
static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
{
    __asm__ volatile(
        "movq (%0), %%mm0 \n\t"
        "movq (%0,%2), %%mm1 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq (%1,%2), %%mm3 \n\t"
        H264_DEBLOCK_MASK(%3, %4)
        "movq %%mm1, %%mm5 \n\t"
        "movq %%mm2, %%mm6 \n\t"
        H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
        H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
        "psubb %%mm5, %%mm1 \n\t"
        "psubb %%mm6, %%mm2 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm7, %%mm2 \n\t"
        "paddb %%mm5, %%mm1 \n\t"
        "paddb %%mm6, %%mm2 \n\t"
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"
        :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride),
           "m"(alpha1), "m"(beta1), "m"(ff_bone)
    );
}

static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
}

static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    DECLARE_ALIGNED(8, uint8_t, trans)[8*4];
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}

/* Compute deblocking strength bS for all edges of a macroblock from nnz,
 * reference indices and motion vectors.  mm7 = ff_pb_1, mm6 = MV-limit
 * constant (ff_pb_3, or ff_pb_3_1 for field mode), mm5 = 2*mm6. */
static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
    int dir;
    __asm__ volatile(
        "movq %0, %%mm7 \n"
        "movq %1, %%mm6 \n"
        ::"m"(ff_pb_1), "m"(ff_pb_3)
    );
    if(field)
        __asm__ volatile(
            "movq %0, %%mm6 \n"
            ::"m"(ff_pb_3_1)
        );
    __asm__ volatile(
        "movq %%mm6, %%mm5 \n"
        "paddb %%mm5, %%mm5 \n"
    :);

    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    for( dir=1; dir>=0; dir-- ) {
        const x86_reg d_idx = dir ? -8 : -1;   /* offset to the neighbouring block row/column */
        const int mask_mv = dir ? mask_mv1 : mask_mv0;
        DECLARE_ALIGNED(8, const uint64_t, mask_dir) = dir ?
 0 : 0xffffffffffffffffULL;
        int b_idx, edge;
        for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
            /* for dir==0, keep the strength carried over from the dir==1 pass */
            __asm__ volatile(
                "pand %0, %%mm0 \n\t"
                ::"m"(mask_dir)
            );
            if(!(mask_mv & edge)) {
                if(bidir) {
                    /* Bidirectional: compare refs/MVs of both lists in both
                     * pairings (L0/L0,L1/L1 and the swapped pairing), take the
                     * minimum strength of the two. */
                    __asm__ volatile(
                        "movd (%1,%0), %%mm2 \n"
                        "punpckldq 40(%1,%0), %%mm2 \n" // { ref0[bn], ref1[bn] }
                        "pshufw $0x44, (%1), %%mm0 \n" // { ref0[b], ref0[b] }
                        "pshufw $0x44, 40(%1), %%mm1 \n" // { ref1[b], ref1[b] }
                        "pshufw $0x4E, %%mm2, %%mm3 \n"
                        "psubb %%mm2, %%mm0 \n" // { ref0[b]!=ref0[bn], ref0[b]!=ref1[bn] }
                        "psubb %%mm3, %%mm1 \n" // { ref1[b]!=ref1[bn], ref1[b]!=ref0[bn] }
                        "1: \n"
                        "por %%mm1, %%mm0 \n"
                        "movq (%2,%0,4), %%mm1 \n"
                        "movq 8(%2,%0,4), %%mm2 \n"
                        "movq %%mm1, %%mm3 \n"
                        "movq %%mm2, %%mm4 \n"
                        "psubw (%2), %%mm1 \n"
                        "psubw 8(%2), %%mm2 \n"
                        "psubw 160(%2), %%mm3 \n"
                        "psubw 168(%2), %%mm4 \n"
                        "packsswb %%mm2, %%mm1 \n"
                        "packsswb %%mm4, %%mm3 \n"
                        "paddb %%mm6, %%mm1 \n"
                        "paddb %%mm6, %%mm3 \n"
                        "psubusb %%mm5, %%mm1 \n" // abs(mv[b] - mv[bn]) >= limit
                        "psubusb %%mm5, %%mm3 \n"
                        "packsswb %%mm3, %%mm1 \n"
                        "add $40, %0 \n"       /* second iteration: list 1 */
                        "cmp $40, %0 \n"
                        "jl 1b \n"
                        "sub $80, %0 \n"       /* restore d_idx */
                        "pshufw $0x4E, %%mm1, %%mm1 \n"
                        "por %%mm1, %%mm0 \n"
                        "pshufw $0x4E, %%mm0, %%mm1 \n"
                        "pminub %%mm1, %%mm0 \n" /* min over the two pairings */
                        ::"r"(d_idx),
                          "r"(ref[0]+b_idx),
                          "r"(mv[0]+b_idx)
                    );
                } else {
                    /* Unidirectional: ref difference OR'd with the MV-distance
                     * threshold test. */
                    __asm__ volatile(
                        "movd (%1), %%mm0 \n"
                        "psubb (%1,%0), %%mm0 \n" // ref[b] != ref[bn]
                        "movq (%2), %%mm1 \n"
                        "movq 8(%2), %%mm2 \n"
                        "psubw (%2,%0,4), %%mm1 \n"
                        "psubw 8(%2,%0,4), %%mm2 \n"
                        "packsswb %%mm2, %%mm1 \n"
                        "paddb %%mm6, %%mm1 \n"
                        "psubusb %%mm5, %%mm1 \n" // abs(mv[b] - mv[bn]) >= limit
                        "packsswb %%mm1, %%mm1 \n"
                        "por %%mm1, %%mm0 \n"
                        ::"r"(d_idx),
                          "r"(ref[0]+b_idx),
                          "r"(mv[0]+b_idx)
                    );
                }
            }
            __asm__ volatile(
                "movd %0, %%mm1 \n"
                "por %1, %%mm1 \n" // nnz[b] || nnz[bn]
                ::"m"(nnz[b_idx]),
                  "m"(nnz[b_idx+d_idx])
            );
            /* bS = max(2*nonzero-coeff flag, MV/ref difference flag), widened
             * to 16-bit and stored. */
            __asm__ volatile(
                "pminub %%mm7, %%mm1 \n"
                "pminub %%mm7, %%mm0 \n"
                "psllw $1, %%mm1 \n"
                "pxor %%mm2, %%mm2 \n"
                "pmaxub %%mm0, %%mm1 \n"
                "punpcklbw %%mm2, %%mm1 \n"
                "movq %%mm1, %0 \n"
                :"=m"(*bS[dir][edge])
::"memory" ); } edges = 4; step = 1; } __asm__ volatile( "movq (%0), %%mm0 \n\t" "movq 8(%0), %%mm1 \n\t" "movq 16(%0), %%mm2 \n\t" "movq 24(%0), %%mm3 \n\t" TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4) "movq %%mm0, (%0) \n\t" "movq %%mm3, 8(%0) \n\t" "movq %%mm4, 16(%0) \n\t" "movq %%mm2, 24(%0) \n\t" ::"r"(bS[0]) :"memory" ); } /***********************************/ /* motion compensation */ #define QPEL_H264V_MM(A,B,C,D,E,F,OP,T,Z,d,q)\ "mov"#q" "#C", "#T" \n\t"\ "mov"#d" (%0), "#F" \n\t"\ "paddw "#D", "#T" \n\t"\ "psllw $2, "#T" \n\t"\ "psubw "#B", "#T" \n\t"\ "psubw "#E", "#T" \n\t"\ "punpcklbw "#Z", "#F" \n\t"\ "pmullw %4, "#T" \n\t"\ "paddw %5, "#A" \n\t"\ "add %2, %0 \n\t"\ "paddw "#F", "#A" \n\t"\ "paddw "#A", "#T" \n\t"\ "psraw $5, "#T" \n\t"\ "packuswb "#T", "#T" \n\t"\ OP(T, (%1), A, d)\ "add %3, %1 \n\t" #define QPEL_H264HV_MM(A,B,C,D,E,F,OF,T,Z,d,q)\ "mov"#q" "#C", "#T" \n\t"\ "mov"#d" (%0), "#F" \n\t"\ "paddw "#D", "#T" \n\t"\ "psllw $2, "#T" \n\t"\ "paddw %4, "#A" \n\t"\ "psubw "#B", "#T" \n\t"\ "psubw "#E", "#T" \n\t"\ "punpcklbw "#Z", "#F" \n\t"\ "pmullw %3, "#T" \n\t"\ "paddw "#F", "#A" \n\t"\ "add %2, %0 \n\t"\ "paddw "#A", "#T" \n\t"\ "mov"#q" "#T", "#OF"(%1) \n\t" #define QPEL_H264V(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%mm6,%%mm7,d,q) #define QPEL_H264HV(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%mm6,%%mm7,d,q) #define QPEL_H264V_XMM(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%xmm6,%%xmm7,q,dqa) #define QPEL_H264HV_XMM(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%xmm6,%%xmm7,q,dqa) #define QPEL_H264(OPNAME, OP, MMX)\ static av_noinline void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ int h=4;\ \ __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movq "MANGLE(ff_pw_5) ", %%mm4\n\t"\ "movq "MANGLE(ff_pw_16)", %%mm5\n\t"\ "1: \n\t"\ "movd -1(%0), %%mm1 \n\t"\ "movd (%0), %%mm2 \n\t"\ "movd 1(%0), %%mm3 \n\t"\ "movd 2(%0), %%mm0 \n\t"\ "punpcklbw %%mm7, %%mm1 \n\t"\ "punpcklbw 
%%mm7, %%mm2 \n\t"\ "punpcklbw %%mm7, %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm0 \n\t"\ "paddw %%mm0, %%mm1 \n\t"\ "paddw %%mm3, %%mm2 \n\t"\ "movd -2(%0), %%mm0 \n\t"\ "movd 3(%0), %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm0 \n\t"\ "punpcklbw %%mm7, %%mm3 \n\t"\ "paddw %%mm3, %%mm0 \n\t"\ "psllw $2, %%mm2 \n\t"\ "psubw %%mm1, %%mm2 \n\t"\ "pmullw %%mm4, %%mm2 \n\t"\ "paddw %%mm5, %%mm0 \n\t"\ "paddw %%mm2, %%mm0 \n\t"\ "psraw $5, %%mm0 \n\t"\ "packuswb %%mm0, %%mm0 \n\t"\ OP(%%mm0, (%1),%%mm6, d)\ "add %3, %0 \n\t"\ "add %4, %1 \n\t"\ "decl %2 \n\t"\ " jnz 1b \n\t"\ : "+a"(src), "+c"(dst), "+g"(h)\ : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\ : "memory"\ );\ }\ static av_noinline void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ int h=4;\ __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movq %0, %%mm4 \n\t"\ "movq %1, %%mm5 \n\t"\ :: "m"(ff_pw_5), "m"(ff_pw_16)\ );\ do{\ __asm__ volatile(\ "movd -1(%0), %%mm1 \n\t"\ "movd (%0), %%mm2 \n\t"\ "movd 1(%0), %%mm3 \n\t"\ "movd 2(%0), %%mm0 \n\t"\ "punpcklbw %%mm7, %%mm1 \n\t"\ "punpcklbw %%mm7, %%mm2 \n\t"\ "punpcklbw %%mm7, %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm0 \n\t"\ "paddw %%mm0, %%mm1 \n\t"\ "paddw %%mm3, %%mm2 \n\t"\ "movd -2(%0), %%mm0 \n\t"\ "movd 3(%0), %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm0 \n\t"\ "punpcklbw %%mm7, %%mm3 \n\t"\ "paddw %%mm3, %%mm0 \n\t"\ "psllw $2, %%mm2 \n\t"\ "psubw %%mm1, %%mm2 \n\t"\ "pmullw %%mm4, %%mm2 \n\t"\ "paddw %%mm5, %%mm0 \n\t"\ "paddw %%mm2, %%mm0 \n\t"\ "movd (%2), %%mm3 \n\t"\ "psraw $5, %%mm0 \n\t"\ "packuswb %%mm0, %%mm0 \n\t"\ PAVGB" %%mm3, %%mm0 \n\t"\ OP(%%mm0, (%1),%%mm6, d)\ "add %4, %0 \n\t"\ "add %4, %1 \n\t"\ "add %3, %2 \n\t"\ : "+a"(src), "+c"(dst), "+d"(src2)\ : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride)\ : "memory"\ );\ }while(--h);\ }\ static av_noinline void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ src -= 2*srcStride;\ __asm__ volatile(\ 
"pxor %%mm7, %%mm7 \n\t"\ "movd (%0), %%mm0 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm1 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm2 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm3 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm4 \n\t"\ "add %2, %0 \n\t"\ "punpcklbw %%mm7, %%mm0 \n\t"\ "punpcklbw %%mm7, %%mm1 \n\t"\ "punpcklbw %%mm7, %%mm2 \n\t"\ "punpcklbw %%mm7, %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm4 \n\t"\ QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ \ : "+a"(src), "+c"(dst)\ : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ : "memory"\ );\ }\ static av_noinline void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ int h=4;\ int w=3;\ src -= 2*srcStride+2;\ while(w--){\ __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movd (%0), %%mm0 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm1 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm2 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm3 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm4 \n\t"\ "add %2, %0 \n\t"\ "punpcklbw %%mm7, %%mm0 \n\t"\ "punpcklbw %%mm7, %%mm1 \n\t"\ "punpcklbw %%mm7, %%mm2 \n\t"\ "punpcklbw %%mm7, %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm4 \n\t"\ QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\ QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\ QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\ QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\ \ : "+a"(src)\ : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\ : "memory"\ );\ tmp += 4;\ src += 4 - 9*srcStride;\ }\ tmp -= 3*4;\ __asm__ volatile(\ "1: \n\t"\ "movq (%0), %%mm0 \n\t"\ "paddw 10(%0), %%mm0 \n\t"\ "movq 2(%0), %%mm1 \n\t"\ "paddw 8(%0), %%mm1 \n\t"\ "movq 4(%0), %%mm2 \n\t"\ "paddw 6(%0), %%mm2 \n\t"\ "psubw %%mm1, %%mm0 
\n\t"/*a-b (abccba)*/\ "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\ "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\ "paddsw %%mm2, %%mm0 \n\t"\ "psraw $2, %%mm0 \n\t"/*((a-b)/4-b+c)/4 */\ "paddw %%mm2, %%mm0 \n\t"/*(a-5*b+20*c)/16 */\ "psraw $6, %%mm0 \n\t"\ "packuswb %%mm0, %%mm0 \n\t"\ OP(%%mm0, (%1),%%mm7, d)\ "add $24, %0 \n\t"\ "add %3, %1 \n\t"\ "decl %2 \n\t"\ " jnz 1b \n\t"\ : "+a"(tmp), "+c"(dst), "+g"(h)\ : "S"((x86_reg)dstStride)\ : "memory"\ );\ }\ \ static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ int h=8;\ __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movq "MANGLE(ff_pw_5)", %%mm6\n\t"\ "1: \n\t"\ "movq (%0), %%mm0 \n\t"\ "movq 1(%0), %%mm2 \n\t"\ "movq %%mm0, %%mm1 \n\t"\ "movq %%mm2, %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm0 \n\t"\ "punpckhbw %%mm7, %%mm1 \n\t"\ "punpcklbw %%mm7, %%mm2 \n\t"\ "punpckhbw %%mm7, %%mm3 \n\t"\ "paddw %%mm2, %%mm0 \n\t"\ "paddw %%mm3, %%mm1 \n\t"\ "psllw $2, %%mm0 \n\t"\ "psllw $2, %%mm1 \n\t"\ "movq -1(%0), %%mm2 \n\t"\ "movq 2(%0), %%mm4 \n\t"\ "movq %%mm2, %%mm3 \n\t"\ "movq %%mm4, %%mm5 \n\t"\ "punpcklbw %%mm7, %%mm2 \n\t"\ "punpckhbw %%mm7, %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm4 \n\t"\ "punpckhbw %%mm7, %%mm5 \n\t"\ "paddw %%mm4, %%mm2 \n\t"\ "paddw %%mm3, %%mm5 \n\t"\ "psubw %%mm2, %%mm0 \n\t"\ "psubw %%mm5, %%mm1 \n\t"\ "pmullw %%mm6, %%mm0 \n\t"\ "pmullw %%mm6, %%mm1 \n\t"\ "movd -2(%0), %%mm2 \n\t"\ "movd 7(%0), %%mm5 \n\t"\ "punpcklbw %%mm7, %%mm2 \n\t"\ "punpcklbw %%mm7, %%mm5 \n\t"\ "paddw %%mm3, %%mm2 \n\t"\ "paddw %%mm5, %%mm4 \n\t"\ "movq "MANGLE(ff_pw_16)", %%mm5\n\t"\ "paddw %%mm5, %%mm2 \n\t"\ "paddw %%mm5, %%mm4 \n\t"\ "paddw %%mm2, %%mm0 \n\t"\ "paddw %%mm4, %%mm1 \n\t"\ "psraw $5, %%mm0 \n\t"\ "psraw $5, %%mm1 \n\t"\ "packuswb %%mm1, %%mm0 \n\t"\ OP(%%mm0, (%1),%%mm5, q)\ "add %3, %0 \n\t"\ "add %4, %1 \n\t"\ "decl %2 \n\t"\ " jnz 1b \n\t"\ : "+a"(src), "+c"(dst), "+g"(h)\ : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\ : "memory"\ );\ }\ \ 
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ int h=8;\ __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movq %0, %%mm6 \n\t"\ :: "m"(ff_pw_5)\ );\ do{\ __asm__ volatile(\ "movq (%0), %%mm0 \n\t"\ "movq 1(%0), %%mm2 \n\t"\ "movq %%mm0, %%mm1 \n\t"\ "movq %%mm2, %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm0 \n\t"\ "punpckhbw %%mm7, %%mm1 \n\t"\ "punpcklbw %%mm7, %%mm2 \n\t"\ "punpckhbw %%mm7, %%mm3 \n\t"\ "paddw %%mm2, %%mm0 \n\t"\ "paddw %%mm3, %%mm1 \n\t"\ "psllw $2, %%mm0 \n\t"\ "psllw $2, %%mm1 \n\t"\ "movq -1(%0), %%mm2 \n\t"\ "movq 2(%0), %%mm4 \n\t"\ "movq %%mm2, %%mm3 \n\t"\ "movq %%mm4, %%mm5 \n\t"\ "punpcklbw %%mm7, %%mm2 \n\t"\ "punpckhbw %%mm7, %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm4 \n\t"\ "punpckhbw %%mm7, %%mm5 \n\t"\ "paddw %%mm4, %%mm2 \n\t"\ "paddw %%mm3, %%mm5 \n\t"\ "psubw %%mm2, %%mm0 \n\t"\ "psubw %%mm5, %%mm1 \n\t"\ "pmullw %%mm6, %%mm0 \n\t"\ "pmullw %%mm6, %%mm1 \n\t"\ "movd -2(%0), %%mm2 \n\t"\ "movd 7(%0), %%mm5 \n\t"\ "punpcklbw %%mm7, %%mm2 \n\t"\ "punpcklbw %%mm7, %%mm5 \n\t"\ "paddw %%mm3, %%mm2 \n\t"\ "paddw %%mm5, %%mm4 \n\t"\ "movq %5, %%mm5 \n\t"\ "paddw %%mm5, %%mm2 \n\t"\ "paddw %%mm5, %%mm4 \n\t"\ "paddw %%mm2, %%mm0 \n\t"\ "paddw %%mm4, %%mm1 \n\t"\ "psraw $5, %%mm0 \n\t"\ "psraw $5, %%mm1 \n\t"\ "movq (%2), %%mm4 \n\t"\ "packuswb %%mm1, %%mm0 \n\t"\ PAVGB" %%mm4, %%mm0 \n\t"\ OP(%%mm0, (%1),%%mm5, q)\ "add %4, %0 \n\t"\ "add %4, %1 \n\t"\ "add %3, %2 \n\t"\ : "+a"(src), "+c"(dst), "+d"(src2)\ : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\ "m"(ff_pw_16)\ : "memory"\ );\ }while(--h);\ }\ \ static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ int w= 2;\ src -= 2*srcStride;\ \ while(w--){\ __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movd (%0), %%mm0 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm1 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm2 \n\t"\ "add %2, %0 \n\t"\ 
"movd (%0), %%mm3 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm4 \n\t"\ "add %2, %0 \n\t"\ "punpcklbw %%mm7, %%mm0 \n\t"\ "punpcklbw %%mm7, %%mm1 \n\t"\ "punpcklbw %%mm7, %%mm2 \n\t"\ "punpcklbw %%mm7, %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm4 \n\t"\ QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\ QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\ QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ \ : "+a"(src), "+c"(dst)\ : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ : "memory"\ );\ if(h==16){\ __asm__ volatile(\ QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\ QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\ QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ \ : "+a"(src), "+c"(dst)\ : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ : "memory"\ );\ }\ src += 4-(h+5)*srcStride;\ dst += 4-h*dstStride;\ }\ }\ static av_always_inline void OPNAME ## h264_qpel8or16_hv1_lowpass_ ## MMX(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){\ int w = (size+8)>>2;\ src -= 2*srcStride+2;\ while(w--){\ __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movd (%0), %%mm0 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm1 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm2 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm3 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm4 \n\t"\ "add %2, %0 \n\t"\ "punpcklbw %%mm7, %%mm0 
\n\t"\ "punpcklbw %%mm7, %%mm1 \n\t"\ "punpcklbw %%mm7, %%mm2 \n\t"\ "punpcklbw %%mm7, %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm4 \n\t"\ QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\ QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\ QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\ QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\ QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\ QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\ QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\ QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\ : "+a"(src)\ : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\ : "memory"\ );\ if(size==16){\ __asm__ volatile(\ QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 8*48)\ QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 9*48)\ QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\ QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\ QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\ QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\ QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\ QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\ : "+a"(src)\ : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\ : "memory"\ );\ }\ tmp += 4;\ src += 4 - (size+5)*srcStride;\ }\ }\ static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\ int w = size>>4;\ do{\ int h = size;\ __asm__ volatile(\ "1: \n\t"\ "movq (%0), %%mm0 \n\t"\ "movq 8(%0), %%mm3 \n\t"\ "movq 2(%0), %%mm1 \n\t"\ "movq 10(%0), %%mm4 \n\t"\ "paddw %%mm4, %%mm0 \n\t"\ "paddw %%mm3, %%mm1 \n\t"\ "paddw 18(%0), %%mm3 \n\t"\ "paddw 16(%0), %%mm4 \n\t"\ "movq 4(%0), %%mm2 \n\t"\ "movq 12(%0), %%mm5 \n\t"\ "paddw 6(%0), %%mm2 \n\t"\ "paddw 14(%0), %%mm5 \n\t"\ "psubw %%mm1, %%mm0 \n\t"\ "psubw %%mm4, %%mm3 \n\t"\ "psraw $2, %%mm0 
\n\t"\ "psraw $2, %%mm3 \n\t"\ "psubw %%mm1, %%mm0 \n\t"\ "psubw %%mm4, %%mm3 \n\t"\ "paddsw %%mm2, %%mm0 \n\t"\ "paddsw %%mm5, %%mm3 \n\t"\ "psraw $2, %%mm0 \n\t"\ "psraw $2, %%mm3 \n\t"\ "paddw %%mm2, %%mm0 \n\t"\ "paddw %%mm5, %%mm3 \n\t"\ "psraw $6, %%mm0 \n\t"\ "psraw $6, %%mm3 \n\t"\ "packuswb %%mm3, %%mm0 \n\t"\ OP(%%mm0, (%1),%%mm7, q)\ "add $48, %0 \n\t"\ "add %3, %1 \n\t"\ "decl %2 \n\t"\ " jnz 1b \n\t"\ : "+a"(tmp), "+c"(dst), "+g"(h)\ : "S"((x86_reg)dstStride)\ : "memory"\ );\ tmp += 8 - size*24;\ dst += 8 - size*dstStride;\ }while(w--);\ }\ \ static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\ }\ static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\ OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\ }\ \ static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ src += 8*srcStride;\ dst += 8*dstStride;\ OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ }\ \ static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\ OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\ src += 8*dstStride;\ dst += 8*dstStride;\ src2 += 8*src2Stride;\ OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\ OPNAME ## 
h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\ }\ \ static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\ put_h264_qpel8or16_hv1_lowpass_ ## MMX(tmp, src, tmpStride, srcStride, size);\ OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\ }\ static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 8);\ }\ \ static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 16);\ }\ \ static av_noinline void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\ {\ __asm__ volatile(\ "movq (%1), %%mm0 \n\t"\ "movq 24(%1), %%mm1 \n\t"\ "psraw $5, %%mm0 \n\t"\ "psraw $5, %%mm1 \n\t"\ "packuswb %%mm0, %%mm0 \n\t"\ "packuswb %%mm1, %%mm1 \n\t"\ PAVGB" (%0), %%mm0 \n\t"\ PAVGB" (%0,%3), %%mm1 \n\t"\ OP(%%mm0, (%2), %%mm4, d)\ OP(%%mm1, (%2,%4), %%mm5, d)\ "lea (%0,%3,2), %0 \n\t"\ "lea (%2,%4,2), %2 \n\t"\ "movq 48(%1), %%mm0 \n\t"\ "movq 72(%1), %%mm1 \n\t"\ "psraw $5, %%mm0 \n\t"\ "psraw $5, %%mm1 \n\t"\ "packuswb %%mm0, %%mm0 \n\t"\ "packuswb %%mm1, %%mm1 \n\t"\ PAVGB" (%0), %%mm0 \n\t"\ PAVGB" (%0,%3), %%mm1 \n\t"\ OP(%%mm0, (%2), %%mm4, d)\ OP(%%mm1, (%2,%4), %%mm5, d)\ :"+a"(src8), "+c"(src16), "+d"(dst)\ :"S"((x86_reg)src8Stride), "D"((x86_reg)dstStride)\ :"memory");\ }\ static av_noinline void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\ {\ do{\ __asm__ volatile(\ "movq (%1), %%mm0 \n\t"\ "movq 8(%1), %%mm1 
\n\t"\ "movq 48(%1), %%mm2 \n\t"\ "movq 8+48(%1), %%mm3 \n\t"\ "psraw $5, %%mm0 \n\t"\ "psraw $5, %%mm1 \n\t"\ "psraw $5, %%mm2 \n\t"\ "psraw $5, %%mm3 \n\t"\ "packuswb %%mm1, %%mm0 \n\t"\ "packuswb %%mm3, %%mm2 \n\t"\ PAVGB" (%0), %%mm0 \n\t"\ PAVGB" (%0,%3), %%mm2 \n\t"\ OP(%%mm0, (%2), %%mm5, q)\ OP(%%mm2, (%2,%4), %%mm5, q)\ ::"a"(src8), "c"(src16), "d"(dst),\ "r"((x86_reg)src8Stride), "r"((x86_reg)dstStride)\ :"memory");\ src8 += 2L*src8Stride;\ src16 += 48;\ dst += 2L*dstStride;\ }while(h-=2);\ }\ static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\ {\ OPNAME ## pixels8_l2_shift5_ ## MMX(dst , src16 , src8 , dstStride, src8Stride, h);\ OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\ }\ #if ARCH_X86_64 #define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\ static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ int h=16;\ __asm__ volatile(\ "pxor %%xmm15, %%xmm15 \n\t"\ "movdqa %6, %%xmm14 \n\t"\ "movdqa %7, %%xmm13 \n\t"\ "1: \n\t"\ "lddqu 6(%0), %%xmm1 \n\t"\ "lddqu -2(%0), %%xmm7 \n\t"\ "movdqa %%xmm1, %%xmm0 \n\t"\ "punpckhbw %%xmm15, %%xmm1 \n\t"\ "punpcklbw %%xmm15, %%xmm0 \n\t"\ "punpcklbw %%xmm15, %%xmm7 \n\t"\ "movdqa %%xmm1, %%xmm2 \n\t"\ "movdqa %%xmm0, %%xmm6 \n\t"\ "movdqa %%xmm1, %%xmm3 \n\t"\ "movdqa %%xmm0, %%xmm8 \n\t"\ "movdqa %%xmm1, %%xmm4 \n\t"\ "movdqa %%xmm0, %%xmm9 \n\t"\ "movdqa %%xmm0, %%xmm12 \n\t"\ "movdqa %%xmm1, %%xmm11 \n\t"\ "palignr $10,%%xmm0, %%xmm11\n\t"\ "palignr $10,%%xmm7, %%xmm12\n\t"\ "palignr $2, %%xmm0, %%xmm4 \n\t"\ "palignr $2, %%xmm7, %%xmm9 \n\t"\ "palignr $4, %%xmm0, %%xmm3 \n\t"\ "palignr $4, %%xmm7, %%xmm8 \n\t"\ "palignr $6, %%xmm0, %%xmm2 \n\t"\ "palignr $6, %%xmm7, %%xmm6 \n\t"\ "paddw %%xmm0 ,%%xmm11 \n\t"\ "palignr $8, %%xmm0, %%xmm1 \n\t"\ "palignr $8, %%xmm7, %%xmm0 \n\t"\ "paddw %%xmm12,%%xmm7 \n\t"\ "paddw 
%%xmm3, %%xmm2 \n\t"\ "paddw %%xmm8, %%xmm6 \n\t"\ "paddw %%xmm4, %%xmm1 \n\t"\ "paddw %%xmm9, %%xmm0 \n\t"\ "psllw $2, %%xmm2 \n\t"\ "psllw $2, %%xmm6 \n\t"\ "psubw %%xmm1, %%xmm2 \n\t"\ "psubw %%xmm0, %%xmm6 \n\t"\ "paddw %%xmm13,%%xmm11 \n\t"\ "paddw %%xmm13,%%xmm7 \n\t"\ "pmullw %%xmm14,%%xmm2 \n\t"\ "pmullw %%xmm14,%%xmm6 \n\t"\ "lddqu (%2), %%xmm3 \n\t"\ "paddw %%xmm11,%%xmm2 \n\t"\ "paddw %%xmm7, %%xmm6 \n\t"\ "psraw $5, %%xmm2 \n\t"\ "psraw $5, %%xmm6 \n\t"\ "packuswb %%xmm2,%%xmm6 \n\t"\ "pavgb %%xmm3, %%xmm6 \n\t"\ OP(%%xmm6, (%1), %%xmm4, dqa)\ "add %5, %0 \n\t"\ "add %5, %1 \n\t"\ "add %4, %2 \n\t"\ "decl %3 \n\t"\ "jg 1b \n\t"\ : "+a"(src), "+c"(dst), "+d"(src2), "+g"(h)\ : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\ "m"(ff_pw_5), "m"(ff_pw_16)\ : "memory"\ );\ } #else // ARCH_X86_64 #define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\ static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\ OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\ src += 8*dstStride;\ dst += 8*dstStride;\ src2 += 8*src2Stride;\ OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\ OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\ } #endif // ARCH_X86_64 #define QPEL_H264_H_XMM(OPNAME, OP, MMX)\ static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ int h=8;\ __asm__ volatile(\ "pxor %%xmm7, %%xmm7 \n\t"\ "movdqa %0, %%xmm6 \n\t"\ :: "m"(ff_pw_5)\ );\ do{\ __asm__ volatile(\ "lddqu -2(%0), %%xmm1 \n\t"\ "movdqa %%xmm1, %%xmm0 \n\t"\ "punpckhbw %%xmm7, %%xmm1 \n\t"\ "punpcklbw %%xmm7, %%xmm0 \n\t"\ "movdqa %%xmm1, %%xmm2 \n\t"\ "movdqa %%xmm1, %%xmm3 \n\t"\ "movdqa %%xmm1, %%xmm4 \n\t"\ "movdqa %%xmm1, %%xmm5 \n\t"\ 
"palignr $2, %%xmm0, %%xmm4 \n\t"\ "palignr $4, %%xmm0, %%xmm3 \n\t"\ "palignr $6, %%xmm0, %%xmm2 \n\t"\ "palignr $8, %%xmm0, %%xmm1 \n\t"\ "palignr $10,%%xmm0, %%xmm5 \n\t"\ "paddw %%xmm5, %%xmm0 \n\t"\ "paddw %%xmm3, %%xmm2 \n\t"\ "paddw %%xmm4, %%xmm1 \n\t"\ "psllw $2, %%xmm2 \n\t"\ "movq (%2), %%xmm3 \n\t"\ "psubw %%xmm1, %%xmm2 \n\t"\ "paddw %5, %%xmm0 \n\t"\ "pmullw %%xmm6, %%xmm2 \n\t"\ "paddw %%xmm0, %%xmm2 \n\t"\ "psraw $5, %%xmm2 \n\t"\ "packuswb %%xmm2, %%xmm2 \n\t"\ "pavgb %%xmm3, %%xmm2 \n\t"\ OP(%%xmm2, (%1), %%xmm4, q)\ "add %4, %0 \n\t"\ "add %4, %1 \n\t"\ "add %3, %2 \n\t"\ : "+a"(src), "+c"(dst), "+d"(src2)\ : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\ "m"(ff_pw_16)\ : "memory"\ );\ }while(--h);\ }\ QPEL_H264_H16_XMM(OPNAME, OP, MMX)\ \ static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ int h=8;\ __asm__ volatile(\ "pxor %%xmm7, %%xmm7 \n\t"\ "movdqa "MANGLE(ff_pw_5)", %%xmm6\n\t"\ "1: \n\t"\ "lddqu -2(%0), %%xmm1 \n\t"\ "movdqa %%xmm1, %%xmm0 \n\t"\ "punpckhbw %%xmm7, %%xmm1 \n\t"\ "punpcklbw %%xmm7, %%xmm0 \n\t"\ "movdqa %%xmm1, %%xmm2 \n\t"\ "movdqa %%xmm1, %%xmm3 \n\t"\ "movdqa %%xmm1, %%xmm4 \n\t"\ "movdqa %%xmm1, %%xmm5 \n\t"\ "palignr $2, %%xmm0, %%xmm4 \n\t"\ "palignr $4, %%xmm0, %%xmm3 \n\t"\ "palignr $6, %%xmm0, %%xmm2 \n\t"\ "palignr $8, %%xmm0, %%xmm1 \n\t"\ "palignr $10,%%xmm0, %%xmm5 \n\t"\ "paddw %%xmm5, %%xmm0 \n\t"\ "paddw %%xmm3, %%xmm2 \n\t"\ "paddw %%xmm4, %%xmm1 \n\t"\ "psllw $2, %%xmm2 \n\t"\ "psubw %%xmm1, %%xmm2 \n\t"\ "paddw "MANGLE(ff_pw_16)", %%xmm0\n\t"\ "pmullw %%xmm6, %%xmm2 \n\t"\ "paddw %%xmm0, %%xmm2 \n\t"\ "psraw $5, %%xmm2 \n\t"\ "packuswb %%xmm2, %%xmm2 \n\t"\ OP(%%xmm2, (%1), %%xmm4, q)\ "add %3, %0 \n\t"\ "add %4, %1 \n\t"\ "decl %2 \n\t"\ " jnz 1b \n\t"\ : "+a"(src), "+c"(dst), "+g"(h)\ : "D"((x86_reg)srcStride), "S"((x86_reg)dstStride)\ : "memory"\ );\ }\ static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t 
*src, int dstStride, int srcStride){\ OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ src += 8*srcStride;\ dst += 8*dstStride;\ OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ }\ #define QPEL_H264_V_XMM(OPNAME, OP, MMX)\ static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ src -= 2*srcStride;\ \ __asm__ volatile(\ "pxor %%xmm7, %%xmm7 \n\t"\ "movq (%0), %%xmm0 \n\t"\ "add %2, %0 \n\t"\ "movq (%0), %%xmm1 \n\t"\ "add %2, %0 \n\t"\ "movq (%0), %%xmm2 \n\t"\ "add %2, %0 \n\t"\ "movq (%0), %%xmm3 \n\t"\ "add %2, %0 \n\t"\ "movq (%0), %%xmm4 \n\t"\ "add %2, %0 \n\t"\ "punpcklbw %%xmm7, %%xmm0 \n\t"\ "punpcklbw %%xmm7, %%xmm1 \n\t"\ "punpcklbw %%xmm7, %%xmm2 \n\t"\ "punpcklbw %%xmm7, %%xmm3 \n\t"\ "punpcklbw %%xmm7, %%xmm4 \n\t"\ QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\ QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\ QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\ QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\ QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\ QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\ QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\ QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\ \ : "+a"(src), "+c"(dst)\ : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ : "memory"\ );\ if(h==16){\ __asm__ volatile(\ QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\ QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\ QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\ QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\ 
QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\ QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\ QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\ QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\ \ : "+a"(src), "+c"(dst)\ : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ : "memory"\ );\ }\ }\ static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\ }\ static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\ OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\ } static av_always_inline void put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){ int w = (size+8)>>3; src -= 2*srcStride+2; while(w--){ __asm__ volatile( "pxor %%xmm7, %%xmm7 \n\t" "movq (%0), %%xmm0 \n\t" "add %2, %0 \n\t" "movq (%0), %%xmm1 \n\t" "add %2, %0 \n\t" "movq (%0), %%xmm2 \n\t" "add %2, %0 \n\t" "movq (%0), %%xmm3 \n\t" "add %2, %0 \n\t" "movq (%0), %%xmm4 \n\t" "add %2, %0 \n\t" "punpcklbw %%xmm7, %%xmm0 \n\t" "punpcklbw %%xmm7, %%xmm1 \n\t" "punpcklbw %%xmm7, %%xmm2 \n\t" "punpcklbw %%xmm7, %%xmm3 \n\t" "punpcklbw %%xmm7, %%xmm4 \n\t" QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 0*48) QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 1*48) QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 2*48) QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 3*48) QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 4*48) QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 5*48) QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 6*48) 
QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 7*48) : "+a"(src) : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16) : "memory" ); if(size==16){ __asm__ volatile( QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 8*48) QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 9*48) QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 10*48) QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 11*48) QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 12*48) QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 13*48) QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 14*48) QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 15*48) : "+a"(src) : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16) : "memory" ); } tmp += 8; src += 8 - (size+5)*srcStride; } } #define QPEL_H264_HV2_XMM(OPNAME, OP, MMX)\ static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\ int h = size;\ if(size == 16){\ __asm__ volatile(\ "1: \n\t"\ "movdqa 32(%0), %%xmm4 \n\t"\ "movdqa 16(%0), %%xmm5 \n\t"\ "movdqa (%0), %%xmm7 \n\t"\ "movdqa %%xmm4, %%xmm3 \n\t"\ "movdqa %%xmm4, %%xmm2 \n\t"\ "movdqa %%xmm4, %%xmm1 \n\t"\ "movdqa %%xmm4, %%xmm0 \n\t"\ "palignr $10, %%xmm5, %%xmm0 \n\t"\ "palignr $8, %%xmm5, %%xmm1 \n\t"\ "palignr $6, %%xmm5, %%xmm2 \n\t"\ "palignr $4, %%xmm5, %%xmm3 \n\t"\ "palignr $2, %%xmm5, %%xmm4 \n\t"\ "paddw %%xmm5, %%xmm0 \n\t"\ "paddw %%xmm4, %%xmm1 \n\t"\ "paddw %%xmm3, %%xmm2 \n\t"\ "movdqa %%xmm5, %%xmm6 \n\t"\ "movdqa %%xmm5, %%xmm4 \n\t"\ "movdqa %%xmm5, %%xmm3 \n\t"\ "palignr $8, %%xmm7, %%xmm4 \n\t"\ "palignr $2, %%xmm7, %%xmm6 \n\t"\ "palignr $10, %%xmm7, %%xmm3 \n\t"\ "paddw %%xmm6, %%xmm4 \n\t"\ "movdqa %%xmm5, %%xmm6 \n\t"\ "palignr $6, %%xmm7, %%xmm5 \n\t"\ "palignr $4, %%xmm7, %%xmm6 \n\t"\ "paddw %%xmm7, %%xmm3 \n\t"\ "paddw %%xmm6, %%xmm5 
\n\t"\ \ "psubw %%xmm1, %%xmm0 \n\t"\ "psubw %%xmm4, %%xmm3 \n\t"\ "psraw $2, %%xmm0 \n\t"\ "psraw $2, %%xmm3 \n\t"\ "psubw %%xmm1, %%xmm0 \n\t"\ "psubw %%xmm4, %%xmm3 \n\t"\ "paddw %%xmm2, %%xmm0 \n\t"\ "paddw %%xmm5, %%xmm3 \n\t"\ "psraw $2, %%xmm0 \n\t"\ "psraw $2, %%xmm3 \n\t"\ "paddw %%xmm2, %%xmm0 \n\t"\ "paddw %%xmm5, %%xmm3 \n\t"\ "psraw $6, %%xmm0 \n\t"\ "psraw $6, %%xmm3 \n\t"\ "packuswb %%xmm0, %%xmm3 \n\t"\ OP(%%xmm3, (%1), %%xmm7, dqa)\ "add $48, %0 \n\t"\ "add %3, %1 \n\t"\ "decl %2 \n\t"\ " jnz 1b \n\t"\ : "+a"(tmp), "+c"(dst), "+g"(h)\ : "S"((x86_reg)dstStride)\ : "memory"\ );\ }else{\ __asm__ volatile(\ "1: \n\t"\ "movdqa 16(%0), %%xmm1 \n\t"\ "movdqa (%0), %%xmm0 \n\t"\ "movdqa %%xmm1, %%xmm2 \n\t"\ "movdqa %%xmm1, %%xmm3 \n\t"\ "movdqa %%xmm1, %%xmm4 \n\t"\ "movdqa %%xmm1, %%xmm5 \n\t"\ "palignr $10, %%xmm0, %%xmm5 \n\t"\ "palignr $8, %%xmm0, %%xmm4 \n\t"\ "palignr $6, %%xmm0, %%xmm3 \n\t"\ "palignr $4, %%xmm0, %%xmm2 \n\t"\ "palignr $2, %%xmm0, %%xmm1 \n\t"\ "paddw %%xmm5, %%xmm0 \n\t"\ "paddw %%xmm4, %%xmm1 \n\t"\ "paddw %%xmm3, %%xmm2 \n\t"\ "psubw %%xmm1, %%xmm0 \n\t"\ "psraw $2, %%xmm0 \n\t"\ "psubw %%xmm1, %%xmm0 \n\t"\ "paddw %%xmm2, %%xmm0 \n\t"\ "psraw $2, %%xmm0 \n\t"\ "paddw %%xmm2, %%xmm0 \n\t"\ "psraw $6, %%xmm0 \n\t"\ "packuswb %%xmm0, %%xmm0 \n\t"\ OP(%%xmm0, (%1), %%xmm7, q)\ "add $48, %0 \n\t"\ "add %3, %1 \n\t"\ "decl %2 \n\t"\ " jnz 1b \n\t"\ : "+a"(tmp), "+c"(dst), "+g"(h)\ : "S"((x86_reg)dstStride)\ : "memory"\ );\ }\ } #define QPEL_H264_HV_XMM(OPNAME, OP, MMX)\ static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\ put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, tmpStride, srcStride, size);\ OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\ }\ static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ 
OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 8);\
}\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 16);\
}\

/* For these helpers no separate SSE2/SSSE3 implementation exists; the
 * SSE2/SSSE3 names are simply aliased to the MMX2 versions so the MC
 * glue macros below can be instantiated uniformly per instruction set. */
#define put_pixels8_l2_sse2 put_pixels8_l2_mmx2
#define avg_pixels8_l2_sse2 avg_pixels8_l2_mmx2
#define put_pixels16_l2_sse2 put_pixels16_l2_mmx2
#define avg_pixels16_l2_sse2 avg_pixels16_l2_mmx2
#define put_pixels8_l2_ssse3 put_pixels8_l2_mmx2
#define avg_pixels8_l2_ssse3 avg_pixels8_l2_mmx2
#define put_pixels16_l2_ssse3 put_pixels16_l2_mmx2
#define avg_pixels16_l2_ssse3 avg_pixels16_l2_mmx2

#define put_pixels8_l2_shift5_sse2 put_pixels8_l2_shift5_mmx2
#define avg_pixels8_l2_shift5_sse2 avg_pixels8_l2_shift5_mmx2
#define put_pixels16_l2_shift5_sse2 put_pixels16_l2_shift5_mmx2
#define avg_pixels16_l2_shift5_sse2 avg_pixels16_l2_shift5_mmx2
#define put_pixels8_l2_shift5_ssse3 put_pixels8_l2_shift5_mmx2
#define avg_pixels8_l2_shift5_ssse3 avg_pixels8_l2_shift5_mmx2
#define put_pixels16_l2_shift5_ssse3 put_pixels16_l2_shift5_mmx2
#define avg_pixels16_l2_shift5_ssse3 avg_pixels16_l2_shift5_mmx2

#define put_h264_qpel8_h_lowpass_l2_sse2 put_h264_qpel8_h_lowpass_l2_mmx2
#define avg_h264_qpel8_h_lowpass_l2_sse2 avg_h264_qpel8_h_lowpass_l2_mmx2
#define put_h264_qpel16_h_lowpass_l2_sse2 put_h264_qpel16_h_lowpass_l2_mmx2
#define avg_h264_qpel16_h_lowpass_l2_sse2 avg_h264_qpel16_h_lowpass_l2_mmx2

#define put_h264_qpel8_v_lowpass_ssse3 put_h264_qpel8_v_lowpass_sse2
#define avg_h264_qpel8_v_lowpass_ssse3 avg_h264_qpel8_v_lowpass_sse2
#define put_h264_qpel16_v_lowpass_ssse3 put_h264_qpel16_v_lowpass_sse2
#define avg_h264_qpel16_v_lowpass_ssse3 avg_h264_qpel16_v_lowpass_sse2

#define put_h264_qpel8or16_hv2_lowpass_sse2 put_h264_qpel8or16_hv2_lowpass_mmx2
#define avg_h264_qpel8or16_hv2_lowpass_sse2 avg_h264_qpel8or16_hv2_lowpass_mmx2

/* Glue macro: expands to the whole set of quarter-pel MC entry points
 * (mc00..mc33) for one OPNAME (put_/avg_), block SIZE and MMX variant,
 * by instantiating the copy (C), vertical (V), horizontal (H) and
 * mixed (HV) sub-macros defined below. */
#define H264_MC(OPNAME, SIZE, MMX, ALIGN) \
H264_MC_C(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_V(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_H(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_HV(OPNAME, SIZE, MMX, ALIGN)\

/* Full-pel (0,0) 16x16 copy/average using the SSE2 pixel primitives. */
static void put_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
    put_pixels16_sse2(dst, src, stride, 16);
}
static void avg_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
    avg_pixels16_sse2(dst, src, stride, 16);
}
/* 8x8 full-pel copy gains nothing from SSE2; reuse the MMX2 version. */
#define put_h264_qpel8_mc00_sse2 put_h264_qpel8_mc00_mmx2
#define avg_h264_qpel8_mc00_sse2 avg_h264_qpel8_mc00_mmx2

/* mc00: integer-pel position, plain block copy (put_) or average (avg_). */
#define H264_MC_C(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## MMX(dst, src, stride, SIZE);\
}\

/* Horizontal quarter-pel positions. mc20 is the pure 6-tap half-pel
 * filter; mc10/mc30 additionally average ("_l2") the filtered result
 * with the nearest integer column (src resp. src+1). */
#define H264_MC_H(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
}\

/* Vertical quarter-pel positions. mc02 is the pure 6-tap half-pel
 * filter; mc01/mc03 filter into a SIZExSIZE temp and then average it
 * with the nearest integer row (src resp. src+stride). */
#define H264_MC_V(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, temp, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, temp, stride, stride, SIZE);\
}\

/* Mixed (diagonal and centre) quarter-pel positions, built from the
 * vertical 6-tap filter (into temp), the horizontal 6-tap filter and
 * the 2D hv filter, averaged as required for each sub-pel position. */
#define H264_MC_HV(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
/* mc22: centre half-pel position, pure 2D filter via an int16 temp. */\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint16_t, temp)[SIZE*(SIZE<8?12:24)];\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, temp, src, stride, SIZE, stride);\
}\
\
/* mc21/mc23: hv result averaged with the horizontal filter of the
 * row above/below.  temp holds the int16 hv intermediate (halfV)
 * followed by the packed hv output (halfHV). */\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    /* NOTE(review): (int)temp truncates on 64 bit, but only the low 3 bits are tested */\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
}\
\
/* mc12/mc32: hv result averaged (with >>5 on the int16 intermediate,
 * see pixels*_l2_shift5) with the vertical intermediate at the left/
 * right neighbouring column (halfV+2 resp. halfV+3). */\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\
}\

/* Instantiate 4x4, 8x8 and 16x16 put/avg MC sets for one MMX variant. */
#define H264_MC_4816(MMX)\
H264_MC(put_, 4, MMX, 8)\
H264_MC(put_, 8, MMX, 8)\
H264_MC(put_, 16,MMX, 8)\
H264_MC(avg_, 4, MMX, 8)\
H264_MC(avg_, 8, MMX, 8)\
H264_MC(avg_, 16,MMX, 8)\

/* Instantiate 8x8 and 16x16 put/avg sets of one sub-macro (QPEL) for an
 * XMM variant (16-byte temp alignment). */
#define H264_MC_816(QPEL, XMM)\
QPEL(put_, 8, XMM, 16)\
QPEL(put_, 16,XMM, 16)\
QPEL(avg_, 8, XMM, 16)\
QPEL(avg_, 16,XMM, 16)\

/* Destination read-modify-write average used as OP by the avg_ variants:
 * pavgusb on 3DNow!, pavgb on MMX2/SSE. */
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"

/* Instantiate the low-level qpel primitives per instruction set; PAVGB
 * selects the byte-average instruction spelled into the asm strings. */
#define PAVGB "pavgusb"
QPEL_H264(put_,       PUT_OP, 3dnow)
QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
#undef PAVGB
#define PAVGB "pavgb"
QPEL_H264(put_,       PUT_OP, mmx2)
QPEL_H264(avg_,  AVG_MMX2_OP, mmx2)

QPEL_H264_V_XMM(put_,       PUT_OP, sse2)
QPEL_H264_V_XMM(avg_,  AVG_MMX2_OP, sse2)
QPEL_H264_HV_XMM(put_,       PUT_OP, sse2)
QPEL_H264_HV_XMM(avg_,  AVG_MMX2_OP, sse2)
#if HAVE_SSSE3
QPEL_H264_H_XMM(put_,       PUT_OP, ssse3)
QPEL_H264_H_XMM(avg_,  AVG_MMX2_OP, ssse3)
QPEL_H264_HV2_XMM(put_,       PUT_OP, ssse3)
QPEL_H264_HV2_XMM(avg_,  AVG_MMX2_OP, ssse3)
QPEL_H264_HV_XMM(put_,       PUT_OP, ssse3)
QPEL_H264_HV_XMM(avg_,  AVG_MMX2_OP, ssse3)
#endif
#undef PAVGB

/* Instantiate the public mcXY entry points per instruction set. */
H264_MC_4816(3dnow)
H264_MC_4816(mmx2)
H264_MC_816(H264_MC_V, sse2)
H264_MC_816(H264_MC_HV, sse2)
#if HAVE_SSSE3
H264_MC_816(H264_MC_H, ssse3)
H264_MC_816(H264_MC_HV, ssse3)
#endif

/* rnd interleaved with rnd div 8, use p+1 to access rnd div 8
 * (pairs 32/4 for H.264 rounding, 28/3 for the VC-1 no-rounding mode,
 * selected below via h264_rnd_reg vs h264_rnd_reg+2). */
DECLARE_ALIGNED(8, static const uint64_t, h264_rnd_reg)[4] = {
    0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL
};

/* Bind the template hooks and instantiate the generic MMX chroma MC
 * (put variant: OP is a plain store, so the OP macros expand to nothing). */
#define H264_CHROMA_OP(S,D)
#define H264_CHROMA_OP4(S,D,T)
#define H264_CHROMA_MC8_TMPL put_h264_chroma_generic_mc8_mmx
#define H264_CHROMA_MC4_TMPL put_h264_chroma_generic_mc4_mmx
#define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_mmx.c"

/* Thin wrappers binding the rounding-constant table to the template. */
static void put_h264_chroma_mc8_mmx_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_generic_mc8_mmx(dst, src, stride, h, x, y, h264_rnd_reg);
}
static void put_vc1_chroma_mc8_mmx_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_generic_mc8_mmx(dst, src,
stride, h, x, y, h264_rnd_reg+2); } static void put_h264_chroma_mc4_mmx(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { put_h264_chroma_generic_mc4_mmx(dst, src, stride, h, x, y, h264_rnd_reg); } #undef H264_CHROMA_OP #undef H264_CHROMA_OP4 #undef H264_CHROMA_MC8_TMPL #undef H264_CHROMA_MC4_TMPL #undef H264_CHROMA_MC2_TMPL #undef H264_CHROMA_MC8_MV0 #define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t" #define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\ "pavgb " #T ", " #D " \n\t" #define H264_CHROMA_MC8_TMPL avg_h264_chroma_generic_mc8_mmx2 #define H264_CHROMA_MC4_TMPL avg_h264_chroma_generic_mc4_mmx2 #define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2 #define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2 #include "dsputil_h264_template_mmx.c" static void avg_h264_chroma_mc8_mmx2_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, h264_rnd_reg); } static void avg_vc1_chroma_mc8_mmx2_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, h264_rnd_reg+2); } static void avg_h264_chroma_mc4_mmx2(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { avg_h264_chroma_generic_mc4_mmx2(dst, src, stride, h, x, y, h264_rnd_reg); } #undef H264_CHROMA_OP #undef H264_CHROMA_OP4 #undef H264_CHROMA_MC8_TMPL #undef H264_CHROMA_MC4_TMPL #undef H264_CHROMA_MC2_TMPL #undef H264_CHROMA_MC8_MV0 #define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t" #define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\ "pavgusb " #T ", " #D " \n\t" #define H264_CHROMA_MC8_TMPL avg_h264_chroma_generic_mc8_3dnow #define H264_CHROMA_MC4_TMPL avg_h264_chroma_generic_mc4_3dnow #define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow #include "dsputil_h264_template_mmx.c" static void avg_h264_chroma_mc8_3dnow_rnd(uint8_t *dst/*align 8*/, uint8_t 
*src/*align 1*/, int stride, int h, int x, int y) { avg_h264_chroma_generic_mc8_3dnow(dst, src, stride, h, x, y, h264_rnd_reg); } static void avg_h264_chroma_mc4_3dnow(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { avg_h264_chroma_generic_mc4_3dnow(dst, src, stride, h, x, y, h264_rnd_reg); } #undef H264_CHROMA_OP #undef H264_CHROMA_OP4 #undef H264_CHROMA_MC8_TMPL #undef H264_CHROMA_MC4_TMPL #undef H264_CHROMA_MC8_MV0 #if HAVE_SSSE3 #define AVG_OP(X) #undef H264_CHROMA_MC8_TMPL #undef H264_CHROMA_MC4_TMPL #define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_ssse3 #define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_ssse3 #define H264_CHROMA_MC8_MV0 put_pixels8_mmx #include "dsputil_h264_template_ssse3.c" static void put_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1); } static void put_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0); } #undef AVG_OP #undef H264_CHROMA_MC8_TMPL #undef H264_CHROMA_MC4_TMPL #undef H264_CHROMA_MC8_MV0 #define AVG_OP(X) X #define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_ssse3 #define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_ssse3 #define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2 #include "dsputil_h264_template_ssse3.c" static void avg_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1); } static void avg_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0); } #undef AVG_OP #undef H264_CHROMA_MC8_TMPL #undef H264_CHROMA_MC4_TMPL #undef H264_CHROMA_MC8_MV0 #endif /***********************************/ /* weighted prediction */ static inline void 
ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h) { int x, y; offset <<= log2_denom; offset += (1 << log2_denom) >> 1; __asm__ volatile( "movd %0, %%mm4 \n\t" "movd %1, %%mm5 \n\t" "movd %2, %%mm6 \n\t" "pshufw $0, %%mm4, %%mm4 \n\t" "pshufw $0, %%mm5, %%mm5 \n\t" "pxor %%mm7, %%mm7 \n\t" :: "g"(weight), "g"(offset), "g"(log2_denom) ); for(y=0; y<h; y+=2){ for(x=0; x<w; x+=4){ __asm__ volatile( "movd %0, %%mm0 \n\t" "movd %1, %%mm1 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpcklbw %%mm7, %%mm1 \n\t" "pmullw %%mm4, %%mm0 \n\t" "pmullw %%mm4, %%mm1 \n\t" "paddsw %%mm5, %%mm0 \n\t" "paddsw %%mm5, %%mm1 \n\t" "psraw %%mm6, %%mm0 \n\t" "psraw %%mm6, %%mm1 \n\t" "packuswb %%mm7, %%mm0 \n\t" "packuswb %%mm7, %%mm1 \n\t" "movd %%mm0, %0 \n\t" "movd %%mm1, %1 \n\t" : "+m"(*(uint32_t*)(dst+x)), "+m"(*(uint32_t*)(dst+x+stride)) ); } dst += 2*stride; } } static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h) { int x, y; offset = ((offset + 1) | 1) << log2_denom; __asm__ volatile( "movd %0, %%mm3 \n\t" "movd %1, %%mm4 \n\t" "movd %2, %%mm5 \n\t" "movd %3, %%mm6 \n\t" "pshufw $0, %%mm3, %%mm3 \n\t" "pshufw $0, %%mm4, %%mm4 \n\t" "pshufw $0, %%mm5, %%mm5 \n\t" "pxor %%mm7, %%mm7 \n\t" :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1) ); for(y=0; y<h; y++){ for(x=0; x<w; x+=4){ __asm__ volatile( "movd %0, %%mm0 \n\t" "movd %1, %%mm1 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpcklbw %%mm7, %%mm1 \n\t" "pmullw %%mm3, %%mm0 \n\t" "pmullw %%mm4, %%mm1 \n\t" "paddsw %%mm1, %%mm0 \n\t" "paddsw %%mm5, %%mm0 \n\t" "psraw %%mm6, %%mm0 \n\t" "packuswb %%mm0, %%mm0 \n\t" "movd %%mm0, %0 \n\t" : "+m"(*(uint32_t*)(dst+x)) : "m"(*(uint32_t*)(src+x)) ); } src += stride; dst += stride; } } #define H264_WEIGHT(W,H) \ static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int 
weights, int offset){ \ ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \ } \ static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \ ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \ } H264_WEIGHT(16,16) H264_WEIGHT(16, 8) H264_WEIGHT( 8,16) H264_WEIGHT( 8, 8) H264_WEIGHT( 8, 4) H264_WEIGHT( 4, 8) H264_WEIGHT( 4, 4) H264_WEIGHT( 4, 2)
123linslouis-android-video-cutter
jni/libavcodec/x86/h264dsp_mmx.c
C
asf20
95,736
/* * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_X86_FFT_H #define AVCODEC_X86_FFT_H #include "libavcodec/fft.h" void ff_fft_permute_sse(FFTContext *s, FFTComplex *z); void ff_fft_calc_sse(FFTContext *s, FFTComplex *z); void ff_fft_calc_3dn(FFTContext *s, FFTComplex *z); void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z); void ff_imdct_calc_3dn(FFTContext *s, FFTSample *output, const FFTSample *input); void ff_imdct_half_3dn(FFTContext *s, FFTSample *output, const FFTSample *input); void ff_imdct_calc_3dn2(FFTContext *s, FFTSample *output, const FFTSample *input); void ff_imdct_half_3dn2(FFTContext *s, FFTSample *output, const FFTSample *input); void ff_imdct_calc_sse(FFTContext *s, FFTSample *output, const FFTSample *input); void ff_imdct_half_sse(FFTContext *s, FFTSample *output, const FFTSample *input); #endif
123linslouis-android-video-cutter
jni/libavcodec/x86/fft.h
C
asf20
1,561
/* * MMX optimized DSP utils * Copyright (c) 2007 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_X86_DSPUTIL_MMX_H #define AVCODEC_X86_DSPUTIL_MMX_H #include <stdint.h> #include "libavcodec/dsputil.h" typedef struct { uint64_t a, b; } xmm_reg; extern const uint64_t ff_bone; extern const uint64_t ff_wtwo; extern const uint64_t ff_pdw_80000000[2]; extern const uint64_t ff_pw_3; extern const uint64_t ff_pw_4; extern const xmm_reg ff_pw_5; extern const xmm_reg ff_pw_8; extern const uint64_t ff_pw_15; extern const xmm_reg ff_pw_16; extern const uint64_t ff_pw_20; extern const xmm_reg ff_pw_28; extern const xmm_reg ff_pw_32; extern const uint64_t ff_pw_42; extern const xmm_reg ff_pw_64; extern const uint64_t ff_pw_96; extern const uint64_t ff_pw_128; extern const uint64_t ff_pw_255; extern const uint64_t ff_pb_1; extern const uint64_t ff_pb_3; extern const uint64_t ff_pb_7; extern const uint64_t ff_pb_1F; extern const uint64_t ff_pb_3F; extern const uint64_t ff_pb_81; extern const uint64_t ff_pb_A1; extern const uint64_t ff_pb_FC; extern const double ff_pd_1[2]; extern const double ff_pd_2[2]; #define LOAD4(stride,in,a,b,c,d)\ "movq 0*"#stride"+"#in", "#a"\n\t"\ "movq 1*"#stride"+"#in", "#b"\n\t"\ "movq 2*"#stride"+"#in", "#c"\n\t"\ 
"movq 3*"#stride"+"#in", "#d"\n\t" #define STORE4(stride,out,a,b,c,d)\ "movq "#a", 0*"#stride"+"#out"\n\t"\ "movq "#b", 1*"#stride"+"#out"\n\t"\ "movq "#c", 2*"#stride"+"#out"\n\t"\ "movq "#d", 3*"#stride"+"#out"\n\t" /* in/out: mma=mma+mmb, mmb=mmb-mma */ #define SUMSUB_BA( a, b ) \ "paddw "#b", "#a" \n\t"\ "paddw "#b", "#b" \n\t"\ "psubw "#a", "#b" \n\t" #define SBUTTERFLY(a,b,t,n,m)\ "mov" #m " " #a ", " #t " \n\t" /* abcd */\ "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\ "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\ #define TRANSPOSE4(a,b,c,d,t)\ SBUTTERFLY(a,b,t,wd,q) /* a=aebf t=cgdh */\ SBUTTERFLY(c,d,b,wd,q) /* c=imjn b=kolp */\ SBUTTERFLY(a,c,d,dq,q) /* a=aeim d=bfjn */\ SBUTTERFLY(t,b,c,dq,q) /* t=cgko c=dhlp */ // e,f,g,h can be memory // out: a,d,t,c #define TRANSPOSE8x4(a,b,c,d,e,f,g,h,t)\ "punpcklbw " #e ", " #a " \n\t" /* a0 e0 a1 e1 a2 e2 a3 e3 */\ "punpcklbw " #f ", " #b " \n\t" /* b0 f0 b1 f1 b2 f2 b3 f3 */\ "punpcklbw " #g ", " #c " \n\t" /* c0 g0 c1 g1 c2 g2 d3 g3 */\ "punpcklbw " #h ", " #d " \n\t" /* d0 h0 d1 h1 d2 h2 d3 h3 */\ SBUTTERFLY(a, b, t, bw, q) /* a= a0 b0 e0 f0 a1 b1 e1 f1 */\ /* t= a2 b2 e2 f2 a3 b3 e3 f3 */\ SBUTTERFLY(c, d, b, bw, q) /* c= c0 d0 g0 h0 c1 d1 g1 h1 */\ /* b= c2 d2 g2 h2 c3 d3 g3 h3 */\ SBUTTERFLY(a, c, d, wd, q) /* a= a0 b0 c0 d0 e0 f0 g0 h0 */\ /* d= a1 b1 c1 d1 e1 f1 g1 h1 */\ SBUTTERFLY(t, b, c, wd, q) /* t= a2 b2 c2 d2 e2 f2 g2 h2 */\ /* c= a3 b3 c3 d3 e3 f3 g3 h3 */ #if ARCH_X86_64 // permutes 01234567 -> 05736421 #define TRANSPOSE8(a,b,c,d,e,f,g,h,t)\ SBUTTERFLY(a,b,%%xmm8,wd,dqa)\ SBUTTERFLY(c,d,b,wd,dqa)\ SBUTTERFLY(e,f,d,wd,dqa)\ SBUTTERFLY(g,h,f,wd,dqa)\ SBUTTERFLY(a,c,h,dq,dqa)\ SBUTTERFLY(%%xmm8,b,c,dq,dqa)\ SBUTTERFLY(e,g,b,dq,dqa)\ SBUTTERFLY(d,f,g,dq,dqa)\ SBUTTERFLY(a,e,f,qdq,dqa)\ SBUTTERFLY(%%xmm8,d,e,qdq,dqa)\ SBUTTERFLY(h,b,d,qdq,dqa)\ SBUTTERFLY(c,g,b,qdq,dqa)\ "movdqa %%xmm8, "#g" \n\t" #else #define TRANSPOSE8(a,b,c,d,e,f,g,h,t)\ "movdqa "#h", "#t" \n\t"\ SBUTTERFLY(a,b,h,wd,dqa)\ "movdqa 
"#h", 16"#t" \n\t"\ "movdqa "#t", "#h" \n\t"\ SBUTTERFLY(c,d,b,wd,dqa)\ SBUTTERFLY(e,f,d,wd,dqa)\ SBUTTERFLY(g,h,f,wd,dqa)\ SBUTTERFLY(a,c,h,dq,dqa)\ "movdqa "#h", "#t" \n\t"\ "movdqa 16"#t", "#h" \n\t"\ SBUTTERFLY(h,b,c,dq,dqa)\ SBUTTERFLY(e,g,b,dq,dqa)\ SBUTTERFLY(d,f,g,dq,dqa)\ SBUTTERFLY(a,e,f,qdq,dqa)\ SBUTTERFLY(h,d,e,qdq,dqa)\ "movdqa "#h", 16"#t" \n\t"\ "movdqa "#t", "#h" \n\t"\ SBUTTERFLY(h,b,d,qdq,dqa)\ SBUTTERFLY(c,g,b,qdq,dqa)\ "movdqa 16"#t", "#g" \n\t" #endif #define MOVQ_WONE(regd) \ __asm__ volatile ( \ "pcmpeqd %%" #regd ", %%" #regd " \n\t" \ "psrlw $15, %%" #regd ::) void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx); void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx); void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size); void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size); void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size); void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx); void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx); void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride); void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride); void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride); void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride); void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx); void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd); void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd); void ff_lpc_compute_autocorr_sse2(const int32_t *data, int len, int lag, double *autoc); void ff_mmx_idct(DCTELEM *block); void ff_mmxext_idct(DCTELEM *block); #endif /* AVCODEC_X86_DSPUTIL_MMX_H */
123linslouis-android-video-cutter
jni/libavcodec/x86/dsputil_mmx.h
C
asf20
6,456
/* * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * H.264 / AVC / MPEG4 part10 codec. * non-MMX i386-specific optimizations for H.264 * @author Michael Niedermayer <michaelni@gmx.at> */ #ifndef AVCODEC_X86_H264_I386_H #define AVCODEC_X86_H264_I386_H #include "libavcodec/cabac.h" //FIXME use some macros to avoid duplicating get_cabac (cannot be done yet //as that would make optimization work hard) #if ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE && !defined(BROKEN_RELOCATIONS) static int decode_significance_x86(CABACContext *c, int max_coeff, uint8_t *significant_coeff_ctx_base, int *index){ void *end= significant_coeff_ctx_base + max_coeff - 1; int minusstart= -(int)significant_coeff_ctx_base; int minusindex= 4-(int)index; int coeff_count; __asm__ volatile( "movl "RANGE "(%3), %%esi \n\t" "movl "LOW "(%3), %%ebx \n\t" "2: \n\t" BRANCHLESS_GET_CABAC("%%edx", "%3", "(%1)", "%%ebx", "%%bx", "%%esi", "%%eax", "%%al") "test $1, %%edx \n\t" " jz 3f \n\t" BRANCHLESS_GET_CABAC("%%edx", "%3", "61(%1)", "%%ebx", "%%bx", "%%esi", "%%eax", "%%al") "mov %2, %%"REG_a" \n\t" "movl %4, %%ecx \n\t" "add %1, %%"REG_c" \n\t" "movl %%ecx, (%%"REG_a") \n\t" "test $1, %%edx \n\t" " 
jnz 4f \n\t" "add $4, %%"REG_a" \n\t" "mov %%"REG_a", %2 \n\t" "3: \n\t" "add $1, %1 \n\t" "cmp %5, %1 \n\t" " jb 2b \n\t" "mov %2, %%"REG_a" \n\t" "movl %4, %%ecx \n\t" "add %1, %%"REG_c" \n\t" "movl %%ecx, (%%"REG_a") \n\t" "4: \n\t" "add %6, %%eax \n\t" "shr $2, %%eax \n\t" "movl %%esi, "RANGE "(%3) \n\t" "movl %%ebx, "LOW "(%3) \n\t" :"=&a"(coeff_count), "+r"(significant_coeff_ctx_base), "+m"(index) :"r"(c), "m"(minusstart), "m"(end), "m"(minusindex) : "%"REG_c, "%ebx", "%edx", "%esi", "memory" ); return coeff_count; } static int decode_significance_8x8_x86(CABACContext *c, uint8_t *significant_coeff_ctx_base, int *index, const uint8_t *sig_off){ int minusindex= 4-(int)index; int coeff_count; x86_reg last=0; __asm__ volatile( "movl "RANGE "(%3), %%esi \n\t" "movl "LOW "(%3), %%ebx \n\t" "mov %1, %%"REG_D" \n\t" "2: \n\t" "mov %6, %%"REG_a" \n\t" "movzbl (%%"REG_a", %%"REG_D"), %%edi \n\t" "add %5, %%"REG_D" \n\t" BRANCHLESS_GET_CABAC("%%edx", "%3", "(%%"REG_D")", "%%ebx", "%%bx", "%%esi", "%%eax", "%%al") "mov %1, %%edi \n\t" "test $1, %%edx \n\t" " jz 3f \n\t" "movzbl "MANGLE(last_coeff_flag_offset_8x8)"(%%edi), %%edi\n\t" "add %5, %%"REG_D" \n\t" BRANCHLESS_GET_CABAC("%%edx", "%3", "15(%%"REG_D")", "%%ebx", "%%bx", "%%esi", "%%eax", "%%al") "mov %2, %%"REG_a" \n\t" "mov %1, %%edi \n\t" "movl %%edi, (%%"REG_a") \n\t" "test $1, %%edx \n\t" " jnz 4f \n\t" "add $4, %%"REG_a" \n\t" "mov %%"REG_a", %2 \n\t" "3: \n\t" "addl $1, %%edi \n\t" "mov %%edi, %1 \n\t" "cmpl $63, %%edi \n\t" " jb 2b \n\t" "mov %2, %%"REG_a" \n\t" "movl %%edi, (%%"REG_a") \n\t" "4: \n\t" "addl %4, %%eax \n\t" "shr $2, %%eax \n\t" "movl %%esi, "RANGE "(%3) \n\t" "movl %%ebx, "LOW "(%3) \n\t" :"=&a"(coeff_count),"+m"(last), "+m"(index) :"r"(c), "m"(minusindex), "m"(significant_coeff_ctx_base), "m"(sig_off) : "%"REG_c, "%ebx", "%edx", "%esi", "%"REG_D, "memory" ); return coeff_count; } #endif /* ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE */ /* !defined(BROKEN_RELOCATIONS) */ #endif /* 
AVCODEC_X86_H264_I386_H */
123linslouis-android-video-cutter
jni/libavcodec/x86/h264_i386.h
C
asf20
6,231
/* * VC-1 and WMV3 - DSP functions MMX-optimized * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr> * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ #include "libavutil/x86_cpu.h" #include "libavcodec/dsputil.h" #include "dsputil_mmx.h" #define OP_PUT(S,D) #define OP_AVG(S,D) "pavgb " #S ", " #D " \n\t" /** Add rounder from mm7 to mm3 and pack result at destination */ #define NORMALIZE_MMX(SHIFT) \ "paddw %%mm7, %%mm3 \n\t" /* +bias-r */ \ "paddw %%mm7, %%mm4 \n\t" /* +bias-r */ \ "psraw "SHIFT", %%mm3 \n\t" \ "psraw "SHIFT", %%mm4 \n\t" #define TRANSFER_DO_PACK(OP) \ "packuswb %%mm4, %%mm3 \n\t" \ OP((%2), %%mm3) \ "movq %%mm3, (%2) \n\t" #define TRANSFER_DONT_PACK(OP) \ OP(0(%2), %%mm3) \ OP(8(%2), %%mm4) \ "movq %%mm3, 0(%2) \n\t" \ "movq %%mm4, 8(%2) \n\t" /** @see MSPEL_FILTER13_CORE for use as UNPACK macro */ #define DO_UNPACK(reg) "punpcklbw %%mm0, " reg "\n\t" #define DONT_UNPACK(reg) /** Compute the rounder 32-r or 8-r and unpacks it to mm7 */ #define LOAD_ROUNDER_MMX(ROUND) \ "movd "ROUND", %%mm7 \n\t" \ "punpcklwd %%mm7, %%mm7 \n\t" \ "punpckldq %%mm7, %%mm7 \n\t" #define SHIFT2_LINE(OFF, R0,R1,R2,R3) \ "paddw %%mm"#R2", %%mm"#R1" \n\t" \ "movd (%0,%3), %%mm"#R0" \n\t" \ "pmullw %%mm6, %%mm"#R1" \n\t" \ "punpcklbw %%mm0, %%mm"#R0" \n\t" \ "movd (%0,%2), %%mm"#R3" \n\t" \ "psubw %%mm"#R0", %%mm"#R1" \n\t" \ "punpcklbw %%mm0, %%mm"#R3" \n\t" \ "paddw %%mm7, %%mm"#R1" \n\t" \ "psubw %%mm"#R3", %%mm"#R1" \n\t" \ "psraw %4, %%mm"#R1" \n\t" \ "movq %%mm"#R1", "#OFF"(%1) \n\t" \ "add %2, %0 \n\t" DECLARE_ALIGNED(16, const uint64_t, ff_pw_9) = 0x0009000900090009ULL; /** Sacrifying mm6 allows to pipeline loads from src */ static void vc1_put_ver_16b_shift2_mmx(int16_t *dst, const uint8_t *src, x86_reg stride, int rnd, int64_t shift) { __asm__ volatile( "mov $3, %%"REG_c" \n\t" LOAD_ROUNDER_MMX("%5") "movq "MANGLE(ff_pw_9)", %%mm6 \n\t" "1: \n\t" "movd (%0), %%mm2 \n\t" "add %2, %0 \n\t" "movd (%0), %%mm3 \n\t" "punpcklbw %%mm0, %%mm2 \n\t" "punpcklbw %%mm0, %%mm3 \n\t" SHIFT2_LINE( 0, 1, 2, 3, 4) SHIFT2_LINE( 24, 2, 3, 4, 1) SHIFT2_LINE( 48, 3, 4, 1, 2) SHIFT2_LINE( 72, 4, 1, 2, 3) SHIFT2_LINE( 96, 1, 2, 
3, 4) SHIFT2_LINE(120, 2, 3, 4, 1) SHIFT2_LINE(144, 3, 4, 1, 2) SHIFT2_LINE(168, 4, 1, 2, 3) "sub %6, %0 \n\t" "add $8, %1 \n\t" "dec %%"REG_c" \n\t" "jnz 1b \n\t" : "+r"(src), "+r"(dst) : "r"(stride), "r"(-2*stride), "m"(shift), "m"(rnd), "r"(9*stride-4) : "%"REG_c, "memory" ); } /** * Data is already unpacked, so some operations can directly be made from * memory. */ #define VC1_HOR_16b_SHIFT2(OP, OPNAME)\ static void OPNAME ## vc1_hor_16b_shift2_mmx(uint8_t *dst, x86_reg stride,\ const int16_t *src, int rnd)\ {\ int h = 8;\ \ src -= 1;\ rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */\ __asm__ volatile(\ LOAD_ROUNDER_MMX("%4")\ "movq "MANGLE(ff_pw_128)", %%mm6\n\t"\ "movq "MANGLE(ff_pw_9)", %%mm5 \n\t"\ "1: \n\t"\ "movq 2*0+0(%1), %%mm1 \n\t"\ "movq 2*0+8(%1), %%mm2 \n\t"\ "movq 2*1+0(%1), %%mm3 \n\t"\ "movq 2*1+8(%1), %%mm4 \n\t"\ "paddw 2*3+0(%1), %%mm1 \n\t"\ "paddw 2*3+8(%1), %%mm2 \n\t"\ "paddw 2*2+0(%1), %%mm3 \n\t"\ "paddw 2*2+8(%1), %%mm4 \n\t"\ "pmullw %%mm5, %%mm3 \n\t"\ "pmullw %%mm5, %%mm4 \n\t"\ "psubw %%mm1, %%mm3 \n\t"\ "psubw %%mm2, %%mm4 \n\t"\ NORMALIZE_MMX("$7")\ /* Remove bias */\ "paddw %%mm6, %%mm3 \n\t"\ "paddw %%mm6, %%mm4 \n\t"\ TRANSFER_DO_PACK(OP)\ "add $24, %1 \n\t"\ "add %3, %2 \n\t"\ "decl %0 \n\t"\ "jnz 1b \n\t"\ : "+r"(h), "+r" (src), "+r" (dst)\ : "r"(stride), "m"(rnd)\ : "memory"\ );\ } VC1_HOR_16b_SHIFT2(OP_PUT, put_) VC1_HOR_16b_SHIFT2(OP_AVG, avg_) /** * Purely vertical or horizontal 1/2 shift interpolation. * Sacrify mm6 for *9 factor. 
*/ #define VC1_SHIFT2(OP, OPNAME)\ static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\ x86_reg stride, int rnd, x86_reg offset)\ {\ rnd = 8-rnd;\ __asm__ volatile(\ "mov $8, %%"REG_c" \n\t"\ LOAD_ROUNDER_MMX("%5")\ "movq "MANGLE(ff_pw_9)", %%mm6\n\t"\ "1: \n\t"\ "movd 0(%0 ), %%mm3 \n\t"\ "movd 4(%0 ), %%mm4 \n\t"\ "movd 0(%0,%2), %%mm1 \n\t"\ "movd 4(%0,%2), %%mm2 \n\t"\ "add %2, %0 \n\t"\ "punpcklbw %%mm0, %%mm3 \n\t"\ "punpcklbw %%mm0, %%mm4 \n\t"\ "punpcklbw %%mm0, %%mm1 \n\t"\ "punpcklbw %%mm0, %%mm2 \n\t"\ "paddw %%mm1, %%mm3 \n\t"\ "paddw %%mm2, %%mm4 \n\t"\ "movd 0(%0,%3), %%mm1 \n\t"\ "movd 4(%0,%3), %%mm2 \n\t"\ "pmullw %%mm6, %%mm3 \n\t" /* 0,9,9,0*/\ "pmullw %%mm6, %%mm4 \n\t" /* 0,9,9,0*/\ "punpcklbw %%mm0, %%mm1 \n\t"\ "punpcklbw %%mm0, %%mm2 \n\t"\ "psubw %%mm1, %%mm3 \n\t" /*-1,9,9,0*/\ "psubw %%mm2, %%mm4 \n\t" /*-1,9,9,0*/\ "movd 0(%0,%2), %%mm1 \n\t"\ "movd 4(%0,%2), %%mm2 \n\t"\ "punpcklbw %%mm0, %%mm1 \n\t"\ "punpcklbw %%mm0, %%mm2 \n\t"\ "psubw %%mm1, %%mm3 \n\t" /*-1,9,9,-1*/\ "psubw %%mm2, %%mm4 \n\t" /*-1,9,9,-1*/\ NORMALIZE_MMX("$4")\ "packuswb %%mm4, %%mm3 \n\t"\ OP((%1), %%mm3)\ "movq %%mm3, (%1) \n\t"\ "add %6, %0 \n\t"\ "add %4, %1 \n\t"\ "dec %%"REG_c" \n\t"\ "jnz 1b \n\t"\ : "+r"(src), "+r"(dst)\ : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),\ "g"(stride-offset)\ : "%"REG_c, "memory"\ );\ } VC1_SHIFT2(OP_PUT, put_) VC1_SHIFT2(OP_AVG, avg_) /** * Filter coefficients made global to allow access by all 1 or 3 quarter shift * interpolation functions. */ DECLARE_ASM_CONST(16, uint64_t, ff_pw_53) = 0x0035003500350035ULL; DECLARE_ASM_CONST(16, uint64_t, ff_pw_18) = 0x0012001200120012ULL; /** * Core of the 1/4 and 3/4 shift bicubic interpolation. * * @param UNPACK Macro unpacking arguments from 8 to 16bits (can be empty). * @param MOVQ "movd 1" or "movq 2", if data read is already unpacked. * @param A1 Address of 1st tap (beware of unpacked/packed). 
* @param A2 Address of 2nd tap * @param A3 Address of 3rd tap * @param A4 Address of 4th tap */ #define MSPEL_FILTER13_CORE(UNPACK, MOVQ, A1, A2, A3, A4) \ MOVQ "*0+"A1", %%mm1 \n\t" \ MOVQ "*4+"A1", %%mm2 \n\t" \ UNPACK("%%mm1") \ UNPACK("%%mm2") \ "pmullw "MANGLE(ff_pw_3)", %%mm1\n\t" \ "pmullw "MANGLE(ff_pw_3)", %%mm2\n\t" \ MOVQ "*0+"A2", %%mm3 \n\t" \ MOVQ "*4+"A2", %%mm4 \n\t" \ UNPACK("%%mm3") \ UNPACK("%%mm4") \ "pmullw %%mm6, %%mm3 \n\t" /* *18 */ \ "pmullw %%mm6, %%mm4 \n\t" /* *18 */ \ "psubw %%mm1, %%mm3 \n\t" /* 18,-3 */ \ "psubw %%mm2, %%mm4 \n\t" /* 18,-3 */ \ MOVQ "*0+"A4", %%mm1 \n\t" \ MOVQ "*4+"A4", %%mm2 \n\t" \ UNPACK("%%mm1") \ UNPACK("%%mm2") \ "psllw $2, %%mm1 \n\t" /* 4* */ \ "psllw $2, %%mm2 \n\t" /* 4* */ \ "psubw %%mm1, %%mm3 \n\t" /* -4,18,-3 */ \ "psubw %%mm2, %%mm4 \n\t" /* -4,18,-3 */ \ MOVQ "*0+"A3", %%mm1 \n\t" \ MOVQ "*4+"A3", %%mm2 \n\t" \ UNPACK("%%mm1") \ UNPACK("%%mm2") \ "pmullw %%mm5, %%mm1 \n\t" /* *53 */ \ "pmullw %%mm5, %%mm2 \n\t" /* *53 */ \ "paddw %%mm1, %%mm3 \n\t" /* 4,53,18,-3 */ \ "paddw %%mm2, %%mm4 \n\t" /* 4,53,18,-3 */ /** * Macro to build the vertical 16bits version of vc1_put_shift[13]. * Here, offset=src_stride. Parameters passed A1 to A4 must use * %3 (src_stride) and %4 (3*src_stride). 
* * @param NAME Either 1 or 3 * @see MSPEL_FILTER13_CORE for information on A1->A4 */ #define MSPEL_FILTER13_VER_16B(NAME, A1, A2, A3, A4) \ static void \ vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src, \ x86_reg src_stride, \ int rnd, int64_t shift) \ { \ int h = 8; \ src -= src_stride; \ __asm__ volatile( \ LOAD_ROUNDER_MMX("%5") \ "movq "MANGLE(ff_pw_53)", %%mm5\n\t" \ "movq "MANGLE(ff_pw_18)", %%mm6\n\t" \ ASMALIGN(3) \ "1: \n\t" \ MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4) \ NORMALIZE_MMX("%6") \ TRANSFER_DONT_PACK(OP_PUT) \ /* Last 3 (in fact 4) bytes on the line */ \ "movd 8+"A1", %%mm1 \n\t" \ DO_UNPACK("%%mm1") \ "movq %%mm1, %%mm3 \n\t" \ "paddw %%mm1, %%mm1 \n\t" \ "paddw %%mm3, %%mm1 \n\t" /* 3* */ \ "movd 8+"A2", %%mm3 \n\t" \ DO_UNPACK("%%mm3") \ "pmullw %%mm6, %%mm3 \n\t" /* *18 */ \ "psubw %%mm1, %%mm3 \n\t" /*18,-3 */ \ "movd 8+"A3", %%mm1 \n\t" \ DO_UNPACK("%%mm1") \ "pmullw %%mm5, %%mm1 \n\t" /* *53 */ \ "paddw %%mm1, %%mm3 \n\t" /*53,18,-3 */ \ "movd 8+"A4", %%mm1 \n\t" \ DO_UNPACK("%%mm1") \ "psllw $2, %%mm1 \n\t" /* 4* */ \ "psubw %%mm1, %%mm3 \n\t" \ "paddw %%mm7, %%mm3 \n\t" \ "psraw %6, %%mm3 \n\t" \ "movq %%mm3, 16(%2) \n\t" \ "add %3, %1 \n\t" \ "add $24, %2 \n\t" \ "decl %0 \n\t" \ "jnz 1b \n\t" \ : "+r"(h), "+r" (src), "+r" (dst) \ : "r"(src_stride), "r"(3*src_stride), \ "m"(rnd), "m"(shift) \ : "memory" \ ); \ } /** * Macro to build the horizontal 16bits version of vc1_put_shift[13]. * Here, offset=16bits, so parameters passed A1 to A4 should be simple. 
* * @param NAME Either 1 or 3 * @see MSPEL_FILTER13_CORE for information on A1->A4 */ #define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4, OP, OPNAME) \ static void \ OPNAME ## vc1_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride, \ const int16_t *src, int rnd) \ { \ int h = 8; \ src -= 1; \ rnd -= (-4+58+13-3)*256; /* Add -256 bias */ \ __asm__ volatile( \ LOAD_ROUNDER_MMX("%4") \ "movq "MANGLE(ff_pw_18)", %%mm6 \n\t" \ "movq "MANGLE(ff_pw_53)", %%mm5 \n\t" \ ASMALIGN(3) \ "1: \n\t" \ MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4) \ NORMALIZE_MMX("$7") \ /* Remove bias */ \ "paddw "MANGLE(ff_pw_128)", %%mm3 \n\t" \ "paddw "MANGLE(ff_pw_128)", %%mm4 \n\t" \ TRANSFER_DO_PACK(OP) \ "add $24, %1 \n\t" \ "add %3, %2 \n\t" \ "decl %0 \n\t" \ "jnz 1b \n\t" \ : "+r"(h), "+r" (src), "+r" (dst) \ : "r"(stride), "m"(rnd) \ : "memory" \ ); \ } /** * Macro to build the 8bits, any direction, version of vc1_put_shift[13]. * Here, offset=src_stride. Parameters passed A1 to A4 must use * %3 (offset) and %4 (3*offset). 
 *
 * @param NAME Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
/* Declares an 8-bit in / 8-bit out shifted-bicubic interpolator over 8 rows.
 * OPNAME ("put_"/"avg_") selects the store semantics used by
 * TRANSFER_DO_PACK; A1..A4 are the four source taps (addressing strings). */
#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4, OP, OPNAME) \
static void \
OPNAME ## vc1_## NAME ## _mmx(uint8_t *dst, const uint8_t *src, \
                              x86_reg stride, int rnd, x86_reg offset) \
{ \
    int h = 8; \
    src -= offset; \
    rnd = 32-rnd; /* rounding bias folded into the >>6 normalization */ \
    __asm__ volatile ( \
        LOAD_ROUNDER_MMX("%6") \
        "movq "MANGLE(ff_pw_53)", %%mm5 \n\t" \
        "movq "MANGLE(ff_pw_18)", %%mm6 \n\t" \
        ASMALIGN(3) \
        "1: \n\t" \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4) \
        NORMALIZE_MMX("$6") \
        TRANSFER_DO_PACK(OP) \
        "add %5, %1 \n\t" \
        "add %5, %2 \n\t" \
        "decl %0 \n\t" \
        "jnz 1b \n\t" \
        : "+r"(h), "+r" (src), "+r" (dst) \
        : "r"(offset), "r"(3*offset), "g"(stride), "m"(rnd) \
        : "memory" \
    ); \
}

/** 1/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )")
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_AVG, avg_)

/** 3/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )")
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_AVG, avg_)

/* Function-pointer types for the three filter stages:
 * vertical 8->16 bit, horizontal 16->8 bit, and direct 8->8 bit. */
typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst, const uint8_t *src, x86_reg src_stride, int rnd, int64_t shift);
typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst, x86_reg dst_stride, const int16_t *src, int rnd);
typedef void (*vc1_mspel_mc_filter_8bits)(uint8_t *dst, const uint8_t *src, x86_reg stride, int rnd, x86_reg offset);

/**
 * Interpolates fractional pel values by applying proper vertical then
 * horizontal filter.
 *
 * @param dst Destination buffer for interpolated pels.
 * @param src Source buffer.
 * @param stride Stride for both src and dst buffers.
 * @param hmode Horizontal filter (expressed in quarter pixels shift).
 * @param vmode Vertical filter (expressed in quarter pixels shift).
 * @param rnd Rounding bias.
 */
#define VC1_MSPEL_MC(OP)\
static void OP ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,\
                               int hmode, int vmode, int rnd)\
{\
    /* Dispatch tables indexed by quarter-pel shift (1..3); entry 0 unused. */\
    static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =\
         { NULL, vc1_put_ver_16b_shift1_mmx, vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =\
         { NULL, OP ## vc1_hor_16b_shift1_mmx, OP ## vc1_hor_16b_shift2_mmx, OP ## vc1_hor_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =\
         { NULL, OP ## vc1_shift1_mmx, OP ## vc1_shift2_mmx, OP ## vc1_shift3_mmx };\
\
    /* Clear mm0: the filter kernels rely on it holding zero. */\
    __asm__ volatile(\
        "pxor %%mm0, %%mm0 \n\t"\
        ::: "memory"\
    );\
\
    if (vmode) { /* Vertical filter to apply */\
        if (hmode) { /* Horizontal filter to apply, output to tmp */\
            static const int shift_value[] = { 0, 5, 1, 5 };\
            int    shift = (shift_value[hmode]+shift_value[vmode])>>1;\
            int    r;\
            DECLARE_ALIGNED(16, int16_t, tmp)[12*8];\
\
            r = (1<<(shift-1)) + rnd-1;\
            vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);\
\
            vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);\
            return;\
        }\
        else { /* No horizontal filter, output 8 lines to dst */\
            vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);\
            return;\
        }\
    }\
\
    /* Horizontal mode with no vertical mode */\
    vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);\
}

VC1_MSPEL_MC(put_)
VC1_MSPEL_MC(avg_)

/** Macro to ease bicubic filter interpolation functions declarations */
/* Emits the put_ (MMX) and avg_ (MMX2) wrappers for one (hmode, vmode) pair.
 * The (0,0) case is handled by ff_put/avg_vc1_mspel_mc00 elsewhere. */
#define DECLARE_FUNCTION(a, b)                                          \
static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
     put_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}\
static void avg_vc1_mspel_mc ## a ## b ## _mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
     avg_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}

DECLARE_FUNCTION(0, 1)
DECLARE_FUNCTION(0, 2)
DECLARE_FUNCTION(0, 3)

DECLARE_FUNCTION(1, 0)
DECLARE_FUNCTION(1, 1)
DECLARE_FUNCTION(1, 2)
DECLARE_FUNCTION(1, 3)

DECLARE_FUNCTION(2, 0)
DECLARE_FUNCTION(2, 1)
DECLARE_FUNCTION(2, 2)
DECLARE_FUNCTION(2, 3)

DECLARE_FUNCTION(3, 0)
DECLARE_FUNCTION(3, 1)
DECLARE_FUNCTION(3, 2)
DECLARE_FUNCTION(3, 3)

/** Add a replicated, unsigned-saturated DC value to a 4x4 block (MMX2).
 *  block[0] is the only nonzero coefficient of the inverse transform. */
static void vc1_inv_trans_4x4_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    /* DC-only inverse transform collapses to two scalar scalings
     * (row pass, then column pass with final rounding). */
    dc = (17 * dc +  4) >> 3;
    dc = (17 * dc + 64) >> 7;
    /* mm0 = dc replicated as bytes (positive part), mm1 = -dc replicated;
     * paddusb + psubusb below then implement a saturated signed add. */
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    /* Four rows of 4 pixels each (movd = 4 bytes). */
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0    \n\t"
        "movd       %%mm3, %1    \n\t"
        "movd       %%mm4, %2    \n\t"
        "movd       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}

/** Add a replicated, unsigned-saturated DC value to a 4x8 block (MMX2). */
static void vc1_inv_trans_4x8_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    /* DC-only 4 (row) x 8 (column) inverse transform as two scalings. */
    dc = (17 * dc +  4) >> 3;
    dc = (12 * dc + 64) >> 7;
    /* Replicate +dc in mm0 and -dc in mm1 for the saturated add below. */
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    /* Rows 0-3 (4 bytes per row). */
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0    \n\t"
        "movd       %%mm3, %1    \n\t"
        "movd       %%mm4, %2    \n\t"
        "movd       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
    /* Rows 4-7. */
    dest += 4*linesize;
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0    \n\t"
        "movd       %%mm3, %1    \n\t"
        "movd       %%mm4, %2    \n\t"
        "movd       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}

/** Add a replicated, unsigned-saturated DC value to an 8x4 block (MMX2). */
static void vc1_inv_trans_8x4_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    /* DC-only 8 (row) x 4 (column) inverse transform as two scalings. */
    dc = ( 3 * dc +  1) >> 1;
    dc = (17 * dc + 64) >> 7;
    /* Replicate +dc in mm0 and -dc in mm1 for the saturated add below. */
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    /* Four rows of 8 pixels each (movq = 8 bytes).
     * NOTE(review): movq accesses 8 bytes per row but the "+m" constraints
     * only describe a uint32_t — the compiler is under-informed about the
     * upper 4 bytes; confirm before reordering surrounding memory accesses. */
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}
/** Add a replicated, unsigned-saturated DC value to an 8x8 block (MMX2).
 *  block[0] is the only nonzero coefficient of the inverse transform. */
static void vc1_inv_trans_8x8_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    /* DC-only 8x8 inverse transform collapses to two scalar scalings. */
    dc = (3 * dc +  1) >> 1;
    dc = (3 * dc + 16) >> 5;
    /* mm0 = dc replicated as bytes (positive part), mm1 = -dc replicated;
     * paddusb + psubusb below then implement a saturated signed add. */
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    /* Rows 0-3 (movq = 8 bytes per row).
     * NOTE(review): movq accesses 8 bytes but the "+m" constraints only
     * describe a uint32_t; confirm before reordering surrounding code. */
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
    /* Rows 4-7. */
    dest += 4*linesize;
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}

/** Install the MMX/MMX2 VC-1 DSP functions into @p dsp.
 *  Table index layout is (vmode << 2) | hmode; the MMX2 block additionally
 *  installs the avg_ variants and the DC-only inverse transforms. */
void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx) {
    mm_flags = mm_support();

    dsp->put_vc1_mspel_pixels_tab[ 0] = ff_put_vc1_mspel_mc00_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_mmx;
    dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_mmx;
    dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_mmx;
    dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_mmx;
    dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_mmx;
    dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_mmx;
    dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_mmx;

    if (mm_flags & FF_MM_MMX2){
        dsp->avg_vc1_mspel_pixels_tab[ 0] = ff_avg_vc1_mspel_mc00_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_mmx2;

        dsp->avg_vc1_mspel_pixels_tab[ 1] = avg_vc1_mspel_mc10_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 5] = avg_vc1_mspel_mc11_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 9] = avg_vc1_mspel_mc12_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_mmx2;

        dsp->avg_vc1_mspel_pixels_tab[ 2] = avg_vc1_mspel_mc20_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 6] = avg_vc1_mspel_mc21_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_mmx2;

        dsp->avg_vc1_mspel_pixels_tab[ 3] = avg_vc1_mspel_mc30_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 7] = avg_vc1_mspel_mc31_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_mmx2;

        dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_mmx2;
        dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_mmx2;
        dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_mmx2;
        dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_mmx2;
    }
}
123linslouis-android-video-cutter
jni/libavcodec/x86/vc1dsp_mmx.c
C
asf20
33,398
/* * CPU detection code, extracted from mmx.h * (c)1997-99 by H. Dietz and R. Fisher * Converted to C and improved by Fabrice Bellard. * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdlib.h> #include "libavutil/x86_cpu.h" #include "libavcodec/dsputil.h" #undef printf /* ebx saving is necessary for PIC. gcc seems unable to see it alone */ #define cpuid(index,eax,ebx,ecx,edx)\ __asm__ volatile\ ("mov %%"REG_b", %%"REG_S"\n\t"\ "cpuid\n\t"\ "xchg %%"REG_b", %%"REG_S\ : "=a" (eax), "=S" (ebx),\ "=c" (ecx), "=d" (edx)\ : "0" (index)); /* Function to test if multimedia instructions are supported... */ int mm_support(void) { int rval = 0; int eax, ebx, ecx, edx; int max_std_level, max_ext_level, std_caps=0, ext_caps=0; #if ARCH_X86_32 x86_reg a, c; __asm__ volatile ( /* See if CPUID instruction is supported ... */ /* ... Get copies of EFLAGS into eax and ecx */ "pushfl\n\t" "pop %0\n\t" "mov %0, %1\n\t" /* ... Toggle the ID bit in one copy and store */ /* to the EFLAGS reg */ "xor $0x200000, %0\n\t" "push %0\n\t" "popfl\n\t" /* ... 
Get the (hopefully modified) EFLAGS */ "pushfl\n\t" "pop %0\n\t" : "=a" (a), "=c" (c) : : "cc" ); if (a == c) return 0; /* CPUID not supported */ #endif cpuid(0, max_std_level, ebx, ecx, edx); if(max_std_level >= 1){ cpuid(1, eax, ebx, ecx, std_caps); if (std_caps & (1<<23)) rval |= FF_MM_MMX; if (std_caps & (1<<25)) rval |= FF_MM_MMX2 #if HAVE_SSE | FF_MM_SSE; if (std_caps & (1<<26)) rval |= FF_MM_SSE2; if (ecx & 1) rval |= FF_MM_SSE3; if (ecx & 0x00000200 ) rval |= FF_MM_SSSE3; if (ecx & 0x00080000 ) rval |= FF_MM_SSE4; if (ecx & 0x00100000 ) rval |= FF_MM_SSE42; #endif ; } cpuid(0x80000000, max_ext_level, ebx, ecx, edx); if(max_ext_level >= 0x80000001){ cpuid(0x80000001, eax, ebx, ecx, ext_caps); if (ext_caps & (1<<31)) rval |= FF_MM_3DNOW; if (ext_caps & (1<<30)) rval |= FF_MM_3DNOWEXT; if (ext_caps & (1<<23)) rval |= FF_MM_MMX; if (ext_caps & (1<<22)) rval |= FF_MM_MMX2; } #if 0 av_log(NULL, AV_LOG_DEBUG, "%s%s%s%s%s%s%s%s%s%s\n", (rval&FF_MM_MMX) ? "MMX ":"", (rval&FF_MM_MMX2) ? "MMX2 ":"", (rval&FF_MM_SSE) ? "SSE ":"", (rval&FF_MM_SSE2) ? "SSE2 ":"", (rval&FF_MM_SSE3) ? "SSE3 ":"", (rval&FF_MM_SSSE3) ? "SSSE3 ":"", (rval&FF_MM_SSE4) ? "SSE4.1 ":"", (rval&FF_MM_SSE42) ? "SSE4.2 ":"", (rval&FF_MM_3DNOW) ? "3DNow ":"", (rval&FF_MM_3DNOWEXT) ? "3DNowExt ":""); #endif return rval; } #ifdef TEST int main ( void ) { int mm_flags; mm_flags = mm_support(); printf("mm_support = 0x%08X\n",mm_flags); return 0; } #endif
123linslouis-android-video-cutter
jni/libavcodec/x86/cpuid.c
C
asf20
3,855
/* * Simple IDCT MMX * * Copyright (c) 2001, 2002 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavcodec/dsputil.h" #include "libavcodec/simple_idct.h" #include "dsputil_mmx.h" /* 23170.475006 22725.260826 21406.727617 19265.545870 16384.000000 12872.826198 8866.956905 4520.335430 */ #define C0 23170 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #define C1 22725 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #define C2 21407 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #define C3 19266 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #if 0 #define C4 16384 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #else #define C4 16383 //cos(i*M_PI/16)*sqrt(2)*(1<<14) - 0.5 #endif #define C5 12873 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #define C6 8867 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #define C7 4520 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #define ROW_SHIFT 11 #define COL_SHIFT 20 // 6 DECLARE_ASM_CONST(8, uint64_t, wm1010)= 0xFFFF0000FFFF0000ULL; DECLARE_ASM_CONST(8, uint64_t, d40000)= 0x0000000000040000ULL; DECLARE_ALIGNED(8, static const int16_t, coeffs)[]= { 1<<(ROW_SHIFT-1), 0, 1<<(ROW_SHIFT-1), 0, // 1<<(COL_SHIFT-1), 0, 1<<(COL_SHIFT-1), 0, // 0, 1<<(COL_SHIFT-1-16), 0, 1<<(COL_SHIFT-1-16), 1<<(ROW_SHIFT-1), 1, 1<<(ROW_SHIFT-1), 0, // the 1 = 
((1<<(COL_SHIFT-1))/C4)<<ROW_SHIFT :) // 0, 0, 0, 0, // 0, 0, 0, 0, C4, C4, C4, C4, C4, -C4, C4, -C4, C2, C6, C2, C6, C6, -C2, C6, -C2, C1, C3, C1, C3, C5, C7, C5, C7, C3, -C7, C3, -C7, -C1, -C5, -C1, -C5, C5, -C1, C5, -C1, C7, C3, C7, C3, C7, -C5, C7, -C5, C3, -C1, C3, -C1 }; #if 0 static void unused_var_killer(void) { int a= wm1010 + d40000; temp[0]=a; } static void inline idctCol (int16_t * col, int16_t *input) { #undef C0 #undef C1 #undef C2 #undef C3 #undef C4 #undef C5 #undef C6 #undef C7 int a0, a1, a2, a3, b0, b1, b2, b3; const int C0 = 23170; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 const int C1 = 22725; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 const int C2 = 21407; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 const int C3 = 19266; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 const int C4 = 16383; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 const int C5 = 12873; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 const int C6 = 8867; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 const int C7 = 4520; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 /* if( !(col[8*1] | col[8*2] |col[8*3] |col[8*4] |col[8*5] |col[8*6] | col[8*7])) { col[8*0] = col[8*1] = col[8*2] = col[8*3] = col[8*4] = col[8*5] = col[8*6] = col[8*7] = col[8*0]<<3; return; }*/ col[8*0] = input[8*0 + 0]; col[8*1] = input[8*2 + 0]; col[8*2] = input[8*0 + 1]; col[8*3] = input[8*2 + 1]; col[8*4] = input[8*4 + 0]; col[8*5] = input[8*6 + 0]; col[8*6] = input[8*4 + 1]; col[8*7] = input[8*6 + 1]; a0 = C4*col[8*0] + C2*col[8*2] + C4*col[8*4] + C6*col[8*6] + (1<<(COL_SHIFT-1)); a1 = C4*col[8*0] + C6*col[8*2] - C4*col[8*4] - C2*col[8*6] + (1<<(COL_SHIFT-1)); a2 = C4*col[8*0] - C6*col[8*2] - C4*col[8*4] + C2*col[8*6] + (1<<(COL_SHIFT-1)); a3 = C4*col[8*0] - C2*col[8*2] + C4*col[8*4] - C6*col[8*6] + (1<<(COL_SHIFT-1)); b0 = C1*col[8*1] + C3*col[8*3] + C5*col[8*5] + C7*col[8*7]; b1 = C3*col[8*1] - C7*col[8*3] - C1*col[8*5] - C5*col[8*7]; b2 = C5*col[8*1] - C1*col[8*3] + C7*col[8*5] + C3*col[8*7]; b3 = C7*col[8*1] - C5*col[8*3] + C3*col[8*5] - C1*col[8*7]; 
col[8*0] = (a0 + b0) >> COL_SHIFT; col[8*1] = (a1 + b1) >> COL_SHIFT; col[8*2] = (a2 + b2) >> COL_SHIFT; col[8*3] = (a3 + b3) >> COL_SHIFT; col[8*4] = (a3 - b3) >> COL_SHIFT; col[8*5] = (a2 - b2) >> COL_SHIFT; col[8*6] = (a1 - b1) >> COL_SHIFT; col[8*7] = (a0 - b0) >> COL_SHIFT; } static void inline idctRow (int16_t * output, int16_t * input) { int16_t row[8]; int a0, a1, a2, a3, b0, b1, b2, b3; const int C0 = 23170; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 const int C1 = 22725; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 const int C2 = 21407; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 const int C3 = 19266; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 const int C4 = 16383; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 const int C5 = 12873; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 const int C6 = 8867; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 const int C7 = 4520; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 row[0] = input[0]; row[2] = input[1]; row[4] = input[4]; row[6] = input[5]; row[1] = input[8]; row[3] = input[9]; row[5] = input[12]; row[7] = input[13]; if( !(row[1] | row[2] |row[3] |row[4] |row[5] |row[6] | row[7]) ) { row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = row[7] = row[0]<<3; output[0] = row[0]; output[2] = row[1]; output[4] = row[2]; output[6] = row[3]; output[8] = row[4]; output[10] = row[5]; output[12] = row[6]; output[14] = row[7]; return; } a0 = C4*row[0] + C2*row[2] + C4*row[4] + C6*row[6] + (1<<(ROW_SHIFT-1)); a1 = C4*row[0] + C6*row[2] - C4*row[4] - C2*row[6] + (1<<(ROW_SHIFT-1)); a2 = C4*row[0] - C6*row[2] - C4*row[4] + C2*row[6] + (1<<(ROW_SHIFT-1)); a3 = C4*row[0] - C2*row[2] + C4*row[4] - C6*row[6] + (1<<(ROW_SHIFT-1)); b0 = C1*row[1] + C3*row[3] + C5*row[5] + C7*row[7]; b1 = C3*row[1] - C7*row[3] - C1*row[5] - C5*row[7]; b2 = C5*row[1] - C1*row[3] + C7*row[5] + C3*row[7]; b3 = C7*row[1] - C5*row[3] + C3*row[5] - C1*row[7]; row[0] = (a0 + b0) >> ROW_SHIFT; row[1] = (a1 + b1) >> ROW_SHIFT; row[2] = (a2 + b2) >> ROW_SHIFT; row[3] = (a3 + b3) >> ROW_SHIFT; 
row[4] = (a3 - b3) >> ROW_SHIFT; row[5] = (a2 - b2) >> ROW_SHIFT; row[6] = (a1 - b1) >> ROW_SHIFT; row[7] = (a0 - b0) >> ROW_SHIFT; output[0] = row[0]; output[2] = row[1]; output[4] = row[2]; output[6] = row[3]; output[8] = row[4]; output[10] = row[5]; output[12] = row[6]; output[14] = row[7]; } #endif static inline void idct(int16_t *block) { DECLARE_ALIGNED(8, int64_t, align_tmp)[16]; int16_t * const temp= (int16_t*)align_tmp; __asm__ volatile( #if 0 //Alternative, simpler variant #define ROW_IDCT(src0, src4, src1, src5, dst, rounder, shift) \ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ #rounder ", %%mm4 \n\t"\ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\ "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ #rounder ", %%mm0 \n\t"\ "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\ "paddd %%mm0, %%mm0 \n\t" \ "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\ "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\ "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "paddd %%mm2, %%mm5 \n\t" 
/* B1 b1 */\ "psrad $" #shift ", %%mm7 \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\ "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\ "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\ "psrad $" #shift ", %%mm1 \n\t"\ "psrad $" #shift ", %%mm2 \n\t"\ "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\ "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\ "movq %%mm7, " #dst " \n\t"\ "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ "movq %%mm2, 24+" #dst " \n\t"\ "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\ "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\ "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\ "psrad $" #shift ", %%mm2 \n\t"\ "psrad $" #shift ", %%mm0 \n\t"\ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\ "psrad $" #shift ", %%mm6 \n\t"\ "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\ "movq %%mm2, 8+" #dst " \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\ "movq %%mm4, 16+" #dst " \n\t"\ #define COL_IDCT(src0, src4, src1, src5, dst, shift) \ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ "pmaddwd %%mm1, %%mm5 \n\t" /* 
C6R6+C2R2 C6r6+C2r2 */\ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\ "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\ "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\ "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ "paddd %%mm1, %%mm7 \n\t" /* B0 b0 */\ "movq 72(%2), %%mm1 \n\t" /* -C5 -C1 -C5 -C1 */\ "pmaddwd %%mm3, %%mm1 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "paddd %%mm2, %%mm1 \n\t" /* B1 b1 */\ "psrad $" #shift ", %%mm7 \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\ "paddd %%mm1, %%mm0 \n\t" /* A1+B1 a1+b1 */\ "psubd %%mm1, %%mm2 \n\t" /* A1-B1 a1-b1 */\ "psrad $" #shift ", %%mm0 \n\t"\ "psrad $" #shift ", %%mm2 \n\t"\ "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\ "movd %%mm7, " #dst " \n\t"\ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\ "movd %%mm0, 16+" #dst " \n\t"\ "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\ "movd %%mm2, 96+" #dst " \n\t"\ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "movd %%mm4, 112+" #dst " \n\t"\ "movq " #src1 ", %%mm0 \n\t" /* R3 R1 r3 r1 */\ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ "pmaddwd %%mm0, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\ "pmaddwd 96(%2), %%mm0 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\ "paddd 
%%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\ "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\ "psrad $" #shift ", %%mm2 \n\t"\ "psrad $" #shift ", %%mm5 \n\t"\ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ "paddd %%mm0, %%mm3 \n\t" /* B3 b3 */\ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\ "psrad $" #shift ", %%mm6 \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "movd %%mm2, 32+" #dst " \n\t"\ "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\ "movd %%mm6, 48+" #dst " \n\t"\ "movd %%mm4, 64+" #dst " \n\t"\ "movd %%mm5, 80+" #dst " \n\t"\ #define DC_COND_ROW_IDCT(src0, src4, src1, src5, dst, rounder, shift) \ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ "movq "MANGLE(wm1010)", %%mm4 \n\t"\ "pand %%mm0, %%mm4 \n\t"\ "por %%mm1, %%mm4 \n\t"\ "por %%mm2, %%mm4 \n\t"\ "por %%mm3, %%mm4 \n\t"\ "packssdw %%mm4,%%mm4 \n\t"\ "movd %%mm4, %%eax \n\t"\ "orl %%eax, %%eax \n\t"\ "jz 1f \n\t"\ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ #rounder ", %%mm4 \n\t"\ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\ "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ #rounder ", %%mm0 \n\t"\ "paddd %%mm0, 
%%mm1 \n\t" /* A1 a1 */\ "paddd %%mm0, %%mm0 \n\t" \ "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\ "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\ "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\ "psrad $" #shift ", %%mm7 \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\ "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\ "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\ "psrad $" #shift ", %%mm1 \n\t"\ "psrad $" #shift ", %%mm2 \n\t"\ "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\ "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\ "movq %%mm7, " #dst " \n\t"\ "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ "movq %%mm2, 24+" #dst " \n\t"\ "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\ "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\ "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\ "psrad $" #shift ", %%mm2 \n\t"\ "psrad $" #shift ", %%mm0 \n\t"\ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\ "psrad $" #shift ", %%mm6 \n\t"\ "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\ "movq %%mm2, 8+" #dst " \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\ "movq %%mm4, 16+" #dst " \n\t"\ "jmp 2f \n\t"\ "1: \n\t"\ "pslld $16, %%mm0 \n\t"\ "#paddd 
"MANGLE(d40000)", %%mm0 \n\t"\ "psrad $13, %%mm0 \n\t"\ "packssdw %%mm0, %%mm0 \n\t"\ "movq %%mm0, " #dst " \n\t"\ "movq %%mm0, 8+" #dst " \n\t"\ "movq %%mm0, 16+" #dst " \n\t"\ "movq %%mm0, 24+" #dst " \n\t"\ "2: \n\t" //IDCT( src0, src4, src1, src5, dst, rounder, shift) ROW_IDCT( (%0), 8(%0), 16(%0), 24(%0), 0(%1),paddd 8(%2), 11) /*ROW_IDCT( 32(%0), 40(%0), 48(%0), 56(%0), 32(%1), paddd (%2), 11) ROW_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1), paddd (%2), 11) ROW_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1), paddd (%2), 11)*/ DC_COND_ROW_IDCT( 32(%0), 40(%0), 48(%0), 56(%0), 32(%1),paddd (%2), 11) DC_COND_ROW_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11) DC_COND_ROW_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11) //IDCT( src0, src4, src1, src5, dst, shift) COL_IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) COL_IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) COL_IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) COL_IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) #else #define DC_COND_IDCT(src0, src4, src1, src5, dst, rounder, shift) \ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ "movq "MANGLE(wm1010)", %%mm4 \n\t"\ "pand %%mm0, %%mm4 \n\t"\ "por %%mm1, %%mm4 \n\t"\ "por %%mm2, %%mm4 \n\t"\ "por %%mm3, %%mm4 \n\t"\ "packssdw %%mm4,%%mm4 \n\t"\ "movd %%mm4, %%eax \n\t"\ "orl %%eax, %%eax \n\t"\ "jz 1f \n\t"\ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ "pmaddwd %%mm2, %%mm7 \n\t" /* 
C3R3+C1R1 C3r3+C1r1 */\ #rounder ", %%mm4 \n\t"\ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\ "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ #rounder ", %%mm0 \n\t"\ "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\ "paddd %%mm0, %%mm0 \n\t" \ "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\ "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\ "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\ "psrad $" #shift ", %%mm7 \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\ "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\ "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\ "psrad $" #shift ", %%mm1 \n\t"\ "psrad $" #shift ", %%mm2 \n\t"\ "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\ "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\ "movq %%mm7, " #dst " \n\t"\ "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ "movq %%mm2, 24+" #dst " \n\t"\ "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\ "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\ "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\ "psrad $" #shift ", %%mm2 \n\t"\ "psrad $" #shift ", %%mm0 \n\t"\ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\ "psrad 
$" #shift ", %%mm6 \n\t"\ "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\ "movq %%mm2, 8+" #dst " \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\ "movq %%mm4, 16+" #dst " \n\t"\ "jmp 2f \n\t"\ "1: \n\t"\ "pslld $16, %%mm0 \n\t"\ "paddd "MANGLE(d40000)", %%mm0 \n\t"\ "psrad $13, %%mm0 \n\t"\ "packssdw %%mm0, %%mm0 \n\t"\ "movq %%mm0, " #dst " \n\t"\ "movq %%mm0, 8+" #dst " \n\t"\ "movq %%mm0, 16+" #dst " \n\t"\ "movq %%mm0, 24+" #dst " \n\t"\ "2: \n\t" #define Z_COND_IDCT(src0, src4, src1, src5, dst, rounder, shift, bt) \ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ "movq %%mm0, %%mm4 \n\t"\ "por %%mm1, %%mm4 \n\t"\ "por %%mm2, %%mm4 \n\t"\ "por %%mm3, %%mm4 \n\t"\ "packssdw %%mm4,%%mm4 \n\t"\ "movd %%mm4, %%eax \n\t"\ "orl %%eax, %%eax \n\t"\ "jz " #bt " \n\t"\ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ #rounder ", %%mm4 \n\t"\ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\ "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ #rounder ", %%mm0 \n\t"\ "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\ "paddd %%mm0, %%mm0 \n\t" \ "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\ "movq 72(%2), %%mm5 \n\t" 
/* -C5 -C1 -C5 -C1 */\ "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\ "psrad $" #shift ", %%mm7 \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\ "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\ "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\ "psrad $" #shift ", %%mm1 \n\t"\ "psrad $" #shift ", %%mm2 \n\t"\ "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\ "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\ "movq %%mm7, " #dst " \n\t"\ "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ "movq %%mm2, 24+" #dst " \n\t"\ "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\ "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\ "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\ "psrad $" #shift ", %%mm2 \n\t"\ "psrad $" #shift ", %%mm0 \n\t"\ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\ "psrad $" #shift ", %%mm6 \n\t"\ "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\ "movq %%mm2, 8+" #dst " \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\ "movq %%mm4, 16+" #dst " \n\t"\ #define ROW_IDCT(src0, src4, src1, src5, dst, rounder, shift) \ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ "movq 16(%2), %%mm4 \n\t" /* C4 C4 
C4 C4 */\ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ #rounder ", %%mm4 \n\t"\ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\ "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ #rounder ", %%mm0 \n\t"\ "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\ "paddd %%mm0, %%mm0 \n\t" \ "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\ "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\ "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\ "psrad $" #shift ", %%mm7 \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\ "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\ "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\ "psrad $" #shift ", %%mm1 \n\t"\ "psrad $" #shift ", %%mm2 \n\t"\ "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\ "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\ "movq %%mm7, " #dst " \n\t"\ "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ "movq %%mm2, 24+" #dst " \n\t"\ "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\ "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ "movq %%mm0, 
%%mm2 \n\t" /* A2 a2 */\ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\ "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\ "psrad $" #shift ", %%mm2 \n\t"\ "psrad $" #shift ", %%mm0 \n\t"\ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\ "psrad $" #shift ", %%mm6 \n\t"\ "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\ "movq %%mm2, 8+" #dst " \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\ "movq %%mm4, 16+" #dst " \n\t"\ //IDCT( src0, src4, src1, src5, dst, rounder, shift) DC_COND_IDCT( 0(%0), 8(%0), 16(%0), 24(%0), 0(%1),paddd 8(%2), 11) Z_COND_IDCT( 32(%0), 40(%0), 48(%0), 56(%0), 32(%1),paddd (%2), 11, 4f) Z_COND_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11, 2f) Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 1f) #undef IDCT #define IDCT(src0, src4, src1, src5, dst, shift) \ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "paddd %%mm1, %%mm0 
\n\t" /* A1 a1 */\ "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\ "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\ "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ "paddd %%mm1, %%mm7 \n\t" /* B0 b0 */\ "movq 72(%2), %%mm1 \n\t" /* -C5 -C1 -C5 -C1 */\ "pmaddwd %%mm3, %%mm1 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "paddd %%mm2, %%mm1 \n\t" /* B1 b1 */\ "psrad $" #shift ", %%mm7 \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\ "paddd %%mm1, %%mm0 \n\t" /* A1+B1 a1+b1 */\ "psubd %%mm1, %%mm2 \n\t" /* A1-B1 a1-b1 */\ "psrad $" #shift ", %%mm0 \n\t"\ "psrad $" #shift ", %%mm2 \n\t"\ "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\ "movd %%mm7, " #dst " \n\t"\ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\ "movd %%mm0, 16+" #dst " \n\t"\ "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\ "movd %%mm2, 96+" #dst " \n\t"\ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "movd %%mm4, 112+" #dst " \n\t"\ "movq " #src1 ", %%mm0 \n\t" /* R3 R1 r3 r1 */\ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ "pmaddwd %%mm0, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\ "pmaddwd 96(%2), %%mm0 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\ "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\ "psrad $" #shift ", %%mm2 \n\t"\ "psrad $" #shift ", %%mm5 \n\t"\ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ "paddd %%mm0, %%mm3 \n\t" /* B3 b3 */\ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\ "psrad $" #shift ", %%mm6 \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\ 
"packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "movd %%mm2, 32+" #dst " \n\t"\ "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\ "movd %%mm6, 48+" #dst " \n\t"\ "movd %%mm4, 64+" #dst " \n\t"\ "movd %%mm5, 80+" #dst " \n\t" //IDCT( src0, src4, src1, src5, dst, shift) IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) "jmp 9f \n\t" "#" ASMALIGN(4) \ "4: \n\t" Z_COND_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11, 6f) Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 5f) #undef IDCT #define IDCT(src0, src4, src1, src5, dst, shift) \ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\ "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\ "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\ "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ "movq 72(%2), %%mm7 \n\t" /* -C5 -C1 -C5 -C1 */\ "pmaddwd %%mm3, %%mm7 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ "paddd %%mm4, %%mm1 \n\t" /* A0+B0 a0+b0 */\ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ "psubd %%mm1, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "psrad $" #shift ", %%mm1 \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "movq %%mm0, %%mm2 
\n\t" /* A1 a1 */\ "paddd %%mm7, %%mm0 \n\t" /* A1+B1 a1+b1 */\ "psubd %%mm7, %%mm2 \n\t" /* A1-B1 a1-b1 */\ "psrad $" #shift ", %%mm0 \n\t"\ "psrad $" #shift ", %%mm2 \n\t"\ "packssdw %%mm1, %%mm1 \n\t" /* A0+B0 a0+b0 */\ "movd %%mm1, " #dst " \n\t"\ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\ "movd %%mm0, 16+" #dst " \n\t"\ "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\ "movd %%mm2, 96+" #dst " \n\t"\ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "movd %%mm4, 112+" #dst " \n\t"\ "movq 88(%2), %%mm1 \n\t" /* C3 C7 C3 C7 */\ "pmaddwd %%mm3, %%mm1 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ "paddd %%mm1, %%mm2 \n\t" /* A2+B2 a2+b2 */\ "psubd %%mm1, %%mm5 \n\t" /* a2-B2 a2-b2 */\ "psrad $" #shift ", %%mm2 \n\t"\ "psrad $" #shift ", %%mm5 \n\t"\ "movq %%mm6, %%mm1 \n\t" /* A3 a3 */\ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "psubd %%mm3, %%mm1 \n\t" /* a3-B3 a3-b3 */\ "psrad $" #shift ", %%mm6 \n\t"\ "psrad $" #shift ", %%mm1 \n\t"\ "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "movd %%mm2, 32+" #dst " \n\t"\ "packssdw %%mm1, %%mm1 \n\t" /* A3-B3 a3-b3 */\ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\ "movd %%mm6, 48+" #dst " \n\t"\ "movd %%mm1, 64+" #dst " \n\t"\ "movd %%mm5, 80+" #dst " \n\t" //IDCT( src0, src4, src1, src5, dst, shift) IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) "jmp 9f \n\t" "#" ASMALIGN(4) \ "6: \n\t" Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 7f) #undef IDCT #define IDCT(src0, src4, src1, src5, dst, shift) \ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 24(%2), %%mm5 \n\t" 
/* -C4 C4 -C4 C4 */\ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\ "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ "movq 72(%2), %%mm7 \n\t" /* -C5 -C1 -C5 -C1 */\ "pmaddwd %%mm3, %%mm7 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ "paddd %%mm4, %%mm1 \n\t" /* A0+B0 a0+b0 */\ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ "psubd %%mm1, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "psrad $" #shift ", %%mm1 \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\ "paddd %%mm7, %%mm0 \n\t" /* A1+B1 a1+b1 */\ "psubd %%mm7, %%mm2 \n\t" /* A1-B1 a1-b1 */\ "psrad $" #shift ", %%mm0 \n\t"\ "psrad $" #shift ", %%mm2 \n\t"\ "packssdw %%mm1, %%mm1 \n\t" /* A0+B0 a0+b0 */\ "movd %%mm1, " #dst " \n\t"\ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\ "movd %%mm0, 16+" #dst " \n\t"\ "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\ "movd %%mm2, 96+" #dst " \n\t"\ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "movd %%mm4, 112+" #dst " \n\t"\ "movq 88(%2), %%mm1 \n\t" /* C3 C7 C3 C7 */\ "pmaddwd %%mm3, %%mm1 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ "paddd %%mm1, %%mm2 \n\t" /* A2+B2 a2+b2 */\ "psubd %%mm1, %%mm5 \n\t" /* a2-B2 a2-b2 */\ "psrad $" #shift ", %%mm2 \n\t"\ "psrad $" #shift ", %%mm5 \n\t"\ "movq %%mm6, %%mm1 \n\t" /* A3 a3 */\ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "psubd %%mm3, %%mm1 \n\t" /* a3-B3 a3-b3 */\ "psrad $" #shift ", %%mm6 \n\t"\ "psrad $" #shift ", %%mm1 \n\t"\ "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "movd %%mm2, 32+" #dst " \n\t"\ "packssdw %%mm1, %%mm1 \n\t" /* A3-B3 a3-b3 */\ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\ "movd %%mm6, 48+" #dst " \n\t"\ "movd %%mm1, 64+" #dst " \n\t"\ "movd %%mm5, 80+" #dst " \n\t" //IDCT( src0, src4, src1, src5, 
dst, shift) IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) "jmp 9f \n\t" "#" ASMALIGN(4) \ "2: \n\t" Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 3f) #undef IDCT #define IDCT(src0, src4, src1, src5, dst, shift) \ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\ "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ "paddd %%mm1, %%mm7 \n\t" /* B0 b0 */\ "movq 72(%2), %%mm1 \n\t" /* -C5 -C1 -C5 -C1 */\ "pmaddwd %%mm3, %%mm1 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "paddd %%mm2, %%mm1 \n\t" /* B1 b1 */\ "psrad $" #shift ", %%mm7 \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\ "paddd %%mm1, %%mm0 \n\t" /* A1+B1 a1+b1 */\ "psubd %%mm1, %%mm2 \n\t" /* A1-B1 a1-b1 */\ "psrad $" #shift ", %%mm0 \n\t"\ "psrad $" #shift ", %%mm2 \n\t"\ "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\ "movd %%mm7, " #dst " \n\t"\ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\ "movd %%mm0, 16+" #dst " \n\t"\ "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\ "movd %%mm2, 96+" #dst " \n\t"\ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "movd %%mm4, 112+" #dst " \n\t"\ "movq " 
#src1 ", %%mm0 \n\t" /* R3 R1 r3 r1 */\ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ "pmaddwd %%mm0, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\ "pmaddwd 96(%2), %%mm0 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\ "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\ "psrad $" #shift ", %%mm2 \n\t"\ "psrad $" #shift ", %%mm5 \n\t"\ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ "paddd %%mm0, %%mm3 \n\t" /* B3 b3 */\ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\ "psrad $" #shift ", %%mm6 \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "movd %%mm2, 32+" #dst " \n\t"\ "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\ "movd %%mm6, 48+" #dst " \n\t"\ "movd %%mm4, 64+" #dst " \n\t"\ "movd %%mm5, 80+" #dst " \n\t" //IDCT( src0, src4, src1, src5, dst, shift) IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) "jmp 9f \n\t" "#" ASMALIGN(4) \ "3: \n\t" #undef IDCT #define IDCT(src0, src4, src1, src5, dst, shift) \ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ 
"movq 64(%2), %%mm3 \n\t"\ "pmaddwd %%mm2, %%mm3 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "psrad $" #shift ", %%mm7 \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "movq %%mm0, %%mm1 \n\t" /* A1 a1 */\ "paddd %%mm3, %%mm0 \n\t" /* A1+B1 a1+b1 */\ "psubd %%mm3, %%mm1 \n\t" /* A1-B1 a1-b1 */\ "psrad $" #shift ", %%mm0 \n\t"\ "psrad $" #shift ", %%mm1 \n\t"\ "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\ "movd %%mm7, " #dst " \n\t"\ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\ "movd %%mm0, 16+" #dst " \n\t"\ "packssdw %%mm1, %%mm1 \n\t" /* A1-B1 a1-b1 */\ "movd %%mm1, 96+" #dst " \n\t"\ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "movd %%mm4, 112+" #dst " \n\t"\ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ "pmaddwd %%mm2, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ "pmaddwd 96(%2), %%mm2 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ "movq %%mm5, %%mm1 \n\t" /* A2 a2 */\ "paddd %%mm4, %%mm1 \n\t" /* A2+B2 a2+b2 */\ "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\ "psrad $" #shift ", %%mm1 \n\t"\ "psrad $" #shift ", %%mm5 \n\t"\ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ "paddd %%mm2, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "psubd %%mm2, %%mm4 \n\t" /* a3-B3 a3-b3 */\ "psrad $" #shift ", %%mm6 \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "packssdw %%mm1, %%mm1 \n\t" /* A2+B2 a2+b2 */\ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "movd %%mm1, 32+" #dst " \n\t"\ "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\ "movd %%mm6, 48+" #dst " \n\t"\ "movd %%mm4, 64+" #dst " \n\t"\ "movd %%mm5, 80+" #dst " \n\t" //IDCT( src0, src4, src1, src5, dst, shift) IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) "jmp 9f \n\t" "#" ASMALIGN(4) \ "5: \n\t" #undef IDCT #define IDCT(src0, src4, src1, src5, dst, 
shift) \ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\ "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\ "movq 8+" #src0 ", %%mm2 \n\t" /* R4 R0 r4 r0 */\ "movq 8+" #src4 ", %%mm3 \n\t" /* R6 R2 r6 r2 */\ "movq 16(%2), %%mm1 \n\t" /* C4 C4 C4 C4 */\ "pmaddwd %%mm2, %%mm1 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 24(%2), %%mm7 \n\t" /* -C4 C4 -C4 C4 */\ "pmaddwd %%mm7, %%mm2 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "movq 32(%2), %%mm7 \n\t" /* C6 C2 C6 C2 */\ "pmaddwd %%mm3, %%mm7 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ "pmaddwd 40(%2), %%mm3 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ "paddd %%mm1, %%mm7 \n\t" /* A0 a0 */\ "paddd %%mm1, %%mm1 \n\t" /* 2C0 2c0 */\ "psubd %%mm7, %%mm1 \n\t" /* A3 a3 */\ "paddd %%mm2, %%mm3 \n\t" /* A1 a1 */\ "paddd %%mm2, %%mm2 \n\t" /* 2C1 2c1 */\ "psubd %%mm3, %%mm2 \n\t" /* A2 a2 */\ "psrad $" #shift ", %%mm4 \n\t"\ "psrad $" #shift ", %%mm7 \n\t"\ "psrad $" #shift ", %%mm3 \n\t"\ "packssdw %%mm7, %%mm4 \n\t" /* A0 a0 */\ "movq %%mm4, " #dst " \n\t"\ "psrad $" #shift ", %%mm0 \n\t"\ "packssdw %%mm3, %%mm0 \n\t" /* A1 a1 */\ "movq %%mm0, 16+" #dst " \n\t"\ "movq %%mm0, 96+" #dst " \n\t"\ "movq %%mm4, 112+" #dst " \n\t"\ "psrad $" #shift ", %%mm5 \n\t"\ "psrad $" #shift ", %%mm6 \n\t"\ "psrad $" #shift ", %%mm2 \n\t"\ "packssdw %%mm2, %%mm5 \n\t" /* A2-B2 a2-b2 */\ "movq %%mm5, 32+" #dst " \n\t"\ 
"psrad $" #shift ", %%mm1 \n\t"\ "packssdw %%mm1, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "movq %%mm6, 48+" #dst " \n\t"\ "movq %%mm6, 64+" #dst " \n\t"\ "movq %%mm5, 80+" #dst " \n\t" //IDCT( src0, src4, src1, src5, dst, shift) IDCT( 0(%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) //IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) //IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) "jmp 9f \n\t" "#" ASMALIGN(4) \ "1: \n\t" #undef IDCT #define IDCT(src0, src4, src1, src5, dst, shift) \ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\ "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\ "movq 64(%2), %%mm1 \n\t"\ "pmaddwd %%mm2, %%mm1 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "psrad $" #shift ", %%mm7 \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "movq %%mm0, %%mm3 \n\t" /* A1 a1 */\ "paddd %%mm1, %%mm0 \n\t" /* A1+B1 a1+b1 */\ "psubd %%mm1, %%mm3 \n\t" /* A1-B1 a1-b1 */\ "psrad $" #shift ", %%mm0 \n\t"\ "psrad $" #shift ", %%mm3 \n\t"\ "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\ "movd %%mm7, " #dst " \n\t"\ 
"packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\ "movd %%mm0, 16+" #dst " \n\t"\ "packssdw %%mm3, %%mm3 \n\t" /* A1-B1 a1-b1 */\ "movd %%mm3, 96+" #dst " \n\t"\ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\ "movd %%mm4, 112+" #dst " \n\t"\ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ "pmaddwd %%mm2, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ "pmaddwd 96(%2), %%mm2 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ "movq %%mm5, %%mm3 \n\t" /* A2 a2 */\ "paddd %%mm4, %%mm3 \n\t" /* A2+B2 a2+b2 */\ "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\ "psrad $" #shift ", %%mm3 \n\t"\ "psrad $" #shift ", %%mm5 \n\t"\ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ "paddd %%mm2, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "psubd %%mm2, %%mm4 \n\t" /* a3-B3 a3-b3 */\ "psrad $" #shift ", %%mm6 \n\t"\ "packssdw %%mm3, %%mm3 \n\t" /* A2+B2 a2+b2 */\ "movd %%mm3, 32+" #dst " \n\t"\ "psrad $" #shift ", %%mm4 \n\t"\ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\ "movd %%mm6, 48+" #dst " \n\t"\ "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\ "movd %%mm4, 64+" #dst " \n\t"\ "movd %%mm5, 80+" #dst " \n\t" //IDCT( src0, src4, src1, src5, dst, shift) IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) "jmp 9f \n\t" "#" ASMALIGN(4) "7: \n\t" #undef IDCT #define IDCT(src0, src4, src1, src5, dst, shift) \ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ "psrad $" #shift ", %%mm4 \n\t"\ "psrad $" #shift ", %%mm0 \n\t"\ "movq 8+" #src0 ", %%mm2 \n\t" /* R4 R0 r4 r0 */\ "movq 16(%2), %%mm1 \n\t" /* C4 C4 C4 C4 */\ "pmaddwd %%mm2, %%mm1 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ "movq 24(%2), %%mm7 \n\t" /* -C4 C4 -C4 C4 */\ "pmaddwd %%mm7, %%mm2 \n\t" /* 
-C4R4+C4R0 -C4r4+C4r0 */\ "movq 32(%2), %%mm7 \n\t" /* C6 C2 C6 C2 */\ "psrad $" #shift ", %%mm1 \n\t"\ "packssdw %%mm1, %%mm4 \n\t" /* A0 a0 */\ "movq %%mm4, " #dst " \n\t"\ "psrad $" #shift ", %%mm2 \n\t"\ "packssdw %%mm2, %%mm0 \n\t" /* A1 a1 */\ "movq %%mm0, 16+" #dst " \n\t"\ "movq %%mm0, 96+" #dst " \n\t"\ "movq %%mm4, 112+" #dst " \n\t"\ "movq %%mm0, 32+" #dst " \n\t"\ "movq %%mm4, 48+" #dst " \n\t"\ "movq %%mm4, 64+" #dst " \n\t"\ "movq %%mm0, 80+" #dst " \n\t" //IDCT( src0, src4, src1, src5, dst, shift) IDCT( 0(%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) //IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) //IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) #endif /* Input 00 40 04 44 20 60 24 64 10 30 14 34 50 70 54 74 01 41 03 43 21 61 23 63 11 31 13 33 51 71 53 73 02 42 06 46 22 62 26 66 12 32 16 36 52 72 56 76 05 45 07 47 25 65 27 67 15 35 17 37 55 75 57 77 Temp 00 04 10 14 20 24 30 34 40 44 50 54 60 64 70 74 01 03 11 13 21 23 31 33 41 43 51 53 61 63 71 73 02 06 12 16 22 26 32 36 42 46 52 56 62 66 72 76 05 07 15 17 25 27 35 37 45 47 55 57 65 67 75 77 */ "9: \n\t" :: "r" (block), "r" (temp), "r" (coeffs) : "%eax" ); } void ff_simple_idct_mmx(int16_t *block) { idct(block); } //FIXME merge add/put into the idct void ff_simple_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block) { idct(block); put_pixels_clamped_mmx(block, dest, line_size); } void ff_simple_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block) { idct(block); add_pixels_clamped_mmx(block, dest, line_size); }
123linslouis-android-video-cutter
jni/libavcodec/x86/simple_idct_mmx.c
C
asf20
72,738
OBJS-$(CONFIG_MLP_DECODER) += x86/mlpdsp.o OBJS-$(CONFIG_TRUEHD_DECODER) += x86/mlpdsp.o YASM-OBJS-FFT-$(HAVE_AMD3DNOW) += x86/fft_3dn.o YASM-OBJS-FFT-$(HAVE_AMD3DNOWEXT) += x86/fft_3dn2.o YASM-OBJS-FFT-$(HAVE_SSE) += x86/fft_sse.o YASM-OBJS-$(CONFIG_FFT) += x86/fft_mmx.o \ $(YASM-OBJS-FFT-yes) YASM-OBJS-$(CONFIG_GPL) += x86/h264_deblock_sse2.o \ x86/h264_idct_sse2.o \ MMX-OBJS-$(CONFIG_CAVS_DECODER) += x86/cavsdsp_mmx.o MMX-OBJS-$(CONFIG_ENCODERS) += x86/dsputilenc_mmx.o MMX-OBJS-$(CONFIG_GPL) += x86/idct_mmx.o MMX-OBJS-$(CONFIG_LPC) += x86/lpc_mmx.o MMX-OBJS-$(CONFIG_DWT) += x86/snowdsp_mmx.o MMX-OBJS-$(CONFIG_VC1_DECODER) += x86/vc1dsp_mmx.o MMX-OBJS-$(CONFIG_VP3_DECODER) += x86/vp3dsp_mmx.o \ x86/vp3dsp_sse2.o MMX-OBJS-$(CONFIG_VP5_DECODER) += x86/vp3dsp_mmx.o \ x86/vp3dsp_sse2.o MMX-OBJS-$(CONFIG_VP6_DECODER) += x86/vp3dsp_mmx.o \ x86/vp3dsp_sse2.o \ x86/vp6dsp_mmx.o \ x86/vp6dsp_sse2.o MMX-OBJS-$(HAVE_YASM) += x86/dsputil_yasm.o \ $(YASM-OBJS-yes) MMX-OBJS-$(CONFIG_FFT) += x86/fft.o OBJS-$(HAVE_MMX) += x86/cpuid.o \ x86/dnxhd_mmx.o \ x86/dsputil_mmx.o \ x86/fdct_mmx.o \ x86/idct_mmx_xvid.o \ x86/idct_sse2_xvid.o \ x86/motion_est_mmx.o \ x86/mpegvideo_mmx.o \ x86/simple_idct_mmx.o \
123linslouis-android-video-cutter
jni/libavcodec/x86/Makefile
Makefile
asf20
2,324
/* * Chinese AVS video (AVS1-P2, JiZhun profile) decoder. * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de> * * MMX-optimized DSP functions, based on H.264 optimizations by * Michael Niedermayer and Loren Merritt * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/common.h" #include "libavutil/x86_cpu.h" #include "libavcodec/dsputil.h" #include "dsputil_mmx.h" /***************************************************************************** * * inverse transform * ****************************************************************************/ static inline void cavs_idct8_1d(int16_t *block, uint64_t bias) { __asm__ volatile( "movq 112(%0), %%mm4 \n\t" /* mm4 = src7 */ "movq 16(%0), %%mm5 \n\t" /* mm5 = src1 */ "movq 80(%0), %%mm2 \n\t" /* mm2 = src5 */ "movq 48(%0), %%mm7 \n\t" /* mm7 = src3 */ "movq %%mm4, %%mm0 \n\t" "movq %%mm5, %%mm3 \n\t" "movq %%mm2, %%mm6 \n\t" "movq %%mm7, %%mm1 \n\t" "paddw %%mm4, %%mm4 \n\t" /* mm4 = 2*src7 */ "paddw %%mm3, %%mm3 \n\t" /* mm3 = 2*src1 */ "paddw %%mm6, %%mm6 \n\t" /* mm6 = 2*src5 */ "paddw %%mm1, %%mm1 \n\t" /* mm1 = 2*src3 */ "paddw %%mm4, %%mm0 \n\t" /* mm0 = 3*src7 */ "paddw %%mm3, %%mm5 \n\t" /* mm5 = 3*src1 */ "paddw %%mm6, %%mm2 \n\t" /* mm2 = 3*src5 */ "paddw %%mm1, %%mm7 \n\t" /* mm7 = 3*src3 */ "psubw %%mm4, 
%%mm5 \n\t" /* mm5 = 3*src1 - 2*src7 = a0 */ "paddw %%mm6, %%mm7 \n\t" /* mm7 = 3*src3 + 2*src5 = a1 */ "psubw %%mm2, %%mm1 \n\t" /* mm1 = 2*src3 - 3*src5 = a2 */ "paddw %%mm0, %%mm3 \n\t" /* mm3 = 2*src1 + 3*src7 = a3 */ "movq %%mm5, %%mm4 \n\t" "movq %%mm7, %%mm6 \n\t" "movq %%mm3, %%mm0 \n\t" "movq %%mm1, %%mm2 \n\t" SUMSUB_BA( %%mm7, %%mm5 ) /* mm7 = a0 + a1 mm5 = a0 - a1 */ "paddw %%mm3, %%mm7 \n\t" /* mm7 = a0 + a1 + a3 */ "paddw %%mm1, %%mm5 \n\t" /* mm5 = a0 - a1 + a2 */ "paddw %%mm7, %%mm7 \n\t" "paddw %%mm5, %%mm5 \n\t" "paddw %%mm6, %%mm7 \n\t" /* mm7 = b4 */ "paddw %%mm4, %%mm5 \n\t" /* mm5 = b5 */ SUMSUB_BA( %%mm1, %%mm3 ) /* mm1 = a3 + a2 mm3 = a3 - a2 */ "psubw %%mm1, %%mm4 \n\t" /* mm4 = a0 - a2 - a3 */ "movq %%mm4, %%mm1 \n\t" /* mm1 = a0 - a2 - a3 */ "psubw %%mm6, %%mm3 \n\t" /* mm3 = a3 - a2 - a1 */ "paddw %%mm1, %%mm1 \n\t" "paddw %%mm3, %%mm3 \n\t" "psubw %%mm2, %%mm1 \n\t" /* mm1 = b7 */ "paddw %%mm0, %%mm3 \n\t" /* mm3 = b6 */ "movq 32(%0), %%mm2 \n\t" /* mm2 = src2 */ "movq 96(%0), %%mm6 \n\t" /* mm6 = src6 */ "movq %%mm2, %%mm4 \n\t" "movq %%mm6, %%mm0 \n\t" "psllw $2, %%mm4 \n\t" /* mm4 = 4*src2 */ "psllw $2, %%mm6 \n\t" /* mm6 = 4*src6 */ "paddw %%mm4, %%mm2 \n\t" /* mm2 = 5*src2 */ "paddw %%mm6, %%mm0 \n\t" /* mm0 = 5*src6 */ "paddw %%mm2, %%mm2 \n\t" "paddw %%mm0, %%mm0 \n\t" "psubw %%mm0, %%mm4 \n\t" /* mm4 = 4*src2 - 10*src6 = a7 */ "paddw %%mm2, %%mm6 \n\t" /* mm6 = 4*src6 + 10*src2 = a6 */ "movq (%0), %%mm2 \n\t" /* mm2 = src0 */ "movq 64(%0), %%mm0 \n\t" /* mm0 = src4 */ SUMSUB_BA( %%mm0, %%mm2 ) /* mm0 = src0+src4 mm2 = src0-src4 */ "psllw $3, %%mm0 \n\t" "psllw $3, %%mm2 \n\t" "paddw %1, %%mm0 \n\t" /* add rounding bias */ "paddw %1, %%mm2 \n\t" /* add rounding bias */ SUMSUB_BA( %%mm6, %%mm0 ) /* mm6 = a4 + a6 mm0 = a4 - a6 */ SUMSUB_BA( %%mm4, %%mm2 ) /* mm4 = a5 + a7 mm2 = a5 - a7 */ SUMSUB_BA( %%mm7, %%mm6 ) /* mm7 = dst0 mm6 = dst7 */ SUMSUB_BA( %%mm5, %%mm4 ) /* mm5 = dst1 mm4 = dst6 */ SUMSUB_BA( %%mm3, %%mm2 ) /* mm3 = 
dst2 mm2 = dst5 */ SUMSUB_BA( %%mm1, %%mm0 ) /* mm1 = dst3 mm0 = dst4 */ :: "r"(block), "m"(bias) ); } static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride) { int i; DECLARE_ALIGNED(8, int16_t, b2)[64]; for(i=0; i<2; i++){ DECLARE_ALIGNED(8, uint64_t, tmp); cavs_idct8_1d(block+4*i, ff_pw_4); __asm__ volatile( "psraw $3, %%mm7 \n\t" "psraw $3, %%mm6 \n\t" "psraw $3, %%mm5 \n\t" "psraw $3, %%mm4 \n\t" "psraw $3, %%mm3 \n\t" "psraw $3, %%mm2 \n\t" "psraw $3, %%mm1 \n\t" "psraw $3, %%mm0 \n\t" "movq %%mm7, %0 \n\t" TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 ) "movq %%mm0, 8(%1) \n\t" "movq %%mm6, 24(%1) \n\t" "movq %%mm7, 40(%1) \n\t" "movq %%mm4, 56(%1) \n\t" "movq %0, %%mm7 \n\t" TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 ) "movq %%mm7, (%1) \n\t" "movq %%mm1, 16(%1) \n\t" "movq %%mm0, 32(%1) \n\t" "movq %%mm3, 48(%1) \n\t" : "=m"(tmp) : "r"(b2+32*i) : "memory" ); } for(i=0; i<2; i++){ cavs_idct8_1d(b2+4*i, ff_pw_64.a); __asm__ volatile( "psraw $7, %%mm7 \n\t" "psraw $7, %%mm6 \n\t" "psraw $7, %%mm5 \n\t" "psraw $7, %%mm4 \n\t" "psraw $7, %%mm3 \n\t" "psraw $7, %%mm2 \n\t" "psraw $7, %%mm1 \n\t" "psraw $7, %%mm0 \n\t" "movq %%mm7, (%0) \n\t" "movq %%mm5, 16(%0) \n\t" "movq %%mm3, 32(%0) \n\t" "movq %%mm1, 48(%0) \n\t" "movq %%mm0, 64(%0) \n\t" "movq %%mm2, 80(%0) \n\t" "movq %%mm4, 96(%0) \n\t" "movq %%mm6, 112(%0) \n\t" :: "r"(b2+4*i) : "memory" ); } add_pixels_clamped_mmx(b2, dst, stride); } /***************************************************************************** * * motion compensation * ****************************************************************************/ /* vertical filter [-1 -2 96 42 -7 0] */ #define QPEL_CAVSV1(A,B,C,D,E,F,OP,MUL2) \ "movd (%0), "#F" \n\t"\ "movq "#C", %%mm6 \n\t"\ "pmullw %5, %%mm6 \n\t"\ "movq "#D", %%mm7 \n\t"\ "pmullw "MANGLE(MUL2)", %%mm7\n\t"\ "psllw $3, "#E" \n\t"\ "psubw "#E", %%mm6 \n\t"\ "psraw $3, "#E" \n\t"\ "paddw %%mm7, %%mm6 \n\t"\ "paddw "#E", %%mm6 \n\t"\ "paddw "#B", "#B" \n\t"\ "pxor 
%%mm7, %%mm7 \n\t"\ "add %2, %0 \n\t"\ "punpcklbw %%mm7, "#F" \n\t"\ "psubw "#B", %%mm6 \n\t"\ "psraw $1, "#B" \n\t"\ "psubw "#A", %%mm6 \n\t"\ "paddw %4, %%mm6 \n\t"\ "psraw $7, %%mm6 \n\t"\ "packuswb %%mm6, %%mm6 \n\t"\ OP(%%mm6, (%1), A, d) \ "add %3, %1 \n\t" /* vertical filter [ 0 -1 5 5 -1 0] */ #define QPEL_CAVSV2(A,B,C,D,E,F,OP,MUL2) \ "movd (%0), "#F" \n\t"\ "movq "#C", %%mm6 \n\t"\ "paddw "#D", %%mm6 \n\t"\ "pmullw %5, %%mm6 \n\t"\ "add %2, %0 \n\t"\ "punpcklbw %%mm7, "#F" \n\t"\ "psubw "#B", %%mm6 \n\t"\ "psubw "#E", %%mm6 \n\t"\ "paddw %4, %%mm6 \n\t"\ "psraw $3, %%mm6 \n\t"\ "packuswb %%mm6, %%mm6 \n\t"\ OP(%%mm6, (%1), A, d) \ "add %3, %1 \n\t" /* vertical filter [ 0 -7 42 96 -2 -1] */ #define QPEL_CAVSV3(A,B,C,D,E,F,OP,MUL2) \ "movd (%0), "#F" \n\t"\ "movq "#C", %%mm6 \n\t"\ "pmullw "MANGLE(MUL2)", %%mm6\n\t"\ "movq "#D", %%mm7 \n\t"\ "pmullw %5, %%mm7 \n\t"\ "psllw $3, "#B" \n\t"\ "psubw "#B", %%mm6 \n\t"\ "psraw $3, "#B" \n\t"\ "paddw %%mm7, %%mm6 \n\t"\ "paddw "#B", %%mm6 \n\t"\ "paddw "#E", "#E" \n\t"\ "pxor %%mm7, %%mm7 \n\t"\ "add %2, %0 \n\t"\ "punpcklbw %%mm7, "#F" \n\t"\ "psubw "#E", %%mm6 \n\t"\ "psraw $1, "#E" \n\t"\ "psubw "#F", %%mm6 \n\t"\ "paddw %4, %%mm6 \n\t"\ "psraw $7, %%mm6 \n\t"\ "packuswb %%mm6, %%mm6 \n\t"\ OP(%%mm6, (%1), A, d) \ "add %3, %1 \n\t" #define QPEL_CAVSVNUM(VOP,OP,ADD,MUL1,MUL2)\ int w= 2;\ src -= 2*srcStride;\ \ while(w--){\ __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movd (%0), %%mm0 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm1 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm2 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm3 \n\t"\ "add %2, %0 \n\t"\ "movd (%0), %%mm4 \n\t"\ "add %2, %0 \n\t"\ "punpcklbw %%mm7, %%mm0 \n\t"\ "punpcklbw %%mm7, %%mm1 \n\t"\ "punpcklbw %%mm7, %%mm2 \n\t"\ "punpcklbw %%mm7, %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm4 \n\t"\ VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\ VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\ VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\ 
VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\ VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, MUL2)\ VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, MUL2)\ VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\ VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\ \ : "+a"(src), "+c"(dst)\ : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride), "m"(ADD), "m"(MUL1)\ : "memory"\ );\ if(h==16){\ __asm__ volatile(\ VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\ VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\ VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, MUL2)\ VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, MUL2)\ VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\ VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\ VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\ VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\ \ : "+a"(src), "+c"(dst)\ : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride), "m"(ADD), "m"(MUL1)\ : "memory"\ );\ }\ src += 4-(h+5)*srcStride;\ dst += 4-h*dstStride;\ } #define QPEL_CAVS(OPNAME, OP, MMX)\ static void OPNAME ## cavs_qpel8_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ int h=8;\ __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movq %5, %%mm6 \n\t"\ "1: \n\t"\ "movq (%0), %%mm0 \n\t"\ "movq 1(%0), %%mm2 \n\t"\ "movq %%mm0, %%mm1 \n\t"\ "movq %%mm2, %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm0 \n\t"\ "punpckhbw %%mm7, %%mm1 \n\t"\ "punpcklbw %%mm7, %%mm2 \n\t"\ "punpckhbw %%mm7, %%mm3 \n\t"\ "paddw %%mm2, %%mm0 \n\t"\ "paddw %%mm3, %%mm1 \n\t"\ "pmullw %%mm6, %%mm0 \n\t"\ "pmullw %%mm6, %%mm1 \n\t"\ "movq -1(%0), %%mm2 \n\t"\ "movq 2(%0), %%mm4 \n\t"\ "movq %%mm2, %%mm3 \n\t"\ "movq %%mm4, %%mm5 \n\t"\ "punpcklbw %%mm7, %%mm2 \n\t"\ "punpckhbw %%mm7, %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm4 \n\t"\ "punpckhbw %%mm7, %%mm5 \n\t"\ "paddw %%mm4, %%mm2 \n\t"\ "paddw %%mm3, %%mm5 \n\t"\ "psubw %%mm2, %%mm0 \n\t"\ "psubw %%mm5, %%mm1 \n\t"\ "movq %6, %%mm5 \n\t"\ "paddw %%mm5, 
%%mm0 \n\t"\ "paddw %%mm5, %%mm1 \n\t"\ "psraw $3, %%mm0 \n\t"\ "psraw $3, %%mm1 \n\t"\ "packuswb %%mm1, %%mm0 \n\t"\ OP(%%mm0, (%1),%%mm5, q) \ "add %3, %0 \n\t"\ "add %4, %1 \n\t"\ "decl %2 \n\t"\ " jnz 1b \n\t"\ : "+a"(src), "+c"(dst), "+m"(h)\ : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_4)\ : "memory"\ );\ }\ \ static inline void OPNAME ## cavs_qpel8or16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ QPEL_CAVSVNUM(QPEL_CAVSV1,OP,ff_pw_64,ff_pw_96,ff_pw_42) \ }\ \ static inline void OPNAME ## cavs_qpel8or16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ QPEL_CAVSVNUM(QPEL_CAVSV2,OP,ff_pw_4,ff_pw_5,ff_pw_5) \ }\ \ static inline void OPNAME ## cavs_qpel8or16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ QPEL_CAVSVNUM(QPEL_CAVSV3,OP,ff_pw_64,ff_pw_96,ff_pw_42) \ }\ \ static void OPNAME ## cavs_qpel8_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst , src , dstStride, srcStride, 8);\ }\ static void OPNAME ## cavs_qpel16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst , src , dstStride, srcStride, 16);\ OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\ }\ \ static void OPNAME ## cavs_qpel8_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst , src , dstStride, srcStride, 8);\ }\ static void OPNAME ## cavs_qpel16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst , src , dstStride, srcStride, 16);\ OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\ }\ \ static void OPNAME ## cavs_qpel8_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst , src , dstStride, srcStride, 8);\ }\ static void OPNAME ## 
cavs_qpel16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst , src , dstStride, srcStride, 16);\ OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\ }\ \ static void OPNAME ## cavs_qpel16_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ OPNAME ## cavs_qpel8_h_ ## MMX(dst , src , dstStride, srcStride);\ OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\ src += 8*srcStride;\ dst += 8*dstStride;\ OPNAME ## cavs_qpel8_h_ ## MMX(dst , src , dstStride, srcStride);\ OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\ }\ #define CAVS_MC(OPNAME, SIZE, MMX) \ static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ OPNAME ## cavs_qpel ## SIZE ## _h_ ## MMX(dst, src, stride, stride);\ }\ \ static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ OPNAME ## cavs_qpel ## SIZE ## _v1_ ## MMX(dst, src, stride, stride);\ }\ \ static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ OPNAME ## cavs_qpel ## SIZE ## _v2_ ## MMX(dst, src, stride, stride);\ }\ \ static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ OPNAME ## cavs_qpel ## SIZE ## _v3_ ## MMX(dst, src, stride, stride);\ }\ #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t" #define AVG_3DNOW_OP(a,b,temp, size) \ "mov" #size " " #b ", " #temp " \n\t"\ "pavgusb " #temp ", " #a " \n\t"\ "mov" #size " " #a ", " #b " \n\t" #define AVG_MMX2_OP(a,b,temp, size) \ "mov" #size " " #b ", " #temp " \n\t"\ "pavgb " #temp ", " #a " \n\t"\ "mov" #size " " #a ", " #b " \n\t" QPEL_CAVS(put_, PUT_OP, 3dnow) QPEL_CAVS(avg_, AVG_3DNOW_OP, 3dnow) QPEL_CAVS(put_, PUT_OP, mmx2) QPEL_CAVS(avg_, AVG_MMX2_OP, mmx2) CAVS_MC(put_, 8, 3dnow) CAVS_MC(put_, 16,3dnow) CAVS_MC(avg_, 8, 3dnow) 
CAVS_MC(avg_, 16,3dnow) CAVS_MC(put_, 8, mmx2) CAVS_MC(put_, 16,mmx2) CAVS_MC(avg_, 8, mmx2) CAVS_MC(avg_, 16,mmx2) void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx) { #define dspfunc(PFX, IDX, NUM) \ c->PFX ## _pixels_tab[IDX][ 0] = ff_ ## PFX ## NUM ## _mc00_mmx2; \ c->PFX ## _pixels_tab[IDX][ 2] = ff_ ## PFX ## NUM ## _mc20_mmx2; \ c->PFX ## _pixels_tab[IDX][ 4] = ff_ ## PFX ## NUM ## _mc01_mmx2; \ c->PFX ## _pixels_tab[IDX][ 8] = ff_ ## PFX ## NUM ## _mc02_mmx2; \ c->PFX ## _pixels_tab[IDX][12] = ff_ ## PFX ## NUM ## _mc03_mmx2; \ dspfunc(put_cavs_qpel, 0, 16); dspfunc(put_cavs_qpel, 1, 8); dspfunc(avg_cavs_qpel, 0, 16); dspfunc(avg_cavs_qpel, 1, 8); #undef dspfunc c->cavs_idct8_add = cavs_idct8_add_mmx; } void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx) { #define dspfunc(PFX, IDX, NUM) \ c->PFX ## _pixels_tab[IDX][ 0] = ff_ ## PFX ## NUM ## _mc00_mmx2; \ c->PFX ## _pixels_tab[IDX][ 2] = ff_ ## PFX ## NUM ## _mc20_3dnow; \ c->PFX ## _pixels_tab[IDX][ 4] = ff_ ## PFX ## NUM ## _mc01_3dnow; \ c->PFX ## _pixels_tab[IDX][ 8] = ff_ ## PFX ## NUM ## _mc02_3dnow; \ c->PFX ## _pixels_tab[IDX][12] = ff_ ## PFX ## NUM ## _mc03_3dnow; \ dspfunc(put_cavs_qpel, 0, 16); dspfunc(put_cavs_qpel, 1, 8); dspfunc(avg_cavs_qpel, 0, 16); dspfunc(avg_cavs_qpel, 1, 8); #undef dspfunc c->cavs_idct8_add = cavs_idct8_add_mmx; }
123linslouis-android-video-cutter
jni/libavcodec/x86/cavsdsp_mmx.c
C
asf20
19,352
/* * DSP utils : average functions are compiled twice for 3dnow/mmx2 * Copyright (c) 2000, 2001 Fabrice Bellard * Copyright (c) 2002-2004 Michael Niedermayer * * MMX optimization by Nick Kurshev <nickols_k@mail.ru> * mostly rewritten by Michael Niedermayer <michaelni@gmx.at> * and improved by Zdenek Kabelac <kabi@users.sf.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* XXX: we use explicit registers to avoid a gcc 2.95.2 register asm clobber bug - now it will work with 2.95.2 and also with -fPIC */ static void DEF(put_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" "1: \n\t" "movq (%1), %%mm0 \n\t" "movq (%1, %3), %%mm1 \n\t" PAVGB" 1(%1), %%mm0 \n\t" PAVGB" 1(%1, %3), %%mm1 \n\t" "movq %%mm0, (%2) \n\t" "movq %%mm1, (%2, %3) \n\t" "add %%"REG_a", %1 \n\t" "add %%"REG_a", %2 \n\t" "movq (%1), %%mm0 \n\t" "movq (%1, %3), %%mm1 \n\t" PAVGB" 1(%1), %%mm0 \n\t" PAVGB" 1(%1, %3), %%mm1 \n\t" "add %%"REG_a", %1 \n\t" "movq %%mm0, (%2) \n\t" "movq %%mm1, (%2, %3) \n\t" "add %%"REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels), "+D"(block) :"r" ((x86_reg)line_size) :"%"REG_a, "memory"); } static void DEF(put_pixels4_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int 
src1Stride, int h) { __asm__ volatile( "testl $1, %0 \n\t" " jz 1f \n\t" "movd (%1), %%mm0 \n\t" "movd (%2), %%mm1 \n\t" "add %4, %1 \n\t" "add $4, %2 \n\t" PAVGB" %%mm1, %%mm0 \n\t" "movd %%mm0, (%3) \n\t" "add %5, %3 \n\t" "decl %0 \n\t" "1: \n\t" "movd (%1), %%mm0 \n\t" "add %4, %1 \n\t" "movd (%1), %%mm1 \n\t" "movd (%2), %%mm2 \n\t" "movd 4(%2), %%mm3 \n\t" "add %4, %1 \n\t" PAVGB" %%mm2, %%mm0 \n\t" PAVGB" %%mm3, %%mm1 \n\t" "movd %%mm0, (%3) \n\t" "add %5, %3 \n\t" "movd %%mm1, (%3) \n\t" "add %5, %3 \n\t" "movd (%1), %%mm0 \n\t" "add %4, %1 \n\t" "movd (%1), %%mm1 \n\t" "movd 8(%2), %%mm2 \n\t" "movd 12(%2), %%mm3 \n\t" "add %4, %1 \n\t" PAVGB" %%mm2, %%mm0 \n\t" PAVGB" %%mm3, %%mm1 \n\t" "movd %%mm0, (%3) \n\t" "add %5, %3 \n\t" "movd %%mm1, (%3) \n\t" "add %5, %3 \n\t" "add $16, %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" #if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) #else :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) #endif :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) :"memory"); } static void DEF(put_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) { __asm__ volatile( "testl $1, %0 \n\t" " jz 1f \n\t" "movq (%1), %%mm0 \n\t" "movq (%2), %%mm1 \n\t" "add %4, %1 \n\t" "add $8, %2 \n\t" PAVGB" %%mm1, %%mm0 \n\t" "movq %%mm0, (%3) \n\t" "add %5, %3 \n\t" "decl %0 \n\t" "1: \n\t" "movq (%1), %%mm0 \n\t" "add %4, %1 \n\t" "movq (%1), %%mm1 \n\t" "add %4, %1 \n\t" PAVGB" (%2), %%mm0 \n\t" PAVGB" 8(%2), %%mm1 \n\t" "movq %%mm0, (%3) \n\t" "add %5, %3 \n\t" "movq %%mm1, (%3) \n\t" "add %5, %3 \n\t" "movq (%1), %%mm0 \n\t" "add %4, %1 \n\t" "movq (%1), %%mm1 \n\t" "add %4, %1 \n\t" PAVGB" 16(%2), %%mm0 \n\t" PAVGB" 24(%2), %%mm1 \n\t" "movq %%mm0, (%3) \n\t" "add %5, %3 \n\t" "movq %%mm1, (%3) \n\t" "add %5, %3 \n\t" "add $32, %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" #if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" 
are buggy too (with gcc 3.2.2 at least) and cannot be used :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) #else :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) #endif :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) :"memory"); //the following should be used, though better not with gcc ... /* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) :"r"(src1Stride), "r"(dstStride) :"memory");*/ } static void DEF(put_no_rnd_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) { __asm__ volatile( "pcmpeqb %%mm6, %%mm6 \n\t" "testl $1, %0 \n\t" " jz 1f \n\t" "movq (%1), %%mm0 \n\t" "movq (%2), %%mm1 \n\t" "add %4, %1 \n\t" "add $8, %2 \n\t" "pxor %%mm6, %%mm0 \n\t" "pxor %%mm6, %%mm1 \n\t" PAVGB" %%mm1, %%mm0 \n\t" "pxor %%mm6, %%mm0 \n\t" "movq %%mm0, (%3) \n\t" "add %5, %3 \n\t" "decl %0 \n\t" "1: \n\t" "movq (%1), %%mm0 \n\t" "add %4, %1 \n\t" "movq (%1), %%mm1 \n\t" "add %4, %1 \n\t" "movq (%2), %%mm2 \n\t" "movq 8(%2), %%mm3 \n\t" "pxor %%mm6, %%mm0 \n\t" "pxor %%mm6, %%mm1 \n\t" "pxor %%mm6, %%mm2 \n\t" "pxor %%mm6, %%mm3 \n\t" PAVGB" %%mm2, %%mm0 \n\t" PAVGB" %%mm3, %%mm1 \n\t" "pxor %%mm6, %%mm0 \n\t" "pxor %%mm6, %%mm1 \n\t" "movq %%mm0, (%3) \n\t" "add %5, %3 \n\t" "movq %%mm1, (%3) \n\t" "add %5, %3 \n\t" "movq (%1), %%mm0 \n\t" "add %4, %1 \n\t" "movq (%1), %%mm1 \n\t" "add %4, %1 \n\t" "movq 16(%2), %%mm2 \n\t" "movq 24(%2), %%mm3 \n\t" "pxor %%mm6, %%mm0 \n\t" "pxor %%mm6, %%mm1 \n\t" "pxor %%mm6, %%mm2 \n\t" "pxor %%mm6, %%mm3 \n\t" PAVGB" %%mm2, %%mm0 \n\t" PAVGB" %%mm3, %%mm1 \n\t" "pxor %%mm6, %%mm0 \n\t" "pxor %%mm6, %%mm1 \n\t" "movq %%mm0, (%3) \n\t" "add %5, %3 \n\t" "movq %%mm1, (%3) \n\t" "add %5, %3 \n\t" "add $32, %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" #if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) #else :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) #endif :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) :"memory"); 
//the following should be used, though better not with gcc ... /* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) :"r"(src1Stride), "r"(dstStride) :"memory");*/ } static void DEF(avg_pixels4_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) { __asm__ volatile( "testl $1, %0 \n\t" " jz 1f \n\t" "movd (%1), %%mm0 \n\t" "movd (%2), %%mm1 \n\t" "add %4, %1 \n\t" "add $4, %2 \n\t" PAVGB" %%mm1, %%mm0 \n\t" PAVGB" (%3), %%mm0 \n\t" "movd %%mm0, (%3) \n\t" "add %5, %3 \n\t" "decl %0 \n\t" "1: \n\t" "movd (%1), %%mm0 \n\t" "add %4, %1 \n\t" "movd (%1), %%mm1 \n\t" "add %4, %1 \n\t" PAVGB" (%2), %%mm0 \n\t" PAVGB" 4(%2), %%mm1 \n\t" PAVGB" (%3), %%mm0 \n\t" "movd %%mm0, (%3) \n\t" "add %5, %3 \n\t" PAVGB" (%3), %%mm1 \n\t" "movd %%mm1, (%3) \n\t" "add %5, %3 \n\t" "movd (%1), %%mm0 \n\t" "add %4, %1 \n\t" "movd (%1), %%mm1 \n\t" "add %4, %1 \n\t" PAVGB" 8(%2), %%mm0 \n\t" PAVGB" 12(%2), %%mm1 \n\t" PAVGB" (%3), %%mm0 \n\t" "movd %%mm0, (%3) \n\t" "add %5, %3 \n\t" PAVGB" (%3), %%mm1 \n\t" "movd %%mm1, (%3) \n\t" "add %5, %3 \n\t" "add $16, %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" #if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) #else :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) #endif :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) :"memory"); } static void DEF(avg_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) { __asm__ volatile( "testl $1, %0 \n\t" " jz 1f \n\t" "movq (%1), %%mm0 \n\t" "movq (%2), %%mm1 \n\t" "add %4, %1 \n\t" "add $8, %2 \n\t" PAVGB" %%mm1, %%mm0 \n\t" PAVGB" (%3), %%mm0 \n\t" "movq %%mm0, (%3) \n\t" "add %5, %3 \n\t" "decl %0 \n\t" "1: \n\t" "movq (%1), %%mm0 \n\t" "add %4, %1 \n\t" "movq (%1), %%mm1 \n\t" "add %4, %1 \n\t" PAVGB" (%2), %%mm0 \n\t" PAVGB" 8(%2), %%mm1 \n\t" PAVGB" (%3), %%mm0 \n\t" "movq %%mm0, (%3) \n\t" "add %5, %3 \n\t" PAVGB" (%3), %%mm1 \n\t" "movq 
%%mm1, (%3) \n\t" "add %5, %3 \n\t" "movq (%1), %%mm0 \n\t" "add %4, %1 \n\t" "movq (%1), %%mm1 \n\t" "add %4, %1 \n\t" PAVGB" 16(%2), %%mm0 \n\t" PAVGB" 24(%2), %%mm1 \n\t" PAVGB" (%3), %%mm0 \n\t" "movq %%mm0, (%3) \n\t" "add %5, %3 \n\t" PAVGB" (%3), %%mm1 \n\t" "movq %%mm1, (%3) \n\t" "add %5, %3 \n\t" "add $32, %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" #if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) #else :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) #endif :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) :"memory"); //the following should be used, though better not with gcc ... /* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) :"r"(src1Stride), "r"(dstStride) :"memory");*/ } static void DEF(put_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" "1: \n\t" "movq (%1), %%mm0 \n\t" "movq (%1, %3), %%mm1 \n\t" "movq 8(%1), %%mm2 \n\t" "movq 8(%1, %3), %%mm3 \n\t" PAVGB" 1(%1), %%mm0 \n\t" PAVGB" 1(%1, %3), %%mm1 \n\t" PAVGB" 9(%1), %%mm2 \n\t" PAVGB" 9(%1, %3), %%mm3 \n\t" "movq %%mm0, (%2) \n\t" "movq %%mm1, (%2, %3) \n\t" "movq %%mm2, 8(%2) \n\t" "movq %%mm3, 8(%2, %3) \n\t" "add %%"REG_a", %1 \n\t" "add %%"REG_a", %2 \n\t" "movq (%1), %%mm0 \n\t" "movq (%1, %3), %%mm1 \n\t" "movq 8(%1), %%mm2 \n\t" "movq 8(%1, %3), %%mm3 \n\t" PAVGB" 1(%1), %%mm0 \n\t" PAVGB" 1(%1, %3), %%mm1 \n\t" PAVGB" 9(%1), %%mm2 \n\t" PAVGB" 9(%1, %3), %%mm3 \n\t" "add %%"REG_a", %1 \n\t" "movq %%mm0, (%2) \n\t" "movq %%mm1, (%2, %3) \n\t" "movq %%mm2, 8(%2) \n\t" "movq %%mm3, 8(%2, %3) \n\t" "add %%"REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels), "+D"(block) :"r" ((x86_reg)line_size) :"%"REG_a, "memory"); } static void DEF(put_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) { __asm__ volatile( "testl $1, %0 \n\t" " jz 1f \n\t" "movq (%1), %%mm0 
\n\t" "movq 8(%1), %%mm1 \n\t" PAVGB" (%2), %%mm0 \n\t" PAVGB" 8(%2), %%mm1 \n\t" "add %4, %1 \n\t" "add $16, %2 \n\t" "movq %%mm0, (%3) \n\t" "movq %%mm1, 8(%3) \n\t" "add %5, %3 \n\t" "decl %0 \n\t" "1: \n\t" "movq (%1), %%mm0 \n\t" "movq 8(%1), %%mm1 \n\t" "add %4, %1 \n\t" PAVGB" (%2), %%mm0 \n\t" PAVGB" 8(%2), %%mm1 \n\t" "movq %%mm0, (%3) \n\t" "movq %%mm1, 8(%3) \n\t" "add %5, %3 \n\t" "movq (%1), %%mm0 \n\t" "movq 8(%1), %%mm1 \n\t" "add %4, %1 \n\t" PAVGB" 16(%2), %%mm0 \n\t" PAVGB" 24(%2), %%mm1 \n\t" "movq %%mm0, (%3) \n\t" "movq %%mm1, 8(%3) \n\t" "add %5, %3 \n\t" "add $32, %2 \n\t" "subl $2, %0 \n\t" "jnz 1b \n\t" #if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) #else :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) #endif :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) :"memory"); //the following should be used, though better not with gcc ... /* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) :"r"(src1Stride), "r"(dstStride) :"memory");*/ } static void DEF(avg_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) { __asm__ volatile( "testl $1, %0 \n\t" " jz 1f \n\t" "movq (%1), %%mm0 \n\t" "movq 8(%1), %%mm1 \n\t" PAVGB" (%2), %%mm0 \n\t" PAVGB" 8(%2), %%mm1 \n\t" "add %4, %1 \n\t" "add $16, %2 \n\t" PAVGB" (%3), %%mm0 \n\t" PAVGB" 8(%3), %%mm1 \n\t" "movq %%mm0, (%3) \n\t" "movq %%mm1, 8(%3) \n\t" "add %5, %3 \n\t" "decl %0 \n\t" "1: \n\t" "movq (%1), %%mm0 \n\t" "movq 8(%1), %%mm1 \n\t" "add %4, %1 \n\t" PAVGB" (%2), %%mm0 \n\t" PAVGB" 8(%2), %%mm1 \n\t" PAVGB" (%3), %%mm0 \n\t" PAVGB" 8(%3), %%mm1 \n\t" "movq %%mm0, (%3) \n\t" "movq %%mm1, 8(%3) \n\t" "add %5, %3 \n\t" "movq (%1), %%mm0 \n\t" "movq 8(%1), %%mm1 \n\t" "add %4, %1 \n\t" PAVGB" 16(%2), %%mm0 \n\t" PAVGB" 24(%2), %%mm1 \n\t" PAVGB" (%3), %%mm0 \n\t" PAVGB" 8(%3), %%mm1 \n\t" "movq %%mm0, (%3) \n\t" "movq %%mm1, 8(%3) \n\t" "add %5, %3 \n\t" "add $32, 
%2 \n\t" "subl $2, %0 \n\t" "jnz 1b \n\t" #if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) #else :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) #endif :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) :"memory"); //the following should be used, though better not with gcc ... /* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) :"r"(src1Stride), "r"(dstStride) :"memory");*/ } static void DEF(put_no_rnd_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) { __asm__ volatile( "pcmpeqb %%mm6, %%mm6 \n\t" "testl $1, %0 \n\t" " jz 1f \n\t" "movq (%1), %%mm0 \n\t" "movq 8(%1), %%mm1 \n\t" "movq (%2), %%mm2 \n\t" "movq 8(%2), %%mm3 \n\t" "pxor %%mm6, %%mm0 \n\t" "pxor %%mm6, %%mm1 \n\t" "pxor %%mm6, %%mm2 \n\t" "pxor %%mm6, %%mm3 \n\t" PAVGB" %%mm2, %%mm0 \n\t" PAVGB" %%mm3, %%mm1 \n\t" "pxor %%mm6, %%mm0 \n\t" "pxor %%mm6, %%mm1 \n\t" "add %4, %1 \n\t" "add $16, %2 \n\t" "movq %%mm0, (%3) \n\t" "movq %%mm1, 8(%3) \n\t" "add %5, %3 \n\t" "decl %0 \n\t" "1: \n\t" "movq (%1), %%mm0 \n\t" "movq 8(%1), %%mm1 \n\t" "add %4, %1 \n\t" "movq (%2), %%mm2 \n\t" "movq 8(%2), %%mm3 \n\t" "pxor %%mm6, %%mm0 \n\t" "pxor %%mm6, %%mm1 \n\t" "pxor %%mm6, %%mm2 \n\t" "pxor %%mm6, %%mm3 \n\t" PAVGB" %%mm2, %%mm0 \n\t" PAVGB" %%mm3, %%mm1 \n\t" "pxor %%mm6, %%mm0 \n\t" "pxor %%mm6, %%mm1 \n\t" "movq %%mm0, (%3) \n\t" "movq %%mm1, 8(%3) \n\t" "add %5, %3 \n\t" "movq (%1), %%mm0 \n\t" "movq 8(%1), %%mm1 \n\t" "add %4, %1 \n\t" "movq 16(%2), %%mm2 \n\t" "movq 24(%2), %%mm3 \n\t" "pxor %%mm6, %%mm0 \n\t" "pxor %%mm6, %%mm1 \n\t" "pxor %%mm6, %%mm2 \n\t" "pxor %%mm6, %%mm3 \n\t" PAVGB" %%mm2, %%mm0 \n\t" PAVGB" %%mm3, %%mm1 \n\t" "pxor %%mm6, %%mm0 \n\t" "pxor %%mm6, %%mm1 \n\t" "movq %%mm0, (%3) \n\t" "movq %%mm1, 8(%3) \n\t" "add %5, %3 \n\t" "add $32, %2 \n\t" "subl $2, %0 \n\t" "jnz 1b \n\t" #if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 
3.2.2 at least) and cannot be used :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) #else :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) #endif :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) :"memory"); //the following should be used, though better not with gcc ... /* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) :"r"(src1Stride), "r"(dstStride) :"memory");*/ } /* GL: this function does incorrect rounding if overflow */ static void DEF(put_no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { MOVQ_BONE(mm6); __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" "1: \n\t" "movq (%1), %%mm0 \n\t" "movq (%1, %3), %%mm2 \n\t" "movq 1(%1), %%mm1 \n\t" "movq 1(%1, %3), %%mm3 \n\t" "add %%"REG_a", %1 \n\t" "psubusb %%mm6, %%mm0 \n\t" "psubusb %%mm6, %%mm2 \n\t" PAVGB" %%mm1, %%mm0 \n\t" PAVGB" %%mm3, %%mm2 \n\t" "movq %%mm0, (%2) \n\t" "movq %%mm2, (%2, %3) \n\t" "movq (%1), %%mm0 \n\t" "movq 1(%1), %%mm1 \n\t" "movq (%1, %3), %%mm2 \n\t" "movq 1(%1, %3), %%mm3 \n\t" "add %%"REG_a", %2 \n\t" "add %%"REG_a", %1 \n\t" "psubusb %%mm6, %%mm0 \n\t" "psubusb %%mm6, %%mm2 \n\t" PAVGB" %%mm1, %%mm0 \n\t" PAVGB" %%mm3, %%mm2 \n\t" "movq %%mm0, (%2) \n\t" "movq %%mm2, (%2, %3) \n\t" "add %%"REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels), "+D"(block) :"r" ((x86_reg)line_size) :"%"REG_a, "memory"); } static void DEF(put_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" "movq (%1), %%mm0 \n\t" "sub %3, %2 \n\t" "1: \n\t" "movq (%1, %3), %%mm1 \n\t" "movq (%1, %%"REG_a"), %%mm2 \n\t" "add %%"REG_a", %1 \n\t" PAVGB" %%mm1, %%mm0 \n\t" PAVGB" %%mm2, %%mm1 \n\t" "movq %%mm0, (%2, %3) \n\t" "movq %%mm1, (%2, %%"REG_a") \n\t" "movq (%1, %3), %%mm1 \n\t" "movq (%1, %%"REG_a"), %%mm0 \n\t" "add %%"REG_a", %2 \n\t" "add %%"REG_a", %1 \n\t" PAVGB" %%mm1, %%mm2 \n\t" PAVGB" %%mm0, %%mm1 \n\t" "movq %%mm2, (%2, %3) \n\t" "movq %%mm1, (%2, %%"REG_a") \n\t" "add %%"REG_a", %2 \n\t" 
"subl $4, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels), "+D" (block) :"r" ((x86_reg)line_size) :"%"REG_a, "memory"); } /* GL: this function does incorrect rounding if overflow */ static void DEF(put_no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { MOVQ_BONE(mm6); __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" "movq (%1), %%mm0 \n\t" "sub %3, %2 \n\t" "1: \n\t" "movq (%1, %3), %%mm1 \n\t" "movq (%1, %%"REG_a"), %%mm2 \n\t" "add %%"REG_a", %1 \n\t" "psubusb %%mm6, %%mm1 \n\t" PAVGB" %%mm1, %%mm0 \n\t" PAVGB" %%mm2, %%mm1 \n\t" "movq %%mm0, (%2, %3) \n\t" "movq %%mm1, (%2, %%"REG_a") \n\t" "movq (%1, %3), %%mm1 \n\t" "movq (%1, %%"REG_a"), %%mm0 \n\t" "add %%"REG_a", %2 \n\t" "add %%"REG_a", %1 \n\t" "psubusb %%mm6, %%mm1 \n\t" PAVGB" %%mm1, %%mm2 \n\t" PAVGB" %%mm0, %%mm1 \n\t" "movq %%mm2, (%2, %3) \n\t" "movq %%mm1, (%2, %%"REG_a") \n\t" "add %%"REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels), "+D" (block) :"r" ((x86_reg)line_size) :"%"REG_a, "memory"); } static void DEF(avg_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" "1: \n\t" "movq (%2), %%mm0 \n\t" "movq (%2, %3), %%mm1 \n\t" PAVGB" (%1), %%mm0 \n\t" PAVGB" (%1, %3), %%mm1 \n\t" "movq %%mm0, (%2) \n\t" "movq %%mm1, (%2, %3) \n\t" "add %%"REG_a", %1 \n\t" "add %%"REG_a", %2 \n\t" "movq (%2), %%mm0 \n\t" "movq (%2, %3), %%mm1 \n\t" PAVGB" (%1), %%mm0 \n\t" PAVGB" (%1, %3), %%mm1 \n\t" "add %%"REG_a", %1 \n\t" "movq %%mm0, (%2) \n\t" "movq %%mm1, (%2, %3) \n\t" "add %%"REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels), "+D"(block) :"r" ((x86_reg)line_size) :"%"REG_a, "memory"); } static void DEF(avg_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" "1: \n\t" "movq (%1), %%mm0 \n\t" "movq (%1, %3), %%mm2 \n\t" PAVGB" 1(%1), %%mm0 \n\t" PAVGB" 1(%1, %3), %%mm2 \n\t" PAVGB" (%2), %%mm0 
\n\t" PAVGB" (%2, %3), %%mm2 \n\t" "add %%"REG_a", %1 \n\t" "movq %%mm0, (%2) \n\t" "movq %%mm2, (%2, %3) \n\t" "movq (%1), %%mm0 \n\t" "movq (%1, %3), %%mm2 \n\t" PAVGB" 1(%1), %%mm0 \n\t" PAVGB" 1(%1, %3), %%mm2 \n\t" "add %%"REG_a", %2 \n\t" "add %%"REG_a", %1 \n\t" PAVGB" (%2), %%mm0 \n\t" PAVGB" (%2, %3), %%mm2 \n\t" "movq %%mm0, (%2) \n\t" "movq %%mm2, (%2, %3) \n\t" "add %%"REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels), "+D"(block) :"r" ((x86_reg)line_size) :"%"REG_a, "memory"); } static void DEF(avg_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" "movq (%1), %%mm0 \n\t" "sub %3, %2 \n\t" "1: \n\t" "movq (%1, %3), %%mm1 \n\t" "movq (%1, %%"REG_a"), %%mm2 \n\t" "add %%"REG_a", %1 \n\t" PAVGB" %%mm1, %%mm0 \n\t" PAVGB" %%mm2, %%mm1 \n\t" "movq (%2, %3), %%mm3 \n\t" "movq (%2, %%"REG_a"), %%mm4 \n\t" PAVGB" %%mm3, %%mm0 \n\t" PAVGB" %%mm4, %%mm1 \n\t" "movq %%mm0, (%2, %3) \n\t" "movq %%mm1, (%2, %%"REG_a") \n\t" "movq (%1, %3), %%mm1 \n\t" "movq (%1, %%"REG_a"), %%mm0 \n\t" PAVGB" %%mm1, %%mm2 \n\t" PAVGB" %%mm0, %%mm1 \n\t" "add %%"REG_a", %2 \n\t" "add %%"REG_a", %1 \n\t" "movq (%2, %3), %%mm3 \n\t" "movq (%2, %%"REG_a"), %%mm4 \n\t" PAVGB" %%mm3, %%mm2 \n\t" PAVGB" %%mm4, %%mm1 \n\t" "movq %%mm2, (%2, %3) \n\t" "movq %%mm1, (%2, %%"REG_a") \n\t" "add %%"REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels), "+D"(block) :"r" ((x86_reg)line_size) :"%"REG_a, "memory"); } /* Note this is not correctly rounded, but this function is only * used for B-frames so it does not matter. 
*/ static void DEF(avg_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { MOVQ_BONE(mm6); __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" "movq (%1), %%mm0 \n\t" PAVGB" 1(%1), %%mm0 \n\t" ASMALIGN(3) "1: \n\t" "movq (%1, %%"REG_a"), %%mm2 \n\t" "movq (%1, %3), %%mm1 \n\t" "psubusb %%mm6, %%mm2 \n\t" PAVGB" 1(%1, %3), %%mm1 \n\t" PAVGB" 1(%1, %%"REG_a"), %%mm2 \n\t" "add %%"REG_a", %1 \n\t" PAVGB" %%mm1, %%mm0 \n\t" PAVGB" %%mm2, %%mm1 \n\t" PAVGB" (%2), %%mm0 \n\t" PAVGB" (%2, %3), %%mm1 \n\t" "movq %%mm0, (%2) \n\t" "movq %%mm1, (%2, %3) \n\t" "movq (%1, %3), %%mm1 \n\t" "movq (%1, %%"REG_a"), %%mm0 \n\t" PAVGB" 1(%1, %3), %%mm1 \n\t" PAVGB" 1(%1, %%"REG_a"), %%mm0 \n\t" "add %%"REG_a", %2 \n\t" "add %%"REG_a", %1 \n\t" PAVGB" %%mm1, %%mm2 \n\t" PAVGB" %%mm0, %%mm1 \n\t" PAVGB" (%2), %%mm2 \n\t" PAVGB" (%2, %3), %%mm1 \n\t" "movq %%mm2, (%2) \n\t" "movq %%mm1, (%2, %3) \n\t" "add %%"REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels), "+D"(block) :"r" ((x86_reg)line_size) :"%"REG_a, "memory"); } static void DEF(avg_pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { do { __asm__ volatile( "movd (%1), %%mm0 \n\t" "movd (%1, %2), %%mm1 \n\t" "movd (%1, %2, 2), %%mm2 \n\t" "movd (%1, %3), %%mm3 \n\t" PAVGB" (%0), %%mm0 \n\t" PAVGB" (%0, %2), %%mm1 \n\t" PAVGB" (%0, %2, 2), %%mm2 \n\t" PAVGB" (%0, %3), %%mm3 \n\t" "movd %%mm0, (%1) \n\t" "movd %%mm1, (%1, %2) \n\t" "movd %%mm2, (%1, %2, 2) \n\t" "movd %%mm3, (%1, %3) \n\t" ::"S"(pixels), "D"(block), "r" ((x86_reg)line_size), "r"((x86_reg)3L*line_size) :"memory"); block += 4*line_size; pixels += 4*line_size; h -= 4; } while(h > 0); } //FIXME the following could be optimized too ... 
static void DEF(put_no_rnd_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ DEF(put_no_rnd_pixels8_x2)(block , pixels , line_size, h); DEF(put_no_rnd_pixels8_x2)(block+8, pixels+8, line_size, h); } static void DEF(put_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ DEF(put_pixels8_y2)(block , pixels , line_size, h); DEF(put_pixels8_y2)(block+8, pixels+8, line_size, h); } static void DEF(put_no_rnd_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ DEF(put_no_rnd_pixels8_y2)(block , pixels , line_size, h); DEF(put_no_rnd_pixels8_y2)(block+8, pixels+8, line_size, h); } static void DEF(avg_pixels16)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ DEF(avg_pixels8)(block , pixels , line_size, h); DEF(avg_pixels8)(block+8, pixels+8, line_size, h); } static void DEF(avg_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ DEF(avg_pixels8_x2)(block , pixels , line_size, h); DEF(avg_pixels8_x2)(block+8, pixels+8, line_size, h); } static void DEF(avg_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ DEF(avg_pixels8_y2)(block , pixels , line_size, h); DEF(avg_pixels8_y2)(block+8, pixels+8, line_size, h); } static void DEF(avg_pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ DEF(avg_pixels8_xy2)(block , pixels , line_size, h); DEF(avg_pixels8_xy2)(block+8, pixels+8, line_size, h); } #define QPEL_2TAP_L3(OPNAME) \ static void DEF(OPNAME ## 2tap_qpel16_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\ __asm__ volatile(\ "1: \n\t"\ "movq (%1,%2), %%mm0 \n\t"\ "movq 8(%1,%2), %%mm1 \n\t"\ PAVGB" (%1,%3), %%mm0 \n\t"\ PAVGB" 8(%1,%3), %%mm1 \n\t"\ PAVGB" (%1), %%mm0 \n\t"\ PAVGB" 8(%1), %%mm1 \n\t"\ STORE_OP( (%1,%4),%%mm0)\ STORE_OP(8(%1,%4),%%mm1)\ "movq %%mm0, (%1,%4) \n\t"\ "movq %%mm1, 8(%1,%4) \n\t"\ "add %5, %1 \n\t"\ "decl %0 \n\t"\ "jnz 1b \n\t"\ :"+g"(h), "+r"(src)\ :"r"((x86_reg)off1), 
"r"((x86_reg)off2),\ "r"((x86_reg)(dst-src)), "r"((x86_reg)stride)\ :"memory"\ );\ }\ static void DEF(OPNAME ## 2tap_qpel8_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\ __asm__ volatile(\ "1: \n\t"\ "movq (%1,%2), %%mm0 \n\t"\ PAVGB" (%1,%3), %%mm0 \n\t"\ PAVGB" (%1), %%mm0 \n\t"\ STORE_OP((%1,%4),%%mm0)\ "movq %%mm0, (%1,%4) \n\t"\ "add %5, %1 \n\t"\ "decl %0 \n\t"\ "jnz 1b \n\t"\ :"+g"(h), "+r"(src)\ :"r"((x86_reg)off1), "r"((x86_reg)off2),\ "r"((x86_reg)(dst-src)), "r"((x86_reg)stride)\ :"memory"\ );\ } #define STORE_OP(a,b) PAVGB" "#a","#b" \n\t" QPEL_2TAP_L3(avg_) #undef STORE_OP #define STORE_OP(a,b) QPEL_2TAP_L3(put_) #undef STORE_OP #undef QPEL_2TAP_L3
123linslouis-android-video-cutter
jni/libavcodec/x86/dsputil_mmx_avg_template.c
C
asf20
38,297
/* * MLP DSP functions x86-optimized * Copyright (c) 2009 Ramiro Polla * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/x86_cpu.h" #include "libavcodec/dsputil.h" #include "libavcodec/mlp.h" #if HAVE_7REGS && HAVE_TEN_OPERANDS extern void ff_mlp_firorder_8; extern void ff_mlp_firorder_7; extern void ff_mlp_firorder_6; extern void ff_mlp_firorder_5; extern void ff_mlp_firorder_4; extern void ff_mlp_firorder_3; extern void ff_mlp_firorder_2; extern void ff_mlp_firorder_1; extern void ff_mlp_firorder_0; extern void ff_mlp_iirorder_4; extern void ff_mlp_iirorder_3; extern void ff_mlp_iirorder_2; extern void ff_mlp_iirorder_1; extern void ff_mlp_iirorder_0; static const void *firtable[9] = { &ff_mlp_firorder_0, &ff_mlp_firorder_1, &ff_mlp_firorder_2, &ff_mlp_firorder_3, &ff_mlp_firorder_4, &ff_mlp_firorder_5, &ff_mlp_firorder_6, &ff_mlp_firorder_7, &ff_mlp_firorder_8 }; static const void *iirtable[5] = { &ff_mlp_iirorder_0, &ff_mlp_iirorder_1, &ff_mlp_iirorder_2, &ff_mlp_iirorder_3, &ff_mlp_iirorder_4 }; #if ARCH_X86_64 #define MLPMUL(label, offset, offs, offc) \ LABEL_MANGLE(label)": \n\t" \ "movslq "offset"+"offs"(%0), %%rax\n\t" \ "movslq "offset"+"offc"(%1), %%rdx\n\t" \ "imul %%rdx, %%rax\n\t" \ "add %%rax, %%rsi\n\t" #define FIRMULREG(label, offset, firc)\ 
LABEL_MANGLE(label)": \n\t" \ "movslq "#offset"(%0), %%rax\n\t" \ "imul %"#firc", %%rax\n\t" \ "add %%rax, %%rsi\n\t" #define CLEAR_ACCUM \ "xor %%rsi, %%rsi\n\t" #define SHIFT_ACCUM \ "shr %%cl, %%rsi\n\t" #define ACCUM "%%rdx" #define RESULT "%%rsi" #define RESULT32 "%%esi" #else /* if ARCH_X86_32 */ #define MLPMUL(label, offset, offs, offc) \ LABEL_MANGLE(label)": \n\t" \ "mov "offset"+"offs"(%0), %%eax\n\t" \ "imull "offset"+"offc"(%1) \n\t" \ "add %%eax , %%esi\n\t" \ "adc %%edx , %%ecx\n\t" #define FIRMULREG(label, offset, firc) \ MLPMUL(label, #offset, "0", "0") #define CLEAR_ACCUM \ "xor %%esi, %%esi\n\t" \ "xor %%ecx, %%ecx\n\t" #define SHIFT_ACCUM \ "mov %%ecx, %%edx\n\t" \ "mov %%esi, %%eax\n\t" \ "movzbl %7 , %%ecx\n\t" \ "shrd %%cl, %%edx, %%eax\n\t" \ #define ACCUM "%%edx" #define RESULT "%%eax" #define RESULT32 "%%eax" #endif /* !ARCH_X86_64 */ #define BINC AV_STRINGIFY(4* MAX_CHANNELS) #define IOFFS AV_STRINGIFY(4*(MAX_FIR_ORDER + MAX_BLOCKSIZE)) #define IOFFC AV_STRINGIFY(4* MAX_FIR_ORDER) #define FIRMUL(label, offset) MLPMUL(label, #offset, "0", "0") #define IIRMUL(label, offset) MLPMUL(label, #offset, IOFFS, IOFFC) static void mlp_filter_channel_x86(int32_t *state, const int32_t *coeff, int firorder, int iirorder, unsigned int filter_shift, int32_t mask, int blocksize, int32_t *sample_buffer) { const void *firjump = firtable[firorder]; const void *iirjump = iirtable[iirorder]; blocksize = -blocksize; __asm__ volatile( "1: \n\t" CLEAR_ACCUM "jmp *%5 \n\t" FIRMUL (ff_mlp_firorder_8, 0x1c ) FIRMUL (ff_mlp_firorder_7, 0x18 ) FIRMUL (ff_mlp_firorder_6, 0x14 ) FIRMUL (ff_mlp_firorder_5, 0x10 ) FIRMUL (ff_mlp_firorder_4, 0x0c ) FIRMULREG(ff_mlp_firorder_3, 0x08,10) FIRMULREG(ff_mlp_firorder_2, 0x04, 9) FIRMULREG(ff_mlp_firorder_1, 0x00, 8) LABEL_MANGLE(ff_mlp_firorder_0)":\n\t" "jmp *%6 \n\t" IIRMUL (ff_mlp_iirorder_4, 0x0c ) IIRMUL (ff_mlp_iirorder_3, 0x08 ) IIRMUL (ff_mlp_iirorder_2, 0x04 ) IIRMUL (ff_mlp_iirorder_1, 0x00 ) 
LABEL_MANGLE(ff_mlp_iirorder_0)":\n\t" SHIFT_ACCUM "mov "RESULT" ,"ACCUM" \n\t" "add (%2) ,"RESULT" \n\t" "and %4 ,"RESULT" \n\t" "sub $4 , %0 \n\t" "mov "RESULT32", (%0) \n\t" "mov "RESULT32", (%2) \n\t" "add $"BINC" , %2 \n\t" "sub "ACCUM" ,"RESULT" \n\t" "mov "RESULT32","IOFFS"(%0) \n\t" "incl %3 \n\t" "js 1b \n\t" : /* 0*/"+r"(state), /* 1*/"+r"(coeff), /* 2*/"+r"(sample_buffer), #if ARCH_X86_64 /* 3*/"+r"(blocksize) : /* 4*/"r"((x86_reg)mask), /* 5*/"r"(firjump), /* 6*/"r"(iirjump) , /* 7*/"c"(filter_shift) , /* 8*/"r"((int64_t)coeff[0]) , /* 9*/"r"((int64_t)coeff[1]) , /*10*/"r"((int64_t)coeff[2]) : "rax", "rdx", "rsi" #else /* ARCH_X86_32 */ /* 3*/"+m"(blocksize) : /* 4*/"m"( mask), /* 5*/"m"(firjump), /* 6*/"m"(iirjump) , /* 7*/"m"(filter_shift) : "eax", "edx", "esi", "ecx" #endif /* !ARCH_X86_64 */ ); } #endif /* HAVE_7REGS && HAVE_TEN_OPERANDS */ void ff_mlp_init_x86(DSPContext* c, AVCodecContext *avctx) { #if HAVE_7REGS && HAVE_TEN_OPERANDS c->mlp_filter_channel = mlp_filter_channel_x86; #endif }
123linslouis-android-video-cutter
jni/libavcodec/x86/mlpdsp.c
C
asf20
6,360
/* * vp3dsp SSE2 function declarations * Copyright (c) 2007 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_X86_VP3DSP_SSE2_H #define AVCODEC_X86_VP3DSP_SSE2_H #include "libavcodec/dsputil.h" void ff_vp3_idct_sse2(int16_t *input_data); void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block); void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block); #endif /* AVCODEC_X86_VP3DSP_SSE2_H */
123linslouis-android-video-cutter
jni/libavcodec/x86/vp3dsp_sse2.h
C
asf20
1,193
;***************************************************************************** ;* SSE2-optimized H.264 iDCT ;***************************************************************************** ;* Copyright (C) 2003-2008 x264 project ;* ;* Authors: Laurent Aimar <fenrir@via.ecp.fr> ;* Loren Merritt <lorenm@u.washington.edu> ;* Holger Lubitz <hal@duncan.ol.sub.de> ;* Min Chen <chenm001.163.com> ;* ;* This program is free software; you can redistribute it and/or modify ;* it under the terms of the GNU General Public License as published by ;* the Free Software Foundation; either version 2 of the License, or ;* (at your option) any later version. ;* ;* This program is distributed in the hope that it will be useful, ;* but WITHOUT ANY WARRANTY; without even the implied warranty of ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ;* GNU General Public License for more details. ;* ;* You should have received a copy of the GNU General Public License ;* along with this program; if not, write to the Free Software ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA. ;***************************************************************************** %include "x86inc.asm" %include "x86util.asm" SECTION_RODATA pw_32: times 8 dw 32 SECTION .text INIT_XMM cglobal x264_add8x4_idct_sse2, 3,3,8 movq m0, [r1+ 0] movq m1, [r1+ 8] movq m2, [r1+16] movq m3, [r1+24] movhps m0, [r1+32] movhps m1, [r1+40] movhps m2, [r1+48] movhps m3, [r1+56] IDCT4_1D 0,1,2,3,4,5 TRANSPOSE2x4x4W 0,1,2,3,4 paddw m0, [pw_32 GLOBAL] IDCT4_1D 0,1,2,3,4,5 pxor m7, m7 STORE_DIFF m0, m4, m7, [r0] STORE_DIFF m1, m4, m7, [r0+r2] lea r0, [r0+r2*2] STORE_DIFF m2, m4, m7, [r0] STORE_DIFF m3, m4, m7, [r0+r2] RET
123linslouis-android-video-cutter
jni/libavcodec/x86/h264_idct_sse2.asm
Assembly
asf20
1,838
/** * @file * MMX-optimized functions for the VP6 decoder * * Copyright (C) 2009 Sebastien Lucas <sebastien.lucas@gmail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/x86_cpu.h" #include "libavcodec/dsputil.h" #include "dsputil_mmx.h" #include "vp6dsp_mmx.h" #define DIAG4_MMX(in1,in2,in3,in4) \ "movq "#in1"(%0), %%mm0 \n\t" \ "movq "#in2"(%0), %%mm1 \n\t" \ "movq %%mm0, %%mm3 \n\t" \ "movq %%mm1, %%mm4 \n\t" \ "punpcklbw %%mm7, %%mm0 \n\t" \ "punpcklbw %%mm7, %%mm1 \n\t" \ "punpckhbw %%mm7, %%mm3 \n\t" \ "punpckhbw %%mm7, %%mm4 \n\t" \ "pmullw 0(%2), %%mm0 \n\t" /* src[x-8 ] * biweight [0] */ \ "pmullw 8(%2), %%mm1 \n\t" /* src[x ] * biweight [1] */ \ "pmullw 0(%2), %%mm3 \n\t" /* src[x-8 ] * biweight [0] */ \ "pmullw 8(%2), %%mm4 \n\t" /* src[x ] * biweight [1] */ \ "paddw %%mm1, %%mm0 \n\t" \ "paddw %%mm4, %%mm3 \n\t" \ "movq "#in3"(%0), %%mm1 \n\t" \ "movq "#in4"(%0), %%mm2 \n\t" \ "movq %%mm1, %%mm4 \n\t" \ "movq %%mm2, %%mm5 \n\t" \ "punpcklbw %%mm7, %%mm1 \n\t" \ "punpcklbw %%mm7, %%mm2 \n\t" \ "punpckhbw %%mm7, %%mm4 \n\t" \ "punpckhbw %%mm7, %%mm5 \n\t" \ "pmullw 16(%2), %%mm1 \n\t" /* src[x+8 ] * biweight [2] */ \ "pmullw 24(%2), %%mm2 \n\t" /* src[x+16] * biweight [3] */ \ "pmullw 16(%2), %%mm4 \n\t" /* src[x+8 ] * biweight [2] */ \ "pmullw 
24(%2), %%mm5 \n\t" /* src[x+16] * biweight [3] */ \ "paddw %%mm2, %%mm1 \n\t" \ "paddw %%mm5, %%mm4 \n\t" \ "paddsw %%mm1, %%mm0 \n\t" \ "paddsw %%mm4, %%mm3 \n\t" \ "paddsw %%mm6, %%mm0 \n\t" /* Add 64 */ \ "paddsw %%mm6, %%mm3 \n\t" /* Add 64 */ \ "psraw $7, %%mm0 \n\t" \ "psraw $7, %%mm3 \n\t" \ "packuswb %%mm3, %%mm0 \n\t" \ "movq %%mm0, (%1) \n\t" void ff_vp6_filter_diag4_mmx(uint8_t *dst, uint8_t *src, int stride, const int16_t *h_weights, const int16_t *v_weights) { uint8_t tmp[8*11], *t = tmp; int16_t weights[4*4]; int i; src -= stride; for (i=0; i<4*4; i++) weights[i] = h_weights[i>>2]; __asm__ volatile( "pxor %%mm7, %%mm7 \n\t" "movq "MANGLE(ff_pw_64)", %%mm6 \n\t" "1: \n\t" DIAG4_MMX(-1,0,1,2) "add $8, %1 \n\t" "add %3, %0 \n\t" "decl %4 \n\t" "jnz 1b \n\t" : "+r"(src), "+r"(t) : "r"(weights), "r"((x86_reg)stride), "r"(11) : "memory"); t = tmp + 8; for (i=0; i<4*4; i++) weights[i] = v_weights[i>>2]; __asm__ volatile( "pxor %%mm7, %%mm7 \n\t" "movq "MANGLE(ff_pw_64)", %%mm6 \n\t" "1: \n\t" DIAG4_MMX(-8,0,8,16) "add $8, %0 \n\t" "add %3, %1 \n\t" "decl %4 \n\t" "jnz 1b \n\t" : "+r"(t), "+r"(dst) : "r"(weights), "r"((x86_reg)stride), "r"(8) : "memory"); }
123linslouis-android-video-cutter
jni/libavcodec/x86/vp6dsp_mmx.c
C
asf20
5,262
/* * idct_mmx.c * Copyright (C) 1999-2001 Aaron Holtzman <aholtzma@ess.engr.uvic.ca> * * This file is part of mpeg2dec, a free MPEG-2 video stream decoder. * See http://libmpeg2.sourceforge.net/ for updates. * * mpeg2dec is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * mpeg2dec is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with mpeg2dec; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/common.h" #include "libavcodec/dsputil.h" #include "dsputil_mmx.h" #include "mmx.h" #define ROW_SHIFT 11 #define COL_SHIFT 6 #define round(bias) ((int)(((bias)+0.5) * (1<<ROW_SHIFT))) #define rounder(bias) {round (bias), round (bias)} #if 0 /* C row IDCT - it is just here to document the MMXEXT and MMX versions */ static inline void idct_row (int16_t * row, int offset, int16_t * table, int32_t * rounder) { int C1, C2, C3, C4, C5, C6, C7; int a0, a1, a2, a3, b0, b1, b2, b3; row += offset; C1 = table[1]; C2 = table[2]; C3 = table[3]; C4 = table[4]; C5 = table[5]; C6 = table[6]; C7 = table[7]; a0 = C4*row[0] + C2*row[2] + C4*row[4] + C6*row[6] + *rounder; a1 = C4*row[0] + C6*row[2] - C4*row[4] - C2*row[6] + *rounder; a2 = C4*row[0] - C6*row[2] - C4*row[4] + C2*row[6] + *rounder; a3 = C4*row[0] - C2*row[2] + C4*row[4] - C6*row[6] + *rounder; b0 = C1*row[1] + C3*row[3] + C5*row[5] + C7*row[7]; b1 = C3*row[1] - C7*row[3] - C1*row[5] - C5*row[7]; b2 = C5*row[1] - C1*row[3] + C7*row[5] + C3*row[7]; b3 = C7*row[1] - C5*row[3] + C3*row[5] - C1*row[7]; row[0] = (a0 + b0) 
>> ROW_SHIFT; row[1] = (a1 + b1) >> ROW_SHIFT; row[2] = (a2 + b2) >> ROW_SHIFT; row[3] = (a3 + b3) >> ROW_SHIFT; row[4] = (a3 - b3) >> ROW_SHIFT; row[5] = (a2 - b2) >> ROW_SHIFT; row[6] = (a1 - b1) >> ROW_SHIFT; row[7] = (a0 - b0) >> ROW_SHIFT; } #endif /* MMXEXT row IDCT */ #define mmxext_table(c1,c2,c3,c4,c5,c6,c7) { c4, c2, -c4, -c2, \ c4, c6, c4, c6, \ c1, c3, -c1, -c5, \ c5, c7, c3, -c7, \ c4, -c6, c4, -c6, \ -c4, c2, c4, -c2, \ c5, -c1, c3, -c1, \ c7, c3, c7, -c5 } static inline void mmxext_row_head (int16_t * const row, const int offset, const int16_t * const table) { movq_m2r (*(row+offset), mm2); /* mm2 = x6 x4 x2 x0 */ movq_m2r (*(row+offset+4), mm5); /* mm5 = x7 x5 x3 x1 */ movq_r2r (mm2, mm0); /* mm0 = x6 x4 x2 x0 */ movq_m2r (*table, mm3); /* mm3 = -C2 -C4 C2 C4 */ movq_r2r (mm5, mm6); /* mm6 = x7 x5 x3 x1 */ movq_m2r (*(table+4), mm4); /* mm4 = C6 C4 C6 C4 */ pmaddwd_r2r (mm0, mm3); /* mm3 = -C4*x4-C2*x6 C4*x0+C2*x2 */ pshufw_r2r (mm2, mm2, 0x4e); /* mm2 = x2 x0 x6 x4 */ } static inline void mmxext_row (const int16_t * const table, const int32_t * const rounder) { movq_m2r (*(table+8), mm1); /* mm1 = -C5 -C1 C3 C1 */ pmaddwd_r2r (mm2, mm4); /* mm4 = C4*x0+C6*x2 C4*x4+C6*x6 */ pmaddwd_m2r (*(table+16), mm0); /* mm0 = C4*x4-C6*x6 C4*x0-C6*x2 */ pshufw_r2r (mm6, mm6, 0x4e); /* mm6 = x3 x1 x7 x5 */ movq_m2r (*(table+12), mm7); /* mm7 = -C7 C3 C7 C5 */ pmaddwd_r2r (mm5, mm1); /* mm1 = -C1*x5-C5*x7 C1*x1+C3*x3 */ paddd_m2r (*rounder, mm3); /* mm3 += rounder */ pmaddwd_r2r (mm6, mm7); /* mm7 = C3*x1-C7*x3 C5*x5+C7*x7 */ pmaddwd_m2r (*(table+20), mm2); /* mm2 = C4*x0-C2*x2 -C4*x4+C2*x6 */ paddd_r2r (mm4, mm3); /* mm3 = a1 a0 + rounder */ pmaddwd_m2r (*(table+24), mm5); /* mm5 = C3*x5-C1*x7 C5*x1-C1*x3 */ movq_r2r (mm3, mm4); /* mm4 = a1 a0 + rounder */ pmaddwd_m2r (*(table+28), mm6); /* mm6 = C7*x1-C5*x3 C7*x5+C3*x7 */ paddd_r2r (mm7, mm1); /* mm1 = b1 b0 */ paddd_m2r (*rounder, mm0); /* mm0 += rounder */ psubd_r2r (mm1, mm3); /* mm3 = a1-b1 a0-b0 + rounder 
*/ psrad_i2r (ROW_SHIFT, mm3); /* mm3 = y6 y7 */ paddd_r2r (mm4, mm1); /* mm1 = a1+b1 a0+b0 + rounder */ paddd_r2r (mm2, mm0); /* mm0 = a3 a2 + rounder */ psrad_i2r (ROW_SHIFT, mm1); /* mm1 = y1 y0 */ paddd_r2r (mm6, mm5); /* mm5 = b3 b2 */ movq_r2r (mm0, mm4); /* mm4 = a3 a2 + rounder */ paddd_r2r (mm5, mm0); /* mm0 = a3+b3 a2+b2 + rounder */ psubd_r2r (mm5, mm4); /* mm4 = a3-b3 a2-b2 + rounder */ } static inline void mmxext_row_tail (int16_t * const row, const int store) { psrad_i2r (ROW_SHIFT, mm0); /* mm0 = y3 y2 */ psrad_i2r (ROW_SHIFT, mm4); /* mm4 = y4 y5 */ packssdw_r2r (mm0, mm1); /* mm1 = y3 y2 y1 y0 */ packssdw_r2r (mm3, mm4); /* mm4 = y6 y7 y4 y5 */ movq_r2m (mm1, *(row+store)); /* save y3 y2 y1 y0 */ pshufw_r2r (mm4, mm4, 0xb1); /* mm4 = y7 y6 y5 y4 */ /* slot */ movq_r2m (mm4, *(row+store+4)); /* save y7 y6 y5 y4 */ } static inline void mmxext_row_mid (int16_t * const row, const int store, const int offset, const int16_t * const table) { movq_m2r (*(row+offset), mm2); /* mm2 = x6 x4 x2 x0 */ psrad_i2r (ROW_SHIFT, mm0); /* mm0 = y3 y2 */ movq_m2r (*(row+offset+4), mm5); /* mm5 = x7 x5 x3 x1 */ psrad_i2r (ROW_SHIFT, mm4); /* mm4 = y4 y5 */ packssdw_r2r (mm0, mm1); /* mm1 = y3 y2 y1 y0 */ movq_r2r (mm5, mm6); /* mm6 = x7 x5 x3 x1 */ packssdw_r2r (mm3, mm4); /* mm4 = y6 y7 y4 y5 */ movq_r2r (mm2, mm0); /* mm0 = x6 x4 x2 x0 */ movq_r2m (mm1, *(row+store)); /* save y3 y2 y1 y0 */ pshufw_r2r (mm4, mm4, 0xb1); /* mm4 = y7 y6 y5 y4 */ movq_m2r (*table, mm3); /* mm3 = -C2 -C4 C2 C4 */ movq_r2m (mm4, *(row+store+4)); /* save y7 y6 y5 y4 */ pmaddwd_r2r (mm0, mm3); /* mm3 = -C4*x4-C2*x6 C4*x0+C2*x2 */ movq_m2r (*(table+4), mm4); /* mm4 = C6 C4 C6 C4 */ pshufw_r2r (mm2, mm2, 0x4e); /* mm2 = x2 x0 x6 x4 */ } /* MMX row IDCT */ #define mmx_table(c1,c2,c3,c4,c5,c6,c7) { c4, c2, c4, c6, \ c4, c6, -c4, -c2, \ c1, c3, c3, -c7, \ c5, c7, -c1, -c5, \ c4, -c6, c4, -c2, \ -c4, c2, c4, -c6, \ c5, -c1, c7, -c5, \ c7, c3, c3, -c1 } static inline void mmx_row_head (int16_t * 
const row, const int offset, const int16_t * const table) { movq_m2r (*(row+offset), mm2); /* mm2 = x6 x4 x2 x0 */ movq_m2r (*(row+offset+4), mm5); /* mm5 = x7 x5 x3 x1 */ movq_r2r (mm2, mm0); /* mm0 = x6 x4 x2 x0 */ movq_m2r (*table, mm3); /* mm3 = C6 C4 C2 C4 */ movq_r2r (mm5, mm6); /* mm6 = x7 x5 x3 x1 */ punpckldq_r2r (mm0, mm0); /* mm0 = x2 x0 x2 x0 */ movq_m2r (*(table+4), mm4); /* mm4 = -C2 -C4 C6 C4 */ pmaddwd_r2r (mm0, mm3); /* mm3 = C4*x0+C6*x2 C4*x0+C2*x2 */ movq_m2r (*(table+8), mm1); /* mm1 = -C7 C3 C3 C1 */ punpckhdq_r2r (mm2, mm2); /* mm2 = x6 x4 x6 x4 */ } static inline void mmx_row (const int16_t * const table, const int32_t * const rounder) { pmaddwd_r2r (mm2, mm4); /* mm4 = -C4*x4-C2*x6 C4*x4+C6*x6 */ punpckldq_r2r (mm5, mm5); /* mm5 = x3 x1 x3 x1 */ pmaddwd_m2r (*(table+16), mm0); /* mm0 = C4*x0-C2*x2 C4*x0-C6*x2 */ punpckhdq_r2r (mm6, mm6); /* mm6 = x7 x5 x7 x5 */ movq_m2r (*(table+12), mm7); /* mm7 = -C5 -C1 C7 C5 */ pmaddwd_r2r (mm5, mm1); /* mm1 = C3*x1-C7*x3 C1*x1+C3*x3 */ paddd_m2r (*rounder, mm3); /* mm3 += rounder */ pmaddwd_r2r (mm6, mm7); /* mm7 = -C1*x5-C5*x7 C5*x5+C7*x7 */ pmaddwd_m2r (*(table+20), mm2); /* mm2 = C4*x4-C6*x6 -C4*x4+C2*x6 */ paddd_r2r (mm4, mm3); /* mm3 = a1 a0 + rounder */ pmaddwd_m2r (*(table+24), mm5); /* mm5 = C7*x1-C5*x3 C5*x1-C1*x3 */ movq_r2r (mm3, mm4); /* mm4 = a1 a0 + rounder */ pmaddwd_m2r (*(table+28), mm6); /* mm6 = C3*x5-C1*x7 C7*x5+C3*x7 */ paddd_r2r (mm7, mm1); /* mm1 = b1 b0 */ paddd_m2r (*rounder, mm0); /* mm0 += rounder */ psubd_r2r (mm1, mm3); /* mm3 = a1-b1 a0-b0 + rounder */ psrad_i2r (ROW_SHIFT, mm3); /* mm3 = y6 y7 */ paddd_r2r (mm4, mm1); /* mm1 = a1+b1 a0+b0 + rounder */ paddd_r2r (mm2, mm0); /* mm0 = a3 a2 + rounder */ psrad_i2r (ROW_SHIFT, mm1); /* mm1 = y1 y0 */ paddd_r2r (mm6, mm5); /* mm5 = b3 b2 */ movq_r2r (mm0, mm7); /* mm7 = a3 a2 + rounder */ paddd_r2r (mm5, mm0); /* mm0 = a3+b3 a2+b2 + rounder */ psubd_r2r (mm5, mm7); /* mm7 = a3-b3 a2-b2 + rounder */ } static inline void 
mmx_row_tail (int16_t * const row, const int store) { psrad_i2r (ROW_SHIFT, mm0); /* mm0 = y3 y2 */ psrad_i2r (ROW_SHIFT, mm7); /* mm7 = y4 y5 */ packssdw_r2r (mm0, mm1); /* mm1 = y3 y2 y1 y0 */ packssdw_r2r (mm3, mm7); /* mm7 = y6 y7 y4 y5 */ movq_r2m (mm1, *(row+store)); /* save y3 y2 y1 y0 */ movq_r2r (mm7, mm4); /* mm4 = y6 y7 y4 y5 */ pslld_i2r (16, mm7); /* mm7 = y7 0 y5 0 */ psrld_i2r (16, mm4); /* mm4 = 0 y6 0 y4 */ por_r2r (mm4, mm7); /* mm7 = y7 y6 y5 y4 */ /* slot */ movq_r2m (mm7, *(row+store+4)); /* save y7 y6 y5 y4 */ } static inline void mmx_row_mid (int16_t * const row, const int store, const int offset, const int16_t * const table) { movq_m2r (*(row+offset), mm2); /* mm2 = x6 x4 x2 x0 */ psrad_i2r (ROW_SHIFT, mm0); /* mm0 = y3 y2 */ movq_m2r (*(row+offset+4), mm5); /* mm5 = x7 x5 x3 x1 */ psrad_i2r (ROW_SHIFT, mm7); /* mm7 = y4 y5 */ packssdw_r2r (mm0, mm1); /* mm1 = y3 y2 y1 y0 */ movq_r2r (mm5, mm6); /* mm6 = x7 x5 x3 x1 */ packssdw_r2r (mm3, mm7); /* mm7 = y6 y7 y4 y5 */ movq_r2r (mm2, mm0); /* mm0 = x6 x4 x2 x0 */ movq_r2m (mm1, *(row+store)); /* save y3 y2 y1 y0 */ movq_r2r (mm7, mm1); /* mm1 = y6 y7 y4 y5 */ punpckldq_r2r (mm0, mm0); /* mm0 = x2 x0 x2 x0 */ psrld_i2r (16, mm7); /* mm7 = 0 y6 0 y4 */ movq_m2r (*table, mm3); /* mm3 = C6 C4 C2 C4 */ pslld_i2r (16, mm1); /* mm1 = y7 0 y5 0 */ movq_m2r (*(table+4), mm4); /* mm4 = -C2 -C4 C6 C4 */ por_r2r (mm1, mm7); /* mm7 = y7 y6 y5 y4 */ movq_m2r (*(table+8), mm1); /* mm1 = -C7 C3 C3 C1 */ punpckhdq_r2r (mm2, mm2); /* mm2 = x6 x4 x6 x4 */ movq_r2m (mm7, *(row+store+4)); /* save y7 y6 y5 y4 */ pmaddwd_r2r (mm0, mm3); /* mm3 = C4*x0+C6*x2 C4*x0+C2*x2 */ } #if 0 /* C column IDCT - it is just here to document the MMXEXT and MMX versions */ static inline void idct_col (int16_t * col, int offset) { /* multiplication - as implemented on mmx */ #define F(c,x) (((c) * (x)) >> 16) /* saturation - it helps us handle torture test cases */ #define S(x) (((x)>32767) ? 32767 : ((x)<-32768) ? 
-32768 : (x)) int16_t x0, x1, x2, x3, x4, x5, x6, x7; int16_t y0, y1, y2, y3, y4, y5, y6, y7; int16_t a0, a1, a2, a3, b0, b1, b2, b3; int16_t u04, v04, u26, v26, u17, v17, u35, v35, u12, v12; col += offset; x0 = col[0*8]; x1 = col[1*8]; x2 = col[2*8]; x3 = col[3*8]; x4 = col[4*8]; x5 = col[5*8]; x6 = col[6*8]; x7 = col[7*8]; u04 = S (x0 + x4); v04 = S (x0 - x4); u26 = S (F (T2, x6) + x2); v26 = S (F (T2, x2) - x6); a0 = S (u04 + u26); a1 = S (v04 + v26); a2 = S (v04 - v26); a3 = S (u04 - u26); u17 = S (F (T1, x7) + x1); v17 = S (F (T1, x1) - x7); u35 = S (F (T3, x5) + x3); v35 = S (F (T3, x3) - x5); b0 = S (u17 + u35); b3 = S (v17 - v35); u12 = S (u17 - u35); v12 = S (v17 + v35); u12 = S (2 * F (C4, u12)); v12 = S (2 * F (C4, v12)); b1 = S (u12 + v12); b2 = S (u12 - v12); y0 = S (a0 + b0) >> COL_SHIFT; y1 = S (a1 + b1) >> COL_SHIFT; y2 = S (a2 + b2) >> COL_SHIFT; y3 = S (a3 + b3) >> COL_SHIFT; y4 = S (a3 - b3) >> COL_SHIFT; y5 = S (a2 - b2) >> COL_SHIFT; y6 = S (a1 - b1) >> COL_SHIFT; y7 = S (a0 - b0) >> COL_SHIFT; col[0*8] = y0; col[1*8] = y1; col[2*8] = y2; col[3*8] = y3; col[4*8] = y4; col[5*8] = y5; col[6*8] = y6; col[7*8] = y7; } #endif /* MMX column IDCT */ static inline void idct_col (int16_t * const col, const int offset) { #define T1 13036 #define T2 27146 #define T3 43790 #define C4 23170 DECLARE_ALIGNED(8, static const short, t1_vector)[] = {T1,T1,T1,T1}; DECLARE_ALIGNED(8, static const short, t2_vector)[] = {T2,T2,T2,T2}; DECLARE_ALIGNED(8, static const short, t3_vector)[] = {T3,T3,T3,T3}; DECLARE_ALIGNED(8, static const short, c4_vector)[] = {C4,C4,C4,C4}; /* column code adapted from Peter Gubanov */ /* http://www.elecard.com/peter/idct.shtml */ movq_m2r (*t1_vector, mm0); /* mm0 = T1 */ movq_m2r (*(col+offset+1*8), mm1); /* mm1 = x1 */ movq_r2r (mm0, mm2); /* mm2 = T1 */ movq_m2r (*(col+offset+7*8), mm4); /* mm4 = x7 */ pmulhw_r2r (mm1, mm0); /* mm0 = T1*x1 */ movq_m2r (*t3_vector, mm5); /* mm5 = T3 */ pmulhw_r2r (mm4, mm2); /* mm2 = T1*x7 */ movq_m2r 
(*(col+offset+5*8), mm6); /* mm6 = x5 */ movq_r2r (mm5, mm7); /* mm7 = T3-1 */ movq_m2r (*(col+offset+3*8), mm3); /* mm3 = x3 */ psubsw_r2r (mm4, mm0); /* mm0 = v17 */ movq_m2r (*t2_vector, mm4); /* mm4 = T2 */ pmulhw_r2r (mm3, mm5); /* mm5 = (T3-1)*x3 */ paddsw_r2r (mm2, mm1); /* mm1 = u17 */ pmulhw_r2r (mm6, mm7); /* mm7 = (T3-1)*x5 */ /* slot */ movq_r2r (mm4, mm2); /* mm2 = T2 */ paddsw_r2r (mm3, mm5); /* mm5 = T3*x3 */ pmulhw_m2r (*(col+offset+2*8), mm4);/* mm4 = T2*x2 */ paddsw_r2r (mm6, mm7); /* mm7 = T3*x5 */ psubsw_r2r (mm6, mm5); /* mm5 = v35 */ paddsw_r2r (mm3, mm7); /* mm7 = u35 */ movq_m2r (*(col+offset+6*8), mm3); /* mm3 = x6 */ movq_r2r (mm0, mm6); /* mm6 = v17 */ pmulhw_r2r (mm3, mm2); /* mm2 = T2*x6 */ psubsw_r2r (mm5, mm0); /* mm0 = b3 */ psubsw_r2r (mm3, mm4); /* mm4 = v26 */ paddsw_r2r (mm6, mm5); /* mm5 = v12 */ movq_r2m (mm0, *(col+offset+3*8)); /* save b3 in scratch0 */ movq_r2r (mm1, mm6); /* mm6 = u17 */ paddsw_m2r (*(col+offset+2*8), mm2);/* mm2 = u26 */ paddsw_r2r (mm7, mm6); /* mm6 = b0 */ psubsw_r2r (mm7, mm1); /* mm1 = u12 */ movq_r2r (mm1, mm7); /* mm7 = u12 */ movq_m2r (*(col+offset+0*8), mm3); /* mm3 = x0 */ paddsw_r2r (mm5, mm1); /* mm1 = u12+v12 */ movq_m2r (*c4_vector, mm0); /* mm0 = C4/2 */ psubsw_r2r (mm5, mm7); /* mm7 = u12-v12 */ movq_r2m (mm6, *(col+offset+5*8)); /* save b0 in scratch1 */ pmulhw_r2r (mm0, mm1); /* mm1 = b1/2 */ movq_r2r (mm4, mm6); /* mm6 = v26 */ pmulhw_r2r (mm0, mm7); /* mm7 = b2/2 */ movq_m2r (*(col+offset+4*8), mm5); /* mm5 = x4 */ movq_r2r (mm3, mm0); /* mm0 = x0 */ psubsw_r2r (mm5, mm3); /* mm3 = v04 */ paddsw_r2r (mm5, mm0); /* mm0 = u04 */ paddsw_r2r (mm3, mm4); /* mm4 = a1 */ movq_r2r (mm0, mm5); /* mm5 = u04 */ psubsw_r2r (mm6, mm3); /* mm3 = a2 */ paddsw_r2r (mm2, mm5); /* mm5 = a0 */ paddsw_r2r (mm1, mm1); /* mm1 = b1 */ psubsw_r2r (mm2, mm0); /* mm0 = a3 */ paddsw_r2r (mm7, mm7); /* mm7 = b2 */ movq_r2r (mm3, mm2); /* mm2 = a2 */ movq_r2r (mm4, mm6); /* mm6 = a1 */ paddsw_r2r (mm7, mm3); /* mm3 
= a2+b2 */ psraw_i2r (COL_SHIFT, mm3); /* mm3 = y2 */ paddsw_r2r (mm1, mm4); /* mm4 = a1+b1 */ psraw_i2r (COL_SHIFT, mm4); /* mm4 = y1 */ psubsw_r2r (mm1, mm6); /* mm6 = a1-b1 */ movq_m2r (*(col+offset+5*8), mm1); /* mm1 = b0 */ psubsw_r2r (mm7, mm2); /* mm2 = a2-b2 */ psraw_i2r (COL_SHIFT, mm6); /* mm6 = y6 */ movq_r2r (mm5, mm7); /* mm7 = a0 */ movq_r2m (mm4, *(col+offset+1*8)); /* save y1 */ psraw_i2r (COL_SHIFT, mm2); /* mm2 = y5 */ movq_r2m (mm3, *(col+offset+2*8)); /* save y2 */ paddsw_r2r (mm1, mm5); /* mm5 = a0+b0 */ movq_m2r (*(col+offset+3*8), mm4); /* mm4 = b3 */ psubsw_r2r (mm1, mm7); /* mm7 = a0-b0 */ psraw_i2r (COL_SHIFT, mm5); /* mm5 = y0 */ movq_r2r (mm0, mm3); /* mm3 = a3 */ movq_r2m (mm2, *(col+offset+5*8)); /* save y5 */ psubsw_r2r (mm4, mm3); /* mm3 = a3-b3 */ psraw_i2r (COL_SHIFT, mm7); /* mm7 = y7 */ paddsw_r2r (mm0, mm4); /* mm4 = a3+b3 */ movq_r2m (mm5, *(col+offset+0*8)); /* save y0 */ psraw_i2r (COL_SHIFT, mm3); /* mm3 = y4 */ movq_r2m (mm6, *(col+offset+6*8)); /* save y6 */ psraw_i2r (COL_SHIFT, mm4); /* mm4 = y3 */ movq_r2m (mm7, *(col+offset+7*8)); /* save y7 */ movq_r2m (mm3, *(col+offset+4*8)); /* save y4 */ movq_r2m (mm4, *(col+offset+3*8)); /* save y3 */ #undef T1 #undef T2 #undef T3 #undef C4 } DECLARE_ALIGNED(8, static const int32_t, rounder0)[] = rounder ((1 << (COL_SHIFT - 1)) - 0.5); DECLARE_ALIGNED(8, static const int32_t, rounder4)[] = rounder (0); DECLARE_ALIGNED(8, static const int32_t, rounder1)[] = rounder (1.25683487303); /* C1*(C1/C4+C1+C7)/2 */ DECLARE_ALIGNED(8, static const int32_t, rounder7)[] = rounder (-0.25); /* C1*(C7/C4+C7-C1)/2 */ DECLARE_ALIGNED(8, static const int32_t, rounder2)[] = rounder (0.60355339059); /* C2 * (C6+C2)/2 */ DECLARE_ALIGNED(8, static const int32_t, rounder6)[] = rounder (-0.25); /* C2 * (C6-C2)/2 */ DECLARE_ALIGNED(8, static const int32_t, rounder3)[] = rounder (0.087788325588); /* C3*(-C3/C4+C3+C5)/2 */ DECLARE_ALIGNED(8, static const int32_t, rounder5)[] = rounder (-0.441341716183); /* 
C3*(-C5/C4+C5-C3)/2 */ #undef COL_SHIFT #undef ROW_SHIFT #define declare_idct(idct,table,idct_row_head,idct_row,idct_row_tail,idct_row_mid) \ void idct (int16_t * const block) \ { \ DECLARE_ALIGNED(16, static const int16_t, table04)[] = \ table (22725, 21407, 19266, 16384, 12873, 8867, 4520); \ DECLARE_ALIGNED(16, static const int16_t, table17)[] = \ table (31521, 29692, 26722, 22725, 17855, 12299, 6270); \ DECLARE_ALIGNED(16, static const int16_t, table26)[] = \ table (29692, 27969, 25172, 21407, 16819, 11585, 5906); \ DECLARE_ALIGNED(16, static const int16_t, table35)[] = \ table (26722, 25172, 22654, 19266, 15137, 10426, 5315); \ \ idct_row_head (block, 0*8, table04); \ idct_row (table04, rounder0); \ idct_row_mid (block, 0*8, 4*8, table04); \ idct_row (table04, rounder4); \ idct_row_mid (block, 4*8, 1*8, table17); \ idct_row (table17, rounder1); \ idct_row_mid (block, 1*8, 7*8, table17); \ idct_row (table17, rounder7); \ idct_row_mid (block, 7*8, 2*8, table26); \ idct_row (table26, rounder2); \ idct_row_mid (block, 2*8, 6*8, table26); \ idct_row (table26, rounder6); \ idct_row_mid (block, 6*8, 3*8, table35); \ idct_row (table35, rounder3); \ idct_row_mid (block, 3*8, 5*8, table35); \ idct_row (table35, rounder5); \ idct_row_tail (block, 5*8); \ \ idct_col (block, 0); \ idct_col (block, 4); \ } declare_idct (ff_mmxext_idct, mmxext_table, mmxext_row_head, mmxext_row, mmxext_row_tail, mmxext_row_mid) declare_idct (ff_mmx_idct, mmx_table, mmx_row_head, mmx_row, mmx_row_tail, mmx_row_mid)
123linslouis-android-video-cutter
jni/libavcodec/x86/idct_mmx.c
C
asf20
22,956
/* * MMX optimized DSP utils * Copyright (c) 2000, 2001 Fabrice Bellard * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * * MMX optimization by Nick Kurshev <nickols_k@mail.ru> */ #include "libavutil/x86_cpu.h" #include "libavcodec/dsputil.h" #include "libavcodec/mpegvideo.h" #include "libavcodec/mathops.h" #include "dsputil_mmx.h" static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size) { __asm__ volatile( "mov $-128, %%"REG_a" \n\t" "pxor %%mm7, %%mm7 \n\t" ASMALIGN(4) "1: \n\t" "movq (%0), %%mm0 \n\t" "movq (%0, %2), %%mm2 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm2, %%mm3 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpckhbw %%mm7, %%mm1 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" "punpckhbw %%mm7, %%mm3 \n\t" "movq %%mm0, (%1, %%"REG_a") \n\t" "movq %%mm1, 8(%1, %%"REG_a") \n\t" "movq %%mm2, 16(%1, %%"REG_a") \n\t" "movq %%mm3, 24(%1, %%"REG_a") \n\t" "add %3, %0 \n\t" "add $32, %%"REG_a" \n\t" "js 1b \n\t" : "+r" (pixels) : "r" (block+64), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*2) : "%"REG_a ); } static void get_pixels_sse2(DCTELEM *block, const uint8_t *pixels, int line_size) { __asm__ volatile( "pxor %%xmm7, %%xmm7 \n\t" "movq (%0), %%xmm0 \n\t" "movq (%0, %2), %%xmm1 \n\t" "movq (%0, %2,2), 
%%xmm2 \n\t" "movq (%0, %3), %%xmm3 \n\t" "lea (%0,%2,4), %0 \n\t" "punpcklbw %%xmm7, %%xmm0 \n\t" "punpcklbw %%xmm7, %%xmm1 \n\t" "punpcklbw %%xmm7, %%xmm2 \n\t" "punpcklbw %%xmm7, %%xmm3 \n\t" "movdqa %%xmm0, (%1) \n\t" "movdqa %%xmm1, 16(%1) \n\t" "movdqa %%xmm2, 32(%1) \n\t" "movdqa %%xmm3, 48(%1) \n\t" "movq (%0), %%xmm0 \n\t" "movq (%0, %2), %%xmm1 \n\t" "movq (%0, %2,2), %%xmm2 \n\t" "movq (%0, %3), %%xmm3 \n\t" "punpcklbw %%xmm7, %%xmm0 \n\t" "punpcklbw %%xmm7, %%xmm1 \n\t" "punpcklbw %%xmm7, %%xmm2 \n\t" "punpcklbw %%xmm7, %%xmm3 \n\t" "movdqa %%xmm0, 64(%1) \n\t" "movdqa %%xmm1, 80(%1) \n\t" "movdqa %%xmm2, 96(%1) \n\t" "movdqa %%xmm3, 112(%1) \n\t" : "+r" (pixels) : "r" (block), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3) ); } static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride) { __asm__ volatile( "pxor %%mm7, %%mm7 \n\t" "mov $-128, %%"REG_a" \n\t" ASMALIGN(4) "1: \n\t" "movq (%0), %%mm0 \n\t" "movq (%1), %%mm2 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm2, %%mm3 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpckhbw %%mm7, %%mm1 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" "punpckhbw %%mm7, %%mm3 \n\t" "psubw %%mm2, %%mm0 \n\t" "psubw %%mm3, %%mm1 \n\t" "movq %%mm0, (%2, %%"REG_a") \n\t" "movq %%mm1, 8(%2, %%"REG_a") \n\t" "add %3, %0 \n\t" "add %3, %1 \n\t" "add $16, %%"REG_a" \n\t" "jnz 1b \n\t" : "+r" (s1), "+r" (s2) : "r" (block+64), "r" ((x86_reg)stride) : "%"REG_a ); } static int pix_sum16_mmx(uint8_t * pix, int line_size){ const int h=16; int sum; x86_reg index= -line_size*h; __asm__ volatile( "pxor %%mm7, %%mm7 \n\t" "pxor %%mm6, %%mm6 \n\t" "1: \n\t" "movq (%2, %1), %%mm0 \n\t" "movq (%2, %1), %%mm1 \n\t" "movq 8(%2, %1), %%mm2 \n\t" "movq 8(%2, %1), %%mm3 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpckhbw %%mm7, %%mm1 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" "punpckhbw %%mm7, %%mm3 \n\t" "paddw %%mm0, %%mm1 \n\t" "paddw %%mm2, %%mm3 \n\t" "paddw %%mm1, %%mm3 \n\t" "paddw %%mm3, %%mm6 \n\t" "add %3, %1 \n\t" " js 
1b \n\t" "movq %%mm6, %%mm5 \n\t" "psrlq $32, %%mm6 \n\t" "paddw %%mm5, %%mm6 \n\t" "movq %%mm6, %%mm5 \n\t" "psrlq $16, %%mm6 \n\t" "paddw %%mm5, %%mm6 \n\t" "movd %%mm6, %0 \n\t" "andl $0xFFFF, %0 \n\t" : "=&r" (sum), "+r" (index) : "r" (pix - index), "r" ((x86_reg)line_size) ); return sum; } static int pix_norm1_mmx(uint8_t *pix, int line_size) { int tmp; __asm__ volatile ( "movl $16,%%ecx\n" "pxor %%mm0,%%mm0\n" "pxor %%mm7,%%mm7\n" "1:\n" "movq (%0),%%mm2\n" /* mm2 = pix[0-7] */ "movq 8(%0),%%mm3\n" /* mm3 = pix[8-15] */ "movq %%mm2,%%mm1\n" /* mm1 = mm2 = pix[0-7] */ "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */ "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */ "movq %%mm3,%%mm4\n" /* mm4 = mm3 = pix[8-15] */ "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */ "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */ "pmaddwd %%mm1,%%mm1\n" /* mm1 = (pix0^2+pix1^2,pix2^2+pix3^2) */ "pmaddwd %%mm2,%%mm2\n" /* mm2 = (pix4^2+pix5^2,pix6^2+pix7^2) */ "pmaddwd %%mm3,%%mm3\n" "pmaddwd %%mm4,%%mm4\n" "paddd %%mm1,%%mm2\n" /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2, pix2^2+pix3^2+pix6^2+pix7^2) */ "paddd %%mm3,%%mm4\n" "paddd %%mm2,%%mm7\n" "add %2, %0\n" "paddd %%mm4,%%mm7\n" "dec %%ecx\n" "jnz 1b\n" "movq %%mm7,%%mm1\n" "psrlq $32, %%mm7\n" /* shift hi dword to lo */ "paddd %%mm7,%%mm1\n" "movd %%mm1,%1\n" : "+r" (pix), "=r"(tmp) : "r" ((x86_reg)line_size) : "%ecx" ); return tmp; } static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { int tmp; __asm__ volatile ( "movl %4,%%ecx\n" "shr $1,%%ecx\n" "pxor %%mm0,%%mm0\n" /* mm0 = 0 */ "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */ "1:\n" "movq (%0),%%mm1\n" /* mm1 = pix1[0][0-7] */ "movq (%1),%%mm2\n" /* mm2 = pix2[0][0-7] */ "movq (%0,%3),%%mm3\n" /* mm3 = pix1[1][0-7] */ "movq (%1,%3),%%mm4\n" /* mm4 = pix2[1][0-7] */ /* todo: mm1-mm2, mm3-mm4 */ /* algo: subtract mm1 from mm2 with saturation and vice versa */ /* OR the results to get absolute difference */ "movq %%mm1,%%mm5\n" "movq %%mm3,%%mm6\n" "psubusb 
%%mm2,%%mm1\n" "psubusb %%mm4,%%mm3\n" "psubusb %%mm5,%%mm2\n" "psubusb %%mm6,%%mm4\n" "por %%mm1,%%mm2\n" "por %%mm3,%%mm4\n" /* now convert to 16-bit vectors so we can square them */ "movq %%mm2,%%mm1\n" "movq %%mm4,%%mm3\n" "punpckhbw %%mm0,%%mm2\n" "punpckhbw %%mm0,%%mm4\n" "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */ "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */ "pmaddwd %%mm2,%%mm2\n" "pmaddwd %%mm4,%%mm4\n" "pmaddwd %%mm1,%%mm1\n" "pmaddwd %%mm3,%%mm3\n" "lea (%0,%3,2), %0\n" /* pix1 += 2*line_size */ "lea (%1,%3,2), %1\n" /* pix2 += 2*line_size */ "paddd %%mm2,%%mm1\n" "paddd %%mm4,%%mm3\n" "paddd %%mm1,%%mm7\n" "paddd %%mm3,%%mm7\n" "decl %%ecx\n" "jnz 1b\n" "movq %%mm7,%%mm1\n" "psrlq $32, %%mm7\n" /* shift hi dword to lo */ "paddd %%mm7,%%mm1\n" "movd %%mm1,%2\n" : "+r" (pix1), "+r" (pix2), "=r"(tmp) : "r" ((x86_reg)line_size) , "m" (h) : "%ecx"); return tmp; } static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { int tmp; __asm__ volatile ( "movl %4,%%ecx\n" "pxor %%mm0,%%mm0\n" /* mm0 = 0 */ "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */ "1:\n" "movq (%0),%%mm1\n" /* mm1 = pix1[0-7] */ "movq (%1),%%mm2\n" /* mm2 = pix2[0-7] */ "movq 8(%0),%%mm3\n" /* mm3 = pix1[8-15] */ "movq 8(%1),%%mm4\n" /* mm4 = pix2[8-15] */ /* todo: mm1-mm2, mm3-mm4 */ /* algo: subtract mm1 from mm2 with saturation and vice versa */ /* OR the results to get absolute difference */ "movq %%mm1,%%mm5\n" "movq %%mm3,%%mm6\n" "psubusb %%mm2,%%mm1\n" "psubusb %%mm4,%%mm3\n" "psubusb %%mm5,%%mm2\n" "psubusb %%mm6,%%mm4\n" "por %%mm1,%%mm2\n" "por %%mm3,%%mm4\n" /* now convert to 16-bit vectors so we can square them */ "movq %%mm2,%%mm1\n" "movq %%mm4,%%mm3\n" "punpckhbw %%mm0,%%mm2\n" "punpckhbw %%mm0,%%mm4\n" "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */ "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */ "pmaddwd %%mm2,%%mm2\n" "pmaddwd %%mm4,%%mm4\n" "pmaddwd %%mm1,%%mm1\n" "pmaddwd 
%%mm3,%%mm3\n" "add %3,%0\n" "add %3,%1\n" "paddd %%mm2,%%mm1\n" "paddd %%mm4,%%mm3\n" "paddd %%mm1,%%mm7\n" "paddd %%mm3,%%mm7\n" "decl %%ecx\n" "jnz 1b\n" "movq %%mm7,%%mm1\n" "psrlq $32, %%mm7\n" /* shift hi dword to lo */ "paddd %%mm7,%%mm1\n" "movd %%mm1,%2\n" : "+r" (pix1), "+r" (pix2), "=r"(tmp) : "r" ((x86_reg)line_size) , "m" (h) : "%ecx"); return tmp; } static int sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { int tmp; __asm__ volatile ( "shr $1,%2\n" "pxor %%xmm0,%%xmm0\n" /* mm0 = 0 */ "pxor %%xmm7,%%xmm7\n" /* mm7 holds the sum */ "1:\n" "movdqu (%0),%%xmm1\n" /* mm1 = pix1[0][0-15] */ "movdqu (%1),%%xmm2\n" /* mm2 = pix2[0][0-15] */ "movdqu (%0,%4),%%xmm3\n" /* mm3 = pix1[1][0-15] */ "movdqu (%1,%4),%%xmm4\n" /* mm4 = pix2[1][0-15] */ /* todo: mm1-mm2, mm3-mm4 */ /* algo: subtract mm1 from mm2 with saturation and vice versa */ /* OR the results to get absolute difference */ "movdqa %%xmm1,%%xmm5\n" "movdqa %%xmm3,%%xmm6\n" "psubusb %%xmm2,%%xmm1\n" "psubusb %%xmm4,%%xmm3\n" "psubusb %%xmm5,%%xmm2\n" "psubusb %%xmm6,%%xmm4\n" "por %%xmm1,%%xmm2\n" "por %%xmm3,%%xmm4\n" /* now convert to 16-bit vectors so we can square them */ "movdqa %%xmm2,%%xmm1\n" "movdqa %%xmm4,%%xmm3\n" "punpckhbw %%xmm0,%%xmm2\n" "punpckhbw %%xmm0,%%xmm4\n" "punpcklbw %%xmm0,%%xmm1\n" /* mm1 now spread over (mm1,mm2) */ "punpcklbw %%xmm0,%%xmm3\n" /* mm4 now spread over (mm3,mm4) */ "pmaddwd %%xmm2,%%xmm2\n" "pmaddwd %%xmm4,%%xmm4\n" "pmaddwd %%xmm1,%%xmm1\n" "pmaddwd %%xmm3,%%xmm3\n" "lea (%0,%4,2), %0\n" /* pix1 += 2*line_size */ "lea (%1,%4,2), %1\n" /* pix2 += 2*line_size */ "paddd %%xmm2,%%xmm1\n" "paddd %%xmm4,%%xmm3\n" "paddd %%xmm1,%%xmm7\n" "paddd %%xmm3,%%xmm7\n" "decl %2\n" "jnz 1b\n" "movdqa %%xmm7,%%xmm1\n" "psrldq $8, %%xmm7\n" /* shift hi qword to lo */ "paddd %%xmm1,%%xmm7\n" "movdqa %%xmm7,%%xmm1\n" "psrldq $4, %%xmm7\n" /* shift hi dword to lo */ "paddd %%xmm1,%%xmm7\n" "movd %%xmm7,%3\n" : "+r" (pix1), "+r" (pix2), "+r"(h), 
"=r"(tmp) : "r" ((x86_reg)line_size)); return tmp; } static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) { int tmp; __asm__ volatile ( "movl %3,%%ecx\n" "pxor %%mm7,%%mm7\n" "pxor %%mm6,%%mm6\n" "movq (%0),%%mm0\n" "movq %%mm0, %%mm1\n" "psllq $8, %%mm0\n" "psrlq $8, %%mm1\n" "psrlq $8, %%mm0\n" "movq %%mm0, %%mm2\n" "movq %%mm1, %%mm3\n" "punpcklbw %%mm7,%%mm0\n" "punpcklbw %%mm7,%%mm1\n" "punpckhbw %%mm7,%%mm2\n" "punpckhbw %%mm7,%%mm3\n" "psubw %%mm1, %%mm0\n" "psubw %%mm3, %%mm2\n" "add %2,%0\n" "movq (%0),%%mm4\n" "movq %%mm4, %%mm1\n" "psllq $8, %%mm4\n" "psrlq $8, %%mm1\n" "psrlq $8, %%mm4\n" "movq %%mm4, %%mm5\n" "movq %%mm1, %%mm3\n" "punpcklbw %%mm7,%%mm4\n" "punpcklbw %%mm7,%%mm1\n" "punpckhbw %%mm7,%%mm5\n" "punpckhbw %%mm7,%%mm3\n" "psubw %%mm1, %%mm4\n" "psubw %%mm3, %%mm5\n" "psubw %%mm4, %%mm0\n" "psubw %%mm5, %%mm2\n" "pxor %%mm3, %%mm3\n" "pxor %%mm1, %%mm1\n" "pcmpgtw %%mm0, %%mm3\n\t" "pcmpgtw %%mm2, %%mm1\n\t" "pxor %%mm3, %%mm0\n" "pxor %%mm1, %%mm2\n" "psubw %%mm3, %%mm0\n" "psubw %%mm1, %%mm2\n" "paddw %%mm0, %%mm2\n" "paddw %%mm2, %%mm6\n" "add %2,%0\n" "1:\n" "movq (%0),%%mm0\n" "movq %%mm0, %%mm1\n" "psllq $8, %%mm0\n" "psrlq $8, %%mm1\n" "psrlq $8, %%mm0\n" "movq %%mm0, %%mm2\n" "movq %%mm1, %%mm3\n" "punpcklbw %%mm7,%%mm0\n" "punpcklbw %%mm7,%%mm1\n" "punpckhbw %%mm7,%%mm2\n" "punpckhbw %%mm7,%%mm3\n" "psubw %%mm1, %%mm0\n" "psubw %%mm3, %%mm2\n" "psubw %%mm0, %%mm4\n" "psubw %%mm2, %%mm5\n" "pxor %%mm3, %%mm3\n" "pxor %%mm1, %%mm1\n" "pcmpgtw %%mm4, %%mm3\n\t" "pcmpgtw %%mm5, %%mm1\n\t" "pxor %%mm3, %%mm4\n" "pxor %%mm1, %%mm5\n" "psubw %%mm3, %%mm4\n" "psubw %%mm1, %%mm5\n" "paddw %%mm4, %%mm5\n" "paddw %%mm5, %%mm6\n" "add %2,%0\n" "movq (%0),%%mm4\n" "movq %%mm4, %%mm1\n" "psllq $8, %%mm4\n" "psrlq $8, %%mm1\n" "psrlq $8, %%mm4\n" "movq %%mm4, %%mm5\n" "movq %%mm1, %%mm3\n" "punpcklbw %%mm7,%%mm4\n" "punpcklbw %%mm7,%%mm1\n" "punpckhbw %%mm7,%%mm5\n" "punpckhbw %%mm7,%%mm3\n" "psubw %%mm1, %%mm4\n" "psubw %%mm3, %%mm5\n" 
"psubw %%mm4, %%mm0\n" "psubw %%mm5, %%mm2\n" "pxor %%mm3, %%mm3\n" "pxor %%mm1, %%mm1\n" "pcmpgtw %%mm0, %%mm3\n\t" "pcmpgtw %%mm2, %%mm1\n\t" "pxor %%mm3, %%mm0\n" "pxor %%mm1, %%mm2\n" "psubw %%mm3, %%mm0\n" "psubw %%mm1, %%mm2\n" "paddw %%mm0, %%mm2\n" "paddw %%mm2, %%mm6\n" "add %2,%0\n" "subl $2, %%ecx\n" " jnz 1b\n" "movq %%mm6, %%mm0\n" "punpcklwd %%mm7,%%mm0\n" "punpckhwd %%mm7,%%mm6\n" "paddd %%mm0, %%mm6\n" "movq %%mm6,%%mm0\n" "psrlq $32, %%mm6\n" "paddd %%mm6,%%mm0\n" "movd %%mm0,%1\n" : "+r" (pix1), "=r"(tmp) : "r" ((x86_reg)line_size) , "g" (h-2) : "%ecx"); return tmp; } static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) { int tmp; uint8_t * pix= pix1; __asm__ volatile ( "movl %3,%%ecx\n" "pxor %%mm7,%%mm7\n" "pxor %%mm6,%%mm6\n" "movq (%0),%%mm0\n" "movq 1(%0),%%mm1\n" "movq %%mm0, %%mm2\n" "movq %%mm1, %%mm3\n" "punpcklbw %%mm7,%%mm0\n" "punpcklbw %%mm7,%%mm1\n" "punpckhbw %%mm7,%%mm2\n" "punpckhbw %%mm7,%%mm3\n" "psubw %%mm1, %%mm0\n" "psubw %%mm3, %%mm2\n" "add %2,%0\n" "movq (%0),%%mm4\n" "movq 1(%0),%%mm1\n" "movq %%mm4, %%mm5\n" "movq %%mm1, %%mm3\n" "punpcklbw %%mm7,%%mm4\n" "punpcklbw %%mm7,%%mm1\n" "punpckhbw %%mm7,%%mm5\n" "punpckhbw %%mm7,%%mm3\n" "psubw %%mm1, %%mm4\n" "psubw %%mm3, %%mm5\n" "psubw %%mm4, %%mm0\n" "psubw %%mm5, %%mm2\n" "pxor %%mm3, %%mm3\n" "pxor %%mm1, %%mm1\n" "pcmpgtw %%mm0, %%mm3\n\t" "pcmpgtw %%mm2, %%mm1\n\t" "pxor %%mm3, %%mm0\n" "pxor %%mm1, %%mm2\n" "psubw %%mm3, %%mm0\n" "psubw %%mm1, %%mm2\n" "paddw %%mm0, %%mm2\n" "paddw %%mm2, %%mm6\n" "add %2,%0\n" "1:\n" "movq (%0),%%mm0\n" "movq 1(%0),%%mm1\n" "movq %%mm0, %%mm2\n" "movq %%mm1, %%mm3\n" "punpcklbw %%mm7,%%mm0\n" "punpcklbw %%mm7,%%mm1\n" "punpckhbw %%mm7,%%mm2\n" "punpckhbw %%mm7,%%mm3\n" "psubw %%mm1, %%mm0\n" "psubw %%mm3, %%mm2\n" "psubw %%mm0, %%mm4\n" "psubw %%mm2, %%mm5\n" "pxor %%mm3, %%mm3\n" "pxor %%mm1, %%mm1\n" "pcmpgtw %%mm4, %%mm3\n\t" "pcmpgtw %%mm5, %%mm1\n\t" "pxor %%mm3, %%mm4\n" "pxor %%mm1, %%mm5\n" "psubw %%mm3, %%mm4\n" 
"psubw %%mm1, %%mm5\n" "paddw %%mm4, %%mm5\n" "paddw %%mm5, %%mm6\n" "add %2,%0\n" "movq (%0),%%mm4\n" "movq 1(%0),%%mm1\n" "movq %%mm4, %%mm5\n" "movq %%mm1, %%mm3\n" "punpcklbw %%mm7,%%mm4\n" "punpcklbw %%mm7,%%mm1\n" "punpckhbw %%mm7,%%mm5\n" "punpckhbw %%mm7,%%mm3\n" "psubw %%mm1, %%mm4\n" "psubw %%mm3, %%mm5\n" "psubw %%mm4, %%mm0\n" "psubw %%mm5, %%mm2\n" "pxor %%mm3, %%mm3\n" "pxor %%mm1, %%mm1\n" "pcmpgtw %%mm0, %%mm3\n\t" "pcmpgtw %%mm2, %%mm1\n\t" "pxor %%mm3, %%mm0\n" "pxor %%mm1, %%mm2\n" "psubw %%mm3, %%mm0\n" "psubw %%mm1, %%mm2\n" "paddw %%mm0, %%mm2\n" "paddw %%mm2, %%mm6\n" "add %2,%0\n" "subl $2, %%ecx\n" " jnz 1b\n" "movq %%mm6, %%mm0\n" "punpcklwd %%mm7,%%mm0\n" "punpckhwd %%mm7,%%mm6\n" "paddd %%mm0, %%mm6\n" "movq %%mm6,%%mm0\n" "psrlq $32, %%mm6\n" "paddd %%mm6,%%mm0\n" "movd %%mm0,%1\n" : "+r" (pix1), "=r"(tmp) : "r" ((x86_reg)line_size) , "g" (h-2) : "%ecx"); return tmp + hf_noise8_mmx(pix+8, line_size, h); } static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { MpegEncContext *c = p; int score1, score2; if(c) score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h); else score1 = sse16_mmx(c, pix1, pix2, line_size, h); score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h); if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight; else return score1 + FFABS(score2)*8; } static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { MpegEncContext *c = p; int score1= sse8_mmx(c, pix1, pix2, line_size, h); int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h); if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight; else return score1 + FFABS(score2)*8; } static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) { int tmp; assert( (((int)pix) & 7) == 0); assert((line_size &7) ==0); #define SUM(in0, in1, out0, out1) \ "movq (%0), %%mm2\n"\ "movq 8(%0), %%mm3\n"\ "add %2,%0\n"\ "movq %%mm2, " #out0 "\n"\ 
"movq %%mm3, " #out1 "\n"\ "psubusb " #in0 ", %%mm2\n"\ "psubusb " #in1 ", %%mm3\n"\ "psubusb " #out0 ", " #in0 "\n"\ "psubusb " #out1 ", " #in1 "\n"\ "por %%mm2, " #in0 "\n"\ "por %%mm3, " #in1 "\n"\ "movq " #in0 ", %%mm2\n"\ "movq " #in1 ", %%mm3\n"\ "punpcklbw %%mm7, " #in0 "\n"\ "punpcklbw %%mm7, " #in1 "\n"\ "punpckhbw %%mm7, %%mm2\n"\ "punpckhbw %%mm7, %%mm3\n"\ "paddw " #in1 ", " #in0 "\n"\ "paddw %%mm3, %%mm2\n"\ "paddw %%mm2, " #in0 "\n"\ "paddw " #in0 ", %%mm6\n" __asm__ volatile ( "movl %3,%%ecx\n" "pxor %%mm6,%%mm6\n" "pxor %%mm7,%%mm7\n" "movq (%0),%%mm0\n" "movq 8(%0),%%mm1\n" "add %2,%0\n" "jmp 2f\n" "1:\n" SUM(%%mm4, %%mm5, %%mm0, %%mm1) "2:\n" SUM(%%mm0, %%mm1, %%mm4, %%mm5) "subl $2, %%ecx\n" "jnz 1b\n" "movq %%mm6,%%mm0\n" "psrlq $32, %%mm6\n" "paddw %%mm6,%%mm0\n" "movq %%mm0,%%mm6\n" "psrlq $16, %%mm0\n" "paddw %%mm6,%%mm0\n" "movd %%mm0,%1\n" : "+r" (pix), "=r"(tmp) : "r" ((x86_reg)line_size) , "m" (h) : "%ecx"); return tmp & 0xFFFF; } #undef SUM static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) { int tmp; assert( (((int)pix) & 7) == 0); assert((line_size &7) ==0); #define SUM(in0, in1, out0, out1) \ "movq (%0), " #out0 "\n"\ "movq 8(%0), " #out1 "\n"\ "add %2,%0\n"\ "psadbw " #out0 ", " #in0 "\n"\ "psadbw " #out1 ", " #in1 "\n"\ "paddw " #in1 ", " #in0 "\n"\ "paddw " #in0 ", %%mm6\n" __asm__ volatile ( "movl %3,%%ecx\n" "pxor %%mm6,%%mm6\n" "pxor %%mm7,%%mm7\n" "movq (%0),%%mm0\n" "movq 8(%0),%%mm1\n" "add %2,%0\n" "jmp 2f\n" "1:\n" SUM(%%mm4, %%mm5, %%mm0, %%mm1) "2:\n" SUM(%%mm0, %%mm1, %%mm4, %%mm5) "subl $2, %%ecx\n" "jnz 1b\n" "movd %%mm6,%1\n" : "+r" (pix), "=r"(tmp) : "r" ((x86_reg)line_size) , "m" (h) : "%ecx"); return tmp; } #undef SUM static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { int tmp; assert( (((int)pix1) & 7) == 0); assert( (((int)pix2) & 7) == 0); assert((line_size &7) ==0); #define SUM(in0, in1, out0, out1) \ "movq (%0),%%mm2\n"\ "movq (%1)," 
#out0 "\n"\ "movq 8(%0),%%mm3\n"\ "movq 8(%1)," #out1 "\n"\ "add %3,%0\n"\ "add %3,%1\n"\ "psubb " #out0 ", %%mm2\n"\ "psubb " #out1 ", %%mm3\n"\ "pxor %%mm7, %%mm2\n"\ "pxor %%mm7, %%mm3\n"\ "movq %%mm2, " #out0 "\n"\ "movq %%mm3, " #out1 "\n"\ "psubusb " #in0 ", %%mm2\n"\ "psubusb " #in1 ", %%mm3\n"\ "psubusb " #out0 ", " #in0 "\n"\ "psubusb " #out1 ", " #in1 "\n"\ "por %%mm2, " #in0 "\n"\ "por %%mm3, " #in1 "\n"\ "movq " #in0 ", %%mm2\n"\ "movq " #in1 ", %%mm3\n"\ "punpcklbw %%mm7, " #in0 "\n"\ "punpcklbw %%mm7, " #in1 "\n"\ "punpckhbw %%mm7, %%mm2\n"\ "punpckhbw %%mm7, %%mm3\n"\ "paddw " #in1 ", " #in0 "\n"\ "paddw %%mm3, %%mm2\n"\ "paddw %%mm2, " #in0 "\n"\ "paddw " #in0 ", %%mm6\n" __asm__ volatile ( "movl %4,%%ecx\n" "pxor %%mm6,%%mm6\n" "pcmpeqw %%mm7,%%mm7\n" "psllw $15, %%mm7\n" "packsswb %%mm7, %%mm7\n" "movq (%0),%%mm0\n" "movq (%1),%%mm2\n" "movq 8(%0),%%mm1\n" "movq 8(%1),%%mm3\n" "add %3,%0\n" "add %3,%1\n" "psubb %%mm2, %%mm0\n" "psubb %%mm3, %%mm1\n" "pxor %%mm7, %%mm0\n" "pxor %%mm7, %%mm1\n" "jmp 2f\n" "1:\n" SUM(%%mm4, %%mm5, %%mm0, %%mm1) "2:\n" SUM(%%mm0, %%mm1, %%mm4, %%mm5) "subl $2, %%ecx\n" "jnz 1b\n" "movq %%mm6,%%mm0\n" "psrlq $32, %%mm6\n" "paddw %%mm6,%%mm0\n" "movq %%mm0,%%mm6\n" "psrlq $16, %%mm0\n" "paddw %%mm6,%%mm0\n" "movd %%mm0,%2\n" : "+r" (pix1), "+r" (pix2), "=r"(tmp) : "r" ((x86_reg)line_size) , "m" (h) : "%ecx"); return tmp & 0x7FFF; } #undef SUM static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { int tmp; assert( (((int)pix1) & 7) == 0); assert( (((int)pix2) & 7) == 0); assert((line_size &7) ==0); #define SUM(in0, in1, out0, out1) \ "movq (%0)," #out0 "\n"\ "movq (%1),%%mm2\n"\ "movq 8(%0)," #out1 "\n"\ "movq 8(%1),%%mm3\n"\ "add %3,%0\n"\ "add %3,%1\n"\ "psubb %%mm2, " #out0 "\n"\ "psubb %%mm3, " #out1 "\n"\ "pxor %%mm7, " #out0 "\n"\ "pxor %%mm7, " #out1 "\n"\ "psadbw " #out0 ", " #in0 "\n"\ "psadbw " #out1 ", " #in1 "\n"\ "paddw " #in1 ", " #in0 "\n"\ "paddw " #in0 ", %%mm6\n" __asm__ 
volatile ( "movl %4,%%ecx\n" "pxor %%mm6,%%mm6\n" "pcmpeqw %%mm7,%%mm7\n" "psllw $15, %%mm7\n" "packsswb %%mm7, %%mm7\n" "movq (%0),%%mm0\n" "movq (%1),%%mm2\n" "movq 8(%0),%%mm1\n" "movq 8(%1),%%mm3\n" "add %3,%0\n" "add %3,%1\n" "psubb %%mm2, %%mm0\n" "psubb %%mm3, %%mm1\n" "pxor %%mm7, %%mm0\n" "pxor %%mm7, %%mm1\n" "jmp 2f\n" "1:\n" SUM(%%mm4, %%mm5, %%mm0, %%mm1) "2:\n" SUM(%%mm0, %%mm1, %%mm4, %%mm5) "subl $2, %%ecx\n" "jnz 1b\n" "movd %%mm6,%2\n" : "+r" (pix1), "+r" (pix2), "=r"(tmp) : "r" ((x86_reg)line_size) , "m" (h) : "%ecx"); return tmp; } #undef SUM static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){ x86_reg i=0; __asm__ volatile( "1: \n\t" "movq (%2, %0), %%mm0 \n\t" "movq (%1, %0), %%mm1 \n\t" "psubb %%mm0, %%mm1 \n\t" "movq %%mm1, (%3, %0) \n\t" "movq 8(%2, %0), %%mm0 \n\t" "movq 8(%1, %0), %%mm1 \n\t" "psubb %%mm0, %%mm1 \n\t" "movq %%mm1, 8(%3, %0) \n\t" "add $16, %0 \n\t" "cmp %4, %0 \n\t" " jb 1b \n\t" : "+r" (i) : "r"(src1), "r"(src2), "r"(dst), "r"((x86_reg)w-15) ); for(; i<w; i++) dst[i+0] = src1[i+0]-src2[i+0]; } static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top){ x86_reg i=0; uint8_t l, lt; __asm__ volatile( "1: \n\t" "movq -1(%1, %0), %%mm0 \n\t" // LT "movq (%1, %0), %%mm1 \n\t" // T "movq -1(%2, %0), %%mm2 \n\t" // L "movq (%2, %0), %%mm3 \n\t" // X "movq %%mm2, %%mm4 \n\t" // L "psubb %%mm0, %%mm2 \n\t" "paddb %%mm1, %%mm2 \n\t" // L + T - LT "movq %%mm4, %%mm5 \n\t" // L "pmaxub %%mm1, %%mm4 \n\t" // max(T, L) "pminub %%mm5, %%mm1 \n\t" // min(T, L) "pminub %%mm2, %%mm4 \n\t" "pmaxub %%mm1, %%mm4 \n\t" "psubb %%mm4, %%mm3 \n\t" // dst - pred "movq %%mm3, (%3, %0) \n\t" "add $8, %0 \n\t" "cmp %4, %0 \n\t" " jb 1b \n\t" : "+r" (i) : "r"(src1), "r"(src2), "r"(dst), "r"((x86_reg)w) ); l= *left; lt= *left_top; dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF); *left_top= src1[w-1]; *left = src2[w-1]; } #define 
DIFF_PIXELS_1(m,a,t,p1,p2)\ "mov"#m" "#p1", "#a" \n\t"\ "mov"#m" "#p2", "#t" \n\t"\ "punpcklbw "#a", "#t" \n\t"\ "punpcklbw "#a", "#a" \n\t"\ "psubw "#t", "#a" \n\t"\ #define DIFF_PIXELS_8(m0,m1,mm,p1,p2,stride,temp) {\ uint8_t *p1b=p1, *p2b=p2;\ __asm__ volatile(\ DIFF_PIXELS_1(m0, mm##0, mm##7, (%1), (%2))\ DIFF_PIXELS_1(m0, mm##1, mm##7, (%1,%3), (%2,%3))\ DIFF_PIXELS_1(m0, mm##2, mm##7, (%1,%3,2), (%2,%3,2))\ "add %4, %1 \n\t"\ "add %4, %2 \n\t"\ DIFF_PIXELS_1(m0, mm##3, mm##7, (%1), (%2))\ DIFF_PIXELS_1(m0, mm##4, mm##7, (%1,%3), (%2,%3))\ DIFF_PIXELS_1(m0, mm##5, mm##7, (%1,%3,2), (%2,%3,2))\ DIFF_PIXELS_1(m0, mm##6, mm##7, (%1,%4), (%2,%4))\ "mov"#m1" "#mm"0, %0 \n\t"\ DIFF_PIXELS_1(m0, mm##7, mm##0, (%1,%3,4), (%2,%3,4))\ "mov"#m1" %0, "#mm"0 \n\t"\ : "+m"(temp), "+r"(p1b), "+r"(p2b)\ : "r"((x86_reg)stride), "r"((x86_reg)stride*3)\ );\ } //the "+m"(temp) is needed as gcc 2.95 sometimes fails to compile "=m"(temp) #define DIFF_PIXELS_4x8(p1,p2,stride,temp) DIFF_PIXELS_8(d, q, %%mm, p1, p2, stride, temp) #define DIFF_PIXELS_8x8(p1,p2,stride,temp) DIFF_PIXELS_8(q, dqa, %%xmm, p1, p2, stride, temp) #define LBUTTERFLY2(a1,b1,a2,b2)\ "paddw " #b1 ", " #a1 " \n\t"\ "paddw " #b2 ", " #a2 " \n\t"\ "paddw " #b1 ", " #b1 " \n\t"\ "paddw " #b2 ", " #b2 " \n\t"\ "psubw " #a1 ", " #b1 " \n\t"\ "psubw " #a2 ", " #b2 " \n\t" #define HADAMARD8(m0, m1, m2, m3, m4, m5, m6, m7)\ LBUTTERFLY2(m0, m1, m2, m3)\ LBUTTERFLY2(m4, m5, m6, m7)\ LBUTTERFLY2(m0, m2, m1, m3)\ LBUTTERFLY2(m4, m6, m5, m7)\ LBUTTERFLY2(m0, m4, m1, m5)\ LBUTTERFLY2(m2, m6, m3, m7)\ #define HADAMARD48 HADAMARD8(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm6, %%mm7) #define MMABS_MMX(a,z)\ "pxor " #z ", " #z " \n\t"\ "pcmpgtw " #a ", " #z " \n\t"\ "pxor " #z ", " #a " \n\t"\ "psubw " #z ", " #a " \n\t" #define MMABS_MMX2(a,z)\ "pxor " #z ", " #z " \n\t"\ "psubw " #a ", " #z " \n\t"\ "pmaxsw " #z ", " #a " \n\t" #define MMABS_SSSE3(a,z)\ "pabsw " #a ", " #a " \n\t" #define MMABS_SUM(a,z, sum)\ MMABS(a,z)\ 
"paddusw " #a ", " #sum " \n\t" #define MMABS_SUM_8x8_NOSPILL\ MMABS(%%xmm0, %%xmm8)\ MMABS(%%xmm1, %%xmm9)\ MMABS_SUM(%%xmm2, %%xmm8, %%xmm0)\ MMABS_SUM(%%xmm3, %%xmm9, %%xmm1)\ MMABS_SUM(%%xmm4, %%xmm8, %%xmm0)\ MMABS_SUM(%%xmm5, %%xmm9, %%xmm1)\ MMABS_SUM(%%xmm6, %%xmm8, %%xmm0)\ MMABS_SUM(%%xmm7, %%xmm9, %%xmm1)\ "paddusw %%xmm1, %%xmm0 \n\t" #if ARCH_X86_64 #define MMABS_SUM_8x8_SSE2 MMABS_SUM_8x8_NOSPILL #else #define MMABS_SUM_8x8_SSE2\ "movdqa %%xmm7, (%1) \n\t"\ MMABS(%%xmm0, %%xmm7)\ MMABS(%%xmm1, %%xmm7)\ MMABS_SUM(%%xmm2, %%xmm7, %%xmm0)\ MMABS_SUM(%%xmm3, %%xmm7, %%xmm1)\ MMABS_SUM(%%xmm4, %%xmm7, %%xmm0)\ MMABS_SUM(%%xmm5, %%xmm7, %%xmm1)\ MMABS_SUM(%%xmm6, %%xmm7, %%xmm0)\ "movdqa (%1), %%xmm2 \n\t"\ MMABS_SUM(%%xmm2, %%xmm7, %%xmm1)\ "paddusw %%xmm1, %%xmm0 \n\t" #endif /* FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can get up to * about 100k on extreme inputs. But that's very unlikely to occur in natural video, * and it's even more unlikely to not have any alternative mvs/modes with lower cost. 
*/ #define HSUM_MMX(a, t, dst)\ "movq "#a", "#t" \n\t"\ "psrlq $32, "#a" \n\t"\ "paddusw "#t", "#a" \n\t"\ "movq "#a", "#t" \n\t"\ "psrlq $16, "#a" \n\t"\ "paddusw "#t", "#a" \n\t"\ "movd "#a", "#dst" \n\t"\ #define HSUM_MMX2(a, t, dst)\ "pshufw $0x0E, "#a", "#t" \n\t"\ "paddusw "#t", "#a" \n\t"\ "pshufw $0x01, "#a", "#t" \n\t"\ "paddusw "#t", "#a" \n\t"\ "movd "#a", "#dst" \n\t"\ #define HSUM_SSE2(a, t, dst)\ "movhlps "#a", "#t" \n\t"\ "paddusw "#t", "#a" \n\t"\ "pshuflw $0x0E, "#a", "#t" \n\t"\ "paddusw "#t", "#a" \n\t"\ "pshuflw $0x01, "#a", "#t" \n\t"\ "paddusw "#t", "#a" \n\t"\ "movd "#a", "#dst" \n\t"\ #define HADAMARD8_DIFF_MMX(cpu) \ static int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){\ DECLARE_ALIGNED(8, uint64_t, temp)[13];\ int sum;\ \ assert(h==8);\ \ DIFF_PIXELS_4x8(src1, src2, stride, temp[0]);\ \ __asm__ volatile(\ HADAMARD48\ \ "movq %%mm7, 96(%1) \n\t"\ \ TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)\ STORE4(8, 0(%1), %%mm0, %%mm3, %%mm7, %%mm2)\ \ "movq 96(%1), %%mm7 \n\t"\ TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)\ STORE4(8, 64(%1), %%mm4, %%mm7, %%mm0, %%mm6)\ \ : "=r" (sum)\ : "r"(temp)\ );\ \ DIFF_PIXELS_4x8(src1+4, src2+4, stride, temp[4]);\ \ __asm__ volatile(\ HADAMARD48\ \ "movq %%mm7, 96(%1) \n\t"\ \ TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)\ STORE4(8, 32(%1), %%mm0, %%mm3, %%mm7, %%mm2)\ \ "movq 96(%1), %%mm7 \n\t"\ TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)\ "movq %%mm7, %%mm5 \n\t"/*FIXME remove*/\ "movq %%mm6, %%mm7 \n\t"\ "movq %%mm0, %%mm6 \n\t"\ \ LOAD4(8, 64(%1), %%mm0, %%mm1, %%mm2, %%mm3)\ \ HADAMARD48\ "movq %%mm7, 64(%1) \n\t"\ MMABS(%%mm0, %%mm7)\ MMABS(%%mm1, %%mm7)\ MMABS_SUM(%%mm2, %%mm7, %%mm0)\ MMABS_SUM(%%mm3, %%mm7, %%mm1)\ MMABS_SUM(%%mm4, %%mm7, %%mm0)\ MMABS_SUM(%%mm5, %%mm7, %%mm1)\ MMABS_SUM(%%mm6, %%mm7, %%mm0)\ "movq 64(%1), %%mm2 \n\t"\ MMABS_SUM(%%mm2, %%mm7, %%mm1)\ "paddusw %%mm1, %%mm0 \n\t"\ "movq %%mm0, 64(%1) \n\t"\ \ LOAD4(8, 0(%1), %%mm0, %%mm1, %%mm2, 
%%mm3)\ LOAD4(8, 32(%1), %%mm4, %%mm5, %%mm6, %%mm7)\ \ HADAMARD48\ "movq %%mm7, (%1) \n\t"\ MMABS(%%mm0, %%mm7)\ MMABS(%%mm1, %%mm7)\ MMABS_SUM(%%mm2, %%mm7, %%mm0)\ MMABS_SUM(%%mm3, %%mm7, %%mm1)\ MMABS_SUM(%%mm4, %%mm7, %%mm0)\ MMABS_SUM(%%mm5, %%mm7, %%mm1)\ MMABS_SUM(%%mm6, %%mm7, %%mm0)\ "movq (%1), %%mm2 \n\t"\ MMABS_SUM(%%mm2, %%mm7, %%mm1)\ "paddusw 64(%1), %%mm0 \n\t"\ "paddusw %%mm1, %%mm0 \n\t"\ \ HSUM(%%mm0, %%mm1, %0)\ \ : "=r" (sum)\ : "r"(temp)\ );\ return sum&0xFFFF;\ }\ WRAPPER8_16_SQ(hadamard8_diff_##cpu, hadamard8_diff16_##cpu) #define HADAMARD8_DIFF_SSE2(cpu) \ static int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){\ DECLARE_ALIGNED(16, uint64_t, temp)[4];\ int sum;\ \ assert(h==8);\ \ DIFF_PIXELS_8x8(src1, src2, stride, temp[0]);\ \ __asm__ volatile(\ HADAMARD8(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7)\ TRANSPOSE8(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7, (%1))\ HADAMARD8(%%xmm0, %%xmm5, %%xmm7, %%xmm3, %%xmm6, %%xmm4, %%xmm2, %%xmm1)\ MMABS_SUM_8x8\ HSUM_SSE2(%%xmm0, %%xmm1, %0)\ : "=r" (sum)\ : "r"(temp)\ );\ return sum&0xFFFF;\ }\ WRAPPER8_16_SQ(hadamard8_diff_##cpu, hadamard8_diff16_##cpu) #define MMABS(a,z) MMABS_MMX(a,z) #define HSUM(a,t,dst) HSUM_MMX(a,t,dst) HADAMARD8_DIFF_MMX(mmx) #undef MMABS #undef HSUM #define MMABS(a,z) MMABS_MMX2(a,z) #define MMABS_SUM_8x8 MMABS_SUM_8x8_SSE2 #define HSUM(a,t,dst) HSUM_MMX2(a,t,dst) HADAMARD8_DIFF_MMX(mmx2) HADAMARD8_DIFF_SSE2(sse2) #undef MMABS #undef MMABS_SUM_8x8 #undef HSUM #if HAVE_SSSE3 #define MMABS(a,z) MMABS_SSSE3(a,z) #define MMABS_SUM_8x8 MMABS_SUM_8x8_NOSPILL HADAMARD8_DIFF_SSE2(ssse3) #undef MMABS #undef MMABS_SUM_8x8 #endif #define DCT_SAD4(m,mm,o)\ "mov"#m" "#o"+ 0(%1), "#mm"2 \n\t"\ "mov"#m" "#o"+16(%1), "#mm"3 \n\t"\ "mov"#m" "#o"+32(%1), "#mm"4 \n\t"\ "mov"#m" "#o"+48(%1), "#mm"5 \n\t"\ MMABS_SUM(mm##2, mm##6, mm##0)\ MMABS_SUM(mm##3, mm##7, mm##1)\ MMABS_SUM(mm##4, mm##6, mm##0)\ MMABS_SUM(mm##5, 
mm##7, mm##1)\ #define DCT_SAD_MMX\ "pxor %%mm0, %%mm0 \n\t"\ "pxor %%mm1, %%mm1 \n\t"\ DCT_SAD4(q, %%mm, 0)\ DCT_SAD4(q, %%mm, 8)\ DCT_SAD4(q, %%mm, 64)\ DCT_SAD4(q, %%mm, 72)\ "paddusw %%mm1, %%mm0 \n\t"\ HSUM(%%mm0, %%mm1, %0) #define DCT_SAD_SSE2\ "pxor %%xmm0, %%xmm0 \n\t"\ "pxor %%xmm1, %%xmm1 \n\t"\ DCT_SAD4(dqa, %%xmm, 0)\ DCT_SAD4(dqa, %%xmm, 64)\ "paddusw %%xmm1, %%xmm0 \n\t"\ HSUM(%%xmm0, %%xmm1, %0) #define DCT_SAD_FUNC(cpu) \ static int sum_abs_dctelem_##cpu(DCTELEM *block){\ int sum;\ __asm__ volatile(\ DCT_SAD\ :"=r"(sum)\ :"r"(block)\ );\ return sum&0xFFFF;\ } #define DCT_SAD DCT_SAD_MMX #define HSUM(a,t,dst) HSUM_MMX(a,t,dst) #define MMABS(a,z) MMABS_MMX(a,z) DCT_SAD_FUNC(mmx) #undef MMABS #undef HSUM #define HSUM(a,t,dst) HSUM_MMX2(a,t,dst) #define MMABS(a,z) MMABS_MMX2(a,z) DCT_SAD_FUNC(mmx2) #undef HSUM #undef DCT_SAD #define DCT_SAD DCT_SAD_SSE2 #define HSUM(a,t,dst) HSUM_SSE2(a,t,dst) DCT_SAD_FUNC(sse2) #undef MMABS #if HAVE_SSSE3 #define MMABS(a,z) MMABS_SSSE3(a,z) DCT_SAD_FUNC(ssse3) #undef MMABS #endif #undef HSUM #undef DCT_SAD static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int size){ int sum; x86_reg i=size; __asm__ volatile( "pxor %%mm4, %%mm4 \n" "1: \n" "sub $8, %0 \n" "movq (%2,%0), %%mm2 \n" "movq (%3,%0,2), %%mm0 \n" "movq 8(%3,%0,2), %%mm1 \n" "punpckhbw %%mm2, %%mm3 \n" "punpcklbw %%mm2, %%mm2 \n" "psraw $8, %%mm3 \n" "psraw $8, %%mm2 \n" "psubw %%mm3, %%mm1 \n" "psubw %%mm2, %%mm0 \n" "pmaddwd %%mm1, %%mm1 \n" "pmaddwd %%mm0, %%mm0 \n" "paddd %%mm1, %%mm4 \n" "paddd %%mm0, %%mm4 \n" "jg 1b \n" "movq %%mm4, %%mm3 \n" "psrlq $32, %%mm3 \n" "paddd %%mm3, %%mm4 \n" "movd %%mm4, %1 \n" :"+r"(i), "=r"(sum) :"r"(pix1), "r"(pix2) ); return sum; } #define PHADDD(a, t)\ "movq "#a", "#t" \n\t"\ "psrlq $32, "#a" \n\t"\ "paddd "#t", "#a" \n\t" /* pmulhw: dst[0-15]=(src[0-15]*dst[0-15])[16-31] pmulhrw: dst[0-15]=(src[0-15]*dst[0-15] + 0x8000)[16-31] pmulhrsw: dst[0-15]=(src[0-15]*dst[0-15] + 0x4000)[15-30] */ #define 
PMULHRW(x, y, s, o)\ "pmulhw " #s ", "#x " \n\t"\ "pmulhw " #s ", "#y " \n\t"\ "paddw " #o ", "#x " \n\t"\ "paddw " #o ", "#y " \n\t"\ "psraw $1, "#x " \n\t"\ "psraw $1, "#y " \n\t" #define DEF(x) x ## _mmx #define SET_RND MOVQ_WONE #define SCALE_OFFSET 1 #include "dsputil_mmx_qns_template.c" #undef DEF #undef SET_RND #undef SCALE_OFFSET #undef PMULHRW #define DEF(x) x ## _3dnow #define SET_RND(x) #define SCALE_OFFSET 0 #define PMULHRW(x, y, s, o)\ "pmulhrw " #s ", "#x " \n\t"\ "pmulhrw " #s ", "#y " \n\t" #include "dsputil_mmx_qns_template.c" #undef DEF #undef SET_RND #undef SCALE_OFFSET #undef PMULHRW #if HAVE_SSSE3 #undef PHADDD #define DEF(x) x ## _ssse3 #define SET_RND(x) #define SCALE_OFFSET -1 #define PHADDD(a, t)\ "pshufw $0x0E, "#a", "#t" \n\t"\ "paddd "#t", "#a" \n\t" /* faster than phaddd on core2 */ #define PMULHRW(x, y, s, o)\ "pmulhrsw " #s ", "#x " \n\t"\ "pmulhrsw " #s ", "#y " \n\t" #include "dsputil_mmx_qns_template.c" #undef DEF #undef SET_RND #undef SCALE_OFFSET #undef PMULHRW #undef PHADDD #endif //HAVE_SSSE3 void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx) { if (mm_flags & FF_MM_MMX) { const int dct_algo = avctx->dct_algo; if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){ if(mm_flags & FF_MM_SSE2){ c->fdct = ff_fdct_sse2; }else if(mm_flags & FF_MM_MMX2){ c->fdct = ff_fdct_mmx2; }else{ c->fdct = ff_fdct_mmx; } } c->get_pixels = get_pixels_mmx; c->diff_pixels = diff_pixels_mmx; c->pix_sum = pix_sum16_mmx; c->diff_bytes= diff_bytes_mmx; c->sum_abs_dctelem= sum_abs_dctelem_mmx; c->hadamard8_diff[0]= hadamard8_diff16_mmx; c->hadamard8_diff[1]= hadamard8_diff_mmx; c->pix_norm1 = pix_norm1_mmx; c->sse[0] = (mm_flags & FF_MM_SSE2) ? 
sse16_sse2 : sse16_mmx; c->sse[1] = sse8_mmx; c->vsad[4]= vsad_intra16_mmx; c->nsse[0] = nsse16_mmx; c->nsse[1] = nsse8_mmx; if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->vsad[0] = vsad16_mmx; } if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->try_8x8basis= try_8x8basis_mmx; } c->add_8x8basis= add_8x8basis_mmx; c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx; if (mm_flags & FF_MM_MMX2) { c->sum_abs_dctelem= sum_abs_dctelem_mmx2; c->hadamard8_diff[0]= hadamard8_diff16_mmx2; c->hadamard8_diff[1]= hadamard8_diff_mmx2; c->vsad[4]= vsad_intra16_mmx2; if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->vsad[0] = vsad16_mmx2; } c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2; } if(mm_flags & FF_MM_SSE2){ c->get_pixels = get_pixels_sse2; c->sum_abs_dctelem= sum_abs_dctelem_sse2; c->hadamard8_diff[0]= hadamard8_diff16_sse2; c->hadamard8_diff[1]= hadamard8_diff_sse2; #if CONFIG_LPC c->lpc_compute_autocorr = ff_lpc_compute_autocorr_sse2; #endif } #if HAVE_SSSE3 if(mm_flags & FF_MM_SSSE3){ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->try_8x8basis= try_8x8basis_ssse3; } c->add_8x8basis= add_8x8basis_ssse3; c->sum_abs_dctelem= sum_abs_dctelem_ssse3; c->hadamard8_diff[0]= hadamard8_diff16_ssse3; c->hadamard8_diff[1]= hadamard8_diff_ssse3; } #endif if(mm_flags & FF_MM_3DNOW){ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->try_8x8basis= try_8x8basis_3dnow; } c->add_8x8basis= add_8x8basis_3dnow; } } dsputil_init_pix_mmx(c, avctx); }
123linslouis-android-video-cutter
jni/libavcodec/x86/dsputilenc_mmx.c
C
asf20
43,965
/* * MMX optimized DSP utils * Copyright (c) 2000, 2001 Fabrice Bellard * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * * MMX optimization by Nick Kurshev <nickols_k@mail.ru> */ #include "libavutil/x86_cpu.h" #include "libavcodec/dsputil.h" #include "libavcodec/h264dsp.h" #include "libavcodec/mpegvideo.h" #include "libavcodec/simple_idct.h" #include "dsputil_mmx.h" #include "vp3dsp_mmx.h" #include "vp3dsp_sse2.h" #include "vp6dsp_mmx.h" #include "vp6dsp_sse2.h" #include "idct_xvid.h" //#undef NDEBUG //#include <assert.h> int mm_flags; /* multimedia extension flags */ /* pixel operations */ DECLARE_ALIGNED(8, const uint64_t, ff_bone) = 0x0101010101010101ULL; DECLARE_ALIGNED(8, const uint64_t, ff_wtwo) = 0x0002000200020002ULL; DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] = {0x8000000080000000ULL, 0x8000000080000000ULL}; DECLARE_ALIGNED(8, const uint64_t, ff_pw_3 ) = 0x0003000300030003ULL; DECLARE_ALIGNED(8, const uint64_t, ff_pw_4 ) = 0x0004000400040004ULL; DECLARE_ALIGNED(16, const xmm_reg, ff_pw_5 ) = {0x0005000500050005ULL, 0x0005000500050005ULL}; DECLARE_ALIGNED(16, const xmm_reg, ff_pw_8 ) = {0x0008000800080008ULL, 0x0008000800080008ULL}; DECLARE_ALIGNED(8, const uint64_t, ff_pw_15 ) = 
0x000F000F000F000FULL; DECLARE_ALIGNED(16, const xmm_reg, ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL}; DECLARE_ALIGNED(8, const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL; DECLARE_ALIGNED(16, const xmm_reg, ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL}; DECLARE_ALIGNED(16, const xmm_reg, ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL}; DECLARE_ALIGNED(8, const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL; DECLARE_ALIGNED(16, const xmm_reg, ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL}; DECLARE_ALIGNED(8, const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL; DECLARE_ALIGNED(8, const uint64_t, ff_pw_128) = 0x0080008000800080ULL; DECLARE_ALIGNED(8, const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL; DECLARE_ALIGNED(8, const uint64_t, ff_pb_1 ) = 0x0101010101010101ULL; DECLARE_ALIGNED(8, const uint64_t, ff_pb_3 ) = 0x0303030303030303ULL; DECLARE_ALIGNED(8, const uint64_t, ff_pb_7 ) = 0x0707070707070707ULL; DECLARE_ALIGNED(8, const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL; DECLARE_ALIGNED(8, const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL; DECLARE_ALIGNED(8, const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL; DECLARE_ALIGNED(8, const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL; DECLARE_ALIGNED(8, const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL; DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 }; DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 }; #define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::) #define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::) #define MOVQ_BFE(regd) \ __asm__ volatile ( \ "pcmpeqd %%" #regd ", %%" #regd " \n\t"\ "paddb %%" #regd ", %%" #regd " \n\t" ::) #ifndef PIC #define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone)) #define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo)) #else // for shared library it's better to use this way for accessing constants // pcmpeqd -> -1 #define 
MOVQ_BONE(regd) \ __asm__ volatile ( \ "pcmpeqd %%" #regd ", %%" #regd " \n\t" \ "psrlw $15, %%" #regd " \n\t" \ "packuswb %%" #regd ", %%" #regd " \n\t" ::) #define MOVQ_WTWO(regd) \ __asm__ volatile ( \ "pcmpeqd %%" #regd ", %%" #regd " \n\t" \ "psrlw $15, %%" #regd " \n\t" \ "psllw $1, %%" #regd " \n\t"::) #endif // using regr as temporary and for the output result // first argument is unmodifed and second is trashed // regfe is supposed to contain 0xfefefefefefefefe #define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \ "movq " #rega ", " #regr " \n\t"\ "pand " #regb ", " #regr " \n\t"\ "pxor " #rega ", " #regb " \n\t"\ "pand " #regfe "," #regb " \n\t"\ "psrlq $1, " #regb " \n\t"\ "paddb " #regb ", " #regr " \n\t" #define PAVGB_MMX(rega, regb, regr, regfe) \ "movq " #rega ", " #regr " \n\t"\ "por " #regb ", " #regr " \n\t"\ "pxor " #rega ", " #regb " \n\t"\ "pand " #regfe "," #regb " \n\t"\ "psrlq $1, " #regb " \n\t"\ "psubb " #regb ", " #regr " \n\t" // mm6 is supposed to contain 0xfefefefefefefefe #define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \ "movq " #rega ", " #regr " \n\t"\ "movq " #regc ", " #regp " \n\t"\ "pand " #regb ", " #regr " \n\t"\ "pand " #regd ", " #regp " \n\t"\ "pxor " #rega ", " #regb " \n\t"\ "pxor " #regc ", " #regd " \n\t"\ "pand %%mm6, " #regb " \n\t"\ "pand %%mm6, " #regd " \n\t"\ "psrlq $1, " #regb " \n\t"\ "psrlq $1, " #regd " \n\t"\ "paddb " #regb ", " #regr " \n\t"\ "paddb " #regd ", " #regp " \n\t" #define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \ "movq " #rega ", " #regr " \n\t"\ "movq " #regc ", " #regp " \n\t"\ "por " #regb ", " #regr " \n\t"\ "por " #regd ", " #regp " \n\t"\ "pxor " #rega ", " #regb " \n\t"\ "pxor " #regc ", " #regd " \n\t"\ "pand %%mm6, " #regb " \n\t"\ "pand %%mm6, " #regd " \n\t"\ "psrlq $1, " #regd " \n\t"\ "psrlq $1, " #regb " \n\t"\ "psubb " #regb ", " #regr " \n\t"\ "psubb " #regd ", " #regp " \n\t" /***********************************/ /* MMX no rounding */ #define DEF(x, y) x ## 
_no_rnd_ ## y ##_mmx #define SET_RND MOVQ_WONE #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f) #define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e) #define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e) #include "dsputil_mmx_rnd_template.c" #undef DEF #undef SET_RND #undef PAVGBP #undef PAVGB /***********************************/ /* MMX rounding */ #define DEF(x, y) x ## _ ## y ##_mmx #define SET_RND MOVQ_WTWO #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f) #define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e) #include "dsputil_mmx_rnd_template.c" #undef DEF #undef SET_RND #undef PAVGBP #undef PAVGB #undef OP_AVG /***********************************/ /* 3Dnow specific */ #define DEF(x) x ## _3dnow #define PAVGB "pavgusb" #define OP_AVG PAVGB #include "dsputil_mmx_avg_template.c" #undef DEF #undef PAVGB #undef OP_AVG /***********************************/ /* MMX2 specific */ #define DEF(x) x ## _mmx2 /* Introduced only in MMX2 set */ #define PAVGB "pavgb" #define OP_AVG PAVGB #include "dsputil_mmx_avg_template.c" #undef DEF #undef PAVGB #undef OP_AVG #define put_no_rnd_pixels16_mmx put_pixels16_mmx #define put_no_rnd_pixels8_mmx put_pixels8_mmx #define put_pixels16_mmx2 put_pixels16_mmx #define put_pixels8_mmx2 put_pixels8_mmx #define put_pixels4_mmx2 put_pixels4_mmx #define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx #define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx #define put_pixels16_3dnow put_pixels16_mmx #define put_pixels8_3dnow put_pixels8_mmx #define put_pixels4_3dnow put_pixels4_mmx #define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx #define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx /***********************************/ /* standard MMX */ void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size) { const DCTELEM *p; uint8_t *pix; /* read the pixels */ p = block; pix = pixels; /* unrolled loop */ __asm__ volatile( "movq %3, %%mm0 \n\t" "movq 8%3, %%mm1 \n\t" "movq 16%3, %%mm2 \n\t" "movq 
24%3, %%mm3 \n\t" "movq 32%3, %%mm4 \n\t" "movq 40%3, %%mm5 \n\t" "movq 48%3, %%mm6 \n\t" "movq 56%3, %%mm7 \n\t" "packuswb %%mm1, %%mm0 \n\t" "packuswb %%mm3, %%mm2 \n\t" "packuswb %%mm5, %%mm4 \n\t" "packuswb %%mm7, %%mm6 \n\t" "movq %%mm0, (%0) \n\t" "movq %%mm2, (%0, %1) \n\t" "movq %%mm4, (%0, %1, 2) \n\t" "movq %%mm6, (%0, %2) \n\t" ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p) :"memory"); pix += line_size*4; p += 32; // if here would be an exact copy of the code above // compiler would generate some very strange code // thus using "r" __asm__ volatile( "movq (%3), %%mm0 \n\t" "movq 8(%3), %%mm1 \n\t" "movq 16(%3), %%mm2 \n\t" "movq 24(%3), %%mm3 \n\t" "movq 32(%3), %%mm4 \n\t" "movq 40(%3), %%mm5 \n\t" "movq 48(%3), %%mm6 \n\t" "movq 56(%3), %%mm7 \n\t" "packuswb %%mm1, %%mm0 \n\t" "packuswb %%mm3, %%mm2 \n\t" "packuswb %%mm5, %%mm4 \n\t" "packuswb %%mm7, %%mm6 \n\t" "movq %%mm0, (%0) \n\t" "movq %%mm2, (%0, %1) \n\t" "movq %%mm4, (%0, %1, 2) \n\t" "movq %%mm6, (%0, %2) \n\t" ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p) :"memory"); } DECLARE_ASM_CONST(8, uint8_t, ff_vector128)[8] = { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 }; #define put_signed_pixels_clamped_mmx_half(off) \ "movq "#off"(%2), %%mm1 \n\t"\ "movq 16+"#off"(%2), %%mm2 \n\t"\ "movq 32+"#off"(%2), %%mm3 \n\t"\ "movq 48+"#off"(%2), %%mm4 \n\t"\ "packsswb 8+"#off"(%2), %%mm1 \n\t"\ "packsswb 24+"#off"(%2), %%mm2 \n\t"\ "packsswb 40+"#off"(%2), %%mm3 \n\t"\ "packsswb 56+"#off"(%2), %%mm4 \n\t"\ "paddb %%mm0, %%mm1 \n\t"\ "paddb %%mm0, %%mm2 \n\t"\ "paddb %%mm0, %%mm3 \n\t"\ "paddb %%mm0, %%mm4 \n\t"\ "movq %%mm1, (%0) \n\t"\ "movq %%mm2, (%0, %3) \n\t"\ "movq %%mm3, (%0, %3, 2) \n\t"\ "movq %%mm4, (%0, %1) \n\t" void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size) { x86_reg line_skip = line_size; x86_reg line_skip3; __asm__ volatile ( "movq "MANGLE(ff_vector128)", %%mm0 \n\t" "lea (%3, %3, 2), %1 
\n\t" put_signed_pixels_clamped_mmx_half(0) "lea (%0, %3, 4), %0 \n\t" put_signed_pixels_clamped_mmx_half(64) :"+&r" (pixels), "=&r" (line_skip3) :"r" (block), "r"(line_skip) :"memory"); } void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size) { const DCTELEM *p; uint8_t *pix; int i; /* read the pixels */ p = block; pix = pixels; MOVQ_ZERO(mm7); i = 4; do { __asm__ volatile( "movq (%2), %%mm0 \n\t" "movq 8(%2), %%mm1 \n\t" "movq 16(%2), %%mm2 \n\t" "movq 24(%2), %%mm3 \n\t" "movq %0, %%mm4 \n\t" "movq %1, %%mm6 \n\t" "movq %%mm4, %%mm5 \n\t" "punpcklbw %%mm7, %%mm4 \n\t" "punpckhbw %%mm7, %%mm5 \n\t" "paddsw %%mm4, %%mm0 \n\t" "paddsw %%mm5, %%mm1 \n\t" "movq %%mm6, %%mm5 \n\t" "punpcklbw %%mm7, %%mm6 \n\t" "punpckhbw %%mm7, %%mm5 \n\t" "paddsw %%mm6, %%mm2 \n\t" "paddsw %%mm5, %%mm3 \n\t" "packuswb %%mm1, %%mm0 \n\t" "packuswb %%mm3, %%mm2 \n\t" "movq %%mm0, %0 \n\t" "movq %%mm2, %1 \n\t" :"+m"(*pix), "+m"(*(pix+line_size)) :"r"(p) :"memory"); pix += line_size*2; p += 16; } while (--i); } static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h) { __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" ASMALIGN(3) "1: \n\t" "movd (%1), %%mm0 \n\t" "movd (%1, %3), %%mm1 \n\t" "movd %%mm0, (%2) \n\t" "movd %%mm1, (%2, %3) \n\t" "add %%"REG_a", %1 \n\t" "add %%"REG_a", %2 \n\t" "movd (%1), %%mm0 \n\t" "movd (%1, %3), %%mm1 \n\t" "movd %%mm0, (%2) \n\t" "movd %%mm1, (%2, %3) \n\t" "add %%"REG_a", %1 \n\t" "add %%"REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" : "+g"(h), "+r" (pixels), "+r" (block) : "r"((x86_reg)line_size) : "%"REG_a, "memory" ); } static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h) { __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" ASMALIGN(3) "1: \n\t" "movq (%1), %%mm0 \n\t" "movq (%1, %3), %%mm1 \n\t" "movq %%mm0, (%2) \n\t" "movq %%mm1, (%2, %3) \n\t" "add %%"REG_a", %1 \n\t" "add %%"REG_a", %2 \n\t" "movq (%1), %%mm0 \n\t" "movq (%1, %3), %%mm1 \n\t" "movq 
%%mm0, (%2) \n\t" "movq %%mm1, (%2, %3) \n\t" "add %%"REG_a", %1 \n\t" "add %%"REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" : "+g"(h), "+r" (pixels), "+r" (block) : "r"((x86_reg)line_size) : "%"REG_a, "memory" ); } static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h) { __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" ASMALIGN(3) "1: \n\t" "movq (%1), %%mm0 \n\t" "movq 8(%1), %%mm4 \n\t" "movq (%1, %3), %%mm1 \n\t" "movq 8(%1, %3), %%mm5 \n\t" "movq %%mm0, (%2) \n\t" "movq %%mm4, 8(%2) \n\t" "movq %%mm1, (%2, %3) \n\t" "movq %%mm5, 8(%2, %3) \n\t" "add %%"REG_a", %1 \n\t" "add %%"REG_a", %2 \n\t" "movq (%1), %%mm0 \n\t" "movq 8(%1), %%mm4 \n\t" "movq (%1, %3), %%mm1 \n\t" "movq 8(%1, %3), %%mm5 \n\t" "movq %%mm0, (%2) \n\t" "movq %%mm4, 8(%2) \n\t" "movq %%mm1, (%2, %3) \n\t" "movq %%mm5, 8(%2, %3) \n\t" "add %%"REG_a", %1 \n\t" "add %%"REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" : "+g"(h), "+r" (pixels), "+r" (block) : "r"((x86_reg)line_size) : "%"REG_a, "memory" ); } static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h) { __asm__ volatile( "1: \n\t" "movdqu (%1), %%xmm0 \n\t" "movdqu (%1,%3), %%xmm1 \n\t" "movdqu (%1,%3,2), %%xmm2 \n\t" "movdqu (%1,%4), %%xmm3 \n\t" "movdqa %%xmm0, (%2) \n\t" "movdqa %%xmm1, (%2,%3) \n\t" "movdqa %%xmm2, (%2,%3,2) \n\t" "movdqa %%xmm3, (%2,%4) \n\t" "subl $4, %0 \n\t" "lea (%1,%3,4), %1 \n\t" "lea (%2,%3,4), %2 \n\t" "jnz 1b \n\t" : "+g"(h), "+r" (pixels), "+r" (block) : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size) : "memory" ); } static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h) { __asm__ volatile( "1: \n\t" "movdqu (%1), %%xmm0 \n\t" "movdqu (%1,%3), %%xmm1 \n\t" "movdqu (%1,%3,2), %%xmm2 \n\t" "movdqu (%1,%4), %%xmm3 \n\t" "pavgb (%2), %%xmm0 \n\t" "pavgb (%2,%3), %%xmm1 \n\t" "pavgb (%2,%3,2), %%xmm2 \n\t" "pavgb (%2,%4), %%xmm3 \n\t" "movdqa %%xmm0, (%2) \n\t" "movdqa %%xmm1, (%2,%3) \n\t" 
"movdqa %%xmm2, (%2,%3,2) \n\t" "movdqa %%xmm3, (%2,%4) \n\t" "subl $4, %0 \n\t" "lea (%1,%3,4), %1 \n\t" "lea (%2,%3,4), %2 \n\t" "jnz 1b \n\t" : "+g"(h), "+r" (pixels), "+r" (block) : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size) : "memory" ); } #define CLEAR_BLOCKS(name,n) \ static void name(DCTELEM *blocks)\ {\ __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "mov %1, %%"REG_a" \n\t"\ "1: \n\t"\ "movq %%mm7, (%0, %%"REG_a") \n\t"\ "movq %%mm7, 8(%0, %%"REG_a") \n\t"\ "movq %%mm7, 16(%0, %%"REG_a") \n\t"\ "movq %%mm7, 24(%0, %%"REG_a") \n\t"\ "add $32, %%"REG_a" \n\t"\ " js 1b \n\t"\ : : "r" (((uint8_t *)blocks)+128*n),\ "i" (-128*n)\ : "%"REG_a\ );\ } CLEAR_BLOCKS(clear_blocks_mmx, 6) CLEAR_BLOCKS(clear_block_mmx, 1) static void clear_block_sse(DCTELEM *block) { __asm__ volatile( "xorps %%xmm0, %%xmm0 \n" "movaps %%xmm0, (%0) \n" "movaps %%xmm0, 16(%0) \n" "movaps %%xmm0, 32(%0) \n" "movaps %%xmm0, 48(%0) \n" "movaps %%xmm0, 64(%0) \n" "movaps %%xmm0, 80(%0) \n" "movaps %%xmm0, 96(%0) \n" "movaps %%xmm0, 112(%0) \n" :: "r"(block) : "memory" ); } static void clear_blocks_sse(DCTELEM *blocks) {\ __asm__ volatile( "xorps %%xmm0, %%xmm0 \n" "mov %1, %%"REG_a" \n" "1: \n" "movaps %%xmm0, (%0, %%"REG_a") \n" "movaps %%xmm0, 16(%0, %%"REG_a") \n" "movaps %%xmm0, 32(%0, %%"REG_a") \n" "movaps %%xmm0, 48(%0, %%"REG_a") \n" "movaps %%xmm0, 64(%0, %%"REG_a") \n" "movaps %%xmm0, 80(%0, %%"REG_a") \n" "movaps %%xmm0, 96(%0, %%"REG_a") \n" "movaps %%xmm0, 112(%0, %%"REG_a") \n" "add $128, %%"REG_a" \n" " js 1b \n" : : "r" (((uint8_t *)blocks)+128*6), "i" (-128*6) : "%"REG_a ); } static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){ x86_reg i=0; __asm__ volatile( "jmp 2f \n\t" "1: \n\t" "movq (%1, %0), %%mm0 \n\t" "movq (%2, %0), %%mm1 \n\t" "paddb %%mm0, %%mm1 \n\t" "movq %%mm1, (%2, %0) \n\t" "movq 8(%1, %0), %%mm0 \n\t" "movq 8(%2, %0), %%mm1 \n\t" "paddb %%mm0, %%mm1 \n\t" "movq %%mm1, 8(%2, %0) \n\t" "add $16, %0 \n\t" "2: \n\t" "cmp %3, %0 \n\t" " js 1b \n\t" 
: "+r" (i) : "r"(src), "r"(dst), "r"((x86_reg)w-15) ); for(; i<w; i++) dst[i+0] += src[i+0]; } static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){ x86_reg i=0; __asm__ volatile( "jmp 2f \n\t" "1: \n\t" "movq (%2, %0), %%mm0 \n\t" "movq 8(%2, %0), %%mm1 \n\t" "paddb (%3, %0), %%mm0 \n\t" "paddb 8(%3, %0), %%mm1 \n\t" "movq %%mm0, (%1, %0) \n\t" "movq %%mm1, 8(%1, %0) \n\t" "add $16, %0 \n\t" "2: \n\t" "cmp %4, %0 \n\t" " js 1b \n\t" : "+r" (i) : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15) ); for(; i<w; i++) dst[i] = src1[i] + src2[i]; } #if HAVE_7REGS && HAVE_TEN_OPERANDS static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top) { x86_reg w2 = -w; x86_reg x; int l = *left & 0xff; int tl = *left_top & 0xff; int t; __asm__ volatile( "mov %7, %3 \n" "1: \n" "movzx (%3,%4), %2 \n" "mov %2, %k3 \n" "sub %b1, %b3 \n" "add %b0, %b3 \n" "mov %2, %1 \n" "cmp %0, %2 \n" "cmovg %0, %2 \n" "cmovg %1, %0 \n" "cmp %k3, %0 \n" "cmovg %k3, %0 \n" "mov %7, %3 \n" "cmp %2, %0 \n" "cmovl %2, %0 \n" "add (%6,%4), %b0 \n" "mov %b0, (%5,%4) \n" "inc %4 \n" "jl 1b \n" :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2) :"r"(dst+w), "r"(diff+w), "rm"(top+w) ); *left = l; *left_top = tl; } #endif #define H263_LOOP_FILTER \ "pxor %%mm7, %%mm7 \n\t"\ "movq %0, %%mm0 \n\t"\ "movq %0, %%mm1 \n\t"\ "movq %3, %%mm2 \n\t"\ "movq %3, %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm0 \n\t"\ "punpckhbw %%mm7, %%mm1 \n\t"\ "punpcklbw %%mm7, %%mm2 \n\t"\ "punpckhbw %%mm7, %%mm3 \n\t"\ "psubw %%mm2, %%mm0 \n\t"\ "psubw %%mm3, %%mm1 \n\t"\ "movq %1, %%mm2 \n\t"\ "movq %1, %%mm3 \n\t"\ "movq %2, %%mm4 \n\t"\ "movq %2, %%mm5 \n\t"\ "punpcklbw %%mm7, %%mm2 \n\t"\ "punpckhbw %%mm7, %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm4 \n\t"\ "punpckhbw %%mm7, %%mm5 \n\t"\ "psubw %%mm2, %%mm4 \n\t"\ "psubw %%mm3, %%mm5 \n\t"\ "psllw $2, %%mm4 \n\t"\ "psllw $2, %%mm5 \n\t"\ "paddw %%mm0, %%mm4 \n\t"\ "paddw %%mm1, %%mm5 \n\t"\ 
"pxor %%mm6, %%mm6 \n\t"\ "pcmpgtw %%mm4, %%mm6 \n\t"\ "pcmpgtw %%mm5, %%mm7 \n\t"\ "pxor %%mm6, %%mm4 \n\t"\ "pxor %%mm7, %%mm5 \n\t"\ "psubw %%mm6, %%mm4 \n\t"\ "psubw %%mm7, %%mm5 \n\t"\ "psrlw $3, %%mm4 \n\t"\ "psrlw $3, %%mm5 \n\t"\ "packuswb %%mm5, %%mm4 \n\t"\ "packsswb %%mm7, %%mm6 \n\t"\ "pxor %%mm7, %%mm7 \n\t"\ "movd %4, %%mm2 \n\t"\ "punpcklbw %%mm2, %%mm2 \n\t"\ "punpcklbw %%mm2, %%mm2 \n\t"\ "punpcklbw %%mm2, %%mm2 \n\t"\ "psubusb %%mm4, %%mm2 \n\t"\ "movq %%mm2, %%mm3 \n\t"\ "psubusb %%mm4, %%mm3 \n\t"\ "psubb %%mm3, %%mm2 \n\t"\ "movq %1, %%mm3 \n\t"\ "movq %2, %%mm4 \n\t"\ "pxor %%mm6, %%mm3 \n\t"\ "pxor %%mm6, %%mm4 \n\t"\ "paddusb %%mm2, %%mm3 \n\t"\ "psubusb %%mm2, %%mm4 \n\t"\ "pxor %%mm6, %%mm3 \n\t"\ "pxor %%mm6, %%mm4 \n\t"\ "paddusb %%mm2, %%mm2 \n\t"\ "packsswb %%mm1, %%mm0 \n\t"\ "pcmpgtb %%mm0, %%mm7 \n\t"\ "pxor %%mm7, %%mm0 \n\t"\ "psubb %%mm7, %%mm0 \n\t"\ "movq %%mm0, %%mm1 \n\t"\ "psubusb %%mm2, %%mm0 \n\t"\ "psubb %%mm0, %%mm1 \n\t"\ "pand %5, %%mm1 \n\t"\ "psrlw $2, %%mm1 \n\t"\ "pxor %%mm7, %%mm1 \n\t"\ "psubb %%mm7, %%mm1 \n\t"\ "movq %0, %%mm5 \n\t"\ "movq %3, %%mm6 \n\t"\ "psubb %%mm1, %%mm5 \n\t"\ "paddb %%mm1, %%mm6 \n\t" static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){ if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) { const int strength= ff_h263_loop_filter_strength[qscale]; __asm__ volatile( H263_LOOP_FILTER "movq %%mm3, %1 \n\t" "movq %%mm4, %2 \n\t" "movq %%mm5, %0 \n\t" "movq %%mm6, %3 \n\t" : "+m" (*(uint64_t*)(src - 2*stride)), "+m" (*(uint64_t*)(src - 1*stride)), "+m" (*(uint64_t*)(src + 0*stride)), "+m" (*(uint64_t*)(src + 1*stride)) : "g" (2*strength), "m"(ff_pb_FC) ); } } static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){ __asm__ volatile( //FIXME could save 1 instruction if done as 8x4 ... 
"movd %4, %%mm0 \n\t" "movd %5, %%mm1 \n\t" "movd %6, %%mm2 \n\t" "movd %7, %%mm3 \n\t" "punpcklbw %%mm1, %%mm0 \n\t" "punpcklbw %%mm3, %%mm2 \n\t" "movq %%mm0, %%mm1 \n\t" "punpcklwd %%mm2, %%mm0 \n\t" "punpckhwd %%mm2, %%mm1 \n\t" "movd %%mm0, %0 \n\t" "punpckhdq %%mm0, %%mm0 \n\t" "movd %%mm0, %1 \n\t" "movd %%mm1, %2 \n\t" "punpckhdq %%mm1, %%mm1 \n\t" "movd %%mm1, %3 \n\t" : "=m" (*(uint32_t*)(dst + 0*dst_stride)), "=m" (*(uint32_t*)(dst + 1*dst_stride)), "=m" (*(uint32_t*)(dst + 2*dst_stride)), "=m" (*(uint32_t*)(dst + 3*dst_stride)) : "m" (*(uint32_t*)(src + 0*src_stride)), "m" (*(uint32_t*)(src + 1*src_stride)), "m" (*(uint32_t*)(src + 2*src_stride)), "m" (*(uint32_t*)(src + 3*src_stride)) ); } static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){ if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) { const int strength= ff_h263_loop_filter_strength[qscale]; DECLARE_ALIGNED(8, uint64_t, temp)[4]; uint8_t *btemp= (uint8_t*)temp; src -= 2; transpose4x4(btemp , src , 8, stride); transpose4x4(btemp+4, src + 4*stride, 8, stride); __asm__ volatile( H263_LOOP_FILTER // 5 3 4 6 : "+m" (temp[0]), "+m" (temp[1]), "+m" (temp[2]), "+m" (temp[3]) : "g" (2*strength), "m"(ff_pb_FC) ); __asm__ volatile( "movq %%mm5, %%mm1 \n\t" "movq %%mm4, %%mm0 \n\t" "punpcklbw %%mm3, %%mm5 \n\t" "punpcklbw %%mm6, %%mm4 \n\t" "punpckhbw %%mm3, %%mm1 \n\t" "punpckhbw %%mm6, %%mm0 \n\t" "movq %%mm5, %%mm3 \n\t" "movq %%mm1, %%mm6 \n\t" "punpcklwd %%mm4, %%mm5 \n\t" "punpcklwd %%mm0, %%mm1 \n\t" "punpckhwd %%mm4, %%mm3 \n\t" "punpckhwd %%mm0, %%mm6 \n\t" "movd %%mm5, (%0) \n\t" "punpckhdq %%mm5, %%mm5 \n\t" "movd %%mm5, (%0,%2) \n\t" "movd %%mm3, (%0,%2,2) \n\t" "punpckhdq %%mm3, %%mm3 \n\t" "movd %%mm3, (%0,%3) \n\t" "movd %%mm1, (%1) \n\t" "punpckhdq %%mm1, %%mm1 \n\t" "movd %%mm1, (%1,%2) \n\t" "movd %%mm6, (%1,%2,2) \n\t" "punpckhdq %%mm6, %%mm6 \n\t" "movd %%mm6, (%1,%3) \n\t" :: "r" (src), "r" (src + 4*stride), "r" ((x86_reg) stride ), "r" ((x86_reg)(3*stride)) ); } } /* 
draw the edges of width 'w' of an image of size width, height this mmx version can only handle w==8 || w==16 */ static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w) { uint8_t *ptr, *last_line; int i; last_line = buf + (height - 1) * wrap; /* left and right */ ptr = buf; if(w==8) { __asm__ volatile( "1: \n\t" "movd (%0), %%mm0 \n\t" "punpcklbw %%mm0, %%mm0 \n\t" "punpcklwd %%mm0, %%mm0 \n\t" "punpckldq %%mm0, %%mm0 \n\t" "movq %%mm0, -8(%0) \n\t" "movq -8(%0, %2), %%mm1 \n\t" "punpckhbw %%mm1, %%mm1 \n\t" "punpckhwd %%mm1, %%mm1 \n\t" "punpckhdq %%mm1, %%mm1 \n\t" "movq %%mm1, (%0, %2) \n\t" "add %1, %0 \n\t" "cmp %3, %0 \n\t" " jb 1b \n\t" : "+r" (ptr) : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height) ); } else { __asm__ volatile( "1: \n\t" "movd (%0), %%mm0 \n\t" "punpcklbw %%mm0, %%mm0 \n\t" "punpcklwd %%mm0, %%mm0 \n\t" "punpckldq %%mm0, %%mm0 \n\t" "movq %%mm0, -8(%0) \n\t" "movq %%mm0, -16(%0) \n\t" "movq -8(%0, %2), %%mm1 \n\t" "punpckhbw %%mm1, %%mm1 \n\t" "punpckhwd %%mm1, %%mm1 \n\t" "punpckhdq %%mm1, %%mm1 \n\t" "movq %%mm1, (%0, %2) \n\t" "movq %%mm1, 8(%0, %2) \n\t" "add %1, %0 \n\t" "cmp %3, %0 \n\t" " jb 1b \n\t" : "+r" (ptr) : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height) ); } for(i=0;i<w;i+=4) { /* top and bottom (and hopefully also the corners) */ ptr= buf - (i + 1) * wrap - w; __asm__ volatile( "1: \n\t" "movq (%1, %0), %%mm0 \n\t" "movq %%mm0, (%0) \n\t" "movq %%mm0, (%0, %2) \n\t" "movq %%mm0, (%0, %2, 2) \n\t" "movq %%mm0, (%0, %3) \n\t" "add $8, %0 \n\t" "cmp %4, %0 \n\t" " jb 1b \n\t" : "+r" (ptr) : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w) ); ptr= last_line + (i + 1) * wrap - w; __asm__ volatile( "1: \n\t" "movq (%1, %0), %%mm0 \n\t" "movq %%mm0, (%0) \n\t" "movq %%mm0, (%0, %2) \n\t" "movq %%mm0, (%0, %2, 2) \n\t" "movq %%mm0, (%0, %3) \n\t" "add $8, %0 \n\t" "cmp %4, %0 \n\t" " jb 1b \n\t" : "+r" (ptr) : "r" 
((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w) ); } } #define PAETH(cpu, abs3)\ static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\ {\ x86_reg i = -bpp;\ x86_reg end = w-3;\ __asm__ volatile(\ "pxor %%mm7, %%mm7 \n"\ "movd (%1,%0), %%mm0 \n"\ "movd (%2,%0), %%mm1 \n"\ "punpcklbw %%mm7, %%mm0 \n"\ "punpcklbw %%mm7, %%mm1 \n"\ "add %4, %0 \n"\ "1: \n"\ "movq %%mm1, %%mm2 \n"\ "movd (%2,%0), %%mm1 \n"\ "movq %%mm2, %%mm3 \n"\ "punpcklbw %%mm7, %%mm1 \n"\ "movq %%mm2, %%mm4 \n"\ "psubw %%mm1, %%mm3 \n"\ "psubw %%mm0, %%mm4 \n"\ "movq %%mm3, %%mm5 \n"\ "paddw %%mm4, %%mm5 \n"\ abs3\ "movq %%mm4, %%mm6 \n"\ "pminsw %%mm5, %%mm6 \n"\ "pcmpgtw %%mm6, %%mm3 \n"\ "pcmpgtw %%mm5, %%mm4 \n"\ "movq %%mm4, %%mm6 \n"\ "pand %%mm3, %%mm4 \n"\ "pandn %%mm3, %%mm6 \n"\ "pandn %%mm0, %%mm3 \n"\ "movd (%3,%0), %%mm0 \n"\ "pand %%mm1, %%mm6 \n"\ "pand %%mm4, %%mm2 \n"\ "punpcklbw %%mm7, %%mm0 \n"\ "movq %6, %%mm5 \n"\ "paddw %%mm6, %%mm0 \n"\ "paddw %%mm2, %%mm3 \n"\ "paddw %%mm3, %%mm0 \n"\ "pand %%mm5, %%mm0 \n"\ "movq %%mm0, %%mm3 \n"\ "packuswb %%mm3, %%mm3 \n"\ "movd %%mm3, (%1,%0) \n"\ "add %4, %0 \n"\ "cmp %5, %0 \n"\ "jle 1b \n"\ :"+r"(i)\ :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\ "m"(ff_pw_255)\ :"memory"\ );\ } #define ABS3_MMX2\ "psubw %%mm5, %%mm7 \n"\ "pmaxsw %%mm7, %%mm5 \n"\ "pxor %%mm6, %%mm6 \n"\ "pxor %%mm7, %%mm7 \n"\ "psubw %%mm3, %%mm6 \n"\ "psubw %%mm4, %%mm7 \n"\ "pmaxsw %%mm6, %%mm3 \n"\ "pmaxsw %%mm7, %%mm4 \n"\ "pxor %%mm7, %%mm7 \n" #define ABS3_SSSE3\ "pabsw %%mm3, %%mm3 \n"\ "pabsw %%mm4, %%mm4 \n"\ "pabsw %%mm5, %%mm5 \n" PAETH(mmx2, ABS3_MMX2) #if HAVE_SSSE3 PAETH(ssse3, ABS3_SSSE3) #endif #define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\ "paddw " #m4 ", " #m3 " \n\t" /* x1 */\ "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\ "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\ "movq "#in7", " #m3 " \n\t" /* d */\ 
"movq "#in0", %%mm5 \n\t" /* D */\ "paddw " #m3 ", %%mm5 \n\t" /* x4 */\ "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\ "movq "#in1", %%mm5 \n\t" /* C */\ "movq "#in2", %%mm6 \n\t" /* B */\ "paddw " #m6 ", %%mm5 \n\t" /* x3 */\ "paddw " #m5 ", %%mm6 \n\t" /* x2 */\ "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\ "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\ "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\ "paddw " #rnd ", %%mm4 \n\t" /* x2 */\ "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\ "psraw $5, %%mm5 \n\t"\ "packuswb %%mm5, %%mm5 \n\t"\ OP(%%mm5, out, %%mm7, d) #define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\ static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ uint64_t temp;\ \ __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "1: \n\t"\ "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\ "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\ "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\ "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\ "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\ "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\ "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\ "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\ "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\ "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\ "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\ "psllq $24, %%mm4 \n\t" /* 000ABCDE */\ "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\ "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\ "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\ "paddw %%mm3, %%mm5 \n\t" /* b */\ "paddw %%mm2, %%mm6 \n\t" /* c */\ "paddw %%mm5, %%mm5 \n\t" /* 2b */\ "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\ "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\ "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\ "paddw %%mm4, %%mm0 \n\t" /* a */\ "paddw %%mm1, %%mm5 \n\t" /* d */\ "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\ "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\ "paddw %6, %%mm6 \n\t"\ "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\ "psraw $5, %%mm0 \n\t"\ "movq %%mm0, 
%5 \n\t"\ /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\ \ "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\ "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\ "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\ "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\ "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\ "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\ "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\ "paddw %%mm0, %%mm2 \n\t" /* b */\ "paddw %%mm5, %%mm3 \n\t" /* c */\ "paddw %%mm2, %%mm2 \n\t" /* 2b */\ "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\ "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\ "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\ "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\ "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\ "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\ "paddw %%mm2, %%mm1 \n\t" /* a */\ "paddw %%mm6, %%mm4 \n\t" /* d */\ "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\ "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\ "paddw %6, %%mm1 \n\t"\ "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\ "psraw $5, %%mm3 \n\t"\ "movq %5, %%mm1 \n\t"\ "packuswb %%mm3, %%mm1 \n\t"\ OP_MMX2(%%mm1, (%1),%%mm4, q)\ /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\ \ "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\ "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\ "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\ "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\ "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\ "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\ "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\ "paddw %%mm1, %%mm5 \n\t" /* b */\ "paddw %%mm4, %%mm0 \n\t" /* c */\ "paddw %%mm5, %%mm5 \n\t" /* 2b */\ "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\ "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\ "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\ "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\ "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\ "paddw %%mm3, %%mm2 \n\t" /* d */\ "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\ "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\ "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\ "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\ "paddw %%mm2, %%mm6 \n\t" 
/* a */\ "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\ "paddw %6, %%mm0 \n\t"\ "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\ "psraw $5, %%mm0 \n\t"\ /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\ \ "paddw %%mm5, %%mm3 \n\t" /* a */\ "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\ "paddw %%mm4, %%mm6 \n\t" /* b */\ "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\ "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\ "paddw %%mm1, %%mm4 \n\t" /* c */\ "paddw %%mm2, %%mm5 \n\t" /* d */\ "paddw %%mm6, %%mm6 \n\t" /* 2b */\ "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\ "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\ "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\ "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\ "paddw %6, %%mm4 \n\t"\ "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\ "psraw $5, %%mm4 \n\t"\ "packuswb %%mm4, %%mm0 \n\t"\ OP_MMX2(%%mm0, 8(%1), %%mm4, q)\ \ "add %3, %0 \n\t"\ "add %4, %1 \n\t"\ "decl %2 \n\t"\ " jnz 1b \n\t"\ : "+a"(src), "+c"(dst), "+D"(h)\ : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\ : "memory"\ );\ }\ \ static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ int i;\ int16_t temp[16];\ /* quick HACK, XXX FIXME MUST be optimized */\ for(i=0; i<h; i++)\ {\ temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\ temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\ temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\ temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\ temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\ temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\ temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 
4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        /* right edge: taps that would read past src[16] are folded back into the row */\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        /* the C loop above computed the int16 filter taps; the asm below only adds ROUNDER, */\
        /* shifts right by 5 and packs the 16 results to unsigned bytes via OP_3DNOW */\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0 \n\t"\
            "movq 24(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
/* 8-wide horizontal MPEG-4 qpel lowpass, MMX2: the (20,-6,3,-1) FIR is done */\
/* entirely in asm; processes h rows, one 8-pixel row per loop iteration.   */\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %5, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
        \
        "movd 5(%0), %%mm5 \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm2 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3 \n\t" /* c */\
        "paddw %%mm5, %%mm4 \n\t" /* d */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "packuswb %%mm3, %%mm0 \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
        \
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
/* 8-wide horizontal lowpass, 3DNow! path: taps computed in C (with edge      */\
/* mirroring, see the index folding below), asm only rounds/shifts/packs/OPs. */\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}

/*
 * QPEL_OP: emits the vertical MPEG-4 qpel lowpass filters plus the full set
 * of quarter-pel motion-compensation position functions (mc00..mc33) for
 * block sizes 8 and 16, specialized per OPNAME (put/avg, rounded or not) and
 * per instruction set MMX (mmx2 / 3dnow).
 */
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
/* 16-wide vertical lowpass: pass 1 transposes/unpacks 17 source rows into   */\
/* a 17*4 qword scratch buffer, pass 2 runs the FIR via QPEL_V_LOW per column.*/\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
    \
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "movq 8(%0), %%mm2 \n\t"\
        "movq 8(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 17*8(%1) \n\t"\
        "movq %%mm2, 2*17*8(%1) \n\t"\
        "movq %%mm3, 3*17*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
    \
    temp_ptr= temp;\
    count=4;\
    \
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
        \
        /* bottom edge: offsets repeat (128,120,112) instead of advancing -- mirrored taps */\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"  \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
        \
        "add $136, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        :"memory"\
    );\
}\
\
/* 8-wide vertical lowpass: same two-pass scheme with a 9-row scratch buffer. */\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
    \
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 9*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
    \
    temp_ptr= temp;\
    count=2;\
    \
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
        \
        "add $72, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## 
qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
/* mcXY naming: X = horizontal quarter-pel phase (0..3), Y = vertical phase. */\
/* Intermediate phases are built by lowpass filtering into a stack `half`    */\
/* buffer and then averaging (pixels*_l2) with the appropriately shifted src.*/\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
/* diagonal phases: halfH holds 9 rows of horizontally filtered pixels,      */\
/* halfHV the vertically filtered result of halfH; both share one stack array.*/\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
/* 16x16 versions: same structure as the 8x8 set above, with 17-row buffers. */\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}

/* Store/average op fragments pasted into the asm of the filters above:      */
/* PUT_OP is a plain store; the AVG ops average with the destination first   */
/* (pavgusb on 3DNow!, pavgb on MMX2).                                       */
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"

#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgusb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"

/* Instantiate put/avg x rounded/no-round x mmx2/3dnow variants.             */
/* ff_pw_16 vs ff_pw_15 selects the rounding constant for the >>5 shift.     */
QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)

/***********************************/
/* bilinear qpel: not compliant to any spec, only for -lavdopts fast */

#define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
}

#define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY 
## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
}

/* Emit the whole 2-tap (bilinear) qpel function table for one OPNAME/SIZE/  */
/* instruction-set combination; positions map onto hpel and 3-point helpers. */
#define QPEL_2TAP(OPNAME, SIZE, MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
    OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
    OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
    OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
}\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
}\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\

QPEL_2TAP(put_, 16, mmx2)
QPEL_2TAP(avg_, 16, mmx2)
QPEL_2TAP(put_, 8, mmx2)
QPEL_2TAP(avg_, 8, mmx2)
QPEL_2TAP(put_, 16, 3dnow)
QPEL_2TAP(avg_, 16, 3dnow)
QPEL_2TAP(put_, 8, 3dnow)
QPEL_2TAP(avg_, 8, 3dnow)

#if 0
static void just_return(void) { return; }
#endif

/*
 * Global motion compensation (MPEG-4 GMC) for one 8-pixel-wide column strip,
 * MMX. Falls back to the C implementation (ff_gmc_c) when the fullpel offset
 * is not constant over the block or the subpel motion vector needs more than
 * 16 bits; otherwise does 4-pixel-wide bilinear interpolation in asm.
 * NOTE(review): edge_buf is a VLA of (h+1)*stride bytes on the stack --
 * presumably h/stride are small here; worth confirming against callers.
 */
static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
    const int w = 8;
    const int ix = ox>>(16+shift);
    const int iy = oy>>(16+shift);
    const int oxs = ox>>4;
    const int oys = oy>>4;
    const int dxxs = dxx>>4;
    const int dxys = dxy>>4;
    const int dyxs = dyx>>4;
    const int dyys = dyy>>4;
    const uint16_t r4[4] = {r,r,r,r};
    const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
    const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
    const uint64_t shift2 = 2*shift;
    uint8_t edge_buf[(h+1)*stride];
    int x, y;

    const int dxw = (dxx-(1<<(16+shift)))*(w-1);
    const int dyh = (dyy-(1<<(16+shift)))*(h-1);
    const int dxh = dxy*(h-1);
    const int dyw = dyx*(w-1);
    if( // non-constant fullpel offset (3% of blocks)
        ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
         (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx|dxy|dyx|dyy)&15 )
    {
        //FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
        return;
    }

    src += ix + iy*stride;
    /* block reads outside the picture: replicate edges into edge_buf first */
    if( (unsigned)ix >= width-w ||
        (unsigned)iy >= height-h )
    {
        ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
        src = edge_buf;
    }

    /* mm6 = broadcast of the interpolation scale s = 1<<shift, mm7 = 0 */
    __asm__ volatile(
        "movd %0, %%mm6 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        :: "r"(1<<shift)
    );

    for(x=0; x<w; x+=4){
        uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
                            oxs - dxys + dxxs*(x+1),
                            oxs - dxys + dxxs*(x+2),
                            oxs - dxys + dxxs*(x+3) };
        uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
                            oys - dyys + dyxs*(x+1),
                            oys - dyys + dyxs*(x+2),
                            oys - dyys + dyxs*(x+3) };

        for(y=0; y<h; y++){
            /* advance the per-pixel subpel accumulators and extract the
               fractional parts (top 4 bits) into mm4/mm5 */
            __asm__ volatile(
                "movq %0, %%mm4 \n\t"
                "movq %1, %%mm5 \n\t"
                "paddw %2, %%mm4 \n\t"
                "paddw %3, %%mm5 \n\t"
                "movq %%mm4, %0 \n\t"
                "movq %%mm5, %1 \n\t"
                "psrlw $12, %%mm4 \n\t"
                "psrlw $12, %%mm5 \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            /* bilinear blend of the four neighbours, then round (r4),
               shift by 2*shift and pack 4 result pixels */
            __asm__ volatile(
                "movq %%mm6, %%mm2 \n\t"
                "movq %%mm6, %%mm1 \n\t"
                "psubw %%mm4, %%mm2 \n\t"
                "psubw %%mm5, %%mm1 \n\t"
                "movq %%mm2, %%mm0 \n\t"
                "movq %%mm4, %%mm3 \n\t"
                "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
                "pmullw %%mm5, %%mm3 \n\t" // dx*dy
                "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
                "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)

                "movd %4, %%mm5 \n\t"
                "movd %3, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
                "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy

                "movd %2, %%mm5 \n\t"
                "movd %1, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
                "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
                "paddw %5, %%mm1 \n\t"
                "paddw %%mm3, %%mm2 \n\t"
                "paddw %%mm1, %%mm0 \n\t"
                "paddw %%mm2, %%mm0 \n\t"

                "psrlw %6, %%mm0 \n\t"
                "packuswb %%mm0, %%mm0 \n\t"
                "movd %%mm0, %0 \n\t"

                : "=m"(dst[x+y*stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride+1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        src += 4-h*stride;
    }
}

/* Emit a prefetch helper that touches h rows, `stride` bytes apart, with the */
/* given prefetch instruction (prefetcht0 for MMX2, 3DNow! prefetch).         */
#define PREFETCH(name, op) \
static void name(void *mem, int stride, int h){\
    const uint8_t *p= mem;\
    do{\
        __asm__ volatile(#op" %0" :: "m"(*p));\
        p+= stride;\
    }while(--h);\
}
PREFETCH(prefetch_mmx2, prefetcht0)
PREFETCH(prefetch_3dnow, prefetch)
#undef PREFETCH

#include "h264dsp_mmx.c"
#include "rv40dsp_mmx.c"

/* CAVS specific */
/* CAVS fullpel MC is identical to plain hpel copy/average, so reuse those. */
void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels8_mmx(dst, src, stride, 8);
}
void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels16_mmx(dst, src, stride, 16);
}
void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels16_mmx(dst, src, stride, 16);
}

/* VC1 specific */
/* fullpel mspel position: rnd is unused because no filtering happens */
void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    avg_pixels8_mmx2(dst, src, stride, 8);
}

/* XXX: those functions should be suppressed ASAP when all IDCTs are converted */
#if CONFIG_GPL
static void ff_libmpeg2mmx_idct_put(uint8_t 
*dest, int line_size, DCTELEM *block)
{
    /* libmpeg2 MMX IDCT followed by a clamped store to the destination plane */
    ff_mmx_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    /* same IDCT, but the result is added to the existing pixels (inter MC) */
    ff_mmx_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
#endif
/* Xvid IDCT wrappers: compute the IDCT in place, then store/add clamped. */
static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}

/*
 * Vorbis floor/residue inverse channel coupling, 3DNow!: converts the
 * (magnitude, angle) pair into left/right spectra, two floats per iteration.
 * blocksize is presumably a multiple of 2 -- no scalar tail is handled.
 */
static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
{
    int i;
    __asm__ volatile("pxor %%mm7, %%mm7":);
    for(i=0; i<blocksize; i+=2) {
        __asm__ volatile(
            "movq %0, %%mm0 \n\t"
            "movq %1, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm1, %%mm3 \n\t"
            "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
            "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
            "pslld $31, %%mm2 \n\t" // keep only the sign bit
            "pxor %%mm2, %%mm1 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "pand %%mm1, %%mm3 \n\t"
            "pandn %%mm1, %%mm4 \n\t"
            "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movq %%mm3, %1 \n\t"
            "movq %%mm0, %0 \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
    __asm__ volatile("femms"); /* leave MMX state so the FPU is usable again */
}

/*
 * Same coupling transform, SSE: four floats per iteration; xmm5 holds the
 * sign-bit mask (ff_pdw_80000000) used to flip ang by the sign of mag.
 */
static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
{
    int i;

    __asm__ volatile(
        "movaps %0, %%xmm5 \n\t"
        ::"m"(ff_pdw_80000000[0])
    );
    for(i=0; i<blocksize; i+=4) {
        __asm__ volatile(
            "movaps %0, %%xmm0 \n\t"
            "movaps %1, %%xmm1 \n\t"
            "xorps %%xmm2, %%xmm2 \n\t"
            "xorps %%xmm3, %%xmm3 \n\t"
            "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
            "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
            "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit
            "xorps %%xmm2, %%xmm1 \n\t"
            "movaps %%xmm3, %%xmm4 \n\t"
            "andps %%xmm1, %%xmm3 \n\t"
            "andnps %%xmm1, %%xmm4 \n\t"
            "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movaps %%xmm3, %1 \n\t"
            "movaps %%xmm0, %0 \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
}

/* IF1/IF0: textual on/off switches used below to emit (or drop) the lines   */
/* that are only needed for the stereo or the mono output variant.           */
#define IF1(x) x
#define IF0(x)

/* MIX5: specialized 5.0 -> stereo/mono downmix; channel planes are 0x400    */
/* bytes (256 floats) apart, coefficients come from matrix[] (see caller).   */
#define MIX5(mono,stereo)\
    __asm__ volatile(\
        "movss 0(%2), %%xmm5 \n"\
        "movss 8(%2), %%xmm6 \n"\
        "movss 24(%2), %%xmm7 \n"\
        "shufps $0, %%xmm5, %%xmm5 \n"\
        "shufps $0, %%xmm6, %%xmm6 \n"\
        "shufps $0, %%xmm7, %%xmm7 \n"\
        "1: \n"\
        "movaps (%0,%1), %%xmm0 \n"\
        "movaps 0x400(%0,%1), %%xmm1 \n"\
        "movaps 0x800(%0,%1), %%xmm2 \n"\
        "movaps 0xc00(%0,%1), %%xmm3 \n"\
        "movaps 0x1000(%0,%1), %%xmm4 \n"\
        "mulps %%xmm5, %%xmm0 \n"\
        "mulps %%xmm6, %%xmm1 \n"\
        "mulps %%xmm5, %%xmm2 \n"\
        "mulps %%xmm7, %%xmm3 \n"\
        "mulps %%xmm7, %%xmm4 \n"\
        stereo("addps %%xmm1, %%xmm0 \n")\
        "addps %%xmm1, %%xmm2 \n"\
        "addps %%xmm3, %%xmm0 \n"\
        "addps %%xmm4, %%xmm2 \n"\
        mono("addps %%xmm2, %%xmm0 \n")\
        "movaps %%xmm0, (%0,%1) \n"\
        stereo("movaps %%xmm2, 0x400(%0,%1) \n")\
        "add $16, %0 \n"\
        "jl 1b \n"\
        :"+&r"(i)\
        :"r"(samples[0]+len), "r"(matrix)\
        :"memory"\
    );

/* MIX_MISC: generic N-channel downmix; the inner 2: loop walks the SIMD-    */\
/* broadcast coefficient table (matrix_simd) across all input channels.      */
#define MIX_MISC(stereo)\
    __asm__ volatile(\
        "1: \n"\
        "movaps (%3,%0), %%xmm0 \n"\
        stereo("movaps %%xmm0, %%xmm1 \n")\
        "mulps %%xmm6, %%xmm0 \n"\
        stereo("mulps %%xmm7, %%xmm1 \n")\
        "lea 1024(%3,%0), %1 \n"\
        "mov %5, %2 \n"\
        "2: \n"\
        "movaps (%1), %%xmm2 \n"\
        stereo("movaps %%xmm2, %%xmm3 \n")\
        "mulps (%4,%2), %%xmm2 \n"\
        stereo("mulps 16(%4,%2), 
%%xmm3 \n")\
        "addps %%xmm2, %%xmm0 \n"\
        stereo("addps %%xmm3, %%xmm1 \n")\
        "add $1024, %1 \n"\
        "add $32, %2 \n"\
        "jl 2b \n"\
        "movaps %%xmm0, (%3,%0) \n"\
        stereo("movaps %%xmm1, 1024(%3,%0) \n")\
        "add $16, %0 \n"\
        "jl 1b \n"\
        :"+&r"(i), "=&r"(j), "=&r"(k)\
        :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
        :"memory"\
    );

/*
 * AC-3 downmix, SSE. Picks the fast MIX5 path for the two common 5-channel
 * layouts (matching coefficients are detected by bit-comparing the float
 * matrix entries as ints), otherwise broadcasts the matrix into an aligned
 * SIMD table and runs the generic MIX_MISC loop. Mixes in place into
 * samples[0] (and samples[1] for stereo out).
 */
static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
{
    /* bitwise view of the float matrix, only used for exact equality tests */
    int (*matrix_cmp)[2] = (int(*)[2])matrix;
    intptr_t i,j,k;

    i = -len*sizeof(float);
    if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
        MIX5(IF0,IF1);
    } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
        MIX5(IF1,IF0);
    } else {
        DECLARE_ALIGNED(16, float, matrix_simd)[in_ch][2][4];
        j = 2*in_ch*sizeof(float);
        /* broadcast each scalar coefficient to a 4-float vector */
        __asm__ volatile(
            "1: \n"
            "sub $8, %0 \n"
            "movss (%2,%0), %%xmm6 \n"
            "movss 4(%2,%0), %%xmm7 \n"
            "shufps $0, %%xmm6, %%xmm6 \n"
            "shufps $0, %%xmm7, %%xmm7 \n"
            "movaps %%xmm6, (%1,%0,4) \n"
            "movaps %%xmm7, 16(%1,%0,4) \n"
            "jg 1b \n"
            :"+&r"(j)
            :"r"(matrix_simd), "r"(matrix)
            :"memory"
        );
        if(out_ch == 2) {
            MIX_MISC(IF1);
        } else {
            MIX_MISC(IF0);
        }
    }
}

/* dst[i] *= src[i], 3DNow!: two floats per step, iterating backwards.       */
/* Assumes len is a multiple of 4 (no scalar tail).                          */
static void vector_fmul_3dnow(float *dst, const float *src, int len){
    x86_reg i = (len-4)*4;
    __asm__ volatile(
        "1: \n\t"
        "movq (%1,%0), %%mm0 \n\t"
        "movq 8(%1,%0), %%mm1 \n\t"
        "pfmul (%2,%0), %%mm0 \n\t"
        "pfmul 8(%2,%0), %%mm1 \n\t"
        "movq %%mm0, (%1,%0) \n\t"
        "movq %%mm1, 8(%1,%0) \n\t"
        "sub $16, %0 \n\t"
        "jge 1b \n\t"
        "femms \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}
/* dst[i] *= src[i], SSE: eight floats per step; requires 16-byte alignment. */
static void vector_fmul_sse(float *dst, const float *src, int len){
    x86_reg i = (len-8)*4;
    __asm__ volatile(
        "1: \n\t"
        "movaps (%1,%0), %%xmm0 \n\t"
        "movaps 16(%1,%0), %%xmm1 \n\t"
        "mulps (%2,%0), %%xmm0 \n\t"
        "mulps 16(%2,%0), %%xmm1 \n\t"
        "movaps %%xmm0, (%1,%0) \n\t"
        "movaps %%xmm1, 16(%1,%0) \n\t"
        "sub $32, %0 \n\t"
        "jge 1b \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}

/* dst[i] = src0[i] * src1[len-1-i]: src1 is walked forwards while dst/src0  */
/* are walked backwards; pswapd reverses each float pair.                    */
static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-16;
    __asm__ volatile(
        "1: \n\t"
        "pswapd 8(%1), %%mm0 \n\t"
        "pswapd (%1), %%mm1 \n\t"
        "pfmul (%3,%0), %%mm0 \n\t"
        "pfmul 8(%3,%0), %%mm1 \n\t"
        "movq %%mm0, (%2,%0) \n\t"
        "movq %%mm1, 8(%2,%0) \n\t"
        "add $16, %1 \n\t"
        "sub $16, %0 \n\t"
        "jge 1b \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
    __asm__ volatile("femms");
}
/* Same reversal with SSE; shufps $0x1b reverses the four lanes of a vector. */
static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-32;
    __asm__ volatile(
        "1: \n\t"
        "movaps 16(%1), %%xmm0 \n\t"
        "movaps (%1), %%xmm1 \n\t"
        "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
        "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
        "mulps (%3,%0), %%xmm0 \n\t"
        "mulps 16(%3,%0), %%xmm1 \n\t"
        "movaps %%xmm0, (%2,%0) \n\t"
        "movaps %%xmm1, 16(%2,%0) \n\t"
        "add $32, %1 \n\t"
        "sub $32, %0 \n\t"
        "jge 1b \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
}

/* dst[i] = src0[i]*src1[i] + src2[i], 3DNow! */
static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
                                  const float *src2, int len){
    x86_reg i = (len-4)*4;
    __asm__ volatile(
        "1: \n\t"
        "movq (%2,%0), %%mm0 \n\t"
        "movq 8(%2,%0), %%mm1 \n\t"
        "pfmul (%3,%0), %%mm0 \n\t"
        "pfmul 8(%3,%0), %%mm1 \n\t"
        "pfadd (%4,%0), %%mm0 \n\t"
        "pfadd 8(%4,%0), %%mm1 \n\t"
        "movq %%mm0, (%1,%0) \n\t"
        "movq %%mm1, 8(%1,%0) \n\t"
        "sub $16, %0 \n\t"
        "jge 1b \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
        :"memory"
    );
    __asm__ volatile("femms");
}
/* dst[i] = src0[i]*src1[i] + src2[i], SSE (aligned, len multiple of 8) */
static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
                                const float *src2, int len){
    x86_reg i = (len-8)*4;
    __asm__ volatile(
        "1: \n\t"
        "movaps (%2,%0), %%xmm0 \n\t"
        "movaps 16(%2,%0), %%xmm1 \n\t"
        "mulps (%3,%0), %%xmm0 \n\t"
        "mulps 16(%3,%0), %%xmm1 \n\t"
        "addps (%4,%0), %%xmm0 \n\t"
        "addps 16(%4,%0), %%xmm1 \n\t"
        "movaps %%xmm0, (%1,%0) \n\t"
        "movaps %%xmm1, 16(%1,%0) \n\t"
        "sub $32, %0 \n\t"
        "jge 1b \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
        :"memory"
    );
}

/*
 * MDCT overlap-add windowing (dst = src0*win +/- reversed src1*win),
 * 3DNow!Ext. Only the add_bias == 0 case is vectorized and only when six
 * GP registers are available; everything else falls back to the C version.
 */
static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
                                      const float *win, float add_bias, int len){
#if HAVE_6REGS
    if(add_bias == 0){
        x86_reg i = -len*4;  /* walks the first half forwards  */
        x86_reg j = len*4-8; /* walks the second half backwards */
        __asm__ volatile(
            "1: \n"
            "pswapd (%5,%1), %%mm1 \n"
            "movq (%5,%0), %%mm0 \n"
            "pswapd (%4,%1), %%mm5 \n"
            "movq (%3,%0), %%mm4 \n"
            "movq %%mm0, %%mm2 \n"
            "movq %%mm1, %%mm3 \n"
            "pfmul %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
            "pfmul %%mm5, %%mm3 \n" // src1[ j]*win[len+j]
            "pfmul %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
            "pfmul %%mm5, %%mm0 \n" // src1[ j]*win[len+i]
            "pfadd %%mm3, %%mm2 \n"
            "pfsub %%mm0, %%mm1 \n"
            "pswapd %%mm2, %%mm2 \n"
            "movq %%mm1, (%2,%0) \n"
            "movq %%mm2, (%2,%1) \n"
            "sub $8, %1 \n"
            "add $8, %0 \n"
            "jl 1b \n"
            "femms \n"
            :"+r"(i), "+r"(j)
            :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
        );
    }else
#endif
        ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
}

/* Same windowing with SSE: four floats per step, lane reversal via shufps. */
static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
                                   const float *win, float add_bias, int len){
#if HAVE_6REGS
    if(add_bias == 0){
        x86_reg i = -len*4;
        x86_reg j = len*4-16;
        __asm__ volatile(
            "1: \n"
            "movaps (%5,%1), %%xmm1 \n"
            "movaps (%5,%0), %%xmm0 \n"
            "movaps (%4,%1), %%xmm5 \n"
            "movaps (%3,%0), %%xmm4 \n"
            "shufps $0x1b, %%xmm1, %%xmm1 \n"
            "shufps $0x1b, %%xmm5, %%xmm5 \n"
            "movaps %%xmm0, %%xmm2 \n"
            "movaps %%xmm1, %%xmm3 \n"
            "mulps %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
            "mulps %%xmm5, %%xmm3 \n" // src1[ j]*win[len+j]
            "mulps %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
            "mulps %%xmm5, %%xmm0 \n" // src1[ j]*win[len+i]
            "addps %%xmm3, %%xmm2 \n"
            "subps %%xmm0, %%xmm1 \n"
            "shufps $0x1b, %%xmm2, %%xmm2 \n"
            "movaps %%xmm1, (%2,%0) \n"
            "movaps %%xmm2, (%2,%1) \n"
            "sub $16, %1 \n"
            "add $16, %0 \n"
            "jl 1b \n"
            :"+r"(i), "+r"(j)
            :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
        );
    }else
#endif
        ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
}

/* dst[i] = src[i] * mul, int -> float, SSE: cvtpi2ps converts via MMX regs  */
/* (two ints at a time), movlhps merges pairs into one xmm vector.           */
static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;
    __asm__ volatile(
        "movss %3, %%xmm4 \n"
        "shufps $0, %%xmm4, %%xmm4 \n"
        "1: \n"
        "cvtpi2ps (%2,%0), %%xmm0 \n"
        "cvtpi2ps 8(%2,%0), %%xmm1 \n"
        "cvtpi2ps 16(%2,%0), %%xmm2 \n"
        "cvtpi2ps 24(%2,%0), %%xmm3 \n"
        "movlhps %%xmm1, %%xmm0 \n"
        "movlhps %%xmm3, %%xmm2 \n"
        "mulps %%xmm4, %%xmm0 \n"
        "mulps %%xmm4, %%xmm2 \n"
        "movaps %%xmm0, (%1,%0) \n"
        "movaps %%xmm2, 16(%1,%0) \n"
        "add $32, %0 \n"
        "jl 1b \n"
        :"+r"(i)
        :"r"(dst+len), "r"(src+len), "m"(mul)
    );
}

/* SSE2 version: cvtdq2ps converts four ints per instruction, no MMX needed. */
static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;
    __asm__ volatile(
        "movss %3, %%xmm4 \n"
        "shufps $0, %%xmm4, %%xmm4 \n"
        "1: \n"
        "cvtdq2ps (%2,%0), %%xmm0 \n"
        "cvtdq2ps 16(%2,%0), %%xmm1 \n"
        "mulps %%xmm4, %%xmm0 \n"
        "mulps %%xmm4, %%xmm1 \n"
        "movaps %%xmm0, (%1,%0) \n"
        "movaps %%xmm1, 16(%1,%0) \n"
        "add $32, %0 \n"
        "jl 1b \n"
        :"+r"(i)
        :"r"(dst+len), "r"(src+len), "m"(mul)
    );
}

/* dst[i] = clip(src[i], min, max): 16 floats per iteration via maxps/minps. */
/* Assumes len is a multiple of 16 and 16-byte aligned buffers.              */
static void vector_clipf_sse(float *dst, const float *src, float min, float max,
                             int len)
{
    x86_reg i = (len-16)*4;
    __asm__ volatile(
        "movss %3, %%xmm4 \n"
        "movss %4, %%xmm5 \n"
        "shufps $0, %%xmm4, %%xmm4 \n"
        "shufps $0, %%xmm5, %%xmm5 \n"
        "1: \n\t"
        "movaps (%2,%0), %%xmm0 \n\t" // 3/1 on intel
        "movaps 16(%2,%0), %%xmm1 \n\t"
        "movaps 32(%2,%0), %%xmm2 \n\t"
        "movaps 48(%2,%0), %%xmm3 \n\t"
        "maxps %%xmm4, %%xmm0 \n\t"
        "maxps %%xmm4, %%xmm1 \n\t"
        "maxps %%xmm4, %%xmm2 \n\t"
        "maxps %%xmm4, %%xmm3 \n\t"
        "minps %%xmm5, %%xmm0 \n\t"
        "minps %%xmm5, %%xmm1 \n\t"
        "minps %%xmm5, %%xmm2 \n\t"
        "minps %%xmm5, %%xmm3 \n\t"
        "movaps %%xmm0, (%1,%0) \n\t"
        "movaps %%xmm1, 16(%1,%0) \n\t"
        "movaps %%xmm2, 32(%1,%0) \n\t"
        "movaps %%xmm3, 48(%1,%0) \n\t"
        "sub $64, %0 \n\t"
        "jge 1b \n\t"
        :"+&r"(i)
        :"r"(dst), "r"(src), "m"(min), "m"(max)
        :"memory"
    );
}

/* float -> int16 with saturation, 3DNow!: 8 samples per iteration. */
static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    // not bit-exact: pf2id uses different rounding than C and SSE
    __asm__ volatile(
        "add %0 , %0 \n\t"
        "lea (%2,%0,2) , %2 \n\t"
        "add %0 , %1 \n\t"
        "neg %0 \n\t"
        "1: \n\t"
        "pf2id (%2,%0,2) , %%mm0 \n\t"
        "pf2id 8(%2,%0,2) , %%mm1 \n\t"
        "pf2id 16(%2,%0,2) , %%mm2 \n\t"
        "pf2id 24(%2,%0,2) , %%mm3 \n\t"
        "packssdw %%mm1 , %%mm0 \n\t"
        "packssdw %%mm3 , %%mm2 \n\t"
        "movq %%mm0 , (%1,%0) \n\t"
        "movq %%mm2 , 8(%1,%0) \n\t"
        "add $16 , %0 \n\t"
        " js 1b \n\t"
        "femms \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}
/* float -> int16, SSE: cvtps2pi rounds per the MXCSR rounding mode. */
static void float_to_int16_sse(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    __asm__ volatile(
        "add %0 , %0 \n\t"
        "lea (%2,%0,2) , %2 \n\t"
        "add %0 , %1 \n\t"
        "neg %0 \n\t"
        "1: \n\t"
        "cvtps2pi (%2,%0,2) , %%mm0 \n\t"
        "cvtps2pi 8(%2,%0,2) , %%mm1 \n\t"
        "cvtps2pi 16(%2,%0,2) , %%mm2 \n\t"
        "cvtps2pi 24(%2,%0,2) , %%mm3 \n\t"
        "packssdw %%mm1 , %%mm0 \n\t"
        "packssdw %%mm3 , %%mm2 \n\t"
        "movq %%mm0 , (%1,%0) \n\t"
        "movq %%mm2 , 8(%1,%0) \n\t"
        "add $16 , %0 \n\t"
        " js 1b \n\t"
        "emms \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}
/* float -> int16, SSE2: fully in xmm registers, 8 samples per iteration. */
static void float_to_int16_sse2(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    __asm__ volatile(
        "add %0 , %0 \n\t"
        "lea (%2,%0,2) , %2 \n\t"
        "add %0 , %1 \n\t"
        "neg %0 \n\t"
        "1: \n\t"
        "cvtps2dq (%2,%0,2) , %%xmm0 \n\t"
        "cvtps2dq 16(%2,%0,2) , %%xmm1 \n\t"
        "packssdw %%xmm1 , %%xmm0 \n\t"
        "movdqa %%xmm0 , (%1,%0) \n\t"
        "add $16 , %0 \n\t"
        " js 1b \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}

/* external asm (yasm) implementations, defined in separate .asm files */
void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len);

int32_t ff_scalarproduct_int16_mmx2(int16_t *v1, int16_t *v2, int order, int shift);
int32_t ff_scalarproduct_int16_sse2(int16_t *v1, int16_t *v2, int order, int shift);
int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul);
int32_t 
ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul); void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top); int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, int w, int left); int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, int left); void ff_x264_deblock_v_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0); void ff_x264_deblock_h_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0); void ff_x264_deblock_h_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta); void ff_x264_deblock_v_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta); void ff_x264_deblock_h_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta); #if HAVE_YASM && ARCH_X86_32 void ff_x264_deblock_v8_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta); static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta) { ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta); ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta); } #elif !HAVE_YASM #define ff_float_to_int16_interleave6_sse(a,b,c) float_to_int16_interleave_misc_sse(a,b,c,6) #define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6) #define ff_float_to_int16_interleave6_3dn2(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6) #endif #define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse #define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \ /* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2*/\ static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\ DECLARE_ALIGNED(16, int16_t, tmp)[len];\ int i,j,c;\ for(c=0; c<channels; c++){\ float_to_int16_##cpu(tmp, src[c], len);\ for(i=0, 
j=c; i<len; i++, j+=channels)\ dst[j] = tmp[i];\ }\ }\ \ static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\ if(channels==1)\ float_to_int16_##cpu(dst, src[0], len);\ else if(channels==2){\ x86_reg reglen = len; \ const float *src0 = src[0];\ const float *src1 = src[1];\ __asm__ volatile(\ "shl $2, %0 \n"\ "add %0, %1 \n"\ "add %0, %2 \n"\ "add %0, %3 \n"\ "neg %0 \n"\ body\ :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\ );\ }else if(channels==6){\ ff_float_to_int16_interleave6_##cpu(dst, src, len);\ }else\ float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\ } FLOAT_TO_INT16_INTERLEAVE(3dnow, "1: \n" "pf2id (%2,%0), %%mm0 \n" "pf2id 8(%2,%0), %%mm1 \n" "pf2id (%3,%0), %%mm2 \n" "pf2id 8(%3,%0), %%mm3 \n" "packssdw %%mm1, %%mm0 \n" "packssdw %%mm3, %%mm2 \n" "movq %%mm0, %%mm1 \n" "punpcklwd %%mm2, %%mm0 \n" "punpckhwd %%mm2, %%mm1 \n" "movq %%mm0, (%1,%0)\n" "movq %%mm1, 8(%1,%0)\n" "add $16, %0 \n" "js 1b \n" "femms \n" ) FLOAT_TO_INT16_INTERLEAVE(sse, "1: \n" "cvtps2pi (%2,%0), %%mm0 \n" "cvtps2pi 8(%2,%0), %%mm1 \n" "cvtps2pi (%3,%0), %%mm2 \n" "cvtps2pi 8(%3,%0), %%mm3 \n" "packssdw %%mm1, %%mm0 \n" "packssdw %%mm3, %%mm2 \n" "movq %%mm0, %%mm1 \n" "punpcklwd %%mm2, %%mm0 \n" "punpckhwd %%mm2, %%mm1 \n" "movq %%mm0, (%1,%0)\n" "movq %%mm1, 8(%1,%0)\n" "add $16, %0 \n" "js 1b \n" "emms \n" ) FLOAT_TO_INT16_INTERLEAVE(sse2, "1: \n" "cvtps2dq (%2,%0), %%xmm0 \n" "cvtps2dq (%3,%0), %%xmm1 \n" "packssdw %%xmm1, %%xmm0 \n" "movhlps %%xmm0, %%xmm1 \n" "punpcklwd %%xmm1, %%xmm0 \n" "movdqa %%xmm0, (%1,%0) \n" "add $16, %0 \n" "js 1b \n" ) static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){ if(channels==6) ff_float_to_int16_interleave6_3dn2(dst, src, len); else float_to_int16_interleave_3dnow(dst, src, len, channels); } float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order); void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) 
{ mm_flags = mm_support(); if (avctx->dsp_mask) { if (avctx->dsp_mask & FF_MM_FORCE) mm_flags |= (avctx->dsp_mask & 0xffff); else mm_flags &= ~(avctx->dsp_mask & 0xffff); } #if 0 av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:"); if (mm_flags & FF_MM_MMX) av_log(avctx, AV_LOG_INFO, " mmx"); if (mm_flags & FF_MM_MMX2) av_log(avctx, AV_LOG_INFO, " mmx2"); if (mm_flags & FF_MM_3DNOW) av_log(avctx, AV_LOG_INFO, " 3dnow"); if (mm_flags & FF_MM_SSE) av_log(avctx, AV_LOG_INFO, " sse"); if (mm_flags & FF_MM_SSE2) av_log(avctx, AV_LOG_INFO, " sse2"); av_log(avctx, AV_LOG_INFO, "\n"); #endif if (mm_flags & FF_MM_MMX) { const int idct_algo= avctx->idct_algo; if(avctx->lowres==0){ if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){ c->idct_put= ff_simple_idct_put_mmx; c->idct_add= ff_simple_idct_add_mmx; c->idct = ff_simple_idct_mmx; c->idct_permutation_type= FF_SIMPLE_IDCT_PERM; #if CONFIG_GPL }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){ if(mm_flags & FF_MM_MMX2){ c->idct_put= ff_libmpeg2mmx2_idct_put; c->idct_add= ff_libmpeg2mmx2_idct_add; c->idct = ff_mmxext_idct; }else{ c->idct_put= ff_libmpeg2mmx_idct_put; c->idct_add= ff_libmpeg2mmx_idct_add; c->idct = ff_mmx_idct; } c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM; #endif }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) && idct_algo==FF_IDCT_VP3){ if(mm_flags & FF_MM_SSE2){ c->idct_put= ff_vp3_idct_put_sse2; c->idct_add= ff_vp3_idct_add_sse2; c->idct = ff_vp3_idct_sse2; c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM; }else{ c->idct_put= ff_vp3_idct_put_mmx; c->idct_add= ff_vp3_idct_add_mmx; c->idct = ff_vp3_idct_mmx; c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM; } }else if(idct_algo==FF_IDCT_CAVS){ c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM; }else if(idct_algo==FF_IDCT_XVIDMMX){ if(mm_flags & FF_MM_SSE2){ c->idct_put= ff_idct_xvid_sse2_put; c->idct_add= ff_idct_xvid_sse2_add; c->idct = ff_idct_xvid_sse2; c->idct_permutation_type= FF_SSE2_IDCT_PERM; }else if(mm_flags & 
FF_MM_MMX2){ c->idct_put= ff_idct_xvid_mmx2_put; c->idct_add= ff_idct_xvid_mmx2_add; c->idct = ff_idct_xvid_mmx2; }else{ c->idct_put= ff_idct_xvid_mmx_put; c->idct_add= ff_idct_xvid_mmx_add; c->idct = ff_idct_xvid_mmx; } } } c->put_pixels_clamped = put_pixels_clamped_mmx; c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx; c->add_pixels_clamped = add_pixels_clamped_mmx; c->clear_block = clear_block_mmx; c->clear_blocks = clear_blocks_mmx; if ((mm_flags & FF_MM_SSE) && !(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){ /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */ c->clear_block = clear_block_sse; c->clear_blocks = clear_blocks_sse; } #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \ c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \ c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \ c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \ c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU SET_HPEL_FUNCS(put, 0, 16, mmx); SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx); SET_HPEL_FUNCS(avg, 0, 16, mmx); SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx); SET_HPEL_FUNCS(put, 1, 8, mmx); SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx); SET_HPEL_FUNCS(avg, 1, 8, mmx); SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx); c->gmc= gmc_mmx; c->add_bytes= add_bytes_mmx; c->add_bytes_l2= add_bytes_l2_mmx; c->draw_edges = draw_edges_mmx; if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) { c->h263_v_loop_filter= h263_v_loop_filter_mmx; c->h263_h_loop_filter= h263_h_loop_filter_mmx; } c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd; c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx; c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_vc1_chroma_mc8_mmx_nornd; c->put_rv40_chroma_pixels_tab[0]= put_rv40_chroma_mc8_mmx; c->put_rv40_chroma_pixels_tab[1]= put_rv40_chroma_mc4_mmx; if (CONFIG_VP6_DECODER) { c->vp6_filter_diag4 = ff_vp6_filter_diag4_mmx; } if (mm_flags & FF_MM_MMX2) { c->prefetch = 
prefetch_mmx2; c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2; c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2; c->avg_pixels_tab[0][0] = avg_pixels16_mmx2; c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2; c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2; c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2; c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2; c->avg_pixels_tab[1][0] = avg_pixels8_mmx2; c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2; c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2; if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2; c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2; c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2; c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2; c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2; c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2; if (CONFIG_VP3_DECODER) { c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2; c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2; } } if (CONFIG_VP3_DECODER) { c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2; } #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \ c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \ c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \ c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \ c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \ c->PFX ## _pixels_tab[IDX][13] = PFX 
## SIZE ## _mc13_ ## CPU; \ c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \ c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2); SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2); SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2); SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2); SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2); SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2); SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2); SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2); SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2); SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2); SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2); SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2); SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2); SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2); SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2); SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2); c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_mmx2; c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_mmx2; c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_mmx2_nornd; c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd; c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2; c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2; c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2; #if HAVE_YASM c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2; #endif #if HAVE_7REGS && HAVE_TEN_OPERANDS if( mm_flags&FF_MM_3DNOW ) c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov; #endif if (CONFIG_CAVS_DECODER) ff_cavsdsp_init_mmx2(c, avctx); if (CONFIG_VC1_DECODER) ff_vc1dsp_init_mmx(c, avctx); c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2; } else if (mm_flags & FF_MM_3DNOW) { c->prefetch = prefetch_3dnow; c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow; c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow; c->avg_pixels_tab[0][0] = avg_pixels16_3dnow; c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow; c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow; c->put_pixels_tab[1][1] = 
put_pixels8_x2_3dnow; c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow; c->avg_pixels_tab[1][0] = avg_pixels8_3dnow; c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow; c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow; if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow; c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow; c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow; c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow; c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow; c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow; } SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow); SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow); SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow); SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow); SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow); SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow); SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow); SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow); SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow); SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow); SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow); SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow); SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow); SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow); SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow); SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow); c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd; c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow; c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_3dnow; c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_3dnow; if (CONFIG_CAVS_DECODER) ff_cavsdsp_init_3dnow(c, avctx); } #define H264_QPEL_FUNCS(x, y, CPU)\ c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\ c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\ c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\ c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU; if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){ 
// these functions are slower than mmx on AMD, but faster on Intel c->put_pixels_tab[0][0] = put_pixels16_sse2; c->avg_pixels_tab[0][0] = avg_pixels16_sse2; H264_QPEL_FUNCS(0, 0, sse2); } if(mm_flags & FF_MM_SSE2){ H264_QPEL_FUNCS(0, 1, sse2); H264_QPEL_FUNCS(0, 2, sse2); H264_QPEL_FUNCS(0, 3, sse2); H264_QPEL_FUNCS(1, 1, sse2); H264_QPEL_FUNCS(1, 2, sse2); H264_QPEL_FUNCS(1, 3, sse2); H264_QPEL_FUNCS(2, 1, sse2); H264_QPEL_FUNCS(2, 2, sse2); H264_QPEL_FUNCS(2, 3, sse2); H264_QPEL_FUNCS(3, 1, sse2); H264_QPEL_FUNCS(3, 2, sse2); H264_QPEL_FUNCS(3, 3, sse2); if (CONFIG_VP6_DECODER) { c->vp6_filter_diag4 = ff_vp6_filter_diag4_sse2; } } #if HAVE_SSSE3 if(mm_flags & FF_MM_SSSE3){ H264_QPEL_FUNCS(1, 0, ssse3); H264_QPEL_FUNCS(1, 1, ssse3); H264_QPEL_FUNCS(1, 2, ssse3); H264_QPEL_FUNCS(1, 3, ssse3); H264_QPEL_FUNCS(2, 0, ssse3); H264_QPEL_FUNCS(2, 1, ssse3); H264_QPEL_FUNCS(2, 2, ssse3); H264_QPEL_FUNCS(2, 3, ssse3); H264_QPEL_FUNCS(3, 0, ssse3); H264_QPEL_FUNCS(3, 1, ssse3); H264_QPEL_FUNCS(3, 2, ssse3); H264_QPEL_FUNCS(3, 3, ssse3); c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_vc1_chroma_mc8_ssse3_nornd; c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_ssse3_nornd; c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd; c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd; c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3; c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3; c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3; #if HAVE_YASM c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3; if (mm_flags & FF_MM_SSE4) // not really sse4, just slow on Conroe c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4; #endif } #endif if(mm_flags & FF_MM_3DNOW){ c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow; c->vector_fmul = vector_fmul_3dnow; if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->float_to_int16 = float_to_int16_3dnow; c->float_to_int16_interleave = float_to_int16_interleave_3dnow; 
} } if(mm_flags & FF_MM_3DNOWEXT){ c->vector_fmul_reverse = vector_fmul_reverse_3dnow2; c->vector_fmul_window = vector_fmul_window_3dnow2; if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->float_to_int16_interleave = float_to_int16_interleave_3dn2; } } if(mm_flags & FF_MM_MMX2){ #if HAVE_YASM c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2; c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2; #endif } if(mm_flags & FF_MM_SSE){ c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse; c->ac3_downmix = ac3_downmix_sse; c->vector_fmul = vector_fmul_sse; c->vector_fmul_reverse = vector_fmul_reverse_sse; c->vector_fmul_add = vector_fmul_add_sse; c->vector_fmul_window = vector_fmul_window_sse; c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse; c->vector_clipf = vector_clipf_sse; c->float_to_int16 = float_to_int16_sse; c->float_to_int16_interleave = float_to_int16_interleave_sse; #if HAVE_YASM c->scalarproduct_float = ff_scalarproduct_float_sse; #endif } if(mm_flags & FF_MM_3DNOW) c->vector_fmul_add = vector_fmul_add_3dnow; // faster than sse if(mm_flags & FF_MM_SSE2){ c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2; c->float_to_int16 = float_to_int16_sse2; c->float_to_int16_interleave = float_to_int16_interleave_sse2; #if HAVE_YASM c->scalarproduct_int16 = ff_scalarproduct_int16_sse2; c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2; #endif } if((mm_flags & FF_MM_SSSE3) && !(mm_flags & (FF_MM_SSE42|FF_MM_3DNOW)) && HAVE_YASM) // cachesplit c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3; } if (CONFIG_ENCODERS) dsputilenc_init_mmx(c, avctx); #if 0 // for speed testing get_pixels = just_return; put_pixels_clamped = just_return; add_pixels_clamped = just_return; pix_abs16x16 = just_return; pix_abs16x16_x2 = just_return; pix_abs16x16_y2 = just_return; pix_abs16x16_xy2 = just_return; put_pixels_tab[0] = just_return; put_pixels_tab[1] = just_return; put_pixels_tab[2] = just_return; 
put_pixels_tab[3] = just_return; put_no_rnd_pixels_tab[0] = just_return; put_no_rnd_pixels_tab[1] = just_return; put_no_rnd_pixels_tab[2] = just_return; put_no_rnd_pixels_tab[3] = just_return; avg_pixels_tab[0] = just_return; avg_pixels_tab[1] = just_return; avg_pixels_tab[2] = just_return; avg_pixels_tab[3] = just_return; avg_no_rnd_pixels_tab[0] = just_return; avg_no_rnd_pixels_tab[1] = just_return; avg_no_rnd_pixels_tab[2] = just_return; avg_no_rnd_pixels_tab[3] = just_return; //av_fdct = just_return; //ff_idct = just_return; #endif } #if CONFIG_H264DSP void ff_h264dsp_init_x86(H264DSPContext *c) { mm_flags = mm_support(); if (mm_flags & FF_MM_MMX) { c->h264_idct_dc_add= c->h264_idct_add= ff_h264_idct_add_mmx; c->h264_idct8_dc_add= c->h264_idct8_add= ff_h264_idct8_add_mmx; c->h264_idct_add16 = ff_h264_idct_add16_mmx; c->h264_idct8_add4 = ff_h264_idct8_add4_mmx; c->h264_idct_add8 = ff_h264_idct_add8_mmx; c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx; if (mm_flags & FF_MM_MMX2) { c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2; c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2; c->h264_idct_add16 = ff_h264_idct_add16_mmx2; c->h264_idct8_add4 = ff_h264_idct8_add4_mmx2; c->h264_idct_add8 = ff_h264_idct_add8_mmx2; c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2; c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2; c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2; c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2; c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2; c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2; c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2; c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2; c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2; c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2; c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2; c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2; c->weight_h264_pixels_tab[4]= 
ff_h264_weight_8x4_mmx2; c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2; c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2; c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2; c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2; c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2; c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2; c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2; c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2; c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2; c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2; c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2; } if(mm_flags & FF_MM_SSE2){ c->h264_idct8_add = ff_h264_idct8_add_sse2; c->h264_idct8_add4= ff_h264_idct8_add4_sse2; } #if CONFIG_GPL && HAVE_YASM if (mm_flags & FF_MM_MMX2){ #if ARCH_X86_32 c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext; c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext; #endif if( mm_flags&FF_MM_SSE2 ){ #if ARCH_X86_64 || !defined(__ICC) || __ICC > 1110 c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2; c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2; c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2; c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2; #endif c->h264_idct_add16 = ff_h264_idct_add16_sse2; c->h264_idct_add8 = ff_h264_idct_add8_sse2; c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2; } } #endif } } #endif /* CONFIG_H264DSP */
123linslouis-android-video-cutter
jni/libavcodec/x86/dsputil_mmx.c
C
asf20
125,665
;***************************************************************************** ;* x86inc.asm ;***************************************************************************** ;* Copyright (C) 2005-2008 Loren Merritt <lorenm@u.washington.edu> ;* ;* This file is part of FFmpeg. ;* ;* FFmpeg is free software; you can redistribute it and/or ;* modify it under the terms of the GNU Lesser General Public ;* License as published by the Free Software Foundation; either ;* version 2.1 of the License, or (at your option) any later version. ;* ;* FFmpeg is distributed in the hope that it will be useful, ;* but WITHOUT ANY WARRANTY; without even the implied warranty of ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ;* Lesser General Public License for more details. ;* ;* You should have received a copy of the GNU Lesser General Public ;* License along with FFmpeg; if not, write to the Free Software ;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;***************************************************************************** %ifdef ARCH_X86_64 %ifidn __OUTPUT_FORMAT__,win32 %define WIN64 %else %define UNIX64 %endif %endif ; FIXME: All of the 64bit asm functions that take a stride as an argument ; via register, assume that the high dword of that register is filled with 0. ; This is true in practice (since we never do any 64bit arithmetic on strides, ; and x264's strides are all positive), but is not guaranteed by the ABI. ; Name of the .rodata section. ; Kludge: Something on OS X fails to align .rodata even given an align attribute, ; so use a different read-only section. %macro SECTION_RODATA 0-1 16 %ifidn __OUTPUT_FORMAT__,macho64 SECTION .text align=%1 %elifidn __OUTPUT_FORMAT__,macho SECTION .text align=%1 fakegot: %else SECTION .rodata align=%1 %endif %endmacro ; PIC support macros. 
; x86_64 can't fit 64bit address literals in most instruction types, ; so shared objects (under the assumption that they might be anywhere ; in memory) must use an address mode that does fit. ; So all accesses to global variables must use this macro, e.g. ; mov eax, [foo GLOBAL] ; instead of ; mov eax, [foo] ; ; x86_32 doesn't require PIC. ; Some distros prefer shared objects to be PIC, but nothing breaks if ; the code contains a few textrels, so we'll skip that complexity. %ifdef WIN64 %define PIC %elifndef ARCH_X86_64 %undef PIC %endif %ifdef PIC %define GLOBAL wrt rip %else %define GLOBAL %endif ; Macros to eliminate most code duplication between x86_32 and x86_64: ; Currently this works only for leaf functions which load all their arguments ; into registers at the start, and make no other use of the stack. Luckily that ; covers most of x264's asm. ; PROLOGUE: ; %1 = number of arguments. loads them from stack if needed. ; %2 = number of registers used. pushes callee-saved regs if needed. ; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed. ; %4 = list of names to define to registers ; PROLOGUE can also be invoked by adding the same options to cglobal ; e.g. ; cglobal foo, 2,3,0, dst, src, tmp ; declares a function (foo), taking two args (dst and src) and one local variable (tmp) ; TODO Some functions can use some args directly from the stack. If they're the ; last args then you can just not declare them, but if they're in the middle ; we need more flexible macro. ; RET: ; Pops anything that was pushed by PROLOGUE ; REP_RET: ; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons ; which are slow when a normal ret follows a branch. 
; registers: ; rN and rNq are the native-size register holding function argument N ; rNd, rNw, rNb are dword, word, and byte size ; rNm is the original location of arg N (a register or on the stack), dword ; rNmp is native size %macro DECLARE_REG 6 %define r%1q %2 %define r%1d %3 %define r%1w %4 %define r%1b %5 %define r%1m %6 %ifid %6 ; i.e. it's a register %define r%1mp %2 %elifdef ARCH_X86_64 ; memory %define r%1mp qword %6 %else %define r%1mp dword %6 %endif %define r%1 %2 %endmacro %macro DECLARE_REG_SIZE 2 %define r%1q r%1 %define e%1q r%1 %define r%1d e%1 %define e%1d e%1 %define r%1w %1 %define e%1w %1 %define r%1b %2 %define e%1b %2 %ifndef ARCH_X86_64 %define r%1 e%1 %endif %endmacro DECLARE_REG_SIZE ax, al DECLARE_REG_SIZE bx, bl DECLARE_REG_SIZE cx, cl DECLARE_REG_SIZE dx, dl DECLARE_REG_SIZE si, sil DECLARE_REG_SIZE di, dil DECLARE_REG_SIZE bp, bpl ; t# defines for when per-arch register allocation is more complex than just function arguments %macro DECLARE_REG_TMP 1-* %assign %%i 0 %rep %0 CAT_XDEFINE t, %%i, r%1 %assign %%i %%i+1 %rotate 1 %endrep %endmacro %macro DECLARE_REG_TMP_SIZE 0-* %rep %0 %define t%1q t%1 %+ q %define t%1d t%1 %+ d %define t%1w t%1 %+ w %define t%1b t%1 %+ b %rotate 1 %endrep %endmacro DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7 %ifdef ARCH_X86_64 %define gprsize 8 %else %define gprsize 4 %endif %macro PUSH 1 push %1 %assign stack_offset stack_offset+gprsize %endmacro %macro POP 1 pop %1 %assign stack_offset stack_offset-gprsize %endmacro %macro SUB 2 sub %1, %2 %ifidn %1, rsp %assign stack_offset stack_offset+(%2) %endif %endmacro %macro ADD 2 add %1, %2 %ifidn %1, rsp %assign stack_offset stack_offset-(%2) %endif %endmacro %macro movifnidn 2 %ifnidn %1, %2 mov %1, %2 %endif %endmacro %macro movsxdifnidn 2 %ifnidn %1, %2 movsxd %1, %2 %endif %endmacro %macro ASSERT 1 %if (%1) == 0 %error assert failed %endif %endmacro %macro DEFINE_ARGS 0-* %ifdef n_arg_names %assign %%i 0 %rep n_arg_names CAT_UNDEF arg_name %+ %%i, q CAT_UNDEF 
arg_name %+ %%i, d CAT_UNDEF arg_name %+ %%i, w CAT_UNDEF arg_name %+ %%i, b CAT_UNDEF arg_name %+ %%i, m CAT_UNDEF arg_name, %%i %assign %%i %%i+1 %endrep %endif %assign %%i 0 %rep %0 %xdefine %1q r %+ %%i %+ q %xdefine %1d r %+ %%i %+ d %xdefine %1w r %+ %%i %+ w %xdefine %1b r %+ %%i %+ b %xdefine %1m r %+ %%i %+ m CAT_XDEFINE arg_name, %%i, %1 %assign %%i %%i+1 %rotate 1 %endrep %assign n_arg_names %%i %endmacro %ifdef WIN64 ; Windows x64 ;================================================= DECLARE_REG 0, rcx, ecx, cx, cl, ecx DECLARE_REG 1, rdx, edx, dx, dl, edx DECLARE_REG 2, r8, r8d, r8w, r8b, r8d DECLARE_REG 3, r9, r9d, r9w, r9b, r9d DECLARE_REG 4, rdi, edi, di, dil, [rsp + stack_offset + 40] DECLARE_REG 5, rsi, esi, si, sil, [rsp + stack_offset + 48] DECLARE_REG 6, rax, eax, ax, al, [rsp + stack_offset + 56] %define r7m [rsp + stack_offset + 64] %define r8m [rsp + stack_offset + 72] %macro LOAD_IF_USED 2 ; reg_id, number_of_args %if %1 < %2 mov r%1, [rsp + stack_offset + 8 + %1*8] %endif %endmacro %macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names... 
ASSERT %2 >= %1 %assign regs_used %2 ASSERT regs_used <= 7 %if %0 > 2 %assign xmm_regs_used %3 %else %assign xmm_regs_used 0 %endif ASSERT xmm_regs_used <= 16 %if regs_used > 4 push r4 push r5 %assign stack_offset stack_offset+16 %endif %if xmm_regs_used > 6 sub rsp, (xmm_regs_used-6)*16+16 %assign stack_offset stack_offset+(xmm_regs_used-6)*16+16 %assign %%i xmm_regs_used %rep (xmm_regs_used-6) %assign %%i %%i-1 movdqa [rsp + (%%i-6)*16+8], xmm %+ %%i %endrep %endif LOAD_IF_USED 4, %1 LOAD_IF_USED 5, %1 LOAD_IF_USED 6, %1 DEFINE_ARGS %4 %endmacro %macro RESTORE_XMM_INTERNAL 1 %if xmm_regs_used > 6 %assign %%i xmm_regs_used %rep (xmm_regs_used-6) %assign %%i %%i-1 movdqa xmm %+ %%i, [%1 + (%%i-6)*16+8] %endrep add %1, (xmm_regs_used-6)*16+16 %endif %endmacro %macro RESTORE_XMM 1 RESTORE_XMM_INTERNAL %1 %assign stack_offset stack_offset-(xmm_regs_used-6)*16+16 %assign xmm_regs_used 0 %endmacro %macro RET 0 RESTORE_XMM_INTERNAL rsp %if regs_used > 4 pop r5 pop r4 %endif ret %endmacro %macro REP_RET 0 %if regs_used > 4 || xmm_regs_used > 6 RET %else rep ret %endif %endmacro %elifdef ARCH_X86_64 ; *nix x64 ;============================================= DECLARE_REG 0, rdi, edi, di, dil, edi DECLARE_REG 1, rsi, esi, si, sil, esi DECLARE_REG 2, rdx, edx, dx, dl, edx DECLARE_REG 3, rcx, ecx, cx, cl, ecx DECLARE_REG 4, r8, r8d, r8w, r8b, r8d DECLARE_REG 5, r9, r9d, r9w, r9b, r9d DECLARE_REG 6, rax, eax, ax, al, [rsp + stack_offset + 8] %define r7m [rsp + stack_offset + 16] %define r8m [rsp + stack_offset + 24] %macro LOAD_IF_USED 2 ; reg_id, number_of_args %if %1 < %2 mov r%1, [rsp - 40 + %1*8] %endif %endmacro %macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names... 
ASSERT %2 >= %1 ASSERT %2 <= 7 LOAD_IF_USED 6, %1 DEFINE_ARGS %4 %endmacro %macro RET 0 ret %endmacro %macro REP_RET 0 rep ret %endmacro %else ; X86_32 ;============================================================== DECLARE_REG 0, eax, eax, ax, al, [esp + stack_offset + 4] DECLARE_REG 1, ecx, ecx, cx, cl, [esp + stack_offset + 8] DECLARE_REG 2, edx, edx, dx, dl, [esp + stack_offset + 12] DECLARE_REG 3, ebx, ebx, bx, bl, [esp + stack_offset + 16] DECLARE_REG 4, esi, esi, si, null, [esp + stack_offset + 20] DECLARE_REG 5, edi, edi, di, null, [esp + stack_offset + 24] DECLARE_REG 6, ebp, ebp, bp, null, [esp + stack_offset + 28] %define r7m [esp + stack_offset + 32] %define r8m [esp + stack_offset + 36] %define rsp esp %macro PUSH_IF_USED 1 ; reg_id %if %1 < regs_used push r%1 %assign stack_offset stack_offset+4 %endif %endmacro %macro POP_IF_USED 1 ; reg_id %if %1 < regs_used pop r%1 %endif %endmacro %macro LOAD_IF_USED 2 ; reg_id, number_of_args %if %1 < %2 mov r%1, [esp + stack_offset + 4 + %1*4] %endif %endmacro %macro PROLOGUE 2-4+ ; #args, #regs, arg_names... 
ASSERT %2 >= %1 %assign regs_used %2 ASSERT regs_used <= 7 PUSH_IF_USED 3 PUSH_IF_USED 4 PUSH_IF_USED 5 PUSH_IF_USED 6 LOAD_IF_USED 0, %1 LOAD_IF_USED 1, %1 LOAD_IF_USED 2, %1 LOAD_IF_USED 3, %1 LOAD_IF_USED 4, %1 LOAD_IF_USED 5, %1 LOAD_IF_USED 6, %1 DEFINE_ARGS %4 %endmacro %macro RET 0 POP_IF_USED 6 POP_IF_USED 5 POP_IF_USED 4 POP_IF_USED 3 ret %endmacro %macro REP_RET 0 %if regs_used > 3 RET %else rep ret %endif %endmacro %endif ;====================================================================== ;============================================================================= ; arch-independent part ;============================================================================= %assign function_align 16 ; Symbol prefix for C linkage %macro cglobal 1-2+ %xdefine %1 ff_%1 %ifdef PREFIX %xdefine %1 _ %+ %1 %endif %xdefine %1.skip_prologue %1 %+ .skip_prologue %ifidn __OUTPUT_FORMAT__,elf global %1:function hidden %else global %1 %endif align function_align %1: RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer %assign stack_offset 0 %if %0 > 1 PROLOGUE %2 %endif %endmacro %macro cextern 1 %ifdef PREFIX %xdefine %1 _%1 %endif extern %1 %endmacro ; This is needed for ELF, otherwise the GNU linker assumes the stack is ; executable by default. 
%ifidn __OUTPUT_FORMAT__,elf SECTION .note.GNU-stack noalloc noexec nowrite progbits %endif %assign FENC_STRIDE 16 %assign FDEC_STRIDE 32 ; merge mmx and sse* %macro CAT_XDEFINE 3 %xdefine %1%2 %3 %endmacro %macro CAT_UNDEF 2 %undef %1%2 %endmacro %macro INIT_MMX 0 %define RESET_MM_PERMUTATION INIT_MMX %define mmsize 8 %define num_mmregs 8 %define mova movq %define movu movq %define movh movd %define movnt movntq %assign %%i 0 %rep 8 CAT_XDEFINE m, %%i, mm %+ %%i CAT_XDEFINE nmm, %%i, %%i %assign %%i %%i+1 %endrep %rep 8 CAT_UNDEF m, %%i CAT_UNDEF nmm, %%i %assign %%i %%i+1 %endrep %endmacro %macro INIT_XMM 0 %define RESET_MM_PERMUTATION INIT_XMM %define mmsize 16 %define num_mmregs 8 %ifdef ARCH_X86_64 %define num_mmregs 16 %endif %define mova movdqa %define movu movdqu %define movh movq %define movnt movntdq %assign %%i 0 %rep num_mmregs CAT_XDEFINE m, %%i, xmm %+ %%i CAT_XDEFINE nxmm, %%i, %%i %assign %%i %%i+1 %endrep %endmacro INIT_MMX ; I often want to use macros that permute their arguments. e.g. there's no ; efficient way to implement butterfly or transpose or dct without swapping some ; arguments. ; ; I would like to not have to manually keep track of the permutations: ; If I insert a permutation in the middle of a function, it should automatically ; change everything that follows. For more complex macros I may also have multiple ; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations. ; ; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that ; permutes its arguments. It's equivalent to exchanging the contents of the ; registers, except that this way you exchange the register names instead, so it ; doesn't cost any cycles. 
%macro PERMUTE 2-* ; takes a list of pairs to swap %rep %0/2 %xdefine tmp%2 m%2 %xdefine ntmp%2 nm%2 %rotate 2 %endrep %rep %0/2 %xdefine m%1 tmp%2 %xdefine nm%1 ntmp%2 %undef tmp%2 %undef ntmp%2 %rotate 2 %endrep %endmacro %macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs) %rep %0-1 %ifdef m%1 %xdefine tmp m%1 %xdefine m%1 m%2 %xdefine m%2 tmp CAT_XDEFINE n, m%1, %1 CAT_XDEFINE n, m%2, %2 %else ; If we were called as "SWAP m0,m1" rather than "SWAP 0,1" infer the original numbers here. ; Be careful using this mode in nested macros though, as in some cases there may be ; other copies of m# that have already been dereferenced and don't get updated correctly. %xdefine %%n1 n %+ %1 %xdefine %%n2 n %+ %2 %xdefine tmp m %+ %%n1 CAT_XDEFINE m, %%n1, m %+ %%n2 CAT_XDEFINE m, %%n2, tmp CAT_XDEFINE n, m %+ %%n1, %%n1 CAT_XDEFINE n, m %+ %%n2, %%n2 %endif %undef tmp %rotate 1 %endrep %endmacro %macro SAVE_MM_PERMUTATION 1 %assign %%i 0 %rep num_mmregs CAT_XDEFINE %1_m, %%i, m %+ %%i %assign %%i %%i+1 %endrep %endmacro %macro LOAD_MM_PERMUTATION 1 %assign %%i 0 %rep num_mmregs CAT_XDEFINE m, %%i, %1_m %+ %%i CAT_XDEFINE n, m %+ %%i, %%i %assign %%i %%i+1 %endrep %endmacro %macro call 1 call %1 %ifdef %1_m0 LOAD_MM_PERMUTATION %1 %endif %endmacro ;Substitutions that reduce instruction size but are functionally equivalent %macro add 2 %ifnum %2 %if %2==128 sub %1, -128 %else add %1, %2 %endif %else add %1, %2 %endif %endmacro %macro sub 2 %ifnum %2 %if %2==128 add %1, -128 %else sub %1, %2 %endif %else sub %1, %2 %endif %endmacro
123linslouis-android-video-cutter
jni/libavcodec/x86/x86inc.asm
Assembly
asf20
15,737
/* * DSP utils : QNS functions are compiled 3 times for mmx/3dnow/ssse3 * Copyright (c) 2004 Michael Niedermayer * * MMX optimization by Michael Niedermayer <michaelni@gmx.at> * 3DNow! and SSSE3 optimization by Zuxy Meng <zuxy.meng@gmail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #define MAX_ABS (512 >> (SCALE_OFFSET>0 ? 
SCALE_OFFSET : 0)) static int DEF(try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale) { x86_reg i=0; assert(FFABS(scale) < MAX_ABS); scale<<= 16 + SCALE_OFFSET - BASIS_SHIFT + RECON_SHIFT; SET_RND(mm6); __asm__ volatile( "pxor %%mm7, %%mm7 \n\t" "movd %4, %%mm5 \n\t" "punpcklwd %%mm5, %%mm5 \n\t" "punpcklwd %%mm5, %%mm5 \n\t" ASMALIGN(4) "1: \n\t" "movq (%1, %0), %%mm0 \n\t" "movq 8(%1, %0), %%mm1 \n\t" PMULHRW(%%mm0, %%mm1, %%mm5, %%mm6) "paddw (%2, %0), %%mm0 \n\t" "paddw 8(%2, %0), %%mm1 \n\t" "psraw $6, %%mm0 \n\t" "psraw $6, %%mm1 \n\t" "pmullw (%3, %0), %%mm0 \n\t" "pmullw 8(%3, %0), %%mm1 \n\t" "pmaddwd %%mm0, %%mm0 \n\t" "pmaddwd %%mm1, %%mm1 \n\t" "paddd %%mm1, %%mm0 \n\t" "psrld $4, %%mm0 \n\t" "paddd %%mm0, %%mm7 \n\t" "add $16, %0 \n\t" "cmp $128, %0 \n\t" //FIXME optimize & bench " jb 1b \n\t" PHADDD(%%mm7, %%mm6) "psrld $2, %%mm7 \n\t" "movd %%mm7, %0 \n\t" : "+r" (i) : "r"(basis), "r"(rem), "r"(weight), "g"(scale) ); return i; } static void DEF(add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale) { x86_reg i=0; if(FFABS(scale) < MAX_ABS){ scale<<= 16 + SCALE_OFFSET - BASIS_SHIFT + RECON_SHIFT; SET_RND(mm6); __asm__ volatile( "movd %3, %%mm5 \n\t" "punpcklwd %%mm5, %%mm5 \n\t" "punpcklwd %%mm5, %%mm5 \n\t" ASMALIGN(4) "1: \n\t" "movq (%1, %0), %%mm0 \n\t" "movq 8(%1, %0), %%mm1 \n\t" PMULHRW(%%mm0, %%mm1, %%mm5, %%mm6) "paddw (%2, %0), %%mm0 \n\t" "paddw 8(%2, %0), %%mm1 \n\t" "movq %%mm0, (%2, %0) \n\t" "movq %%mm1, 8(%2, %0) \n\t" "add $16, %0 \n\t" "cmp $128, %0 \n\t" // FIXME optimize & bench " jb 1b \n\t" : "+r" (i) : "r"(basis), "r"(rem), "g"(scale) ); }else{ for(i=0; i<8*8; i++){ rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT); } } }
123linslouis-android-video-cutter
jni/libavcodec/x86/dsputil_mmx_qns_template.c
C
asf20
3,803
/** * @file * SSE2-optimized functions for the VP6 decoder * * Copyright (C) 2009 Zuxy Meng <zuxy.meng@gmail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/x86_cpu.h" #include "libavcodec/dsputil.h" #include "dsputil_mmx.h" #include "vp6dsp_sse2.h" #define DIAG4_SSE2(in1,in2,in3,in4) \ "movq "#in1"(%0), %%xmm0 \n\t" \ "movq "#in2"(%0), %%xmm1 \n\t" \ "punpcklbw %%xmm7, %%xmm0 \n\t" \ "punpcklbw %%xmm7, %%xmm1 \n\t" \ "pmullw %%xmm4, %%xmm0 \n\t" /* src[x-8 ] * biweight [0] */ \ "pmullw %%xmm5, %%xmm1 \n\t" /* src[x ] * biweight [1] */ \ "paddw %%xmm1, %%xmm0 \n\t" \ "movq "#in3"(%0), %%xmm1 \n\t" \ "movq "#in4"(%0), %%xmm2 \n\t" \ "punpcklbw %%xmm7, %%xmm1 \n\t" \ "punpcklbw %%xmm7, %%xmm2 \n\t" \ "pmullw %%xmm6, %%xmm1 \n\t" /* src[x+8 ] * biweight [2] */ \ "pmullw %%xmm3, %%xmm2 \n\t" /* src[x+16] * biweight [3] */ \ "paddw %%xmm2, %%xmm1 \n\t" \ "paddsw %%xmm1, %%xmm0 \n\t" \ "paddsw "MANGLE(ff_pw_64)", %%xmm0 \n\t" /* Add 64 */ \ "psraw $7, %%xmm0 \n\t" \ "packuswb %%xmm0, %%xmm0 \n\t" \ "movq %%xmm0, (%1) \n\t" \ void ff_vp6_filter_diag4_sse2(uint8_t *dst, uint8_t *src, int stride, const int16_t *h_weights,const int16_t *v_weights) { uint8_t tmp[8*11], *t = tmp; src -= stride; __asm__ volatile( "pxor %%xmm7, %%xmm7 \n\t" "movq %4, %%xmm3 \n\t" 
"pshuflw $0, %%xmm3, %%xmm4 \n\t" "punpcklqdq %%xmm4, %%xmm4 \n\t" "pshuflw $85, %%xmm3, %%xmm5 \n\t" "punpcklqdq %%xmm5, %%xmm5 \n\t" "pshuflw $170, %%xmm3, %%xmm6 \n\t" "punpcklqdq %%xmm6, %%xmm6 \n\t" "pshuflw $255, %%xmm3, %%xmm3 \n\t" "punpcklqdq %%xmm3, %%xmm3 \n\t" "1: \n\t" DIAG4_SSE2(-1,0,1,2) "add $8, %1 \n\t" "add %2, %0 \n\t" "decl %3 \n\t" "jnz 1b \n\t" : "+r"(src), "+r"(t) : "g"((x86_reg)stride), "r"(11), "m"(*(const int64_t*)h_weights) : "memory"); t = tmp + 8; __asm__ volatile( "movq %4, %%xmm3 \n\t" "pshuflw $0, %%xmm3, %%xmm4 \n\t" "punpcklqdq %%xmm4, %%xmm4 \n\t" "pshuflw $85, %%xmm3, %%xmm5 \n\t" "punpcklqdq %%xmm5, %%xmm5 \n\t" "pshuflw $170, %%xmm3, %%xmm6 \n\t" "punpcklqdq %%xmm6, %%xmm6 \n\t" "pshuflw $255, %%xmm3, %%xmm3 \n\t" "punpcklqdq %%xmm3, %%xmm3 \n\t" "1: \n\t" DIAG4_SSE2(-8,0,8,16) "add $8, %0 \n\t" "add %2, %1 \n\t" "decl %3 \n\t" "jnz 1b \n\t" : "+r"(t), "+r"(dst) : "g"((x86_reg)stride), "r"(8), "m"(*(const int64_t*)v_weights) : "memory"); }
123linslouis-android-video-cutter
jni/libavcodec/x86/vp6dsp_sse2.c
C
asf20
4,504
/* * MMX and SSE2 optimized snow DSP utils * Copyright (c) 2005-2006 Robert Edele <yartrebo@earthlink.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/x86_cpu.h" #include "libavcodec/avcodec.h" #include "libavcodec/snow.h" #include "libavcodec/dwt.h" #include "dsputil_mmx.h" static void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width){ const int w2= (width+1)>>1; DECLARE_ALIGNED(16, IDWTELEM, temp)[width>>1]; const int w_l= (width>>1); const int w_r= w2 - 1; int i; { // Lift 0 IDWTELEM * const ref = b + w2 - 1; IDWTELEM b_0 = b[0]; //By allowing the first entry in b[0] to be calculated twice // (the first time erroneously), we allow the SSE2 code to run an extra pass. // The savings in code and time are well worth having to store this value and // calculate b[0] correctly afterwards. 
i = 0; __asm__ volatile( "pcmpeqd %%xmm7, %%xmm7 \n\t" "pcmpeqd %%xmm3, %%xmm3 \n\t" "psllw $1, %%xmm3 \n\t" "paddw %%xmm7, %%xmm3 \n\t" "psllw $13, %%xmm3 \n\t" ::); for(; i<w_l-15; i+=16){ __asm__ volatile( "movdqu (%1), %%xmm1 \n\t" "movdqu 16(%1), %%xmm5 \n\t" "movdqu 2(%1), %%xmm2 \n\t" "movdqu 18(%1), %%xmm6 \n\t" "paddw %%xmm1, %%xmm2 \n\t" "paddw %%xmm5, %%xmm6 \n\t" "paddw %%xmm7, %%xmm2 \n\t" "paddw %%xmm7, %%xmm6 \n\t" "pmulhw %%xmm3, %%xmm2 \n\t" "pmulhw %%xmm3, %%xmm6 \n\t" "paddw (%0), %%xmm2 \n\t" "paddw 16(%0), %%xmm6 \n\t" "movdqa %%xmm2, (%0) \n\t" "movdqa %%xmm6, 16(%0) \n\t" :: "r"(&b[i]), "r"(&ref[i]) : "memory" ); } snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS); b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS); } { // Lift 1 IDWTELEM * const dst = b+w2; i = 0; for(; (((x86_reg)&dst[i]) & 0x1F) && i<w_r; i++){ dst[i] = dst[i] - (b[i] + b[i + 1]); } for(; i<w_r-15; i+=16){ __asm__ volatile( "movdqu (%1), %%xmm1 \n\t" "movdqu 16(%1), %%xmm5 \n\t" "movdqu 2(%1), %%xmm2 \n\t" "movdqu 18(%1), %%xmm6 \n\t" "paddw %%xmm1, %%xmm2 \n\t" "paddw %%xmm5, %%xmm6 \n\t" "movdqa (%0), %%xmm0 \n\t" "movdqa 16(%0), %%xmm4 \n\t" "psubw %%xmm2, %%xmm0 \n\t" "psubw %%xmm6, %%xmm4 \n\t" "movdqa %%xmm0, (%0) \n\t" "movdqa %%xmm4, 16(%0) \n\t" :: "r"(&dst[i]), "r"(&b[i]) : "memory" ); } snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS); } { // Lift 2 IDWTELEM * const ref = b+w2 - 1; IDWTELEM b_0 = b[0]; i = 0; __asm__ volatile( "psllw $15, %%xmm7 \n\t" "pcmpeqw %%xmm6, %%xmm6 \n\t" "psrlw $13, %%xmm6 \n\t" "paddw %%xmm7, %%xmm6 \n\t" ::); for(; i<w_l-15; i+=16){ __asm__ volatile( "movdqu (%1), %%xmm0 \n\t" "movdqu 16(%1), %%xmm4 \n\t" "movdqu 2(%1), %%xmm1 \n\t" "movdqu 18(%1), %%xmm5 \n\t" //FIXME try aligned reads and shifts "paddw %%xmm6, %%xmm0 \n\t" "paddw %%xmm6, %%xmm4 \n\t" "paddw %%xmm7, %%xmm1 \n\t" "paddw %%xmm7, %%xmm5 \n\t" "pavgw %%xmm1, %%xmm0 \n\t" "pavgw %%xmm5, %%xmm4 \n\t" 
"psubw %%xmm7, %%xmm0 \n\t" "psubw %%xmm7, %%xmm4 \n\t" "psraw $1, %%xmm0 \n\t" "psraw $1, %%xmm4 \n\t" "movdqa (%0), %%xmm1 \n\t" "movdqa 16(%0), %%xmm5 \n\t" "paddw %%xmm1, %%xmm0 \n\t" "paddw %%xmm5, %%xmm4 \n\t" "psraw $2, %%xmm0 \n\t" "psraw $2, %%xmm4 \n\t" "paddw %%xmm1, %%xmm0 \n\t" "paddw %%xmm5, %%xmm4 \n\t" "movdqa %%xmm0, (%0) \n\t" "movdqa %%xmm4, 16(%0) \n\t" :: "r"(&b[i]), "r"(&ref[i]) : "memory" ); } snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l); b[0] = b_0 + ((2 * ref[1] + W_BO-1 + 4 * b_0) >> W_BS); } { // Lift 3 IDWTELEM * const src = b+w2; i = 0; for(; (((x86_reg)&temp[i]) & 0x1F) && i<w_r; i++){ temp[i] = src[i] - ((-W_AM*(b[i] + b[i+1]))>>W_AS); } for(; i<w_r-7; i+=8){ __asm__ volatile( "movdqu 2(%1), %%xmm2 \n\t" "movdqu 18(%1), %%xmm6 \n\t" "paddw (%1), %%xmm2 \n\t" "paddw 16(%1), %%xmm6 \n\t" "movdqu (%0), %%xmm0 \n\t" "movdqu 16(%0), %%xmm4 \n\t" "paddw %%xmm2, %%xmm0 \n\t" "paddw %%xmm6, %%xmm4 \n\t" "psraw $1, %%xmm2 \n\t" "psraw $1, %%xmm6 \n\t" "paddw %%xmm0, %%xmm2 \n\t" "paddw %%xmm4, %%xmm6 \n\t" "movdqa %%xmm2, (%2) \n\t" "movdqa %%xmm6, 16(%2) \n\t" :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i]) : "memory" ); } snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS); } { snow_interleave_line_header(&i, width, b, temp); for (; (i & 0x3E) != 0x3E; i-=2){ b[i+1] = temp[i>>1]; b[i] = b[i>>1]; } for (i-=62; i>=0; i-=64){ __asm__ volatile( "movdqa (%1), %%xmm0 \n\t" "movdqa 16(%1), %%xmm2 \n\t" "movdqa 32(%1), %%xmm4 \n\t" "movdqa 48(%1), %%xmm6 \n\t" "movdqa (%1), %%xmm1 \n\t" "movdqa 16(%1), %%xmm3 \n\t" "movdqa 32(%1), %%xmm5 \n\t" "movdqa 48(%1), %%xmm7 \n\t" "punpcklwd (%2), %%xmm0 \n\t" "punpcklwd 16(%2), %%xmm2 \n\t" "punpcklwd 32(%2), %%xmm4 \n\t" "punpcklwd 48(%2), %%xmm6 \n\t" "movdqa %%xmm0, (%0) \n\t" "movdqa %%xmm2, 32(%0) \n\t" "movdqa %%xmm4, 64(%0) \n\t" "movdqa %%xmm6, 96(%0) \n\t" "punpckhwd (%2), %%xmm1 \n\t" "punpckhwd 16(%2), %%xmm3 \n\t" "punpckhwd 32(%2), %%xmm5 
\n\t" "punpckhwd 48(%2), %%xmm7 \n\t" "movdqa %%xmm1, 16(%0) \n\t" "movdqa %%xmm3, 48(%0) \n\t" "movdqa %%xmm5, 80(%0) \n\t" "movdqa %%xmm7, 112(%0) \n\t" :: "r"(&(b)[i]), "r"(&(b)[i>>1]), "r"(&(temp)[i>>1]) : "memory" ); } } } static void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width){ const int w2= (width+1)>>1; IDWTELEM temp[width >> 1]; const int w_l= (width>>1); const int w_r= w2 - 1; int i; { // Lift 0 IDWTELEM * const ref = b + w2 - 1; i = 1; b[0] = b[0] - ((W_DM * 2 * ref[1]+W_DO)>>W_DS); __asm__ volatile( "pcmpeqw %%mm7, %%mm7 \n\t" "pcmpeqw %%mm3, %%mm3 \n\t" "psllw $1, %%mm3 \n\t" "paddw %%mm7, %%mm3 \n\t" "psllw $13, %%mm3 \n\t" ::); for(; i<w_l-7; i+=8){ __asm__ volatile( "movq (%1), %%mm2 \n\t" "movq 8(%1), %%mm6 \n\t" "paddw 2(%1), %%mm2 \n\t" "paddw 10(%1), %%mm6 \n\t" "paddw %%mm7, %%mm2 \n\t" "paddw %%mm7, %%mm6 \n\t" "pmulhw %%mm3, %%mm2 \n\t" "pmulhw %%mm3, %%mm6 \n\t" "paddw (%0), %%mm2 \n\t" "paddw 8(%0), %%mm6 \n\t" "movq %%mm2, (%0) \n\t" "movq %%mm6, 8(%0) \n\t" :: "r"(&b[i]), "r"(&ref[i]) : "memory" ); } snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS); } { // Lift 1 IDWTELEM * const dst = b+w2; i = 0; for(; i<w_r-7; i+=8){ __asm__ volatile( "movq (%1), %%mm2 \n\t" "movq 8(%1), %%mm6 \n\t" "paddw 2(%1), %%mm2 \n\t" "paddw 10(%1), %%mm6 \n\t" "movq (%0), %%mm0 \n\t" "movq 8(%0), %%mm4 \n\t" "psubw %%mm2, %%mm0 \n\t" "psubw %%mm6, %%mm4 \n\t" "movq %%mm0, (%0) \n\t" "movq %%mm4, 8(%0) \n\t" :: "r"(&dst[i]), "r"(&b[i]) : "memory" ); } snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS); } { // Lift 2 IDWTELEM * const ref = b+w2 - 1; i = 1; b[0] = b[0] + (((2 * ref[1] + W_BO) + 4 * b[0]) >> W_BS); __asm__ volatile( "psllw $15, %%mm7 \n\t" "pcmpeqw %%mm6, %%mm6 \n\t" "psrlw $13, %%mm6 \n\t" "paddw %%mm7, %%mm6 \n\t" ::); for(; i<w_l-7; i+=8){ __asm__ volatile( "movq (%1), %%mm0 \n\t" "movq 8(%1), %%mm4 \n\t" "movq 2(%1), %%mm1 \n\t" "movq 10(%1), %%mm5 \n\t" 
"paddw %%mm6, %%mm0 \n\t" "paddw %%mm6, %%mm4 \n\t" "paddw %%mm7, %%mm1 \n\t" "paddw %%mm7, %%mm5 \n\t" "pavgw %%mm1, %%mm0 \n\t" "pavgw %%mm5, %%mm4 \n\t" "psubw %%mm7, %%mm0 \n\t" "psubw %%mm7, %%mm4 \n\t" "psraw $1, %%mm0 \n\t" "psraw $1, %%mm4 \n\t" "movq (%0), %%mm1 \n\t" "movq 8(%0), %%mm5 \n\t" "paddw %%mm1, %%mm0 \n\t" "paddw %%mm5, %%mm4 \n\t" "psraw $2, %%mm0 \n\t" "psraw $2, %%mm4 \n\t" "paddw %%mm1, %%mm0 \n\t" "paddw %%mm5, %%mm4 \n\t" "movq %%mm0, (%0) \n\t" "movq %%mm4, 8(%0) \n\t" :: "r"(&b[i]), "r"(&ref[i]) : "memory" ); } snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l); } { // Lift 3 IDWTELEM * const src = b+w2; i = 0; for(; i<w_r-7; i+=8){ __asm__ volatile( "movq 2(%1), %%mm2 \n\t" "movq 10(%1), %%mm6 \n\t" "paddw (%1), %%mm2 \n\t" "paddw 8(%1), %%mm6 \n\t" "movq (%0), %%mm0 \n\t" "movq 8(%0), %%mm4 \n\t" "paddw %%mm2, %%mm0 \n\t" "paddw %%mm6, %%mm4 \n\t" "psraw $1, %%mm2 \n\t" "psraw $1, %%mm6 \n\t" "paddw %%mm0, %%mm2 \n\t" "paddw %%mm4, %%mm6 \n\t" "movq %%mm2, (%2) \n\t" "movq %%mm6, 8(%2) \n\t" :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i]) : "memory" ); } snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS); } { snow_interleave_line_header(&i, width, b, temp); for (; (i & 0x1E) != 0x1E; i-=2){ b[i+1] = temp[i>>1]; b[i] = b[i>>1]; } for (i-=30; i>=0; i-=32){ __asm__ volatile( "movq (%1), %%mm0 \n\t" "movq 8(%1), %%mm2 \n\t" "movq 16(%1), %%mm4 \n\t" "movq 24(%1), %%mm6 \n\t" "movq (%1), %%mm1 \n\t" "movq 8(%1), %%mm3 \n\t" "movq 16(%1), %%mm5 \n\t" "movq 24(%1), %%mm7 \n\t" "punpcklwd (%2), %%mm0 \n\t" "punpcklwd 8(%2), %%mm2 \n\t" "punpcklwd 16(%2), %%mm4 \n\t" "punpcklwd 24(%2), %%mm6 \n\t" "movq %%mm0, (%0) \n\t" "movq %%mm2, 16(%0) \n\t" "movq %%mm4, 32(%0) \n\t" "movq %%mm6, 48(%0) \n\t" "punpckhwd (%2), %%mm1 \n\t" "punpckhwd 8(%2), %%mm3 \n\t" "punpckhwd 16(%2), %%mm5 \n\t" "punpckhwd 24(%2), %%mm7 \n\t" "movq %%mm1, 8(%0) \n\t" "movq %%mm3, 24(%0) \n\t" "movq %%mm5, 40(%0) \n\t" 
"movq %%mm7, 56(%0) \n\t" :: "r"(&b[i]), "r"(&b[i>>1]), "r"(&temp[i>>1]) : "memory" ); } } } #if HAVE_7REGS #define snow_vertical_compose_sse2_load_add(op,r,t0,t1,t2,t3)\ ""op" ("r",%%"REG_d"), %%"t0" \n\t"\ ""op" 16("r",%%"REG_d"), %%"t1" \n\t"\ ""op" 32("r",%%"REG_d"), %%"t2" \n\t"\ ""op" 48("r",%%"REG_d"), %%"t3" \n\t" #define snow_vertical_compose_sse2_load(r,t0,t1,t2,t3)\ snow_vertical_compose_sse2_load_add("movdqa",r,t0,t1,t2,t3) #define snow_vertical_compose_sse2_add(r,t0,t1,t2,t3)\ snow_vertical_compose_sse2_load_add("paddw",r,t0,t1,t2,t3) #define snow_vertical_compose_r2r_sub(s0,s1,s2,s3,t0,t1,t2,t3)\ "psubw %%"s0", %%"t0" \n\t"\ "psubw %%"s1", %%"t1" \n\t"\ "psubw %%"s2", %%"t2" \n\t"\ "psubw %%"s3", %%"t3" \n\t" #define snow_vertical_compose_sse2_store(w,s0,s1,s2,s3)\ "movdqa %%"s0", ("w",%%"REG_d") \n\t"\ "movdqa %%"s1", 16("w",%%"REG_d") \n\t"\ "movdqa %%"s2", 32("w",%%"REG_d") \n\t"\ "movdqa %%"s3", 48("w",%%"REG_d") \n\t" #define snow_vertical_compose_sra(n,t0,t1,t2,t3)\ "psraw $"n", %%"t0" \n\t"\ "psraw $"n", %%"t1" \n\t"\ "psraw $"n", %%"t2" \n\t"\ "psraw $"n", %%"t3" \n\t" #define snow_vertical_compose_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)\ "paddw %%"s0", %%"t0" \n\t"\ "paddw %%"s1", %%"t1" \n\t"\ "paddw %%"s2", %%"t2" \n\t"\ "paddw %%"s3", %%"t3" \n\t" #define snow_vertical_compose_r2r_pmulhw(s0,s1,s2,s3,t0,t1,t2,t3)\ "pmulhw %%"s0", %%"t0" \n\t"\ "pmulhw %%"s1", %%"t1" \n\t"\ "pmulhw %%"s2", %%"t2" \n\t"\ "pmulhw %%"s3", %%"t3" \n\t" #define snow_vertical_compose_sse2_move(s0,s1,s2,s3,t0,t1,t2,t3)\ "movdqa %%"s0", %%"t0" \n\t"\ "movdqa %%"s1", %%"t1" \n\t"\ "movdqa %%"s2", %%"t2" \n\t"\ "movdqa %%"s3", %%"t3" \n\t" static void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width){ x86_reg i = width; while(i & 0x1F) { i--; b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS; b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS; b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS; b1[i] += 
(W_AM*(b0[i] + b2[i])+W_AO)>>W_AS; } i+=i; __asm__ volatile ( "jmp 2f \n\t" "1: \n\t" snow_vertical_compose_sse2_load("%4","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_add("%6","xmm0","xmm2","xmm4","xmm6") "pcmpeqw %%xmm0, %%xmm0 \n\t" "pcmpeqw %%xmm2, %%xmm2 \n\t" "paddw %%xmm2, %%xmm2 \n\t" "paddw %%xmm0, %%xmm2 \n\t" "psllw $13, %%xmm2 \n\t" snow_vertical_compose_r2r_add("xmm0","xmm0","xmm0","xmm0","xmm1","xmm3","xmm5","xmm7") snow_vertical_compose_r2r_pmulhw("xmm2","xmm2","xmm2","xmm2","xmm1","xmm3","xmm5","xmm7") snow_vertical_compose_sse2_add("%5","xmm1","xmm3","xmm5","xmm7") snow_vertical_compose_sse2_store("%5","xmm1","xmm3","xmm5","xmm7") snow_vertical_compose_sse2_load("%4","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_add("%3","xmm1","xmm3","xmm5","xmm7") snow_vertical_compose_r2r_sub("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_store("%4","xmm0","xmm2","xmm4","xmm6") "pcmpeqw %%xmm7, %%xmm7 \n\t" "pcmpeqw %%xmm5, %%xmm5 \n\t" "psllw $15, %%xmm7 \n\t" "psrlw $13, %%xmm5 \n\t" "paddw %%xmm7, %%xmm5 \n\t" snow_vertical_compose_r2r_add("xmm5","xmm5","xmm5","xmm5","xmm0","xmm2","xmm4","xmm6") "movq (%2,%%"REG_d"), %%xmm1 \n\t" "movq 8(%2,%%"REG_d"), %%xmm3 \n\t" "paddw %%xmm7, %%xmm1 \n\t" "paddw %%xmm7, %%xmm3 \n\t" "pavgw %%xmm1, %%xmm0 \n\t" "pavgw %%xmm3, %%xmm2 \n\t" "movq 16(%2,%%"REG_d"), %%xmm1 \n\t" "movq 24(%2,%%"REG_d"), %%xmm3 \n\t" "paddw %%xmm7, %%xmm1 \n\t" "paddw %%xmm7, %%xmm3 \n\t" "pavgw %%xmm1, %%xmm4 \n\t" "pavgw %%xmm3, %%xmm6 \n\t" snow_vertical_compose_r2r_sub("xmm7","xmm7","xmm7","xmm7","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sra("1","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_add("%3","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sra("2","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_add("%3","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_store("%3","xmm0","xmm2","xmm4","xmm6") 
snow_vertical_compose_sse2_add("%1","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_move("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7") snow_vertical_compose_sra("1","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_add("%2","xmm0","xmm2","xmm4","xmm6") snow_vertical_compose_sse2_store("%2","xmm0","xmm2","xmm4","xmm6") "2: \n\t" "sub $64, %%"REG_d" \n\t" "jge 1b \n\t" :"+d"(i) :"r"(b0),"r"(b1),"r"(b2),"r"(b3),"r"(b4),"r"(b5)); } #define snow_vertical_compose_mmx_load_add(op,r,t0,t1,t2,t3)\ ""op" ("r",%%"REG_d"), %%"t0" \n\t"\ ""op" 8("r",%%"REG_d"), %%"t1" \n\t"\ ""op" 16("r",%%"REG_d"), %%"t2" \n\t"\ ""op" 24("r",%%"REG_d"), %%"t3" \n\t" #define snow_vertical_compose_mmx_load(r,t0,t1,t2,t3)\ snow_vertical_compose_mmx_load_add("movq",r,t0,t1,t2,t3) #define snow_vertical_compose_mmx_add(r,t0,t1,t2,t3)\ snow_vertical_compose_mmx_load_add("paddw",r,t0,t1,t2,t3) #define snow_vertical_compose_mmx_store(w,s0,s1,s2,s3)\ "movq %%"s0", ("w",%%"REG_d") \n\t"\ "movq %%"s1", 8("w",%%"REG_d") \n\t"\ "movq %%"s2", 16("w",%%"REG_d") \n\t"\ "movq %%"s3", 24("w",%%"REG_d") \n\t" #define snow_vertical_compose_mmx_move(s0,s1,s2,s3,t0,t1,t2,t3)\ "movq %%"s0", %%"t0" \n\t"\ "movq %%"s1", %%"t1" \n\t"\ "movq %%"s2", %%"t2" \n\t"\ "movq %%"s3", %%"t3" \n\t" static void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width){ x86_reg i = width; while(i & 15) { i--; b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS; b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS; b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS; b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS; } i+=i; __asm__ volatile( "jmp 2f \n\t" "1: \n\t" snow_vertical_compose_mmx_load("%4","mm1","mm3","mm5","mm7") snow_vertical_compose_mmx_add("%6","mm1","mm3","mm5","mm7") "pcmpeqw %%mm0, %%mm0 \n\t" "pcmpeqw %%mm2, %%mm2 \n\t" "paddw %%mm2, %%mm2 \n\t" "paddw %%mm0, 
%%mm2 \n\t" "psllw $13, %%mm2 \n\t" snow_vertical_compose_r2r_add("mm0","mm0","mm0","mm0","mm1","mm3","mm5","mm7") snow_vertical_compose_r2r_pmulhw("mm2","mm2","mm2","mm2","mm1","mm3","mm5","mm7") snow_vertical_compose_mmx_add("%5","mm1","mm3","mm5","mm7") snow_vertical_compose_mmx_store("%5","mm1","mm3","mm5","mm7") snow_vertical_compose_mmx_load("%4","mm0","mm2","mm4","mm6") snow_vertical_compose_mmx_add("%3","mm1","mm3","mm5","mm7") snow_vertical_compose_r2r_sub("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6") snow_vertical_compose_mmx_store("%4","mm0","mm2","mm4","mm6") "pcmpeqw %%mm7, %%mm7 \n\t" "pcmpeqw %%mm5, %%mm5 \n\t" "psllw $15, %%mm7 \n\t" "psrlw $13, %%mm5 \n\t" "paddw %%mm7, %%mm5 \n\t" snow_vertical_compose_r2r_add("mm5","mm5","mm5","mm5","mm0","mm2","mm4","mm6") "movq (%2,%%"REG_d"), %%mm1 \n\t" "movq 8(%2,%%"REG_d"), %%mm3 \n\t" "paddw %%mm7, %%mm1 \n\t" "paddw %%mm7, %%mm3 \n\t" "pavgw %%mm1, %%mm0 \n\t" "pavgw %%mm3, %%mm2 \n\t" "movq 16(%2,%%"REG_d"), %%mm1 \n\t" "movq 24(%2,%%"REG_d"), %%mm3 \n\t" "paddw %%mm7, %%mm1 \n\t" "paddw %%mm7, %%mm3 \n\t" "pavgw %%mm1, %%mm4 \n\t" "pavgw %%mm3, %%mm6 \n\t" snow_vertical_compose_r2r_sub("mm7","mm7","mm7","mm7","mm0","mm2","mm4","mm6") snow_vertical_compose_sra("1","mm0","mm2","mm4","mm6") snow_vertical_compose_mmx_add("%3","mm0","mm2","mm4","mm6") snow_vertical_compose_sra("2","mm0","mm2","mm4","mm6") snow_vertical_compose_mmx_add("%3","mm0","mm2","mm4","mm6") snow_vertical_compose_mmx_store("%3","mm0","mm2","mm4","mm6") snow_vertical_compose_mmx_add("%1","mm0","mm2","mm4","mm6") snow_vertical_compose_mmx_move("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7") snow_vertical_compose_sra("1","mm0","mm2","mm4","mm6") snow_vertical_compose_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6") snow_vertical_compose_mmx_add("%2","mm0","mm2","mm4","mm6") snow_vertical_compose_mmx_store("%2","mm0","mm2","mm4","mm6") "2: \n\t" "sub $32, %%"REG_d" \n\t" "jge 1b \n\t" :"+d"(i) 
:"r"(b0),"r"(b1),"r"(b2),"r"(b3),"r"(b4),"r"(b5)); } #endif //HAVE_7REGS #define snow_inner_add_yblock_sse2_header \ IDWTELEM * * dst_array = sb->line + src_y;\ x86_reg tmp;\ __asm__ volatile(\ "mov %7, %%"REG_c" \n\t"\ "mov %6, %2 \n\t"\ "mov %4, %%"REG_S" \n\t"\ "pxor %%xmm7, %%xmm7 \n\t" /* 0 */\ "pcmpeqd %%xmm3, %%xmm3 \n\t"\ "psllw $15, %%xmm3 \n\t"\ "psrlw $12, %%xmm3 \n\t" /* FRAC_BITS >> 1 */\ "1: \n\t"\ "mov %1, %%"REG_D" \n\t"\ "mov (%%"REG_D"), %%"REG_D" \n\t"\ "add %3, %%"REG_D" \n\t" #define snow_inner_add_yblock_sse2_start_8(out_reg1, out_reg2, ptr_offset, s_offset)\ "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\ "movq (%%"REG_d"), %%"out_reg1" \n\t"\ "movq (%%"REG_d", %%"REG_c"), %%"out_reg2" \n\t"\ "punpcklbw %%xmm7, %%"out_reg1" \n\t"\ "punpcklbw %%xmm7, %%"out_reg2" \n\t"\ "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\ "movq "s_offset"+16(%%"REG_S"), %%xmm4 \n\t"\ "punpcklbw %%xmm7, %%xmm0 \n\t"\ "punpcklbw %%xmm7, %%xmm4 \n\t"\ "pmullw %%xmm0, %%"out_reg1" \n\t"\ "pmullw %%xmm4, %%"out_reg2" \n\t" #define snow_inner_add_yblock_sse2_start_16(out_reg1, out_reg2, ptr_offset, s_offset)\ "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\ "movq (%%"REG_d"), %%"out_reg1" \n\t"\ "movq 8(%%"REG_d"), %%"out_reg2" \n\t"\ "punpcklbw %%xmm7, %%"out_reg1" \n\t"\ "punpcklbw %%xmm7, %%"out_reg2" \n\t"\ "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\ "movq "s_offset"+8(%%"REG_S"), %%xmm4 \n\t"\ "punpcklbw %%xmm7, %%xmm0 \n\t"\ "punpcklbw %%xmm7, %%xmm4 \n\t"\ "pmullw %%xmm0, %%"out_reg1" \n\t"\ "pmullw %%xmm4, %%"out_reg2" \n\t" #define snow_inner_add_yblock_sse2_accum_8(ptr_offset, s_offset) \ snow_inner_add_yblock_sse2_start_8("xmm2", "xmm6", ptr_offset, s_offset)\ "paddusw %%xmm2, %%xmm1 \n\t"\ "paddusw %%xmm6, %%xmm5 \n\t" #define snow_inner_add_yblock_sse2_accum_16(ptr_offset, s_offset) \ snow_inner_add_yblock_sse2_start_16("xmm2", "xmm6", ptr_offset, s_offset)\ "paddusw %%xmm2, %%xmm1 \n\t"\ "paddusw %%xmm6, %%xmm5 \n\t" #define 
snow_inner_add_yblock_sse2_end_common1\ "add $32, %%"REG_S" \n\t"\ "add %%"REG_c", %0 \n\t"\ "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\ "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\ "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\ "add %%"REG_c", (%%"REG_a") \n\t" #define snow_inner_add_yblock_sse2_end_common2\ "jnz 1b \n\t"\ :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\ :\ "rm"((x86_reg)(src_x<<1)),"m"(obmc),"a"(block),"m"(b_h),"m"(src_stride):\ "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d""); #define snow_inner_add_yblock_sse2_end_8\ "sal $1, %%"REG_c" \n\t"\ "add $"PTR_SIZE"*2, %1 \n\t"\ snow_inner_add_yblock_sse2_end_common1\ "sar $1, %%"REG_c" \n\t"\ "sub $2, %2 \n\t"\ snow_inner_add_yblock_sse2_end_common2 #define snow_inner_add_yblock_sse2_end_16\ "add $"PTR_SIZE"*1, %1 \n\t"\ snow_inner_add_yblock_sse2_end_common1\ "dec %2 \n\t"\ snow_inner_add_yblock_sse2_end_common2 static void inner_add_yblock_bw_8_obmc_16_bh_even_sse2(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h, int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){ snow_inner_add_yblock_sse2_header snow_inner_add_yblock_sse2_start_8("xmm1", "xmm5", "3", "0") snow_inner_add_yblock_sse2_accum_8("2", "8") snow_inner_add_yblock_sse2_accum_8("1", "128") snow_inner_add_yblock_sse2_accum_8("0", "136") "mov %0, %%"REG_d" \n\t" "movdqa (%%"REG_D"), %%xmm0 \n\t" "movdqa %%xmm1, %%xmm2 \n\t" "punpckhwd %%xmm7, %%xmm1 \n\t" "punpcklwd %%xmm7, %%xmm2 \n\t" "paddd %%xmm2, %%xmm0 \n\t" "movdqa 16(%%"REG_D"), %%xmm2 \n\t" "paddd %%xmm1, %%xmm2 \n\t" "paddd %%xmm3, %%xmm0 \n\t" "paddd %%xmm3, %%xmm2 \n\t" "mov %1, %%"REG_D" \n\t" "mov "PTR_SIZE"(%%"REG_D"), %%"REG_D";\n\t" "add %3, %%"REG_D" \n\t" "movdqa (%%"REG_D"), %%xmm4 \n\t" "movdqa %%xmm5, %%xmm6 \n\t" "punpckhwd %%xmm7, %%xmm5 \n\t" "punpcklwd %%xmm7, %%xmm6 \n\t" "paddd %%xmm6, %%xmm4 \n\t" "movdqa 16(%%"REG_D"), %%xmm6 \n\t" "paddd %%xmm5, %%xmm6 \n\t" "paddd %%xmm3, %%xmm4 \n\t" "paddd 
%%xmm3, %%xmm6 \n\t" "psrad $8, %%xmm0 \n\t" /* FRAC_BITS. */ "psrad $8, %%xmm2 \n\t" /* FRAC_BITS. */ "packssdw %%xmm2, %%xmm0 \n\t" "packuswb %%xmm7, %%xmm0 \n\t" "movq %%xmm0, (%%"REG_d") \n\t" "psrad $8, %%xmm4 \n\t" /* FRAC_BITS. */ "psrad $8, %%xmm6 \n\t" /* FRAC_BITS. */ "packssdw %%xmm6, %%xmm4 \n\t" "packuswb %%xmm7, %%xmm4 \n\t" "movq %%xmm4, (%%"REG_d",%%"REG_c");\n\t" snow_inner_add_yblock_sse2_end_8 } static void inner_add_yblock_bw_16_obmc_32_sse2(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h, int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){ snow_inner_add_yblock_sse2_header snow_inner_add_yblock_sse2_start_16("xmm1", "xmm5", "3", "0") snow_inner_add_yblock_sse2_accum_16("2", "16") snow_inner_add_yblock_sse2_accum_16("1", "512") snow_inner_add_yblock_sse2_accum_16("0", "528") "mov %0, %%"REG_d" \n\t" "psrlw $4, %%xmm1 \n\t" "psrlw $4, %%xmm5 \n\t" "paddw (%%"REG_D"), %%xmm1 \n\t" "paddw 16(%%"REG_D"), %%xmm5 \n\t" "paddw %%xmm3, %%xmm1 \n\t" "paddw %%xmm3, %%xmm5 \n\t" "psraw $4, %%xmm1 \n\t" /* FRAC_BITS. */ "psraw $4, %%xmm5 \n\t" /* FRAC_BITS. 
*/ "packuswb %%xmm5, %%xmm1 \n\t" "movdqu %%xmm1, (%%"REG_d") \n\t" snow_inner_add_yblock_sse2_end_16 } #define snow_inner_add_yblock_mmx_header \ IDWTELEM * * dst_array = sb->line + src_y;\ x86_reg tmp;\ __asm__ volatile(\ "mov %7, %%"REG_c" \n\t"\ "mov %6, %2 \n\t"\ "mov %4, %%"REG_S" \n\t"\ "pxor %%mm7, %%mm7 \n\t" /* 0 */\ "pcmpeqd %%mm3, %%mm3 \n\t"\ "psllw $15, %%mm3 \n\t"\ "psrlw $12, %%mm3 \n\t" /* FRAC_BITS >> 1 */\ "1: \n\t"\ "mov %1, %%"REG_D" \n\t"\ "mov (%%"REG_D"), %%"REG_D" \n\t"\ "add %3, %%"REG_D" \n\t" #define snow_inner_add_yblock_mmx_start(out_reg1, out_reg2, ptr_offset, s_offset, d_offset)\ "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\ "movd "d_offset"(%%"REG_d"), %%"out_reg1" \n\t"\ "movd "d_offset"+4(%%"REG_d"), %%"out_reg2" \n\t"\ "punpcklbw %%mm7, %%"out_reg1" \n\t"\ "punpcklbw %%mm7, %%"out_reg2" \n\t"\ "movd "s_offset"(%%"REG_S"), %%mm0 \n\t"\ "movd "s_offset"+4(%%"REG_S"), %%mm4 \n\t"\ "punpcklbw %%mm7, %%mm0 \n\t"\ "punpcklbw %%mm7, %%mm4 \n\t"\ "pmullw %%mm0, %%"out_reg1" \n\t"\ "pmullw %%mm4, %%"out_reg2" \n\t" #define snow_inner_add_yblock_mmx_accum(ptr_offset, s_offset, d_offset) \ snow_inner_add_yblock_mmx_start("mm2", "mm6", ptr_offset, s_offset, d_offset)\ "paddusw %%mm2, %%mm1 \n\t"\ "paddusw %%mm6, %%mm5 \n\t" #define snow_inner_add_yblock_mmx_mix(read_offset, write_offset)\ "mov %0, %%"REG_d" \n\t"\ "psrlw $4, %%mm1 \n\t"\ "psrlw $4, %%mm5 \n\t"\ "paddw "read_offset"(%%"REG_D"), %%mm1 \n\t"\ "paddw "read_offset"+8(%%"REG_D"), %%mm5 \n\t"\ "paddw %%mm3, %%mm1 \n\t"\ "paddw %%mm3, %%mm5 \n\t"\ "psraw $4, %%mm1 \n\t"\ "psraw $4, %%mm5 \n\t"\ "packuswb %%mm5, %%mm1 \n\t"\ "movq %%mm1, "write_offset"(%%"REG_d") \n\t" #define snow_inner_add_yblock_mmx_end(s_step)\ "add $"s_step", %%"REG_S" \n\t"\ "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\ "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\ "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\ "add %%"REG_c", (%%"REG_a") \n\t"\ "add $"PTR_SIZE"*1, %1 \n\t"\ "add %%"REG_c", %0 
\n\t"\ "dec %2 \n\t"\ "jnz 1b \n\t"\ :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\ :\ "rm"((x86_reg)(src_x<<1)),"m"(obmc),"a"(block),"m"(b_h),"m"(src_stride):\ "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d""); static void inner_add_yblock_bw_8_obmc_16_mmx(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h, int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){ snow_inner_add_yblock_mmx_header snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0") snow_inner_add_yblock_mmx_accum("2", "8", "0") snow_inner_add_yblock_mmx_accum("1", "128", "0") snow_inner_add_yblock_mmx_accum("0", "136", "0") snow_inner_add_yblock_mmx_mix("0", "0") snow_inner_add_yblock_mmx_end("16") } static void inner_add_yblock_bw_16_obmc_32_mmx(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h, int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){ snow_inner_add_yblock_mmx_header snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0") snow_inner_add_yblock_mmx_accum("2", "16", "0") snow_inner_add_yblock_mmx_accum("1", "512", "0") snow_inner_add_yblock_mmx_accum("0", "528", "0") snow_inner_add_yblock_mmx_mix("0", "0") snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "8", "8") snow_inner_add_yblock_mmx_accum("2", "24", "8") snow_inner_add_yblock_mmx_accum("1", "520", "8") snow_inner_add_yblock_mmx_accum("0", "536", "8") snow_inner_add_yblock_mmx_mix("16", "8") snow_inner_add_yblock_mmx_end("32") } static void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h, int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){ if (b_w == 16) inner_add_yblock_bw_16_obmc_32_sse2(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8); else if (b_w == 8 && obmc_stride == 16) { if (!(b_h & 1)) inner_add_yblock_bw_8_obmc_16_bh_even_sse2(obmc, obmc_stride, block, b_w, b_h, 
src_x,src_y, src_stride, sb, add, dst8); else inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8); } else ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8); } static void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h, int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){ if (b_w == 16) inner_add_yblock_bw_16_obmc_32_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8); else if (b_w == 8 && obmc_stride == 16) inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8); else ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8); } void ff_dwt_init_x86(DWTContext *c) { mm_flags = mm_support(); if (mm_flags & FF_MM_MMX) { if(mm_flags & FF_MM_SSE2 & 0){ c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2; #if HAVE_7REGS c->vertical_compose97i = ff_snow_vertical_compose97i_sse2; #endif c->inner_add_yblock = ff_snow_inner_add_yblock_sse2; } else{ if(mm_flags & FF_MM_MMX2){ c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx; #if HAVE_7REGS c->vertical_compose97i = ff_snow_vertical_compose97i_mmx; #endif } c->inner_add_yblock = ff_snow_inner_add_yblock_mmx; } } }
123linslouis-android-video-cutter
jni/libavcodec/x86/snowdsp_mmx.c
C
asf20
40,317
/*
 * XVID MPEG-4 VIDEO CODEC
 * - SSE2 inverse discrete cosine transform -
 *
 * Copyright(C) 2003 Pascal Massimino <skal@planet-d.net>
 *
 * Conversion to gcc syntax with modifications
 * by Alexander Strange <astrange@ithinksw.com>
 *
 * Originally from dct/x86_asm/fdct_sse2_skal.asm in Xvid.
 *
 * This file is part of FFmpeg.
 *
 * Vertical pass is an implementation of the scheme:
 *  Loeffler C., Ligtenberg A., and Moschytz C.S.:
 *  Practical Fast 1D DCT Algorithm with Eleven Multiplications,
 *  Proc. ICASSP 1989, 988-991.
 *
 * Horizontal pass is a double 4x4 vector/matrix multiplication,
 * (see also Intel's Application Note 922:
 *  http://developer.intel.com/vtune/cbts/strmsimd/922down.htm
 *  Copyright (C) 1999 Intel Corporation)
 *
 * More details at http://skal.planet-d.net/coding/dct.html
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with FFmpeg; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "idct_xvid.h"
#include "dsputil_mmx.h"

/*!
 * @file
 * @brief SSE2 idct compatible with xvidmmx
 */

/* Replicate one 16-bit constant across all eight lanes of an XMM register. */
#define X8(x)     x,x,x,x,x,x,x,x

#define ROW_SHIFT 11
#define COL_SHIFT 6

/* Fixed-point trigonometric constants used by the Loeffler column pass.
 * Values match the comments; they are scaled for pmulhw (high 16 bits of
 * a 16x16->32 signed multiply). */
DECLARE_ASM_CONST(16, int16_t, tan1)[] = {X8(13036)}; // tan( pi/16)
DECLARE_ASM_CONST(16, int16_t, tan2)[] = {X8(27146)}; // tan(2pi/16) = sqrt(2)-1
DECLARE_ASM_CONST(16, int16_t, tan3)[] = {X8(43790)}; // tan(3pi/16)-1
DECLARE_ASM_CONST(16, int16_t, sqrt2)[]= {X8(23170)}; // 0.5/sqrt(2)
/* Saturation bias for the row "all-zero?" tests (paddusb m127 makes any
 * non-zero byte set the high bit, which pmovmskb then collects). */
DECLARE_ASM_CONST(8,  uint8_t, m127)[] = {X8(127)};

/* Per-row 4x4 matrix coefficients for the horizontal pass (one table per
 * distinct row scaling, shared between symmetric rows). */
DECLARE_ASM_CONST(16, int16_t, iTab1)[] = {
 0x4000, 0x539f, 0xc000, 0xac61, 0x4000, 0xdd5d, 0x4000, 0xdd5d,
 0x4000, 0x22a3, 0x4000, 0x22a3, 0xc000, 0x539f, 0x4000, 0xac61,
 0x3249, 0x11a8, 0x4b42, 0xee58, 0x11a8, 0x4b42, 0x11a8, 0xcdb7,
 0x58c5, 0x4b42, 0xa73b, 0xcdb7, 0x3249, 0xa73b, 0x4b42, 0xa73b
};

DECLARE_ASM_CONST(16, int16_t, iTab2)[] = {
 0x58c5, 0x73fc, 0xa73b, 0x8c04, 0x58c5, 0xcff5, 0x58c5, 0xcff5,
 0x58c5, 0x300b, 0x58c5, 0x300b, 0xa73b, 0x73fc, 0x58c5, 0x8c04,
 0x45bf, 0x187e, 0x6862, 0xe782, 0x187e, 0x6862, 0x187e, 0xba41,
 0x7b21, 0x6862, 0x84df, 0xba41, 0x45bf, 0x84df, 0x6862, 0x84df
};

DECLARE_ASM_CONST(16, int16_t, iTab3)[] = {
 0x539f, 0x6d41, 0xac61, 0x92bf, 0x539f, 0xd2bf, 0x539f, 0xd2bf,
 0x539f, 0x2d41, 0x539f, 0x2d41, 0xac61, 0x6d41, 0x539f, 0x92bf,
 0x41b3, 0x1712, 0x6254, 0xe8ee, 0x1712, 0x6254, 0x1712, 0xbe4d,
 0x73fc, 0x6254, 0x8c04, 0xbe4d, 0x41b3, 0x8c04, 0x6254, 0x8c04
};

DECLARE_ASM_CONST(16, int16_t, iTab4)[] = {
 0x4b42, 0x6254, 0xb4be, 0x9dac, 0x4b42, 0xd746, 0x4b42, 0xd746,
 0x4b42, 0x28ba, 0x4b42, 0x28ba, 0xb4be, 0x6254, 0x4b42, 0x9dac,
 0x3b21, 0x14c3, 0x587e, 0xeb3d, 0x14c3, 0x587e, 0x14c3, 0xc4df,
 0x6862, 0x587e, 0x979e, 0xc4df, 0x3b21, 0x979e, 0x587e, 0x979e
};

/* Per-row rounding constants added before the >>11 of the row pass. */
DECLARE_ASM_CONST(16, int32_t, walkenIdctRounders)[] = {
 65536, 65536, 65536, 65536,
  3597,  3597,  3597,  3597,
  2260,  2260,  2260,  2260,
  1203,  1203,  1203,  1203,
   120,   120,   120,   120,
   512,   512,   512,   512
};

// Temporary storage before the column pass
#define ROW1 "%%xmm6"
#define ROW3 "%%xmm4"
#define ROW5 "%%xmm5"
#define ROW7 "%%xmm7"

#define CLEAR_ODD(r) "pxor  "r","r" \n\t"
#define PUT_ODD(dst) "pshufhw  $0x1B, %%xmm2, "dst"   \n\t"

#if ARCH_X86_64

/* On x86-64 there are enough XMM registers to keep all eight rows in
 * registers between the row and column passes; the 32-bit build spills
 * the even rows back to the block buffer instead. */
# define ROW0 "%%xmm8"
# define REG0 ROW0
# define ROW2 "%%xmm9"
# define REG2 ROW2
# define ROW4 "%%xmm10"
# define REG4 ROW4
# define ROW6 "%%xmm11"
# define REG6 ROW6
# define CLEAR_EVEN(r) CLEAR_ODD(r)
# define PUT_EVEN(dst) PUT_ODD(dst)
# define XMMS "%%xmm12"
/* "#" turns the MOV_32_ONLY lines into asm comments on 64-bit. */
# define MOV_32_ONLY "#"
# define SREG2 REG2
# define TAN3 "%%xmm13"
# define TAN1 "%%xmm14"

#else

/* 32-bit: even rows live in memory (offsets from the block pointer %0). */
# define ROW0 "(%0)"
# define REG0 "%%xmm4"
# define ROW2 "2*16(%0)"
# define REG2 "%%xmm4"
# define ROW4 "4*16(%0)"
# define REG4 "%%xmm6"
# define ROW6 "6*16(%0)"
# define REG6 "%%xmm6"
# define CLEAR_EVEN(r)
# define PUT_EVEN(dst) \
    "pshufhw  $0x1B, %%xmm2, %%xmm2   \n\t" \
    "movdqa  %%xmm2, "dst"            \n\t"
# define XMMS "%%xmm2"
# define MOV_32_ONLY "movdqa "
# define SREG2 "%%xmm7"
# define TAN3 "%%xmm0"
# define TAN1 "%%xmm2"

#endif

#define ROUND(x) "paddd "MANGLE(x)

#define JZ(reg, to)                         \
    "testl     "reg","reg"            \n\t" \
    "jz        "to"                   \n\t"

#define JNZ(reg, to)                        \
    "testl     "reg","reg"            \n\t" \
    "jnz       "to"                   \n\t"

/* Set "reg" non-zero iff the 16-byte row at "src" contains any non-zero
 * coefficient (used to skip work on all-zero rows). */
#define TEST_ONE_ROW(src, reg, clear)       \
    clear                                   \
    "movq      "src", %%mm1           \n\t" \
    "por       8+"src", %%mm1         \n\t" \
    "paddusb   %%mm0, %%mm1           \n\t" \
    "pmovmskb  %%mm1, "reg"           \n\t"

/* Same as TEST_ONE_ROW but for two rows at once. */
#define TEST_TWO_ROWS(row1, row2, reg1, reg2, clear1, clear2) \
    clear1                                  \
    clear2                                  \
    "movq      "row1", %%mm1          \n\t" \
    "por       8+"row1", %%mm1        \n\t" \
    "movq      "row2", %%mm2          \n\t" \
    "por       8+"row2", %%mm2        \n\t" \
    "paddusb   %%mm0, %%mm1           \n\t" \
    "paddusb   %%mm0, %%mm2           \n\t" \
    "pmovmskb  %%mm1, "reg1"          \n\t" \
    "pmovmskb  %%mm2, "reg2"          \n\t"

///IDCT pass on rows.
#define iMTX_MULT(src, table, rounder, put) \
    "movdqa        "src", %%xmm3      \n\t" \
    "movdqa       %%xmm3, %%xmm0      \n\t" \
    "pshufd   $0x11, %%xmm3, %%xmm1   \n\t" /* 4602 */ \
    "punpcklqdq   %%xmm0, %%xmm0      \n\t" /* 0246 */ \
    "pmaddwd     "table", %%xmm0      \n\t" \
    "pmaddwd  16+"table", %%xmm1      \n\t" \
    "pshufd   $0xBB, %%xmm3, %%xmm2   \n\t" /* 5713 */ \
    "punpckhqdq   %%xmm3, %%xmm3      \n\t" /* 1357 */ \
    "pmaddwd  32+"table", %%xmm2      \n\t" \
    "pmaddwd  48+"table", %%xmm3      \n\t" \
    "paddd        %%xmm1, %%xmm0      \n\t" \
    "paddd        %%xmm3, %%xmm2      \n\t" \
    rounder",     %%xmm0              \n\t" \
    "movdqa       %%xmm2, %%xmm3      \n\t" \
    "paddd        %%xmm0, %%xmm2      \n\t" \
    "psubd        %%xmm3, %%xmm0      \n\t" \
    "psrad           $11, %%xmm2      \n\t" \
    "psrad           $11, %%xmm0      \n\t" \
    "packssdw     %%xmm0, %%xmm2      \n\t" \
    put                                     \
    "1:                               \n\t"

#define iLLM_HEAD                           \
    "movdqa   "MANGLE(tan3)", "TAN3"  \n\t" \
    "movdqa   "MANGLE(tan1)", "TAN1"  \n\t" \

///IDCT pass on columns.
#define iLLM_PASS(dct)                      \
    "movdqa "TAN3", %%xmm1            \n\t" \
    "movdqa "TAN1", %%xmm3            \n\t" \
    "pmulhw %%xmm4, "TAN3"            \n\t" \
    "pmulhw %%xmm5, %%xmm1            \n\t" \
    "paddsw %%xmm4, "TAN3"            \n\t" \
    "paddsw %%xmm5, %%xmm1            \n\t" \
    "psubsw %%xmm5, "TAN3"            \n\t" \
    "paddsw %%xmm4, %%xmm1            \n\t" \
    "pmulhw %%xmm7, %%xmm3            \n\t" \
    "pmulhw %%xmm6, "TAN1"            \n\t" \
    "paddsw %%xmm6, %%xmm3            \n\t" \
    "psubsw %%xmm7, "TAN1"            \n\t" \
    "movdqa %%xmm3, %%xmm7            \n\t" \
    "movdqa "TAN1", %%xmm6            \n\t" \
    "psubsw %%xmm1, %%xmm3            \n\t" \
    "psubsw "TAN3", "TAN1"            \n\t" \
    "paddsw %%xmm7, %%xmm1            \n\t" \
    "paddsw %%xmm6, "TAN3"            \n\t" \
    "movdqa %%xmm3, %%xmm6            \n\t" \
    "psubsw "TAN3", %%xmm3            \n\t" \
    "paddsw %%xmm6, "TAN3"            \n\t" \
    "movdqa "MANGLE(sqrt2)", %%xmm4   \n\t" \
    "pmulhw %%xmm4, %%xmm3            \n\t" \
    "pmulhw %%xmm4, "TAN3"            \n\t" \
    "paddsw "TAN3", "TAN3"            \n\t" \
    "paddsw %%xmm3, %%xmm3            \n\t" \
    "movdqa "MANGLE(tan2)", %%xmm7    \n\t" \
    MOV_32_ONLY ROW2", "REG2"         \n\t" \
    MOV_32_ONLY ROW6", "REG6"         \n\t" \
    "movdqa %%xmm7, %%xmm5            \n\t" \
    "pmulhw "REG6", %%xmm7            \n\t" \
    "pmulhw "REG2", %%xmm5            \n\t" \
    "paddsw "REG2", %%xmm7            \n\t" \
    "psubsw "REG6", %%xmm5            \n\t" \
    MOV_32_ONLY ROW0", "REG0"         \n\t" \
    MOV_32_ONLY ROW4", "REG4"         \n\t" \
    MOV_32_ONLY" "TAN1", (%0)         \n\t" \
    "movdqa "REG0", "XMMS"            \n\t" \
    "psubsw "REG4", "REG0"            \n\t" \
    "paddsw "XMMS", "REG4"            \n\t" \
    "movdqa "REG4", "XMMS"            \n\t" \
    "psubsw %%xmm7, "REG4"            \n\t" \
    "paddsw "XMMS", %%xmm7            \n\t" \
    "movdqa "REG0", "XMMS"            \n\t" \
    "psubsw %%xmm5, "REG0"            \n\t" \
    "paddsw "XMMS", %%xmm5            \n\t" \
    "movdqa %%xmm5, "XMMS"            \n\t" \
    "psubsw "TAN3", %%xmm5            \n\t" \
    "paddsw "XMMS", "TAN3"            \n\t" \
    "movdqa "REG0", "XMMS"            \n\t" \
    "psubsw %%xmm3, "REG0"            \n\t" \
    "paddsw "XMMS", %%xmm3            \n\t" \
    MOV_32_ONLY" (%0), "TAN1"         \n\t" \
    "psraw  $6, %%xmm5                \n\t" \
    "psraw  $6, "REG0"                \n\t" \
    "psraw  $6, "TAN3"                \n\t" \
    "psraw  $6, %%xmm3                \n\t" \
    "movdqa "TAN3", 1*16("dct")       \n\t" \
    "movdqa %%xmm3, 2*16("dct")       \n\t" \
    "movdqa "REG0", 5*16("dct")       \n\t" \
    "movdqa %%xmm5, 6*16("dct")       \n\t" \
    "movdqa %%xmm7, %%xmm0            \n\t" \
    "movdqa "REG4", %%xmm4            \n\t" \
    "psubsw %%xmm1, %%xmm7            \n\t" \
    "psubsw "TAN1", "REG4"            \n\t" \
    "paddsw %%xmm0, %%xmm1            \n\t" \
    "paddsw %%xmm4, "TAN1"            \n\t" \
    "psraw  $6, %%xmm1                \n\t" \
    "psraw  $6, %%xmm7                \n\t" \
    "psraw  $6, "TAN1"                \n\t" \
    "psraw  $6, "REG4"                \n\t" \
    "movdqa %%xmm1, ("dct")           \n\t" \
    "movdqa "TAN1", 3*16("dct")       \n\t" \
    "movdqa "REG4", 4*16("dct")       \n\t" \
    "movdqa %%xmm7, 7*16("dct")       \n\t"

///IDCT pass on columns, assuming rows 4-7 are zero.
#define iLLM_PASS_SPARSE(dct)               \
    "pmulhw %%xmm4, "TAN3"            \n\t" \
    "paddsw %%xmm4, "TAN3"            \n\t" \
    "movdqa %%xmm6, %%xmm3            \n\t" \
    "pmulhw %%xmm6, "TAN1"            \n\t" \
    "movdqa %%xmm4, %%xmm1            \n\t" \
    "psubsw %%xmm1, %%xmm3            \n\t" \
    "paddsw %%xmm6, %%xmm1            \n\t" \
    "movdqa "TAN1", %%xmm6            \n\t" \
    "psubsw "TAN3", "TAN1"            \n\t" \
    "paddsw %%xmm6, "TAN3"            \n\t" \
    "movdqa %%xmm3, %%xmm6            \n\t" \
    "psubsw "TAN3", %%xmm3            \n\t" \
    "paddsw %%xmm6, "TAN3"            \n\t" \
    "movdqa "MANGLE(sqrt2)", %%xmm4   \n\t" \
    "pmulhw %%xmm4, %%xmm3            \n\t" \
    "pmulhw %%xmm4, "TAN3"            \n\t" \
    "paddsw "TAN3", "TAN3"            \n\t" \
    "paddsw %%xmm3, %%xmm3            \n\t" \
    "movdqa "MANGLE(tan2)", %%xmm5    \n\t" \
    MOV_32_ONLY ROW2", "SREG2"        \n\t" \
    "pmulhw "SREG2", %%xmm5           \n\t" \
    MOV_32_ONLY ROW0", "REG0"         \n\t" \
    "movdqa "REG0", %%xmm6            \n\t" \
    "psubsw "SREG2", %%xmm6           \n\t" \
    "paddsw "REG0", "SREG2"           \n\t" \
    MOV_32_ONLY" "TAN1", (%0)         \n\t" \
    "movdqa "REG0", "XMMS"            \n\t" \
    "psubsw %%xmm5, "REG0"            \n\t" \
    "paddsw "XMMS", %%xmm5            \n\t" \
    "movdqa %%xmm5, "XMMS"            \n\t" \
    "psubsw "TAN3", %%xmm5            \n\t" \
    "paddsw "XMMS", "TAN3"            \n\t" \
    "movdqa "REG0", "XMMS"            \n\t" \
    "psubsw %%xmm3, "REG0"            \n\t" \
    "paddsw "XMMS", %%xmm3            \n\t" \
    MOV_32_ONLY" (%0), "TAN1"         \n\t" \
    "psraw  $6, %%xmm5                \n\t" \
    "psraw  $6, "REG0"                \n\t" \
    "psraw  $6, "TAN3"                \n\t" \
    "psraw  $6, %%xmm3                \n\t" \
    "movdqa "TAN3", 1*16("dct")       \n\t" \
    "movdqa %%xmm3, 2*16("dct")       \n\t" \
    "movdqa "REG0", 5*16("dct")       \n\t" \
    "movdqa %%xmm5, 6*16("dct")       \n\t" \
    "movdqa "SREG2", %%xmm0           \n\t" \
    "movdqa %%xmm6, %%xmm4            \n\t" \
    "psubsw %%xmm1, "SREG2"           \n\t" \
    "psubsw "TAN1", %%xmm6            \n\t" \
    "paddsw %%xmm0, %%xmm1            \n\t" \
    "paddsw %%xmm4, "TAN1"            \n\t" \
    "psraw  $6, %%xmm1                \n\t" \
    "psraw  $6, "SREG2"               \n\t" \
    "psraw  $6, "TAN1"                \n\t" \
    "psraw  $6, %%xmm6                \n\t" \
    "movdqa %%xmm1, ("dct")           \n\t" \
    "movdqa "TAN1", 3*16("dct")       \n\t" \
    "movdqa %%xmm6, 4*16("dct")       \n\t" \
    "movdqa "SREG2", 7*16("dct")      \n\t"

/**
 * In-place SSE2 inverse DCT of one 8x8 block of 16-bit coefficients,
 * bit-compatible with the Xvid MMX idct.  Performs the horizontal pass
 * row by row (skipping all-zero rows via the TEST_* checks and the
 * sparse column pass when rows 4-7 are empty), then the Loeffler
 * column pass.
 *
 * NOTE(review): plain `inline` without `static` on an externally used
 * function relies on GNU89 inline semantics — confirm build flags.
 */
inline void ff_idct_xvid_sse2(short *block)
{
    __asm__ volatile(
    "movq "MANGLE(m127)", %%mm0                              \n\t"
    iMTX_MULT("(%0)",     MANGLE(iTab1), ROUND(walkenIdctRounders),      PUT_EVEN(ROW0))
    iMTX_MULT("1*16(%0)", MANGLE(iTab2), ROUND(walkenIdctRounders+1*16), PUT_ODD(ROW1))
    iMTX_MULT("2*16(%0)", MANGLE(iTab3), ROUND(walkenIdctRounders+2*16), PUT_EVEN(ROW2))

    TEST_TWO_ROWS("3*16(%0)", "4*16(%0)", "%%eax", "%%ecx", CLEAR_ODD(ROW3), CLEAR_EVEN(ROW4))
    JZ("%%eax", "1f")
    iMTX_MULT("3*16(%0)", MANGLE(iTab4), ROUND(walkenIdctRounders+3*16), PUT_ODD(ROW3))

    TEST_TWO_ROWS("5*16(%0)", "6*16(%0)", "%%eax", "%%edx", CLEAR_ODD(ROW5), CLEAR_EVEN(ROW6))
    TEST_ONE_ROW("7*16(%0)", "%%esi", CLEAR_ODD(ROW7))
    iLLM_HEAD
    ASMALIGN(4)
    JNZ("%%ecx", "2f")
    JNZ("%%eax", "3f")
    JNZ("%%edx", "4f")
    JNZ("%%esi", "5f")
    iLLM_PASS_SPARSE("%0")
    "jmp 6f                                                  \n\t"
    "2:                                                      \n\t"
    iMTX_MULT("4*16(%0)", MANGLE(iTab1), "#", PUT_EVEN(ROW4))
    "3:                                                      \n\t"
    iMTX_MULT("5*16(%0)", MANGLE(iTab4), ROUND(walkenIdctRounders+4*16), PUT_ODD(ROW5))
    JZ("%%edx", "1f")
    "4:                                                      \n\t"
    iMTX_MULT("6*16(%0)", MANGLE(iTab3), ROUND(walkenIdctRounders+5*16), PUT_EVEN(ROW6))
    JZ("%%esi", "1f")
    "5:                                                      \n\t"
    iMTX_MULT("7*16(%0)", MANGLE(iTab2), ROUND(walkenIdctRounders+5*16), PUT_ODD(ROW7))
#if !ARCH_X86_64
    iLLM_HEAD
#endif
    iLLM_PASS("%0")
    "6:                                                      \n\t"
    : "+r"(block)
    :
    : "%eax", "%ecx", "%edx", "%esi", "memory");
}

/** IDCT the block, then clamp and store the result into dest (put). */
void ff_idct_xvid_sse2_put(uint8_t *dest, int line_size, short *block)
{
    ff_idct_xvid_sse2(block);
    put_pixels_clamped_mmx(block, dest, line_size);
}

/** IDCT the block, then add the clamped result onto dest (add). */
void ff_idct_xvid_sse2_add(uint8_t *dest, int line_size, short *block)
{
    ff_idct_xvid_sse2(block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
123linslouis-android-video-cutter
jni/libavcodec/x86/idct_sse2_xvid.c
C
asf20
15,417
/*
 * vp6dsp MMX function declarations
 * Copyright (c) 2009 Sebastien Lucas <sebastien.lucas@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_X86_VP6DSP_MMX_H
#define AVCODEC_X86_VP6DSP_MMX_H

#include <stdint.h>

/*
 * MMX implementation of the VP6 diagonal 4-tap filter.
 * NOTE(review): parameter semantics inferred from the names —
 * h_weights/v_weights are presumably the horizontal and vertical
 * 4-tap coefficient sets; confirm against the C reference
 * implementation in vp6dsp.c before relying on this description.
 */
void ff_vp6_filter_diag4_mmx(uint8_t *dst, uint8_t *src, int stride,
                             const int16_t *h_weights,const int16_t *v_weights);

#endif /* AVCODEC_X86_VP6DSP_MMX_H */
123linslouis-android-video-cutter
jni/libavcodec/x86/vp6dsp_mmx.h
C
asf20
1,147
/*
 * mmx.h
 * Copyright (C) 1997-2001 H. Dietz and R. Fisher
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef AVCODEC_X86_MMX_H
#define AVCODEC_X86_MMX_H

/*
 * Deprecated one-instruction-per-macro wrappers around GNU inline asm.
 * Naming convention: _i2r = immediate->register, _m2r = memory->register,
 * _r2m = register->memory, _r2r = register->register.
 */

#warning Everything in this header is deprecated, use plain __asm__()! New code using this header will be rejected.

#define         mmx_i2r(op,imm,reg) \
        __asm__ volatile (#op " %0, %%" #reg \
                              : /* nothing */ \
                              : "i" (imm) )

#define         mmx_m2r(op,mem,reg) \
        __asm__ volatile (#op " %0, %%" #reg \
                              : /* nothing */ \
                              : "m" (mem))

#define         mmx_r2m(op,reg,mem) \
        __asm__ volatile (#op " %%" #reg ", %0" \
                              : "=m" (mem) \
                              : /* nothing */ )

#define         mmx_r2r(op,regs,regd) \
        __asm__ volatile (#op " %" #regs ", %" #regd)

#define         emms() __asm__ volatile ("emms")

#define         movd_m2r(var,reg)           mmx_m2r (movd, var, reg)
#define         movd_r2m(reg,var)           mmx_r2m (movd, reg, var)
#define         movd_r2r(regs,regd)         mmx_r2r (movd, regs, regd)

#define         movq_m2r(var,reg)           mmx_m2r (movq, var, reg)
#define         movq_r2m(reg,var)           mmx_r2m (movq, reg, var)
#define         movq_r2r(regs,regd)         mmx_r2r (movq, regs, regd)

#define         packssdw_m2r(var,reg)       mmx_m2r (packssdw, var, reg)
#define         packssdw_r2r(regs,regd)     mmx_r2r (packssdw, regs, regd)
#define         packsswb_m2r(var,reg)       mmx_m2r (packsswb, var, reg)
#define         packsswb_r2r(regs,regd)     mmx_r2r (packsswb, regs, regd)

#define         packuswb_m2r(var,reg)       mmx_m2r (packuswb, var, reg)
#define         packuswb_r2r(regs,regd)     mmx_r2r (packuswb, regs, regd)

#define         paddb_m2r(var,reg)          mmx_m2r (paddb, var, reg)
#define         paddb_r2r(regs,regd)        mmx_r2r (paddb, regs, regd)
#define         paddd_m2r(var,reg)          mmx_m2r (paddd, var, reg)
#define         paddd_r2r(regs,regd)        mmx_r2r (paddd, regs, regd)
#define         paddw_m2r(var,reg)          mmx_m2r (paddw, var, reg)
#define         paddw_r2r(regs,regd)        mmx_r2r (paddw, regs, regd)

#define         paddsb_m2r(var,reg)         mmx_m2r (paddsb, var, reg)
#define         paddsb_r2r(regs,regd)       mmx_r2r (paddsb, regs, regd)
#define         paddsw_m2r(var,reg)         mmx_m2r (paddsw, var, reg)
#define         paddsw_r2r(regs,regd)       mmx_r2r (paddsw, regs, regd)

#define         paddusb_m2r(var,reg)        mmx_m2r (paddusb, var, reg)
#define         paddusb_r2r(regs,regd)      mmx_r2r (paddusb, regs, regd)
#define         paddusw_m2r(var,reg)        mmx_m2r (paddusw, var, reg)
#define         paddusw_r2r(regs,regd)      mmx_r2r (paddusw, regs, regd)

#define         pand_m2r(var,reg)           mmx_m2r (pand, var, reg)
#define         pand_r2r(regs,regd)         mmx_r2r (pand, regs, regd)

#define         pandn_m2r(var,reg)          mmx_m2r (pandn, var, reg)
#define         pandn_r2r(regs,regd)        mmx_r2r (pandn, regs, regd)

#define         pcmpeqb_m2r(var,reg)        mmx_m2r (pcmpeqb, var, reg)
#define         pcmpeqb_r2r(regs,regd)      mmx_r2r (pcmpeqb, regs, regd)
#define         pcmpeqd_m2r(var,reg)        mmx_m2r (pcmpeqd, var, reg)
#define         pcmpeqd_r2r(regs,regd)      mmx_r2r (pcmpeqd, regs, regd)
#define         pcmpeqw_m2r(var,reg)        mmx_m2r (pcmpeqw, var, reg)
#define         pcmpeqw_r2r(regs,regd)      mmx_r2r (pcmpeqw, regs, regd)

#define         pcmpgtb_m2r(var,reg)        mmx_m2r (pcmpgtb, var, reg)
#define         pcmpgtb_r2r(regs,regd)      mmx_r2r (pcmpgtb, regs, regd)
#define         pcmpgtd_m2r(var,reg)        mmx_m2r (pcmpgtd, var, reg)
#define         pcmpgtd_r2r(regs,regd)      mmx_r2r (pcmpgtd, regs, regd)
#define         pcmpgtw_m2r(var,reg)        mmx_m2r (pcmpgtw, var, reg)
#define         pcmpgtw_r2r(regs,regd)      mmx_r2r (pcmpgtw, regs, regd)

#define         pmaddwd_m2r(var,reg)        mmx_m2r (pmaddwd, var, reg)
#define         pmaddwd_r2r(regs,regd)      mmx_r2r (pmaddwd, regs, regd)

#define         pmulhw_m2r(var,reg)         mmx_m2r (pmulhw, var, reg)
#define         pmulhw_r2r(regs,regd)       mmx_r2r (pmulhw, regs, regd)

#define         pmullw_m2r(var,reg)         mmx_m2r (pmullw, var, reg)
#define         pmullw_r2r(regs,regd)       mmx_r2r (pmullw, regs, regd)

#define         por_m2r(var,reg)            mmx_m2r (por, var, reg)
#define         por_r2r(regs,regd)          mmx_r2r (por, regs, regd)

#define         pslld_i2r(imm,reg)          mmx_i2r (pslld, imm, reg)
#define         pslld_m2r(var,reg)          mmx_m2r (pslld, var, reg)
#define         pslld_r2r(regs,regd)        mmx_r2r (pslld, regs, regd)
#define         psllq_i2r(imm,reg)          mmx_i2r (psllq, imm, reg)
#define         psllq_m2r(var,reg)          mmx_m2r (psllq, var, reg)
#define         psllq_r2r(regs,regd)        mmx_r2r (psllq, regs, regd)
#define         psllw_i2r(imm,reg)          mmx_i2r (psllw, imm, reg)
#define         psllw_m2r(var,reg)          mmx_m2r (psllw, var, reg)
#define         psllw_r2r(regs,regd)        mmx_r2r (psllw, regs, regd)

#define         psrad_i2r(imm,reg)          mmx_i2r (psrad, imm, reg)
#define         psrad_m2r(var,reg)          mmx_m2r (psrad, var, reg)
#define         psrad_r2r(regs,regd)        mmx_r2r (psrad, regs, regd)
#define         psraw_i2r(imm,reg)          mmx_i2r (psraw, imm, reg)
#define         psraw_m2r(var,reg)          mmx_m2r (psraw, var, reg)
#define         psraw_r2r(regs,regd)        mmx_r2r (psraw, regs, regd)

#define         psrld_i2r(imm,reg)          mmx_i2r (psrld, imm, reg)
#define         psrld_m2r(var,reg)          mmx_m2r (psrld, var, reg)
#define         psrld_r2r(regs,regd)        mmx_r2r (psrld, regs, regd)
#define         psrlq_i2r(imm,reg)          mmx_i2r (psrlq, imm, reg)
#define         psrlq_m2r(var,reg)          mmx_m2r (psrlq, var, reg)
#define         psrlq_r2r(regs,regd)        mmx_r2r (psrlq, regs, regd)
#define         psrlw_i2r(imm,reg)          mmx_i2r (psrlw, imm, reg)
#define         psrlw_m2r(var,reg)          mmx_m2r (psrlw, var, reg)
#define         psrlw_r2r(regs,regd)        mmx_r2r (psrlw, regs, regd)

#define         psubb_m2r(var,reg)          mmx_m2r (psubb, var, reg)
#define         psubb_r2r(regs,regd)        mmx_r2r (psubb, regs, regd)
#define         psubd_m2r(var,reg)          mmx_m2r (psubd, var, reg)
#define         psubd_r2r(regs,regd)        mmx_r2r (psubd, regs, regd)
#define         psubw_m2r(var,reg)          mmx_m2r (psubw, var, reg)
#define         psubw_r2r(regs,regd)        mmx_r2r (psubw, regs, regd)

#define         psubsb_m2r(var,reg)         mmx_m2r (psubsb, var, reg)
#define         psubsb_r2r(regs,regd)       mmx_r2r (psubsb, regs, regd)
#define         psubsw_m2r(var,reg)         mmx_m2r (psubsw, var, reg)
#define         psubsw_r2r(regs,regd)       mmx_r2r (psubsw, regs, regd)

#define         psubusb_m2r(var,reg)        mmx_m2r (psubusb, var, reg)
#define         psubusb_r2r(regs,regd)      mmx_r2r (psubusb, regs, regd)
#define         psubusw_m2r(var,reg)        mmx_m2r (psubusw, var, reg)
#define         psubusw_r2r(regs,regd)      mmx_r2r (psubusw, regs, regd)

#define         punpckhbw_m2r(var,reg)      mmx_m2r (punpckhbw, var, reg)
#define         punpckhbw_r2r(regs,regd)    mmx_r2r (punpckhbw, regs, regd)
#define         punpckhdq_m2r(var,reg)      mmx_m2r (punpckhdq, var, reg)
#define         punpckhdq_r2r(regs,regd)    mmx_r2r (punpckhdq, regs, regd)
#define         punpckhwd_m2r(var,reg)      mmx_m2r (punpckhwd, var, reg)
#define         punpckhwd_r2r(regs,regd)    mmx_r2r (punpckhwd, regs, regd)

#define         punpcklbw_m2r(var,reg)      mmx_m2r (punpcklbw, var, reg)
#define         punpcklbw_r2r(regs,regd)    mmx_r2r (punpcklbw, regs, regd)
#define         punpckldq_m2r(var,reg)      mmx_m2r (punpckldq, var, reg)
#define         punpckldq_r2r(regs,regd)    mmx_r2r (punpckldq, regs, regd)
#define         punpcklwd_m2r(var,reg)      mmx_m2r (punpcklwd, var, reg)
#define         punpcklwd_r2r(regs,regd)    mmx_r2r (punpcklwd, regs, regd)

#define         pxor_m2r(var,reg)           mmx_m2r (pxor, var, reg)
#define         pxor_r2r(regs,regd)         mmx_r2r (pxor, regs, regd)

/* 3DNOW extensions */

#define         pavgusb_m2r(var,reg)        mmx_m2r (pavgusb, var, reg)
#define         pavgusb_r2r(regs,regd)      mmx_r2r (pavgusb, regs, regd)

/* AMD MMX extensions - also available in intel SSE */

/* Variants that additionally take an immediate operand. */
#define         mmx_m2ri(op,mem,reg,imm) \
        __asm__ volatile (#op " %1, %0, %%" #reg \
                              : /* nothing */ \
                              : "m" (mem), "i" (imm))
#define         mmx_r2ri(op,regs,regd,imm) \
        __asm__ volatile (#op " %0, %%" #regs ", %%" #regd \
                              : /* nothing */ \
                              : "i" (imm) )

#define         mmx_fetch(mem,hint) \
        __asm__ volatile ("prefetch" #hint " %0" \
                              : /* nothing */ \
                              : "m" (mem))

#define         maskmovq(regs,maskreg)      mmx_r2ri (maskmovq, regs, maskreg)

#define         movntq_r2m(mmreg,var)       mmx_r2m (movntq, mmreg, var)

#define         pavgb_m2r(var,reg)          mmx_m2r (pavgb, var, reg)
#define         pavgb_r2r(regs,regd)        mmx_r2r (pavgb, regs, regd)
#define         pavgw_m2r(var,reg)          mmx_m2r (pavgw, var, reg)
#define         pavgw_r2r(regs,regd)        mmx_r2r (pavgw, regs, regd)

#define         pextrw_r2r(mmreg,reg,imm)   mmx_r2ri (pextrw, mmreg, reg, imm)

#define         pinsrw_r2r(reg,mmreg,imm)   mmx_r2ri (pinsrw, reg, mmreg, imm)

#define         pmaxsw_m2r(var,reg)         mmx_m2r (pmaxsw, var, reg)
#define         pmaxsw_r2r(regs,regd)       mmx_r2r (pmaxsw, regs, regd)

#define         pmaxub_m2r(var,reg)         mmx_m2r (pmaxub, var, reg)
#define         pmaxub_r2r(regs,regd)       mmx_r2r (pmaxub, regs, regd)

#define         pminsw_m2r(var,reg)         mmx_m2r (pminsw, var, reg)
#define         pminsw_r2r(regs,regd)       mmx_r2r (pminsw, regs, regd)

#define         pminub_m2r(var,reg)         mmx_m2r (pminub, var, reg)
#define         pminub_r2r(regs,regd)       mmx_r2r (pminub, regs, regd)

/* NOTE(review): this emits "movmskps", not "pmovmskb" — looks suspicious
 * for an MMX byte-mask extraction; confirm no caller relies on it before
 * touching (header is deprecated anyway). */
#define         pmovmskb(mmreg,reg) \
        __asm__ volatile ("movmskps %" #mmreg ", %" #reg)

#define         pmulhuw_m2r(var,reg)        mmx_m2r (pmulhuw, var, reg)
#define         pmulhuw_r2r(regs,regd)      mmx_r2r (pmulhuw, regs, regd)

#define         prefetcht0(mem)             mmx_fetch (mem, t0)
#define         prefetcht1(mem)             mmx_fetch (mem, t1)
#define         prefetcht2(mem)             mmx_fetch (mem, t2)
#define         prefetchnta(mem)            mmx_fetch (mem, nta)

#define         psadbw_m2r(var,reg)         mmx_m2r (psadbw, var, reg)
#define         psadbw_r2r(regs,regd)       mmx_r2r (psadbw, regs, regd)

#define         pshufw_m2r(var,reg,imm)     mmx_m2ri(pshufw, var, reg, imm)
#define         pshufw_r2r(regs,regd,imm)   mmx_r2ri(pshufw, regs, regd, imm)

#define         sfence() __asm__ volatile ("sfence\n\t")

/* SSE2 */

#define         pshufhw_m2r(var,reg,imm)    mmx_m2ri(pshufhw, var, reg, imm)
#define         pshufhw_r2r(regs,regd,imm)  mmx_r2ri(pshufhw, regs, regd, imm)
#define         pshuflw_m2r(var,reg,imm)    mmx_m2ri(pshuflw, var, reg, imm)
#define         pshuflw_r2r(regs,regd,imm)  mmx_r2ri(pshuflw, regs, regd, imm)

#define         pshufd_r2r(regs,regd,imm)   mmx_r2ri(pshufd, regs, regd, imm)

#define         movdqa_m2r(var,reg)         mmx_m2r (movdqa, var, reg)
#define         movdqa_r2m(reg,var)         mmx_r2m (movdqa, reg, var)
#define         movdqa_r2r(regs,regd)       mmx_r2r (movdqa, regs, regd)
#define         movdqu_m2r(var,reg)         mmx_m2r (movdqu, var, reg)
#define         movdqu_r2m(reg,var)         mmx_r2m (movdqu, reg, var)
#define         movdqu_r2r(regs,regd)       mmx_r2r (movdqu, regs, regd)
#define         pmullw_r2m(reg,var)         mmx_r2m (pmullw, reg, var)
#define         pslldq_i2r(imm,reg)         mmx_i2r (pslldq, imm, reg)
#define         psrldq_i2r(imm,reg)         mmx_i2r (psrldq, imm, reg)
#define         punpcklqdq_r2r(regs,regd)   mmx_r2r (punpcklqdq, regs, regd)
#define         punpckhqdq_r2r(regs,regd)   mmx_r2r (punpckhqdq, regs, regd)

#endif /* AVCODEC_X86_MMX_H */
123linslouis-android-video-cutter
jni/libavcodec/x86/mmx.h
C
asf20
13,125
/* * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavcodec/dsputil.h" #include "fft.h" av_cold void ff_fft_init_mmx(FFTContext *s) { #if HAVE_YASM int has_vectors = mm_support(); if (has_vectors & FF_MM_SSE && HAVE_SSE) { /* SSE for P3/P4/K8 */ s->imdct_calc = ff_imdct_calc_sse; s->imdct_half = ff_imdct_half_sse; s->fft_permute = ff_fft_permute_sse; s->fft_calc = ff_fft_calc_sse; } else if (has_vectors & FF_MM_3DNOWEXT && HAVE_AMD3DNOWEXT) { /* 3DNowEx for K7 */ s->imdct_calc = ff_imdct_calc_3dn2; s->imdct_half = ff_imdct_half_3dn2; s->fft_calc = ff_fft_calc_3dn2; } else if (has_vectors & FF_MM_3DNOW && HAVE_AMD3DNOW) { /* 3DNow! for K6-2/3 */ s->imdct_calc = ff_imdct_calc_3dn; s->imdct_half = ff_imdct_half_3dn; s->fft_calc = ff_fft_calc_3dn; } #endif }
123linslouis-android-video-cutter
jni/libavcodec/x86/fft.c
C
asf20
1,624
/* * vp6dsp SSE2 function declarations * Copyright (c) 2009 Zuxy Meng <zuxy.meng@gmail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_X86_VP6DSP_SSE2_H #define AVCODEC_X86_VP6DSP_SSE2_H #include <stdint.h> void ff_vp6_filter_diag4_sse2(uint8_t *dst, uint8_t *src, int stride, const int16_t *h_weights,const int16_t *v_weights); #endif /* AVCODEC_X86_VP6DSP_SSE2_H */
123linslouis-android-video-cutter
jni/libavcodec/x86/vp6dsp_sse2.h
C
asf20
1,140
/* * DSP utils mmx functions are compiled twice for rnd/no_rnd * Copyright (c) 2000, 2001 Fabrice Bellard * Copyright (c) 2003-2004 Michael Niedermayer <michaelni@gmx.at> * * MMX optimization by Nick Kurshev <nickols_k@mail.ru> * mostly rewritten by Michael Niedermayer <michaelni@gmx.at> * and improved by Zdenek Kabelac <kabi@users.sf.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ // put_pixels static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { MOVQ_BFE(mm6); __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" ASMALIGN(3) "1: \n\t" "movq (%1), %%mm0 \n\t" "movq 1(%1), %%mm1 \n\t" "movq (%1, %3), %%mm2 \n\t" "movq 1(%1, %3), %%mm3 \n\t" PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) "movq %%mm4, (%2) \n\t" "movq %%mm5, (%2, %3) \n\t" "add %%"REG_a", %1 \n\t" "add %%"REG_a", %2 \n\t" "movq (%1), %%mm0 \n\t" "movq 1(%1), %%mm1 \n\t" "movq (%1, %3), %%mm2 \n\t" "movq 1(%1, %3), %%mm3 \n\t" PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) "movq %%mm4, (%2) \n\t" "movq %%mm5, (%2, %3) \n\t" "add %%"REG_a", %1 \n\t" "add %%"REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels), "+D"(block) :"r"((x86_reg)line_size) :REG_a, "memory"); } static void av_unused DEF(put, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, 
int dstStride, int src1Stride, int h) { MOVQ_BFE(mm6); __asm__ volatile( "testl $1, %0 \n\t" " jz 1f \n\t" "movq (%1), %%mm0 \n\t" "movq (%2), %%mm1 \n\t" "add %4, %1 \n\t" "add $8, %2 \n\t" PAVGB(%%mm0, %%mm1, %%mm4, %%mm6) "movq %%mm4, (%3) \n\t" "add %5, %3 \n\t" "decl %0 \n\t" ASMALIGN(3) "1: \n\t" "movq (%1), %%mm0 \n\t" "movq (%2), %%mm1 \n\t" "add %4, %1 \n\t" "movq (%1), %%mm2 \n\t" "movq 8(%2), %%mm3 \n\t" "add %4, %1 \n\t" PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) "movq %%mm4, (%3) \n\t" "add %5, %3 \n\t" "movq %%mm5, (%3) \n\t" "add %5, %3 \n\t" "movq (%1), %%mm0 \n\t" "movq 16(%2), %%mm1 \n\t" "add %4, %1 \n\t" "movq (%1), %%mm2 \n\t" "movq 24(%2), %%mm3 \n\t" "add %4, %1 \n\t" "add $32, %2 \n\t" PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) "movq %%mm4, (%3) \n\t" "add %5, %3 \n\t" "movq %%mm5, (%3) \n\t" "add %5, %3 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" #if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) #else :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) #endif :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) :"memory"); } static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { MOVQ_BFE(mm6); __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" ASMALIGN(3) "1: \n\t" "movq (%1), %%mm0 \n\t" "movq 1(%1), %%mm1 \n\t" "movq (%1, %3), %%mm2 \n\t" "movq 1(%1, %3), %%mm3 \n\t" PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) "movq %%mm4, (%2) \n\t" "movq %%mm5, (%2, %3) \n\t" "movq 8(%1), %%mm0 \n\t" "movq 9(%1), %%mm1 \n\t" "movq 8(%1, %3), %%mm2 \n\t" "movq 9(%1, %3), %%mm3 \n\t" PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) "movq %%mm4, 8(%2) \n\t" "movq %%mm5, 8(%2, %3) \n\t" "add %%"REG_a", %1 \n\t" "add %%"REG_a", %2 \n\t" "movq (%1), %%mm0 \n\t" "movq 1(%1), %%mm1 \n\t" "movq (%1, %3), %%mm2 \n\t" "movq 1(%1, %3), %%mm3 \n\t" PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) "movq %%mm4, (%2) \n\t" "movq 
%%mm5, (%2, %3) \n\t" "movq 8(%1), %%mm0 \n\t" "movq 9(%1), %%mm1 \n\t" "movq 8(%1, %3), %%mm2 \n\t" "movq 9(%1, %3), %%mm3 \n\t" PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) "movq %%mm4, 8(%2) \n\t" "movq %%mm5, 8(%2, %3) \n\t" "add %%"REG_a", %1 \n\t" "add %%"REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels), "+D"(block) :"r"((x86_reg)line_size) :REG_a, "memory"); } static void av_unused DEF(put, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) { MOVQ_BFE(mm6); __asm__ volatile( "testl $1, %0 \n\t" " jz 1f \n\t" "movq (%1), %%mm0 \n\t" "movq (%2), %%mm1 \n\t" "movq 8(%1), %%mm2 \n\t" "movq 8(%2), %%mm3 \n\t" "add %4, %1 \n\t" "add $16, %2 \n\t" PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) "movq %%mm4, (%3) \n\t" "movq %%mm5, 8(%3) \n\t" "add %5, %3 \n\t" "decl %0 \n\t" ASMALIGN(3) "1: \n\t" "movq (%1), %%mm0 \n\t" "movq (%2), %%mm1 \n\t" "movq 8(%1), %%mm2 \n\t" "movq 8(%2), %%mm3 \n\t" "add %4, %1 \n\t" PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) "movq %%mm4, (%3) \n\t" "movq %%mm5, 8(%3) \n\t" "add %5, %3 \n\t" "movq (%1), %%mm0 \n\t" "movq 16(%2), %%mm1 \n\t" "movq 8(%1), %%mm2 \n\t" "movq 24(%2), %%mm3 \n\t" "add %4, %1 \n\t" PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) "movq %%mm4, (%3) \n\t" "movq %%mm5, 8(%3) \n\t" "add %5, %3 \n\t" "add $32, %2 \n\t" "subl $2, %0 \n\t" "jnz 1b \n\t" #if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) #else :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) #endif :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) :"memory"); } static void DEF(put, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { MOVQ_BFE(mm6); __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" "movq (%1), %%mm0 \n\t" ASMALIGN(3) "1: \n\t" "movq (%1, %3), %%mm1 \n\t" "movq (%1, %%"REG_a"),%%mm2 \n\t" PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5) "movq 
%%mm4, (%2) \n\t" "movq %%mm5, (%2, %3) \n\t" "add %%"REG_a", %1 \n\t" "add %%"REG_a", %2 \n\t" "movq (%1, %3), %%mm1 \n\t" "movq (%1, %%"REG_a"),%%mm0 \n\t" PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5) "movq %%mm4, (%2) \n\t" "movq %%mm5, (%2, %3) \n\t" "add %%"REG_a", %1 \n\t" "add %%"REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels), "+D"(block) :"r"((x86_reg)line_size) :REG_a, "memory"); } static void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { MOVQ_ZERO(mm7); SET_RND(mm6); // =2 for rnd and =1 for no_rnd version __asm__ volatile( "movq (%1), %%mm0 \n\t" "movq 1(%1), %%mm4 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm4, %%mm5 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpcklbw %%mm7, %%mm4 \n\t" "punpckhbw %%mm7, %%mm1 \n\t" "punpckhbw %%mm7, %%mm5 \n\t" "paddusw %%mm0, %%mm4 \n\t" "paddusw %%mm1, %%mm5 \n\t" "xor %%"REG_a", %%"REG_a" \n\t" "add %3, %1 \n\t" ASMALIGN(3) "1: \n\t" "movq (%1, %%"REG_a"), %%mm0 \n\t" "movq 1(%1, %%"REG_a"), %%mm2 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm2, %%mm3 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" "punpckhbw %%mm7, %%mm1 \n\t" "punpckhbw %%mm7, %%mm3 \n\t" "paddusw %%mm2, %%mm0 \n\t" "paddusw %%mm3, %%mm1 \n\t" "paddusw %%mm6, %%mm4 \n\t" "paddusw %%mm6, %%mm5 \n\t" "paddusw %%mm0, %%mm4 \n\t" "paddusw %%mm1, %%mm5 \n\t" "psrlw $2, %%mm4 \n\t" "psrlw $2, %%mm5 \n\t" "packuswb %%mm5, %%mm4 \n\t" "movq %%mm4, (%2, %%"REG_a") \n\t" "add %3, %%"REG_a" \n\t" "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3 "movq 1(%1, %%"REG_a"), %%mm4 \n\t" "movq %%mm2, %%mm3 \n\t" "movq %%mm4, %%mm5 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" "punpcklbw %%mm7, %%mm4 \n\t" "punpckhbw %%mm7, %%mm3 \n\t" "punpckhbw %%mm7, %%mm5 \n\t" "paddusw %%mm2, %%mm4 \n\t" "paddusw %%mm3, %%mm5 \n\t" "paddusw %%mm6, %%mm0 \n\t" "paddusw %%mm6, %%mm1 \n\t" "paddusw %%mm4, %%mm0 \n\t" "paddusw %%mm5, %%mm1 \n\t" "psrlw $2, %%mm0 \n\t" "psrlw $2, %%mm1 \n\t" "packuswb %%mm1, %%mm0 
\n\t" "movq %%mm0, (%2, %%"REG_a") \n\t" "add %3, %%"REG_a" \n\t" "subl $2, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels) :"D"(block), "r"((x86_reg)line_size) :REG_a, "memory"); } // avg_pixels static void av_unused DEF(avg, pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { MOVQ_BFE(mm6); JUMPALIGN(); do { __asm__ volatile( "movd %0, %%mm0 \n\t" "movd %1, %%mm1 \n\t" OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) "movd %%mm2, %0 \n\t" :"+m"(*block) :"m"(*pixels) :"memory"); pixels += line_size; block += line_size; } while (--h); } // in case more speed is needed - unroling would certainly help static void DEF(avg, pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { MOVQ_BFE(mm6); JUMPALIGN(); do { __asm__ volatile( "movq %0, %%mm0 \n\t" "movq %1, %%mm1 \n\t" OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) "movq %%mm2, %0 \n\t" :"+m"(*block) :"m"(*pixels) :"memory"); pixels += line_size; block += line_size; } while (--h); } static void DEF(avg, pixels16)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { MOVQ_BFE(mm6); JUMPALIGN(); do { __asm__ volatile( "movq %0, %%mm0 \n\t" "movq %1, %%mm1 \n\t" OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) "movq %%mm2, %0 \n\t" "movq 8%0, %%mm0 \n\t" "movq 8%1, %%mm1 \n\t" OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) "movq %%mm2, 8%0 \n\t" :"+m"(*block) :"m"(*pixels) :"memory"); pixels += line_size; block += line_size; } while (--h); } static void DEF(avg, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { MOVQ_BFE(mm6); JUMPALIGN(); do { __asm__ volatile( "movq %1, %%mm0 \n\t" "movq 1%1, %%mm1 \n\t" "movq %0, %%mm3 \n\t" PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) "movq %%mm0, %0 \n\t" :"+m"(*block) :"m"(*pixels) :"memory"); pixels += line_size; block += line_size; } while (--h); } static av_unused void DEF(avg, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) { MOVQ_BFE(mm6); JUMPALIGN(); do { __asm__ volatile( "movq %1, %%mm0 
\n\t" "movq %2, %%mm1 \n\t" "movq %0, %%mm3 \n\t" PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) "movq %%mm0, %0 \n\t" :"+m"(*dst) :"m"(*src1), "m"(*src2) :"memory"); dst += dstStride; src1 += src1Stride; src2 += 8; } while (--h); } static void DEF(avg, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { MOVQ_BFE(mm6); JUMPALIGN(); do { __asm__ volatile( "movq %1, %%mm0 \n\t" "movq 1%1, %%mm1 \n\t" "movq %0, %%mm3 \n\t" PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) "movq %%mm0, %0 \n\t" "movq 8%1, %%mm0 \n\t" "movq 9%1, %%mm1 \n\t" "movq 8%0, %%mm3 \n\t" PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) "movq %%mm0, 8%0 \n\t" :"+m"(*block) :"m"(*pixels) :"memory"); pixels += line_size; block += line_size; } while (--h); } static av_unused void DEF(avg, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) { MOVQ_BFE(mm6); JUMPALIGN(); do { __asm__ volatile( "movq %1, %%mm0 \n\t" "movq %2, %%mm1 \n\t" "movq %0, %%mm3 \n\t" PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) "movq %%mm0, %0 \n\t" "movq 8%1, %%mm0 \n\t" "movq 8%2, %%mm1 \n\t" "movq 8%0, %%mm3 \n\t" PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) "movq %%mm0, 8%0 \n\t" :"+m"(*dst) :"m"(*src1), "m"(*src2) :"memory"); dst += dstStride; src1 += src1Stride; src2 += 16; } while (--h); } static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { MOVQ_BFE(mm6); __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" "movq (%1), %%mm0 \n\t" ASMALIGN(3) "1: \n\t" "movq (%1, %3), %%mm1 \n\t" "movq (%1, %%"REG_a"), %%mm2 \n\t" PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5) "movq (%2), %%mm3 \n\t" OP_AVG(%%mm3, %%mm4, %%mm0, %%mm6) "movq (%2, %3), %%mm3 \n\t" OP_AVG(%%mm3, %%mm5, %%mm1, %%mm6) "movq %%mm0, (%2) \n\t" "movq %%mm1, (%2, %3) \n\t" "add %%"REG_a", %1 \n\t" "add %%"REG_a", %2 \n\t" "movq (%1, %3), %%mm1 
\n\t" "movq (%1, %%"REG_a"), %%mm0 \n\t" PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5) "movq (%2), %%mm3 \n\t" OP_AVG(%%mm3, %%mm4, %%mm2, %%mm6) "movq (%2, %3), %%mm3 \n\t" OP_AVG(%%mm3, %%mm5, %%mm1, %%mm6) "movq %%mm2, (%2) \n\t" "movq %%mm1, (%2, %3) \n\t" "add %%"REG_a", %1 \n\t" "add %%"REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels), "+D"(block) :"r"((x86_reg)line_size) :REG_a, "memory"); } // this routine is 'slightly' suboptimal but mostly unused static void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) { MOVQ_ZERO(mm7); SET_RND(mm6); // =2 for rnd and =1 for no_rnd version __asm__ volatile( "movq (%1), %%mm0 \n\t" "movq 1(%1), %%mm4 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm4, %%mm5 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpcklbw %%mm7, %%mm4 \n\t" "punpckhbw %%mm7, %%mm1 \n\t" "punpckhbw %%mm7, %%mm5 \n\t" "paddusw %%mm0, %%mm4 \n\t" "paddusw %%mm1, %%mm5 \n\t" "xor %%"REG_a", %%"REG_a" \n\t" "add %3, %1 \n\t" ASMALIGN(3) "1: \n\t" "movq (%1, %%"REG_a"), %%mm0 \n\t" "movq 1(%1, %%"REG_a"), %%mm2 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm2, %%mm3 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" "punpckhbw %%mm7, %%mm1 \n\t" "punpckhbw %%mm7, %%mm3 \n\t" "paddusw %%mm2, %%mm0 \n\t" "paddusw %%mm3, %%mm1 \n\t" "paddusw %%mm6, %%mm4 \n\t" "paddusw %%mm6, %%mm5 \n\t" "paddusw %%mm0, %%mm4 \n\t" "paddusw %%mm1, %%mm5 \n\t" "psrlw $2, %%mm4 \n\t" "psrlw $2, %%mm5 \n\t" "movq (%2, %%"REG_a"), %%mm3 \n\t" "packuswb %%mm5, %%mm4 \n\t" "pcmpeqd %%mm2, %%mm2 \n\t" "paddb %%mm2, %%mm2 \n\t" OP_AVG(%%mm3, %%mm4, %%mm5, %%mm2) "movq %%mm5, (%2, %%"REG_a") \n\t" "add %3, %%"REG_a" \n\t" "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3 "movq 1(%1, %%"REG_a"), %%mm4 \n\t" "movq %%mm2, %%mm3 \n\t" "movq %%mm4, %%mm5 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" "punpcklbw %%mm7, %%mm4 \n\t" "punpckhbw %%mm7, %%mm3 \n\t" "punpckhbw %%mm7, %%mm5 \n\t" "paddusw %%mm2, %%mm4 \n\t" "paddusw %%mm3, %%mm5 
\n\t" "paddusw %%mm6, %%mm0 \n\t" "paddusw %%mm6, %%mm1 \n\t" "paddusw %%mm4, %%mm0 \n\t" "paddusw %%mm5, %%mm1 \n\t" "psrlw $2, %%mm0 \n\t" "psrlw $2, %%mm1 \n\t" "movq (%2, %%"REG_a"), %%mm3 \n\t" "packuswb %%mm1, %%mm0 \n\t" "pcmpeqd %%mm2, %%mm2 \n\t" "paddb %%mm2, %%mm2 \n\t" OP_AVG(%%mm3, %%mm0, %%mm1, %%mm2) "movq %%mm1, (%2, %%"REG_a") \n\t" "add %3, %%"REG_a" \n\t" "subl $2, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels) :"D"(block), "r"((x86_reg)line_size) :REG_a, "memory"); } //FIXME optimize static void DEF(put, pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ DEF(put, pixels8_y2)(block , pixels , line_size, h); DEF(put, pixels8_y2)(block+8, pixels+8, line_size, h); } static void DEF(put, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ DEF(put, pixels8_xy2)(block , pixels , line_size, h); DEF(put, pixels8_xy2)(block+8, pixels+8, line_size, h); } static void DEF(avg, pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ DEF(avg, pixels8_y2)(block , pixels , line_size, h); DEF(avg, pixels8_y2)(block+8, pixels+8, line_size, h); } static void DEF(avg, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ DEF(avg, pixels8_xy2)(block , pixels , line_size, h); DEF(avg, pixels8_xy2)(block+8, pixels+8, line_size, h); }
123linslouis-android-video-cutter
jni/libavcodec/x86/dsputil_mmx_rnd_template.c
C
asf20
23,213
/* * Copyright (c) 2008 Konstantin Shishkov, Mathieu Velten * * MMX-optimized DSP functions for RV40, based on H.264 optimizations by * Michael Niedermayer and Loren Merritt * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "dsputil_mmx.h" /* bias interleaved with bias div 8, use p+1 to access bias div 8 */ DECLARE_ALIGNED(8, static const uint64_t, rv40_bias_reg)[4][8] = { { 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0010001000100010ULL, 0x0002000200020002ULL, 0x0020002000200020ULL, 0x0004000400040004ULL, 0x0010001000100010ULL, 0x0002000200020002ULL }, { 0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL, 0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL }, { 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0020002000200020ULL, 0x0004000400040004ULL, 0x0010001000100010ULL, 0x0002000200020002ULL, 0x0020002000200020ULL, 0x0004000400040004ULL }, { 0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL, 0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL } }; static void put_rv40_chroma_mc8_mmx(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { put_h264_chroma_generic_mc8_mmx(dst, src, 
stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]); } static void put_rv40_chroma_mc4_mmx(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { put_h264_chroma_generic_mc4_mmx(dst, src, stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]); } static void avg_rv40_chroma_mc8_mmx2(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]); } static void avg_rv40_chroma_mc4_mmx2(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { avg_h264_chroma_generic_mc4_mmx2(dst, src, stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]); } static void avg_rv40_chroma_mc8_3dnow(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { avg_h264_chroma_generic_mc8_3dnow(dst, src, stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]); } static void avg_rv40_chroma_mc4_3dnow(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { avg_h264_chroma_generic_mc4_3dnow(dst, src, stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]); }
123linslouis-android-video-cutter
jni/libavcodec/x86/rv40dsp_mmx.c
C
asf20
3,213
/* * Copyright (c) 2005 Zoltan Hidvegi <hzoli -a- hzoli -d- com>, * Loren Merritt * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * MMX optimized version of (put|avg)_h264_chroma_mc8. * H264_CHROMA_MC8_TMPL must be defined to the desired function name * H264_CHROMA_OP must be defined to empty for put and pavgb/pavgusb for avg * H264_CHROMA_MC8_MV0 must be defined to a (put|avg)_pixels8 function */ static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y, const uint64_t *rnd_reg) { DECLARE_ALIGNED(8, uint64_t, AA); DECLARE_ALIGNED(8, uint64_t, DD); int i; if(y==0 && x==0) { /* no filter needed */ H264_CHROMA_MC8_MV0(dst, src, stride, h); return; } assert(x<8 && y<8 && x>=0 && y>=0); if(y==0 || x==0) { /* 1 dimensional filter only */ const int dxy = x ? 
1 : stride; __asm__ volatile( "movd %0, %%mm5\n\t" "movq %1, %%mm4\n\t" "movq %2, %%mm6\n\t" /* mm6 = rnd >> 3 */ "punpcklwd %%mm5, %%mm5\n\t" "punpckldq %%mm5, %%mm5\n\t" /* mm5 = B = x */ "pxor %%mm7, %%mm7\n\t" "psubw %%mm5, %%mm4\n\t" /* mm4 = A = 8-x */ :: "rm"(x+y), "m"(ff_pw_8), "m"(*(rnd_reg+1))); for(i=0; i<h; i++) { __asm__ volatile( /* mm0 = src[0..7], mm1 = src[1..8] */ "movq %0, %%mm0\n\t" "movq %1, %%mm2\n\t" :: "m"(src[0]), "m"(src[dxy])); __asm__ volatile( /* [mm0,mm1] = A * src[0..7] */ /* [mm2,mm3] = B * src[1..8] */ "movq %%mm0, %%mm1\n\t" "movq %%mm2, %%mm3\n\t" "punpcklbw %%mm7, %%mm0\n\t" "punpckhbw %%mm7, %%mm1\n\t" "punpcklbw %%mm7, %%mm2\n\t" "punpckhbw %%mm7, %%mm3\n\t" "pmullw %%mm4, %%mm0\n\t" "pmullw %%mm4, %%mm1\n\t" "pmullw %%mm5, %%mm2\n\t" "pmullw %%mm5, %%mm3\n\t" /* dst[0..7] = (A * src[0..7] + B * src[1..8] + (rnd >> 3)) >> 3 */ "paddw %%mm6, %%mm0\n\t" "paddw %%mm6, %%mm1\n\t" "paddw %%mm2, %%mm0\n\t" "paddw %%mm3, %%mm1\n\t" "psrlw $3, %%mm0\n\t" "psrlw $3, %%mm1\n\t" "packuswb %%mm1, %%mm0\n\t" H264_CHROMA_OP(%0, %%mm0) "movq %%mm0, %0\n\t" : "=m" (dst[0])); src += stride; dst += stride; } return; } /* general case, bilinear */ __asm__ volatile("movd %2, %%mm4\n\t" "movd %3, %%mm6\n\t" "punpcklwd %%mm4, %%mm4\n\t" "punpcklwd %%mm6, %%mm6\n\t" "punpckldq %%mm4, %%mm4\n\t" /* mm4 = x words */ "punpckldq %%mm6, %%mm6\n\t" /* mm6 = y words */ "movq %%mm4, %%mm5\n\t" "pmullw %%mm6, %%mm4\n\t" /* mm4 = x * y */ "psllw $3, %%mm5\n\t" "psllw $3, %%mm6\n\t" "movq %%mm5, %%mm7\n\t" "paddw %%mm6, %%mm7\n\t" "movq %%mm4, %1\n\t" /* DD = x * y */ "psubw %%mm4, %%mm5\n\t" /* mm5 = B = 8x - xy */ "psubw %%mm4, %%mm6\n\t" /* mm6 = C = 8y - xy */ "paddw %4, %%mm4\n\t" "psubw %%mm7, %%mm4\n\t" /* mm4 = A = xy - (8x+8y) + 64 */ "pxor %%mm7, %%mm7\n\t" "movq %%mm4, %0\n\t" : "=m" (AA), "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64)); __asm__ volatile( /* mm0 = src[0..7], mm1 = src[1..8] */ "movq %0, %%mm0\n\t" "movq %1, %%mm1\n\t" : : "m" 
(src[0]), "m" (src[1])); for(i=0; i<h; i++) { src += stride; __asm__ volatile( /* mm2 = A * src[0..3] + B * src[1..4] */ /* mm3 = A * src[4..7] + B * src[5..8] */ "movq %%mm0, %%mm2\n\t" "movq %%mm1, %%mm3\n\t" "punpckhbw %%mm7, %%mm0\n\t" "punpcklbw %%mm7, %%mm1\n\t" "punpcklbw %%mm7, %%mm2\n\t" "punpckhbw %%mm7, %%mm3\n\t" "pmullw %0, %%mm0\n\t" "pmullw %0, %%mm2\n\t" "pmullw %%mm5, %%mm1\n\t" "pmullw %%mm5, %%mm3\n\t" "paddw %%mm1, %%mm2\n\t" "paddw %%mm0, %%mm3\n\t" : : "m" (AA)); __asm__ volatile( /* [mm2,mm3] += C * src[0..7] */ "movq %0, %%mm0\n\t" "movq %%mm0, %%mm1\n\t" "punpcklbw %%mm7, %%mm0\n\t" "punpckhbw %%mm7, %%mm1\n\t" "pmullw %%mm6, %%mm0\n\t" "pmullw %%mm6, %%mm1\n\t" "paddw %%mm0, %%mm2\n\t" "paddw %%mm1, %%mm3\n\t" : : "m" (src[0])); __asm__ volatile( /* [mm2,mm3] += D * src[1..8] */ "movq %1, %%mm1\n\t" "movq %%mm1, %%mm0\n\t" "movq %%mm1, %%mm4\n\t" "punpcklbw %%mm7, %%mm0\n\t" "punpckhbw %%mm7, %%mm4\n\t" "pmullw %2, %%mm0\n\t" "pmullw %2, %%mm4\n\t" "paddw %%mm0, %%mm2\n\t" "paddw %%mm4, %%mm3\n\t" "movq %0, %%mm0\n\t" : : "m" (src[0]), "m" (src[1]), "m" (DD)); __asm__ volatile( /* dst[0..7] = ([mm2,mm3] + rnd) >> 6 */ "paddw %1, %%mm2\n\t" "paddw %1, %%mm3\n\t" "psrlw $6, %%mm2\n\t" "psrlw $6, %%mm3\n\t" "packuswb %%mm3, %%mm2\n\t" H264_CHROMA_OP(%0, %%mm2) "movq %%mm2, %0\n\t" : "=m" (dst[0]) : "m" (*rnd_reg)); dst+= stride; } } static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y, const uint64_t *rnd_reg) { __asm__ volatile( "pxor %%mm7, %%mm7 \n\t" "movd %5, %%mm2 \n\t" "movd %6, %%mm3 \n\t" "movq "MANGLE(ff_pw_8)", %%mm4\n\t" "movq "MANGLE(ff_pw_8)", %%mm5\n\t" "punpcklwd %%mm2, %%mm2 \n\t" "punpcklwd %%mm3, %%mm3 \n\t" "punpcklwd %%mm2, %%mm2 \n\t" "punpcklwd %%mm3, %%mm3 \n\t" "psubw %%mm2, %%mm4 \n\t" "psubw %%mm3, %%mm5 \n\t" "movd (%1), %%mm0 \n\t" "movd 1(%1), %%mm6 \n\t" "add %3, %1 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpcklbw %%mm7, %%mm6 \n\t" "pmullw %%mm4, %%mm0 
\n\t" "pmullw %%mm2, %%mm6 \n\t" "paddw %%mm0, %%mm6 \n\t" "1: \n\t" "movd (%1), %%mm0 \n\t" "movd 1(%1), %%mm1 \n\t" "add %3, %1 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpcklbw %%mm7, %%mm1 \n\t" "pmullw %%mm4, %%mm0 \n\t" "pmullw %%mm2, %%mm1 \n\t" "paddw %%mm0, %%mm1 \n\t" "movq %%mm1, %%mm0 \n\t" "pmullw %%mm5, %%mm6 \n\t" "pmullw %%mm3, %%mm1 \n\t" "paddw %4, %%mm6 \n\t" "paddw %%mm6, %%mm1 \n\t" "psrlw $6, %%mm1 \n\t" "packuswb %%mm1, %%mm1 \n\t" H264_CHROMA_OP4((%0), %%mm1, %%mm6) "movd %%mm1, (%0) \n\t" "add %3, %0 \n\t" "movd (%1), %%mm6 \n\t" "movd 1(%1), %%mm1 \n\t" "add %3, %1 \n\t" "punpcklbw %%mm7, %%mm6 \n\t" "punpcklbw %%mm7, %%mm1 \n\t" "pmullw %%mm4, %%mm6 \n\t" "pmullw %%mm2, %%mm1 \n\t" "paddw %%mm6, %%mm1 \n\t" "movq %%mm1, %%mm6 \n\t" "pmullw %%mm5, %%mm0 \n\t" "pmullw %%mm3, %%mm1 \n\t" "paddw %4, %%mm0 \n\t" "paddw %%mm0, %%mm1 \n\t" "psrlw $6, %%mm1 \n\t" "packuswb %%mm1, %%mm1 \n\t" H264_CHROMA_OP4((%0), %%mm1, %%mm0) "movd %%mm1, (%0) \n\t" "add %3, %0 \n\t" "sub $2, %2 \n\t" "jnz 1b \n\t" : "+r"(dst), "+r"(src), "+r"(h) : "r"((x86_reg)stride), "m"(*rnd_reg), "m"(x), "m"(y) ); } #ifdef H264_CHROMA_MC2_TMPL static void H264_CHROMA_MC2_TMPL(uint8_t *dst/*align 2*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { int tmp = ((1<<16)-1)*x + 8; int CD= tmp*y; int AB= (tmp<<3) - CD; __asm__ volatile( /* mm5 = {A,B,A,B} */ /* mm6 = {C,D,C,D} */ "movd %0, %%mm5\n\t" "movd %1, %%mm6\n\t" "punpckldq %%mm5, %%mm5\n\t" "punpckldq %%mm6, %%mm6\n\t" "pxor %%mm7, %%mm7\n\t" /* mm0 = src[0,1,1,2] */ "movd %2, %%mm2\n\t" "punpcklbw %%mm7, %%mm2\n\t" "pshufw $0x94, %%mm2, %%mm2\n\t" :: "r"(AB), "r"(CD), "m"(src[0])); __asm__ volatile( "1:\n\t" "add %4, %1\n\t" /* mm1 = A * src[0,1] + B * src[1,2] */ "movq %%mm2, %%mm1\n\t" "pmaddwd %%mm5, %%mm1\n\t" /* mm0 = src[0,1,1,2] */ "movd (%1), %%mm0\n\t" "punpcklbw %%mm7, %%mm0\n\t" "pshufw $0x94, %%mm0, %%mm0\n\t" /* mm1 += C * src[0,1] + D * src[1,2] */ "movq %%mm0, %%mm2\n\t" "pmaddwd %%mm6, 
%%mm0\n\t" "paddw %3, %%mm1\n\t" "paddw %%mm0, %%mm1\n\t" /* dst[0,1] = pack((mm1 + 32) >> 6) */ "psrlw $6, %%mm1\n\t" "packssdw %%mm7, %%mm1\n\t" "packuswb %%mm7, %%mm1\n\t" H264_CHROMA_OP4((%0), %%mm1, %%mm3) "movd %%mm1, %%esi\n\t" "movw %%si, (%0)\n\t" "add %4, %0\n\t" "sub $1, %2\n\t" "jnz 1b\n\t" : "+r" (dst), "+r"(src), "+r"(h) : "m" (ff_pw_32), "r"((x86_reg)stride) : "%esi"); } #endif
123linslouis-android-video-cutter
jni/libavcodec/x86/dsputil_h264_template_mmx.c
C
asf20
10,982
;***************************************************************************** ;* x86util.asm ;***************************************************************************** ;* Copyright (C) 2008 Loren Merritt <lorenm@u.washington.edu> ;* ;* This program is free software; you can redistribute it and/or modify ;* it under the terms of the GNU General Public License as published by ;* the Free Software Foundation; either version 2 of the License, or ;* (at your option) any later version. ;* ;* This program is distributed in the hope that it will be useful, ;* but WITHOUT ANY WARRANTY; without even the implied warranty of ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ;* GNU General Public License for more details. ;* ;* You should have received a copy of the GNU General Public License ;* along with this program; if not, write to the Free Software ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA. ;***************************************************************************** %macro SBUTTERFLY 4 mova m%4, m%2 punpckl%1 m%2, m%3 punpckh%1 m%4, m%3 SWAP %3, %4 %endmacro %macro TRANSPOSE4x4W 5 SBUTTERFLY wd, %1, %2, %5 SBUTTERFLY wd, %3, %4, %5 SBUTTERFLY dq, %1, %3, %5 SBUTTERFLY dq, %2, %4, %5 SWAP %2, %3 %endmacro %macro TRANSPOSE2x4x4W 5 SBUTTERFLY wd, %1, %2, %5 SBUTTERFLY wd, %3, %4, %5 SBUTTERFLY dq, %1, %3, %5 SBUTTERFLY dq, %2, %4, %5 SBUTTERFLY qdq, %1, %2, %5 SBUTTERFLY qdq, %3, %4, %5 %endmacro %macro TRANSPOSE4x4D 5 SBUTTERFLY dq, %1, %2, %5 SBUTTERFLY dq, %3, %4, %5 SBUTTERFLY qdq, %1, %3, %5 SBUTTERFLY qdq, %2, %4, %5 SWAP %2, %3 %endmacro %macro TRANSPOSE8x8W 9-11 %ifdef ARCH_X86_64 SBUTTERFLY wd, %1, %2, %9 SBUTTERFLY wd, %3, %4, %9 SBUTTERFLY wd, %5, %6, %9 SBUTTERFLY wd, %7, %8, %9 SBUTTERFLY dq, %1, %3, %9 SBUTTERFLY dq, %2, %4, %9 SBUTTERFLY dq, %5, %7, %9 SBUTTERFLY dq, %6, %8, %9 SBUTTERFLY qdq, %1, %5, %9 SBUTTERFLY qdq, %2, %6, %9 SBUTTERFLY qdq, %3, %7, %9 SBUTTERFLY qdq, %4, %8, %9 SWAP %2, %5 SWAP %4, 
%7 %else ; in: m0..m7, unless %11 in which case m6 is in %9 ; out: m0..m7, unless %11 in which case m4 is in %10 ; spills into %9 and %10 %if %0<11 movdqa %9, m%7 %endif SBUTTERFLY wd, %1, %2, %7 movdqa %10, m%2 movdqa m%7, %9 SBUTTERFLY wd, %3, %4, %2 SBUTTERFLY wd, %5, %6, %2 SBUTTERFLY wd, %7, %8, %2 SBUTTERFLY dq, %1, %3, %2 movdqa %9, m%3 movdqa m%2, %10 SBUTTERFLY dq, %2, %4, %3 SBUTTERFLY dq, %5, %7, %3 SBUTTERFLY dq, %6, %8, %3 SBUTTERFLY qdq, %1, %5, %3 SBUTTERFLY qdq, %2, %6, %3 movdqa %10, m%2 movdqa m%3, %9 SBUTTERFLY qdq, %3, %7, %2 SBUTTERFLY qdq, %4, %8, %2 SWAP %2, %5 SWAP %4, %7 %if %0<11 movdqa m%5, %10 %endif %endif %endmacro %macro ABS1_MMX 2 ; a, tmp pxor %2, %2 psubw %2, %1 pmaxsw %1, %2 %endmacro %macro ABS2_MMX 4 ; a, b, tmp0, tmp1 pxor %3, %3 pxor %4, %4 psubw %3, %1 psubw %4, %2 pmaxsw %1, %3 pmaxsw %2, %4 %endmacro %macro ABS1_SSSE3 2 pabsw %1, %1 %endmacro %macro ABS2_SSSE3 4 pabsw %1, %1 pabsw %2, %2 %endmacro %define ABS1 ABS1_MMX %define ABS2 ABS2_MMX %macro ABS4 6 ABS2 %1, %2, %5, %6 ABS2 %3, %4, %5, %6 %endmacro %macro SPLATB_MMX 3 movd %1, [%2-3] ;to avoid crossing a cacheline punpcklbw %1, %1 %if mmsize==16 pshuflw %1, %1, 0xff punpcklqdq %1, %1 %else pshufw %1, %1, 0xff %endif %endmacro %macro SPLATB_SSSE3 3 movd %1, [%2-3] pshufb %1, %3 %endmacro %macro PALIGNR_MMX 4 %ifnidn %4, %2 mova %4, %2 %endif %if mmsize == 8 psllq %1, (8-%3)*8 psrlq %4, %3*8 %else pslldq %1, 16-%3 psrldq %4, %3 %endif por %1, %4 %endmacro %macro PALIGNR_SSSE3 4 palignr %1, %2, %3 %endmacro %macro DEINTB 5 ; mask, reg1, mask, reg2, optional src to fill masks from %ifnum %5 mova m%1, m%5 mova m%3, m%5 %else mova m%1, %5 mova m%3, m%1 %endif pand m%1, m%2 ; dst .. y6 .. y4 pand m%3, m%4 ; src .. y6 .. y4 psrlw m%2, 8 ; dst .. y7 .. y5 psrlw m%4, 8 ; src .. y7 .. 
y5 %endmacro %macro SUMSUB_BA 2-3 %if %0==2 paddw %1, %2 paddw %2, %2 psubw %2, %1 %else mova %3, %1 paddw %1, %2 psubw %2, %3 %endif %endmacro %macro SUMSUB_BADC 4-5 %if %0==5 SUMSUB_BA %1, %2, %5 SUMSUB_BA %3, %4, %5 %else paddw %1, %2 paddw %3, %4 paddw %2, %2 paddw %4, %4 psubw %2, %1 psubw %4, %3 %endif %endmacro %macro HADAMARD4_V 4+ SUMSUB_BADC %1, %2, %3, %4 SUMSUB_BADC %1, %3, %2, %4 %endmacro %macro HADAMARD8_V 8+ SUMSUB_BADC %1, %2, %3, %4 SUMSUB_BADC %5, %6, %7, %8 SUMSUB_BADC %1, %3, %2, %4 SUMSUB_BADC %5, %7, %6, %8 SUMSUB_BADC %1, %5, %2, %6 SUMSUB_BADC %3, %7, %4, %8 %endmacro %macro TRANS_SSE2 5-6 ; TRANSPOSE2x2 ; %1: transpose width (d/q) - use SBUTTERFLY qdq for dq ; %2: ord/unord (for compat with sse4, unused) ; %3/%4: source regs ; %5/%6: tmp regs %ifidn %1, d %define mask [mask_10 GLOBAL] %define shift 16 %elifidn %1, q %define mask [mask_1100 GLOBAL] %define shift 32 %endif %if %0==6 ; less dependency if we have two tmp mova m%5, mask ; ff00 mova m%6, m%4 ; x5x4 psll%1 m%4, shift ; x4.. pand m%6, m%5 ; x5.. pandn m%5, m%3 ; ..x0 psrl%1 m%3, shift ; ..x1 por m%4, m%5 ; x4x0 por m%3, m%6 ; x5x1 %else ; more dependency, one insn less. sometimes faster, sometimes not mova m%5, m%4 ; x5x4 psll%1 m%4, shift ; x4.. pxor m%4, m%3 ; (x4^x1)x0 pand m%4, mask ; (x4^x1).. 
pxor m%3, m%4 ; x4x0 psrl%1 m%4, shift ; ..(x1^x4) pxor m%5, m%4 ; x5x1 SWAP %4, %3, %5 %endif %endmacro %macro TRANS_SSE4 5-6 ; see above %ifidn %1, d mova m%5, m%3 %ifidn %2, ord psrl%1 m%3, 16 %endif pblendw m%3, m%4, 10101010b psll%1 m%4, 16 %ifidn %2, ord pblendw m%4, m%5, 01010101b %else psrl%1 m%5, 16 por m%4, m%5 %endif %elifidn %1, q mova m%5, m%3 shufps m%3, m%4, 10001000b shufps m%5, m%4, 11011101b SWAP %4, %5 %endif %endmacro %macro HADAMARD 5-6 ; %1=distance in words (0 for vertical pass, 1/2/4 for horizontal passes) ; %2=sumsub/max/amax (sum and diff / maximum / maximum of absolutes) ; %3/%4: regs ; %5(%6): tmpregs %if %1!=0 ; have to reorder stuff for horizontal op %ifidn %2, sumsub %define ORDER ord ; sumsub needs order because a-b != b-a unless a=b %else %define ORDER unord ; if we just max, order doesn't matter (allows pblendw+or in sse4) %endif %if %1==1 TRANS d, ORDER, %3, %4, %5, %6 %elif %1==2 %if mmsize==8 SBUTTERFLY dq, %3, %4, %5 %else TRANS q, ORDER, %3, %4, %5, %6 %endif %elif %1==4 SBUTTERFLY qdq, %3, %4, %5 %endif %endif %ifidn %2, sumsub SUMSUB_BA m%3, m%4, m%5 %else %ifidn %2, amax %if %0==6 ABS2 m%3, m%4, m%5, m%6 %else ABS1 m%3, m%5 ABS1 m%4, m%5 %endif %endif pmaxsw m%3, m%4 %endif %endmacro %macro HADAMARD2_2D 6-7 sumsub HADAMARD 0, sumsub, %1, %2, %5 HADAMARD 0, sumsub, %3, %4, %5 SBUTTERFLY %6, %1, %2, %5 %ifnum %7 HADAMARD 0, amax, %1, %2, %5, %7 %else HADAMARD 0, %7, %1, %2, %5 %endif SBUTTERFLY %6, %3, %4, %5 %ifnum %7 HADAMARD 0, amax, %3, %4, %5, %7 %else HADAMARD 0, %7, %3, %4, %5 %endif %endmacro %macro HADAMARD4_2D 5-6 sumsub HADAMARD2_2D %1, %2, %3, %4, %5, wd HADAMARD2_2D %1, %3, %2, %4, %5, dq, %6 SWAP %2, %3 %endmacro %macro HADAMARD4_2D_SSE 5-6 sumsub HADAMARD 0, sumsub, %1, %2, %5 ; 1st V row 0 + 1 HADAMARD 0, sumsub, %3, %4, %5 ; 1st V row 2 + 3 SBUTTERFLY wd, %1, %2, %5 ; %1: m0 1+0 %2: m1 1+0 SBUTTERFLY wd, %3, %4, %5 ; %3: m0 3+2 %4: m1 3+2 HADAMARD2_2D %1, %3, %2, %4, %5, dq SBUTTERFLY qdq, %1, %2, %5 HADAMARD 
0, %6, %1, %2, %5 ; 2nd H m1/m0 row 0+1 SBUTTERFLY qdq, %3, %4, %5 HADAMARD 0, %6, %3, %4, %5 ; 2nd H m1/m0 row 2+3 %endmacro %macro HADAMARD8_2D 9-10 sumsub HADAMARD2_2D %1, %2, %3, %4, %9, wd HADAMARD2_2D %5, %6, %7, %8, %9, wd HADAMARD2_2D %1, %3, %2, %4, %9, dq HADAMARD2_2D %5, %7, %6, %8, %9, dq HADAMARD2_2D %1, %5, %3, %7, %9, qdq, %10 HADAMARD2_2D %2, %6, %4, %8, %9, qdq, %10 %ifnidn %10, amax SWAP %2, %5 SWAP %4, %7 %endif %endmacro %macro SUMSUB2_AB 3 mova %3, %1 paddw %1, %1 paddw %1, %2 psubw %3, %2 psubw %3, %2 %endmacro %macro SUMSUB2_BA 3 mova m%3, m%1 paddw m%1, m%2 paddw m%1, m%2 psubw m%2, m%3 psubw m%2, m%3 %endmacro %macro SUMSUBD2_AB 4 mova %4, %1 mova %3, %2 psraw %2, 1 psraw %1, 1 paddw %2, %4 psubw %1, %3 %endmacro %macro DCT4_1D 5 %ifnum %5 SUMSUB_BADC m%4, m%1, m%3, m%2; m%5 SUMSUB_BA m%3, m%4, m%5 SUMSUB2_AB m%1, m%2, m%5 SWAP %1, %3, %4, %5, %2 %else SUMSUB_BADC m%4, m%1, m%3, m%2 SUMSUB_BA m%3, m%4 mova [%5], m%2 SUMSUB2_AB m%1, [%5], m%2 SWAP %1, %3, %4, %2 %endif %endmacro %macro IDCT4_1D 5-6 %ifnum %5 SUMSUBD2_AB m%2, m%4, m%6, m%5 SUMSUB_BA m%3, m%1, m%6 SUMSUB_BADC m%4, m%3, m%2, m%1, m%6 %else SUMSUBD2_AB m%2, m%4, [%5], [%5+16] SUMSUB_BA m%3, m%1 SUMSUB_BADC m%4, m%3, m%2, m%1 %endif SWAP %1, %4, %3 %endmacro %macro LOAD_DIFF 5 %ifidn %3, none movh %1, %4 movh %2, %5 punpcklbw %1, %2 punpcklbw %2, %2 psubw %1, %2 %else movh %1, %4 punpcklbw %1, %3 movh %2, %5 punpcklbw %2, %3 psubw %1, %2 %endif %endmacro %macro LOAD_DIFF8x4_SSE2 8 LOAD_DIFF m%1, m%5, m%6, [%7+%1*FENC_STRIDE], [%8+%1*FDEC_STRIDE] LOAD_DIFF m%2, m%5, m%6, [%7+%2*FENC_STRIDE], [%8+%2*FDEC_STRIDE] LOAD_DIFF m%3, m%5, m%6, [%7+%3*FENC_STRIDE], [%8+%3*FDEC_STRIDE] LOAD_DIFF m%4, m%5, m%6, [%7+%4*FENC_STRIDE], [%8+%4*FDEC_STRIDE] %endmacro %macro LOAD_DIFF8x4_SSSE3 8 ; 4x dst, 1x tmp, 1x mul, 2x ptr movh m%2, [%8+%1*FDEC_STRIDE] movh m%1, [%7+%1*FENC_STRIDE] punpcklbw m%1, m%2 movh m%3, [%8+%2*FDEC_STRIDE] movh m%2, [%7+%2*FENC_STRIDE] punpcklbw m%2, m%3 movh m%4, 
[%8+%3*FDEC_STRIDE] movh m%3, [%7+%3*FENC_STRIDE] punpcklbw m%3, m%4 movh m%5, [%8+%4*FDEC_STRIDE] movh m%4, [%7+%4*FENC_STRIDE] punpcklbw m%4, m%5 pmaddubsw m%1, m%6 pmaddubsw m%2, m%6 pmaddubsw m%3, m%6 pmaddubsw m%4, m%6 %endmacro %macro STORE_DCT 6 movq [%5+%6+ 0], m%1 movq [%5+%6+ 8], m%2 movq [%5+%6+16], m%3 movq [%5+%6+24], m%4 movhps [%5+%6+32], m%1 movhps [%5+%6+40], m%2 movhps [%5+%6+48], m%3 movhps [%5+%6+56], m%4 %endmacro %macro STORE_IDCT 4 movhps [r0-4*FDEC_STRIDE], %1 movh [r0-3*FDEC_STRIDE], %1 movhps [r0-2*FDEC_STRIDE], %2 movh [r0-1*FDEC_STRIDE], %2 movhps [r0+0*FDEC_STRIDE], %3 movh [r0+1*FDEC_STRIDE], %3 movhps [r0+2*FDEC_STRIDE], %4 movh [r0+3*FDEC_STRIDE], %4 %endmacro %macro LOAD_DIFF_8x4P 7-10 r0,r2,0 ; 4x dest, 2x temp, 2x pointer, increment? LOAD_DIFF m%1, m%5, m%7, [%8], [%9] LOAD_DIFF m%2, m%6, m%7, [%8+r1], [%9+r3] LOAD_DIFF m%3, m%5, m%7, [%8+2*r1], [%9+2*r3] LOAD_DIFF m%4, m%6, m%7, [%8+r4], [%9+r5] %if %10 lea %8, [%8+4*r1] lea %9, [%9+4*r3] %endif %endmacro %macro DIFFx2 6-7 movh %3, %5 punpcklbw %3, %4 psraw %1, 6 paddsw %1, %3 movh %3, %6 punpcklbw %3, %4 psraw %2, 6 paddsw %2, %3 packuswb %2, %1 %endmacro %macro STORE_DIFF 4 movh %2, %4 punpcklbw %2, %3 psraw %1, 6 paddsw %1, %2 packuswb %1, %1 movh %4, %1 %endmacro
123linslouis-android-video-cutter
jni/libavcodec/x86/x86util.asm
Assembly
asf20
12,176
/* * MMX optimized LPC DSP utils * Copyright (c) 2007 Loren Merritt * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/x86_cpu.h" #include "dsputil_mmx.h" static void apply_welch_window_sse2(const int32_t *data, int len, double *w_data) { double c = 2.0 / (len-1.0); int n2 = len>>1; x86_reg i = -n2*sizeof(int32_t); x86_reg j = n2*sizeof(int32_t); __asm__ volatile( "movsd %0, %%xmm7 \n\t" "movapd "MANGLE(ff_pd_1)", %%xmm6 \n\t" "movapd "MANGLE(ff_pd_2)", %%xmm5 \n\t" "movlhps %%xmm7, %%xmm7 \n\t" "subpd %%xmm5, %%xmm7 \n\t" "addsd %%xmm6, %%xmm7 \n\t" ::"m"(c) ); #define WELCH(MOVPD, offset)\ __asm__ volatile(\ "1: \n\t"\ "movapd %%xmm7, %%xmm1 \n\t"\ "mulpd %%xmm1, %%xmm1 \n\t"\ "movapd %%xmm6, %%xmm0 \n\t"\ "subpd %%xmm1, %%xmm0 \n\t"\ "pshufd $0x4e, %%xmm0, %%xmm1 \n\t"\ "cvtpi2pd (%3,%0), %%xmm2 \n\t"\ "cvtpi2pd "#offset"*4(%3,%1), %%xmm3 \n\t"\ "mulpd %%xmm0, %%xmm2 \n\t"\ "mulpd %%xmm1, %%xmm3 \n\t"\ "movapd %%xmm2, (%2,%0,2) \n\t"\ MOVPD" %%xmm3, "#offset"*8(%2,%1,2) \n\t"\ "subpd %%xmm5, %%xmm7 \n\t"\ "sub $8, %1 \n\t"\ "add $8, %0 \n\t"\ "jl 1b \n\t"\ :"+&r"(i), "+&r"(j)\ :"r"(w_data+n2), "r"(data+n2)\ ); if(len&1) WELCH("movupd", -1) else WELCH("movapd", -2) #undef WELCH } void ff_lpc_compute_autocorr_sse2(const int32_t *data, int len, int lag, double 
*autoc) { double tmp[len + lag + 2]; double *data1 = tmp + lag; int j; if((x86_reg)data1 & 15) data1++; apply_welch_window_sse2(data, len, data1); for(j=0; j<lag; j++) data1[j-lag]= 0.0; data1[len] = 0.0; for(j=0; j<lag; j+=2){ x86_reg i = -len*sizeof(double); if(j == lag-2) { __asm__ volatile( "movsd "MANGLE(ff_pd_1)", %%xmm0 \n\t" "movsd "MANGLE(ff_pd_1)", %%xmm1 \n\t" "movsd "MANGLE(ff_pd_1)", %%xmm2 \n\t" "1: \n\t" "movapd (%2,%0), %%xmm3 \n\t" "movupd -8(%3,%0), %%xmm4 \n\t" "movapd (%3,%0), %%xmm5 \n\t" "mulpd %%xmm3, %%xmm4 \n\t" "mulpd %%xmm3, %%xmm5 \n\t" "mulpd -16(%3,%0), %%xmm3 \n\t" "addpd %%xmm4, %%xmm1 \n\t" "addpd %%xmm5, %%xmm0 \n\t" "addpd %%xmm3, %%xmm2 \n\t" "add $16, %0 \n\t" "jl 1b \n\t" "movhlps %%xmm0, %%xmm3 \n\t" "movhlps %%xmm1, %%xmm4 \n\t" "movhlps %%xmm2, %%xmm5 \n\t" "addsd %%xmm3, %%xmm0 \n\t" "addsd %%xmm4, %%xmm1 \n\t" "addsd %%xmm5, %%xmm2 \n\t" "movsd %%xmm0, (%1) \n\t" "movsd %%xmm1, 8(%1) \n\t" "movsd %%xmm2, 16(%1) \n\t" :"+&r"(i) :"r"(autoc+j), "r"(data1+len), "r"(data1+len-j) :"memory" ); } else { __asm__ volatile( "movsd "MANGLE(ff_pd_1)", %%xmm0 \n\t" "movsd "MANGLE(ff_pd_1)", %%xmm1 \n\t" "1: \n\t" "movapd (%3,%0), %%xmm3 \n\t" "movupd -8(%4,%0), %%xmm4 \n\t" "mulpd %%xmm3, %%xmm4 \n\t" "mulpd (%4,%0), %%xmm3 \n\t" "addpd %%xmm4, %%xmm1 \n\t" "addpd %%xmm3, %%xmm0 \n\t" "add $16, %0 \n\t" "jl 1b \n\t" "movhlps %%xmm0, %%xmm3 \n\t" "movhlps %%xmm1, %%xmm4 \n\t" "addsd %%xmm3, %%xmm0 \n\t" "addsd %%xmm4, %%xmm1 \n\t" "movsd %%xmm0, %1 \n\t" "movsd %%xmm1, %2 \n\t" :"+&r"(i), "=m"(autoc[j]), "=m"(autoc[j+1]) :"r"(data1+len), "r"(data1+len-j) ); } } }
123linslouis-android-video-cutter
jni/libavcodec/x86/lpc_mmx.c
C
asf20
5,743
;****************************************************************************** ;* FFT transform with SSE/3DNow optimizations ;* Copyright (c) 2008 Loren Merritt ;* ;* This file is part of FFmpeg. ;* ;* FFmpeg is free software; you can redistribute it and/or ;* modify it under the terms of the GNU Lesser General Public ;* License as published by the Free Software Foundation; either ;* version 2.1 of the License, or (at your option) any later version. ;* ;* FFmpeg is distributed in the hope that it will be useful, ;* but WITHOUT ANY WARRANTY; without even the implied warranty of ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ;* Lesser General Public License for more details. ;* ;* You should have received a copy of the GNU Lesser General Public ;* License along with FFmpeg; if not, write to the Free Software ;* 51, Inc., Foundation Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** ; These functions are not individually interchangeable with the C versions. ; While C takes arrays of FFTComplex, SSE/3DNow leave intermediate results ; in blocks as conventient to the vector size. ; i.e. 
{4x real, 4x imaginary, 4x real, ...} (or 2x respectively) %include "x86inc.asm" SECTION_RODATA %define M_SQRT1_2 0.70710678118654752440 ps_root2: times 4 dd M_SQRT1_2 ps_root2mppm: dd -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2 ps_m1p1: dd 1<<31, 0 %assign i 16 %rep 13 cextern ff_cos_ %+ i %assign i i<<1 %endrep %ifdef ARCH_X86_64 %define pointer dq %else %define pointer dd %endif %macro IF0 1+ %endmacro %macro IF1 1+ %1 %endmacro section .text align=16 %macro T2_3DN 4 ; z0, z1, mem0, mem1 mova %1, %3 mova %2, %1 pfadd %1, %4 pfsub %2, %4 %endmacro %macro T4_3DN 6 ; z0, z1, z2, z3, tmp0, tmp1 mova %5, %3 pfsub %3, %4 pfadd %5, %4 ; {t6,t5} pxor %3, [ps_m1p1 GLOBAL] ; {t8,t7} mova %6, %1 pswapd %3, %3 pfadd %1, %5 ; {r0,i0} pfsub %6, %5 ; {r2,i2} mova %4, %2 pfadd %2, %3 ; {r1,i1} pfsub %4, %3 ; {r3,i3} SWAP %3, %6 %endmacro ; in: %1={r0,i0,r1,i1} %2={r2,i2,r3,i3} ; out: %1={r0,r1,r2,r3} %2={i0,i1,i2,i3} %macro T4_SSE 3 mova %3, %1 shufps %1, %2, 0x64 ; {r0,i0,r3,i2} shufps %3, %2, 0xce ; {r1,i1,r2,i3} mova %2, %1 addps %1, %3 ; {t1,t2,t6,t5} subps %2, %3 ; {t3,t4,t8,t7} mova %3, %1 shufps %1, %2, 0x44 ; {t1,t2,t3,t4} shufps %3, %2, 0xbe ; {t6,t5,t7,t8} mova %2, %1 addps %1, %3 ; {r0,i0,r1,i1} subps %2, %3 ; {r2,i2,r3,i3} mova %3, %1 shufps %1, %2, 0x88 ; {r0,r1,r2,r3} shufps %3, %2, 0xdd ; {i0,i1,i2,i3} SWAP %2, %3 %endmacro %macro T8_SSE 6 ; r0,i0,r1,i1,t0,t1 mova %5, %3 shufps %3, %4, 0x44 ; {r4,i4,r6,i6} shufps %5, %4, 0xee ; {r5,i5,r7,i7} mova %6, %3 subps %3, %5 ; {r5,i5,r7,i7} addps %6, %5 ; {t1,t2,t3,t4} mova %5, %3 shufps %5, %5, 0xb1 ; {i5,r5,i7,r7} mulps %3, [ps_root2mppm GLOBAL] ; {-r5,i5,r7,-i7} mulps %5, [ps_root2 GLOBAL] addps %3, %5 ; {t8,t7,ta,t9} mova %5, %6 shufps %6, %3, 0x36 ; {t3,t2,t9,t8} shufps %5, %3, 0x9c ; {t1,t4,t7,ta} mova %3, %6 addps %6, %5 ; {t1,t2,t9,ta} subps %3, %5 ; {t6,t5,tc,tb} mova %5, %6 shufps %6, %3, 0xd8 ; {t1,t9,t5,tb} shufps %5, %3, 0x8d ; {t2,ta,t6,tc} mova %3, %1 mova %4, %2 addps %1, %6 ; {r0,r1,r2,r3} addps %2, %5 ; 
{i0,i1,i2,i3} subps %3, %6 ; {r4,r5,r6,r7} subps %4, %5 ; {i4,i5,i6,i7} %endmacro ; scheduled for cpu-bound sizes %macro PASS_SMALL 3 ; (to load m4-m7), wre, wim IF%1 mova m4, Z(4) IF%1 mova m5, Z(5) mova m0, %2 ; wre mova m2, m4 mova m1, %3 ; wim mova m3, m5 mulps m2, m0 ; r2*wre IF%1 mova m6, Z(6) mulps m3, m1 ; i2*wim IF%1 mova m7, Z(7) mulps m4, m1 ; r2*wim mulps m5, m0 ; i2*wre addps m2, m3 ; r2*wre + i2*wim mova m3, m1 mulps m1, m6 ; r3*wim subps m5, m4 ; i2*wre - r2*wim mova m4, m0 mulps m3, m7 ; i3*wim mulps m4, m6 ; r3*wre mulps m0, m7 ; i3*wre subps m4, m3 ; r3*wre - i3*wim mova m3, Z(0) addps m0, m1 ; i3*wre + r3*wim mova m1, m4 addps m4, m2 ; t5 subps m1, m2 ; t3 subps m3, m4 ; r2 addps m4, Z(0) ; r0 mova m6, Z(2) mova Z(4), m3 mova Z(0), m4 mova m3, m5 subps m5, m0 ; t4 mova m4, m6 subps m6, m5 ; r3 addps m5, m4 ; r1 mova Z(6), m6 mova Z(2), m5 mova m2, Z(3) addps m3, m0 ; t6 subps m2, m1 ; i3 mova m7, Z(1) addps m1, Z(3) ; i1 mova Z(7), m2 mova Z(3), m1 mova m4, m7 subps m7, m3 ; i2 addps m3, m4 ; i0 mova Z(5), m7 mova Z(1), m3 %endmacro ; scheduled to avoid store->load aliasing %macro PASS_BIG 1 ; (!interleave) mova m4, Z(4) ; r2 mova m5, Z(5) ; i2 mova m2, m4 mova m0, [wq] ; wre mova m3, m5 mova m1, [wq+o1q] ; wim mulps m2, m0 ; r2*wre mova m6, Z(6) ; r3 mulps m3, m1 ; i2*wim mova m7, Z(7) ; i3 mulps m4, m1 ; r2*wim mulps m5, m0 ; i2*wre addps m2, m3 ; r2*wre + i2*wim mova m3, m1 mulps m1, m6 ; r3*wim subps m5, m4 ; i2*wre - r2*wim mova m4, m0 mulps m3, m7 ; i3*wim mulps m4, m6 ; r3*wre mulps m0, m7 ; i3*wre subps m4, m3 ; r3*wre - i3*wim mova m3, Z(0) addps m0, m1 ; i3*wre + r3*wim mova m1, m4 addps m4, m2 ; t5 subps m1, m2 ; t3 subps m3, m4 ; r2 addps m4, Z(0) ; r0 mova m6, Z(2) mova Z(4), m3 mova Z(0), m4 mova m3, m5 subps m5, m0 ; t4 mova m4, m6 subps m6, m5 ; r3 addps m5, m4 ; r1 IF%1 mova Z(6), m6 IF%1 mova Z(2), m5 mova m2, Z(3) addps m3, m0 ; t6 subps m2, m1 ; i3 mova m7, Z(1) addps m1, Z(3) ; i1 IF%1 mova Z(7), m2 IF%1 mova Z(3), m1 mova 
m4, m7 subps m7, m3 ; i2 addps m3, m4 ; i0 IF%1 mova Z(5), m7 IF%1 mova Z(1), m3 %if %1==0 mova m4, m5 ; r1 mova m0, m6 ; r3 unpcklps m5, m1 unpckhps m4, m1 unpcklps m6, m2 unpckhps m0, m2 mova m1, Z(0) mova m2, Z(4) mova Z(2), m5 mova Z(3), m4 mova Z(6), m6 mova Z(7), m0 mova m5, m1 ; r0 mova m4, m2 ; r2 unpcklps m1, m3 unpckhps m5, m3 unpcklps m2, m7 unpckhps m4, m7 mova Z(0), m1 mova Z(1), m5 mova Z(4), m2 mova Z(5), m4 %endif %endmacro %macro PUNPCK 3 mova %3, %1 punpckldq %1, %2 punpckhdq %3, %2 %endmacro INIT_XMM %define mova movaps %define Z(x) [r0+mmsize*x] align 16 fft4_sse: mova m0, Z(0) mova m1, Z(1) T4_SSE m0, m1, m2 mova Z(0), m0 mova Z(1), m1 ret align 16 fft8_sse: mova m0, Z(0) mova m1, Z(1) T4_SSE m0, m1, m2 mova m2, Z(2) mova m3, Z(3) T8_SSE m0, m1, m2, m3, m4, m5 mova Z(0), m0 mova Z(1), m1 mova Z(2), m2 mova Z(3), m3 ret align 16 fft16_sse: mova m0, Z(0) mova m1, Z(1) T4_SSE m0, m1, m2 mova m2, Z(2) mova m3, Z(3) T8_SSE m0, m1, m2, m3, m4, m5 mova m4, Z(4) mova m5, Z(5) mova Z(0), m0 mova Z(1), m1 mova Z(2), m2 mova Z(3), m3 T4_SSE m4, m5, m6 mova m6, Z(6) mova m7, Z(7) T4_SSE m6, m7, m0 PASS_SMALL 0, [ff_cos_16 GLOBAL], [ff_cos_16+16 GLOBAL] ret INIT_MMX %macro FFT48_3DN 1 align 16 fft4%1: T2_3DN m0, m1, Z(0), Z(1) mova m2, Z(2) mova m3, Z(3) T4_3DN m0, m1, m2, m3, m4, m5 PUNPCK m0, m1, m4 PUNPCK m2, m3, m5 mova Z(0), m0 mova Z(1), m4 mova Z(2), m2 mova Z(3), m5 ret align 16 fft8%1: T2_3DN m0, m1, Z(0), Z(1) mova m2, Z(2) mova m3, Z(3) T4_3DN m0, m1, m2, m3, m4, m5 mova Z(0), m0 mova Z(2), m2 T2_3DN m4, m5, Z(4), Z(5) T2_3DN m6, m7, Z(6), Z(7) pswapd m0, m5 pswapd m2, m7 pxor m0, [ps_m1p1 GLOBAL] pxor m2, [ps_m1p1 GLOBAL] pfsub m5, m0 pfadd m7, m2 pfmul m5, [ps_root2 GLOBAL] pfmul m7, [ps_root2 GLOBAL] T4_3DN m1, m3, m5, m7, m0, m2 mova Z(5), m5 mova Z(7), m7 mova m0, Z(0) mova m2, Z(2) T4_3DN m0, m2, m4, m6, m5, m7 PUNPCK m0, m1, m5 PUNPCK m2, m3, m7 mova Z(0), m0 mova Z(1), m5 mova Z(2), m2 mova Z(3), m7 PUNPCK m4, Z(5), m5 PUNPCK m6, Z(7), m7 
mova Z(4), m4 mova Z(5), m5 mova Z(6), m6 mova Z(7), m7 ret %endmacro FFT48_3DN _3dn2 %macro pswapd 2 %ifidn %1, %2 movd [r0+12], %1 punpckhdq %1, [r0+8] %else movq %1, %2 psrlq %1, 32 punpckldq %1, %2 %endif %endmacro FFT48_3DN _3dn %define Z(x) [zq + o1q*(x&6)*((x/6)^1) + o3q*(x/6) + mmsize*(x&1)] %macro DECL_PASS 2+ ; name, payload align 16 %1: DEFINE_ARGS z, w, n, o1, o3 lea o3q, [nq*3] lea o1q, [nq*8] shl o3q, 4 .loop: %2 add zq, mmsize*2 add wq, mmsize sub nd, mmsize/8 jg .loop rep ret %endmacro INIT_XMM %define mova movaps DECL_PASS pass_sse, PASS_BIG 1 DECL_PASS pass_interleave_sse, PASS_BIG 0 INIT_MMX %define mulps pfmul %define addps pfadd %define subps pfsub %define unpcklps punpckldq %define unpckhps punpckhdq DECL_PASS pass_3dn, PASS_SMALL 1, [wq], [wq+o1q] DECL_PASS pass_interleave_3dn, PASS_BIG 0 %define pass_3dn2 pass_3dn %define pass_interleave_3dn2 pass_interleave_3dn %ifdef PIC %define SECTION_REL - $$ %else %define SECTION_REL %endif %macro DECL_FFT 2-3 ; nbits, cpu, suffix %xdefine list_of_fft fft4%2 SECTION_REL, fft8%2 SECTION_REL %if %1==5 %xdefine list_of_fft list_of_fft, fft16%2 SECTION_REL %endif %assign n 1<<%1 %rep 17-%1 %assign n2 n/2 %assign n4 n/4 %xdefine list_of_fft list_of_fft, fft %+ n %+ %3%2 SECTION_REL align 16 fft %+ n %+ %3%2: call fft %+ n2 %+ %2 add r0, n*4 - (n&(-2<<%1)) call fft %+ n4 %+ %2 add r0, n*2 - (n2&(-2<<%1)) call fft %+ n4 %+ %2 sub r0, n*6 + (n2&(-2<<%1)) lea r1, [ff_cos_ %+ n GLOBAL] mov r2d, n4/2 jmp pass%3%2 %assign n n*2 %endrep %undef n align 8 dispatch_tab%3%2: pointer list_of_fft section .text ; On x86_32, this function does the register saving and restoring for all of fft. ; The others pass args in registers and don't spill anything. 
cglobal fft_dispatch%3%2, 2,5,8, z, nbits lea r2, [dispatch_tab%3%2 GLOBAL] mov r2, [r2 + (nbitsq-2)*gprsize] %ifdef PIC lea r3, [$$ GLOBAL] add r2, r3 %endif call r2 RET %endmacro ; DECL_FFT DECL_FFT 5, _sse DECL_FFT 5, _sse, _interleave DECL_FFT 4, _3dn DECL_FFT 4, _3dn, _interleave DECL_FFT 4, _3dn2 DECL_FFT 4, _3dn2, _interleave
123linslouis-android-video-cutter
jni/libavcodec/x86/fft_mmx.asm
Assembly
asf20
11,263
/* * MMX optimized motion estimation * Copyright (c) 2001 Fabrice Bellard * Copyright (c) 2002-2004 Michael Niedermayer * * mostly by Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/x86_cpu.h" #include "libavcodec/dsputil.h" #include "dsputil_mmx.h" DECLARE_ASM_CONST(8, uint64_t, round_tab)[3]={ 0x0000000000000000ULL, 0x0001000100010001ULL, 0x0002000200020002ULL, }; DECLARE_ASM_CONST(8, uint64_t, bone)= 0x0101010101010101LL; static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h) { x86_reg len= -(stride*h); __asm__ volatile( ASMALIGN(4) "1: \n\t" "movq (%1, %%"REG_a"), %%mm0 \n\t" "movq (%2, %%"REG_a"), %%mm2 \n\t" "movq (%2, %%"REG_a"), %%mm4 \n\t" "add %3, %%"REG_a" \n\t" "psubusb %%mm0, %%mm2 \n\t" "psubusb %%mm4, %%mm0 \n\t" "movq (%1, %%"REG_a"), %%mm1 \n\t" "movq (%2, %%"REG_a"), %%mm3 \n\t" "movq (%2, %%"REG_a"), %%mm5 \n\t" "psubusb %%mm1, %%mm3 \n\t" "psubusb %%mm5, %%mm1 \n\t" "por %%mm2, %%mm0 \n\t" "por %%mm1, %%mm3 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm3, %%mm2 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpckhbw %%mm7, %%mm1 \n\t" "punpcklbw %%mm7, %%mm3 \n\t" "punpckhbw %%mm7, %%mm2 \n\t" "paddw %%mm1, %%mm0 \n\t" "paddw %%mm3, %%mm2 \n\t" "paddw %%mm2, %%mm0 \n\t" "paddw %%mm0, %%mm6 
\n\t" "add %3, %%"REG_a" \n\t" " js 1b \n\t" : "+a" (len) : "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg)stride) ); } static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h) { __asm__ volatile( ASMALIGN(4) "1: \n\t" "movq (%1), %%mm0 \n\t" "movq (%1, %3), %%mm1 \n\t" "psadbw (%2), %%mm0 \n\t" "psadbw (%2, %3), %%mm1 \n\t" "paddw %%mm0, %%mm6 \n\t" "paddw %%mm1, %%mm6 \n\t" "lea (%1,%3,2), %1 \n\t" "lea (%2,%3,2), %2 \n\t" "sub $2, %0 \n\t" " jg 1b \n\t" : "+r" (h), "+r" (blk1), "+r" (blk2) : "r" ((x86_reg)stride) ); } static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h) { int ret; __asm__ volatile( "pxor %%xmm6, %%xmm6 \n\t" ASMALIGN(4) "1: \n\t" "movdqu (%1), %%xmm0 \n\t" "movdqu (%1, %3), %%xmm1 \n\t" "psadbw (%2), %%xmm0 \n\t" "psadbw (%2, %3), %%xmm1 \n\t" "paddw %%xmm0, %%xmm6 \n\t" "paddw %%xmm1, %%xmm6 \n\t" "lea (%1,%3,2), %1 \n\t" "lea (%2,%3,2), %2 \n\t" "sub $2, %0 \n\t" " jg 1b \n\t" : "+r" (h), "+r" (blk1), "+r" (blk2) : "r" ((x86_reg)stride) ); __asm__ volatile( "movhlps %%xmm6, %%xmm0 \n\t" "paddw %%xmm0, %%xmm6 \n\t" "movd %%xmm6, %0 \n\t" : "=r"(ret) ); return ret; } static inline void sad8_x2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h) { __asm__ volatile( ASMALIGN(4) "1: \n\t" "movq (%1), %%mm0 \n\t" "movq (%1, %3), %%mm1 \n\t" "pavgb 1(%1), %%mm0 \n\t" "pavgb 1(%1, %3), %%mm1 \n\t" "psadbw (%2), %%mm0 \n\t" "psadbw (%2, %3), %%mm1 \n\t" "paddw %%mm0, %%mm6 \n\t" "paddw %%mm1, %%mm6 \n\t" "lea (%1,%3,2), %1 \n\t" "lea (%2,%3,2), %2 \n\t" "sub $2, %0 \n\t" " jg 1b \n\t" : "+r" (h), "+r" (blk1), "+r" (blk2) : "r" ((x86_reg)stride) ); } static inline void sad8_y2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h) { __asm__ volatile( "movq (%1), %%mm0 \n\t" "add %3, %1 \n\t" ASMALIGN(4) "1: \n\t" "movq (%1), %%mm1 \n\t" "movq (%1, %3), %%mm2 \n\t" "pavgb %%mm1, %%mm0 \n\t" "pavgb %%mm2, %%mm1 \n\t" "psadbw (%2), %%mm0 \n\t" "psadbw (%2, %3), %%mm1 \n\t" "paddw %%mm0, %%mm6 \n\t" 
"paddw %%mm1, %%mm6 \n\t" "movq %%mm2, %%mm0 \n\t" "lea (%1,%3,2), %1 \n\t" "lea (%2,%3,2), %2 \n\t" "sub $2, %0 \n\t" " jg 1b \n\t" : "+r" (h), "+r" (blk1), "+r" (blk2) : "r" ((x86_reg)stride) ); } static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h) { __asm__ volatile( "movq "MANGLE(bone)", %%mm5 \n\t" "movq (%1), %%mm0 \n\t" "pavgb 1(%1), %%mm0 \n\t" "add %3, %1 \n\t" ASMALIGN(4) "1: \n\t" "movq (%1), %%mm1 \n\t" "movq (%1,%3), %%mm2 \n\t" "pavgb 1(%1), %%mm1 \n\t" "pavgb 1(%1,%3), %%mm2 \n\t" "psubusb %%mm5, %%mm1 \n\t" "pavgb %%mm1, %%mm0 \n\t" "pavgb %%mm2, %%mm1 \n\t" "psadbw (%2), %%mm0 \n\t" "psadbw (%2,%3), %%mm1 \n\t" "paddw %%mm0, %%mm6 \n\t" "paddw %%mm1, %%mm6 \n\t" "movq %%mm2, %%mm0 \n\t" "lea (%1,%3,2), %1 \n\t" "lea (%2,%3,2), %2 \n\t" "sub $2, %0 \n\t" " jg 1b \n\t" : "+r" (h), "+r" (blk1), "+r" (blk2) : "r" ((x86_reg)stride) ); } static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h) { x86_reg len= -(stride*h); __asm__ volatile( ASMALIGN(4) "1: \n\t" "movq (%1, %%"REG_a"), %%mm0 \n\t" "movq (%2, %%"REG_a"), %%mm1 \n\t" "movq (%1, %%"REG_a"), %%mm2 \n\t" "movq (%2, %%"REG_a"), %%mm3 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpcklbw %%mm7, %%mm1 \n\t" "punpckhbw %%mm7, %%mm2 \n\t" "punpckhbw %%mm7, %%mm3 \n\t" "paddw %%mm0, %%mm1 \n\t" "paddw %%mm2, %%mm3 \n\t" "movq (%3, %%"REG_a"), %%mm4 \n\t" "movq (%3, %%"REG_a"), %%mm2 \n\t" "paddw %%mm5, %%mm1 \n\t" "paddw %%mm5, %%mm3 \n\t" "psrlw $1, %%mm1 \n\t" "psrlw $1, %%mm3 \n\t" "packuswb %%mm3, %%mm1 \n\t" "psubusb %%mm1, %%mm4 \n\t" "psubusb %%mm2, %%mm1 \n\t" "por %%mm4, %%mm1 \n\t" "movq %%mm1, %%mm0 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpckhbw %%mm7, %%mm1 \n\t" "paddw %%mm1, %%mm0 \n\t" "paddw %%mm0, %%mm6 \n\t" "add %4, %%"REG_a" \n\t" " js 1b \n\t" : "+a" (len) : "r" (blk1a - len), "r" (blk1b -len), "r" (blk2 - len), "r" ((x86_reg)stride) ); } static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h) { 
x86_reg len= -(stride*h); __asm__ volatile( "movq (%1, %%"REG_a"), %%mm0 \n\t" "movq 1(%1, %%"REG_a"), %%mm2 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm2, %%mm3 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpckhbw %%mm7, %%mm1 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" "punpckhbw %%mm7, %%mm3 \n\t" "paddw %%mm2, %%mm0 \n\t" "paddw %%mm3, %%mm1 \n\t" ASMALIGN(4) "1: \n\t" "movq (%2, %%"REG_a"), %%mm2 \n\t" "movq 1(%2, %%"REG_a"), %%mm4 \n\t" "movq %%mm2, %%mm3 \n\t" "movq %%mm4, %%mm5 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" "punpckhbw %%mm7, %%mm3 \n\t" "punpcklbw %%mm7, %%mm4 \n\t" "punpckhbw %%mm7, %%mm5 \n\t" "paddw %%mm4, %%mm2 \n\t" "paddw %%mm5, %%mm3 \n\t" "movq 16+"MANGLE(round_tab)", %%mm5 \n\t" "paddw %%mm2, %%mm0 \n\t" "paddw %%mm3, %%mm1 \n\t" "paddw %%mm5, %%mm0 \n\t" "paddw %%mm5, %%mm1 \n\t" "movq (%3, %%"REG_a"), %%mm4 \n\t" "movq (%3, %%"REG_a"), %%mm5 \n\t" "psrlw $2, %%mm0 \n\t" "psrlw $2, %%mm1 \n\t" "packuswb %%mm1, %%mm0 \n\t" "psubusb %%mm0, %%mm4 \n\t" "psubusb %%mm5, %%mm0 \n\t" "por %%mm4, %%mm0 \n\t" "movq %%mm0, %%mm4 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpckhbw %%mm7, %%mm4 \n\t" "paddw %%mm0, %%mm6 \n\t" "paddw %%mm4, %%mm6 \n\t" "movq %%mm2, %%mm0 \n\t" "movq %%mm3, %%mm1 \n\t" "add %4, %%"REG_a" \n\t" " js 1b \n\t" : "+a" (len) : "r" (blk1 - len), "r" (blk1 -len + stride), "r" (blk2 - len), "r" ((x86_reg)stride) ); } static inline int sum_mmx(void) { int ret; __asm__ volatile( "movq %%mm6, %%mm0 \n\t" "psrlq $32, %%mm6 \n\t" "paddw %%mm0, %%mm6 \n\t" "movq %%mm6, %%mm0 \n\t" "psrlq $16, %%mm6 \n\t" "paddw %%mm0, %%mm6 \n\t" "movd %%mm6, %0 \n\t" : "=r" (ret) ); return ret&0xFFFF; } static inline int sum_mmx2(void) { int ret; __asm__ volatile( "movd %%mm6, %0 \n\t" : "=r" (ret) ); return ret; } static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h) { sad8_2_mmx(blk1, blk1+1, blk2, stride, h); } static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h) { sad8_2_mmx(blk1, blk1+stride, blk2, stride, h); } 
#define PIX_SAD(suf)\ static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ {\ assert(h==8);\ __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\ "pxor %%mm6, %%mm6 \n\t":);\ \ sad8_1_ ## suf(blk1, blk2, stride, 8);\ \ return sum_ ## suf();\ }\ static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ {\ assert(h==8);\ __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\ "pxor %%mm6, %%mm6 \n\t"\ "movq %0, %%mm5 \n\t"\ :: "m"(round_tab[1]) \ );\ \ sad8_x2a_ ## suf(blk1, blk2, stride, 8);\ \ return sum_ ## suf();\ }\ \ static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ {\ assert(h==8);\ __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\ "pxor %%mm6, %%mm6 \n\t"\ "movq %0, %%mm5 \n\t"\ :: "m"(round_tab[1]) \ );\ \ sad8_y2a_ ## suf(blk1, blk2, stride, 8);\ \ return sum_ ## suf();\ }\ \ static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ {\ assert(h==8);\ __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\ "pxor %%mm6, %%mm6 \n\t"\ ::);\ \ sad8_4_ ## suf(blk1, blk2, stride, 8);\ \ return sum_ ## suf();\ }\ \ static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ {\ __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\ "pxor %%mm6, %%mm6 \n\t":);\ \ sad8_1_ ## suf(blk1 , blk2 , stride, h);\ sad8_1_ ## suf(blk1+8, blk2+8, stride, h);\ \ return sum_ ## suf();\ }\ static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ {\ __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\ "pxor %%mm6, %%mm6 \n\t"\ "movq %0, %%mm5 \n\t"\ :: "m"(round_tab[1]) \ );\ \ sad8_x2a_ ## suf(blk1 , blk2 , stride, h);\ sad8_x2a_ ## suf(blk1+8, blk2+8, stride, h);\ \ return sum_ ## suf();\ }\ static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ {\ __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\ "pxor %%mm6, %%mm6 \n\t"\ "movq %0, %%mm5 \n\t"\ :: "m"(round_tab[1]) \ );\ \ sad8_y2a_ ## suf(blk1 , blk2 , stride, h);\ sad8_y2a_ ## suf(blk1+8, 
blk2+8, stride, h);\ \ return sum_ ## suf();\ }\ static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ {\ __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\ "pxor %%mm6, %%mm6 \n\t"\ ::);\ \ sad8_4_ ## suf(blk1 , blk2 , stride, h);\ sad8_4_ ## suf(blk1+8, blk2+8, stride, h);\ \ return sum_ ## suf();\ }\ PIX_SAD(mmx) PIX_SAD(mmx2) void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx) { if (mm_flags & FF_MM_MMX) { c->pix_abs[0][0] = sad16_mmx; c->pix_abs[0][1] = sad16_x2_mmx; c->pix_abs[0][2] = sad16_y2_mmx; c->pix_abs[0][3] = sad16_xy2_mmx; c->pix_abs[1][0] = sad8_mmx; c->pix_abs[1][1] = sad8_x2_mmx; c->pix_abs[1][2] = sad8_y2_mmx; c->pix_abs[1][3] = sad8_xy2_mmx; c->sad[0]= sad16_mmx; c->sad[1]= sad8_mmx; } if (mm_flags & FF_MM_MMX2) { c->pix_abs[0][0] = sad16_mmx2; c->pix_abs[1][0] = sad8_mmx2; c->sad[0]= sad16_mmx2; c->sad[1]= sad8_mmx2; if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->pix_abs[0][1] = sad16_x2_mmx2; c->pix_abs[0][2] = sad16_y2_mmx2; c->pix_abs[0][3] = sad16_xy2_mmx2; c->pix_abs[1][1] = sad8_x2_mmx2; c->pix_abs[1][2] = sad8_y2_mmx2; c->pix_abs[1][3] = sad8_xy2_mmx2; } } if ((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW) && avctx->codec_id != CODEC_ID_SNOW) { c->sad[0]= sad16_sse2; } }
123linslouis-android-video-cutter
jni/libavcodec/x86/motion_est_mmx.c
C
asf20
16,103
/* * Cinepak Video Decoder * Copyright (C) 2003 the ffmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Cinepak video decoder * by Ewald Snel <ewald@rambo.its.tudelft.nl> * For more information on the Cinepak algorithm, visit: * http://www.csse.monash.edu.au/~timf/ * For more information on the quirky data inside Sega FILM/CPK files, visit: * http://wiki.multimedia.cx/index.php?title=Sega_FILM */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "libavutil/intreadwrite.h" #include "avcodec.h" typedef struct { uint8_t y0, y1, y2, y3; uint8_t u, v; } cvid_codebook; #define MAX_STRIPS 32 typedef struct { uint16_t id; uint16_t x1, y1; uint16_t x2, y2; cvid_codebook v4_codebook[256]; cvid_codebook v1_codebook[256]; } cvid_strip; typedef struct CinepakContext { AVCodecContext *avctx; AVFrame frame; const unsigned char *data; int size; int width, height; int palette_video; cvid_strip strips[MAX_STRIPS]; int sega_film_skip_bytes; } CinepakContext; static void cinepak_decode_codebook (cvid_codebook *codebook, int chunk_id, int size, const uint8_t *data) { const uint8_t *eod = (data + size); uint32_t flag, mask; int i, n; /* check if this chunk contains 4- or 6-element vectors */ n = (chunk_id & 0x04) ? 
4 : 6; flag = 0; mask = 0; for (i=0; i < 256; i++) { if ((chunk_id & 0x01) && !(mask >>= 1)) { if ((data + 4) > eod) break; flag = AV_RB32 (data); data += 4; mask = 0x80000000; } if (!(chunk_id & 0x01) || (flag & mask)) { if ((data + n) > eod) break; if (n == 6) { codebook[i].y0 = *data++; codebook[i].y1 = *data++; codebook[i].y2 = *data++; codebook[i].y3 = *data++; codebook[i].u = 128 + *data++; codebook[i].v = 128 + *data++; } else { /* this codebook type indicates either greyscale or * palettized video; if palettized, U & V components will * not be used so it is safe to set them to 128 for the * benefit of greyscale rendering in YUV420P */ codebook[i].y0 = *data++; codebook[i].y1 = *data++; codebook[i].y2 = *data++; codebook[i].y3 = *data++; codebook[i].u = 128; codebook[i].v = 128; } } } } static int cinepak_decode_vectors (CinepakContext *s, cvid_strip *strip, int chunk_id, int size, const uint8_t *data) { const uint8_t *eod = (data + size); uint32_t flag, mask; cvid_codebook *codebook; unsigned int x, y; uint32_t iy[4]; uint32_t iu[2]; uint32_t iv[2]; flag = 0; mask = 0; for (y=strip->y1; y < strip->y2; y+=4) { iy[0] = strip->x1 + (y * s->frame.linesize[0]); iy[1] = iy[0] + s->frame.linesize[0]; iy[2] = iy[1] + s->frame.linesize[0]; iy[3] = iy[2] + s->frame.linesize[0]; iu[0] = (strip->x1/2) + ((y/2) * s->frame.linesize[1]); iu[1] = iu[0] + s->frame.linesize[1]; iv[0] = (strip->x1/2) + ((y/2) * s->frame.linesize[2]); iv[1] = iv[0] + s->frame.linesize[2]; for (x=strip->x1; x < strip->x2; x+=4) { if ((chunk_id & 0x01) && !(mask >>= 1)) { if ((data + 4) > eod) return -1; flag = AV_RB32 (data); data += 4; mask = 0x80000000; } if (!(chunk_id & 0x01) || (flag & mask)) { if (!(chunk_id & 0x02) && !(mask >>= 1)) { if ((data + 4) > eod) return -1; flag = AV_RB32 (data); data += 4; mask = 0x80000000; } if ((chunk_id & 0x02) || (~flag & mask)) { if (data >= eod) return -1; codebook = &strip->v1_codebook[*data++]; s->frame.data[0][iy[0] + 0] = codebook->y0; 
s->frame.data[0][iy[0] + 1] = codebook->y0; s->frame.data[0][iy[1] + 0] = codebook->y0; s->frame.data[0][iy[1] + 1] = codebook->y0; if (!s->palette_video) { s->frame.data[1][iu[0]] = codebook->u; s->frame.data[2][iv[0]] = codebook->v; } s->frame.data[0][iy[0] + 2] = codebook->y1; s->frame.data[0][iy[0] + 3] = codebook->y1; s->frame.data[0][iy[1] + 2] = codebook->y1; s->frame.data[0][iy[1] + 3] = codebook->y1; if (!s->palette_video) { s->frame.data[1][iu[0] + 1] = codebook->u; s->frame.data[2][iv[0] + 1] = codebook->v; } s->frame.data[0][iy[2] + 0] = codebook->y2; s->frame.data[0][iy[2] + 1] = codebook->y2; s->frame.data[0][iy[3] + 0] = codebook->y2; s->frame.data[0][iy[3] + 1] = codebook->y2; if (!s->palette_video) { s->frame.data[1][iu[1]] = codebook->u; s->frame.data[2][iv[1]] = codebook->v; } s->frame.data[0][iy[2] + 2] = codebook->y3; s->frame.data[0][iy[2] + 3] = codebook->y3; s->frame.data[0][iy[3] + 2] = codebook->y3; s->frame.data[0][iy[3] + 3] = codebook->y3; if (!s->palette_video) { s->frame.data[1][iu[1] + 1] = codebook->u; s->frame.data[2][iv[1] + 1] = codebook->v; } } else if (flag & mask) { if ((data + 4) > eod) return -1; codebook = &strip->v4_codebook[*data++]; s->frame.data[0][iy[0] + 0] = codebook->y0; s->frame.data[0][iy[0] + 1] = codebook->y1; s->frame.data[0][iy[1] + 0] = codebook->y2; s->frame.data[0][iy[1] + 1] = codebook->y3; if (!s->palette_video) { s->frame.data[1][iu[0]] = codebook->u; s->frame.data[2][iv[0]] = codebook->v; } codebook = &strip->v4_codebook[*data++]; s->frame.data[0][iy[0] + 2] = codebook->y0; s->frame.data[0][iy[0] + 3] = codebook->y1; s->frame.data[0][iy[1] + 2] = codebook->y2; s->frame.data[0][iy[1] + 3] = codebook->y3; if (!s->palette_video) { s->frame.data[1][iu[0] + 1] = codebook->u; s->frame.data[2][iv[0] + 1] = codebook->v; } codebook = &strip->v4_codebook[*data++]; s->frame.data[0][iy[2] + 0] = codebook->y0; s->frame.data[0][iy[2] + 1] = codebook->y1; s->frame.data[0][iy[3] + 0] = codebook->y2; 
s->frame.data[0][iy[3] + 1] = codebook->y3; if (!s->palette_video) { s->frame.data[1][iu[1]] = codebook->u; s->frame.data[2][iv[1]] = codebook->v; } codebook = &strip->v4_codebook[*data++]; s->frame.data[0][iy[2] + 2] = codebook->y0; s->frame.data[0][iy[2] + 3] = codebook->y1; s->frame.data[0][iy[3] + 2] = codebook->y2; s->frame.data[0][iy[3] + 3] = codebook->y3; if (!s->palette_video) { s->frame.data[1][iu[1] + 1] = codebook->u; s->frame.data[2][iv[1] + 1] = codebook->v; } } } iy[0] += 4; iy[1] += 4; iy[2] += 4; iy[3] += 4; iu[0] += 2; iu[1] += 2; iv[0] += 2; iv[1] += 2; } } return 0; } static int cinepak_decode_strip (CinepakContext *s, cvid_strip *strip, const uint8_t *data, int size) { const uint8_t *eod = (data + size); int chunk_id, chunk_size; /* coordinate sanity checks */ if (strip->x1 >= s->width || strip->x2 > s->width || strip->y1 >= s->height || strip->y2 > s->height || strip->x1 >= strip->x2 || strip->y1 >= strip->y2) return -1; while ((data + 4) <= eod) { chunk_id = data[0]; chunk_size = AV_RB24 (&data[1]) - 4; if(chunk_size < 0) return -1; data += 4; chunk_size = ((data + chunk_size) > eod) ? 
(eod - data) : chunk_size; switch (chunk_id) { case 0x20: case 0x21: case 0x24: case 0x25: cinepak_decode_codebook (strip->v4_codebook, chunk_id, chunk_size, data); break; case 0x22: case 0x23: case 0x26: case 0x27: cinepak_decode_codebook (strip->v1_codebook, chunk_id, chunk_size, data); break; case 0x30: case 0x31: case 0x32: return cinepak_decode_vectors (s, strip, chunk_id, chunk_size, data); } data += chunk_size; } return -1; } static int cinepak_decode (CinepakContext *s) { const uint8_t *eod = (s->data + s->size); int i, result, strip_size, frame_flags, num_strips; int y0 = 0; int encoded_buf_size; if (s->size < 10) return -1; frame_flags = s->data[0]; num_strips = AV_RB16 (&s->data[8]); encoded_buf_size = ((s->data[1] << 16) | AV_RB16 (&s->data[2])); /* if this is the first frame, check for deviant Sega FILM data */ if (s->sega_film_skip_bytes == -1) { if (encoded_buf_size != s->size) { /* If the encoded frame size differs from the frame size as indicated * by the container file, this data likely comes from a Sega FILM/CPK file. * If the frame header is followed by the bytes FE 00 00 06 00 00 then * this is probably one of the two known files that have 6 extra bytes * after the frame header. Else, assume 2 extra bytes. */ if ((s->data[10] == 0xFE) && (s->data[11] == 0x00) && (s->data[12] == 0x00) && (s->data[13] == 0x06) && (s->data[14] == 0x00) && (s->data[15] == 0x00)) s->sega_film_skip_bytes = 6; else s->sega_film_skip_bytes = 2; } else s->sega_film_skip_bytes = 0; } s->data += 10 + s->sega_film_skip_bytes; if (num_strips > MAX_STRIPS) num_strips = MAX_STRIPS; for (i=0; i < num_strips; i++) { if ((s->data + 12) > eod) return -1; s->strips[i].id = s->data[0]; s->strips[i].y1 = y0; s->strips[i].x1 = 0; s->strips[i].y2 = y0 + AV_RB16 (&s->data[8]); s->strips[i].x2 = s->avctx->width; strip_size = AV_RB24 (&s->data[1]) - 12; s->data += 12; strip_size = ((s->data + strip_size) > eod) ? 
(eod - s->data) : strip_size; if ((i > 0) && !(frame_flags & 0x01)) { memcpy (s->strips[i].v4_codebook, s->strips[i-1].v4_codebook, sizeof(s->strips[i].v4_codebook)); memcpy (s->strips[i].v1_codebook, s->strips[i-1].v1_codebook, sizeof(s->strips[i].v1_codebook)); } result = cinepak_decode_strip (s, &s->strips[i], s->data, strip_size); if (result != 0) return result; s->data += strip_size; y0 = s->strips[i].y2; } return 0; } static av_cold int cinepak_decode_init(AVCodecContext *avctx) { CinepakContext *s = avctx->priv_data; s->avctx = avctx; s->width = (avctx->width + 3) & ~3; s->height = (avctx->height + 3) & ~3; s->sega_film_skip_bytes = -1; /* uninitialized state */ // check for paletted data if ((avctx->palctrl == NULL) || (avctx->bits_per_coded_sample == 40)) { s->palette_video = 0; avctx->pix_fmt = PIX_FMT_YUV420P; } else { s->palette_video = 1; avctx->pix_fmt = PIX_FMT_PAL8; } s->frame.data[0] = NULL; return 0; } static int cinepak_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; CinepakContext *s = avctx->priv_data; s->data = buf; s->size = buf_size; s->frame.reference = 1; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if (avctx->reget_buffer(avctx, &s->frame)) { av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return -1; } cinepak_decode(s); if (s->palette_video) { memcpy (s->frame.data[1], avctx->palctrl->palette, AVPALETTE_SIZE); if (avctx->palctrl->palette_changed) { s->frame.palette_has_changed = 1; avctx->palctrl->palette_changed = 0; } else s->frame.palette_has_changed = 0; } *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; /* report that the buffer was completely consumed */ return buf_size; } static av_cold int cinepak_decode_end(AVCodecContext *avctx) { CinepakContext *s = avctx->priv_data; if (s->frame.data[0]) avctx->release_buffer(avctx, &s->frame); return 0; } AVCodec 
cinepak_decoder = { "cinepak", AVMEDIA_TYPE_VIDEO, CODEC_ID_CINEPAK, sizeof(CinepakContext), cinepak_decode_init, NULL, cinepak_decode_end, cinepak_decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Cinepak"), };
123linslouis-android-video-cutter
jni/libavcodec/cinepak.c
C
asf20
15,295
/* * Generate a header file for hardcoded motionpixels RGB to YUV table * * Copyright (c) 2009 Reimar Döffinger <Reimar.Doeffinger@gmx.de> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdlib.h> #define CONFIG_HARDCODED_TABLES 0 #define MAX_NEG_CROP 0 #define ff_cropTbl ((uint8_t *)NULL) #include "motionpixels_tablegen.h" #include "tableprint.h" int main(void) { motionpixels_tableinit(); write_fileheader(); printf("static const YuvPixel mp_rgb_yuv_table[1 << 15] = {\n"); write_int8_2d_array(mp_rgb_yuv_table, 1 << 15, 3); printf("};\n"); return 0; }
123linslouis-android-video-cutter
jni/libavcodec/motionpixels_tablegen.c
C
asf20
1,315
/* * Real Audio 1.0 (14.4K) * Copyright (c) 2003 the ffmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_RA144_H #define AVCODEC_RA144_H #include <stdint.h> static const int16_t gain_val_tab[256][3] = { { 541, 956, 768}, { 877, 581, 568}, { 675,1574, 635}, {1248,1464, 668}, {1246, 839, 1394}, {2560,1386, 991}, { 925, 687, 608}, {2208, 797, 1144}, { 535, 832, 799}, { 762, 605, 1154}, { 832,1122, 1003}, {1180, 687, 1176}, {1292, 901, 732}, {1656, 689, 896}, {1750,1248, 848}, {2284, 942, 1022}, { 824,1472, 643}, { 517, 765, 512}, { 562,1816, 1522}, { 694,1826, 2700}, { 704, 524, 672}, {1442, 757, 2232}, { 884, 551, 1266}, {2232,1007, 1692}, { 932, 746, 777}, {1132, 822, 926}, {1226, 771, 611}, {2948,1342, 1008}, {1302, 594, 1158}, {1602, 636, 1128}, {3408, 910, 1438}, {1996, 614, 575}, { 665, 935, 628}, { 631,1192, 829}, { 644, 926, 1052}, { 879, 988, 1226}, { 941,2768, 2772}, { 565,1344, 2304}, { 547, 628, 740}, { 639, 532, 1074}, { 955,1208, 598}, {1124,1160, 900}, {1206, 899, 1242}, { 746, 533, 624}, {1458,1028, 735}, {1706,1102, 692}, {1898,1018, 1004}, {2176, 988, 735}, {1578, 782, 1642}, { 897, 516, 754}, {2068, 702, 1656}, {2344, 818, 1526}, { 907, 652, 592}, {1056, 652, 642}, {2124,1416, 780}, {2664,1250, 727}, {1894, 727, 1108}, {2196, 657, 981}, 
{4840, 920, 1704}, {4992,1238, 983}, {2420, 909, 1094}, {2760, 935, 1032}, {2800, 612, 853}, {3068, 832, 574}, { 523,1796, 923}, { 722,1916, 1382}, {1226,1542, 928}, { 758, 757, 584}, { 512,1134, 577}, { 615,1276, 698}, { 574,2568, 2356}, { 993,2728, 3512}, { 539, 890, 913}, { 694, 928, 1088}, { 805, 600, 1360}, {2160, 951, 3128}, { 816, 950, 590}, { 955, 847, 811}, {1094, 883, 556}, {1304, 888, 604}, { 863,1170, 855}, {1023, 997, 1032}, { 932,1228, 1280}, { 627, 564, 573}, { 876, 900, 1448}, {1030, 857, 1792}, {1294, 953, 1758}, {1612, 854, 1714}, {1090,1166, 631}, {1314,1202, 751}, {1480, 905, 795}, {1682,1016, 568}, {1494,1178, 983}, { 878, 613, 526}, {1728,1446, 779}, {2136,1348, 774}, { 950, 649, 939}, {1180, 703, 899}, {1236, 527, 1158}, {1450, 647, 972}, {1282, 647, 707}, {1460, 663, 644}, {1614, 572, 578}, {3516,1222, 821}, {2668, 729, 1682}, {3128, 585, 1502}, {3208, 733, 976}, {6800, 871, 1416}, {3480, 743, 1408}, {3764, 899, 1170}, {3772, 632, 875}, {4092, 732, 638}, {3112, 753, 2620}, {3372, 945, 1890}, {3768, 969, 2288}, {2016, 559, 854}, {1736, 729, 787}, {1940, 686, 547}, {2140, 635, 674}, {4480,1272, 828}, {3976, 592, 1666}, {4384, 621, 1388}, {4400, 801, 955}, {4656, 522, 646}, {4848, 625, 1636}, {4984, 591, 874}, {5352, 535, 1001}, {11216,938, 1184}, { 925,3280, 1476}, { 735,1580, 1088}, {1150,1576, 674}, { 655, 783, 528}, { 527,2052, 1354}, { 782,1704, 1880}, { 578, 910, 1026}, { 692, 882, 1468}, { 586, 683, 715}, { 739, 609, 717}, { 778, 773, 697}, { 922, 785, 813}, { 766, 651, 984}, { 978, 596, 1030}, {1070, 757, 1080}, {1324, 687, 1178}, {1108,2144, 979}, { 723, 982, 690}, { 936, 956, 527}, {1180,1002, 547}, { 517,1306, 825}, { 832,1184, 974}, {1024, 957, 903}, {1262,1090, 906}, {1028, 720, 649}, {1192, 679, 694}, {2468,1480, 979}, {2844,1370, 877}, {1310, 835, 848}, {1508, 839, 698}, {1742,1030, 769}, {1910, 852, 573}, {1280, 859, 1174}, {1584, 863, 1108}, {1686, 708, 1364}, {1942, 768, 1104}, { 891, 536, 690}, {1016, 560, 663}, {2172, 870, 
1348}, {2404, 999, 1170}, {1890, 966, 889}, {2116, 912, 777}, {2296,1020, 714}, {4872,1844, 932}, {2392, 778, 929}, {2604, 772, 744}, {2764, 957, 722}, {5832,1532, 984}, {2188, 519, 1264}, {2332, 532, 922}, {5064, 995, 2412}, {2708, 571, 874}, {2408, 545, 666}, {5016,1084, 875}, {5376, 983, 1196}, {5536, 979, 730}, {5344, 634, 1744}, {5688, 706, 1348}, {5912, 977, 1190}, {6072, 905, 763}, {6048, 582, 1526}, {11968,1013,1816}, {12864,937, 1900}, {12560,1086, 998}, {1998, 684, 1884}, {2504, 633, 1992}, {1252, 567, 835}, {1478, 571, 973}, {2620, 769, 1414}, {2808, 952, 1142}, {2908, 712, 1028}, {2976, 686, 741}, {1462, 552, 714}, {3296, 991, 1452}, {1590, 615, 544}, {3480,1150, 824}, {3212, 832, 923}, {3276, 839, 531}, {3548, 786, 852}, {3732, 764, 570}, {5728, 906, 2616}, {6272, 804, 2252}, {3096, 535, 876}, {3228, 598, 649}, {6536, 759, 1436}, {6648, 993, 846}, {6864, 567, 1210},{14016,1012, 1302}, {3408, 548, 1098}, {7160,1008, 1742}, {7136,1000, 1182}, {7480,1032, 836}, {7448, 612, 1552}, {7744, 614, 816}, {8384, 777, 1438}, {8784, 694, 786}, { 882,1508, 1068}, { 597, 837, 766}, {1270, 954, 1408}, { 803, 550, 798}, {1398,1308, 798}, {1848,1534, 738}, { 970, 675, 608}, {1264, 706, 684}, {1716, 767, 1126}, {2108, 765, 1404}, {2236, 924, 1003}, {2472,1048, 611}, { 999, 942, 963}, {1094, 857, 935}, {2936, 926, 1138}, {1934, 746, 551}, {3336, 633, 1762}, {3764, 701, 1454}, {1890, 564, 636}, {4096,1126, 793}, {3936, 556, 1140}, {3936, 540, 740}, {4216, 764, 874}, {8480,1328, 1014}, {2184, 515, 1042}, {4432, 934, 1344}, {4784, 945, 1112}, {5016,1062, 733}, {9216,1020, 2028}, {9968, 924, 1188}, {5424, 909, 1206}, {6512, 744, 1086} }; static const uint8_t gain_exp_tab[256] = { 15, 15, 15, 15, 15, 16, 14, 15, 14, 14, 14, 14, 14, 14, 14, 14, 14, 13, 14, 14, 13, 14, 13, 14, 13, 13, 13, 14, 13, 13, 14, 13, 13, 13, 13, 13, 14, 13, 12, 12, 13, 13, 13, 12, 13, 13, 13, 13, 13, 12, 13, 13, 12, 12, 13, 13, 13, 13, 14, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 13, 13, 12, 12, 12, 
13, 12, 12, 12, 12, 12, 12, 12, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 14, 13, 12, 12, 11, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 11, 11, 11, 11, 12, 12, 12, 12, 11, 11, 12, 12, 12, 12, 12, 13, 12, 12, 12, 13, 12, 12, 13, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 12, 12, 11, 11, 12, 12, 12, 12, 11, 12, 11, 12, 12, 12, 12, 12, 13, 13, 12, 12, 13, 13, 13, 14, 12, 13, 13, 13, 13, 13, 13, 13, 11, 10, 11, 10, 11, 11, 10, 10, 11, 11, 11, 11, 10, 9, 11, 10, 12, 12, 11, 12, 12, 12, 12, 13, 11, 12, 12, 12, 13, 13, 12, 12 }; static const int8_t cb1_vects[128][40]={ { 38, -4, 15, -4, 14, -13, 12, -11, -2, -6, -6, -11, -45, -16, -11, -13, -7, 6, -12, 4, -20, 3, -16, 12, -1, 12, 46, 24, 0, 33, -3, 9, -12, -12, -8, -7, 17, -6, 0, -2, }, { 60, -16, 3, -22, 10, -32, 0, -28, -17, -18, -3, -25, -37, -23, -10, 3, 2, 3, 0, 3, -14, 0, -14, -1, 0, 2, 32, 9, -1, 25, 7, 13, -5, 13, 8, 1, 2, 8, -10, 6, }, { 27, -12, 28, -2, 6, -7, 15, 9, -11, 1, -13, -11, -40, 4, -29, -14, -19, -5, -23, -8, -30, -13, -17, 0, -14, 12, 34, 20, -2, 25, 2, -16, -4, -12, 15, 16, 29, 7, 24, 10, }, { 49, -24, 16, -20, 2, -26, 2, -7, -25, -10, -11, -25, -32, -3, -27, 2, -8, -8, -11, -9, -24, -17, -16, -14, -13, 2, 20, 5, -4, 17, 14, -12, 3, 13, 33, 25, 14, 23, 15, 19, }, { 46, -6, 21, 8, -2, -16, -5, -8, -11, 4, 8, 15, -24, 4, -2, -26, -3, -16, -16, -14, -9, -2, -1, 4, 19, 7, 36, 17, 9, 13, 0, 31, -5, -12, 7, -8, 11, -15, -13, -4, }, { 68, -18, 9, -9, -6, -35, -18, -25, -26, -7, 10, 1, -16, -3, -1, -9, 6, -19, -4, -15, -4, -6, 0, -8, 20, -2, 23, 2, 7, 5, 12, 35, 1, 13, 24, 0, -3, 0, -22, 4, }, { 35, -14, 34, 10, -10, -10, -1, 12, -20, 12, 0, 15, -18, 24, -20, -27, -14, -28, -27, -27, -20, -19, -2, -8, 5, 7, 25, 13, 5, 5, 6, 5, 2, -12, 31, 15, 23, -1, 12, 8, }, { 57, -26, 22, -7, -14, -28, -14, -3, -35, 0, 3, 1, 
-11, 16, -18, -10, -4, -31, -15, -28, -14, -23, -1, -21, 7, -2, 11, -1, 3, -1, 18, 9, 10, 13, 49, 24, 8, 14, 2, 16, }, { 25, 15, 22, 11, 18, 4, 15, -22, 8, -2, -17, -9, -48, -20, -30, -17, -16, 11, -1, 16, 2, 10, -5, 26, -2, -4, 22, 0, 2, 10, -6, 13, -14, 10, -23, 0, 10, -2, 1, 0, }, { 47, 3, 11, -6, 15, -13, 2, -38, -6, -13, -15, -22, -40, -28, -28, 0, -5, 8, 10, 15, 7, 7, -4, 13, -1, -14, 9, -14, 0, 2, 4, 18, -7, 36, -6, 8, -3, 13, -7, 8, }, { 14, 7, 36, 13, 10, 10, 18, 0, 0, 5, -25, -8, -43, 0, -48, -18, -27, 0, -12, 3, -7, -6, -7, 13, -15, -5, 11, -3, 0, 2, 0, -12, -6, 10, 0, 23, 22, 11, 26, 12, }, { 36, -5, 24, -4, 7, -7, 6, -17, -14, -5, -22, -22, -35, -8, -46, -1, -17, -3, 0, 2, -2, -10, -5, 0, -14, -15, -2, -18, -2, -4, 11, -7, 1, 36, 18, 32, 7, 27, 17, 20, }, { 33, 13, 29, 24, 1, 1, -2, -18, 0, 9, -3, 17, -27, 0, -21, -30, -12, -11, -5, -2, 12, 4, 9, 19, 18, -9, 13, -6, 11, -8, -2, 35, -8, 10, -7, -1, 4, -11, -10, -2, }, { 55, 1, 17, 6, -1, -16, -15, -35, -15, -2, 0, 4, -19, -8, -20, -13, -1, -14, 7, -3, 18, 0, 10, 5, 19, -19, 0, -21, 8, -16, 9, 39, 0, 36, 10, 7, -9, 4, -20, 5, }, { 22, 5, 42, 26, -6, 8, 1, 2, -9, 17, -10, 18, -21, 19, -39, -31, -23, -23, -16, -15, 2, -12, 7, 6, 5, -9, 1, -10, 7, -16, 4, 9, 0, 10, 17, 22, 16, 2, 14, 9, }, { 44, -6, 30, 8, -9, -10, -11, -14, -23, 5, -8, 4, -14, 12, -37, -14, -12, -26, -4, -16, 8, -16, 9, -7, 6, -19, -12, -25, 5, -24, 15, 13, 8, 36, 34, 31, 1, 18, 4, 18, }, { -3, -5, -9, -7, 15, -1, 5, 13, 2, 12, 5, 2, -21, -23, -2, -16, 0, 5, -6, 13, -23, 3, -32, 10, -15, 8, 44, 28, 9, 37, -2, 13, -9, -15, -12, -27, -7, -12, 0, -11, }, { 18, -17, -21, -25, 11, -19, -6, -3, -11, 0, 7, -11, -13, -31, -1, 0, 9, 1, 5, 12, -18, 0, -31, -2, -13, -1, 30, 14, 7, 29, 9, 18, -1, 10, 4, -18, -22, 3, -10, -2, }, { -13, -13, 3, -5, 7, 4, 9, 34, -5, 20, -2, 3, -16, -3, -20, -17, -11, -7, -17, 0, -34, -13, -33, -2, -28, 8, 32, 24, 5, 29, 3, -12, 0, -15, 11, -3, 3, 2, 24, 1, }, { 8, -25, -8, -23, 3, -13, -3, 17, -20, 8, 0, -10, -8, -11, 
-18, 0, -1, -10, -5, 0, -28, -17, -32, -15, -26, -1, 19, 9, 3, 21, 15, -7, 6, 9, 29, 5, -10, 17, 15, 9, }, { 4, -6, -3, 5, -1, -4, -11, 16, -6, 23, 19, 29, 0, -3, 6, -30, 3, -17, -10, -5, -13, -2, -17, 3, 5, 3, 35, 21, 17, 17, 2, 35, -2, -15, 3, -28, -13, -21, -13, -13, }, { 26, -19, -15, -12, -5, -22, -24, 0, -21, 12, 21, 15, 8, -11, 7, -12, 14, -20, 2, -6, -7, -6, -16, -9, 6, -5, 21, 7, 15, 10, 13, 39, 5, 10, 20, -19, -28, -5, -22, -5, }, { -5, -15, 9, 7, -9, 2, -8, 37, -14, 31, 11, 29, 5, 16, -11, -30, -7, -29, -21, -18, -23, -19, -18, -9, -7, 3, 23, 17, 14, 9, 8, 9, 6, -15, 27, -4, -2, -6, 12, -1, }, { 16, -27, -2, -10, -13, -16, -20, 20, -29, 20, 14, 16, 13, 8, -9, -13, 2, -33, -9, -19, -17, -23, -17, -22, -6, -6, 9, 2, 12, 2, 20, 13, 13, 10, 45, 4, -16, 8, 2, 7, }, { -16, 14, -2, 8, 20, 17, 9, 2, 14, 16, -6, 5, -24, -28, -21, -20, -8, 9, 4, 25, -1, 11, -22, 24, -15, -8, 21, 5, 11, 14, -5, 18, -11, 7, -27, -20, -14, -7, 1, -9, }, { 6, 2, -14, -9, 16, -1, -3, -14, 0, 5, -3, -8, -16, -36, -19, -3, 1, 6, 17, 24, 4, 7, -21, 11, -14, -18, 7, -9, 9, 7, 6, 22, -3, 33, -10, -11, -28, 7, -7, 0, }, { -26, 6, 11, 10, 12, 23, 12, 23, 5, 24, -13, 5, -19, -8, -38, -21, -20, -2, -6, 12, -11, -5, -23, 11, -29, -9, 9, 0, 7, 6, 1, -7, -2, 7, -3, 3, -2, 6, 27, 3, }, { -4, -6, 0, -7, 8, 4, 0, 6, -9, 13, -11, -7, -11, -15, -37, -4, -9, -5, 5, 11, -5, -9, -22, -1, -27, -18, -4, -14, 5, 0, 12, -3, 4, 32, 14, 12, -17, 22, 17, 11, }, { -8, 12, 3, 21, 3, 14, -8, 5, 4, 28, 7, 32, -2, -8, -12, -34, -4, -12, 1, 6, 9, 4, -7, 17, 4, -13, 11, -1, 19, -4, 0, 39, -4, 7, -11, -21, -20, -16, -10, -11, }, { 13, 0, -8, 3, 0, -4, -21, -11, -9, 16, 10, 18, 5, -16, -10, -16, 5, -15, 13, 5, 15, 1, -6, 4, 6, -23, -2, -16, 17, -12, 10, 44, 3, 33, 6, -12, -34, -1, -20, -3, }, { -18, 4, 17, 23, -4, 20, -4, 26, -3, 36, 0, 32, 2, 12, -29, -34, -16, -24, -10, -6, 0, -12, -8, 4, -8, -13, 0, -6, 16, -12, 5, 13, 3, 7, 13, 3, -8, -2, 14, 0, }, { 3, -7, 5, 5, -8, 2, -17, 9, -18, 24, 2, 19, 10, 4, -28, -17, -5, 
-28, 2, -7, 4, -15, -7, -8, -6, -23, -13, -21, 14, -20, 17, 18, 11, 33, 30, 11, -23, 13, 5, 9, }, { 60, 10, 7, -1, 9, -8, 6, -13, 2, -15, -1, -10, -13, -11, 15, 0, 6, 9, -1, 0, -13, 1, -11, -3, -13, 21, 13, 26, -7, 31, -10, -7, -16, -33, -31, -10, 22, -8, 1, -2, }, { 82, -1, -4, -19, 6, -27, -6, -29, -12, -26, 1, -24, -5, -18, 17, 17, 17, 6, 10, 0, -7, -2, -9, -16, -12, 11, 0, 11, -9, 23, 0, -3, -8, -8, -13, -1, 8, 7, -7, 6, }, { 49, 2, 21, 0, 1, -2, 9, 8, -6, -6, -8, -10, -8, 9, -2, 0, -4, -2, -13, -12, -23, -15, -12, -16, -26, 21, 2, 21, -11, 23, -4, -33, -7, -33, -6, 13, 34, 5, 27, 10, }, { 71, -10, 9, -17, -1, -20, -3, -8, -21, -18, -6, -24, 0, 1, 0, 16, 6, -5, 0, -13, -17, -19, -11, -29, -25, 11, -11, 6, -13, 15, 7, -29, 0, -8, 11, 22, 20, 21, 17, 18, }, { 67, 8, 14, 11, -7, -11, -11, -9, -7, -3, 13, 16, 8, 9, 24, -12, 10, -13, -5, -17, -2, -4, 3, -10, 6, 17, 4, 19, 0, 11, -6, 13, -9, -33, -14, -10, 16, -17, -10, -4, }, { 90, -3, 2, -6, -10, -29, -24, -26, -21, -15, 15, 2, 16, 1, 25, 4, 21, -16, 6, -18, 3, -8, 5, -24, 8, 7, -9, 4, -1, 3, 5, 18, -1, -7, 2, -1, 2, -1, -19, 3, }, { 57, 0, 27, 13, -14, -5, -7, 11, -15, 4, 5, 16, 13, 29, 6, -13, 0, -25, -16, -31, -12, -22, 2, -23, -6, 16, -7, 14, -2, 3, 0, -12, 0, -33, 9, 13, 28, -3, 14, 7, }, { 79, -11, 15, -4, -18, -23, -20, -5, -30, -7, 7, 2, 21, 21, 8, 3, 10, -28, -4, -31, -6, -25, 3, -37, -4, 7, -20, 0, -4, -4, 11, -7, 6, -8, 27, 22, 14, 12, 5, 16, }, { 47, 30, 15, 14, 14, 9, 9, -23, 13, -10, -12, -7, -16, -15, -3, -3, -1, 14, 9, 12, 9, 8, 0, 10, -14, 4, -9, 2, -5, 8, -13, -3, -18, -10, -45, -3, 16, -4, 4, 0, }, { 69, 17, 3, -3, 10, -8, -3, -40, -1, -21, -10, -21, -8, -23, -1, 13, 8, 11, 21, 11, 15, 4, 0, -2, -13, -5, -23, -12, -7, 0, -1, 0, -10, 14, -28, 5, 1, 11, -5, 7, }, { 36, 21, 28, 16, 6, 16, 12, -2, 4, -2, -20, -7, -11, 4, -20, -4, -12, 2, -1, 0, 0, -8, -2, -2, -27, 4, -21, -2, -9, 0, -6, -29, -9, -10, -21, 21, 28, 10, 29, 11, }, { 58, 9, 16, -1, 2, -2, 0, -19, -10, -13, -17, -21, -3, -3, -19, 12, -2, 
0, 10, -1, 5, -12, 0, -15, -26, -5, -34, -16, -11, -7, 4, -25, -2, 14, -3, 29, 13, 25, 20, 20, }, { 55, 28, 21, 27, -2, 7, -8, -20, 4, 1, 1, 18, 5, 4, 5, -16, 2, -8, 5, -5, 19, 2, 14, 3, 6, 0, -18, -4, 2, -11, -8, 18, -11, -10, -29, -3, 10, -13, -8, -3, }, { 77, 16, 9, 9, -6, -11, -21, -37, -10, -10, 4, 5, 13, -3, 7, 0, 13, -11, 17, -6, 25, -1, 15, -9, 7, -9, -32, -19, 0, -18, 2, 22, -3, 15, -12, 5, -4, 2, -17, 5, }, { 44, 20, 34, 29, -10, 13, -4, 0, -4, 9, -5, 19, 10, 24, -11, -17, -8, -20, -5, -19, 9, -14, 12, -9, -6, 0, -30, -9, 0, -19, -2, -7, -2, -10, -5, 20, 21, 1, 17, 9, }, { 66, 8, 23, 11, -14, -5, -17, -16, -19, -2, -3, 5, 18, 17, -10, 0, 1, -23, 6, -20, 15, -18, 14, -22, -5, -10, -44, -23, -2, -26, 9, -3, 4, 14, 12, 29, 7, 16, 7, 18, }, { 18, 9, -17, -4, 11, 3, 0, 11, 7, 4, 10, 3, 10, -18, 24, -3, 14, 7, 4, 10, -16, 1, -27, -4, -27, 17, 12, 30, 0, 35, -9, -3, -12, -36, -35, -30, -2, -13, 2, -11, }, { 40, -2, -29, -22, 7, -14, -12, -5, -7, -7, 12, -9, 18, -26, 26, 14, 24, 4, 16, 9, -10, -2, -26, -18, -26, 7, -1, 15, -1, 27, 2, 0, -4, -11, -17, -21, -16, 1, -7, -3, }, { 8, 1, -3, -2, 3, 10, 3, 32, -1, 12, 2, 4, 15, 1, 7, -3, 2, -4, -6, -3, -26, -15, -29, -17, -40, 17, 0, 26, -2, 27, -2, -29, -4, -36, -10, -6, 9, 0, 27, 0, }, { 30, -11, -15, -20, 0, -8, -9, 15, -15, 0, 5, -9, 23, -6, 8, 13, 13, -7, 5, -3, -20, -19, -27, -31, -39, 7, -13, 11, -4, 19, 8, -25, 3, -11, 7, 2, -4, 16, 18, 9, }, { 26, 7, -11, 8, -5, 1, -17, 14, -1, 15, 24, 30, 32, 1, 33, -16, 18, -14, 0, -8, -6, -4, -12, -12, -6, 13, 2, 23, 8, 15, -4, 17, -5, -36, -18, -30, -8, -22, -10, -14, }, { 48, -4, -23, -9, -9, -17, -30, -2, -16, 3, 26, 16, 40, -6, 35, 1, 28, -17, 12, -9, 0, -8, -11, -25, -5, 3, -10, 8, 6, 7, 6, 22, 1, -11, -1, -21, -22, -7, -19, -5, }, { 15, 0, 2, 10, -13, 7, -14, 35, -10, 23, 16, 31, 37, 21, 16, -17, 6, -26, -10, -21, -16, -21, -13, -25, -19, 13, -8, 19, 5, 7, 1, -8, 2, -36, 5, -6, 3, -8, 15, -1, }, { 37, -12, -9, -7, -17, -11, -26, 18, -25, 12, 19, 17, 45, 14, 17, 0, 17, 
-30, 1, -22, -10, -25, -12, -38, -18, 3, -22, 4, 3, 0, 13, -3, 10, -11, 23, 2, -10, 7, 5, 7, }, { 5, 29, -9, 11, 15, 22, 3, 0, 18, 8, -1, 6, 7, -23, 6, -6, 5, 12, 15, 21, 5, 8, -17, 9, -28, 0, -11, 6, 2, 12, -11, 0, -14, -13, -49, -22, -8, -9, 4, -9, }, { 27, 16, -21, -6, 12, 3, -9, -16, 3, -2, 1, -7, 15, -31, 7, 10, 16, 9, 27, 21, 11, 5, -16, -3, -26, -9, -24, -7, 0, 4, 0, 4, -6, 11, -32, -14, -23, 6, -5, -1, }, { -4, 20, 3, 13, 8, 28, 6, 21, 10, 16, -8, 7, 12, -3, -11, -7, -5, 0, 4, 8, -4, -8, -18, -3, -41, 0, -22, 2, 0, 4, -5, -25, -6, -14, -25, 1, 2, 4, 29, 2, }, { 17, 8, -8, -4, 4, 10, -6, 5, -4, 5, -6, -6, 20, -10, -9, 9, 4, -2, 16, 7, 1, -12, -17, -16, -39, -9, -36, -12, -2, -3, 6, -21, 1, 11, -7, 10, -11, 20, 20, 11, }, { 13, 27, -3, 24, -1, 19, -14, 3, 9, 20, 12, 33, 29, -3, 15, -20, 9, -9, 11, 3, 16, 2, -2, 2, -7, -3, -20, 0, 10, -7, -7, 22, -7, -13, -33, -23, -14, -18, -7, -12, }, { 35, 15, -15, 6, -4, 1, -27, -12, -5, 8, 15, 19, 37, -11, 16, -2, 20, -12, 23, 2, 22, -1, -1, -11, -5, -13, -34, -14, 8, -14, 4, 26, 0, 11, -16, -14, -29, -2, -17, -3, }, { 3, 19, 9, 26, -8, 26, -10, 24, 0, 28, 5, 33, 34, 17, -2, -20, -1, -22, 0, -10, 6, -14, -3, -10, -20, -4, -32, -4, 7, -15, 0, -3, 0, -13, -9, 0, -3, -4, 17, 0, }, { 25, 7, -2, 8, -12, 7, -23, 8, -13, 16, 7, 20, 42, 9, 0, -3, 9, -25, 12, -10, 12, -18, -2, -24, -19, -13, -46, -19, 5, -22, 10, 0, 8, 11, 8, 9, -17, 11, 7, 8, }, { -25, -7, 2, -8, 12, -7, 23, -8, 13, -16, -7, -20, -42, -9, 0, 3, -9, 25, -12, 10, -12, 18, 2, 24, 19, 13, 46, 19, -5, 22, -10, 0, -8, -11, -8, -9, 17, -11, -7, -8, }, { -3, -19, -9, -26, 8, -26, 10, -24, 0, -28, -5, -33, -34, -17, 2, 20, 1, 22, 0, 10, -6, 14, 3, 10, 20, 4, 32, 4, -7, 15, 0, 3, 0, 13, 9, 0, 3, 4, -17, 0, }, { -35, -15, 15, -6, 4, -1, 27, 12, 5, -8, -15, -19, -37, 11, -16, 2, -20, 12, -23, -2, -22, 1, 1, 11, 5, 13, 34, 14, -8, 14, -4, -26, 0, -11, 16, 14, 29, 2, 17, 3, }, { -13, -27, 3, -24, 1, -19, 14, -3, -9, -20, -12, -33, -29, 3, -15, 20, -9, 9, -11, -3, -16, -2, 2, 
-2, 7, 3, 20, 0, -10, 7, 7, -22, 7, 13, 33, 23, 14, 18, 7, 12, }, { -17, -8, 8, 4, -4, -10, 6, -5, 4, -5, 6, 6, -20, 10, 9, -9, -4, 2, -16, -7, -1, 12, 17, 16, 39, 9, 36, 12, 2, 3, -6, 21, -1, -11, 7, -10, 11, -20, -20, -11, }, { 4, -20, -3, -13, -8, -28, -6, -21, -10, -16, 8, -7, -12, 3, 11, 7, 5, 0, -4, -8, 4, 8, 18, 3, 41, 0, 22, -2, 0, -4, 5, 25, 6, 14, 25, -1, -2, -4, -29, -2, }, { -27, -16, 21, 6, -12, -3, 9, 16, -3, 2, -1, 7, -15, 31, -7, -10, -16, -9, -27, -21, -11, -5, 16, 3, 26, 9, 24, 7, 0, -4, 0, -4, 6, -11, 32, 14, 23, -6, 5, 1, }, { -5, -29, 9, -11, -15, -22, -3, 0, -18, -8, 1, -6, -7, 23, -6, 6, -5, -12, -15, -21, -5, -8, 17, -9, 28, 0, 11, -6, -2, -12, 11, 0, 14, 13, 49, 22, 8, 9, -4, 9, }, { -37, 12, 9, 7, 17, 11, 26, -18, 25, -12, -19, -17, -45, -14, -17, 0, -17, 30, -1, 22, 10, 25, 12, 38, 18, -3, 22, -4, -3, 0, -13, 3, -10, 11, -23, -2, 10, -7, -5, -7, }, { -15, 0, -2, -10, 13, -7, 14, -35, 10, -23, -16, -31, -37, -21, -16, 17, -6, 26, 10, 21, 16, 21, 13, 25, 19, -13, 8, -19, -5, -7, -1, 8, -2, 36, -5, 6, -3, 8, -15, 1, }, { -48, 4, 23, 9, 9, 17, 30, 2, 16, -3, -26, -16, -40, 6, -35, -1, -28, 17, -12, 9, 0, 8, 11, 25, 5, -3, 10, -8, -6, -7, -6, -22, -1, 11, 1, 21, 22, 7, 19, 5, }, { -26, -7, 11, -8, 5, -1, 17, -14, 1, -15, -24, -30, -32, -1, -33, 16, -18, 14, 0, 8, 6, 4, 12, 12, 6, -13, -2, -23, -8, -15, 4, -17, 5, 36, 18, 30, 8, 22, 10, 14, }, { -30, 11, 15, 20, 0, 8, 9, -15, 15, 0, -5, 9, -23, 6, -8, -13, -13, 7, -5, 3, 20, 19, 27, 31, 39, -7, 13, -11, 4, -19, -8, 25, -3, 11, -7, -2, 4, -16, -18, -9, }, { -8, -1, 3, 2, -3, -10, -3, -32, 1, -12, -2, -4, -15, -1, -7, 3, -2, 4, 6, 3, 26, 15, 29, 17, 40, -17, 0, -26, 2, -27, 2, 29, 4, 36, 10, 6, -9, 0, -27, 0, }, { -40, 2, 29, 22, -7, 14, 12, 5, 7, 7, -12, 9, -18, 26, -26, -14, -24, -4, -16, -9, 10, 2, 26, 18, 26, -7, 1, -15, 1, -27, -2, 0, 4, 11, 17, 21, 16, -1, 7, 3, }, { -18, -9, 17, 4, -11, -3, 0, -11, -7, -4, -10, -3, -10, 18, -24, 3, -14, -7, -4, -10, 16, -1, 27, 4, 27, -17, -12, -30, 0, 
-35, 9, 3, 12, 36, 35, 30, 2, 13, -2, 11, }, { -66, -8, -23, -11, 14, 5, 17, 16, 19, 2, 3, -5, -18, -17, 10, 0, -1, 23, -6, 20, -15, 18, -14, 22, 5, 10, 44, 23, 2, 26, -9, 3, -4, -14, -12, -29, -7, -16, -7, -18, }, { -44, -20, -34, -29, 10, -13, 4, 0, 4, -9, 5, -19, -10, -24, 11, 17, 8, 20, 5, 19, -9, 14, -12, 9, 6, 0, 30, 9, 0, 19, 2, 7, 2, 10, 5, -20, -21, -1, -17, -9, }, { -77, -16, -9, -9, 6, 11, 21, 37, 10, 10, -4, -5, -13, 3, -7, 0, -13, 11, -17, 6, -25, 1, -15, 9, -7, 9, 32, 19, 0, 18, -2, -22, 3, -15, 12, -5, 4, -2, 17, -5, }, { -55, -28, -21, -27, 2, -7, 8, 20, -4, -1, -1, -18, -5, -4, -5, 16, -2, 8, -5, 5, -19, -2, -14, -3, -6, 0, 18, 4, -2, 11, 8, -18, 11, 10, 29, 3, -10, 13, 8, 3, }, { -58, -9, -16, 1, -2, 2, 0, 19, 10, 13, 17, 21, 3, 3, 19, -12, 2, 0, -10, 1, -5, 12, 0, 15, 26, 5, 34, 16, 11, 7, -4, 25, 2, -14, 3, -29, -13, -25, -20, -20, }, { -36, -21, -28, -16, -6, -16, -12, 2, -4, 2, 20, 7, 11, -4, 20, 4, 12, -2, 1, 0, 0, 8, 2, 2, 27, -4, 21, 2, 9, 0, 6, 29, 9, 10, 21, -21, -28, -10, -29, -11, }, { -69, -17, -3, 3, -10, 8, 3, 40, 1, 21, 10, 21, 8, 23, 1, -13, -8, -11, -21, -11, -15, -4, 0, 2, 13, 5, 23, 12, 7, 0, 1, 0, 10, -14, 28, -5, -1, -11, 5, -7, }, { -47, -30, -15, -14, -14, -9, -9, 23, -13, 10, 12, 7, 16, 15, 3, 3, 1, -14, -9, -12, -9, -8, 0, -10, 14, -4, 9, -2, 5, -8, 13, 3, 18, 10, 45, 3, -16, 4, -4, 0, }, { -79, 11, -15, 4, 18, 23, 20, 5, 30, 7, -7, -2, -21, -21, -8, -3, -10, 28, 4, 31, 6, 25, -3, 37, 4, -7, 20, 0, 4, 4, -11, 7, -6, 8, -27, -22, -14, -12, -5, -16, }, { -57, 0, -27, -13, 14, 5, 7, -11, 15, -4, -5, -16, -13, -29, -6, 13, 0, 25, 16, 31, 12, 22, -2, 23, 6, -16, 7, -14, 2, -3, 0, 12, 0, 33, -9, -13, -28, 3, -14, -7, }, { -90, 3, -2, 6, 10, 29, 24, 26, 21, 15, -15, -2, -16, -1, -25, -4, -21, 16, -6, 18, -3, 8, -5, 24, -8, -7, 9, -4, 1, -3, -5, -18, 1, 7, -2, 1, -2, 1, 19, -3, }, { -67, -8, -14, -11, 7, 11, 11, 9, 7, 3, -13, -16, -8, -9, -24, 12, -10, 13, 5, 17, 2, 4, -3, 10, -6, -17, -4, -19, 0, -11, 6, -13, 9, 33, 14, 10, -16, 
17, 10, 4, }, { -71, 10, -9, 17, 1, 20, 3, 8, 21, 18, 6, 24, 0, -1, 0, -16, -6, 5, 0, 13, 17, 19, 11, 29, 25, -11, 11, -6, 13, -15, -7, 29, 0, 8, -11, -22, -20, -21, -17, -18, }, { -49, -2, -21, 0, -1, 2, -9, -8, 6, 6, 8, 10, 8, -9, 2, 0, 4, 2, 13, 12, 23, 15, 12, 16, 26, -21, -2, -21, 11, -23, 4, 33, 7, 33, 6, -13, -34, -5, -27, -10, }, { -82, 1, 4, 19, -6, 27, 6, 29, 12, 26, -1, 24, 5, 18, -17, -17, -17, -6, -10, 0, 7, 2, 9, 16, 12, -11, 0, -11, 9, -23, 0, 3, 8, 8, 13, 1, -8, -7, 7, -6, }, { -60, -10, -7, 1, -9, 8, -6, 13, -2, 15, 1, 10, 13, 11, -15, 0, -6, -9, 1, 0, 13, -1, 11, 3, 13, -21, -13, -26, 7, -31, 10, 7, 16, 33, 31, 10, -22, 8, -1, 2, }, { -3, 7, -5, -5, 8, -2, 17, -9, 18, -24, -2, -19, -10, -4, 28, 17, 5, 28, -2, 7, -4, 15, 7, 8, 6, 23, 13, 21, -14, 20, -17, -18, -11, -33, -30, -11, 23, -13, -5, -9, }, { 18, -4, -17, -23, 4, -20, 4, -26, 3, -36, 0, -32, -2, -12, 29, 34, 16, 24, 10, 6, 0, 12, 8, -4, 8, 13, 0, 6, -16, 12, -5, -13, -3, -7, -13, -3, 8, 2, -14, 0, }, { -13, 0, 8, -3, 0, 4, 21, 11, 9, -16, -10, -18, -5, 16, 10, 16, -5, 15, -13, -5, -15, -1, 6, -4, -6, 23, 2, 16, -17, 12, -10, -44, -3, -33, -6, 12, 34, 1, 20, 3, }, { 8, -12, -3, -21, -3, -14, 8, -5, -4, -28, -7, -32, 2, 8, 12, 34, 4, 12, -1, -6, -9, -4, 7, -17, -4, 13, -11, 1, -19, 4, 0, -39, 4, -7, 11, 21, 20, 16, 10, 11, }, { 4, 6, 0, 7, -8, -4, 0, -6, 9, -13, 11, 7, 11, 15, 37, 4, 9, 5, -5, -11, 5, 9, 22, 1, 27, 18, 4, 14, -5, 0, -12, 3, -4, -32, -14, -12, 17, -22, -17, -11, }, { 26, -6, -11, -10, -12, -23, -12, -23, -5, -24, 13, -5, 19, 8, 38, 21, 20, 2, 6, -12, 11, 5, 23, -11, 29, 9, -9, 0, -7, -6, -1, 7, 2, -7, 3, -3, 2, -6, -27, -3, }, { -6, -2, 14, 9, -16, 1, 3, 14, 0, -5, 3, 8, 16, 36, 19, 3, -1, -6, -17, -24, -4, -7, 21, -11, 14, 18, -7, 9, -9, -7, -6, -22, 3, -33, 10, 11, 28, -7, 7, 0, }, { 16, -14, 2, -8, -20, -17, -9, -2, -14, -16, 6, -5, 24, 28, 21, 20, 8, -9, -4, -25, 1, -11, 22, -24, 15, 8, -21, -5, -11, -14, 5, -18, 11, -7, 27, 20, 14, 7, -1, 9, }, { -16, 27, 2, 10, 13, 16, 
20, -20, 29, -20, -14, -16, -13, -8, 9, 13, -2, 33, 9, 19, 17, 23, 17, 22, 6, 6, -9, -2, -12, -2, -20, -13, -13, -10, -45, -4, 16, -8, -2, -7, }, { 5, 15, -9, -7, 9, -2, 8, -37, 14, -31, -11, -29, -5, -16, 11, 30, 7, 29, 21, 18, 23, 19, 18, 9, 7, -3, -23, -17, -14, -9, -8, -9, -6, 15, -27, 4, 2, 6, -12, 1, }, { -26, 19, 15, 12, 5, 22, 24, 0, 21, -12, -21, -15, -8, 11, -7, 12, -14, 20, -2, 6, 7, 6, 16, 9, -6, 5, -21, -7, -15, -10, -13, -39, -5, -10, -20, 19, 28, 5, 22, 5, }, { -4, 6, 3, -5, 1, 4, 11, -16, 6, -23, -19, -29, 0, 3, -6, 30, -3, 17, 10, 5, 13, 2, 17, -3, -5, -3, -35, -21, -17, -17, -2, -35, 2, 15, -3, 28, 13, 21, 13, 13, }, { -8, 25, 8, 23, -3, 13, 3, -17, 20, -8, 0, 10, 8, 11, 18, 0, 1, 10, 5, 0, 28, 17, 32, 15, 26, 1, -19, -9, -3, -21, -15, 7, -6, -9, -29, -5, 10, -17, -15, -9, }, { 13, 13, -3, 5, -7, -4, -9, -34, 5, -20, 2, -3, 16, 3, 20, 17, 11, 7, 17, 0, 34, 13, 33, 2, 28, -8, -32, -24, -5, -29, -3, 12, 0, 15, -11, 3, -3, -2, -24, -1, }, { -18, 17, 21, 25, -11, 19, 6, 3, 11, 0, -7, 11, 13, 31, 1, 0, -9, -1, -5, -12, 18, 0, 31, 2, 13, 1, -30, -14, -7, -29, -9, -18, 1, -10, -4, 18, 22, -3, 10, 2, }, { 3, 5, 9, 7, -15, 1, -5, -13, -2, -12, -5, -2, 21, 23, 2, 16, 0, -5, 6, -13, 23, -3, 32, -10, 15, -8, -44, -28, -9, -37, 2, -13, 9, 15, 12, 27, 7, 12, 0, 11, }, { -44, 6, -30, -8, 9, 10, 11, 14, 23, -5, 8, -4, 14, -12, 37, 14, 12, 26, 4, 16, -8, 16, -9, 7, -6, 19, 12, 25, -5, 24, -15, -13, -8, -36, -34, -31, -1, -18, -4, -18, }, { -22, -5, -42, -26, 6, -8, -1, -2, 9, -17, 10, -18, 21, -19, 39, 31, 23, 23, 16, 15, -2, 12, -7, -6, -5, 9, -1, 10, -7, 16, -4, -9, 0, -10, -17, -22, -16, -2, -14, -9, }, { -55, -1, -17, -6, 1, 16, 15, 35, 15, 2, 0, -4, 19, 8, 20, 13, 1, 14, -7, 3, -18, 0, -10, -5, -19, 19, 0, 21, -8, 16, -9, -39, 0, -36, -10, -7, 9, -4, 20, -5, }, { -33, -13, -29, -24, -1, -1, 2, 18, 0, -9, 3, -17, 27, 0, 21, 30, 12, 11, 5, 2, -12, -4, -9, -19, -18, 9, -13, 6, -11, 8, 2, -35, 8, -10, 7, 1, -4, 11, 10, 2, }, { -36, 5, -24, 4, -7, 7, -6, 17, 14, 
5, 22, 22, 35, 8, 46, 1, 17, 3, 0, -2, 2, 10, 5, 0, 14, 15, 2, 18, 2, 4, -11, 7, -1, -36, -18, -32, -7, -27, -17, -20, }, { -14, -7, -36, -13, -10, -10, -18, 0, 0, -5, 25, 8, 43, 0, 48, 18, 27, 0, 12, -3, 7, 6, 7, -13, 15, 5, -11, 3, 0, -2, 0, 12, 6, -10, 0, -23, -22, -11, -26, -12, }, { -47, -3, -11, 6, -15, 13, -2, 38, 6, 13, 15, 22, 40, 28, 28, 0, 5, -8, -10, -15, -7, -7, 4, -13, 1, 14, -9, 14, 0, -2, -4, -18, 7, -36, 6, -8, 3, -13, 7, -8, }, { -25, -15, -22, -11, -18, -4, -15, 22, -8, 2, 17, 9, 48, 20, 30, 17, 16, -11, 1, -16, -2, -10, 5, -26, 2, 4, -22, 0, -2, -10, 6, -13, 14, -10, 23, 0, -10, 2, -1, 0, }, { -57, 26, -22, 7, 14, 28, 14, 3, 35, 0, -3, -1, 11, -16, 18, 10, 4, 31, 15, 28, 14, 23, 1, 21, -7, 2, -11, 1, -3, 1, -18, -9, -10, -13, -49, -24, -8, -14, -2, -16, }, { -35, 14, -34, -10, 10, 10, 1, -12, 20, -12, 0, -15, 18, -24, 20, 27, 14, 28, 27, 27, 20, 19, 2, 8, -5, -7, -25, -13, -5, -5, -6, -5, -2, 12, -31, -15, -23, 1, -12, -8, }, { -68, 18, -9, 9, 6, 35, 18, 25, 26, 7, -10, -1, 16, 3, 1, 9, -6, 19, 4, 15, 4, 6, 0, 8, -20, 2, -23, -2, -7, -5, -12, -35, -1, -13, -24, 0, 3, 0, 22, -4, }, { -46, 6, -21, -8, 2, 16, 5, 8, 11, -4, -8, -15, 24, -4, 2, 26, 3, 16, 16, 14, 9, 2, 1, -4, -19, -7, -36, -17, -9, -13, 0, -31, 5, 12, -7, 8, -11, 15, 13, 4, }, { -49, 24, -16, 20, -2, 26, -2, 7, 25, 10, 11, 25, 32, 3, 27, -2, 8, 8, 11, 9, 24, 17, 16, 14, 13, -2, -20, -5, 4, -17, -14, 12, -3, -13, -33, -25, -14, -23, -15, -19, }, { -27, 12, -28, 2, -6, 7, -15, -9, 11, -1, 13, 11, 40, -4, 29, 14, 19, 5, 23, 8, 30, 13, 17, 0, 14, -12, -34, -20, 2, -25, -2, 16, 4, 12, -15, -16, -29, -7, -24, -10, }, { -60, 16, -3, 22, -10, 32, 0, 28, 17, 18, 3, 25, 37, 23, 10, -3, -2, -3, 0, -3, 14, 0, 14, 1, 0, -2, -32, -9, 1, -25, -7, -13, 5, -13, -8, -1, -2, -8, 10, -6, }, { -38, 4, -15, 4, -14, 13, -12, 11, 2, 6, 6, 11, 45, 16, 11, 13, 7, -6, 12, -4, 20, -3, 16, -12, 1, -12, -46, -24, 0, -33, 3, -9, 12, 12, 8, 7, -17, 6, 0, 2 } }; static const int8_t cb2_vects[128][40]={ { 73, -32, 
-60, -15, -26, 59, 2, -33, 30, -10, -3, -17, 8, 30, -1, -26, -4, -22, 10, 16, -36, -5, -11, 56, 37, 6, -10, -5, -13, -3, 6, -5, 11, 4, -19, -5, -16, 41, 24, 13, }, { 4, -11, -37, 23, -5, 46, -2, -29, -5, -39, -21, -9, 0, 49, 12, -9, -16, -26, 22, 15, -45, -20, -5, 40, 22, 17, -26, 31, -14, 2, -14, 10, 30, 20, -27, -9, -39, 39, 18, 5, }, { 34, -25, -48, -28, -11, 34, -2, -41, 9, -7, -17, 21, 20, 24, -17, -33, 0, -24, 10, 42, 3, -5, 10, 42, 11, 8, -3, 3, 16, 9, 22, -2, 0, -33, -10, 18, 7, 58, 10, 28, }, { -34, -4, -25, 10, 9, 21, -7, -36, -26, -36, -35, 28, 12, 42, -3, -16, -12, -28, 21, 42, -5, -21, 16, 26, -4, 19, -19, 39, 15, 15, 1, 13, 19, -17, -17, 14, -15, 55, 4, 19, }, { 28, -20, -51, -14, -6, 7, 0, -26, 27, -4, 18, -40, -6, 16, -1, -15, 0, -55, -5, -16, -19, 14, -3, 49, 14, 1, -22, -30, -12, 0, 24, 15, 9, -17, -45, -29, 4, 28, 51, 35, }, { -40, 0, -28, 24, 14, -5, -4, -21, -7, -33, 0, -32, -15, 35, 12, 1, -11, -58, 5, -16, -28, 0, 1, 33, 0, 11, -39, 5, -14, 6, 3, 31, 28, -1, -53, -33, -19, 25, 46, 26, }, { -11, -14, -39, -27, 9, -17, -4, -33, 6, 0, 4, -1, 5, 10, -17, -22, 5, -57, -5, 9, 20, 13, 18, 35, -11, 3, -16, -22, 17, 13, 40, 19, -1, -55, -35, -5, 27, 44, 37, 49, }, { -80, 6, -16, 11, 30, -30, -9, -28, -28, -29, -13, 6, -2, 28, -3, -5, -7, -60, 5, 9, 11, -1, 24, 19, -27, 13, -32, 13, 15, 19, 19, 35, 17, -39, -43, -9, 4, 42, 32, 41, }, { 78, -21, -43, 4, -38, 17, 17, -5, 55, 24, -15, -36, 14, 4, 24, -24, 12, 5, 17, 31, -54, -5, -2, 27, 43, -12, 2, 9, -9, -15, 22, -3, 28, 21, -20, 3, 20, 28, 9, -5, }, { 9, -1, -20, 43, -17, 3, 12, 0, 20, -4, -33, -29, 6, 22, 38, -7, 0, 1, 29, 30, -63, -21, 3, 11, 27, -1, -14, 45, -10, -9, 1, 12, 47, 37, -28, 0, -2, 26, 4, -13, }, { 39, -14, -30, -8, -22, -8, 12, -12, 34, 27, -29, 2, 26, -2, 8, -31, 16, 3, 17, 57, -14, -6, 19, 13, 16, -10, 8, 17, 20, -2, 38, 0, 17, -16, -11, 27, 44, 45, -4, 8, }, { -29, 5, -7, 30, -1, -21, 7, -7, 0, 0, -47, 9, 18, 15, 22, -14, 4, 0, 28, 57, -23, -21, 25, -2, 1, 0, -7, 53, 19, 3, 17, 15, 
36, 0, -19, 24, 21, 43, -9, 0, }, { 33, -10, -34, 5, -17, -35, 15, 1, 53, 30, 6, -59, 0, -10, 24, -13, 17, -27, 1, -1, -37, 13, 4, 20, 20, -18, -10, -16, -8, -11, 39, 18, 26, 0, -46, -20, 41, 15, 37, 15, }, { -35, 10, -11, 44, 3, -48, 10, 6, 17, 2, -11, -51, -8, 8, 38, 3, 4, -31, 12, -2, -46, -1, 10, 4, 5, -7, -26, 19, -10, -5, 18, 34, 45, 15, -54, -24, 18, 13, 31, 7, }, { -5, -3, -21, -7, -2, -60, 10, -5, 32, 34, -7, -20, 11, -16, 8, -20, 21, -29, 1, 24, 2, 13, 27, 6, -5, -15, -3, -8, 21, 1, 55, 21, 15, -38, -37, 3, 65, 32, 23, 30, }, { -74, 17, 0, 31, 18, -73, 5, 0, -3, 5, -25, -12, 3, 1, 22, -3, 9, -33, 12, 24, -6, -2, 33, -9, -21, -5, -20, 27, 19, 7, 34, 37, 34, -22, -44, 0, 41, 29, 17, 21, }, { 76, -35, -31, -28, -49, 43, -40, 0, 29, -14, 8, 5, 10, 18, -26, -46, 0, 7, 6, 3, -25, -7, -2, 40, 28, 14, 18, -3, -27, -28, -8, -45, -13, 34, -13, -27, -15, 31, 12, 3, }, { 7, -15, -9, 9, -28, 29, -45, 5, -6, -43, -9, 12, 2, 36, -12, -30, -11, 3, 17, 3, -34, -22, 3, 24, 12, 24, 2, 32, -28, -22, -29, -29, 5, 50, -21, -31, -38, 29, 7, -5, }, { 36, -29, -19, -41, -34, 18, -45, -6, 8, -10, -5, 43, 23, 11, -42, -53, 5, 5, 6, 30, 14, -8, 20, 26, 1, 16, 25, 4, 3, -15, 7, -41, -23, -3, -4, -3, 8, 48, -1, 17, }, { -32, -8, 3, -2, -13, 4, -50, -1, -27, -39, -23, 51, 15, 30, -27, -37, -7, 1, 17, 29, 5, -23, 25, 10, -14, 26, 8, 41, 1, -9, -13, -26, -5, 12, -12, -7, -14, 45, -6, 9, }, { 31, -24, -23, -27, -29, -9, -43, 8, 26, -7, 30, -17, -4, 3, -26, -35, 5, -24, -10, -28, -9, 12, 5, 33, 5, 8, 5, -29, -26, -24, 9, -23, -14, 12, -39, -52, 5, 18, 39, 24, }, { -37, -3, 0, 10, -7, -22, -48, 12, -8, -36, 12, -9, -12, 22, -12, -19, -6, -28, 0, -29, -18, -3, 11, 17, -10, 18, -10, 7, -27, -18, -11, -7, 3, 28, -47, -55, -18, 15, 34, 16, }, { -8, -17, -10, -40, -13, -34, -47, 0, 5, -4, 16, 21, 8, -2, -42, -43, 10, -26, -10, -2, 31, 11, 27, 19, -21, 10, 12, -20, 3, -11, 25, -20, -25, -25, -29, -28, 28, 34, 25, 38, }, { -77, 2, 11, -1, 7, -47, -52, 5, -29, -33, -1, 28, 0, 15, -28, -26, -2, -30, 
0, -2, 22, -4, 33, 3, -36, 21, -3, 15, 2, -5, 4, -4, -6, -9, -37, -31, 5, 32, 20, 30, }, { 81, -25, -14, -8, -61, 0, -25, 28, 54, 20, -3, -14, 17, -8, 0, -44, 16, 35, 13, 18, -43, -7, 6, 11, 33, -4, 30, 11, -22, -40, 6, -43, 3, 50, -14, -18, 22, 18, -1, -16, }, { 12, -4, 8, 29, -39, -12, -30, 33, 19, -8, -21, -6, 8, 9, 13, -28, 4, 31, 24, 18, -52, -23, 12, -4, 18, 5, 14, 47, -24, -34, -14, -27, 22, 66, -22, -22, -1, 16, -6, -24, }, { 41, -18, -2, -21, -45, -24, -30, 21, 33, 24, -17, 24, 29, -15, -16, -51, 21, 33, 13, 45, -3, -8, 28, -2, 7, -2, 37, 19, 7, -27, 22, -39, -7, 12, -5, 5, 45, 35, -15, -1, }, { -27, 1, 20, 17, -24, -38, -35, 26, -1, -4, -35, 32, 21, 3, -2, -35, 8, 29, 24, 44, -12, -24, 34, -18, -8, 7, 21, 55, 5, -21, 2, -23, 11, 28, -13, 1, 22, 33, -21, -10, }, { 36, -13, -5, -7, -40, -51, -28, 36, 52, 27, 18, -36, 2, -22, 0, -33, 21, 2, -3, -13, -26, 11, 14, 4, 10, -10, 18, -14, -22, -36, 24, -21, 1, 28, -40, -42, 42, 5, 25, 5, }, { -32, 6, 17, 31, -19, -65, -33, 41, 16, -1, 0, -29, -6, -4, 13, -17, 9, -1, 8, -14, -35, -3, 19, -11, -4, 0, 1, 21, -23, -30, 3, -5, 20, 44, -48, -46, 19, 3, 20, -3, }, { -3, -7, 6, -20, -25, -77, -32, 29, 31, 30, 4, 2, 14, -29, -16, -40, 26, 0, -3, 12, 13, 10, 36, -9, -15, -8, 24, -6, 7, -22, 40, -17, -8, -9, -31, -18, 66, 22, 11, 19, }, { -72, 13, 29, 18, -4, -90, -37, 34, -4, 1, -13, 9, 6, -11, -2, -24, 13, -3, 7, 11, 4, -4, 42, -25, -31, 1, 8, 29, 6, -17, 19, -2, 10, 6, -38, -22, 42, 19, 6, 11, }, { 116, -20, -68, -30, -28, 83, 28, -18, 32, -22, -13, -21, 5, 28, 5, -7, -24, -8, -22, 17, -23, 30, -25, 45, 15, -9, -11, -18, 22, -10, 4, -2, 19, -12, 23, 3, -43, 2, 12, -4, }, { 47, 0, -45, 7, -7, 69, 23, -13, -2, -51, -32, -14, -3, 47, 19, 8, -37, -11, -10, 16, -32, 15, -19, 29, 0, 1, -28, 18, 20, -4, -16, 13, 38, 3, 15, 0, -66, 0, 7, -13, }, { 77, -13, -56, -43, -13, 57, 23, -26, 11, -19, -27, 16, 17, 22, -10, -15, -19, -10, -22, 43, 16, 30, -2, 31, -11, -6, -5, -9, 52, 2, 20, 0, 8, -50, 33, 27, -19, 19, -1, 9, }, { 8, 6, 
-33, -4, 7, 44, 18, -21, -23, -48, -46, 24, 9, 40, 3, 1, -32, -13, -11, 43, 7, 14, 3, 15, -26, 3, -21, 26, 50, 8, 0, 16, 27, -34, 25, 23, -43, 17, -6, 1, }, { 71, -9, -59, -29, -8, 30, 26, -11, 30, -16, 8, -44, -9, 14, 5, 2, -19, -40, -38, -15, -7, 50, -17, 38, -7, -14, -24, -43, 22, -6, 22, 19, 17, -34, -2, -20, -23, -10, 39, 16, }, { 2, 11, -36, 9, 13, 17, 21, -6, -5, -45, -10, -36, -18, 33, 19, 19, -31, -44, -27, -15, -16, 34, -11, 22, -22, -4, -40, -7, 21, 0, 1, 35, 36, -18, -10, -24, -46, -12, 34, 8, }, { 32, -2, -47, -42, 7, 5, 21, -18, 9, -12, -5, -5, 2, 8, -10, -4, -14, -42, -38, 10, 33, 49, 5, 24, -33, -12, -17, -35, 52, 6, 38, 22, 7, -72, 7, 3, 0, 6, 25, 30, }, { -36, 18, -24, -3, 28, -7, 16, -13, -26, -41, -24, 1, -5, 26, 3, 12, -27, -46, -27, 10, 24, 34, 10, 8, -49, -2, -34, 0, 51, 12, 17, 38, 25, -56, 0, 0, -22, 3, 20, 22, }, { 121, -9, -50, -10, -40, 40, 43, 9, 58, 12, -25, -41, 11, 2, 31, -5, -8, 19, -15, 32, -41, 30, -16, 16, 20, -28, 0, -3, 26, -22, 19, 0, 36, 4, 22, 12, -6, -9, -1, -24, }, { 52, 10, -27, 27, -18, 26, 38, 14, 23, -16, -44, -33, 3, 20, 45, 10, -20, 15, -3, 31, -50, 14, -10, 0, 5, -17, -15, 32, 24, -16, -1, 15, 55, 20, 14, 8, -29, -12, -7, -32, }, { 82, -3, -38, -23, -24, 15, 38, 2, 37, 15, -39, -2, 23, -4, 15, -12, -3, 17, -15, 58, -1, 29, 6, 2, -5, -26, 7, 4, 56, -9, 35, 3, 25, -33, 32, 36, 17, 7, -15, -9, }, { 13, 17, -15, 15, -3, 1, 33, 7, 1, -12, -58, 5, 15, 13, 29, 3, -16, 13, -4, 57, -10, 13, 11, -13, -21, -15, -9, 40, 55, -3, 14, 19, 44, -17, 24, 32, -5, 4, -21, -18, }, { 76, 1, -41, -9, -19, -12, 41, 17, 55, 18, -3, -63, -3, -12, 30, 5, -3, -12, -31, 0, -24, 49, -8, 9, -1, -33, -12, -29, 27, -18, 37, 21, 34, -17, -3, -11, 14, -23, 25, -2, }, { 7, 22, -18, 29, 1, -25, 36, 21, 20, -9, -22, -56, -11, 6, 45, 21, -15, -16, -20, -1, -33, 34, -2, -6, -17, -23, -28, 6, 25, -12, 16, 37, 53, -1, -11, -15, -8, -25, 20, -11, }, { 37, 8, -29, -22, -4, -37, 36, 9, 34, 22, -17, -24, 8, -18, 15, -2, 1, -14, -31, 25, 15, 48, 13, -4, -28, 
-31, -5, -21, 57, -4, 53, 24, 23, -55, 6, 12, 37, -6, 11, 11, }, { -31, 28, -6, 16, 16, -50, 31, 14, 0, -6, -36, -17, 0, 0, 29, 14, -11, -18, -20, 25, 6, 33, 19, -20, -43, -21, -21, 14, 55, 0, 32, 40, 42, -39, -1, 8, 14, -8, 6, 3, }, { 119, -24, -39, -44, -51, 66, -14, 15, 31, -26, -1, 0, 7, 16, -19, -28, -19, 22, -26, 4, -13, 28, -16, 29, 5, -1, 16, -16, 8, -35, -10, -42, -4, 17, 29, -19, -42, -7, 0, -15, }, { 50, -3, -16, -5, -30, 53, -19, 20, -3, -55, -19, 8, 0, 34, -5, -11, -32, 18, -15, 4, -22, 13, -10, 13, -9, 8, 0, 19, 7, -29, -31, -26, 13, 33, 21, -22, -65, -9, -4, -23, }, { 79, -17, -27, -56, -36, 41, -19, 8, 10, -22, -15, 39, 20, 9, -35, -35, -15, 20, -26, 31, 26, 27, 6, 15, -20, 0, 23, -8, 38, -22, 5, -38, -15, -20, 39, 4, -18, 9, -13, -1, }, { 10, 3, -4, -18, -15, 27, -24, 13, -24, -51, -34, 47, 12, 28, -21, -19, -27, 16, -15, 30, 17, 12, 12, 0, -36, 10, 7, 27, 37, -16, -15, -22, 3, -4, 31, 1, -42, 7, -18, -9, }, { 74, -12, -30, -42, -30, 14, -16, 23, 29, -19, 20, -21, -7, 1, -19, -17, -14, -10, -43, -27, 3, 48, -8, 22, -16, -7, 4, -42, 9, -31, 6, -20, -6, -4, 3, -43, -22, -20, 28, 5, }, { 5, 7, -7, -4, -9, 0, -21, 28, -6, -48, 2, -14, -15, 20, -5, 0, -27, -14, -32, -28, -5, 32, -2, 6, -32, 3, -12, -5, 8, -25, -14, -4, 12, 11, -4, -47, -45, -22, 22, -2, }, { 34, -6, -18, -55, -15, -11, -21, 16, 8, -16, 6, 16, 5, -4, -35, -24, -10, -12, -43, -1, 43, 47, 14, 8, -43, -5, 10, -34, 39, -18, 22, -16, -17, -42, 13, -19, 1, -3, 14, 20, }, { -34, 14, 4, -17, 5, -24, -26, 20, -27, -45, -12, 24, -2, 13, -21, -8, -22, -16, -32, -2, 34, 31, 20, -7, -58, 5, -5, 2, 38, -12, 2, -1, 1, -26, 5, -23, -21, -6, 8, 11, }, { 124, -13, -21, -23, -62, 23, 0, 43, 57, 8, -13, -18, 14, -10, 6, -26, -3, 49, -19, 19, -31, 27, -7, 0, 11, -20, 29, -1, 12, -47, 4, -39, 11, 34, 28, -9, -5, -19, -13, -34, }, { 55, 6, 1, 14, -41, 10, -4, 48, 22, -20, -31, -10, 5, 7, 20, -9, -16, 45, -8, 19, -40, 12, -1, -15, -4, -10, 12, 34, 11, -41, -16, -24, 30, 49, 20, -13, -28, -22, -18, -43, }, { 84, 
-6, -9, -36, -47, -1, -4, 36, 36, 12, -27, 20, 26, -17, -9, -33, 1, 47, -19, 46, 9, 27, 15, -13, -15, -18, 35, 6, 42, -33, 20, -36, 1, -4, 38, 14, 18, -2, -27, -20, }, { 15, 13, 13, 1, -26, -14, -9, 41, 1, -16, -46, 27, 18, 1, 4, -16, -11, 43, -8, 45, 0, 11, 21, -29, -30, -8, 19, 42, 41, -28, 0, -20, 20, 11, 30, 10, -4, -5, -32, -28, }, { 79, -2, -12, -22, -42, -28, -1, 51, 54, 15, 8, -41, 0, -24, 6, -15, 1, 17, -36, -12, -14, 47, 0, -6, -11, -26, 16, -27, 13, -43, 22, -18, 10, 12, 2, -34, 15, -33, 13, -13, }, { 10, 18, 10, 15, -21, -41, -6, 56, 19, -13, -9, -33, -9, -6, 20, 1, -11, 13, -24, -13, -23, 32, 6, -22, -26, -15, 0, 8, 12, -37, 1, -2, 28, 27, -5, -37, -7, -35, 8, -21, }, { 39, 4, 0, -35, -27, -53, -6, 44, 33, 18, -5, -2, 11, -31, -9, -22, 6, 15, -36, 13, 25, 46, 23, -20, -37, -24, 23, -19, 43, -29, 38, -14, 0, -26, 12, -10, 38, -16, 0, 0, }, { -29, 25, 22, 2, -6, -67, -11, 49, -1, -10, -24, 5, 3, -13, 4, -5, -6, 11, -25, 12, 16, 31, 28, -36, -53, -13, 6, 16, 42, -24, 17, 1, 18, -10, 4, -13, 15, -18, -5, -7, }, { 29, -25, -22, -2, 6, 67, 11, -49, 1, 10, 24, -5, -3, 13, -4, 5, 6, -11, 25, -12, -16, -31, -28, 36, 53, 13, -6, -16, -42, 24, -17, -1, -18, 10, -4, 13, -15, 18, 5, 7, }, { -39, -4, 0, 35, 27, 53, 6, -44, -33, -18, 5, 2, -11, 31, 9, 22, -6, -15, 36, -13, -25, -46, -23, 20, 37, 24, -23, 19, -43, 29, -38, 14, 0, 26, -12, 10, -38, 16, 0, 0, }, { -10, -18, -10, -15, 21, 41, 6, -56, -19, 13, 9, 33, 9, 6, -20, -1, 11, -13, 24, 13, 23, -32, -6, 22, 26, 15, 0, -8, -12, 37, -1, 2, -28, -27, 5, 37, 7, 35, -8, 21, }, { -79, 2, 12, 22, 42, 28, 1, -51, -54, -15, -8, 41, 0, 24, -6, 15, -1, -17, 36, 12, 14, -47, 0, 6, 11, 26, -16, 27, -13, 43, -22, 18, -10, -12, -2, 34, -15, 33, -13, 13, }, { -15, -13, -13, -1, 26, 14, 9, -41, -1, 16, 46, -27, -18, -1, -4, 16, 11, -43, 8, -45, 0, -11, -21, 29, 30, 8, -19, -42, -41, 28, 0, 20, -20, -11, -30, -10, 4, 5, 32, 28, }, { -84, 6, 9, 36, 47, 1, 4, -36, -36, -12, 27, -20, -26, 17, 9, 33, -1, -47, 19, -46, -9, -27, -15, 13, 
15, 18, -35, -6, -42, 33, -20, 36, -1, 4, -38, -14, -18, 2, 27, 20, }, { -55, -6, -1, -14, 41, -10, 4, -48, -22, 20, 31, 10, -5, -7, -20, 9, 16, -45, 8, -19, 40, -12, 1, 15, 4, 10, -12, -34, -11, 41, 16, 24, -30, -49, -20, 13, 28, 22, 18, 43, }, { -124, 13, 21, 23, 62, -23, 0, -43, -57, -8, 13, 18, -14, 10, -6, 26, 3, -49, 19, -19, 31, -27, 7, 0, -11, 20, -29, 1, -12, 47, -4, 39, -11, -34, -28, 9, 5, 19, 13, 34, }, { 34, -14, -4, 17, -5, 24, 26, -20, 27, 45, 12, -24, 2, -13, 21, 8, 22, 16, 32, 2, -34, -31, -20, 7, 58, -5, 5, -2, -38, 12, -2, 1, -1, 26, -5, 23, 21, 6, -8, -11, }, { -34, 6, 18, 55, 15, 11, 21, -16, -8, 16, -6, -16, -5, 4, 35, 24, 10, 12, 43, 1, -43, -47, -14, -8, 43, 5, -10, 34, -39, 18, -22, 16, 17, 42, -13, 19, -1, 3, -14, -20, }, { -5, -7, 7, 4, 9, 0, 21, -28, 6, 48, -2, 14, 15, -20, 5, 0, 27, 14, 32, 28, 5, -32, 2, -6, 32, -3, 12, 5, -8, 25, 14, 4, -12, -11, 4, 47, 45, 22, -22, 2, }, { -74, 12, 30, 42, 30, -14, 16, -23, -29, 19, -20, 21, 7, -1, 19, 17, 14, 10, 43, 27, -3, -48, 8, -22, 16, 7, -4, 42, -9, 31, -6, 20, 6, 4, -3, 43, 22, 20, -28, -5, }, { -10, -3, 4, 18, 15, -27, 24, -13, 24, 51, 34, -47, -12, -28, 21, 19, 27, -16, 15, -30, -17, -12, -12, 0, 36, -10, -7, -27, -37, 16, 15, 22, -3, 4, -31, -1, 42, -7, 18, 9, }, { -79, 17, 27, 56, 36, -41, 19, -8, -10, 22, 15, -39, -20, -9, 35, 35, 15, -20, 26, -31, -26, -27, -6, -15, 20, 0, -23, 8, -38, 22, -5, 38, 15, 20, -39, -4, 18, -9, 13, 1, }, { -50, 3, 16, 5, 30, -53, 19, -20, 3, 55, 19, -8, 0, -34, 5, 11, 32, -18, 15, -4, 22, -13, 10, -13, 9, -8, 0, -19, -7, 29, 31, 26, -13, -33, -21, 22, 65, 9, 4, 23, }, { -119, 24, 39, 44, 51, -66, 14, -15, -31, 26, 1, 0, -7, -16, 19, 28, 19, -22, 26, -4, 13, -28, 16, -29, -5, 1, -16, 16, -8, 35, 10, 42, 4, -17, -29, 19, 42, 7, 0, 15, }, { 31, -28, 6, -16, -16, 50, -31, -14, 0, 6, 36, 17, 0, 0, -29, -14, 11, 18, 20, -25, -6, -33, -19, 20, 43, 21, 21, -14, -55, 0, -32, -40, -42, 39, 1, -8, -14, 8, -6, -3, }, { -37, -8, 29, 22, 4, 37, -36, -9, -34, -22, 17, 24, 
-8, 18, -15, 2, -1, 14, 31, -25, -15, -48, -13, 4, 28, 31, 5, 21, -57, 4, -53, -24, -23, 55, -6, -12, -37, 6, -11, -11, }, { -7, -22, 18, -29, -1, 25, -36, -21, -20, 9, 22, 56, 11, -6, -45, -21, 15, 16, 20, 1, 33, -34, 2, 6, 17, 23, 28, -6, -25, 12, -16, -37, -53, 1, 11, 15, 8, 25, -20, 11, }, { -76, -1, 41, 9, 19, 12, -41, -17, -55, -18, 3, 63, 3, 12, -30, -5, 3, 12, 31, 0, 24, -49, 8, -9, 1, 33, 12, 29, -27, 18, -37, -21, -34, 17, 3, 11, -14, 23, -25, 2, }, { -13, -17, 15, -15, 3, -1, -33, -7, -1, 12, 58, -5, -15, -13, -29, -3, 16, -13, 4, -57, 10, -13, -11, 13, 21, 15, 9, -40, -55, 3, -14, -19, -44, 17, -24, -32, 5, -4, 21, 18, }, { -82, 3, 38, 23, 24, -15, -38, -2, -37, -15, 39, 2, -23, 4, -15, 12, 3, -17, 15, -58, 1, -29, -6, -2, 5, 26, -7, -4, -56, 9, -35, -3, -25, 33, -32, -36, -17, -7, 15, 9, }, { -52, -10, 27, -27, 18, -26, -38, -14, -23, 16, 44, 33, -3, -20, -45, -10, 20, -15, 3, -31, 50, -14, 10, 0, -5, 17, 15, -32, -24, 16, 1, -15, -55, -20, -14, -8, 29, 12, 7, 32, }, { -121, 9, 50, 10, 40, -40, -43, -9, -58, -12, 25, 41, -11, -2, -31, 5, 8, -19, 15, -32, 41, -30, 16, -16, -20, 28, 0, 3, -26, 22, -19, 0, -36, -4, -22, -12, 6, 9, 1, 24, }, { 36, -18, 24, 3, -28, 7, -16, 13, 26, 41, 24, -1, 5, -26, -3, -12, 27, 46, 27, -10, -24, -34, -10, -8, 49, 2, 34, 0, -51, -12, -17, -38, -25, 56, 0, 0, 22, -3, -20, -22, }, { -32, 2, 47, 42, -7, -5, -21, 18, -9, 12, 5, 5, -2, -8, 10, 4, 14, 42, 38, -10, -33, -49, -5, -24, 33, 12, 17, 35, -52, -6, -38, -22, -7, 72, -7, -3, 0, -6, -25, -30, }, { -2, -11, 36, -9, -13, -17, -21, 6, 5, 45, 10, 36, 18, -33, -19, -19, 31, 44, 27, 15, 16, -34, 11, -22, 22, 4, 40, 7, -21, 0, -1, -35, -36, 18, 10, 24, 46, 12, -34, -8, }, { -71, 9, 59, 29, 8, -30, -26, 11, -30, 16, -8, 44, 9, -14, -5, -2, 19, 40, 38, 15, 7, -50, 17, -38, 7, 14, 24, 43, -22, 6, -22, -19, -17, 34, 2, 20, 23, 10, -39, -16, }, { -8, -6, 33, 4, -7, -44, -18, 21, 23, 48, 46, -24, -9, -40, -3, -1, 32, 13, 11, -43, -7, -14, -3, -15, 26, -3, 21, -26, -50, -8, 0, -16, 
-27, 34, -25, -23, 43, -17, 6, -1, }, { -77, 13, 56, 43, 13, -57, -23, 26, -11, 19, 27, -16, -17, -22, 10, 15, 19, 10, 22, -43, -16, -30, 2, -31, 11, 6, 5, 9, -52, -2, -20, 0, -8, 50, -33, -27, 19, -19, 1, -9, }, { -47, 0, 45, -7, 7, -69, -23, 13, 2, 51, 32, 14, 3, -47, -19, -8, 37, 11, 10, -16, 32, -15, 19, -29, 0, -1, 28, -18, -20, 4, 16, -13, -38, -3, -15, 0, 66, 0, -7, 13, }, { -116, 20, 68, 30, 28, -83, -28, 18, -32, 22, 13, 21, -5, -28, -5, 7, 24, 8, 22, -17, 23, -30, 25, -45, -15, 9, 11, 18, -22, 10, -4, 2, -19, 12, -23, -3, 43, -2, -12, 4, }, { 72, -13, -29, -18, 4, 90, 37, -34, 4, -1, 13, -9, -6, 11, 2, 24, -13, 3, -7, -11, -4, 4, -42, 25, 31, -1, -8, -29, -6, 17, -19, 2, -10, -6, 38, 22, -42, -19, -6, -11, }, { 3, 7, -6, 20, 25, 77, 32, -29, -31, -30, -4, -2, -14, 29, 16, 40, -26, 0, 3, -12, -13, -10, -36, 9, 15, 8, -24, 6, -7, 22, -40, 17, 8, 9, 31, 18, -66, -22, -11, -19, }, { 32, -6, -17, -31, 19, 65, 33, -41, -16, 1, 0, 29, 6, 4, -13, 17, -9, 1, -8, 14, 35, 3, -19, 11, 4, 0, -1, -21, 23, 30, -3, 5, -20, -44, 48, 46, -19, -3, -20, 3, }, { -36, 13, 5, 7, 40, 51, 28, -36, -52, -27, -18, 36, -2, 22, 0, 33, -21, -2, 3, 13, 26, -11, -14, -4, -10, 10, -18, 14, 22, 36, -24, 21, -1, -28, 40, 42, -42, -5, -25, -5, }, { 27, -1, -20, -17, 24, 38, 35, -26, 1, 4, 35, -32, -21, -3, 2, 35, -8, -29, -24, -44, 12, 24, -34, 18, 8, -7, -21, -55, -5, 21, -2, 23, -11, -28, 13, -1, -22, -33, 21, 10, }, { -41, 18, 2, 21, 45, 24, 30, -21, -33, -24, 17, -24, -29, 15, 16, 51, -21, -33, -13, -45, 3, 8, -28, 2, -7, 2, -37, -19, -7, 27, -22, 39, 7, -12, 5, -5, -45, -35, 15, 1, }, { -12, 4, -8, -29, 39, 12, 30, -33, -19, 8, 21, 6, -8, -9, -13, 28, -4, -31, -24, -18, 52, 23, -12, 4, -18, -5, -14, -47, 24, 34, 14, 27, -22, -66, 22, 22, 1, -16, 6, 24, }, { -81, 25, 14, 8, 61, 0, 25, -28, -54, -20, 3, 14, -17, 8, 0, 44, -16, -35, -13, -18, 43, 7, -6, -11, -33, 4, -30, -11, 22, 40, -6, 43, -3, -50, 14, 18, -22, -18, 1, 16, }, { 77, -2, -11, 1, -7, 47, 52, -5, 29, 33, 1, -28, 0, -15, 28, 
26, 2, 30, 0, 2, -22, 4, -33, -3, 36, -21, 3, -15, -2, 5, -4, 4, 6, 9, 37, 31, -5, -32, -20, -30, }, { 8, 17, 10, 40, 13, 34, 47, 0, -5, 4, -16, -21, -8, 2, 42, 43, -10, 26, 10, 2, -31, -11, -27, -19, 21, -10, -12, 20, -3, 11, -25, 20, 25, 25, 29, 28, -28, -34, -25, -38, }, { 37, 3, 0, -10, 7, 22, 48, -12, 8, 36, -12, 9, 12, -22, 12, 19, 6, 28, 0, 29, 18, 3, -11, -17, 10, -18, 10, -7, 27, 18, 11, 7, -3, -28, 47, 55, 18, -15, -34, -16, }, { -31, 24, 23, 27, 29, 9, 43, -8, -26, 7, -30, 17, 4, -3, 26, 35, -5, 24, 10, 28, 9, -12, -5, -33, -5, -8, -5, 29, 26, 24, -9, 23, 14, -12, 39, 52, -5, -18, -39, -24, }, { 32, 8, -3, 2, 13, -4, 50, 1, 27, 39, 23, -51, -15, -30, 27, 37, 7, -1, -17, -29, -5, 23, -25, -10, 14, -26, -8, -41, -1, 9, 13, 26, 5, -12, 12, 7, 14, -45, 6, -9, }, { -36, 29, 19, 41, 34, -18, 45, 6, -8, 10, 5, -43, -23, -11, 42, 53, -5, -5, -6, -30, -14, 8, -20, -26, -1, -16, -25, -4, -3, 15, -7, 41, 23, 3, 4, 3, -8, -48, 1, -17, }, { -7, 15, 9, -9, 28, -29, 45, -5, 6, 43, 9, -12, -2, -36, 12, 30, 11, -3, -17, -3, 34, 22, -3, -24, -12, -24, -2, -32, 28, 22, 29, 29, -5, -50, 21, 31, 38, -29, -7, 5, }, { -76, 35, 31, 28, 49, -43, 40, 0, -29, 14, -8, -5, -10, -18, 26, 46, 0, -7, -6, -3, 25, 7, 2, -40, -28, -14, -18, 3, 27, 28, 8, 45, 13, -34, 13, 27, 15, -31, -12, -3, }, { 74, -17, 0, -31, -18, 73, -5, 0, 3, -5, 25, 12, -3, -1, -22, 3, -9, 33, -12, -24, 6, 2, -33, 9, 21, 5, 20, -27, -19, -7, -34, -37, -34, 22, 44, 0, -41, -29, -17, -21, }, { 5, 3, 21, 7, 2, 60, -10, 5, -32, -34, 7, 20, -11, 16, -8, 20, -21, 29, -1, -24, -2, -13, -27, -6, 5, 15, 3, 8, -21, -1, -55, -21, -15, 38, 37, -3, -65, -32, -23, -30, }, { 35, -10, 11, -44, -3, 48, -10, -6, -17, -2, 11, 51, 8, -8, -38, -3, -4, 31, -12, 2, 46, 1, -10, -4, -5, 7, 26, -19, 10, 5, -18, -34, -45, -15, 54, 24, -18, -13, -31, -7, }, { -33, 10, 34, -5, 17, 35, -15, -1, -53, -30, -6, 59, 0, 10, -24, 13, -17, 27, -1, 1, 37, -13, -4, -20, -20, 18, 10, 16, 8, 11, -39, -18, -26, 0, 46, 20, -41, -15, -37, -15, }, { 29, -5, 
7, -30, 1, 21, -7, 7, 0, 0, 47, -9, -18, -15, -22, 14, -4, 0, -28, -57, 23, 21, -25, 2, -1, 0, 7, -53, -19, -3, -17, -15, -36, 0, 19, -24, -21, -43, 9, 0, }, { -39, 14, 30, 8, 22, 8, -12, 12, -34, -27, 29, -2, -26, 2, -8, 31, -16, -3, -17, -57, 14, 6, -19, -13, -16, 10, -8, -17, -20, 2, -38, 0, -17, 16, 11, -27, -44, -45, 4, -8, }, { -9, 1, 20, -43, 17, -3, -12, 0, -20, 4, 33, 29, -6, -22, -38, 7, 0, -1, -29, -30, 63, 21, -3, -11, -27, 1, 14, -45, 10, 9, -1, -12, -47, -37, 28, 0, 2, -26, -4, 13, }, { -78, 21, 43, -4, 38, -17, -17, 5, -55, -24, 15, 36, -14, -4, -24, 24, -12, -5, -17, -31, 54, 5, 2, -27, -43, 12, -2, -9, 9, 15, -22, 3, -28, -21, 20, -3, -20, -28, -9, 5, }, { 80, -6, 16, -11, -30, 30, 9, 28, 28, 29, 13, -6, 2, -28, 3, 5, 7, 60, -5, -9, -11, 1, -24, -19, 27, -13, 32, -13, -15, -19, -19, -35, -17, 39, 43, 9, -4, -42, -32, -41, }, { 11, 14, 39, 27, -9, 17, 4, 33, -6, 0, -4, 1, -5, -10, 17, 22, -5, 57, 5, -9, -20, -13, -18, -35, 11, -3, 16, 22, -17, -13, -40, -19, 1, 55, 35, 5, -27, -44, -37, -49, }, { 40, 0, 28, -24, -14, 5, 4, 21, 7, 33, 0, 32, 15, -35, -12, -1, 11, 58, -5, 16, 28, 0, -1, -33, 0, -11, 39, -5, 14, -6, -3, -31, -28, 1, 53, 33, 19, -25, -46, -26, }, { -28, 20, 51, 14, 6, -7, 0, 26, -27, 4, -18, 40, 6, -16, 1, 15, 0, 55, 5, 16, 19, -14, 3, -49, -14, -1, 22, 30, 12, 0, -24, -15, -9, 17, 45, 29, -4, -28, -51, -35, }, { 34, 4, 25, -10, -9, -21, 7, 36, 26, 36, 35, -28, -12, -42, 3, 16, 12, 28, -21, -42, 5, 21, -16, -26, 4, -19, 19, -39, -15, -15, -1, -13, -19, 17, 17, -14, 15, -55, -4, -19, }, { -34, 25, 48, 28, 11, -34, 2, 41, -9, 7, 17, -21, -20, -24, 17, 33, 0, 24, -10, -42, -3, 5, -10, -42, -11, -8, 3, -3, -16, -9, -22, 2, 0, 33, 10, -18, -7, -58, -10, -28, }, { -4, 11, 37, -23, 5, -46, 2, 29, 5, 39, 21, 9, 0, -49, -12, 9, 16, 26, -22, -15, 45, 20, 5, -40, -22, -17, 26, -31, 14, -2, 14, -10, -30, -20, 27, 9, 39, -39, -18, -5, }, { -73, 32, 60, 15, 26, -59, -2, 33, -30, 10, 3, 17, -8, -30, 1, 26, 4, 22, -10, -16, 36, 5, 11, -56, -37, -6, 10, 
5, 13, 3, -6, 5, -11, -4, 19, 5, 16, -41, -24, -13 } }; static const uint16_t cb1_base[128]={ 19657, 18474, 18365, 17520, 21048, 18231, 18584, 16671, 20363, 19069, 19409, 18430, 21844, 18753, 19613, 17411, 20389, 21772, 20129, 21702, 20978, 20472, 19627, 19387, 21477, 23134, 21841, 23919, 22089, 21519, 21134, 20852, 19675, 17821, 19044, 17477, 19986, 16955, 18446, 16086, 21138, 18899, 20952, 18929, 21452, 17833, 20104, 17159, 19770, 20056, 20336, 20866, 19329, 18217, 18908, 18004, 21556, 21948, 23079, 23889, 20922, 19544, 20984, 19781, 19781, 20984, 19544, 20922, 23889, 23079, 21948, 21556, 18004, 18908, 18217, 19329, 20866, 20336, 20056, 19770, 17159, 20104, 17833, 21452, 18929, 20952, 18899, 21138, 16086, 18446, 16955, 19986, 17477, 19044, 17821, 19675, 20852, 21134, 21519, 22089, 23919, 21841, 23134, 21477, 19387, 19627, 20472, 20978, 21702, 20129, 21772, 20389, 17411, 19613, 18753, 21844, 18430, 19409, 19069, 20363, 16671, 18584, 18231, 21048, 17520, 18365, 18474, 19657, }; static const uint16_t cb2_base[128]={ 12174, 13380, 13879, 13832, 13170, 13227, 13204, 12053, 12410, 13988, 14348, 14631, 13100, 13415, 13224, 12268, 11982, 13825, 13499, 14210, 13877, 14788, 13811, 13109, 11449, 13275, 12833, 13717, 12728, 13696, 12759, 12405, 10230, 12185, 11628, 13161, 11762, 13458, 12312, 12818, 10443, 12773, 12011, 14020, 11818, 13825, 12453, 13226, 10446, 13162, 11881, 14300, 12859, 16288, 13490, 15053, 10155, 12820, 11519, 13973, 12041, 15081, 12635, 14198, 14198, 12635, 15081, 12041, 13973, 11519, 12820, 10155, 15053, 13490, 16288, 12859, 14300, 11881, 13162, 10446, 13226, 12453, 13825, 11818, 14020, 12011, 12773, 10443, 12818, 12312, 13458, 11762, 13161, 11628, 12185, 10230, 12405, 12759, 13696, 12728, 13717, 12833, 13275, 11449, 13109, 13811, 14788, 13877, 14210, 13499, 13825, 11982, 12268, 13224, 13415, 13100, 14631, 14348, 13988, 12410, 12053, 13204, 13227, 13170, 13832, 13879, 13380, 12174, }; static const int16_t energy_tab[32]={ 0, 16, 20, 25, 32, 41, 51, 65, 
81, 103, 129, 163, 205, 259, 326, 410, 516, 650, 819, 1031, 1298, 1634, 2057, 2590, 3261, 4105, 5168, 6507, 8192, 10313, 12983, 16345 }; static const int16_t lpc_refl_cb1[64]={ -4041, -4018, -3998, -3977, -3954, -3930, -3906, -3879, -3852, -3825, -3795, -3764, -3731, -3699, -3666, -3631, -3594, -3555, -3513, -3468, -3420, -3372, -3321, -3268, -3212, -3153, -3090, -3021, -2944, -2863, -2772, -2676, -2565, -2445, -2328, -2202, -2072, -1941, -1808, -1660, -1508, -1348, -1185, -994, -798, -600, -374, -110, 152, 447, 720, 982, 1229, 1456, 1682, 1916, 2130, 2353, 2595, 2853, 3118, 3363, 3588, 3814 }; static const int16_t lpc_refl_cb2[32]={ -3091, -2386, -1871, -1425, -1021, -649, -316, -20, 267, 544, 810, 1065, 1305, 1534, 1756, 1970, 2171, 2359, 2536, 2700, 2854, 2996, 3133, 3263, 3386, 3499, 3603, 3701, 3789, 3870, 3947, 4020 }; static const int16_t lpc_refl_cb3[32]={ -3525, -3295, -3081, -2890, -2696, -2511, -2328, -2149, -1979, -1817, -1658, -1498, -1341, -1188, -1032, -876, -721, -561, -394, -228, -54, 119, 296, 484, 683, 895, 1123, 1373, 1651, 1965, 2360, 2854 }; static const int16_t lpc_refl_cb4[16]={ -1845, -1057, -522, -77, 301, 647, 975, 1285, 1582, 1873, 2163, 2452, 2735, 3017, 3299, 3569 }; static const int16_t lpc_refl_cb5[16]={ -2691, -2187, -1788, -1435, -1118, -837, -571, -316, -59, 201, 470, 759, 1077, 1457, 1908, 2495 }; static const int16_t lpc_refl_cb6[8]={ -1372, -474, 133, 632, 1100, 1571, 2075, 2672 }; static const int16_t lpc_refl_cb7[8]={ -2389, -1787, -1231, -717, -239, 234, 770, 1474 }; static const int16_t lpc_refl_cb8[8]={ -1569, -864, -296, 200, 670, 1151, 1709, 2385 }; static const int16_t lpc_refl_cb9[8]={ -2200, -1608, -1062, -569, -120, 338, 863, 1621 }; static const int16_t lpc_refl_cb10[4]={ -617, 190, 802, 1483 }; static const int16_t * const lpc_refl_cb[10]={ lpc_refl_cb1, lpc_refl_cb2, lpc_refl_cb3, lpc_refl_cb4, lpc_refl_cb5, lpc_refl_cb6, lpc_refl_cb7, lpc_refl_cb8, lpc_refl_cb9, lpc_refl_cb10 }; #endif /* AVCODEC_RA144_H */
123linslouis-android-video-cutter
jni/libavcodec/ra144.h
C
asf20
69,296
/* * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * common internal api header. */ #ifndef AVCODEC_INTERNAL_H #define AVCODEC_INTERNAL_H #include <stdint.h> #include "avcodec.h" /** * Determines whether pix_fmt is a hardware accelerated format. */ int ff_is_hwaccel_pix_fmt(enum PixelFormat pix_fmt); /** * Returns the hardware accelerated codec for codec codec_id and * pixel format pix_fmt. * * @param codec_id the codec to match * @param pix_fmt the pixel format to match * @return the hardware accelerated codec, or NULL if none was found. */ AVHWAccel *ff_find_hwaccel(enum CodecID codec_id, enum PixelFormat pix_fmt); /** * Return the index into tab at which {a,b} match elements {[0],[1]} of tab. * If there is no such matching pair then size is returned. */ int ff_match_2uint16(const uint16_t (*tab)[2], int size, int a, int b); #endif /* AVCODEC_INTERNAL_H */
123linslouis-android-video-cutter
jni/libavcodec/internal.h
C
asf20
1,617
/* * VC3/DNxHD encoder structure definitions and prototypes * Copyright (c) 2007 Baptiste Coudurier <baptiste dot coudurier at smartjog dot com> * * VC-3 encoder funded by the British Broadcasting Corporation * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_DNXHDENC_H #define AVCODEC_DNXHDENC_H #include <stdint.h> #include "libavcodec/mpegvideo.h" #include "libavcodec/dnxhddata.h" typedef struct { uint16_t mb; int value; } RCCMPEntry; typedef struct { int ssd; int bits; } RCEntry; typedef struct DNXHDEncContext { MpegEncContext m; ///< Used for quantization dsp functions AVFrame frame; int cid; const CIDEntry *cid_table; uint8_t *msip; ///< Macroblock Scan Indexes Payload uint32_t *slice_size; uint32_t *slice_offs; struct DNXHDEncContext *thread[MAX_THREADS]; unsigned dct_y_offset; unsigned dct_uv_offset; int interlaced; int cur_field; DECLARE_ALIGNED(16, DCTELEM, blocks)[8][64]; int (*qmatrix_c) [64]; int (*qmatrix_l) [64]; uint16_t (*qmatrix_l16)[2][64]; uint16_t (*qmatrix_c16)[2][64]; unsigned frame_bits; uint8_t *src[3]; uint32_t *vlc_codes; uint8_t *vlc_bits; uint16_t *run_codes; uint8_t *run_bits; /** Rate control */ unsigned slice_bits; unsigned qscale; unsigned lambda; unsigned thread_size; uint16_t *mb_bits; uint8_t *mb_qscale; RCCMPEntry *mb_cmp; RCEntry 
(*mb_rc)[8160]; void (*get_pixels_8x4_sym)(DCTELEM */*align 16*/, const uint8_t *, int); } DNXHDEncContext; void ff_dnxhd_init_mmx(DNXHDEncContext *ctx); #endif /* AVCODEC_DNXHDENC_H */
123linslouis-android-video-cutter
jni/libavcodec/dnxhdenc.h
C
asf20
2,395
/* * Flash Screen Video decoder * Copyright (C) 2004 Alex Beregszaszi * Copyright (C) 2006 Benjamin Larsson * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Flash Screen Video decoder * @author Alex Beregszaszi * @author Benjamin Larsson */ /* Bitstream description * The picture is divided into blocks that are zlib compressed. * * The decoder is fed complete frames, the frameheader contains: * 4bits of block width * 12bits of frame width * 4bits of block height * 12bits of frame height * * Directly after the header are the compressed blocks. The blocks * have their compressed size represented with 16bits in the beginnig. * If the size = 0 then the block is unchanged from the previous frame. * All blocks are decompressed until the buffer is consumed. * * Encoding ideas, a basic encoder would just use a fixed block size. * Block sizes can be multipels of 16, from 16 to 256. The blocks don't * have to be quadratic. A brute force search with a set of diffrent * block sizes should give a better result then to just use a fixed size. 
*/ #include <stdio.h> #include <stdlib.h> #include "avcodec.h" #include "get_bits.h" #include <zlib.h> typedef struct FlashSVContext { AVCodecContext *avctx; AVFrame frame; int image_width, image_height; int block_width, block_height; uint8_t* tmpblock; int block_size; z_stream zstream; } FlashSVContext; static void copy_region(uint8_t *sptr, uint8_t *dptr, int dx, int dy, int h, int w, int stride) { int i; for (i = dx+h; i > dx; i--) { memcpy(dptr+(i*stride)+dy*3, sptr, w*3); sptr += w*3; } } static av_cold int flashsv_decode_init(AVCodecContext *avctx) { FlashSVContext *s = avctx->priv_data; int zret; // Zlib return code s->avctx = avctx; s->zstream.zalloc = Z_NULL; s->zstream.zfree = Z_NULL; s->zstream.opaque = Z_NULL; zret = inflateInit(&(s->zstream)); if (zret != Z_OK) { av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret); return 1; } avctx->pix_fmt = PIX_FMT_BGR24; s->frame.data[0] = NULL; return 0; } static int flashsv_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; FlashSVContext *s = avctx->priv_data; int h_blocks, v_blocks, h_part, v_part, i, j; GetBitContext gb; /* no supplementary picture */ if (buf_size == 0) return 0; if (buf_size < 4) return -1; init_get_bits(&gb, buf, buf_size * 8); /* start to parse the bitstream */ s->block_width = 16* (get_bits(&gb, 4)+1); s->image_width = get_bits(&gb,12); s->block_height= 16* (get_bits(&gb, 4)+1); s->image_height= get_bits(&gb,12); /* calculate amount of blocks and the size of the border blocks */ h_blocks = s->image_width / s->block_width; h_part = s->image_width % s->block_width; v_blocks = s->image_height / s->block_height; v_part = s->image_height % s->block_height; /* the block size could change between frames, make sure the buffer * is large enough, if not, get a larger one */ if(s->block_size < s->block_width*s->block_height) { if (s->tmpblock != NULL) av_free(s->tmpblock); if ((s->tmpblock = 
av_malloc(3*s->block_width*s->block_height)) == NULL) { av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n"); return -1; } } s->block_size = s->block_width*s->block_height; /* init the image size once */ if((avctx->width==0) && (avctx->height==0)){ avctx->width = s->image_width; avctx->height = s->image_height; } /* check for changes of image width and image height */ if ((avctx->width != s->image_width) || (avctx->height != s->image_height)) { av_log(avctx, AV_LOG_ERROR, "Frame width or height differs from first frames!\n"); av_log(avctx, AV_LOG_ERROR, "fh = %d, fv %d vs ch = %d, cv = %d\n",avctx->height, avctx->width,s->image_height,s->image_width); return -1; } av_log(avctx, AV_LOG_DEBUG, "image: %dx%d block: %dx%d num: %dx%d part: %dx%d\n", s->image_width, s->image_height, s->block_width, s->block_height, h_blocks, v_blocks, h_part, v_part); s->frame.reference = 1; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if(avctx->reget_buffer(avctx, &s->frame) < 0){ av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return -1; } /* loop over all block columns */ for (j = 0; j < v_blocks + (v_part?1:0); j++) { int hp = j*s->block_height; // horiz position in frame int hs = (j<v_blocks)?s->block_height:v_part; // size of block /* loop over all block rows */ for (i = 0; i < h_blocks + (h_part?1:0); i++) { int wp = i*s->block_width; // vert position in frame int ws = (i<h_blocks)?s->block_width:h_part; // size of block /* get the size of the compressed zlib chunk */ int size = get_bits(&gb, 16); if (8 * size > get_bits_left(&gb)) { avctx->release_buffer(avctx, &s->frame); s->frame.data[0] = NULL; return -1; } if (size == 0) { /* no change, don't do anything */ } else { /* decompress block */ int ret = inflateReset(&(s->zstream)); if (ret != Z_OK) { av_log(avctx, AV_LOG_ERROR, "error in decompression (reset) of block %dx%d\n", i, j); /* return -1; */ } s->zstream.next_in = 
buf+(get_bits_count(&gb)/8); s->zstream.avail_in = size; s->zstream.next_out = s->tmpblock; s->zstream.avail_out = s->block_size*3; ret = inflate(&(s->zstream), Z_FINISH); if (ret == Z_DATA_ERROR) { av_log(avctx, AV_LOG_ERROR, "Zlib resync occurred\n"); inflateSync(&(s->zstream)); ret = inflate(&(s->zstream), Z_FINISH); } if ((ret != Z_OK) && (ret != Z_STREAM_END)) { av_log(avctx, AV_LOG_ERROR, "error in decompression of block %dx%d: %d\n", i, j, ret); /* return -1; */ } copy_region(s->tmpblock, s->frame.data[0], s->image_height-(hp+hs+1), wp, hs, ws, s->frame.linesize[0]); skip_bits_long(&gb, 8*size); /* skip the consumed bits */ } } } *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; if ((get_bits_count(&gb)/8) != buf_size) av_log(avctx, AV_LOG_ERROR, "buffer not fully consumed (%d != %d)\n", buf_size, (get_bits_count(&gb)/8)); /* report that the buffer was completely consumed */ return buf_size; } static av_cold int flashsv_decode_end(AVCodecContext *avctx) { FlashSVContext *s = avctx->priv_data; inflateEnd(&(s->zstream)); /* release the frame if needed */ if (s->frame.data[0]) avctx->release_buffer(avctx, &s->frame); /* free the tmpblock */ if (s->tmpblock != NULL) av_free(s->tmpblock); return 0; } AVCodec flashsv_decoder = { "flashsv", AVMEDIA_TYPE_VIDEO, CODEC_ID_FLASHSV, sizeof(FlashSVContext), flashsv_decode_init, NULL, flashsv_decode_end, flashsv_decode_frame, CODEC_CAP_DR1, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("Flash Screen Video v1"), };
123linslouis-android-video-cutter
jni/libavcodec/flashsv.c
C
asf20
8,650
/* * WMA compatible codec * Copyright (c) 2002-2007 The FFmpeg Project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avcodec.h" #include "wma.h" #include "wmadata.h" #undef NDEBUG #include <assert.h> /* XXX: use same run/length optimization as mpeg decoders */ //FIXME maybe split decode / encode or pass flag static void init_coef_vlc(VLC *vlc, uint16_t **prun_table, float **plevel_table, uint16_t **pint_table, const CoefVLCTable *vlc_table) { int n = vlc_table->n; const uint8_t *table_bits = vlc_table->huffbits; const uint32_t *table_codes = vlc_table->huffcodes; const uint16_t *levels_table = vlc_table->levels; uint16_t *run_table, *level_table, *int_table; float *flevel_table; int i, l, j, k, level; init_vlc(vlc, VLCBITS, n, table_bits, 1, 1, table_codes, 4, 4, 0); run_table = av_malloc(n * sizeof(uint16_t)); level_table = av_malloc(n * sizeof(uint16_t)); flevel_table= av_malloc(n * sizeof(*flevel_table)); int_table = av_malloc(n * sizeof(uint16_t)); i = 2; level = 1; k = 0; while (i < n) { int_table[k] = i; l = levels_table[k++]; for (j = 0; j < l; j++) { run_table[i] = j; level_table[i] = level; flevel_table[i]= level; i++; } level++; } *prun_table = run_table; *plevel_table = flevel_table; *pint_table = int_table; av_free(level_table); } /** *@brief Get the samples 
per frame for this stream. *@param sample_rate output sample_rate *@param version wma version *@param decode_flags codec compression features *@return log2 of the number of output samples per frame */ int av_cold ff_wma_get_frame_len_bits(int sample_rate, int version, unsigned int decode_flags) { int frame_len_bits; if (sample_rate <= 16000) { frame_len_bits = 9; } else if (sample_rate <= 22050 || (sample_rate <= 32000 && version == 1)) { frame_len_bits = 10; } else if (sample_rate <= 48000) { frame_len_bits = 11; } else if (sample_rate <= 96000) { frame_len_bits = 12; } else { frame_len_bits = 13; } if (version == 3) { int tmp = decode_flags & 0x6; if (tmp == 0x2) { ++frame_len_bits; } else if (tmp == 0x4) { --frame_len_bits; } else if (tmp == 0x6) { frame_len_bits -= 2; } } return frame_len_bits; } int ff_wma_init(AVCodecContext *avctx, int flags2) { WMACodecContext *s = avctx->priv_data; int i; float bps1, high_freq; volatile float bps; int sample_rate1; int coef_vlc_table; if ( avctx->sample_rate <= 0 || avctx->sample_rate > 50000 || avctx->channels <= 0 || avctx->channels > 8 || avctx->bit_rate <= 0) return -1; s->sample_rate = avctx->sample_rate; s->nb_channels = avctx->channels; s->bit_rate = avctx->bit_rate; s->block_align = avctx->block_align; dsputil_init(&s->dsp, avctx); if (avctx->codec->id == CODEC_ID_WMAV1) { s->version = 1; } else { s->version = 2; } /* compute MDCT block size */ s->frame_len_bits = ff_wma_get_frame_len_bits(s->sample_rate, s->version, 0); s->frame_len = 1 << s->frame_len_bits; if (s->use_variable_block_len) { int nb_max, nb; nb = ((flags2 >> 3) & 3) + 1; if ((s->bit_rate / s->nb_channels) >= 32000) nb += 2; nb_max = s->frame_len_bits - BLOCK_MIN_BITS; if (nb > nb_max) nb = nb_max; s->nb_block_sizes = nb + 1; } else { s->nb_block_sizes = 1; } /* init rate dependent parameters */ s->use_noise_coding = 1; high_freq = s->sample_rate * 0.5; /* if version 2, then the rates are normalized */ sample_rate1 = s->sample_rate; if (s->version == 
2) { if (sample_rate1 >= 44100) { sample_rate1 = 44100; } else if (sample_rate1 >= 22050) { sample_rate1 = 22050; } else if (sample_rate1 >= 16000) { sample_rate1 = 16000; } else if (sample_rate1 >= 11025) { sample_rate1 = 11025; } else if (sample_rate1 >= 8000) { sample_rate1 = 8000; } } bps = (float)s->bit_rate / (float)(s->nb_channels * s->sample_rate); s->byte_offset_bits = av_log2((int)(bps * s->frame_len / 8.0 + 0.5)) + 2; /* compute high frequency value and choose if noise coding should be activated */ bps1 = bps; if (s->nb_channels == 2) bps1 = bps * 1.6; if (sample_rate1 == 44100) { if (bps1 >= 0.61) { s->use_noise_coding = 0; } else { high_freq = high_freq * 0.4; } } else if (sample_rate1 == 22050) { if (bps1 >= 1.16) { s->use_noise_coding = 0; } else if (bps1 >= 0.72) { high_freq = high_freq * 0.7; } else { high_freq = high_freq * 0.6; } } else if (sample_rate1 == 16000) { if (bps > 0.5) { high_freq = high_freq * 0.5; } else { high_freq = high_freq * 0.3; } } else if (sample_rate1 == 11025) { high_freq = high_freq * 0.7; } else if (sample_rate1 == 8000) { if (bps <= 0.625) { high_freq = high_freq * 0.5; } else if (bps > 0.75) { s->use_noise_coding = 0; } else { high_freq = high_freq * 0.65; } } else { if (bps >= 0.8) { high_freq = high_freq * 0.75; } else if (bps >= 0.6) { high_freq = high_freq * 0.6; } else { high_freq = high_freq * 0.5; } } dprintf(s->avctx, "flags2=0x%x\n", flags2); dprintf(s->avctx, "version=%d channels=%d sample_rate=%d bitrate=%d block_align=%d\n", s->version, s->nb_channels, s->sample_rate, s->bit_rate, s->block_align); dprintf(s->avctx, "bps=%f bps1=%f high_freq=%f bitoffset=%d\n", bps, bps1, high_freq, s->byte_offset_bits); dprintf(s->avctx, "use_noise_coding=%d use_exp_vlc=%d nb_block_sizes=%d\n", s->use_noise_coding, s->use_exp_vlc, s->nb_block_sizes); /* compute the scale factor band sizes for each MDCT block size */ { int a, b, pos, lpos, k, block_len, i, j, n; const uint8_t *table; if (s->version == 1) { s->coefs_start = 3; 
} else { s->coefs_start = 0; } for (k = 0; k < s->nb_block_sizes; k++) { block_len = s->frame_len >> k; if (s->version == 1) { lpos = 0; for (i = 0; i < 25; i++) { a = ff_wma_critical_freqs[i]; b = s->sample_rate; pos = ((block_len * 2 * a) + (b >> 1)) / b; if (pos > block_len) pos = block_len; s->exponent_bands[0][i] = pos - lpos; if (pos >= block_len) { i++; break; } lpos = pos; } s->exponent_sizes[0] = i; } else { /* hardcoded tables */ table = NULL; a = s->frame_len_bits - BLOCK_MIN_BITS - k; if (a < 3) { if (s->sample_rate >= 44100) { table = exponent_band_44100[a]; } else if (s->sample_rate >= 32000) { table = exponent_band_32000[a]; } else if (s->sample_rate >= 22050) { table = exponent_band_22050[a]; } } if (table) { n = *table++; for (i = 0; i < n; i++) s->exponent_bands[k][i] = table[i]; s->exponent_sizes[k] = n; } else { j = 0; lpos = 0; for (i = 0; i < 25; i++) { a = ff_wma_critical_freqs[i]; b = s->sample_rate; pos = ((block_len * 2 * a) + (b << 1)) / (4 * b); pos <<= 2; if (pos > block_len) pos = block_len; if (pos > lpos) s->exponent_bands[k][j++] = pos - lpos; if (pos >= block_len) break; lpos = pos; } s->exponent_sizes[k] = j; } } /* max number of coefs */ s->coefs_end[k] = (s->frame_len - ((s->frame_len * 9) / 100)) >> k; /* high freq computation */ s->high_band_start[k] = (int)((block_len * 2 * high_freq) / s->sample_rate + 0.5); n = s->exponent_sizes[k]; j = 0; pos = 0; for (i = 0; i < n; i++) { int start, end; start = pos; pos += s->exponent_bands[k][i]; end = pos; if (start < s->high_band_start[k]) start = s->high_band_start[k]; if (end > s->coefs_end[k]) end = s->coefs_end[k]; if (end > start) s->exponent_high_bands[k][j++] = end - start; } s->exponent_high_sizes[k] = j; #if 0 tprintf(s->avctx, "%5d: coefs_end=%d high_band_start=%d nb_high_bands=%d: ", s->frame_len >> k, s->coefs_end[k], s->high_band_start[k], s->exponent_high_sizes[k]); for (j = 0; j < s->exponent_high_sizes[k]; j++) tprintf(s->avctx, " %d", s->exponent_high_bands[k][j]); 
tprintf(s->avctx, "\n"); #endif } } #ifdef TRACE { int i, j; for (i = 0; i < s->nb_block_sizes; i++) { tprintf(s->avctx, "%5d: n=%2d:", s->frame_len >> i, s->exponent_sizes[i]); for (j = 0; j < s->exponent_sizes[i]; j++) tprintf(s->avctx, " %d", s->exponent_bands[i][j]); tprintf(s->avctx, "\n"); } } #endif /* init MDCT windows : simple sinus window */ for (i = 0; i < s->nb_block_sizes; i++) { ff_init_ff_sine_windows(s->frame_len_bits - i); s->windows[i] = ff_sine_windows[s->frame_len_bits - i]; } s->reset_block_lengths = 1; if (s->use_noise_coding) { /* init the noise generator */ if (s->use_exp_vlc) { s->noise_mult = 0.02; } else { s->noise_mult = 0.04; } #ifdef TRACE for (i = 0; i < NOISE_TAB_SIZE; i++) s->noise_table[i] = 1.0 * s->noise_mult; #else { unsigned int seed; float norm; seed = 1; norm = (1.0 / (float)(1LL << 31)) * sqrt(3) * s->noise_mult; for (i = 0; i < NOISE_TAB_SIZE; i++) { seed = seed * 314159 + 1; s->noise_table[i] = (float)((int)seed) * norm; } } #endif } /* choose the VLC tables for the coefficients */ coef_vlc_table = 2; if (s->sample_rate >= 32000) { if (bps1 < 0.72) { coef_vlc_table = 0; } else if (bps1 < 1.16) { coef_vlc_table = 1; } } s->coef_vlcs[0]= &coef_vlcs[coef_vlc_table * 2 ]; s->coef_vlcs[1]= &coef_vlcs[coef_vlc_table * 2 + 1]; init_coef_vlc(&s->coef_vlc[0], &s->run_table[0], &s->level_table[0], &s->int_table[0], s->coef_vlcs[0]); init_coef_vlc(&s->coef_vlc[1], &s->run_table[1], &s->level_table[1], &s->int_table[1], s->coef_vlcs[1]); return 0; } int ff_wma_total_gain_to_bits(int total_gain) { if (total_gain < 15) return 13; else if (total_gain < 32) return 12; else if (total_gain < 40) return 11; else if (total_gain < 45) return 10; else return 9; } int ff_wma_end(AVCodecContext *avctx) { WMACodecContext *s = avctx->priv_data; int i; for (i = 0; i < s->nb_block_sizes; i++) ff_mdct_end(&s->mdct_ctx[i]); if (s->use_exp_vlc) { free_vlc(&s->exp_vlc); } if (s->use_noise_coding) { free_vlc(&s->hgain_vlc); } for (i = 0; i < 2; i++) { 
free_vlc(&s->coef_vlc[i]); av_free(s->run_table[i]); av_free(s->level_table[i]); av_free(s->int_table[i]); } return 0; } /** * Decode an uncompressed coefficient. * @param s codec context * @return the decoded coefficient */ unsigned int ff_wma_get_large_val(GetBitContext* gb) { /** consumes up to 34 bits */ int n_bits = 8; /** decode length */ if (get_bits1(gb)) { n_bits += 8; if (get_bits1(gb)) { n_bits += 8; if (get_bits1(gb)) { n_bits += 7; } } } return get_bits_long(gb, n_bits); } /** * Decode run level compressed coefficients. * @param avctx codec context * @param gb bitstream reader context * @param vlc vlc table for get_vlc2 * @param level_table level codes * @param run_table run codes * @param version 0 for wma1,2 1 for wmapro * @param ptr output buffer * @param offset offset in the output buffer * @param num_coefs number of input coefficents * @param block_len input buffer length (2^n) * @param frame_len_bits number of bits for escaped run codes * @param coef_nb_bits number of bits for escaped level codes * @return 0 on success, -1 otherwise */ int ff_wma_run_level_decode(AVCodecContext* avctx, GetBitContext* gb, VLC *vlc, const float *level_table, const uint16_t *run_table, int version, WMACoef *ptr, int offset, int num_coefs, int block_len, int frame_len_bits, int coef_nb_bits) { int code, level, sign; const uint32_t *ilvl = (const uint32_t*)level_table; uint32_t *iptr = (uint32_t*)ptr; const unsigned int coef_mask = block_len - 1; for (; offset < num_coefs; offset++) { code = get_vlc2(gb, vlc->table, VLCBITS, VLCMAX); if (code > 1) { /** normal code */ offset += run_table[code]; sign = get_bits1(gb) - 1; iptr[offset & coef_mask] = ilvl[code] ^ sign<<31; } else if (code == 1) { /** EOB */ break; } else { /** escape */ if (!version) { level = get_bits(gb, coef_nb_bits); /** NOTE: this is rather suboptimal. 
reading block_len_bits would be better */ offset += get_bits(gb, frame_len_bits); } else { level = ff_wma_get_large_val(gb); /** escape decode */ if (get_bits1(gb)) { if (get_bits1(gb)) { if (get_bits1(gb)) { av_log(avctx,AV_LOG_ERROR, "broken escape sequence\n"); return -1; } else offset += get_bits(gb, frame_len_bits) + 4; } else offset += get_bits(gb, 2) + 1; } } sign = get_bits1(gb) - 1; ptr[offset & coef_mask] = (level^sign) - sign; } } /** NOTE: EOB can be omitted */ if (offset > num_coefs) { av_log(avctx, AV_LOG_ERROR, "overflow in spectral RLE, ignoring\n"); return -1; } return 0; }
123linslouis-android-video-cutter
jni/libavcodec/wma.c
C
asf20
16,869
/* * Xvid rate control wrapper for lavc video encoders * * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <xvid.h> #include <unistd.h> #include "avcodec.h" #include "libxvid_internal.h" //#include "dsputil.h" #include "mpegvideo.h" #undef NDEBUG #include <assert.h> extern unsigned int xvid_debug; int ff_xvid_rate_control_init(MpegEncContext *s){ char *tmp_name; int fd, i; xvid_plg_create_t xvid_plg_create; xvid_plugin_2pass2_t xvid_2pass2; //xvid_debug=-1; fd=av_tempfile("xvidrc.", &tmp_name); if (fd == -1) { av_log(NULL, AV_LOG_ERROR, "Can't create temporary pass2 file.\n"); return -1; } for(i=0; i<s->rc_context.num_entries; i++){ static const char *frame_types = " ipbs"; char tmp[256]; RateControlEntry *rce; rce= &s->rc_context.entry[i]; snprintf(tmp, sizeof(tmp), "%c %d %d %d %d %d %d\n", frame_types[rce->pict_type], (int)lrintf(rce->qscale / FF_QP2LAMBDA), rce->i_count, s->mb_num - rce->i_count - rce->skip_count, rce->skip_count, (rce->i_tex_bits + rce->p_tex_bits + rce->misc_bits+7)/8, (rce->header_bits+rce->mv_bits+7)/8); //av_log(NULL, AV_LOG_ERROR, "%s\n", tmp); write(fd, tmp, strlen(tmp)); } close(fd); memset(&xvid_2pass2, 0, sizeof(xvid_2pass2)); xvid_2pass2.version= XVID_MAKE_VERSION(1,1,0); 
xvid_2pass2.filename= tmp_name; xvid_2pass2.bitrate= s->avctx->bit_rate; xvid_2pass2.vbv_size= s->avctx->rc_buffer_size; xvid_2pass2.vbv_maxrate= s->avctx->rc_max_rate; xvid_2pass2.vbv_initial= s->avctx->rc_initial_buffer_occupancy; memset(&xvid_plg_create, 0, sizeof(xvid_plg_create)); xvid_plg_create.version= XVID_MAKE_VERSION(1,1,0); xvid_plg_create.fbase= s->avctx->time_base.den; xvid_plg_create.fincr= s->avctx->time_base.num; xvid_plg_create.param= &xvid_2pass2; if(xvid_plugin_2pass2(NULL, XVID_PLG_CREATE, &xvid_plg_create, &s->rc_context.non_lavc_opaque)<0){ av_log(NULL, AV_LOG_ERROR, "xvid_plugin_2pass2 failed\n"); return -1; } return 0; } float ff_xvid_rate_estimate_qscale(MpegEncContext *s, int dry_run){ xvid_plg_data_t xvid_plg_data; memset(&xvid_plg_data, 0, sizeof(xvid_plg_data)); xvid_plg_data.version= XVID_MAKE_VERSION(1,1,0); xvid_plg_data.width = s->width; xvid_plg_data.height= s->height; xvid_plg_data.mb_width = s->mb_width; xvid_plg_data.mb_height= s->mb_height; xvid_plg_data.fbase= s->avctx->time_base.den; xvid_plg_data.fincr= s->avctx->time_base.num; xvid_plg_data.min_quant[0]= s->avctx->qmin; xvid_plg_data.min_quant[1]= s->avctx->qmin; xvid_plg_data.min_quant[2]= s->avctx->qmin; //FIXME i/b factor & offset xvid_plg_data.max_quant[0]= s->avctx->qmax; xvid_plg_data.max_quant[1]= s->avctx->qmax; xvid_plg_data.max_quant[2]= s->avctx->qmax; //FIXME i/b factor & offset xvid_plg_data.bquant_offset = 0; // 100 * s->avctx->b_quant_offset; xvid_plg_data.bquant_ratio = 100; // * s->avctx->b_quant_factor; #if 0 xvid_plg_data.stats.hlength= X #endif if(!s->rc_context.dry_run_qscale){ if(s->picture_number){ xvid_plg_data.length= xvid_plg_data.stats.length= (s->frame_bits + 7)/8; xvid_plg_data.frame_num= s->rc_context.last_picture_number; xvid_plg_data.quant= s->qscale; xvid_plg_data.type= s->last_pict_type; if(xvid_plugin_2pass2(s->rc_context.non_lavc_opaque, XVID_PLG_AFTER, &xvid_plg_data, NULL)){ av_log(s->avctx, AV_LOG_ERROR, "xvid_plugin_2pass2(handle, 
XVID_PLG_AFTER, ...) FAILED\n"); return -1; } } s->rc_context.last_picture_number= xvid_plg_data.frame_num= s->picture_number; xvid_plg_data.quant= 0; if(xvid_plugin_2pass2(s->rc_context.non_lavc_opaque, XVID_PLG_BEFORE, &xvid_plg_data, NULL)){ av_log(s->avctx, AV_LOG_ERROR, "xvid_plugin_2pass2(handle, XVID_PLG_BEFORE, ...) FAILED\n"); return -1; } s->rc_context.dry_run_qscale= xvid_plg_data.quant; } xvid_plg_data.quant= s->rc_context.dry_run_qscale; if(!dry_run) s->rc_context.dry_run_qscale= 0; if(s->pict_type == FF_B_TYPE) //FIXME this is not exactly identical to xvid return xvid_plg_data.quant * FF_QP2LAMBDA * s->avctx->b_quant_factor + s->avctx->b_quant_offset; else return xvid_plg_data.quant * FF_QP2LAMBDA; } void ff_xvid_rate_control_uninit(MpegEncContext *s){ xvid_plg_destroy_t xvid_plg_destroy; xvid_plugin_2pass2(s->rc_context.non_lavc_opaque, XVID_PLG_DESTROY, &xvid_plg_destroy, NULL); }
123linslouis-android-video-cutter
jni/libavcodec/libxvid_rc.c
C
asf20
5,475
/*
 * Targa (.tga) image decoder
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/intreadwrite.h"
#include "avcodec.h"

/* Image-type field of the TGA header; TGA_RLE is OR-ed onto the base type. */
enum TargaCompr {
    TGA_NODATA = 0, // no image data
    TGA_PAL    = 1, // palettized
    TGA_RGB    = 2, // true-color
    TGA_BW     = 3, // black & white or grayscale
    TGA_RLE    = 8, // flag pointing that data is RLE-coded
};

typedef struct TargaContext {
    AVFrame picture;          // reusable output frame, owned by the decoder

    int width, height;        // dimensions of the last decoded image
    int bpp;                  // bits per pixel as stored in the header
    int color_type;           // unused here; kept for interface stability
    int compression_type;     // unused here; kept for interface stability
} TargaContext;

/**
 * Decode an RLE-compressed TGA image into @p dst.
 *
 * Packets may wrap across scanlines; a packet that would run past the
 * remaining pixel count of the image is rejected with an error log.
 *
 * @param src    start of the RLE-coded payload (length is NOT known here;
 *               the per-packet pixel-count check is the only bound applied
 *               to dst — NOTE(review): src itself is not bounds-checked)
 * @param dst    top-left (or bottom-left, via negative stride) of the output
 * @param w,h    image dimensions in pixels
 * @param stride bytes per output row (may be negative for flipped images)
 * @param bpp    bits per pixel; bytes per pixel is (bpp + 1) >> 3
 */
static void targa_decode_rle(AVCodecContext *avctx, TargaContext *s,
                             const uint8_t *src, uint8_t *dst,
                             int w, int h, int stride, int bpp)
{
    int i, x, y;
    int depth = (bpp + 1) >> 3;  // bytes per pixel (15bpp rounds up to 2)
    int type, count;
    int diff;

    diff = stride - w * depth;   // bytes to skip at each end of row
    x = y = 0;
    while (y < h) {
        type  = *src++;
        count = (type & 0x7F) + 1;  // run/raw length, 1..128 pixels
        type &= 0x80;               // high bit set => RLE run, else raw copy
        // reject packets that exceed the pixels remaining in the image
        if ((x + count > w) && (x + count + 1 > (h - y) * w)) {
            av_log(avctx, AV_LOG_ERROR,
                   "Packet went out of bounds: position (%i,%i) size %i\n",
                   x, y, count);
            return;
        }
        for (i = 0; i < count; i++) {
            switch (depth) {
            case 1:
                *dst = *src;
                break;
            case 2:
                *((uint16_t*)dst) = AV_RL16(src);
                break;
            case 3:
                dst[0] = src[0];
                dst[1] = src[1];
                dst[2] = src[2];
                break;
            case 4:
                *((uint32_t*)dst) = AV_RL32(src);
                break;
            }
            dst += depth;
            if (!type)       // raw packet: each pixel consumes source bytes
                src += depth;
            x++;
            if (x == w) {    // wrap to the next scanline
                x = 0;
                y++;
                dst += diff;
            }
        }
        if (type)            // RLE packet: the single source pixel is consumed once
            src += depth;
    }
}

/**
 * Decode one TGA image packet.
 *
 * Validates that the packet actually contains the fixed 18-byte header,
 * the image-ID field and the palette before reading them (the original
 * code read all of these unconditionally, over-reading truncated input).
 *
 * @return buf_size on success, -1 on malformed input or allocation failure
 */
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *data_size,
                        AVPacket *avpkt)
{
    const uint8_t *buf     = avpkt->data;
    const uint8_t *buf_end = avpkt->data + avpkt->size;
    int buf_size = avpkt->size;
    TargaContext * const s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p = (AVFrame*)&s->picture;
    uint8_t *dst;
    int stride;
    int idlen, pal, compr, x, y, w, h, bpp, flags;
    int first_clr, colors, csize;

    /* the fixed part of a TGA header is 18 bytes */
    if (buf_size < 18) {
        av_log(avctx, AV_LOG_ERROR, "Packet too small for TGA header\n");
        return -1;
    }

    /* parse image header */
    idlen     = *buf++;
    pal       = *buf++;
    compr     = *buf++;
    first_clr = AV_RL16(buf); buf += 2;
    colors    = AV_RL16(buf); buf += 2;
    csize     = *buf++;
    x         = AV_RL16(buf); buf += 2;
    y         = AV_RL16(buf); buf += 2;
    w         = AV_RL16(buf); buf += 2;
    h         = AV_RL16(buf); buf += 2;
    bpp       = *buf++;
    flags     = *buf++;
    // skip identifier if any, but only if it is actually present
    if (buf_end - buf < idlen) {
        av_log(avctx, AV_LOG_ERROR, "Packet too small for image ID\n");
        return -1;
    }
    buf += idlen;
    s->bpp    = bpp;
    s->width  = w;
    s->height = h;
    switch (s->bpp) {
    case 8:
        // grayscale if the base image type says B&W, otherwise palettized
        avctx->pix_fmt = ((compr & (~TGA_RLE)) == TGA_BW) ? PIX_FMT_GRAY8 : PIX_FMT_PAL8;
        break;
    case 15:
        avctx->pix_fmt = PIX_FMT_RGB555;
        break;
    case 16:
        avctx->pix_fmt = PIX_FMT_RGB555;
        break;
    case 24:
        avctx->pix_fmt = PIX_FMT_BGR24;
        break;
    case 32:
        avctx->pix_fmt = PIX_FMT_RGB32;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Bit depth %i is not supported\n", s->bpp);
        return -1;
    }

    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    if (avcodec_check_dimensions(avctx, w, h))
        return -1;
    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    if (flags & 0x20) {
        // top-to-bottom storage: straight copy
        dst    = p->data[0];
        stride = p->linesize[0];
    } else {
        // image is upside-down: start at the last row and walk backwards
        dst    = p->data[0] + p->linesize[0] * (h - 1);
        stride = -p->linesize[0];
    }

    if (avctx->pix_fmt == PIX_FMT_PAL8 && avctx->palctrl) {
        memcpy(p->data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
        if (avctx->palctrl->palette_changed) {
            p->palette_has_changed = 1;
            avctx->palctrl->palette_changed = 0;
        }
    }
    if (colors) {
        if ((colors + first_clr) > 256) {
            av_log(avctx, AV_LOG_ERROR, "Incorrect palette: %i colors with offset %i\n",
                   colors, first_clr);
            return -1;
        }
        if (csize != 24) {
            av_log(avctx, AV_LOG_ERROR, "Palette entry size %i bits is not supported\n", csize);
            return -1;
        }
        // make sure the whole palette is inside the packet before reading it
        if (buf_end - buf < colors * ((csize + 1) >> 3)) {
            av_log(avctx, AV_LOG_ERROR, "Not enough data for palette\n");
            return -1;
        }
        if (avctx->pix_fmt != PIX_FMT_PAL8) // should not occur but skip palette anyway
            buf += colors * ((csize + 1) >> 3);
        else {
            int r, g, b, t;
            int32_t *pal = ((int32_t*)p->data[1]) + first_clr;
            for (t = 0; t < colors; t++) {
                r = *buf++;
                g = *buf++;
                b = *buf++;
                *pal++ = (b << 16) | (g << 8) | r;
            }
            p->palette_has_changed = 1;
        }
    }
    if ((compr & (~TGA_RLE)) == TGA_NODATA)
        memset(p->data[0], 0, p->linesize[0] * s->height);
    else {
        if (compr & TGA_RLE)
            targa_decode_rle(avctx, s, buf, dst, avctx->width, avctx->height, stride, bpp);
        else {
            for (y = 0; y < s->height; y++) {
#if HAVE_BIGENDIAN
                // raw pixels are little-endian; byteswap on BE hosts
                if ((s->bpp + 1) >> 3 == 2) {
                    uint16_t *dst16 = (uint16_t*)dst;
                    for (x = 0; x < s->width; x++)
                        dst16[x] = AV_RL16(buf + x * 2);
                } else if ((s->bpp + 1) >> 3 == 4) {
                    uint32_t *dst32 = (uint32_t*)dst;
                    for (x = 0; x < s->width; x++)
                        dst32[x] = AV_RL32(buf + x * 4);
                } else
#endif
                    memcpy(dst, buf, s->width * ((s->bpp + 1) >> 3));

                dst += stride;
                buf += s->width * ((s->bpp + 1) >> 3);
            }
        }
    }

    *picture   = *(AVFrame*)&s->picture;
    *data_size = sizeof(AVPicture);

    return buf_size;
}

/** Initialize decoder state: hook the private frame up as coded_frame. */
static av_cold int targa_init(AVCodecContext *avctx)
{
    TargaContext *s = avctx->priv_data;

    avcodec_get_frame_defaults((AVFrame*)&s->picture);
    avctx->coded_frame = (AVFrame*)&s->picture;

    return 0;
}

/** Release the last decoded frame, if any. */
static av_cold int targa_end(AVCodecContext *avctx)
{
    TargaContext *s = avctx->priv_data;

    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    return 0;
}

AVCodec targa_decoder = {
    "targa",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_TARGA,
    sizeof(TargaContext),
    targa_init,
    NULL,
    targa_end,
    decode_frame,
    CODEC_CAP_DR1,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Truevision Targa image"),
};
123linslouis-android-video-cutter
jni/libavcodec/targa.c
C
asf20
7,437
/*
 * Brute Force & Ignorance (BFI) video decoder
 * Copyright (c) 2008 Sisir Koppaka
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * @brief Brute Force & Ignorance (.bfi) video decoder
 * @author Sisir Koppaka ( sisir.koppaka at gmail dot com )
 * @sa http://wiki.multimedia.cx/index.php?title=BFI
 */

#include "libavutil/common.h"
#include "avcodec.h"
#include "bytestream.h"

typedef struct BFIContext {
    AVCodecContext *avctx;
    AVFrame frame;
    uint8_t *dst;      // scratch buffer holding the fully reconstructed frame
} BFIContext;

/**
 * Allocate the decoder's scratch frame buffer.
 *
 * Fix over the original: the av_mallocz() result is now checked, so a
 * failed allocation is reported instead of being dereferenced later in
 * bfi_decode_frame().
 *
 * @return 0 on success, AVERROR(ENOMEM) if the buffer cannot be allocated
 */
static av_cold int bfi_decode_init(AVCodecContext * avctx)
{
    BFIContext *bfi = avctx->priv_data;
    avctx->pix_fmt = PIX_FMT_PAL8;
    bfi->dst = av_mallocz(avctx->width * avctx->height);
    if (!bfi->dst)
        return AVERROR(ENOMEM);
    return 0;
}

/**
 * Decode one BFI frame.
 *
 * The bitstream is a sequence of chains: normal (raw copy), back (copy
 * from earlier output), skip, and fill (two alternating colours). The
 * first frame also carries a 6-bit-per-component palette in extradata.
 *
 * @return buf_size on success, -1 on error
 */
static int bfi_decode_frame(AVCodecContext * avctx, void *data,
                            int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    BFIContext *bfi = avctx->priv_data;
    uint8_t *dst = bfi->dst;
    uint8_t *src, *dst_offset, colour1, colour2;
    uint8_t *frame_end = bfi->dst + avctx->width * avctx->height;
    uint32_t *pal;
    int i, j, height = avctx->height;

    if (bfi->frame.data[0])
        avctx->release_buffer(avctx, &bfi->frame);

    bfi->frame.reference = 1;
    if (avctx->get_buffer(avctx, &bfi->frame) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    /* Set frame parameters and palette, if necessary */
    if (!avctx->frame_number) {
        bfi->frame.pict_type = FF_I_TYPE;
        bfi->frame.key_frame = 1;
        /* Setting the palette */
        if (avctx->extradata_size > 768) {
            // fix: log against the codec context instead of NULL
            av_log(avctx, AV_LOG_ERROR, "Palette is too large.\n");
            return -1;
        }
        pal = (uint32_t *) bfi->frame.data[1];
        for (i = 0; i < avctx->extradata_size / 3; i++) {
            int shift = 16;
            *pal = 0;
            // expand 6-bit VGA components to 8 bits: (v << 2) | (v >> 4)
            for (j = 0; j < 3; j++, shift -= 8)
                *pal += ((avctx->extradata[i * 3 + j] << 2) |
                         (avctx->extradata[i * 3 + j] >> 4)) << shift;
            pal++;
        }
        bfi->frame.palette_has_changed = 1;
    } else {
        bfi->frame.pict_type = FF_P_TYPE;
        bfi->frame.key_frame = 0;
    }

    buf += 4; // Unpacked size, not required.

    while (dst != frame_end) {
        // per-chain multiplier (log2) used by the output-bound check below
        static const uint8_t lentab[4] = { 0, 2, 0, 1 };
        unsigned int byte = *buf++, av_uninit(offset);
        unsigned int code = byte >> 6;
        unsigned int length = byte & ~0xC0;

        /* Get length and offset (if required) */
        if (length == 0) {
            if (code == 1) {
                length = bytestream_get_byte(&buf);
                offset = bytestream_get_le16(&buf);
            } else {
                length = bytestream_get_le16(&buf);
                if (code == 2 && length == 0)
                    break; // end-of-frame marker
            }
        } else {
            if (code == 1)
                offset = bytestream_get_byte(&buf);
        }

        /* Do boundary check */
        if (dst + (length << lentab[code]) > frame_end)
            break;

        switch (code) {
        case 0:                // Normal Chain: raw pixels from the bitstream
            bytestream_get_buffer(&buf, dst, length);
            dst += length;
            break;
        case 1:                // Back Chain: copy from already-decoded output
            dst_offset = dst - offset;
            length *= 4;       // Convert dwords to bytes.
            if (dst_offset < bfi->dst)
                break;
            while (length--)
                *dst++ = *dst_offset++;
            break;
        case 2:                // Skip Chain: leave previous content in place
            dst += length;
            break;
        case 3:                // Fill Chain: alternate two colours
            colour1 = bytestream_get_byte(&buf);
            colour2 = bytestream_get_byte(&buf);
            while (length--) {
                *dst++ = colour1;
                *dst++ = colour2;
            }
            break;
        }
    }

    // copy the scratch buffer into the (possibly padded) output frame
    src = bfi->dst;
    dst = bfi->frame.data[0];
    while (height--) {
        memcpy(dst, src, avctx->width);
        src += avctx->width;
        dst += bfi->frame.linesize[0];
    }
    *data_size = sizeof(AVFrame);
    *(AVFrame *) data = bfi->frame;
    return buf_size;
}

/** Free the scratch buffer and release any pending frame. */
static av_cold int bfi_decode_close(AVCodecContext * avctx)
{
    BFIContext *bfi = avctx->priv_data;
    if (bfi->frame.data[0])
        avctx->release_buffer(avctx, &bfi->frame);
    av_free(bfi->dst);
    return 0;
}

AVCodec bfi_decoder = {
    .name           = "bfi",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_BFI,
    .priv_data_size = sizeof(BFIContext),
    .init           = bfi_decode_init,
    .close          = bfi_decode_close,
    .decode         = bfi_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Brute Force & Ignorance"),
};
123linslouis-android-video-cutter
jni/libavcodec/bfi.c
C
asf20
5,514
/* * Copyright (c) 2001 Fabrice Bellard * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Options definition for AVCodecContext. */ #include "avcodec.h" #include "opt.h" #include <float.h> /* FLT_MIN, FLT_MAX */ static const char* context_to_name(void* ptr) { AVCodecContext *avc= ptr; if(avc && avc->codec && avc->codec->name) return avc->codec->name; else return "NULL"; } #define OFFSET(x) offsetof(AVCodecContext,x) #define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C //these names are too long to be readable #define V AV_OPT_FLAG_VIDEO_PARAM #define A AV_OPT_FLAG_AUDIO_PARAM #define S AV_OPT_FLAG_SUBTITLE_PARAM #define E AV_OPT_FLAG_ENCODING_PARAM #define D AV_OPT_FLAG_DECODING_PARAM #define AV_CODEC_DEFAULT_BITRATE 200*1000 static const AVOption options[]={ {"b", "set bitrate (in bits/s)", OFFSET(bit_rate), FF_OPT_TYPE_INT, AV_CODEC_DEFAULT_BITRATE, INT_MIN, INT_MAX, V|E}, {"ab", "set bitrate (in bits/s)", OFFSET(bit_rate), FF_OPT_TYPE_INT, 64*1000, INT_MIN, INT_MAX, A|E}, {"bt", "set video bitrate tolerance (in bits/s)", OFFSET(bit_rate_tolerance), FF_OPT_TYPE_INT, AV_CODEC_DEFAULT_BITRATE*20, 1, INT_MAX, V|E}, {"flags", NULL, OFFSET(flags), 
FF_OPT_TYPE_FLAGS, DEFAULT, 0, UINT_MAX, V|A|E|D, "flags"}, {"mv4", "use four motion vector by macroblock (mpeg4)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_4MV, INT_MIN, INT_MAX, V|E, "flags"}, {"obmc", "use overlapped block motion compensation (h263+)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_OBMC, INT_MIN, INT_MAX, V|E, "flags"}, {"qpel", "use 1/4 pel motion compensation", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_QPEL, INT_MIN, INT_MAX, V|E, "flags"}, {"loop", "use loop filter", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_LOOP_FILTER, INT_MIN, INT_MAX, V|E, "flags"}, {"qscale", "use fixed qscale", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_QSCALE, INT_MIN, INT_MAX, 0, "flags"}, {"gmc", "use gmc", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_GMC, INT_MIN, INT_MAX, V|E, "flags"}, {"mv0", "always try a mb with mv=<0,0>", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_MV0, INT_MIN, INT_MAX, V|E, "flags"}, {"part", "use data partitioning", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_PART, INT_MIN, INT_MAX, V|E, "flags"}, {"input_preserved", NULL, 0, FF_OPT_TYPE_CONST, CODEC_FLAG_INPUT_PRESERVED, INT_MIN, INT_MAX, 0, "flags"}, {"pass1", "use internal 2pass ratecontrol in first pass mode", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_PASS1, INT_MIN, INT_MAX, 0, "flags"}, {"pass2", "use internal 2pass ratecontrol in second pass mode", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_PASS2, INT_MIN, INT_MAX, 0, "flags"}, {"extern_huff", "use external huffman table (for mjpeg)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_EXTERN_HUFF, INT_MIN, INT_MAX, 0, "flags"}, {"gray", "only decode/encode grayscale", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_GRAY, INT_MIN, INT_MAX, V|E|D, "flags"}, {"emu_edge", "don't draw edges", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_EMU_EDGE, INT_MIN, INT_MAX, 0, "flags"}, {"psnr", "error[?] 
variables will be set during encoding", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_PSNR, INT_MIN, INT_MAX, V|E, "flags"}, {"truncated", NULL, 0, FF_OPT_TYPE_CONST, CODEC_FLAG_TRUNCATED, INT_MIN, INT_MAX, 0, "flags"}, {"naq", "normalize adaptive quantization", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_NORMALIZE_AQP, INT_MIN, INT_MAX, V|E, "flags"}, {"ildct", "use interlaced dct", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_INTERLACED_DCT, INT_MIN, INT_MAX, V|E, "flags"}, {"low_delay", "force low delay", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_LOW_DELAY, INT_MIN, INT_MAX, V|D|E, "flags"}, {"alt", "enable alternate scantable (mpeg2/mpeg4)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_ALT_SCAN, INT_MIN, INT_MAX, V|E, "flags"}, {"global_header", "place global headers in extradata instead of every keyframe", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_GLOBAL_HEADER, INT_MIN, INT_MAX, V|A|E, "flags"}, {"bitexact", "use only bitexact stuff (except (i)dct)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_BITEXACT, INT_MIN, INT_MAX, A|V|S|D|E, "flags"}, {"aic", "h263 advanced intra coding / mpeg4 ac prediction", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_AC_PRED, INT_MIN, INT_MAX, V|E, "flags"}, {"umv", "use unlimited motion vectors", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_H263P_UMV, INT_MIN, INT_MAX, V|E, "flags"}, {"cbp", "use rate distortion optimization for cbp", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_CBP_RD, INT_MIN, INT_MAX, V|E, "flags"}, {"qprd", "use rate distortion optimization for qp selection", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_QP_RD, INT_MIN, INT_MAX, V|E, "flags"}, {"aiv", "h263 alternative inter vlc", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_H263P_AIV, INT_MIN, INT_MAX, V|E, "flags"}, {"slice", NULL, 0, FF_OPT_TYPE_CONST, CODEC_FLAG_H263P_SLICE_STRUCT, INT_MIN, INT_MAX, V|E, "flags"}, {"ilme", "interlaced motion estimation", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_INTERLACED_ME, INT_MIN, INT_MAX, V|E, "flags"}, {"scan_offset", "will reserve space for svcd scan offset user data", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_SVCD_SCAN_OFFSET, INT_MIN, INT_MAX, V|E, "flags"}, 
{"cgop", "closed gop", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_CLOSED_GOP, INT_MIN, INT_MAX, V|E, "flags"}, {"fast", "allow non spec compliant speedup tricks", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_FAST, INT_MIN, INT_MAX, V|E, "flags2"}, {"sgop", "strictly enforce gop size", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_STRICT_GOP, INT_MIN, INT_MAX, V|E, "flags2"}, {"noout", "skip bitstream encoding", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_NO_OUTPUT, INT_MIN, INT_MAX, V|E, "flags2"}, {"local_header", "place global headers at every keyframe instead of in extradata", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_LOCAL_HEADER, INT_MIN, INT_MAX, V|E, "flags2"}, {"sub_id", NULL, OFFSET(sub_id), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"me_method", "set motion estimation method", OFFSET(me_method), FF_OPT_TYPE_INT, ME_EPZS, INT_MIN, INT_MAX, V|E, "me_method"}, {"zero", "zero motion estimation (fastest)", 0, FF_OPT_TYPE_CONST, ME_ZERO, INT_MIN, INT_MAX, V|E, "me_method" }, {"full", "full motion estimation (slowest)", 0, FF_OPT_TYPE_CONST, ME_FULL, INT_MIN, INT_MAX, V|E, "me_method" }, {"epzs", "EPZS motion estimation (default)", 0, FF_OPT_TYPE_CONST, ME_EPZS, INT_MIN, INT_MAX, V|E, "me_method" }, {"esa", "esa motion estimation (alias for full)", 0, FF_OPT_TYPE_CONST, ME_FULL, INT_MIN, INT_MAX, V|E, "me_method" }, {"tesa", "tesa motion estimation", 0, FF_OPT_TYPE_CONST, ME_TESA, INT_MIN, INT_MAX, V|E, "me_method" }, {"dia", "dia motion estimation (alias for epzs)", 0, FF_OPT_TYPE_CONST, ME_EPZS, INT_MIN, INT_MAX, V|E, "me_method" }, {"log", "log motion estimation", 0, FF_OPT_TYPE_CONST, ME_LOG, INT_MIN, INT_MAX, V|E, "me_method" }, {"phods", "phods motion estimation", 0, FF_OPT_TYPE_CONST, ME_PHODS, INT_MIN, INT_MAX, V|E, "me_method" }, {"x1", "X1 motion estimation", 0, FF_OPT_TYPE_CONST, ME_X1, INT_MIN, INT_MAX, V|E, "me_method" }, {"hex", "hex motion estimation", 0, FF_OPT_TYPE_CONST, ME_HEX, INT_MIN, INT_MAX, V|E, "me_method" }, {"umh", "umh motion estimation", 0, FF_OPT_TYPE_CONST, ME_UMH, INT_MIN, 
INT_MAX, V|E, "me_method" }, {"iter", "iter motion estimation", 0, FF_OPT_TYPE_CONST, ME_ITER, INT_MIN, INT_MAX, V|E, "me_method" }, {"extradata_size", NULL, OFFSET(extradata_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"time_base", NULL, OFFSET(time_base), FF_OPT_TYPE_RATIONAL, DEFAULT, INT_MIN, INT_MAX}, {"g", "set the group of picture size", OFFSET(gop_size), FF_OPT_TYPE_INT, 12, INT_MIN, INT_MAX, V|E}, {"rate_emu", "frame rate emulation", OFFSET(rate_emu), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"ac", "set number of audio channels", OFFSET(channels), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"cutoff", "set cutoff bandwidth", OFFSET(cutoff), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, A|E}, {"frame_size", NULL, OFFSET(frame_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, A|E}, {"frame_number", NULL, OFFSET(frame_number), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, #if LIBAVCODEC_VERSION_MAJOR < 53 {"real_pict_num", NULL, OFFSET(real_pict_num), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, #endif {"delay", NULL, OFFSET(delay), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"qcomp", "video quantizer scale compression (VBR)", OFFSET(qcompress), FF_OPT_TYPE_FLOAT, 0.5, -FLT_MAX, FLT_MAX, V|E}, {"qblur", "video quantizer scale blur (VBR)", OFFSET(qblur), FF_OPT_TYPE_FLOAT, 0.5, 0, FLT_MAX, V|E}, {"qmin", "min video quantizer scale (VBR)", OFFSET(qmin), FF_OPT_TYPE_INT, 2, 1, 51, V|E}, {"qmax", "max video quantizer scale (VBR)", OFFSET(qmax), FF_OPT_TYPE_INT, 31, 1, 51, V|E}, {"qdiff", "max difference between the quantizer scale (VBR)", OFFSET(max_qdiff), FF_OPT_TYPE_INT, 3, INT_MIN, INT_MAX, V|E}, {"bf", "use 'frames' B frames", OFFSET(max_b_frames), FF_OPT_TYPE_INT, DEFAULT, 0, FF_MAX_B_FRAMES, V|E}, {"b_qfactor", "qp factor between p and b frames", OFFSET(b_quant_factor), FF_OPT_TYPE_FLOAT, 1.25, -FLT_MAX, FLT_MAX, V|E}, {"rc_strategy", 
"ratecontrol method", OFFSET(rc_strategy), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"b_strategy", "strategy to choose between I/P/B-frames", OFFSET(b_frame_strategy), FF_OPT_TYPE_INT, 0, INT_MIN, INT_MAX, V|E}, {"wpredp", "weighted prediction analysis method", OFFSET(weighted_p_pred), FF_OPT_TYPE_INT, 0, INT_MIN, INT_MAX, V|E}, {"hurry_up", NULL, OFFSET(hurry_up), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|D}, {"ps", "rtp payload size in bytes", OFFSET(rtp_payload_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"mv_bits", NULL, OFFSET(mv_bits), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"header_bits", NULL, OFFSET(header_bits), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"i_tex_bits", NULL, OFFSET(i_tex_bits), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"p_tex_bits", NULL, OFFSET(p_tex_bits), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"i_count", NULL, OFFSET(i_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"p_count", NULL, OFFSET(p_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"skip_count", NULL, OFFSET(skip_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"misc_bits", NULL, OFFSET(misc_bits), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"frame_bits", NULL, OFFSET(frame_bits), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"codec_tag", NULL, OFFSET(codec_tag), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"bug", "workaround not auto detected encoder bugs", OFFSET(workaround_bugs), FF_OPT_TYPE_FLAGS, FF_BUG_AUTODETECT, INT_MIN, INT_MAX, V|D, "bug"}, {"autodetect", NULL, 0, FF_OPT_TYPE_CONST, FF_BUG_AUTODETECT, INT_MIN, INT_MAX, V|D, "bug"}, {"old_msmpeg4", "some old lavc generated msmpeg4v3 files (no autodetection)", 0, FF_OPT_TYPE_CONST, FF_BUG_OLD_MSMPEG4, INT_MIN, INT_MAX, V|D, "bug"}, {"xvid_ilace", "Xvid interlacing bug (autodetected if fourcc==XVIX)", 0, FF_OPT_TYPE_CONST, FF_BUG_XVID_ILACE, INT_MIN, INT_MAX, V|D, "bug"}, {"ump4", "(autodetected if fourcc==UMP4)", 0, FF_OPT_TYPE_CONST, FF_BUG_UMP4, INT_MIN, 
INT_MAX, V|D, "bug"}, {"no_padding", "padding bug (autodetected)", 0, FF_OPT_TYPE_CONST, FF_BUG_NO_PADDING, INT_MIN, INT_MAX, V|D, "bug"}, {"amv", NULL, 0, FF_OPT_TYPE_CONST, FF_BUG_AMV, INT_MIN, INT_MAX, V|D, "bug"}, {"ac_vlc", "illegal vlc bug (autodetected per fourcc)", 0, FF_OPT_TYPE_CONST, FF_BUG_AC_VLC, INT_MIN, INT_MAX, V|D, "bug"}, {"qpel_chroma", NULL, 0, FF_OPT_TYPE_CONST, FF_BUG_QPEL_CHROMA, INT_MIN, INT_MAX, V|D, "bug"}, {"std_qpel", "old standard qpel (autodetected per fourcc/version)", 0, FF_OPT_TYPE_CONST, FF_BUG_STD_QPEL, INT_MIN, INT_MAX, V|D, "bug"}, {"qpel_chroma2", NULL, 0, FF_OPT_TYPE_CONST, FF_BUG_QPEL_CHROMA2, INT_MIN, INT_MAX, V|D, "bug"}, {"direct_blocksize", "direct-qpel-blocksize bug (autodetected per fourcc/version)", 0, FF_OPT_TYPE_CONST, FF_BUG_DIRECT_BLOCKSIZE, INT_MIN, INT_MAX, V|D, "bug"}, {"edge", "edge padding bug (autodetected per fourcc/version)", 0, FF_OPT_TYPE_CONST, FF_BUG_EDGE, INT_MIN, INT_MAX, V|D, "bug"}, {"hpel_chroma", NULL, 0, FF_OPT_TYPE_CONST, FF_BUG_HPEL_CHROMA, INT_MIN, INT_MAX, V|D, "bug"}, {"dc_clip", NULL, 0, FF_OPT_TYPE_CONST, FF_BUG_DC_CLIP, INT_MIN, INT_MAX, V|D, "bug"}, {"ms", "workaround various bugs in microsofts broken decoders", 0, FF_OPT_TYPE_CONST, FF_BUG_MS, INT_MIN, INT_MAX, V|D, "bug"}, {"trunc", "trancated frames", 0, FF_OPT_TYPE_CONST,FF_BUG_TRUNCATED, INT_MIN, INT_MAX, V|D, "bug"}, {"lelim", "single coefficient elimination threshold for luminance (negative values also consider dc coefficient)", OFFSET(luma_elim_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"celim", "single coefficient elimination threshold for chrominance (negative values also consider dc coefficient)", OFFSET(chroma_elim_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"strict", "how strictly to follow the standards", OFFSET(strict_std_compliance), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, A|V|D|E, "strict"}, {"very", "strictly conform to a older more strict version of the spec or reference 
software", 0, FF_OPT_TYPE_CONST, FF_COMPLIANCE_VERY_STRICT, INT_MIN, INT_MAX, V|D|E, "strict"}, {"strict", "strictly conform to all the things in the spec no matter what consequences", 0, FF_OPT_TYPE_CONST, FF_COMPLIANCE_STRICT, INT_MIN, INT_MAX, V|D|E, "strict"}, {"normal", NULL, 0, FF_OPT_TYPE_CONST, FF_COMPLIANCE_NORMAL, INT_MIN, INT_MAX, V|D|E, "strict"}, {"inofficial", "allow unofficial extensions", 0, FF_OPT_TYPE_CONST, FF_COMPLIANCE_INOFFICIAL, INT_MIN, INT_MAX, V|D|E, "strict"}, {"experimental", "allow non standardized experimental things", 0, FF_OPT_TYPE_CONST, FF_COMPLIANCE_EXPERIMENTAL, INT_MIN, INT_MAX, V|D|E, "strict"}, {"b_qoffset", "qp offset between P and B frames", OFFSET(b_quant_offset), FF_OPT_TYPE_FLOAT, 1.25, -FLT_MAX, FLT_MAX, V|E}, {"er", "set error detection aggressivity", OFFSET(error_recognition), FF_OPT_TYPE_INT, FF_ER_CAREFUL, INT_MIN, INT_MAX, A|V|D, "er"}, {"careful", NULL, 0, FF_OPT_TYPE_CONST, FF_ER_CAREFUL, INT_MIN, INT_MAX, V|D, "er"}, {"compliant", NULL, 0, FF_OPT_TYPE_CONST, FF_ER_COMPLIANT, INT_MIN, INT_MAX, V|D, "er"}, {"aggressive", NULL, 0, FF_OPT_TYPE_CONST, FF_ER_AGGRESSIVE, INT_MIN, INT_MAX, V|D, "er"}, {"very_aggressive", NULL, 0, FF_OPT_TYPE_CONST, FF_ER_VERY_AGGRESSIVE, INT_MIN, INT_MAX, V|D, "er"}, {"has_b_frames", NULL, OFFSET(has_b_frames), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"block_align", NULL, OFFSET(block_align), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"parse_only", NULL, OFFSET(parse_only), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"mpeg_quant", "use MPEG quantizers instead of H.263", OFFSET(mpeg_quant), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"stats_out", NULL, OFFSET(stats_out), FF_OPT_TYPE_STRING, DEFAULT, CHAR_MIN, CHAR_MAX}, {"stats_in", NULL, OFFSET(stats_in), FF_OPT_TYPE_STRING, DEFAULT, CHAR_MIN, CHAR_MAX}, {"qsquish", "how to keep quantizer between qmin and qmax (0 = clip, 1 = use differentiable function)", OFFSET(rc_qsquish), FF_OPT_TYPE_FLOAT, DEFAULT, 0, 99, V|E}, 
{"rc_qmod_amp", "experimental quantizer modulation", OFFSET(rc_qmod_amp), FF_OPT_TYPE_FLOAT, DEFAULT, -FLT_MAX, FLT_MAX, V|E}, {"rc_qmod_freq", "experimental quantizer modulation", OFFSET(rc_qmod_freq), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"rc_override_count", NULL, OFFSET(rc_override_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"rc_eq", "set rate control equation", OFFSET(rc_eq), FF_OPT_TYPE_STRING, DEFAULT, CHAR_MIN, CHAR_MAX, V|E}, {"maxrate", "set max video bitrate tolerance (in bits/s)", OFFSET(rc_max_rate), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"minrate", "set min video bitrate tolerance (in bits/s)", OFFSET(rc_min_rate), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"bufsize", "set ratecontrol buffer size (in bits)", OFFSET(rc_buffer_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, A|V|E}, {"rc_buf_aggressivity", "currently useless", OFFSET(rc_buffer_aggressivity), FF_OPT_TYPE_FLOAT, 1.0, -FLT_MAX, FLT_MAX, V|E}, {"i_qfactor", "qp factor between P and I frames", OFFSET(i_quant_factor), FF_OPT_TYPE_FLOAT, -0.8, -FLT_MAX, FLT_MAX, V|E}, {"i_qoffset", "qp offset between P and I frames", OFFSET(i_quant_offset), FF_OPT_TYPE_FLOAT, 0.0, -FLT_MAX, FLT_MAX, V|E}, {"rc_init_cplx", "initial complexity for 1-pass encoding", OFFSET(rc_initial_cplx), FF_OPT_TYPE_FLOAT, DEFAULT, -FLT_MAX, FLT_MAX, V|E}, {"dct", "DCT algorithm", OFFSET(dct_algo), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, V|E, "dct"}, {"auto", "autoselect a good one (default)", 0, FF_OPT_TYPE_CONST, FF_DCT_AUTO, INT_MIN, INT_MAX, V|E, "dct"}, {"fastint", "fast integer", 0, FF_OPT_TYPE_CONST, FF_DCT_FASTINT, INT_MIN, INT_MAX, V|E, "dct"}, {"int", "accurate integer", 0, FF_OPT_TYPE_CONST, FF_DCT_INT, INT_MIN, INT_MAX, V|E, "dct"}, {"mmx", NULL, 0, FF_OPT_TYPE_CONST, FF_DCT_MMX, INT_MIN, INT_MAX, V|E, "dct"}, {"mlib", NULL, 0, FF_OPT_TYPE_CONST, FF_DCT_MLIB, INT_MIN, INT_MAX, V|E, "dct"}, {"altivec", NULL, 0, FF_OPT_TYPE_CONST, FF_DCT_ALTIVEC, INT_MIN, INT_MAX, V|E, 
"dct"}, {"faan", "floating point AAN DCT", 0, FF_OPT_TYPE_CONST, FF_DCT_FAAN, INT_MIN, INT_MAX, V|E, "dct"}, {"lumi_mask", "compresses bright areas stronger than medium ones", OFFSET(lumi_masking), FF_OPT_TYPE_FLOAT, 0, -FLT_MAX, FLT_MAX, V|E}, {"tcplx_mask", "temporal complexity masking", OFFSET(temporal_cplx_masking), FF_OPT_TYPE_FLOAT, 0, -FLT_MAX, FLT_MAX, V|E}, {"scplx_mask", "spatial complexity masking", OFFSET(spatial_cplx_masking), FF_OPT_TYPE_FLOAT, 0, -FLT_MAX, FLT_MAX, V|E}, {"p_mask", "inter masking", OFFSET(p_masking), FF_OPT_TYPE_FLOAT, 0, -FLT_MAX, FLT_MAX, V|E}, {"dark_mask", "compresses dark areas stronger than medium ones", OFFSET(dark_masking), FF_OPT_TYPE_FLOAT, 0, -FLT_MAX, FLT_MAX, V|E}, {"idct", "select IDCT implementation", OFFSET(idct_algo), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, V|E|D, "idct"}, {"auto", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_AUTO, INT_MIN, INT_MAX, V|E|D, "idct"}, {"int", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_INT, INT_MIN, INT_MAX, V|E|D, "idct"}, {"simple", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SIMPLE, INT_MIN, INT_MAX, V|E|D, "idct"}, {"simplemmx", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SIMPLEMMX, INT_MIN, INT_MAX, V|E|D, "idct"}, {"libmpeg2mmx", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_LIBMPEG2MMX, INT_MIN, INT_MAX, V|E|D, "idct"}, {"ps2", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_PS2, INT_MIN, INT_MAX, V|E|D, "idct"}, {"mlib", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_MLIB, INT_MIN, INT_MAX, V|E|D, "idct"}, {"arm", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_ARM, INT_MIN, INT_MAX, V|E|D, "idct"}, {"altivec", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_ALTIVEC, INT_MIN, INT_MAX, V|E|D, "idct"}, {"sh4", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SH4, INT_MIN, INT_MAX, V|E|D, "idct"}, {"simplearm", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SIMPLEARM, INT_MIN, INT_MAX, V|E|D, "idct"}, {"simplearmv5te", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SIMPLEARMV5TE, INT_MIN, INT_MAX, V|E|D, "idct"}, {"simplearmv6", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SIMPLEARMV6, INT_MIN, INT_MAX, V|E|D, 
"idct"}, {"simpleneon", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SIMPLENEON, INT_MIN, INT_MAX, V|E|D, "idct"}, {"simplealpha", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SIMPLEALPHA, INT_MIN, INT_MAX, V|E|D, "idct"}, {"h264", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_H264, INT_MIN, INT_MAX, V|E|D, "idct"}, {"vp3", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_VP3, INT_MIN, INT_MAX, V|E|D, "idct"}, {"ipp", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_IPP, INT_MIN, INT_MAX, V|E|D, "idct"}, {"xvidmmx", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_XVIDMMX, INT_MIN, INT_MAX, V|E|D, "idct"}, {"faani", "floating point AAN IDCT", 0, FF_OPT_TYPE_CONST, FF_IDCT_FAAN, INT_MIN, INT_MAX, V|D|E, "idct"}, {"slice_count", NULL, OFFSET(slice_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"ec", "set error concealment strategy", OFFSET(error_concealment), FF_OPT_TYPE_FLAGS, 3, INT_MIN, INT_MAX, V|D, "ec"}, {"guess_mvs", "iterative motion vector (MV) search (slow)", 0, FF_OPT_TYPE_CONST, FF_EC_GUESS_MVS, INT_MIN, INT_MAX, V|D, "ec"}, {"deblock", "use strong deblock filter for damaged MBs", 0, FF_OPT_TYPE_CONST, FF_EC_DEBLOCK, INT_MIN, INT_MAX, V|D, "ec"}, {"bits_per_coded_sample", NULL, OFFSET(bits_per_coded_sample), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"pred", "prediction method", OFFSET(prediction_method), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "pred"}, {"left", NULL, 0, FF_OPT_TYPE_CONST, FF_PRED_LEFT, INT_MIN, INT_MAX, V|E, "pred"}, {"plane", NULL, 0, FF_OPT_TYPE_CONST, FF_PRED_PLANE, INT_MIN, INT_MAX, V|E, "pred"}, {"median", NULL, 0, FF_OPT_TYPE_CONST, FF_PRED_MEDIAN, INT_MIN, INT_MAX, V|E, "pred"}, {"aspect", "sample aspect ratio", OFFSET(sample_aspect_ratio), FF_OPT_TYPE_RATIONAL, DEFAULT, 0, 10, V|E}, {"debug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, DEFAULT, 0, INT_MAX, V|A|S|E|D, "debug"}, {"pict", "picture info", 0, FF_OPT_TYPE_CONST, FF_DEBUG_PICT_INFO, INT_MIN, INT_MAX, V|D, "debug"}, {"rc", "rate control", 0, FF_OPT_TYPE_CONST, FF_DEBUG_RC, INT_MIN, INT_MAX, V|E, 
"debug"}, {"bitstream", NULL, 0, FF_OPT_TYPE_CONST, FF_DEBUG_BITSTREAM, INT_MIN, INT_MAX, V|D, "debug"}, {"mb_type", "macroblock (MB) type", 0, FF_OPT_TYPE_CONST, FF_DEBUG_MB_TYPE, INT_MIN, INT_MAX, V|D, "debug"}, {"qp", "per-block quantization parameter (QP)", 0, FF_OPT_TYPE_CONST, FF_DEBUG_QP, INT_MIN, INT_MAX, V|D, "debug"}, {"mv", "motion vector", 0, FF_OPT_TYPE_CONST, FF_DEBUG_MV, INT_MIN, INT_MAX, V|D, "debug"}, {"dct_coeff", NULL, 0, FF_OPT_TYPE_CONST, FF_DEBUG_DCT_COEFF, INT_MIN, INT_MAX, V|D, "debug"}, {"skip", NULL, 0, FF_OPT_TYPE_CONST, FF_DEBUG_SKIP, INT_MIN, INT_MAX, V|D, "debug"}, {"startcode", NULL, 0, FF_OPT_TYPE_CONST, FF_DEBUG_STARTCODE, INT_MIN, INT_MAX, V|D, "debug"}, {"pts", NULL, 0, FF_OPT_TYPE_CONST, FF_DEBUG_PTS, INT_MIN, INT_MAX, V|D, "debug"}, {"er", "error recognition", 0, FF_OPT_TYPE_CONST, FF_DEBUG_ER, INT_MIN, INT_MAX, V|D, "debug"}, {"mmco", "memory management control operations (H.264)", 0, FF_OPT_TYPE_CONST, FF_DEBUG_MMCO, INT_MIN, INT_MAX, V|D, "debug"}, {"bugs", NULL, 0, FF_OPT_TYPE_CONST, FF_DEBUG_BUGS, INT_MIN, INT_MAX, V|D, "debug"}, {"vis_qp", "visualize quantization parameter (QP), lower QP are tinted greener", 0, FF_OPT_TYPE_CONST, FF_DEBUG_VIS_QP, INT_MIN, INT_MAX, V|D, "debug"}, {"vis_mb_type", "visualize block types", 0, FF_OPT_TYPE_CONST, FF_DEBUG_VIS_MB_TYPE, INT_MIN, INT_MAX, V|D, "debug"}, {"buffers", "picture buffer allocations", 0, FF_OPT_TYPE_CONST, FF_DEBUG_BUFFERS, INT_MIN, INT_MAX, V|D, "debug"}, {"vismv", "visualize motion vectors (MVs)", OFFSET(debug_mv), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, V|D, "debug_mv"}, {"pf", "forward predicted MVs of P-frames", 0, FF_OPT_TYPE_CONST, FF_DEBUG_VIS_MV_P_FOR, INT_MIN, INT_MAX, V|D, "debug_mv"}, {"bf", "forward predicted MVs of B-frames", 0, FF_OPT_TYPE_CONST, FF_DEBUG_VIS_MV_B_FOR, INT_MIN, INT_MAX, V|D, "debug_mv"}, {"bb", "backward predicted MVs of B-frames", 0, FF_OPT_TYPE_CONST, FF_DEBUG_VIS_MV_B_BACK, INT_MIN, INT_MAX, V|D, "debug_mv"}, {"mb_qmin", "obsolete, use 
qmin", OFFSET(mb_qmin), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"mb_qmax", "obsolete, use qmax", OFFSET(mb_qmax), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"cmp", "full pel me compare function", OFFSET(me_cmp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"subcmp", "sub pel me compare function", OFFSET(me_sub_cmp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"mbcmp", "macroblock compare function", OFFSET(mb_cmp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"ildctcmp", "interlaced dct compare function", OFFSET(ildct_cmp), FF_OPT_TYPE_INT, FF_CMP_VSAD, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"dia_size", "diamond type & size for motion estimation", OFFSET(dia_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"last_pred", "amount of motion predictors from the previous frame", OFFSET(last_predictor_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"preme", "pre motion estimation", OFFSET(pre_me), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"precmp", "pre motion estimation compare function", OFFSET(me_pre_cmp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"sad", "sum of absolute differences, fast (default)", 0, FF_OPT_TYPE_CONST, FF_CMP_SAD, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"sse", "sum of squared errors", 0, FF_OPT_TYPE_CONST, FF_CMP_SSE, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"satd", "sum of absolute Hadamard transformed differences", 0, FF_OPT_TYPE_CONST, FF_CMP_SATD, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"dct", "sum of absolute DCT transformed differences", 0, FF_OPT_TYPE_CONST, FF_CMP_DCT, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"psnr", "sum of squared quantization errors (avoid, low quality)", 0, FF_OPT_TYPE_CONST, FF_CMP_PSNR, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"bit", "number of bits needed for the block", 0, FF_OPT_TYPE_CONST, FF_CMP_BIT, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"rd", "rate distortion optimal, slow", 0, FF_OPT_TYPE_CONST, FF_CMP_RD, 
INT_MIN, INT_MAX, V|E, "cmp_func"}, {"zero", "0", 0, FF_OPT_TYPE_CONST, FF_CMP_ZERO, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"vsad", "sum of absolute vertical differences", 0, FF_OPT_TYPE_CONST, FF_CMP_VSAD, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"vsse","sum of squared vertical differences", 0, FF_OPT_TYPE_CONST, FF_CMP_VSSE, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"nsse", "noise preserving sum of squared differences", 0, FF_OPT_TYPE_CONST, FF_CMP_NSSE, INT_MIN, INT_MAX, V|E, "cmp_func"}, #if CONFIG_SNOW_ENCODER {"w53", "5/3 wavelet, only used in snow", 0, FF_OPT_TYPE_CONST, FF_CMP_W53, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"w97", "9/7 wavelet, only used in snow", 0, FF_OPT_TYPE_CONST, FF_CMP_W97, INT_MIN, INT_MAX, V|E, "cmp_func"}, #endif {"dctmax", NULL, 0, FF_OPT_TYPE_CONST, FF_CMP_DCTMAX, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"chroma", NULL, 0, FF_OPT_TYPE_CONST, FF_CMP_CHROMA, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"pre_dia_size", "diamond type & size for motion estimation pre-pass", OFFSET(pre_dia_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"subq", "sub pel motion estimation quality", OFFSET(me_subpel_quality), FF_OPT_TYPE_INT, 8, INT_MIN, INT_MAX, V|E}, {"dtg_active_format", NULL, OFFSET(dtg_active_format), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"me_range", "limit motion vectors range (1023 for DivX player)", OFFSET(me_range), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"ibias", "intra quant bias", OFFSET(intra_quant_bias), FF_OPT_TYPE_INT, FF_DEFAULT_QUANT_BIAS, INT_MIN, INT_MAX, V|E}, {"pbias", "inter quant bias", OFFSET(inter_quant_bias), FF_OPT_TYPE_INT, FF_DEFAULT_QUANT_BIAS, INT_MIN, INT_MAX, V|E}, {"color_table_id", NULL, OFFSET(color_table_id), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"internal_buffer_count", NULL, OFFSET(internal_buffer_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"global_quality", NULL, OFFSET(global_quality), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"coder", NULL, OFFSET(coder_type), 
FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "coder"}, {"vlc", "variable length coder / huffman coder", 0, FF_OPT_TYPE_CONST, FF_CODER_TYPE_VLC, INT_MIN, INT_MAX, V|E, "coder"}, {"ac", "arithmetic coder", 0, FF_OPT_TYPE_CONST, FF_CODER_TYPE_AC, INT_MIN, INT_MAX, V|E, "coder"}, {"raw", "raw (no encoding)", 0, FF_OPT_TYPE_CONST, FF_CODER_TYPE_RAW, INT_MIN, INT_MAX, V|E, "coder"}, {"rle", "run-length coder", 0, FF_OPT_TYPE_CONST, FF_CODER_TYPE_RLE, INT_MIN, INT_MAX, V|E, "coder"}, {"deflate", "deflate-based coder", 0, FF_OPT_TYPE_CONST, FF_CODER_TYPE_DEFLATE, INT_MIN, INT_MAX, V|E, "coder"}, {"context", "context model", OFFSET(context_model), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"slice_flags", NULL, OFFSET(slice_flags), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"xvmc_acceleration", NULL, OFFSET(xvmc_acceleration), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"mbd", "macroblock decision algorithm (high quality mode)", OFFSET(mb_decision), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "mbd"}, {"simple", "use mbcmp (default)", 0, FF_OPT_TYPE_CONST, FF_MB_DECISION_SIMPLE, INT_MIN, INT_MAX, V|E, "mbd"}, {"bits", "use fewest bits", 0, FF_OPT_TYPE_CONST, FF_MB_DECISION_BITS, INT_MIN, INT_MAX, V|E, "mbd"}, {"rd", "use best rate distortion", 0, FF_OPT_TYPE_CONST, FF_MB_DECISION_RD, INT_MIN, INT_MAX, V|E, "mbd"}, {"stream_codec_tag", NULL, OFFSET(stream_codec_tag), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"sc_threshold", "scene change threshold", OFFSET(scenechange_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"lmin", "min lagrange factor (VBR)", OFFSET(lmin), FF_OPT_TYPE_INT, 2*FF_QP2LAMBDA, 0, INT_MAX, V|E}, {"lmax", "max lagrange factor (VBR)", OFFSET(lmax), FF_OPT_TYPE_INT, 31*FF_QP2LAMBDA, 0, INT_MAX, V|E}, {"nr", "noise reduction", OFFSET(noise_reduction), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"rc_init_occupancy", "number of bits which should be loaded into the rc buffer before decoding starts", 
OFFSET(rc_initial_buffer_occupancy), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"inter_threshold", NULL, OFFSET(inter_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"flags2", NULL, OFFSET(flags2), FF_OPT_TYPE_FLAGS, CODEC_FLAG2_FASTPSKIP|CODEC_FLAG2_BIT_RESERVOIR|CODEC_FLAG2_PSY|CODEC_FLAG2_MBTREE, 0, UINT_MAX, V|A|E|D, "flags2"}, {"error", NULL, OFFSET(error_rate), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"antialias", "MP3 antialias algorithm", OFFSET(antialias_algo), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|D, "aa"}, {"auto", NULL, 0, FF_OPT_TYPE_CONST, FF_AA_AUTO, INT_MIN, INT_MAX, V|D, "aa"}, {"fastint", NULL, 0, FF_OPT_TYPE_CONST, FF_AA_FASTINT, INT_MIN, INT_MAX, V|D, "aa"}, {"int", NULL, 0, FF_OPT_TYPE_CONST, FF_AA_INT, INT_MIN, INT_MAX, V|D, "aa"}, {"float", NULL, 0, FF_OPT_TYPE_CONST, FF_AA_FLOAT, INT_MIN, INT_MAX, V|D, "aa"}, {"qns", "quantizer noise shaping", OFFSET(quantizer_noise_shaping), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"threads", NULL, OFFSET(thread_count), FF_OPT_TYPE_INT, 1, INT_MIN, INT_MAX, V|E|D}, {"me_threshold", "motion estimaton threshold", OFFSET(me_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"mb_threshold", "macroblock threshold", OFFSET(mb_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"dc", "intra_dc_precision", OFFSET(intra_dc_precision), FF_OPT_TYPE_INT, 0, INT_MIN, INT_MAX, V|E}, {"nssew", "nsse weight", OFFSET(nsse_weight), FF_OPT_TYPE_INT, 8, INT_MIN, INT_MAX, V|E}, {"skip_top", "number of macroblock rows at the top which are skipped", OFFSET(skip_top), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|D}, {"skip_bottom", "number of macroblock rows at the bottom which are skipped", OFFSET(skip_bottom), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|D}, {"profile", NULL, OFFSET(profile), FF_OPT_TYPE_INT, FF_PROFILE_UNKNOWN, INT_MIN, INT_MAX, V|A|E, "profile"}, {"unknown", NULL, 0, FF_OPT_TYPE_CONST, FF_PROFILE_UNKNOWN, INT_MIN, INT_MAX, V|A|E, 
"profile"}, {"aac_main", NULL, 0, FF_OPT_TYPE_CONST, FF_PROFILE_AAC_MAIN, INT_MIN, INT_MAX, A|E, "profile"}, {"aac_low", NULL, 0, FF_OPT_TYPE_CONST, FF_PROFILE_AAC_LOW, INT_MIN, INT_MAX, A|E, "profile"}, {"aac_ssr", NULL, 0, FF_OPT_TYPE_CONST, FF_PROFILE_AAC_SSR, INT_MIN, INT_MAX, A|E, "profile"}, {"aac_ltp", NULL, 0, FF_OPT_TYPE_CONST, FF_PROFILE_AAC_LTP, INT_MIN, INT_MAX, A|E, "profile"}, {"level", NULL, OFFSET(level), FF_OPT_TYPE_INT, FF_LEVEL_UNKNOWN, INT_MIN, INT_MAX, V|A|E, "level"}, {"unknown", NULL, 0, FF_OPT_TYPE_CONST, FF_LEVEL_UNKNOWN, INT_MIN, INT_MAX, V|A|E, "level"}, {"lowres", "decode at 1= 1/2, 2=1/4, 3=1/8 resolutions", OFFSET(lowres), FF_OPT_TYPE_INT, 0, 0, INT_MAX, V|D}, {"skip_threshold", "frame skip threshold", OFFSET(frame_skip_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"skip_factor", "frame skip factor", OFFSET(frame_skip_factor), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"skip_exp", "frame skip exponent", OFFSET(frame_skip_exp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"skipcmp", "frame skip compare function", OFFSET(frame_skip_cmp), FF_OPT_TYPE_INT, FF_CMP_DCTMAX, INT_MIN, INT_MAX, V|E, "cmp_func"}, {"border_mask", "increases the quantizer for macroblocks close to borders", OFFSET(border_masking), FF_OPT_TYPE_FLOAT, DEFAULT, -FLT_MAX, FLT_MAX, V|E}, {"mblmin", "min macroblock lagrange factor (VBR)", OFFSET(mb_lmin), FF_OPT_TYPE_INT, FF_QP2LAMBDA * 2, 1, FF_LAMBDA_MAX, V|E}, {"mblmax", "max macroblock lagrange factor (VBR)", OFFSET(mb_lmax), FF_OPT_TYPE_INT, FF_QP2LAMBDA * 31, 1, FF_LAMBDA_MAX, V|E}, {"mepc", "motion estimation bitrate penalty compensation (1.0 = 256)", OFFSET(me_penalty_compensation), FF_OPT_TYPE_INT, 256, INT_MIN, INT_MAX, V|E}, {"skip_loop_filter", NULL, OFFSET(skip_loop_filter), FF_OPT_TYPE_INT, AVDISCARD_DEFAULT, INT_MIN, INT_MAX, V|D, "avdiscard"}, {"skip_idct" , NULL, OFFSET(skip_idct) , FF_OPT_TYPE_INT, AVDISCARD_DEFAULT, INT_MIN, INT_MAX, V|D, "avdiscard"}, {"skip_frame" , 
NULL, OFFSET(skip_frame) , FF_OPT_TYPE_INT, AVDISCARD_DEFAULT, INT_MIN, INT_MAX, V|D, "avdiscard"}, {"none" , NULL, 0, FF_OPT_TYPE_CONST, AVDISCARD_NONE , INT_MIN, INT_MAX, V|D, "avdiscard"}, {"default" , NULL, 0, FF_OPT_TYPE_CONST, AVDISCARD_DEFAULT, INT_MIN, INT_MAX, V|D, "avdiscard"}, {"noref" , NULL, 0, FF_OPT_TYPE_CONST, AVDISCARD_NONREF , INT_MIN, INT_MAX, V|D, "avdiscard"}, {"bidir" , NULL, 0, FF_OPT_TYPE_CONST, AVDISCARD_BIDIR , INT_MIN, INT_MAX, V|D, "avdiscard"}, {"nokey" , NULL, 0, FF_OPT_TYPE_CONST, AVDISCARD_NONKEY , INT_MIN, INT_MAX, V|D, "avdiscard"}, {"all" , NULL, 0, FF_OPT_TYPE_CONST, AVDISCARD_ALL , INT_MIN, INT_MAX, V|D, "avdiscard"}, {"bidir_refine", "refine the two motion vectors used in bidirectional macroblocks", OFFSET(bidir_refine), FF_OPT_TYPE_INT, 1, 0, 4, V|E}, {"brd_scale", "downscales frames for dynamic B-frame decision", OFFSET(brd_scale), FF_OPT_TYPE_INT, DEFAULT, 0, 10, V|E}, {"crf", "enables constant quality mode, and selects the quality (x264)", OFFSET(crf), FF_OPT_TYPE_FLOAT, DEFAULT, 0, 51, V|E}, {"cqp", "constant quantization parameter rate control method", OFFSET(cqp), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, V|E}, {"keyint_min", "minimum interval between IDR-frames (x264)", OFFSET(keyint_min), FF_OPT_TYPE_INT, 25, INT_MIN, INT_MAX, V|E}, {"refs", "reference frames to consider for motion compensation (Snow)", OFFSET(refs), FF_OPT_TYPE_INT, 1, INT_MIN, INT_MAX, V|E}, {"chromaoffset", "chroma qp offset from luma", OFFSET(chromaoffset), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"bframebias", "influences how often B-frames are used", OFFSET(bframebias), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"trellis", "rate-distortion optimal quantization", OFFSET(trellis), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|A|E}, {"directpred", "direct mv prediction mode - 0 (none), 1 (spatial), 2 (temporal), 3 (auto)", OFFSET(directpred), FF_OPT_TYPE_INT, 2, INT_MIN, INT_MAX, V|E}, {"bpyramid", "allows B-frames to be used as 
references for predicting", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_BPYRAMID, INT_MIN, INT_MAX, V|E, "flags2"}, {"wpred", "weighted biprediction for b-frames (H.264)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_WPRED, INT_MIN, INT_MAX, V|E, "flags2"}, {"mixed_refs", "one reference per partition, as opposed to one reference per macroblock", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_MIXED_REFS, INT_MIN, INT_MAX, V|E, "flags2"}, {"dct8x8", "high profile 8x8 transform (H.264)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_8X8DCT, INT_MIN, INT_MAX, V|E, "flags2"}, {"fastpskip", "fast pskip (H.264)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_FASTPSKIP, INT_MIN, INT_MAX, V|E, "flags2"}, {"aud", "access unit delimiters (H.264)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_AUD, INT_MIN, INT_MAX, V|E, "flags2"}, {"skiprd", "RD optimal MB level residual skipping", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_SKIP_RD, INT_MIN, INT_MAX, V|E, "flags2"}, {"complexityblur", "reduce fluctuations in qp (before curve compression)", OFFSET(complexityblur), FF_OPT_TYPE_FLOAT, 20.0, FLT_MIN, FLT_MAX, V|E}, {"deblockalpha", "in-loop deblocking filter alphac0 parameter", OFFSET(deblockalpha), FF_OPT_TYPE_INT, DEFAULT, -6, 6, V|E}, {"deblockbeta", "in-loop deblocking filter beta parameter", OFFSET(deblockbeta), FF_OPT_TYPE_INT, DEFAULT, -6, 6, V|E}, {"partitions", "macroblock subpartition sizes to consider", OFFSET(partitions), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, V|E, "partitions"}, {"parti4x4", NULL, 0, FF_OPT_TYPE_CONST, X264_PART_I4X4, INT_MIN, INT_MAX, V|E, "partitions"}, {"parti8x8", NULL, 0, FF_OPT_TYPE_CONST, X264_PART_I8X8, INT_MIN, INT_MAX, V|E, "partitions"}, {"partp4x4", NULL, 0, FF_OPT_TYPE_CONST, X264_PART_P4X4, INT_MIN, INT_MAX, V|E, "partitions"}, {"partp8x8", NULL, 0, FF_OPT_TYPE_CONST, X264_PART_P8X8, INT_MIN, INT_MAX, V|E, "partitions"}, {"partb8x8", NULL, 0, FF_OPT_TYPE_CONST, X264_PART_B8X8, INT_MIN, INT_MAX, V|E, "partitions"}, {"sc_factor", "multiplied by qscale for each frame and added to scene_change_score", 
OFFSET(scenechange_factor), FF_OPT_TYPE_INT, 6, 0, INT_MAX, V|E}, {"mv0_threshold", NULL, OFFSET(mv0_threshold), FF_OPT_TYPE_INT, 256, 0, INT_MAX, V|E}, {"ivlc", "intra vlc table", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_INTRA_VLC, INT_MIN, INT_MAX, V|E, "flags2"}, {"b_sensitivity", "adjusts sensitivity of b_frame_strategy 1", OFFSET(b_sensitivity), FF_OPT_TYPE_INT, 40, 1, INT_MAX, V|E}, {"compression_level", NULL, OFFSET(compression_level), FF_OPT_TYPE_INT, FF_COMPRESSION_DEFAULT, INT_MIN, INT_MAX, V|A|E}, {"use_lpc", "sets whether to use LPC mode (FLAC)", OFFSET(use_lpc), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, A|E}, {"lpc_coeff_precision", "LPC coefficient precision (FLAC)", OFFSET(lpc_coeff_precision), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, A|E}, {"min_prediction_order", NULL, OFFSET(min_prediction_order), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, A|E}, {"max_prediction_order", NULL, OFFSET(max_prediction_order), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, A|E}, {"prediction_order_method", "search method for selecting prediction order", OFFSET(prediction_order_method), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, A|E}, {"min_partition_order", NULL, OFFSET(min_partition_order), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, A|E}, {"max_partition_order", NULL, OFFSET(max_partition_order), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, A|E}, {"timecode_frame_start", "GOP timecode frame start number, in non drop frame format", OFFSET(timecode_frame_start), FF_OPT_TYPE_INT64, 0, 0, INT64_MAX, V|E}, {"drop_frame_timecode", NULL, 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_DROP_FRAME_TIMECODE, INT_MIN, INT_MAX, V|E, "flags2"}, {"non_linear_q", "use non linear quantizer", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_NON_LINEAR_QUANT, INT_MIN, INT_MAX, V|E, "flags2"}, {"request_channels", "set desired number of audio channels", OFFSET(request_channels), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, A|D}, {"drc_scale", "percentage of dynamic range compression to apply", OFFSET(drc_scale), FF_OPT_TYPE_FLOAT, 1.0, 0.0, 1.0, A|D}, 
{"reservoir", "use bit reservoir", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_BIT_RESERVOIR, INT_MIN, INT_MAX, A|E, "flags2"}, {"mbtree", "use macroblock tree ratecontrol (x264 only)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_MBTREE, INT_MIN, INT_MAX, V|E, "flags2"}, {"bits_per_raw_sample", NULL, OFFSET(bits_per_raw_sample), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"channel_layout", NULL, OFFSET(channel_layout), FF_OPT_TYPE_INT64, DEFAULT, 0, INT64_MAX, A|E|D, "channel_layout"}, {"request_channel_layout", NULL, OFFSET(request_channel_layout), FF_OPT_TYPE_INT64, DEFAULT, 0, INT64_MAX, A|D, "request_channel_layout"}, {"rc_max_vbv_use", NULL, OFFSET(rc_max_available_vbv_use), FF_OPT_TYPE_FLOAT, 1.0/3, 0.0, FLT_MAX, V|E}, {"rc_min_vbv_use", NULL, OFFSET(rc_min_vbv_overflow_use), FF_OPT_TYPE_FLOAT, 3, 0.0, FLT_MAX, V|E}, {"ticks_per_frame", NULL, OFFSET(ticks_per_frame), FF_OPT_TYPE_INT, 1, 1, INT_MAX, A|V|E|D}, {"color_primaries", NULL, OFFSET(color_primaries), FF_OPT_TYPE_INT, AVCOL_PRI_UNSPECIFIED, 1, AVCOL_PRI_NB-1, V|E|D}, {"color_trc", NULL, OFFSET(color_trc), FF_OPT_TYPE_INT, AVCOL_TRC_UNSPECIFIED, 1, AVCOL_TRC_NB-1, V|E|D}, {"colorspace", NULL, OFFSET(colorspace), FF_OPT_TYPE_INT, AVCOL_SPC_UNSPECIFIED, 1, AVCOL_SPC_NB-1, V|E|D}, {"color_range", NULL, OFFSET(color_range), FF_OPT_TYPE_INT, AVCOL_RANGE_UNSPECIFIED, 0, AVCOL_RANGE_NB-1, V|E|D}, {"chroma_sample_location", NULL, OFFSET(chroma_sample_location), FF_OPT_TYPE_INT, AVCHROMA_LOC_UNSPECIFIED, 0, AVCHROMA_LOC_NB-1, V|E|D}, {"psy", "use psycho visual optimization", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_PSY, INT_MIN, INT_MAX, V|E, "flags2"}, {"psy_rd", "specify psycho visual strength", OFFSET(psy_rd), FF_OPT_TYPE_FLOAT, 1.0, 0, FLT_MAX, V|E}, {"psy_trellis", "specify psycho visual trellis", OFFSET(psy_trellis), FF_OPT_TYPE_FLOAT, 0, 0, FLT_MAX, V|E}, {"aq_mode", "specify aq method", OFFSET(aq_mode), FF_OPT_TYPE_INT, 1, 0, INT_MAX, V|E}, {"aq_strength", "specify aq strength", OFFSET(aq_strength), FF_OPT_TYPE_FLOAT, 1.0, 0, 
FLT_MAX, V|E},
{"rc_lookahead", "specify number of frames to look ahead for frametype", OFFSET(rc_lookahead), FF_OPT_TYPE_INT, 40, 0, INT_MAX, V|E},
{"ssim", "ssim will be calculated during encoding", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_SSIM, INT_MIN, INT_MAX, V|E, "flags2"},
{NULL},
};

/* The helper macros are scoped to the options[] table above. */
#undef A
#undef V
#undef S
#undef E
#undef D
#undef DEFAULT

/* AVClass descriptor binding AVCodecContext to the options[] table above,
 * so the generic AVOption API (e.g. av_opt_set_defaults2 below) can
 * introspect and set fields by name.
 * NOTE(review): context_to_name is presumably defined earlier in this file —
 * not visible in this chunk. */
static const AVClass av_codec_context_class = {
    "AVCodecContext", context_to_name, options, LIBAVUTIL_VERSION_INT
};

/**
 * Reset *s to the default state for the given codec type.
 *
 * Zeroes the whole context, applies the AVOption defaults that match the
 * codec type's flag class, then installs the default callback
 * implementations and a few non-option default field values.
 *
 * @param s          context to (re)initialize; all previous contents are lost
 * @param codec_type selects which AVOption defaults apply (audio/video/subtitle)
 */
void avcodec_get_context_defaults2(AVCodecContext *s, enum AVMediaType codec_type){
    int flags=0;
    memset(s, 0, sizeof(AVCodecContext));

    s->av_class= &av_codec_context_class;

    s->codec_type = codec_type;
    /* Map the media type to the AVOption flag class so only the matching
     * subset of option defaults is applied. Unknown types apply none. */
    if(codec_type == AVMEDIA_TYPE_AUDIO)
        flags= AV_OPT_FLAG_AUDIO_PARAM;
    else if(codec_type == AVMEDIA_TYPE_VIDEO)
        flags= AV_OPT_FLAG_VIDEO_PARAM;
    else if(codec_type == AVMEDIA_TYPE_SUBTITLE)
        flags= AV_OPT_FLAG_SUBTITLE_PARAM;
    av_opt_set_defaults2(s, flags, flags);

    /* Fields below are not (all) covered by the options[] table; they are
     * set explicitly after the AVOption defaults. */
    s->time_base= (AVRational){0,1};
    s->get_buffer= avcodec_default_get_buffer;
    s->release_buffer= avcodec_default_release_buffer;
    s->get_format= avcodec_default_get_format;
    s->execute= avcodec_default_execute;
    s->execute2= avcodec_default_execute2;
    s->sample_aspect_ratio= (AVRational){0,1};
    s->pix_fmt= PIX_FMT_NONE;
    s->sample_fmt= SAMPLE_FMT_NONE;

    s->palctrl = NULL;
    s->reget_buffer= avcodec_default_reget_buffer;
    s->reordered_opaque= AV_NOPTS_VALUE;
}

/**
 * Allocate an AVCodecContext and initialize it with the defaults for
 * the given codec type.
 *
 * @return newly allocated context (caller owns and must free it),
 *         or NULL on allocation failure
 */
AVCodecContext *avcodec_alloc_context2(enum AVMediaType codec_type){
    AVCodecContext *avctx= av_malloc(sizeof(AVCodecContext));

    if(avctx==NULL) return NULL;

    avcodec_get_context_defaults2(avctx, codec_type);

    return avctx;
}

/* Legacy wrapper: defaults without a specific media type. */
void avcodec_get_context_defaults(AVCodecContext *s){
    avcodec_get_context_defaults2(s, AVMEDIA_TYPE_UNKNOWN);
}

/* Legacy wrapper: allocate a context without a specific media type. */
AVCodecContext *avcodec_alloc_context(void){
    return avcodec_alloc_context2(AVMEDIA_TYPE_UNKNOWN);
}

/**
 * Copy the settings of src into dest.
 *
 * dest must be uninitialized (dest->codec == NULL); the function refuses to
 * overwrite an opened context. Pointer members that belong to an opened
 * codec are reset, and the separately owned buffers (rc_eq, extradata,
 * quant matrices, rc_override) are deep-copied so dest has its own copies.
 *
 * @return 0 on success, AVERROR(EINVAL) if dest is already initialized,
 *         AVERROR(ENOMEM) on allocation failure (partially copied buffers
 *         are freed before returning)
 */
int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
{
    if (dest->codec) { // check that the dest context is uninitialized
        av_log(dest, AV_LOG_ERROR,
               "Tried to copy AVCodecContext %p into already-initialized %p\n",
               src, dest);
        return AVERROR(EINVAL);
    }
    memcpy(dest, src, sizeof(*dest));

    /* set values specific to opened codecs back to their default state */
    dest->priv_data       = NULL;
    dest->codec           = NULL;
    dest->palctrl         = NULL;
    dest->slice_offset    = NULL;
    dest->internal_buffer = NULL;
    dest->hwaccel         = NULL;
    dest->thread_opaque   = NULL;

    /* reallocate values that should be allocated separately */
    dest->rc_eq        = NULL;
    dest->extradata    = NULL;
    dest->intra_matrix = NULL;
    dest->inter_matrix = NULL;
    dest->rc_override  = NULL;
    if (src->rc_eq) {
        dest->rc_eq = av_strdup(src->rc_eq);
        if (!dest->rc_eq)
            return AVERROR(ENOMEM);
    }

/* Deep-copy one buffer member; jumps to fail on OOM so everything already
 * allocated here gets released. */
#define alloc_and_copy_or_fail(obj, size, pad) \
    if (src->obj && size > 0) { \
        dest->obj = av_malloc(size + pad); \
        if (!dest->obj) \
            goto fail; \
        memcpy(dest->obj, src->obj, size); \
        if (pad) \
            memset(((uint8_t *) dest->obj) + size, 0, pad); \
    }

    alloc_and_copy_or_fail(extradata,    src->extradata_size, FF_INPUT_BUFFER_PADDING_SIZE);
    alloc_and_copy_or_fail(intra_matrix, 64 * sizeof(int16_t), 0);
    alloc_and_copy_or_fail(inter_matrix, 64 * sizeof(int16_t), 0);
    alloc_and_copy_or_fail(rc_override,  src->rc_override_count * sizeof(*src->rc_override), 0);
#undef alloc_and_copy_or_fail

    return 0;

fail:
    av_freep(&dest->rc_override);
    av_freep(&dest->intra_matrix);
    av_freep(&dest->inter_matrix);
    av_freep(&dest->extradata);
    av_freep(&dest->rc_eq);
    return AVERROR(ENOMEM);
}
123linslouis-android-video-cutter
jni/libavcodec/options.c
C
asf20
45,655
/*
 * Header file for hardcoded DV tables
 *
 * Copyright (c) 2010 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef DV_TABLEGEN_H
#define DV_TABLEGEN_H

#include <stdint.h>
#include "dv_vlc_data.h"

/* Table dimensions: CONFIG_SMALL trades lookup coverage for memory by only
 * mapping the small runs/levels directly; anything else must be composed
 * from multiple codes at encode time. */
#if CONFIG_SMALL
#define DV_VLC_MAP_RUN_SIZE 15
#define DV_VLC_MAP_LEV_SIZE 23
#else
#define DV_VLC_MAP_RUN_SIZE 64
#define DV_VLC_MAP_LEV_SIZE 512 //FIXME sign was removed so this should be /2 but needs check
#endif

/* VLC encoding lookup table */
struct dv_vlc_pair {
    uint32_t vlc;   // variable-length code bits, LSB-aligned
    uint32_t size;  // code length in bits
};

#if CONFIG_HARDCODED_TABLES
#define dv_vlc_map_tableinit()
#include "libavcodec/dv_tables.h"
#else
/* (run, level) -> (code, length) encoding map, filled by the init below.
 * Relies on static zero-initialization: size == 0 marks an empty slot. */
static struct dv_vlc_pair dv_vlc_map[DV_VLC_MAP_RUN_SIZE][DV_VLC_MAP_LEV_SIZE];

/**
 * Build dv_vlc_map from the canonical DV VLC tables
 * (dv_vlc_run / dv_vlc_level / dv_vlc_bits / dv_vlc_len,
 * presumably declared in dv_vlc_data.h — not visible here).
 *
 * Pass 1 fills entries that have a direct code in the canonical table;
 * for nonzero levels the code is shifted left by one to make room for a
 * sign bit (length grows by one accordingly). The last canonical entry
 * (index NB_DV_VLC-1) is deliberately skipped.
 *
 * Pass 2 synthesizes the remaining (run, level) combinations by
 * concatenating the (run, 0) code with the (0, level) code; depends on
 * row i-1 being complete before row i, so iteration order matters.
 * In the !CONFIG_SMALL build the upper half of each row holds the
 * negative levels (index (-j) & 0x1ff): same length, sign bit set.
 */
static void dv_vlc_map_tableinit(void)
{
    int i, j;
    for (i = 0; i < NB_DV_VLC - 1; i++) {
        if (dv_vlc_run[i] >= DV_VLC_MAP_RUN_SIZE)
            continue;
#if CONFIG_SMALL
        if (dv_vlc_level[i] >= DV_VLC_MAP_LEV_SIZE)
            continue;
#endif
        /* first canonical code for a (run, level) wins */
        if (dv_vlc_map[dv_vlc_run[i]][dv_vlc_level[i]].size != 0)
            continue;

        dv_vlc_map[dv_vlc_run[i]][dv_vlc_level[i]].vlc =
            dv_vlc_bits[i] << (!!dv_vlc_level[i]);
        dv_vlc_map[dv_vlc_run[i]][dv_vlc_level[i]].size =
            dv_vlc_len[i] + (!!dv_vlc_level[i]);
    }
    for (i = 0; i < DV_VLC_MAP_RUN_SIZE; i++) {
#if CONFIG_SMALL
        for (j = 1; j < DV_VLC_MAP_LEV_SIZE; j++) {
            if (dv_vlc_map[i][j].size == 0) {
                dv_vlc_map[i][j].vlc = dv_vlc_map[0][j].vlc |
                    (dv_vlc_map[i-1][0].vlc << (dv_vlc_map[0][j].size));
                dv_vlc_map[i][j].size = dv_vlc_map[i-1][0].size +
                    dv_vlc_map[0][j].size;
            }
        }
#else
        for (j = 1; j < DV_VLC_MAP_LEV_SIZE/2; j++) {
            if (dv_vlc_map[i][j].size == 0) {
                dv_vlc_map[i][j].vlc = dv_vlc_map[0][j].vlc |
                    (dv_vlc_map[i-1][0].vlc << (dv_vlc_map[0][j].size));
                dv_vlc_map[i][j].size = dv_vlc_map[i-1][0].size +
                    dv_vlc_map[0][j].size;
            }
            /* negative level: same code with the sign bit (LSB) set */
            dv_vlc_map[i][((uint16_t)(-j))&0x1ff].vlc =
                dv_vlc_map[i][j].vlc | 1;
            dv_vlc_map[i][((uint16_t)(-j))&0x1ff].size =
                dv_vlc_map[i][j].size;
        }
#endif
    }
}
#endif /* CONFIG_HARDCODED_TABLES */

#endif /* DV_TABLEGEN_H */
123linslouis-android-video-cutter
jni/libavcodec/dv_tablegen.h
C
asf20
3,174
/*
 * RealVideo 3 decoder
 * copyright (c) 2007 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * miscellaneous RV30 tables
 */

#ifndef AVCODEC_RV30DATA_H
#define AVCODEC_RV30DATA_H

#include <stdint.h>

/** DC quantizer mapping for RV30 (indexed by the 5-bit quantizer value;
 * saturates at the top of the range) */
static const uint8_t rv30_luma_dc_quant[32] = {
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 22, 22, 23, 23, 23, 24, 24, 25, 25
};

/**
 * This table is used for storing the differences
 * between the predicted and the real intra type.
 * Laid out as 9*9 pairs of values.
 */
static const uint8_t rv30_itype_code[9*9*2] = {
    0, 0, 0, 1, 1, 0, 1, 1, 0, 2, 2, 0, 0, 3, 3, 0, 1, 2,
    2, 1, 0, 4, 4, 0, 3, 1, 1, 3, 0, 5, 5, 0, 2, 2, 1, 4,
    4, 1, 0, 6, 3, 2, 1, 5, 2, 3, 5, 1, 6, 0, 0, 7, 4, 2,
    2, 4, 3, 3, 6, 1, 1, 6, 7, 0, 0, 8, 5, 2, 4, 3, 2, 5,
    3, 4, 1, 7, 4, 4, 7, 1, 8, 0, 6, 2, 3, 5, 5, 3, 2, 6,
    1, 8, 2, 7, 7, 2, 8, 1, 5, 4, 4, 5, 3, 6, 6, 3, 8, 2,
    4, 6, 5, 5, 6, 4, 2, 8, 7, 3, 3, 7, 6, 5, 5, 6, 7, 4,
    4, 7, 8, 3, 3, 8, 7, 5, 8, 4, 5, 7, 4, 8, 6, 6, 7, 6,
    5, 8, 8, 5, 6, 7, 8, 6, 7, 7, 6, 8, 8, 7, 7, 8, 8, 8,
};

/**
 * This table is used for retrieving the current intra type
 * based on its neighbors and adjustment provided by
 * code read and decoded before.
* * This is really a three-dimensional matrix with dimensions * [-1..9][-1..9][0..9]. The first and second coordinates are * detemined by the top and left neighbors (-1 if unavailable). */ static const uint8_t rv30_itype_from_context[900] = { 0, 9, 9, 9, 9, 9, 9, 9, 9, 0, 2, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 2, 0, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 0, 1, 9, 9, 9, 9, 9, 9, 9, 0, 2, 1, 6, 4, 8, 5, 7, 3, 1, 0, 2, 6, 5, 4, 3, 8, 7, 2, 8, 0, 1, 7, 4, 3, 6, 5, 2, 0, 1, 3, 8, 5, 4, 7, 6, 2, 0, 1, 4, 6, 7, 8, 3, 5, 0, 1, 5, 2, 6, 3, 8, 4, 7, 0, 1, 6, 2, 4, 7, 5, 8, 3, 2, 7, 0, 1, 4, 8, 6, 3, 5, 2, 8, 0, 1, 7, 3, 4, 5, 6, 1, 0, 9, 9, 9, 9, 9, 9, 9, 1, 2, 5, 6, 3, 0, 4, 8, 7, 1, 6, 2, 5, 3, 0, 4, 8, 7, 2, 1, 7, 6, 8, 3, 5, 0, 4, 1, 2, 5, 3, 6, 8, 4, 7, 0, 1, 6, 2, 0, 4, 5, 8, 7, 3, 1, 5, 2, 6, 3, 8, 4, 0, 7, 1, 6, 0, 2, 4, 5, 7, 3, 8, 2, 1, 7, 6, 0, 8, 5, 4, 3, 1, 2, 7, 8, 3, 4, 5, 6, 0, 9, 9, 9, 9, 9, 9, 9, 9, 9, 0, 2, 1, 8, 7, 6, 5, 4, 3, 1, 2, 0, 6, 5, 7, 4, 8, 3, 2, 8, 7, 1, 0, 6, 4, 3, 5, 2, 0, 8, 1, 3, 7, 5, 4, 6, 2, 0, 4, 1, 7, 8, 6, 3, 5, 2, 0, 1, 5, 8, 4, 6, 7, 3, 2, 0, 6, 1, 4, 7, 8, 5, 3, 2, 7, 8, 1, 0, 5, 4, 6, 3, 2, 8, 7, 1, 0, 4, 3, 6, 5, 9, 9, 9, 9, 9, 9, 9, 9, 9, 0, 2, 1, 3, 5, 8, 6, 4, 7, 1, 0, 2, 5, 3, 6, 4, 8, 7, 2, 8, 1, 0, 3, 5, 7, 6, 4, 3, 2, 5, 8, 1, 4, 6, 7, 0, 4, 2, 0, 6, 1, 5, 8, 3, 7, 5, 3, 1, 2, 8, 6, 4, 0, 7, 1, 6, 0, 2, 4, 5, 8, 3, 7, 2, 7, 0, 1, 5, 4, 8, 6, 3, 2, 8, 3, 5, 1, 0, 7, 6, 4, 9, 9, 9, 9, 9, 9, 9, 9, 9, 2, 0, 6, 1, 4, 7, 5, 8, 3, 1, 6, 2, 0, 4, 5, 3, 7, 8, 2, 8, 7, 6, 4, 0, 1, 5, 3, 4, 2, 1, 0, 6, 8, 3, 5, 7, 4, 2, 6, 0, 1, 5, 7, 8, 3, 1, 2, 5, 0, 6, 3, 4, 7, 8, 6, 4, 0, 1, 2, 7, 5, 3, 8, 2, 7, 4, 6, 0, 1, 8, 5, 3, 2, 8, 7, 4, 6, 1, 3, 5, 0, 9, 9, 9, 9, 9, 9, 9, 9, 9, 5, 1, 2, 3, 6, 8, 0, 4, 7, 1, 5, 6, 3, 2, 0, 4, 8, 7, 2, 1, 5, 3, 6, 8, 7, 4, 0, 5, 3, 1, 2, 6, 8, 4, 7, 0, 
1, 6, 2, 4, 5, 8, 0, 3, 7, 5, 1, 3, 6, 2, 0, 8, 4, 7, 1, 6, 5, 2, 0, 4, 3, 7, 8, 2, 7, 1, 6, 5, 0, 8, 3, 4, 2, 5, 1, 3, 6, 8, 4, 0, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 1, 6, 2, 0, 5, 4, 3, 7, 8, 1, 6, 5, 4, 2, 3, 0, 7, 8, 2, 1, 6, 7, 4, 8, 5, 3, 0, 2, 1, 6, 5, 8, 4, 3, 0, 7, 6, 4, 1, 2, 0, 5, 7, 8, 3, 1, 6, 5, 2, 3, 0, 4, 8, 7, 6, 1, 4, 0, 2, 7, 5, 3, 8, 2, 7, 4, 6, 1, 5, 0, 8, 3, 2, 1, 6, 8, 4, 7, 3, 5, 0, 9, 9, 9, 9, 9, 9, 9, 9, 9, 2, 0, 4, 7, 6, 1, 8, 5, 3, 6, 1, 2, 0, 4, 7, 5, 8, 3, 2, 7, 8, 0, 1, 6, 4, 3, 5, 2, 4, 0, 8, 3, 1, 7, 6, 5, 4, 2, 7, 0, 6, 1, 8, 5, 3, 2, 1, 0, 8, 5, 6, 7, 4, 3, 2, 6, 4, 1, 7, 0, 5, 8, 3, 2, 7, 4, 0, 8, 6, 1, 5, 3, 2, 8, 7, 4, 1, 0, 3, 6, 5, 9, 9, 9, 9, 9, 9, 9, 9, 9, 2, 0, 8, 1, 3, 4, 6, 5, 7, 1, 2, 0, 6, 8, 5, 7, 3, 4, 2, 8, 7, 1, 0, 3, 6, 5, 4, 8, 3, 2, 5, 1, 0, 4, 7, 6, 2, 0, 4, 8, 5, 1, 7, 6, 3, 2, 1, 0, 8, 5, 3, 6, 4, 7, 2, 1, 6, 0, 8, 4, 5, 7, 3, 2, 7, 8, 4, 0, 6, 1, 5, 3, 2, 8, 3, 0, 7, 4, 1, 6, 5, }; /** * Loop filter limits are taken from this table. */ static const uint8_t rv30_loop_filt_lim[32] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5 }; #endif /* AVCODEC_RV30DATA_H */
123linslouis-android-video-cutter
jni/libavcodec/rv30data.h
C
asf20
5,612
/* * TIFF image decoder * Copyright (c) 2006 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * TIFF image decoder * @file * @author Konstantin Shishkov */ #include "avcodec.h" #if CONFIG_ZLIB #include <zlib.h> #endif #include "lzw.h" #include "tiff.h" #include "faxcompr.h" #include "libavutil/common.h" #include "libavutil/intreadwrite.h" typedef struct TiffContext { AVCodecContext *avctx; AVFrame picture; int width, height; unsigned int bpp; int le; int compr; int invert; int fax_opts; int predictor; int fill_order; int strips, rps, sstype; int sot; const uint8_t* stripdata; const uint8_t* stripsizes; int stripsize, stripoff; LZWState *lzw; } TiffContext; static int tget_short(const uint8_t **p, int le){ int v = le ? AV_RL16(*p) : AV_RB16(*p); *p += 2; return v; } static int tget_long(const uint8_t **p, int le){ int v = le ? 
AV_RL32(*p) : AV_RB32(*p); *p += 4; return v; } static int tget(const uint8_t **p, int type, int le){ switch(type){ case TIFF_BYTE : return *(*p)++; case TIFF_SHORT: return tget_short(p, le); case TIFF_LONG : return tget_long (p, le); default : return -1; } } #if CONFIG_ZLIB static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src, int size) { z_stream zstream; int zret; memset(&zstream, 0, sizeof(zstream)); zstream.next_in = src; zstream.avail_in = size; zstream.next_out = dst; zstream.avail_out = *len; zret = inflateInit(&zstream); if (zret != Z_OK) { av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret); return zret; } zret = inflate(&zstream, Z_SYNC_FLUSH); inflateEnd(&zstream); *len = zstream.total_out; return zret == Z_STREAM_END ? Z_OK : zret; } #endif static int tiff_unpack_strip(TiffContext *s, uint8_t* dst, int stride, const uint8_t *src, int size, int lines){ int c, line, pixels, code; const uint8_t *ssrc = src; int width = s->width * s->bpp >> 3; #if CONFIG_ZLIB uint8_t *zbuf; unsigned long outlen; if(s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE){ int ret; outlen = width * lines; zbuf = av_malloc(outlen); ret = tiff_uncompress(zbuf, &outlen, src, size); if(ret != Z_OK){ av_log(s->avctx, AV_LOG_ERROR, "Uncompressing failed (%lu of %lu) with error %d\n", outlen, (unsigned long)width * lines, ret); av_free(zbuf); return -1; } src = zbuf; for(line = 0; line < lines; line++){ memcpy(dst, src, width); dst += stride; src += width; } av_free(zbuf); return 0; } #endif if(s->compr == TIFF_LZW){ if(ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF) < 0){ av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n"); return -1; } } if(s->compr == TIFF_CCITT_RLE || s->compr == TIFF_G3 || s->compr == TIFF_G4){ int i, ret = 0; uint8_t *src2 = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE); if(!src2 || (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE < (unsigned)size){ av_log(s->avctx, AV_LOG_ERROR, "Error allocating 
temporary buffer\n"); return -1; } if(s->fax_opts & 2){ av_log(s->avctx, AV_LOG_ERROR, "Uncompressed fax mode is not supported (yet)\n"); av_free(src2); return -1; } if(!s->fill_order){ memcpy(src2, src, size); }else{ for(i = 0; i < size; i++) src2[i] = av_reverse[src[i]]; } memset(src2+size, 0, FF_INPUT_BUFFER_PADDING_SIZE); switch(s->compr){ case TIFF_CCITT_RLE: case TIFF_G3: case TIFF_G4: ret = ff_ccitt_unpack(s->avctx, src2, size, dst, lines, stride, s->compr, s->fax_opts); break; } av_free(src2); return ret; } for(line = 0; line < lines; line++){ if(src - ssrc > size){ av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n"); return -1; } switch(s->compr){ case TIFF_RAW: memcpy(dst, src, width); src += width; break; case TIFF_PACKBITS: for(pixels = 0; pixels < width;){ code = (int8_t)*src++; if(code >= 0){ code++; if(pixels + code > width){ av_log(s->avctx, AV_LOG_ERROR, "Copy went out of bounds\n"); return -1; } memcpy(dst + pixels, src, code); src += code; pixels += code; }else if(code != -128){ // -127..-1 code = (-code) + 1; if(pixels + code > width){ av_log(s->avctx, AV_LOG_ERROR, "Run went out of bounds\n"); return -1; } c = *src++; memset(dst + pixels, c, code); pixels += code; } } break; case TIFF_LZW: pixels = ff_lzw_decode(s->lzw, dst, width); if(pixels < width){ av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n", pixels, width); return -1; } break; } dst += stride; } return 0; } static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *buf, const uint8_t *end_buf) { int tag, type, count, off, value = 0; int i, j; uint32_t *pal; const uint8_t *rp, *gp, *bp; tag = tget_short(&buf, s->le); type = tget_short(&buf, s->le); count = tget_long(&buf, s->le); off = tget_long(&buf, s->le); if(count == 1){ switch(type){ case TIFF_BYTE: case TIFF_SHORT: buf -= 4; value = tget(&buf, type, s->le); buf = NULL; break; case TIFF_LONG: value = off; buf = NULL; break; case TIFF_STRING: if(count <= 4){ buf -= 4; break; } default: value 
= -1; buf = start + off; } }else if(type_sizes[type] * count <= 4){ buf -= 4; }else{ buf = start + off; } if(buf && (buf < start || buf > end_buf)){ av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n"); return -1; } switch(tag){ case TIFF_WIDTH: s->width = value; break; case TIFF_HEIGHT: s->height = value; break; case TIFF_BPP: if(count == 1) s->bpp = value; else{ switch(type){ case TIFF_BYTE: s->bpp = (off & 0xFF) + ((off >> 8) & 0xFF) + ((off >> 16) & 0xFF) + ((off >> 24) & 0xFF); break; case TIFF_SHORT: case TIFF_LONG: s->bpp = 0; for(i = 0; i < count; i++) s->bpp += tget(&buf, type, s->le); break; default: s->bpp = -1; } } if(count > 4){ av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%d, %d components)\n", s->bpp, count); return -1; } switch(s->bpp*10 + count){ case 11: s->avctx->pix_fmt = PIX_FMT_MONOBLACK; break; case 81: s->avctx->pix_fmt = PIX_FMT_PAL8; break; case 243: s->avctx->pix_fmt = PIX_FMT_RGB24; break; case 161: s->avctx->pix_fmt = PIX_FMT_GRAY16BE; break; case 324: s->avctx->pix_fmt = PIX_FMT_RGBA; break; case 483: s->avctx->pix_fmt = s->le ? 
PIX_FMT_RGB48LE : PIX_FMT_RGB48BE; break; default: av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%d, %d components)\n", s->bpp, count); return -1; } if(s->width != s->avctx->width || s->height != s->avctx->height){ if(avcodec_check_dimensions(s->avctx, s->width, s->height)) return -1; avcodec_set_dimensions(s->avctx, s->width, s->height); } if(s->picture.data[0]) s->avctx->release_buffer(s->avctx, &s->picture); if(s->avctx->get_buffer(s->avctx, &s->picture) < 0){ av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } if(s->bpp == 8){ /* make default grayscale pal */ pal = (uint32_t *) s->picture.data[1]; for(i = 0; i < 256; i++) pal[i] = i * 0x010101; } break; case TIFF_COMPR: s->compr = value; s->predictor = 0; switch(s->compr){ case TIFF_RAW: case TIFF_PACKBITS: case TIFF_LZW: case TIFF_CCITT_RLE: break; case TIFF_G3: case TIFF_G4: s->fax_opts = 0; break; case TIFF_DEFLATE: case TIFF_ADOBE_DEFLATE: #if CONFIG_ZLIB break; #else av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n"); return -1; #endif case TIFF_JPEG: case TIFF_NEWJPEG: av_log(s->avctx, AV_LOG_ERROR, "JPEG compression is not supported\n"); return -1; default: av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n", s->compr); return -1; } break; case TIFF_ROWSPERSTRIP: if(type == TIFF_LONG && value == -1) value = s->avctx->height; if(value < 1){ av_log(s->avctx, AV_LOG_ERROR, "Incorrect value of rows per strip\n"); return -1; } s->rps = value; break; case TIFF_STRIP_OFFS: if(count == 1){ s->stripdata = NULL; s->stripoff = value; }else s->stripdata = start + off; s->strips = count; if(s->strips == 1) s->rps = s->height; s->sot = type; if(s->stripdata > end_buf){ av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n"); return -1; } break; case TIFF_STRIP_SIZE: if(count == 1){ s->stripsizes = NULL; s->stripsize = value; s->strips = 1; }else{ s->stripsizes = start + off; } s->strips = count; s->sstype = type; if(s->stripsizes 
> end_buf){ av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n"); return -1; } break; case TIFF_PREDICTOR: s->predictor = value; break; case TIFF_INVERT: switch(value){ case 0: s->invert = 1; break; case 1: s->invert = 0; break; case 2: case 3: break; default: av_log(s->avctx, AV_LOG_ERROR, "Color mode %d is not supported\n", value); return -1; } break; case TIFF_FILL_ORDER: if(value < 1 || value > 2){ av_log(s->avctx, AV_LOG_ERROR, "Unknown FillOrder value %d, trying default one\n", value); value = 1; } s->fill_order = value - 1; break; case TIFF_PAL: if(s->avctx->pix_fmt != PIX_FMT_PAL8){ av_log(s->avctx, AV_LOG_ERROR, "Palette met but this is not palettized format\n"); return -1; } pal = (uint32_t *) s->picture.data[1]; off = type_sizes[type]; rp = buf; gp = buf + count / 3 * off; bp = buf + count / 3 * off * 2; off = (type_sizes[type] - 1) << 3; for(i = 0; i < count / 3; i++){ j = (tget(&rp, type, s->le) >> off) << 16; j |= (tget(&gp, type, s->le) >> off) << 8; j |= tget(&bp, type, s->le) >> off; pal[i] = j; } break; case TIFF_PLANAR: if(value == 2){ av_log(s->avctx, AV_LOG_ERROR, "Planar format is not supported\n"); return -1; } break; case TIFF_T4OPTIONS: if(s->compr == TIFF_G3) s->fax_opts = value; break; case TIFF_T6OPTIONS: if(s->compr == TIFF_G4) s->fax_opts = value; break; } return 0; } static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; TiffContext * const s = avctx->priv_data; AVFrame *picture = data; AVFrame * const p= (AVFrame*)&s->picture; const uint8_t *orig_buf = buf, *end_buf = buf + buf_size; int id, le, off; int i, j, entries; int stride, soff, ssize; uint8_t *dst; //parse image header id = AV_RL16(buf); buf += 2; if(id == 0x4949) le = 1; else if(id == 0x4D4D) le = 0; else{ av_log(avctx, AV_LOG_ERROR, "TIFF header not found\n"); return -1; } s->le = le; s->invert = 0; s->compr = TIFF_RAW; s->fill_order = 0; // As 
TIFF 6.0 specification puts it "An arbitrary but carefully chosen number // that further identifies the file as a TIFF file" if(tget_short(&buf, le) != 42){ av_log(avctx, AV_LOG_ERROR, "The answer to life, universe and everything is not correct!\n"); return -1; } /* parse image file directory */ off = tget_long(&buf, le); if(orig_buf + off + 14 >= end_buf){ av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n"); return -1; } buf = orig_buf + off; entries = tget_short(&buf, le); for(i = 0; i < entries; i++){ if(tiff_decode_tag(s, orig_buf, buf, end_buf) < 0) return -1; buf += 12; } if(!s->stripdata && !s->stripoff){ av_log(avctx, AV_LOG_ERROR, "Image data is missing\n"); return -1; } /* now we have the data and may start decoding */ if(!p->data[0]){ s->bpp = 1; avctx->pix_fmt = PIX_FMT_MONOBLACK; if(s->width != s->avctx->width || s->height != s->avctx->height){ if(avcodec_check_dimensions(s->avctx, s->width, s->height)) return -1; avcodec_set_dimensions(s->avctx, s->width, s->height); } if(s->picture.data[0]) s->avctx->release_buffer(s->avctx, &s->picture); if(s->avctx->get_buffer(s->avctx, &s->picture) < 0){ av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } } if(s->strips == 1 && !s->stripsize){ av_log(avctx, AV_LOG_WARNING, "Image data size missing\n"); s->stripsize = buf_size - s->stripoff; } stride = p->linesize[0]; dst = p->data[0]; for(i = 0; i < s->height; i += s->rps){ if(s->stripsizes) ssize = tget(&s->stripsizes, s->sstype, s->le); else ssize = s->stripsize; if(s->stripdata){ soff = tget(&s->stripdata, s->sot, s->le); }else soff = s->stripoff; if(tiff_unpack_strip(s, dst, stride, orig_buf + soff, ssize, FFMIN(s->rps, s->height - i)) < 0) break; dst += s->rps * stride; } if(s->predictor == 2){ dst = p->data[0]; soff = s->bpp >> 3; ssize = s->width * soff; for(i = 0; i < s->height; i++) { for(j = soff; j < ssize; j++) dst[j] += dst[j - soff]; dst += stride; } } if(s->invert){ uint8_t *src; int j; src = s->picture.data[0]; 
for(j = 0; j < s->height; j++){ for(i = 0; i < s->picture.linesize[0]; i++) src[i] = 255 - src[i]; src += s->picture.linesize[0]; } } *picture= *(AVFrame*)&s->picture; *data_size = sizeof(AVPicture); return buf_size; } static av_cold int tiff_init(AVCodecContext *avctx){ TiffContext *s = avctx->priv_data; s->width = 0; s->height = 0; s->avctx = avctx; avcodec_get_frame_defaults((AVFrame*)&s->picture); avctx->coded_frame= (AVFrame*)&s->picture; ff_lzw_decode_open(&s->lzw); ff_ccitt_unpack_init(); return 0; } static av_cold int tiff_end(AVCodecContext *avctx) { TiffContext * const s = avctx->priv_data; ff_lzw_decode_close(&s->lzw); if(s->picture.data[0]) avctx->release_buffer(avctx, &s->picture); return 0; } AVCodec tiff_decoder = { "tiff", AVMEDIA_TYPE_VIDEO, CODEC_ID_TIFF, sizeof(TiffContext), tiff_init, NULL, tiff_end, decode_frame, CODEC_CAP_DR1, NULL, .long_name = NULL_IF_CONFIG_SMALL("TIFF image"), };
123linslouis-android-video-cutter
jni/libavcodec/tiff.c
C
asf20
17,985
/* * JPEG-LS encoder * Copyright (c) 2003 Michael Niedermayer * Copyright (c) 2006 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * JPEG-LS encoder. */ #include "avcodec.h" #include "get_bits.h" #include "golomb.h" #include "mathops.h" #include "dsputil.h" #include "mjpeg.h" #include "jpegls.h" /** * Encode error from regular symbol */ static inline void ls_encode_regular(JLSState *state, PutBitContext *pb, int Q, int err){ int k; int val; int map; for(k = 0; (state->N[Q] << k) < state->A[Q]; k++); map = !state->near && !k && (2 * state->B[Q] <= -state->N[Q]); if(err < 0) err += state->range; if(err >= ((state->range + 1) >> 1)) { err -= state->range; val = 2 * FFABS(err) - 1 - map; } else val = 2 * err + map; set_ur_golomb_jpegls(pb, val, k, state->limit, state->qbpp); ff_jpegls_update_state_regular(state, Q, err); } /** * Encode error from run termination */ static inline void ls_encode_runterm(JLSState *state, PutBitContext *pb, int RItype, int err, int limit_add){ int k; int val, map; int Q = 365 + RItype; int temp; temp = state->A[Q]; if(RItype) temp += state->N[Q] >> 1; for(k = 0; (state->N[Q] << k) < temp; k++); map = 0; if(!k && err && (2 * state->B[Q] < state->N[Q])) map = 1; if(err < 0) val = - (2 * err) - 1 - RItype + map; else val = 2 * 
err - RItype - map; set_ur_golomb_jpegls(pb, val, k, state->limit - limit_add - 1, state->qbpp); if(err < 0) state->B[Q]++; state->A[Q] += (val + 1 - RItype) >> 1; ff_jpegls_downscale_state(state, Q); } /** * Encode run value as specified by JPEG-LS standard */ static inline void ls_encode_run(JLSState *state, PutBitContext *pb, int run, int comp, int trail){ while(run >= (1 << ff_log2_run[state->run_index[comp]])){ put_bits(pb, 1, 1); run -= 1 << ff_log2_run[state->run_index[comp]]; if(state->run_index[comp] < 31) state->run_index[comp]++; } /* if hit EOL, encode another full run, else encode aborted run */ if(!trail && run) { put_bits(pb, 1, 1); }else if(trail){ put_bits(pb, 1, 0); if(ff_log2_run[state->run_index[comp]]) put_bits(pb, ff_log2_run[state->run_index[comp]], run); } } /** * Encode one line of image */ static inline void ls_encode_line(JLSState *state, PutBitContext *pb, void *last, void *cur, int last2, int w, int stride, int comp, int bits){ int x = 0; int Ra, Rb, Rc, Rd; int D0, D1, D2; while(x < w) { int err, pred, sign; /* compute gradients */ Ra = x ? R(cur, x - stride) : R(last, x); Rb = R(last, x); Rc = x ? R(last, x - stride) : last2; Rd = (x >= w - stride) ? R(last, x) : R(last, x + stride); D0 = Rd - Rb; D1 = Rb - Rc; D2 = Rc - Ra; /* run mode */ if((FFABS(D0) <= state->near) && (FFABS(D1) <= state->near) && (FFABS(D2) <= state->near)) { int RUNval, RItype, run; run = 0; RUNval = Ra; while(x < w && (FFABS(R(cur, x) - RUNval) <= state->near)){ run++; W(cur, x, Ra); x += stride; } ls_encode_run(state, pb, run, comp, x < w); if(x >= w) return; Rb = R(last, x); RItype = (FFABS(Ra - Rb) <= state->near); pred = RItype ? 
Ra : Rb; err = R(cur, x) - pred; if(!RItype && Ra > Rb) err = -err; if(state->near){ if(err > 0) err = (state->near + err) / state->twonear; else err = -(state->near - err) / state->twonear; if(RItype || (Rb >= Ra)) Ra = av_clip(pred + err * state->twonear, 0, state->maxval); else Ra = av_clip(pred - err * state->twonear, 0, state->maxval); W(cur, x, Ra); } if(err < 0) err += state->range; if(err >= ((state->range + 1) >> 1)) err -= state->range; ls_encode_runterm(state, pb, RItype, err, ff_log2_run[state->run_index[comp]]); if(state->run_index[comp] > 0) state->run_index[comp]--; } else { /* regular mode */ int context; context = ff_jpegls_quantize(state, D0) * 81 + ff_jpegls_quantize(state, D1) * 9 + ff_jpegls_quantize(state, D2); pred = mid_pred(Ra, Ra + Rb - Rc, Rb); if(context < 0){ context = -context; sign = 1; pred = av_clip(pred - state->C[context], 0, state->maxval); err = pred - R(cur, x); }else{ sign = 0; pred = av_clip(pred + state->C[context], 0, state->maxval); err = R(cur, x) - pred; } if(state->near){ if(err > 0) err = (state->near + err) / state->twonear; else err = -(state->near - err) / state->twonear; if(!sign) Ra = av_clip(pred + err * state->twonear, 0, state->maxval); else Ra = av_clip(pred - err * state->twonear, 0, state->maxval); W(cur, x, Ra); } ls_encode_regular(state, pb, context, err); } x += stride; } } static void ls_store_lse(JLSState *state, PutBitContext *pb){ /* Test if we have default params and don't need to store LSE */ JLSState state2; memset(&state2, 0, sizeof(JLSState)); state2.bpp = state->bpp; state2.near = state->near; ff_jpegls_reset_coding_parameters(&state2, 1); if(state->T1 == state2.T1 && state->T2 == state2.T2 && state->T3 == state2.T3 && state->reset == state2.reset) return; /* store LSE type 1 */ put_marker(pb, LSE); put_bits(pb, 16, 13); put_bits(pb, 8, 1); put_bits(pb, 16, state->maxval); put_bits(pb, 16, state->T1); put_bits(pb, 16, state->T2); put_bits(pb, 16, state->T3); put_bits(pb, 16, state->reset); } 
static int encode_picture_ls(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ JpeglsContext * const s = avctx->priv_data; AVFrame *pict = data; AVFrame * const p= (AVFrame*)&s->picture; const int near = avctx->prediction_method; PutBitContext pb, pb2; GetBitContext gb; uint8_t *buf2, *zero, *cur, *last; JLSState *state; int i, size; int comps; buf2 = av_malloc(buf_size); init_put_bits(&pb, buf, buf_size); init_put_bits(&pb2, buf2, buf_size); *p = *pict; p->pict_type= FF_I_TYPE; p->key_frame= 1; if(avctx->pix_fmt == PIX_FMT_GRAY8 || avctx->pix_fmt == PIX_FMT_GRAY16) comps = 1; else comps = 3; /* write our own JPEG header, can't use mjpeg_picture_header */ put_marker(&pb, SOI); put_marker(&pb, SOF48); put_bits(&pb, 16, 8 + comps * 3); // header size depends on components put_bits(&pb, 8, (avctx->pix_fmt == PIX_FMT_GRAY16) ? 16 : 8); // bpp put_bits(&pb, 16, avctx->height); put_bits(&pb, 16, avctx->width); put_bits(&pb, 8, comps); // components for(i = 1; i <= comps; i++) { put_bits(&pb, 8, i); // component ID put_bits(&pb, 8, 0x11); // subsampling: none put_bits(&pb, 8, 0); // Tiq, used by JPEG-LS ext } put_marker(&pb, SOS); put_bits(&pb, 16, 6 + comps * 2); put_bits(&pb, 8, comps); for(i = 1; i <= comps; i++) { put_bits(&pb, 8, i); // component ID put_bits(&pb, 8, 0); // mapping index: none } put_bits(&pb, 8, near); put_bits(&pb, 8, (comps > 1) ? 1 : 0); // interleaving: 0 - plane, 1 - line put_bits(&pb, 8, 0); // point transform: none state = av_mallocz(sizeof(JLSState)); /* initialize JPEG-LS state from JPEG parameters */ state->near = near; state->bpp = (avctx->pix_fmt == PIX_FMT_GRAY16) ? 
16 : 8; ff_jpegls_reset_coding_parameters(state, 0); ff_jpegls_init_state(state); ls_store_lse(state, &pb); zero = av_mallocz(p->linesize[0]); last = zero; cur = p->data[0]; if(avctx->pix_fmt == PIX_FMT_GRAY8){ int t = 0; for(i = 0; i < avctx->height; i++) { ls_encode_line(state, &pb2, last, cur, t, avctx->width, 1, 0, 8); t = last[0]; last = cur; cur += p->linesize[0]; } }else if(avctx->pix_fmt == PIX_FMT_GRAY16){ int t = 0; for(i = 0; i < avctx->height; i++) { ls_encode_line(state, &pb2, last, cur, t, avctx->width, 1, 0, 16); t = *((uint16_t*)last); last = cur; cur += p->linesize[0]; } }else if(avctx->pix_fmt == PIX_FMT_RGB24){ int j, width; int Rc[3] = {0, 0, 0}; width = avctx->width * 3; for(i = 0; i < avctx->height; i++) { for(j = 0; j < 3; j++) { ls_encode_line(state, &pb2, last + j, cur + j, Rc[j], width, 3, j, 8); Rc[j] = last[j]; } last = cur; cur += s->picture.linesize[0]; } }else if(avctx->pix_fmt == PIX_FMT_BGR24){ int j, width; int Rc[3] = {0, 0, 0}; width = avctx->width * 3; for(i = 0; i < avctx->height; i++) { for(j = 2; j >= 0; j--) { ls_encode_line(state, &pb2, last + j, cur + j, Rc[j], width, 3, j, 8); Rc[j] = last[j]; } last = cur; cur += s->picture.linesize[0]; } } av_free(zero); av_free(state); // the specification says that after doing 0xff escaping unused bits in the // last byte must be set to 0, so just append 7 "optional" zero-bits to // avoid special-casing. 
put_bits(&pb2, 7, 0); size = put_bits_count(&pb2); flush_put_bits(&pb2); /* do escape coding */ init_get_bits(&gb, buf2, size); size -= 7; while(get_bits_count(&gb) < size){ int v; v = get_bits(&gb, 8); put_bits(&pb, 8, v); if(v == 0xFF){ v = get_bits(&gb, 7); put_bits(&pb, 8, v); } } align_put_bits(&pb); av_free(buf2); /* End of image */ put_marker(&pb, EOI); flush_put_bits(&pb); emms_c(); return put_bits_count(&pb) >> 3; } static av_cold int encode_init_ls(AVCodecContext *ctx) { JpeglsContext *c = (JpeglsContext*)ctx->priv_data; c->avctx = ctx; ctx->coded_frame = &c->picture; if(ctx->pix_fmt != PIX_FMT_GRAY8 && ctx->pix_fmt != PIX_FMT_GRAY16 && ctx->pix_fmt != PIX_FMT_RGB24 && ctx->pix_fmt != PIX_FMT_BGR24){ av_log(ctx, AV_LOG_ERROR, "Only grayscale and RGB24/BGR24 images are supported\n"); return -1; } return 0; } AVCodec jpegls_encoder = { //FIXME avoid MPV_* lossless JPEG should not need them "jpegls", AVMEDIA_TYPE_VIDEO, CODEC_ID_JPEGLS, sizeof(JpeglsContext), encode_init_ls, encode_picture_ls, NULL, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_RGB24, PIX_FMT_GRAY8, PIX_FMT_GRAY16, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("JPEG-LS"), };
123linslouis-android-video-cutter
jni/libavcodec/jpeglsenc.c
C
asf20
12,228
/* * QDM2 compatible decoder * Copyright (c) 2003 Ewald Snel * Copyright (c) 2005 Benjamin Larsson * Copyright (c) 2005 Alex Beregszaszi * Copyright (c) 2005 Roberto Togni * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * QDM2 decoder * @author Ewald Snel, Benjamin Larsson, Alex Beregszaszi, Roberto Togni * The decoder is not perfect yet, there are still some distortions * especially on files encoded with 16 or 8 subbands. */ #include <math.h> #include <stddef.h> #include <stdio.h> #define ALT_BITSTREAM_READER_LE #include "avcodec.h" #include "get_bits.h" #include "dsputil.h" #include "fft.h" #include "mpegaudio.h" #include "qdm2data.h" #include "qdm2_tablegen.h" #undef NDEBUG #include <assert.h> #define QDM2_LIST_ADD(list, size, packet) \ do { \ if (size > 0) { \ list[size - 1].next = &list[size]; \ } \ list[size].packet = packet; \ list[size].next = NULL; \ size++; \ } while(0) // Result is 8, 16 or 30 #define QDM2_SB_USED(sub_sampling) (((sub_sampling) >= 2) ? 
30 : 8 << (sub_sampling)) #define FIX_NOISE_IDX(noise_idx) \ if ((noise_idx) >= 3840) \ (noise_idx) -= 3840; \ #define SB_DITHERING_NOISE(sb,noise_idx) (noise_table[(noise_idx)++] * sb_noise_attenuation[(sb)]) #define BITS_LEFT(length,gb) ((length) - get_bits_count ((gb))) #define SAMPLES_NEEDED \ av_log (NULL,AV_LOG_INFO,"This file triggers some untested code. Please contact the developers.\n"); #define SAMPLES_NEEDED_2(why) \ av_log (NULL,AV_LOG_INFO,"This file triggers some missing code. Please contact the developers.\nPosition: %s\n",why); typedef int8_t sb_int8_array[2][30][64]; /** * Subpacket */ typedef struct { int type; ///< subpacket type unsigned int size; ///< subpacket size const uint8_t *data; ///< pointer to subpacket data (points to input data buffer, it's not a private copy) } QDM2SubPacket; /** * A node in the subpacket list */ typedef struct QDM2SubPNode { QDM2SubPacket *packet; ///< packet struct QDM2SubPNode *next; ///< pointer to next packet in the list, NULL if leaf node } QDM2SubPNode; typedef struct { float re; float im; } QDM2Complex; typedef struct { float level; QDM2Complex *complex; const float *table; int phase; int phase_shift; int duration; short time_index; short cutoff; } FFTTone; typedef struct { int16_t sub_packet; uint8_t channel; int16_t offset; int16_t exp; uint8_t phase; } FFTCoefficient; typedef struct { DECLARE_ALIGNED(16, QDM2Complex, complex)[MPA_MAX_CHANNELS][256]; } QDM2FFT; /** * QDM2 decoder context */ typedef struct { /// Parameters from codec header, do not change during playback int nb_channels; ///< number of channels int channels; ///< number of channels int group_size; ///< size of frame group (16 frames per group) int fft_size; ///< size of FFT, in complex numbers int checksum_size; ///< size of data block, used also for checksum /// Parameters built from header parameters, do not change during playback int group_order; ///< order of frame group int fft_order; ///< order of FFT (actually fftorder+1) int 
fft_frame_size; ///< size of fft frame, in components (1 comples = re + im) int frame_size; ///< size of data frame int frequency_range; int sub_sampling; ///< subsampling: 0=25%, 1=50%, 2=100% */ int coeff_per_sb_select; ///< selector for "num. of coeffs. per subband" tables. Can be 0, 1, 2 int cm_table_select; ///< selector for "coding method" tables. Can be 0, 1 (from init: 0-4) /// Packets and packet lists QDM2SubPacket sub_packets[16]; ///< the packets themselves QDM2SubPNode sub_packet_list_A[16]; ///< list of all packets QDM2SubPNode sub_packet_list_B[16]; ///< FFT packets B are on list int sub_packets_B; ///< number of packets on 'B' list QDM2SubPNode sub_packet_list_C[16]; ///< packets with errors? QDM2SubPNode sub_packet_list_D[16]; ///< DCT packets /// FFT and tones FFTTone fft_tones[1000]; int fft_tone_start; int fft_tone_end; FFTCoefficient fft_coefs[1000]; int fft_coefs_index; int fft_coefs_min_index[5]; int fft_coefs_max_index[5]; int fft_level_exp[6]; RDFTContext rdft_ctx; QDM2FFT fft; /// I/O data const uint8_t *compressed_data; int compressed_size; float output_buffer[1024]; /// Synthesis filter DECLARE_ALIGNED(16, MPA_INT, synth_buf)[MPA_MAX_CHANNELS][512*2]; int synth_buf_offset[MPA_MAX_CHANNELS]; DECLARE_ALIGNED(16, int32_t, sb_samples)[MPA_MAX_CHANNELS][128][SBLIMIT]; /// Mixed temporary data used in decoding float tone_level[MPA_MAX_CHANNELS][30][64]; int8_t coding_method[MPA_MAX_CHANNELS][30][64]; int8_t quantized_coeffs[MPA_MAX_CHANNELS][10][8]; int8_t tone_level_idx_base[MPA_MAX_CHANNELS][30][8]; int8_t tone_level_idx_hi1[MPA_MAX_CHANNELS][3][8][8]; int8_t tone_level_idx_mid[MPA_MAX_CHANNELS][26][8]; int8_t tone_level_idx_hi2[MPA_MAX_CHANNELS][26]; int8_t tone_level_idx[MPA_MAX_CHANNELS][30][64]; int8_t tone_level_idx_temp[MPA_MAX_CHANNELS][30][64]; // Flags int has_errors; ///< packet has errors int superblocktype_2_3; ///< select fft tables and some algorithm based on superblock type int do_synth_filter; ///< used to perform or skip 
synthesis filter int sub_packet; int noise_idx; ///< index for dithering noise table } QDM2Context; static uint8_t empty_buffer[FF_INPUT_BUFFER_PADDING_SIZE]; static VLC vlc_tab_level; static VLC vlc_tab_diff; static VLC vlc_tab_run; static VLC fft_level_exp_alt_vlc; static VLC fft_level_exp_vlc; static VLC fft_stereo_exp_vlc; static VLC fft_stereo_phase_vlc; static VLC vlc_tab_tone_level_idx_hi1; static VLC vlc_tab_tone_level_idx_mid; static VLC vlc_tab_tone_level_idx_hi2; static VLC vlc_tab_type30; static VLC vlc_tab_type34; static VLC vlc_tab_fft_tone_offset[5]; static const uint16_t qdm2_vlc_offs[] = { 0,260,566,598,894,1166,1230,1294,1678,1950,2214,2278,2310,2570,2834,3124,3448,3838, }; static av_cold void qdm2_init_vlc(void) { static int vlcs_initialized = 0; static VLC_TYPE qdm2_table[3838][2]; if (!vlcs_initialized) { vlc_tab_level.table = &qdm2_table[qdm2_vlc_offs[0]]; vlc_tab_level.table_allocated = qdm2_vlc_offs[1] - qdm2_vlc_offs[0]; init_vlc (&vlc_tab_level, 8, 24, vlc_tab_level_huffbits, 1, 1, vlc_tab_level_huffcodes, 2, 2, INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); vlc_tab_diff.table = &qdm2_table[qdm2_vlc_offs[1]]; vlc_tab_diff.table_allocated = qdm2_vlc_offs[2] - qdm2_vlc_offs[1]; init_vlc (&vlc_tab_diff, 8, 37, vlc_tab_diff_huffbits, 1, 1, vlc_tab_diff_huffcodes, 2, 2, INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); vlc_tab_run.table = &qdm2_table[qdm2_vlc_offs[2]]; vlc_tab_run.table_allocated = qdm2_vlc_offs[3] - qdm2_vlc_offs[2]; init_vlc (&vlc_tab_run, 5, 6, vlc_tab_run_huffbits, 1, 1, vlc_tab_run_huffcodes, 1, 1, INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); fft_level_exp_alt_vlc.table = &qdm2_table[qdm2_vlc_offs[3]]; fft_level_exp_alt_vlc.table_allocated = qdm2_vlc_offs[4] - qdm2_vlc_offs[3]; init_vlc (&fft_level_exp_alt_vlc, 8, 28, fft_level_exp_alt_huffbits, 1, 1, fft_level_exp_alt_huffcodes, 2, 2, INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); fft_level_exp_vlc.table = &qdm2_table[qdm2_vlc_offs[4]]; fft_level_exp_vlc.table_allocated = qdm2_vlc_offs[5] - 
qdm2_vlc_offs[4]; init_vlc (&fft_level_exp_vlc, 8, 20, fft_level_exp_huffbits, 1, 1, fft_level_exp_huffcodes, 2, 2, INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); fft_stereo_exp_vlc.table = &qdm2_table[qdm2_vlc_offs[5]]; fft_stereo_exp_vlc.table_allocated = qdm2_vlc_offs[6] - qdm2_vlc_offs[5]; init_vlc (&fft_stereo_exp_vlc, 6, 7, fft_stereo_exp_huffbits, 1, 1, fft_stereo_exp_huffcodes, 1, 1, INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); fft_stereo_phase_vlc.table = &qdm2_table[qdm2_vlc_offs[6]]; fft_stereo_phase_vlc.table_allocated = qdm2_vlc_offs[7] - qdm2_vlc_offs[6]; init_vlc (&fft_stereo_phase_vlc, 6, 9, fft_stereo_phase_huffbits, 1, 1, fft_stereo_phase_huffcodes, 1, 1, INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); vlc_tab_tone_level_idx_hi1.table = &qdm2_table[qdm2_vlc_offs[7]]; vlc_tab_tone_level_idx_hi1.table_allocated = qdm2_vlc_offs[8] - qdm2_vlc_offs[7]; init_vlc (&vlc_tab_tone_level_idx_hi1, 8, 20, vlc_tab_tone_level_idx_hi1_huffbits, 1, 1, vlc_tab_tone_level_idx_hi1_huffcodes, 2, 2, INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); vlc_tab_tone_level_idx_mid.table = &qdm2_table[qdm2_vlc_offs[8]]; vlc_tab_tone_level_idx_mid.table_allocated = qdm2_vlc_offs[9] - qdm2_vlc_offs[8]; init_vlc (&vlc_tab_tone_level_idx_mid, 8, 24, vlc_tab_tone_level_idx_mid_huffbits, 1, 1, vlc_tab_tone_level_idx_mid_huffcodes, 2, 2, INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); vlc_tab_tone_level_idx_hi2.table = &qdm2_table[qdm2_vlc_offs[9]]; vlc_tab_tone_level_idx_hi2.table_allocated = qdm2_vlc_offs[10] - qdm2_vlc_offs[9]; init_vlc (&vlc_tab_tone_level_idx_hi2, 8, 24, vlc_tab_tone_level_idx_hi2_huffbits, 1, 1, vlc_tab_tone_level_idx_hi2_huffcodes, 2, 2, INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); vlc_tab_type30.table = &qdm2_table[qdm2_vlc_offs[10]]; vlc_tab_type30.table_allocated = qdm2_vlc_offs[11] - qdm2_vlc_offs[10]; init_vlc (&vlc_tab_type30, 6, 9, vlc_tab_type30_huffbits, 1, 1, vlc_tab_type30_huffcodes, 1, 1, INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); vlc_tab_type34.table = &qdm2_table[qdm2_vlc_offs[11]]; 
vlc_tab_type34.table_allocated = qdm2_vlc_offs[12] - qdm2_vlc_offs[11]; init_vlc (&vlc_tab_type34, 5, 10, vlc_tab_type34_huffbits, 1, 1, vlc_tab_type34_huffcodes, 1, 1, INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); vlc_tab_fft_tone_offset[0].table = &qdm2_table[qdm2_vlc_offs[12]]; vlc_tab_fft_tone_offset[0].table_allocated = qdm2_vlc_offs[13] - qdm2_vlc_offs[12]; init_vlc (&vlc_tab_fft_tone_offset[0], 8, 23, vlc_tab_fft_tone_offset_0_huffbits, 1, 1, vlc_tab_fft_tone_offset_0_huffcodes, 2, 2, INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); vlc_tab_fft_tone_offset[1].table = &qdm2_table[qdm2_vlc_offs[13]]; vlc_tab_fft_tone_offset[1].table_allocated = qdm2_vlc_offs[14] - qdm2_vlc_offs[13]; init_vlc (&vlc_tab_fft_tone_offset[1], 8, 28, vlc_tab_fft_tone_offset_1_huffbits, 1, 1, vlc_tab_fft_tone_offset_1_huffcodes, 2, 2, INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); vlc_tab_fft_tone_offset[2].table = &qdm2_table[qdm2_vlc_offs[14]]; vlc_tab_fft_tone_offset[2].table_allocated = qdm2_vlc_offs[15] - qdm2_vlc_offs[14]; init_vlc (&vlc_tab_fft_tone_offset[2], 8, 32, vlc_tab_fft_tone_offset_2_huffbits, 1, 1, vlc_tab_fft_tone_offset_2_huffcodes, 2, 2, INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); vlc_tab_fft_tone_offset[3].table = &qdm2_table[qdm2_vlc_offs[15]]; vlc_tab_fft_tone_offset[3].table_allocated = qdm2_vlc_offs[16] - qdm2_vlc_offs[15]; init_vlc (&vlc_tab_fft_tone_offset[3], 8, 35, vlc_tab_fft_tone_offset_3_huffbits, 1, 1, vlc_tab_fft_tone_offset_3_huffcodes, 2, 2, INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); vlc_tab_fft_tone_offset[4].table = &qdm2_table[qdm2_vlc_offs[16]]; vlc_tab_fft_tone_offset[4].table_allocated = qdm2_vlc_offs[17] - qdm2_vlc_offs[16]; init_vlc (&vlc_tab_fft_tone_offset[4], 8, 38, vlc_tab_fft_tone_offset_4_huffbits, 1, 1, vlc_tab_fft_tone_offset_4_huffcodes, 2, 2, INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); vlcs_initialized=1; } } /* for floating point to fixed point conversion */ static const float f2i_scale = (float) (1 << (FRAC_BITS - 15)); static int qdm2_get_vlc (GetBitContext 
*gb, VLC *vlc, int flag, int depth) { int value; value = get_vlc2(gb, vlc->table, vlc->bits, depth); /* stage-2, 3 bits exponent escape sequence */ if (value-- == 0) value = get_bits (gb, get_bits (gb, 3) + 1); /* stage-3, optional */ if (flag) { int tmp = vlc_stage3_values[value]; if ((value & ~3) > 0) tmp += get_bits (gb, (value >> 2)); value = tmp; } return value; } static int qdm2_get_se_vlc (VLC *vlc, GetBitContext *gb, int depth) { int value = qdm2_get_vlc (gb, vlc, 0, depth); return (value & 1) ? ((value + 1) >> 1) : -(value >> 1); } /** * QDM2 checksum * * @param data pointer to data to be checksum'ed * @param length data length * @param value checksum value * * @return 0 if checksum is OK */ static uint16_t qdm2_packet_checksum (const uint8_t *data, int length, int value) { int i; for (i=0; i < length; i++) value -= data[i]; return (uint16_t)(value & 0xffff); } /** * Fills a QDM2SubPacket structure with packet type, size, and data pointer. * * @param gb bitreader context * @param sub_packet packet under analysis */ static void qdm2_decode_sub_packet_header (GetBitContext *gb, QDM2SubPacket *sub_packet) { sub_packet->type = get_bits (gb, 8); if (sub_packet->type == 0) { sub_packet->size = 0; sub_packet->data = NULL; } else { sub_packet->size = get_bits (gb, 8); if (sub_packet->type & 0x80) { sub_packet->size <<= 8; sub_packet->size |= get_bits (gb, 8); sub_packet->type &= 0x7f; } if (sub_packet->type == 0x7f) sub_packet->type |= (get_bits (gb, 8) << 8); sub_packet->data = &gb->buffer[get_bits_count(gb) / 8]; // FIXME: this depends on bitreader internal data } av_log(NULL,AV_LOG_DEBUG,"Subpacket: type=%d size=%d start_offs=%x\n", sub_packet->type, sub_packet->size, get_bits_count(gb) / 8); } /** * Return node pointer to first packet of requested type in list. 
* * @param list list of subpackets to be scanned * @param type type of searched subpacket * @return node pointer for subpacket if found, else NULL */ static QDM2SubPNode* qdm2_search_subpacket_type_in_list (QDM2SubPNode *list, int type) { while (list != NULL && list->packet != NULL) { if (list->packet->type == type) return list; list = list->next; } return NULL; } /** * Replaces 8 elements with their average value. * Called by qdm2_decode_superblock before starting subblock decoding. * * @param q context */ static void average_quantized_coeffs (QDM2Context *q) { int i, j, n, ch, sum; n = coeff_per_sb_for_avg[q->coeff_per_sb_select][QDM2_SB_USED(q->sub_sampling) - 1] + 1; for (ch = 0; ch < q->nb_channels; ch++) for (i = 0; i < n; i++) { sum = 0; for (j = 0; j < 8; j++) sum += q->quantized_coeffs[ch][i][j]; sum /= 8; if (sum > 0) sum--; for (j=0; j < 8; j++) q->quantized_coeffs[ch][i][j] = sum; } } /** * Build subband samples with noise weighted by q->tone_level. * Called by synthfilt_build_sb_samples. * * @param q context * @param sb subband index */ static void build_sb_samples_from_noise (QDM2Context *q, int sb) { int ch, j; FIX_NOISE_IDX(q->noise_idx); if (!q->nb_channels) return; for (ch = 0; ch < q->nb_channels; ch++) for (j = 0; j < 64; j++) { q->sb_samples[ch][j * 2][sb] = (int32_t)(f2i_scale * SB_DITHERING_NOISE(sb,q->noise_idx) * q->tone_level[ch][sb][j] + .5); q->sb_samples[ch][j * 2 + 1][sb] = (int32_t)(f2i_scale * SB_DITHERING_NOISE(sb,q->noise_idx) * q->tone_level[ch][sb][j] + .5); } } /** * Called while processing data from subpackets 11 and 12. * Used after making changes to coding_method array. 
 *
 * @param sb               subband index
 * @param channels         number of channels
 * @param coding_method    q->coding_method[0][0][0]
 */
static void fix_coding_method_array (int sb, int channels, sb_int8_array coding_method)
{
    int j,k;
    int ch;
    int run, case_val;
    /* maps (coding_method - 8) to one of 6 run/case classes; 5 = default */
    int switchtable[23] = {0,5,1,5,5,5,5,5,2,5,5,5,5,5,5,5,3,5,5,5,5,5,4};

    for (ch = 0; ch < channels; ch++) {
        for (j = 0; j < 64; ) {
            if((coding_method[ch][sb][j] - 8) > 22) {
                run = 1;
                case_val = 8;
            } else {
                switch (switchtable[coding_method[ch][sb][j]-8]) {
                    case 0: run = 10; case_val = 10; break;
                    case 1: run = 1; case_val = 16; break;
                    case 2: run = 5; case_val = 24; break;
                    case 3: run = 3; case_val = 30; break;
                    case 4: run = 1; case_val = 30; break;
                    case 5: run = 1; case_val = 8; break;
                    default: run = 1; case_val = 8; break;
                }
            }
            for (k = 0; k < run; k++)
                if (j + k < 128)
                    if (coding_method[ch][sb + (j + k) / 64][(j + k) % 64] > coding_method[ch][sb][j])
                        if (k > 0) {
                            SAMPLES_NEEDED
                            //not debugged, almost never used
                            /* NOTE(review): the second memset writes the same destination
                             * with a fixed length of 3, overwriting part of the first —
                             * looks suspicious but is kept as-is (never hit in samples). */
                            memset(&coding_method[ch][sb][j + k], case_val, k * sizeof(int8_t));
                            memset(&coding_method[ch][sb][j + k], case_val, 3 * sizeof(int8_t));
                        }
            j += run;
        }
    }
}

/**
 * Related to synthesis filter
 * Called by process_subpacket_10
 *
 * @param q       context
 * @param flag    1 if called after getting data from subpacket 10, 0 if no subpacket 10
 */
static void fill_tone_level_array (QDM2Context *q, int flag)
{
    int i, sb, ch, sb_used;
    int tmp, tab;

    // This should never happen
    if (q->nb_channels <= 0)
        return;

    /* build tone_level_idx_base by dequantizing quantized_coeffs,
     * interpolating between two coefficient rows where available */
    for (ch = 0; ch < q->nb_channels; ch++)
        for (sb = 0; sb < 30; sb++)
            for (i = 0; i < 8; i++) {
                if ((tab=coeff_per_sb_for_dequant[q->coeff_per_sb_select][sb]) < (last_coeff[q->coeff_per_sb_select] - 1))
                    tmp = q->quantized_coeffs[ch][tab + 1][i] * dequant_table[q->coeff_per_sb_select][tab + 1][sb]+
                          q->quantized_coeffs[ch][tab][i] * dequant_table[q->coeff_per_sb_select][tab][sb];
                else
                    tmp = q->quantized_coeffs[ch][tab][i] * dequant_table[q->coeff_per_sb_select][tab][sb];
                if(tmp < 0)
                    tmp += 0xff;
                q->tone_level_idx_base[ch][sb][i] = (tmp / 256) & 0xff;
            }

    sb_used = QDM2_SB_USED(q->sub_sampling);

    if ((q->superblocktype_2_3 != 0) && !flag) {
        /* no subpacket 10 data: derive tone levels from the base indexes only */
        for (sb = 0; sb < sb_used; sb++)
            for (ch = 0; ch < q->nb_channels; ch++)
                for (i = 0; i < 64; i++) {
                    q->tone_level_idx[ch][sb][i] = q->tone_level_idx_base[ch][sb][i / 8];
                    if (q->tone_level_idx[ch][sb][i] < 0)
                        q->tone_level[ch][sb][i] = 0;
                    else
                        q->tone_level[ch][sb][i] = fft_tone_level_table[0][q->tone_level_idx[ch][sb][i] & 0x3f];
                }
    } else {
        /* combine base indexes with the hi1/hi2/mid corrections decoded from subpacket 10 */
        tab = q->superblocktype_2_3 ? 0 : 1;
        for (sb = 0; sb < sb_used; sb++) {
            if ((sb >= 4) && (sb <= 23)) {
                for (ch = 0; ch < q->nb_channels; ch++)
                    for (i = 0; i < 64; i++) {
                        tmp = q->tone_level_idx_base[ch][sb][i / 8] -
                              q->tone_level_idx_hi1[ch][sb / 8][i / 8][i % 8] -
                              q->tone_level_idx_mid[ch][sb - 4][i / 8] -
                              q->tone_level_idx_hi2[ch][sb - 4];
                        q->tone_level_idx[ch][sb][i] = tmp & 0xff;
                        if ((tmp < 0) || (!q->superblocktype_2_3 && !tmp))
                            q->tone_level[ch][sb][i] = 0;
                        else
                            q->tone_level[ch][sb][i] = fft_tone_level_table[tab][tmp & 0x3f];
                    }
            } else {
                if (sb > 4) {
                    /* high subbands: no mid correction, hi1 row fixed at 2 */
                    for (ch = 0; ch < q->nb_channels; ch++)
                        for (i = 0; i < 64; i++) {
                            tmp = q->tone_level_idx_base[ch][sb][i / 8] -
                                  q->tone_level_idx_hi1[ch][2][i / 8][i % 8] -
                                  q->tone_level_idx_hi2[ch][sb - 4];
                            q->tone_level_idx[ch][sb][i] = tmp & 0xff;
                            if ((tmp < 0) || (!q->superblocktype_2_3 && !tmp))
                                q->tone_level[ch][sb][i] = 0;
                            else
                                q->tone_level[ch][sb][i] = fft_tone_level_table[tab][tmp & 0x3f];
                        }
                } else {
                    /* lowest subbands: base indexes used directly */
                    for (ch = 0; ch < q->nb_channels; ch++)
                        for (i = 0; i < 64; i++) {
                            tmp = q->tone_level_idx[ch][sb][i] = q->tone_level_idx_base[ch][sb][i / 8];
                            if ((tmp < 0) || (!q->superblocktype_2_3 && !tmp))
                                q->tone_level[ch][sb][i] = 0;
                            else
                                q->tone_level[ch][sb][i] = fft_tone_level_table[tab][tmp & 0x3f];
                        }
                }
            }
        }
    }

    return;
}

/**
 * Related to synthesis filter
 * Called by process_subpacket_11
 * c is built with data from subpacket 11
 * Most of this function is used only if superblock_type_2_3 == 0, never seen it in samples
 *
 * @param tone_level_idx
 * @param tone_level_idx_temp
 * @param coding_method
 *                            q->coding_method[0][0][0]
 * @param nb_channels         number of channels
 * @param c                   coming from subpacket 11, passed as 8*c
 * @param superblocktype_2_3  flag based on superblock packet type
 * @param cm_table_select     q->cm_table_select
 */
static void fill_coding_method_array (sb_int8_array tone_level_idx, sb_int8_array tone_level_idx_temp,
                sb_int8_array coding_method, int nb_channels,
                int c, int superblocktype_2_3, int cm_table_select)
{
    int ch, sb, j;
    int tmp, acc, esp_40, comp;
    int add1, add2, add3, add4;
    int64_t multres;

    // This should never happen
    if (nb_channels <= 0)
        return;

    if (!superblocktype_2_3) {
        /* This case is untested, no samples available */
        SAMPLES_NEEDED
        /* smooth tone_level_idx into tone_level_idx_temp using neighbour subbands */
        for (ch = 0; ch < nb_channels; ch++)
            for (sb = 0; sb < 30; sb++) {
                for (j = 1; j < 63; j++) {  // The loop only iterates to 63 so the code doesn't overflow the buffer
                    add1 = tone_level_idx[ch][sb][j] - 10;
                    if (add1 < 0)
                        add1 = 0;
                    add2 = add3 = add4 = 0;
                    if (sb > 1) {
                        add2 = tone_level_idx[ch][sb - 2][j] + tone_level_idx_offset_table[sb][0] - 6;
                        if (add2 < 0)
                            add2 = 0;
                    }
                    if (sb > 0) {
                        add3 = tone_level_idx[ch][sb - 1][j] + tone_level_idx_offset_table[sb][1] - 6;
                        if (add3 < 0)
                            add3 = 0;
                    }
                    if (sb < 29) {
                        add4 = tone_level_idx[ch][sb + 1][j] + tone_level_idx_offset_table[sb][3] - 6;
                        if (add4 < 0)
                            add4 = 0;
                    }
                    tmp = tone_level_idx[ch][sb][j + 1] * 2 - add4 - add3 - add2 - add1;
                    if (tmp < 0)
                        tmp = 0;
                    tone_level_idx_temp[ch][sb][j + 1] = tmp & 0xff;
                }
                tone_level_idx_temp[ch][sb][0] = tone_level_idx_temp[ch][sb][1];
            }

            acc = 0;
            for (ch = 0; ch < nb_channels; ch++)
                for (sb = 0; sb < 30; sb++)
                    for (j = 0; j < 64; j++)
                        acc += tone_level_idx_temp[ch][sb][j];

            /* fixed-point reciprocal: 0x66666667 is the magic constant for division by 5,
             * reproducing the original binary's arithmetic exactly */
            multres = 0x66666667 * (acc * 10);
            esp_40 = (multres >> 32) / 8 + ((multres & 0xffffffff) >> 31);
            for (ch = 0;  ch < nb_channels; ch++)
                for (sb = 0; sb < 30; sb++)
                    for (j = 0; j < 64; j++) {
                        comp = tone_level_idx_temp[ch][sb][j]* esp_40 * 10;
                        if (comp < 0)
                            comp += 0xff;
                        comp /= 256; // signed shift
                        switch(sb) {
                            case 0:
                                if (comp < 30)
                                    comp = 30;
                                comp += 15;
                                break;
                            case 1:
                                if (comp < 24)
                                    comp = 24;
                                comp += 10;
                                break;
                            case 2:
                            case 3:
                            case 4:
                                if (comp < 16)
                                    comp = 16;
                        }
                        /* quantize comp into one of the coding method classes */
                        if (comp <= 5)
                            tmp = 0;
                        else if (comp <= 10)
                            tmp = 10;
                        else if (comp <= 16)
                            tmp = 16;
                        else if (comp <= 24)
                            tmp = -1;
                        else
                            tmp = 0;
                        coding_method[ch][sb][j] = ((tmp & 0xfffa) + 30 )& 0xff;
                    }
            for (sb = 0; sb < 30; sb++)
                fix_coding_method_array(sb, nb_channels, coding_method);
            /* clamp methods to per-subband minima */
            for (ch = 0; ch < nb_channels; ch++)
                for (sb = 0; sb < 30; sb++)
                    for (j = 0; j < 64; j++)
                        if (sb >= 10) {
                            if (coding_method[ch][sb][j] < 10)
                                coding_method[ch][sb][j] = 10;
                        } else {
                            if (sb >= 2) {
                                if (coding_method[ch][sb][j] < 16)
                                    coding_method[ch][sb][j] = 16;
                            } else {
                                if (coding_method[ch][sb][j] < 30)
                                    coding_method[ch][sb][j] = 30;
                            }
                        }
    } else { // superblocktype_2_3 != 0
        /* common case: fixed per-subband coding methods from a table */
        for (ch = 0; ch < nb_channels; ch++)
            for (sb = 0; sb < 30; sb++)
                for (j = 0; j < 64; j++)
                    coding_method[ch][sb][j] = coding_method_table[cm_table_select][sb];
    }

    return;
}

/**
 *
 * Called by process_subpacket_11 to process more data from subpacket 11 with sb 0-8
 * Called by process_subpacket_12 to process data from subpacket 12 with sb 8-sb_used
 *
 * @param q         context
 * @param gb        bitreader context
 * @param length    packet length in bits
 * @param sb_min    lower subband processed (sb_min included)
 * @param sb_max    higher subband processed (sb_max excluded)
 */
static void synthfilt_build_sb_samples (QDM2Context *q, GetBitContext *gb, int length, int sb_min, int sb_max)
{
    int sb, j, k, n, ch, run, channels;
    int joined_stereo, zero_encoding, chs;
    int type34_first;
    float type34_div = 0;
    float type34_predictor;
    float samples[10], sign_bits[16];

    if (length == 0) {
        // If no data use noise
        for (sb=sb_min; sb < sb_max; sb++)
            build_sb_samples_from_noise (q, sb);

        return;
    }

    for (sb = sb_min; sb < sb_max; sb++) {
        FIX_NOISE_IDX(q->noise_idx);

        channels = q->nb_channels;

        /* joined stereo: forced off below sb 12, forced on from sb 24, signalled in between */
        if (q->nb_channels <= 1 || sb < 12)
            joined_stereo = 0;
        else if (sb >= 24)
            joined_stereo = 1;
        else
            joined_stereo = (BITS_LEFT(length,gb) >= 1) ? get_bits1 (gb) : 0;

        if (joined_stereo) {
            if (BITS_LEFT(length,gb) >= 16)
                for (j = 0; j < 16; j++)
                    sign_bits[j] = get_bits1 (gb);

            /* merge both channels' coding methods into channel 0, then decode once */
            for (j = 0; j < 64; j++)
                if (q->coding_method[1][sb][j] > q->coding_method[0][sb][j])
                    q->coding_method[0][sb][j] = q->coding_method[1][sb][j];

            fix_coding_method_array(sb, q->nb_channels, q->coding_method);
            channels = 1;
        }

        for (ch = 0; ch < channels; ch++) {
            zero_encoding = (BITS_LEFT(length,gb) >= 1) ? get_bits1(gb) : 0;
            type34_predictor = 0.0;
            type34_first = 1;

            for (j = 0; j < 128; ) {
                /* each coding method decodes a fixed-length run of samples;
                 * on bitstream underrun, dithering noise is substituted */
                switch (q->coding_method[ch][sb][j / 2]) {
                    case 8:
                        if (BITS_LEFT(length,gb) >= 10) {
                            if (zero_encoding) {
                                for (k = 0; k < 5; k++) {
                                    if ((j + 2 * k) >= 128)
                                        break;
                                    samples[2 * k] = get_bits1(gb) ? dequant_1bit[joined_stereo][2 * get_bits1(gb)] : 0;
                                }
                            } else {
                                n = get_bits(gb, 8);
                                for (k = 0; k < 5; k++)
                                    samples[2 * k] = dequant_1bit[joined_stereo][random_dequant_index[n][k]];
                            }
                            /* odd samples are always noise */
                            for (k = 0; k < 5; k++)
                                samples[2 * k + 1] = SB_DITHERING_NOISE(sb,q->noise_idx);
                        } else {
                            for (k = 0; k < 10; k++)
                                samples[k] = SB_DITHERING_NOISE(sb,q->noise_idx);
                        }
                        run = 10;
                        break;

                    case 10:
                        if (BITS_LEFT(length,gb) >= 1) {
                            float f = 0.81;

                            if (get_bits1(gb))
                                f = -f;
                            f -= noise_samples[((sb + 1) * (j +5 * ch + 1)) & 127] * 9.0 / 40.0;
                            samples[0] = f;
                        } else {
                            samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
                        }
                        run = 1;
                        break;

                    case 16:
                        if (BITS_LEFT(length,gb) >= 10) {
                            if (zero_encoding) {
                                for (k = 0; k < 5; k++) {
                                    if ((j + k) >= 128)
                                        break;
                                    samples[k] = (get_bits1(gb) == 0) ? 0 : dequant_1bit[joined_stereo][2 * get_bits1(gb)];
                                }
                            } else {
                                n = get_bits (gb, 8);
                                for (k = 0; k < 5; k++)
                                    samples[k] = dequant_1bit[joined_stereo][random_dequant_index[n][k]];
                            }
                        } else {
                            for (k = 0; k < 5; k++)
                                samples[k] = SB_DITHERING_NOISE(sb,q->noise_idx);
                        }
                        run = 5;
                        break;

                    case 24:
                        if (BITS_LEFT(length,gb) >= 7) {
                            n = get_bits(gb, 7);
                            for (k = 0; k < 3; k++)
                                samples[k] = (random_dequant_type24[n][k] - 2.0) * 0.5;
                        } else {
                            for (k = 0; k < 3; k++)
                                samples[k] = SB_DITHERING_NOISE(sb,q->noise_idx);
                        }
                        run = 3;
                        break;

                    case 30:
                        if (BITS_LEFT(length,gb) >= 4)
                            samples[0] = type30_dequant[qdm2_get_vlc(gb, &vlc_tab_type30, 0, 1)];
                        else
                            samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);

                        run = 1;
                        break;

                    case 34:
                        /* DPCM: first sample carries scale + start value, the rest are deltas */
                        if (BITS_LEFT(length,gb) >= 7) {
                            if (type34_first) {
                                type34_div = (float)(1 << get_bits(gb, 2));
                                samples[0] = ((float)get_bits(gb, 5) - 16.0) / 15.0;
                                type34_predictor = samples[0];
                                type34_first = 0;
                            } else {
                                samples[0] = type34_delta[qdm2_get_vlc(gb, &vlc_tab_type34, 0, 1)] / type34_div + type34_predictor;
                                type34_predictor = samples[0];
                            }
                        } else {
                            samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
                        }
                        run = 1;
                        break;

                    default:
                        samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
                        run = 1;
                        break;
                }

                if (joined_stereo) {
                    float tmp[10][MPA_MAX_CHANNELS];

                    /* channel 1 is channel 0 with per-8-sample sign flips */
                    for (k = 0; k < run; k++) {
                        tmp[k][0] = samples[k];
                        tmp[k][1] = (sign_bits[(j + k) / 8]) ? -samples[k] : samples[k];
                    }
                    for (chs = 0; chs < q->nb_channels; chs++)
                        for (k = 0; k < run; k++)
                            if ((j + k) < 128)
                                q->sb_samples[chs][j + k][sb] = (int32_t)(f2i_scale * q->tone_level[chs][sb][((j + k)/2)] * tmp[k][chs] + .5);
                } else {
                    for (k = 0; k < run; k++)
                        if ((j + k) < 128)
                            q->sb_samples[ch][j + k][sb] = (int32_t)(f2i_scale * q->tone_level[ch][sb][(j + k)/2] * samples[k] + .5);
                }

                j += run;
            } // j loop
        } // channel loop
    } // subband loop
}

/**
 * Init the first element of a channel in quantized_coeffs with data from packet 10 (quantized_coeffs[ch][0]).
 * This is similar to process_subpacket_9, but for a single channel and for element [0]
 * same VLC tables as process_subpacket_9 are used.
 *
 * @param quantized_coeffs    pointer to quantized_coeffs[ch][0]
 * @param gb                  bitreader context
 * @param length              packet length in bits
 */
static void init_quantized_coeffs_elem0 (int8_t *quantized_coeffs, GetBitContext *gb, int length)
{
    int i, k, run, level, diff;

    if (BITS_LEFT(length,gb) < 16)
        return;
    level = qdm2_get_vlc(gb, &vlc_tab_level, 0, 2);

    quantized_coeffs[0] = level;

    /* run-length decode: each (run, diff) pair linearly interpolates up to the next level */
    for (i = 0; i < 7; ) {
        if (BITS_LEFT(length,gb) < 16)
            break;
        run = qdm2_get_vlc(gb, &vlc_tab_run, 0, 1) + 1;

        if (BITS_LEFT(length,gb) < 16)
            break;
        diff = qdm2_get_se_vlc(&vlc_tab_diff, gb, 2);

        for (k = 1; k <= run; k++)
            quantized_coeffs[i + k] = (level + ((k * diff) / run));

        level += diff;
        i += run;
    }
}

/**
 * Related to synthesis filter, process data from packet 10
 * Init part of quantized_coeffs via function init_quantized_coeffs_elem0
 * Init tone_level_idx_hi1, tone_level_idx_hi2, tone_level_idx_mid with data from packet 10
 *
 * @param q         context
 * @param gb        bitreader context
 * @param length    packet length in bits
 */
static void init_tone_level_dequantization (QDM2Context *q, GetBitContext *gb, int length)
{
    int sb, j, k, n, ch;

    for (ch = 0; ch < q->nb_channels; ch++) {
        init_quantized_coeffs_elem0(q->quantized_coeffs[ch][0], gb, length);

        if (BITS_LEFT(length,gb) < 16) {
            memset(q->quantized_coeffs[ch][0], 0, 8);
            break;
        }
    }

    n = q->sub_sampling + 1;

    /* hi1 corrections: one flag bit per group, then 8 VLC values if set */
    for (sb = 0; sb < n; sb++)
        for (ch = 0; ch < q->nb_channels; ch++)
            for (j = 0; j < 8; j++) {
                if (BITS_LEFT(length,gb) < 1)
                    break;
                if (get_bits1(gb)) {
                    for (k=0; k < 8; k++) {
                        if (BITS_LEFT(length,gb) < 16)
                            break;
                        q->tone_level_idx_hi1[ch][sb][j][k] = qdm2_get_vlc(gb, &vlc_tab_tone_level_idx_hi1, 0, 2);
                    }
                } else {
                    for (k=0; k < 8; k++)
                        q->tone_level_idx_hi1[ch][sb][j][k] = 0;
                }
            }

    n = QDM2_SB_USED(q->sub_sampling) - 4;

    for (sb = 0; sb < n; sb++)
        for (ch = 0; ch < q->nb_channels; ch++) {
            if (BITS_LEFT(length,gb) < 16)
                break;
            q->tone_level_idx_hi2[ch][sb] = qdm2_get_vlc(gb, &vlc_tab_tone_level_idx_hi2, 0, 2);
            if (sb > 19)
                q->tone_level_idx_hi2[ch][sb] -= 16;
            else
                for (j = 0; j < 8; j++)
                    q->tone_level_idx_mid[ch][sb][j] = -16;
        }

    n = QDM2_SB_USED(q->sub_sampling) - 5;

    for (sb = 0; sb < n; sb++)
        for (ch = 0; ch < q->nb_channels; ch++)
            for (j = 0; j < 8; j++) {
                if (BITS_LEFT(length,gb) < 16)
                    break;
                q->tone_level_idx_mid[ch][sb][j] = qdm2_get_vlc(gb, &vlc_tab_tone_level_idx_mid, 0, 2) - 32;
            }
}

/**
 * Process subpacket 9, init quantized_coeffs with data from it
 *
 * @param q       context
 * @param node    pointer to node with packet
 */
static void process_subpacket_9 (QDM2Context *q, QDM2SubPNode *node)
{
    GetBitContext gb;
    int i, j, k, n, ch, run, level, diff;

    init_get_bits(&gb, node->packet->data, node->packet->size*8);

    n = coeff_per_sb_for_avg[q->coeff_per_sb_select][QDM2_SB_USED(q->sub_sampling) - 1] + 1; // same as averagesomething function

    for (i = 1; i < n; i++)
        for (ch=0; ch < q->nb_channels; ch++) {
            level = qdm2_get_vlc(&gb, &vlc_tab_level, 0, 2);
            q->quantized_coeffs[ch][i][0] = level;

            /* run-length decode with linear interpolation, as in init_quantized_coeffs_elem0 */
            for (j = 0; j < (8 - 1); ) {
                run = qdm2_get_vlc(&gb, &vlc_tab_run, 0, 1) + 1;
                diff = qdm2_get_se_vlc(&vlc_tab_diff, &gb, 2);

                for (k = 1; k <= run; k++)
                    q->quantized_coeffs[ch][i][j + k] = (level + ((k*diff) / run));

                level += diff;
                j += run;
            }
        }

    /* element 0 is decoded separately from subpacket 10 */
    for (ch = 0; ch < q->nb_channels; ch++)
        for (i = 0; i < 8; i++)
            q->quantized_coeffs[ch][0][i] = 0;
}

/**
 * Process subpacket 10 if not null, else fall back to noise-only tone levels.
 *
 * @param q         context
 * @param node      pointer to node with packet
 * @param length    packet length in bits
 */
static void process_subpacket_10 (QDM2Context *q, QDM2SubPNode *node, int length)
{
    GetBitContext gb;

    init_get_bits(&gb, ((node == NULL) ? empty_buffer : node->packet->data), ((node == NULL) ? 0 : node->packet->size*8));

    if (length != 0) {
        init_tone_level_dequantization(q, &gb, length);
        fill_tone_level_array(q, 1);
    } else {
        fill_tone_level_array(q, 0);
    }
}

/**
 * Process subpacket 11
 *
 * @param q         context
 * @param node      pointer to node with packet
 * @param length    packet length in bit
 */
static void process_subpacket_11 (QDM2Context *q, QDM2SubPNode *node, int length)
{
    GetBitContext gb;

    init_get_bits(&gb, ((node == NULL) ? empty_buffer : node->packet->data), ((node == NULL) ? 0 : node->packet->size*8));
    if (length >= 32) {
        int c = get_bits (&gb, 13);

        if (c > 3)
            fill_coding_method_array (q->tone_level_idx, q->tone_level_idx_temp, q->coding_method,
                                      q->nb_channels, 8*c, q->superblocktype_2_3, q->cm_table_select);
    }

    /* subpacket 11 carries subbands 0-8 */
    synthfilt_build_sb_samples(q, &gb, length, 0, 8);
}

/**
 * Process subpacket 12
 *
 * @param q         context
 * @param node      pointer to node with packet
 * @param length    packet length in bits
 */
static void process_subpacket_12 (QDM2Context *q, QDM2SubPNode *node, int length)
{
    GetBitContext gb;

    init_get_bits(&gb, ((node == NULL) ? empty_buffer : node->packet->data), ((node == NULL) ? 0 : node->packet->size*8));
    /* subpacket 12 carries subbands 8 .. QDM2_SB_USED */
    synthfilt_build_sb_samples(q, &gb, length, 8, QDM2_SB_USED(q->sub_sampling));
}

/*
 * Process new subpackets for synthesis filter
 *
 * @param q       context
 * @param list    list with synthesis filter packets (list D)
 */
static void process_synthesis_subpackets (QDM2Context *q, QDM2SubPNode *list)
{
    QDM2SubPNode *nodes[4];

    nodes[0] = qdm2_search_subpacket_type_in_list(list, 9);
    if (nodes[0] != NULL)
        process_subpacket_9(q, nodes[0]);

    nodes[1] = qdm2_search_subpacket_type_in_list(list, 10);
    if (nodes[1] != NULL)
        process_subpacket_10(q, nodes[1], nodes[1]->packet->size << 3);
    else
        process_subpacket_10(q, NULL, 0);

    /* subpackets 11 and 12 are only decoded if 9 and 10 were present */
    nodes[2] = qdm2_search_subpacket_type_in_list(list, 11);
    if (nodes[0] != NULL && nodes[1] != NULL && nodes[2] != NULL)
        process_subpacket_11(q, nodes[2], (nodes[2]->packet->size << 3));
    else
        process_subpacket_11(q, NULL, 0);

    nodes[3] = qdm2_search_subpacket_type_in_list(list, 12);
    if (nodes[0] != NULL && nodes[1] != NULL && nodes[3] != NULL)
        process_subpacket_12(q, nodes[3], (nodes[3]->packet->size << 3));
    else
        process_subpacket_12(q, NULL, 0);
}

/*
 * Decode superblock, fill packet lists.
 *
 * @param q    context
 */
static void qdm2_decode_super_block (QDM2Context *q)
{
    GetBitContext gb;
    QDM2SubPacket header, *packet;
    int i, packet_bytes, sub_packet_size, sub_packets_D;
    unsigned int next_index = 0;

    memset(q->tone_level_idx_hi1, 0, sizeof(q->tone_level_idx_hi1));
    memset(q->tone_level_idx_mid, 0, sizeof(q->tone_level_idx_mid));
    memset(q->tone_level_idx_hi2, 0, sizeof(q->tone_level_idx_hi2));

    q->sub_packets_B = 0;
    sub_packets_D = 0;

    average_quantized_coeffs(q); // average elements in quantized_coeffs[max_ch][10][8]

    init_get_bits(&gb, q->compressed_data, q->compressed_size*8);
    qdm2_decode_sub_packet_header(&gb, &header);

    if (header.type < 2 || header.type >= 8) {
        q->has_errors = 1;
        av_log(NULL,AV_LOG_ERROR,"bad superblock type\n");
        return;
    }

    q->superblocktype_2_3 = (header.type == 2 || header.type == 3);
    packet_bytes = (q->compressed_size - get_bits_count(&gb) / 8);

    init_get_bits(&gb, header.data, header.size*8);

    /* superblock types 2, 4, 5 carry a 16 bit checksum seed first */
    if (header.type == 2 || header.type == 4 || header.type == 5) {
        int csum = 257 * get_bits(&gb, 8) + 2 * get_bits(&gb, 8);

        csum = qdm2_packet_checksum(q->compressed_data, q->checksum_size, csum);

        if (csum != 0) {
            q->has_errors = 1;
            av_log(NULL,AV_LOG_ERROR,"bad packet checksum\n");
            return;
        }
    }

    q->sub_packet_list_B[0].packet = NULL;
    q->sub_packet_list_D[0].packet = NULL;

    /* decay FFT level exponents from the previous superblock */
    for (i = 0; i < 6; i++)
        if (--q->fft_level_exp[i] < 0)
            q->fft_level_exp[i] = 0;

    for (i = 0; packet_bytes > 0; i++) {
        int j;

        q->sub_packet_list_A[i].next = NULL;

        if (i > 0) {
            q->sub_packet_list_A[i - 1].next = &q->sub_packet_list_A[i];

            /* seek to next block */
            init_get_bits(&gb, header.data, header.size*8);
            skip_bits(&gb, next_index*8);

            if (next_index >= header.size)
                break;
        }

        /* decode subpacket */
        packet = &q->sub_packets[i];
        qdm2_decode_sub_packet_header(&gb, packet);
        next_index = packet->size + get_bits_count(&gb) / 8;
        /* header is 2 bytes, or 3 if the size needed 16 bits */
        sub_packet_size = ((packet->size > 0xff) ? 1 : 0) + packet->size + 2;

        if (packet->type == 0)
            break;

        if (sub_packet_size > packet_bytes) {
            /* truncated packet: only types 10-12 tolerate clamping the size */
            if (packet->type != 10 && packet->type != 11 && packet->type != 12)
                break;
            packet->size += packet_bytes - sub_packet_size;
        }

        packet_bytes -= sub_packet_size;

        /* add subpacket to 'all subpackets' list */
        q->sub_packet_list_A[i].packet = packet;

        /* add subpacket to related list */
        if (packet->type == 8) {
            SAMPLES_NEEDED_2("packet type 8");
            return;
        } else if (packet->type >= 9 && packet->type <= 12) {
            /* packets for MPEG Audio like Synthesis Filter */
            QDM2_LIST_ADD(q->sub_packet_list_D, sub_packets_D, packet);
        } else if (packet->type == 13) {
            for (j = 0; j < 6; j++)
                q->fft_level_exp[j] = get_bits(&gb, 6);
        } else if (packet->type == 14) {
            for (j = 0; j < 6; j++)
                q->fft_level_exp[j] = qdm2_get_vlc(&gb, &fft_level_exp_vlc, 0, 2);
        } else if (packet->type == 15) {
            SAMPLES_NEEDED_2("packet type 15")
            return;
        } else if (packet->type >= 16 && packet->type < 48 && !fft_subpackets[packet->type - 16]) {
            /* packets for FFT */
            QDM2_LIST_ADD(q->sub_packet_list_B, q->sub_packets_B, packet);
        }
    } // Packet bytes loop

/* **************************************************************** */
    if (q->sub_packet_list_D[0].packet != NULL) {
        process_synthesis_subpackets(q, q->sub_packet_list_D);
        q->do_synth_filter = 1;
    } else if (q->do_synth_filter) {
        /* once enabled, the synthesis filter keeps running on empty input */
        process_subpacket_10(q, NULL, 0);
        process_subpacket_11(q, NULL, 0);
        process_subpacket_12(q, NULL, 0);
    }
/* **************************************************************** */
}

/**
 * Append one decoded FFT tone coefficient and track the first index per duration.
 */
static void qdm2_fft_init_coefficient (QDM2Context *q, int sub_packet,
                       int offset, int duration, int channel,
                       int exp, int phase)
{
    if (q->fft_coefs_min_index[duration] < 0)
        q->fft_coefs_min_index[duration] = q->fft_coefs_index;

    q->fft_coefs[q->fft_coefs_index].sub_packet = ((sub_packet >= 16) ? (sub_packet - 16) : sub_packet);
    q->fft_coefs[q->fft_coefs_index].channel = channel;
    q->fft_coefs[q->fft_coefs_index].offset = offset;
    q->fft_coefs[q->fft_coefs_index].exp = exp;
    q->fft_coefs[q->fft_coefs_index].phase = phase;
    q->fft_coefs_index++;
}

/**
 * Decode FFT tone parameters (offset/exponent/phase, plus stereo deltas)
 * for one duration class from a type-b subpacket bitstream.
 * Variable names follow the original reverse-engineered disassembly.
 */
static void qdm2_fft_decode_tones (QDM2Context *q, int duration, GetBitContext *gb, int b)
{
    int channel, stereo, phase, exp;
    int local_int_4, local_int_8, stereo_phase, local_int_10;
    int local_int_14, stereo_exp, local_int_20, local_int_28;
    int n, offset;

    local_int_4 = 0;
    local_int_28 = 0;
    local_int_20 = 2;
    local_int_8 = (4 - duration);
    local_int_10 = 1 << (q->group_order - duration - 1);
    offset = 1;

    while (1) {
        if (q->superblocktype_2_3) {
            /* symbols 0/1 are escape codes advancing the group position */
            while ((n = qdm2_get_vlc(gb, &vlc_tab_fft_tone_offset[local_int_8], 1, 2)) < 2) {
                offset = 1;
                if (n == 0) {
                    local_int_4 += local_int_10;
                    local_int_28 += (1 << local_int_8);
                } else {
                    local_int_4 += 8*local_int_10;
                    local_int_28 += (8 << local_int_8);
                }
            }
            offset += (n - 2);
        } else {
            offset += qdm2_get_vlc(gb, &vlc_tab_fft_tone_offset[local_int_8], 1, 2);
            while (offset >= (local_int_10 - 1)) {
                offset += (1 - (local_int_10 - 1));
                local_int_4 += local_int_10;
                local_int_28 += (1 << local_int_8);
            }
        }

        if (local_int_4 >= q->group_size)
            return;

        local_int_14 = (offset >> local_int_8);

        if (q->nb_channels > 1) {
            channel = get_bits1(gb);
            stereo = get_bits1(gb);
        } else {
            channel = 0;
            stereo = 0;
        }

        exp = qdm2_get_vlc(gb, (b ? &fft_level_exp_vlc : &fft_level_exp_alt_vlc), 0, 2);
        exp += q->fft_level_exp[fft_level_index_table[local_int_14]];
        exp = (exp < 0) ? 0 : exp;

        phase = get_bits(gb, 3);
        stereo_exp = 0;
        stereo_phase = 0;

        if (stereo) {
            /* second channel coded as deltas from the first */
            stereo_exp = (exp - qdm2_get_vlc(gb, &fft_stereo_exp_vlc, 0, 1));
            stereo_phase = (phase - qdm2_get_vlc(gb, &fft_stereo_phase_vlc, 0, 1));
            if (stereo_phase < 0)
                stereo_phase += 8;
        }

        if (q->frequency_range > (local_int_14 + 1)) {
            int sub_packet = (local_int_20 + local_int_28);

            qdm2_fft_init_coefficient(q, sub_packet, offset, duration, channel, exp, phase);
            if (stereo)
                qdm2_fft_init_coefficient(q, sub_packet, offset, duration, (1 - channel), stereo_exp, stereo_phase);
        }

        offset++;
    }
}

/**
 * Decode all FFT subpackets (list B), ordered by type descending,
 * and compute per-duration min/max coefficient index ranges.
 */
static void qdm2_decode_fft_packets (QDM2Context *q)
{
    int i, j, min, max, value, type, unknown_flag;
    GetBitContext gb;

    if (q->sub_packet_list_B[0].packet == NULL)
        return;

    /* reset minimum indexes for FFT coefficients */
    q->fft_coefs_index = 0;
    for (i=0; i < 5; i++)
        q->fft_coefs_min_index[i] = -1;

    /* process subpackets ordered by type, largest type first */
    for (i = 0, max = 256; i < q->sub_packets_B; i++) {
        QDM2SubPacket *packet= NULL;

        /* find subpacket with largest type less than max */
        for (j = 0, min = 0; j < q->sub_packets_B; j++) {
            value = q->sub_packet_list_B[j].packet->type;
            if (value > min && value < max) {
                min = value;
                packet = q->sub_packet_list_B[j].packet;
            }
        }

        max = min;

        /* check for errors (?) */
        if (!packet)
            return;

        if (i == 0 && (packet->type < 16 || packet->type >= 48 || fft_subpackets[packet->type - 16]))
            return;

        /* decode FFT tones */
        init_get_bits (&gb, packet->data, packet->size*8);

        if (packet->type >= 32 && packet->type < 48 && !fft_subpackets[packet->type - 16])
            unknown_flag = 1;
        else
            unknown_flag = 0;

        type = packet->type;

        if ((type >= 17 && type < 24) || (type >= 33 && type < 40)) {
            int duration = q->sub_sampling + 5 - (type & 15);

            if (duration >= 0 && duration < 4)
                qdm2_fft_decode_tones(q, duration, &gb, unknown_flag);
        } else if (type == 31) {
            for (j=0; j < 4; j++)
                qdm2_fft_decode_tones(q, j, &gb, unknown_flag);
        } else if (type == 46) {
            /* type 46 carries fresh level exponents before the tone data */
            for (j=0; j < 6; j++)
                q->fft_level_exp[j] = get_bits(&gb, 6);
            for (j=0; j < 4; j++)
                qdm2_fft_decode_tones(q, j, &gb, unknown_flag);
        }
    } // Loop on B packets

    /* calculate maximum indexes for FFT coefficients */
    for (i = 0, j = -1; i < 5; i++)
        if (q->fft_coefs_min_index[i] >= 0) {
            if (j >= 0)
                q->fft_coefs_max_index[j] = q->fft_coefs_min_index[i];
            j = i;
        }
    if (j >= 0)
        q->fft_coefs_max_index[j] = q->fft_coefs_index;
}

/**
 * Advance a tone by one step: mix its windowed complex contribution into
 * the FFT coefficient buffer and re-queue it if its envelope has not ended.
 */
static void qdm2_fft_generate_tone (QDM2Context *q, FFTTone *tone)
{
   float level, f[6];
   int i;
   QDM2Complex c;
   const double iscale = 2.0*M_PI / 512.0;

    tone->phase += tone->phase_shift;

    /* calculate current level (maximum amplitude) of tone */
    level = fft_tone_envelope_table[tone->duration][tone->time_index] * tone->level;
    c.im = level * sin(tone->phase*iscale);
    c.re = level * cos(tone->phase*iscale);

    /* generate FFT coefficients for tone */
    if (tone->duration >= 3 || tone->cutoff >= 3) {
        /* short tones: spread over two adjacent bins only */
        tone->complex[0].im += c.im;
        tone->complex[0].re += c.re;
        tone->complex[1].im -= c.im;
        tone->complex[1].re -= c.re;
    } else {
        /* longer tones: 6-tap spread derived from the per-tone sample table */
        f[1] = -tone->table[4];
        f[0] = tone->table[3] - tone->table[0];
        f[2] = 1.0 - tone->table[2] - tone->table[3];
        f[3] = tone->table[1] + tone->table[4] - 1.0;
        f[4] = tone->table[0] - tone->table[1];
        f[5] = tone->table[2];
        for (i = 0; i < 2; i++) {
            tone->complex[fft_cutoff_index_table[tone->cutoff][i]].re += c.re * f[i];
            tone->complex[fft_cutoff_index_table[tone->cutoff][i]].im += c.im *((tone->cutoff <= i) ? -f[i] : f[i]);
        }
        for (i = 0; i < 4; i++) {
            tone->complex[i].re += c.re * f[i+2];
            tone->complex[i].im += c.im * f[i+2];
        }
    }

    /* copy the tone if it has not yet died out */
    if (++tone->time_index < ((1 << (5 - tone->duration)) - 1)) {
        memcpy(&q->fft_tones[q->fft_tone_end], tone, sizeof(FFTTone));
        q->fft_tone_end = (q->fft_tone_end + 1) % 1000;  // 1000-entry circular tone queue
    }
}

/**
 * Build the complex FFT coefficient buffers for one subpacket from
 * decoded tone coefficients plus previously queued, still-live tones.
 */
static void qdm2_fft_tone_synthesizer (QDM2Context *q, int sub_packet)
{
    int i, j, ch;
    const double iscale = 0.25 * M_PI;

    for (ch = 0; ch < q->channels; ch++) {
        memset(q->fft.complex[ch], 0, q->fft_size * sizeof(QDM2Complex));
    }

    /* apply FFT tones with duration 4 (1 FFT period) */
    if (q->fft_coefs_min_index[4] >= 0)
        for (i = q->fft_coefs_min_index[4]; i < q->fft_coefs_max_index[4]; i++) {
            float level;
            QDM2Complex c;

            if (q->fft_coefs[i].sub_packet != sub_packet)
                break;

            ch = (q->channels == 1) ? 0 : q->fft_coefs[i].channel;
            level = (q->fft_coefs[i].exp < 0) ? 0.0 : fft_tone_level_table[q->superblocktype_2_3 ? 0 : 1][q->fft_coefs[i].exp & 63];

            c.re = level * cos(q->fft_coefs[i].phase * iscale);
            c.im = level * sin(q->fft_coefs[i].phase * iscale);
            q->fft.complex[ch][q->fft_coefs[i].offset + 0].re += c.re;
            q->fft.complex[ch][q->fft_coefs[i].offset + 0].im += c.im;
            q->fft.complex[ch][q->fft_coefs[i].offset + 1].re -= c.re;
            q->fft.complex[ch][q->fft_coefs[i].offset + 1].im -= c.im;
        }

    /* generate existing FFT tones */
    for (i = q->fft_tone_end; i != q->fft_tone_start; ) {
        qdm2_fft_generate_tone(q, &q->fft_tones[q->fft_tone_start]);
        q->fft_tone_start = (q->fft_tone_start + 1) % 1000;
    }

    /* create and generate new FFT tones with duration 0 (long) to 3 (short) */
    for (i = 0; i < 4; i++)
        if (q->fft_coefs_min_index[i] >= 0) {
            for (j = q->fft_coefs_min_index[i]; j < q->fft_coefs_max_index[i]; j++) {
                int offset, four_i;
                FFTTone tone;

                if (q->fft_coefs[j].sub_packet != sub_packet)
                    break;

                four_i = (4 - i);
                offset = q->fft_coefs[j].offset >> four_i;
                ch = (q->channels == 1) ? 0 : q->fft_coefs[j].channel;

                if (offset < q->frequency_range) {
                    if (offset < 2)
                        tone.cutoff = offset;
                    else
                        tone.cutoff = (offset >= 60) ? 3 : 2;

                    tone.level = (q->fft_coefs[j].exp < 0) ? 0.0 : fft_tone_level_table[q->superblocktype_2_3 ? 0 : 1][q->fft_coefs[j].exp & 63];
                    tone.complex = &q->fft.complex[ch][offset];
                    tone.table = fft_tone_sample_table[i][q->fft_coefs[j].offset - (offset << four_i)];
                    tone.phase = 64 * q->fft_coefs[j].phase - (offset << 8) - 128;
                    tone.phase_shift = (2 * q->fft_coefs[j].offset + 1) << (7 - four_i);
                    tone.duration = i;
                    tone.time_index = 0;

                    qdm2_fft_generate_tone(q, &tone);
                }
            }
            q->fft_coefs_min_index[i] = j;
        }
}

/**
 * Run the inverse RDFT on one channel's coefficients and accumulate
 * the time-domain samples into the interleaved output buffer.
 */
static void qdm2_calculate_fft (QDM2Context *q, int channel, int sub_packet)
{
    /* halve gain when a stereo stream is downmixed to one output channel */
    const float gain = (q->channels == 1 && q->nb_channels == 2) ? 0.5f : 1.0f;
    int i;
    q->fft.complex[channel][0].re *= 2.0f;
    q->fft.complex[channel][0].im = 0.0f;
    ff_rdft_calc(&q->rdft_ctx, (FFTSample *)q->fft.complex[channel]);
    /* add samples to output buffer */
    for (i = 0; i < ((q->fft_frame_size + 15) & ~15); i++)
        q->output_buffer[q->channels * i + channel] += ((float *) q->fft.complex[channel])[i] * gain;
}

/**
 * Run the MPEG-audio-style synthesis filter over the subband samples
 * and mix the result into the output buffer.
 *
 * @param q        context
 * @param index    subpacket number
 */
static void qdm2_synthesis_filter (QDM2Context *q, int index)
{
    OUT_INT samples[MPA_MAX_CHANNELS * MPA_FRAME_SIZE];
    int i, k, ch, sb_used, sub_sampling, dither_state = 0;

    /* copy sb_samples */
    sb_used = QDM2_SB_USED(q->sub_sampling);

    /* clear unused upper subbands for this subpacket's 8 rows */
    for (ch = 0; ch < q->channels; ch++)
        for (i = 0; i < 8; i++)
            for (k=sb_used; k < SBLIMIT; k++)
                q->sb_samples[ch][(8 * index) + i][k] = 0;

    for (ch = 0; ch < q->nb_channels; ch++) {
        OUT_INT *samples_ptr = samples + ch;

        for (i = 0; i < 8; i++) {
            ff_mpa_synth_filter(q->synth_buf[ch], &(q->synth_buf_offset[ch]),
                ff_mpa_synth_window, &dither_state,
                samples_ptr, q->nb_channels,
                q->sb_samples[ch][(8 * index) + i]);
            samples_ptr += 32 * q->nb_channels;
        }
    }

    /* add samples to output buffer */
    sub_sampling = (4 >> q->sub_sampling);

    for (ch = 0; ch < q->channels; ch++)
        for (i = 0; i < q->frame_size; i++)
            q->output_buffer[q->channels * i + ch] += (float)(samples[q->nb_channels * sub_sampling * i + ch] >> (sizeof(OUT_INT)*8-16));
}

/**
 * Init static data (does not depend on specific file)
 *
 * @param q    context
 */
static av_cold void qdm2_init(QDM2Context *q) {
    /* one-time global table init; NOTE(review): not thread-safe if two
     * decoders are opened concurrently — guarded only by a plain int flag */
    static int initialized = 0;

    if (initialized != 0)
        return;
    initialized = 1;

    qdm2_init_vlc();
    ff_mpa_synth_init(ff_mpa_synth_window);
    softclip_table_init();
    rnd_table_init();
    init_noise_samples();

    av_log(NULL, AV_LOG_DEBUG, "init done\n");
}

#if 0
static void dump_context(QDM2Context *q)
{
    int i;
#define PRINT(a,b) av_log(NULL,AV_LOG_DEBUG," %s = %d\n", a, b);
    PRINT("compressed_data",q->compressed_data);
    PRINT("compressed_size",q->compressed_size);
    PRINT("frame_size",q->frame_size);
    PRINT("checksum_size",q->checksum_size);
    PRINT("channels",q->channels);
    PRINT("nb_channels",q->nb_channels);
    PRINT("fft_frame_size",q->fft_frame_size);
    PRINT("fft_size",q->fft_size);
    PRINT("sub_sampling",q->sub_sampling);
    PRINT("fft_order",q->fft_order);
    PRINT("group_order",q->group_order);
    PRINT("group_size",q->group_size);
    PRINT("sub_packet",q->sub_packet);
    PRINT("frequency_range",q->frequency_range);
    PRINT("has_errors",q->has_errors);
    PRINT("fft_tone_end",q->fft_tone_end);
    PRINT("fft_tone_start",q->fft_tone_start);
    PRINT("fft_coefs_index",q->fft_coefs_index);
    PRINT("coeff_per_sb_select",q->coeff_per_sb_select);
    PRINT("cm_table_select",q->cm_table_select);
    PRINT("noise_idx",q->noise_idx);

    for (i = q->fft_tone_start; i < q->fft_tone_end; i++)
    {
    FFTTone *t = &q->fft_tones[i];

    av_log(NULL,AV_LOG_DEBUG,"Tone (%d) dump:\n", i);
    av_log(NULL,AV_LOG_DEBUG," level = %f\n", t->level);
//  PRINT(" level", t->level);
    PRINT(" phase", t->phase);
    PRINT(" phase_shift", t->phase_shift);
    PRINT(" duration", t->duration);
    PRINT(" samples_im", t->samples_im);
    PRINT(" samples_re", t->samples_re);
    PRINT(" table", t->table);
    }
}
#endif

/**
 * Init parameters from codec extradata
 */
static av_cold int qdm2_decode_init(AVCodecContext *avctx)
{
    QDM2Context *s = avctx->priv_data;
    uint8_t *extradata;
    int extradata_size;
    int tmp_val, tmp, size;

    /* extradata parsing

    Structure:
    wave {
        frma (QDM2)
        QDCA
        QDCP
    }

    32  size (including this field)
    32  tag (=frma)
    32  type (=QDM2 or QDMC)

    32  size (including this field, in bytes)
    32  tag (=QDCA) // maybe mandatory parameters
    32  unknown (=1)
    32  channels (=2)
    32  samplerate (=44100)
    32  bitrate (=96000)
    32  block size (=4096)
    32  frame size (=256) (for one channel)
    32  packet size (=1300)

    32  size (including this field, in bytes)
    32  tag (=QDCP) // maybe some tuneable parameters
    32  float1 (=1.0)
    32  zero ?
    32  float2 (=1.0)
    32  float3 (=1.0)
    32  unknown (27)
    32  unknown (8)
    32  zero ?
    */

    if (!avctx->extradata || (avctx->extradata_size < 48)) {
        av_log(avctx, AV_LOG_ERROR, "extradata missing or truncated\n");
        return -1;
    }

    extradata = avctx->extradata;
    extradata_size = avctx->extradata_size;

    /* scan forward to the "frmaQDM" marker; extradata may have a prefix */
    while (extradata_size > 7) {
        if (!memcmp(extradata, "frmaQDM", 7))
            break;
        extradata++;
        extradata_size--;
    }

    if (extradata_size < 12) {
        av_log(avctx, AV_LOG_ERROR, "not enough extradata (%i)\n",
               extradata_size);
        return -1;
    }

    if (memcmp(extradata, "frmaQDM", 7)) {
        av_log(avctx, AV_LOG_ERROR, "invalid headers, QDM? not found\n");
        return -1;
    }

    if (extradata[7] == 'C') {
//      s->is_qdmc = 1;
        av_log(avctx, AV_LOG_ERROR, "stream is QDMC version 1, which is not supported\n");
        return -1;
    }

    extradata += 8;
    extradata_size -= 8;

    size = AV_RB32(extradata);

    if(size > extradata_size){
        av_log(avctx, AV_LOG_ERROR, "extradata size too small, %i < %i\n",
               extradata_size, size);
        return -1;
    }

    extradata += 4;
    av_log(avctx, AV_LOG_DEBUG, "size: %d\n", size);
    if (AV_RB32(extradata) != MKBETAG('Q','D','C','A')) {
        av_log(avctx, AV_LOG_ERROR, "invalid extradata, expecting QDCA\n");
        return -1;
    }

    extradata += 8;

    avctx->channels = s->nb_channels = s->channels = AV_RB32(extradata);
    extradata += 4;

    avctx->sample_rate = AV_RB32(extradata);
    extradata += 4;

    avctx->bit_rate = AV_RB32(extradata);
    extradata += 4;

    s->group_size = AV_RB32(extradata);
    extradata += 4;

    s->fft_size = AV_RB32(extradata);
    extradata += 4;

    s->checksum_size = AV_RB32(extradata);

    s->fft_order = av_log2(s->fft_size) + 1;
    s->fft_frame_size = 2 * s->fft_size; // complex has two floats

    // something like max decodable tones
    s->group_order = av_log2(s->group_size) + 1;
    s->frame_size = s->group_size / 16; // 16 iterations per super block

    s->sub_sampling = s->fft_order - 7;
    s->frequency_range = 255 / (1 << (2 - s->sub_sampling));

    /* pick a nominal bitrate threshold table from sub_sampling and channel count */
    switch ((s->sub_sampling * 2 + s->channels - 1)) {
        case 0: tmp = 40; break;
        case 1: tmp = 48; break;
        case 2: tmp = 56; break;
        case 3: tmp = 72; break;
        case 4: tmp = 80; break;
        case 5: tmp = 100;break;
        default: tmp=s->sub_sampling; break;
    }
    tmp_val = 0;
    if ((tmp * 1000) < avctx->bit_rate)  tmp_val = 1;
    if ((tmp * 1440) < avctx->bit_rate)  tmp_val = 2;
    if ((tmp * 1760) < avctx->bit_rate)  tmp_val = 3;
    if ((tmp * 2240) < avctx->bit_rate)  tmp_val = 4;
    s->cm_table_select = tmp_val;

    if (s->sub_sampling == 0)
        tmp = 7999;
    else
        tmp = ((-(s->sub_sampling -1)) & 8000) + 20000;
    /*
    0: 7999 -> 0
    1: 20000 -> 2
    2: 28000 -> 2
    */
    if (tmp < 8000)
        s->coeff_per_sb_select = 0;
    else if (tmp <= 16000)
        s->coeff_per_sb_select = 1;
    else
        s->coeff_per_sb_select = 2;

    // Fail on unknown fft order
    if ((s->fft_order < 7) || (s->fft_order > 9)) {
        av_log(avctx, AV_LOG_ERROR, "Unknown FFT order (%d), contact the developers!\n", s->fft_order);
        return -1;
    }

    ff_rdft_init(&s->rdft_ctx, s->fft_order, IDFT_C2R);

    qdm2_init(s);

    avctx->sample_fmt = SAMPLE_FMT_S16;

//  dump_context(s);
    return 0;
}

static av_cold int qdm2_decode_close(AVCodecContext *avctx)
{
    QDM2Context *s = avctx->priv_data;

    ff_rdft_end(&s->rdft_ctx);

    return 0;
}

static void qdm2_decode (QDM2Context *q, const uint8_t *in, int16_t *out)
{
    int ch, i;
    const int frame_size = (q->frame_size * q->channels);

    /* select input buffer */
    q->compressed_data = in;
    q->compressed_size = q->checksum_size;

//  dump_context(q);

    /* copy old block, clear new block of output samples */
    memmove(q->output_buffer, &q->output_buffer[frame_size], frame_size * sizeof(float));
    memset(&q->output_buffer[frame_size], 0, frame_size * sizeof(float));

    /* decode block of QDM2 compressed data */
    if (q->sub_packet == 0) {
        q->has_errors = 0; // zero it for a new super block
        av_log(NULL,AV_LOG_DEBUG,"Superblock follows\n");
        qdm2_decode_super_block(q);
    }

    /* parse subpackets */
    if (!q->has_errors) {
        if (q->sub_packet == 2)
            qdm2_decode_fft_packets(q);

        qdm2_fft_tone_synthesizer(q, q->sub_packet);
    }

    /* sound synthesis stage 1 (FFT) */
    for (ch = 0; ch < q->channels; ch++) {
        qdm2_calculate_fft(q, ch, q->sub_packet);

        if (!q->has_errors && q->sub_packet_list_C[0].packet != NULL) {
SAMPLES_NEEDED_2("has errors, and C list is not empty") return; } } /* sound synthesis stage 2 (MPEG audio like synthesis filter) */ if (!q->has_errors && q->do_synth_filter) qdm2_synthesis_filter(q, q->sub_packet); q->sub_packet = (q->sub_packet + 1) % 16; /* clip and convert output float[] to 16bit signed samples */ for (i = 0; i < frame_size; i++) { int value = (int)q->output_buffer[i]; if (value > SOFTCLIP_THRESHOLD) value = (value > HARDCLIP_THRESHOLD) ? 32767 : softclip_table[ value - SOFTCLIP_THRESHOLD]; else if (value < -SOFTCLIP_THRESHOLD) value = (value < -HARDCLIP_THRESHOLD) ? -32767 : -softclip_table[-value - SOFTCLIP_THRESHOLD]; out[i] = value; } } static int qdm2_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; QDM2Context *s = avctx->priv_data; if(!buf) return 0; if(buf_size < s->checksum_size) return -1; *data_size = s->channels * s->frame_size * sizeof(int16_t); av_log(avctx, AV_LOG_DEBUG, "decode(%d): %p[%d] -> %p[%d]\n", buf_size, buf, s->checksum_size, data, *data_size); qdm2_decode(s, buf, data); // reading only when next superblock found if (s->sub_packet == 0) { return s->checksum_size; } return 0; } AVCodec qdm2_decoder = { .name = "qdm2", .type = AVMEDIA_TYPE_AUDIO, .id = CODEC_ID_QDM2, .priv_data_size = sizeof(QDM2Context), .init = qdm2_decode_init, .close = qdm2_decode_close, .decode = qdm2_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("QDesign Music Codec 2"), };
123linslouis-android-video-cutter
jni/libavcodec/qdm2.c
C
asf20
68,619
/* * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avcodec.h" #include "mpegaudio.h" static int mp3_header_compress(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe){ uint32_t header, extraheader; int mode_extension, header_size; if(avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL){ av_log(avctx, AV_LOG_ERROR, "not standards compliant\n"); return -1; } header = AV_RB32(buf); mode_extension= (header>>4)&3; if(ff_mpa_check_header(header) < 0 || (header&0x60000) != 0x20000){ output_unchanged: *poutbuf= (uint8_t *) buf; *poutbuf_size= buf_size; av_log(avctx, AV_LOG_INFO, "cannot compress %08X\n", header); return 0; } if(avctx->extradata_size == 0){ avctx->extradata_size=15; avctx->extradata= av_malloc(avctx->extradata_size); strcpy(avctx->extradata, "FFCMP3 0.0"); memcpy(avctx->extradata+11, buf, 4); } if(avctx->extradata_size != 15){ av_log(avctx, AV_LOG_ERROR, "Extradata invalid\n"); return -1; } extraheader = AV_RB32(avctx->extradata+11); if((extraheader&MP3_MASK) != (header&MP3_MASK)) goto output_unchanged; header_size= (header&0x10000) ? 
4 : 6; *poutbuf_size= buf_size - header_size; *poutbuf= av_malloc(buf_size - header_size + FF_INPUT_BUFFER_PADDING_SIZE); memcpy(*poutbuf, buf + header_size, buf_size - header_size + FF_INPUT_BUFFER_PADDING_SIZE); if(avctx->channels==2){ if((header & (3<<19)) != 3<<19){ (*poutbuf)[1] &= 0x3F; (*poutbuf)[1] |= mode_extension<<6; FFSWAP(int, (*poutbuf)[1], (*poutbuf)[2]); }else{ (*poutbuf)[1] &= 0x8F; (*poutbuf)[1] |= mode_extension<<4; } } return 1; } AVBitStreamFilter mp3_header_compress_bsf={ "mp3comp", 0, mp3_header_compress, };
123linslouis-android-video-cutter
jni/libavcodec/mp3_header_compress_bsf.c
C
asf20
2,835
/* * S3 Texture Compression (S3TC) decoding functions * Copyright (c) 2007 by Ivo van Poorten * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_S3TC_H #define AVCODEC_S3TC_H #include <stdint.h> #define FF_S3TC_DXT1 0x31545844 #define FF_S3TC_DXT3 0x33545844 /** * Decode DXT1 encoded data to RGB32 * @param *src source buffer, has to be aligned on a 4-byte boundary * @param *dst destination buffer * @param w width of output image * @param h height of output image * @param stride line size of output image */ void ff_decode_dxt1(const uint8_t *src, uint8_t *dst, const unsigned int w, const unsigned int h, const unsigned int stride); /** * Decode DXT3 encoded data to RGB32 * @param *src source buffer, has to be aligned on a 4-byte boundary * @param *dst destination buffer * @param w width of output image * @param h height of output image * @param stride line size of output image */ void ff_decode_dxt3(const uint8_t *src, uint8_t *dst, const unsigned int w, const unsigned int h, const unsigned int stride); #endif /* AVCODEC_S3TC_H */
123linslouis-android-video-cutter
jni/libavcodec/s3tc.h
C
asf20
1,880
/* * G.729 decoder * Copyright (c) 2008 Vladimir Voroshilov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_G729_H #define AVCODEC_G729_H /** * subframe size */ #define SUBFRAME_SIZE 40 #endif // AVCODEC_G729_H
123linslouis-android-video-cutter
jni/libavcodec/g729.h
C
asf20
948
/* * IFF PBM/ILBM bitmap decoder * Copyright (c) 2010 Peter Ross <pross@xvid.org> * Copyright (c) 2010 Sebastian Vater <cdgs.basty@googlemail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * IFF PBM/ILBM bitmap decoder */ #include "bytestream.h" #include "avcodec.h" #include "get_bits.h" #include "iff.h" typedef struct { AVFrame frame; int planesize; uint8_t * planebuf; } IffContext; /** * Convert CMAP buffer (stored in extradata) to lavc palette format */ int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal) { int count, i; if (avctx->bits_per_coded_sample > 8) { av_log(avctx, AV_LOG_ERROR, "bit_per_coded_sample > 8 not supported\n"); return AVERROR_INVALIDDATA; } count = 1 << avctx->bits_per_coded_sample; if (avctx->extradata_size < count * 3) { av_log(avctx, AV_LOG_ERROR, "palette data underflow\n"); return AVERROR_INVALIDDATA; } for (i=0; i < count; i++) { pal[i] = 0xFF000000 | AV_RB24( avctx->extradata + i*3 ); } return 0; } static av_cold int decode_init(AVCodecContext *avctx) { IffContext *s = avctx->priv_data; int err; if (avctx->bits_per_coded_sample <= 8) { avctx->pix_fmt = PIX_FMT_PAL8; } else if (avctx->bits_per_coded_sample <= 32) { avctx->pix_fmt = PIX_FMT_BGR32; } else { return AVERROR_INVALIDDATA; } s->planesize = avctx->width >> 3; 
s->planebuf = av_malloc(s->planesize + FF_INPUT_BUFFER_PADDING_SIZE); if (!s->planebuf) return AVERROR(ENOMEM); s->frame.reference = 1; if ((err = avctx->get_buffer(avctx, &s->frame) < 0)) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return err; } return avctx->bits_per_coded_sample <= 8 ? ff_cmap_read_palette(avctx, (uint32_t*)s->frame.data[1]) : 0; } /** * Decode interleaved plane buffer up to 8bpp * @param dst Destination buffer * @param buf Source buffer * @param buf_size * @param bps bits_per_coded_sample (must be <= 8) * @param plane plane number to decode as */ static void decodeplane8(uint8_t *dst, const uint8_t *const buf, int buf_size, int bps, int plane) { GetBitContext gb; int i; const int b = (buf_size * 8) + bps - 1; init_get_bits(&gb, buf, buf_size * 8); for(i = 0; i < b; i++) { dst[i] |= get_bits1(&gb) << plane; } } /** * Decode interleaved plane buffer up to 24bpp * @param dst Destination buffer * @param buf Source buffer * @param buf_size * @param bps bits_per_coded_sample * @param plane plane number to decode as */ static void decodeplane32(uint32_t *dst, const uint8_t *const buf, int buf_size, int bps, int plane) { GetBitContext gb; int i; const int b = (buf_size * 8) + bps - 1; init_get_bits(&gb, buf, buf_size * 8); for(i = 0; i < b; i++) { dst[i] |= get_bits1(&gb) << plane; } } static int decode_frame_ilbm(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { IffContext *s = avctx->priv_data; const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; const uint8_t *buf_end = buf+buf_size; int y, plane; if (avctx->reget_buffer(avctx, &s->frame) < 0){ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } if (avctx->pix_fmt == PIX_FMT_PAL8) { for(y = 0; y < avctx->height; y++ ) { uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ]; memset(row, 0, avctx->width); for (plane = 0; plane < avctx->bits_per_coded_sample && buf < buf_end; plane++) { decodeplane8(row, buf, FFMIN(s->planesize, buf_end - 
buf), avctx->bits_per_coded_sample, plane); buf += s->planesize; } } } else { // PIX_FMT_BGR32 for(y = 0; y < avctx->height; y++ ) { uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]]; memset(row, 0, avctx->width << 2); for (plane = 0; plane < avctx->bits_per_coded_sample && buf < buf_end; plane++) { decodeplane32((uint32_t *) row, buf, FFMIN(s->planesize, buf_end - buf), avctx->bits_per_coded_sample, plane); buf += s->planesize; } } } *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; return buf_size; } static int decode_frame_byterun1(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { IffContext *s = avctx->priv_data; const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; const uint8_t *buf_end = buf+buf_size; int y, plane, x; if (avctx->reget_buffer(avctx, &s->frame) < 0){ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } if (avctx->codec_tag == MKTAG('I','L','B','M')) { //interleaved if (avctx->pix_fmt == PIX_FMT_PAL8) { for(y = 0; y < avctx->height ; y++ ) { uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ]; memset(row, 0, avctx->width); for (plane = 0; plane < avctx->bits_per_coded_sample; plane++) { for(x = 0; x < s->planesize && buf < buf_end; ) { int8_t value = *buf++; unsigned length; if (value >= 0) { length = value + 1; memcpy(s->planebuf + x, buf, FFMIN3(length, s->planesize - x, buf_end - buf)); buf += length; } else if (value > -128) { length = -value + 1; memset(s->planebuf + x, *buf++, FFMIN(length, s->planesize - x)); } else { //noop continue; } x += length; } decodeplane8(row, s->planebuf, s->planesize, avctx->bits_per_coded_sample, plane); } } } else { //PIX_FMT_BGR32 for(y = 0; y < avctx->height ; y++ ) { uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]]; memset(row, 0, avctx->width << 2); for (plane = 0; plane < avctx->bits_per_coded_sample; plane++) { for(x = 0; x < s->planesize && buf < buf_end; ) { int8_t value = *buf++; unsigned length; if (value >= 0) { length = value 
+ 1; memcpy(s->planebuf + x, buf, FFMIN3(length, s->planesize - x, buf_end - buf)); buf += length; } else if (value > -128) { length = -value + 1; memset(s->planebuf + x, *buf++, FFMIN(length, s->planesize - x)); } else { // noop continue; } x += length; } decodeplane32((uint32_t *) row, s->planebuf, s->planesize, avctx->bits_per_coded_sample, plane); } } } } else { for(y = 0; y < avctx->height ; y++ ) { uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]]; for(x = 0; x < avctx->width && buf < buf_end; ) { int8_t value = *buf++; unsigned length; if (value >= 0) { length = value + 1; memcpy(row + x, buf, FFMIN3(length, buf_end - buf, avctx->width - x)); buf += length; } else if (value > -128) { length = -value + 1; memset(row + x, *buf++, FFMIN(length, avctx->width - x)); } else { //noop continue; } x += length; } } } *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; return buf_size; } static av_cold int decode_end(AVCodecContext *avctx) { IffContext *s = avctx->priv_data; if (s->frame.data[0]) avctx->release_buffer(avctx, &s->frame); av_freep(&s->planebuf); return 0; } AVCodec iff_ilbm_decoder = { "iff_ilbm", AVMEDIA_TYPE_VIDEO, CODEC_ID_IFF_ILBM, sizeof(IffContext), decode_init, NULL, decode_end, decode_frame_ilbm, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("IFF ILBM"), }; AVCodec iff_byterun1_decoder = { "iff_byterun1", AVMEDIA_TYPE_VIDEO, CODEC_ID_IFF_BYTERUN1, sizeof(IffContext), decode_init, NULL, decode_end, decode_frame_byterun1, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("IFF ByteRun1"), };
123linslouis-android-video-cutter
jni/libavcodec/iff.c
C
asf20
9,680
/* * AAC encoder * Copyright (C) 2008 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_AACENC_H #define AVCODEC_AACENC_H #include "avcodec.h" #include "put_bits.h" #include "dsputil.h" #include "aac.h" #include "psymodel.h" struct AACEncContext; typedef struct AACCoefficientsEncoder { void (*search_for_quantizers)(AVCodecContext *avctx, struct AACEncContext *s, SingleChannelElement *sce, const float lambda); void (*encode_window_bands_info)(struct AACEncContext *s, SingleChannelElement *sce, int win, int group_len, const float lambda); void (*quantize_and_encode_band)(struct AACEncContext *s, PutBitContext *pb, const float *in, int size, int scale_idx, int cb, const float lambda); void (*search_for_ms)(struct AACEncContext *s, ChannelElement *cpe, const float lambda); } AACCoefficientsEncoder; extern AACCoefficientsEncoder ff_aac_coders[]; /** * AAC encoder context */ typedef struct AACEncContext { PutBitContext pb; FFTContext mdct1024; ///< long (1024 samples) frame transform context FFTContext mdct128; ///< short (128 samples) frame transform context DSPContext dsp; DECLARE_ALIGNED(16, FFTSample, output)[2048]; ///< temporary buffer for MDCT input coefficients int16_t* samples; ///< saved preprocessed input int samplerate_index; ///< MPEG-4 
samplerate index ChannelElement *cpe; ///< channel elements FFPsyContext psy; struct FFPsyPreprocessContext* psypp; AACCoefficientsEncoder *coder; int cur_channel; int last_frame; float lambda; DECLARE_ALIGNED(16, int, qcoefs)[96]; ///< quantized coefficients DECLARE_ALIGNED(16, float, scoefs)[1024]; ///< scaled coefficients } AACEncContext; #endif /* AVCODEC_AACENC_H */
123linslouis-android-video-cutter
jni/libavcodec/aacenc.h
C
asf20
2,724
/* * PNG image format * Copyright (c) 2003 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_PNG_H #define AVCODEC_PNG_H #include <stdint.h> #define PNG_COLOR_MASK_PALETTE 1 #define PNG_COLOR_MASK_COLOR 2 #define PNG_COLOR_MASK_ALPHA 4 #define PNG_COLOR_TYPE_GRAY 0 #define PNG_COLOR_TYPE_PALETTE (PNG_COLOR_MASK_COLOR | PNG_COLOR_MASK_PALETTE) #define PNG_COLOR_TYPE_RGB (PNG_COLOR_MASK_COLOR) #define PNG_COLOR_TYPE_RGB_ALPHA (PNG_COLOR_MASK_COLOR | PNG_COLOR_MASK_ALPHA) #define PNG_COLOR_TYPE_GRAY_ALPHA (PNG_COLOR_MASK_ALPHA) #define PNG_FILTER_TYPE_LOCO 64 #define PNG_FILTER_VALUE_NONE 0 #define PNG_FILTER_VALUE_SUB 1 #define PNG_FILTER_VALUE_UP 2 #define PNG_FILTER_VALUE_AVG 3 #define PNG_FILTER_VALUE_PAETH 4 #define PNG_FILTER_VALUE_MIXED 5 #define PNG_IHDR 0x0001 #define PNG_IDAT 0x0002 #define PNG_ALLIMAGE 0x0004 #define PNG_PLTE 0x0008 #define NB_PASSES 7 extern const uint8_t ff_pngsig[8]; extern const uint8_t ff_mngsig[8]; /* Mask to determine which y pixels are valid in a pass */ extern const uint8_t ff_png_pass_ymask[NB_PASSES]; /* minimum x value */ extern const uint8_t ff_png_pass_xmin[NB_PASSES]; /* x shift to get row width */ extern const uint8_t ff_png_pass_xshift[NB_PASSES]; /* Mask to determine which pixels are valid in a pass */ extern 
const uint8_t ff_png_pass_mask[NB_PASSES]; void *ff_png_zalloc(void *opaque, unsigned int items, unsigned int size); void ff_png_zfree(void *opaque, void *ptr); int ff_png_get_nb_channels(int color_type); /* compute the row size of an interleaved pass */ int ff_png_pass_row_size(int pass, int bits_per_pixel, int width); void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp); #endif /* AVCODEC_PNG_H */
123linslouis-android-video-cutter
jni/libavcodec/png.h
C
asf20
2,526
/* * MJPEG decoder * Copyright (c) 2000, 2001 Fabrice Bellard * Copyright (c) 2003 Alex Beregszaszi * Copyright (c) 2003-2004 Michael Niedermayer * * Support for external huffman table, various fixes (AVID workaround), * aspecting, new decode_frame mechanism and apple mjpeg-b support * by Alex Beregszaszi * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * MJPEG decoder. 
*/ //#define DEBUG #include <assert.h> #include "avcodec.h" #include "dsputil.h" #include "mjpeg.h" #include "mjpegdec.h" #include "jpeglsdec.h" static int build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int nb_codes, int use_static, int is_ac) { uint8_t huff_size[256+16]; uint16_t huff_code[256+16]; assert(nb_codes <= 256); memset(huff_size, 0, sizeof(huff_size)); ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table); if(is_ac){ memmove(huff_size+16, huff_size, sizeof(uint8_t)*nb_codes); memmove(huff_code+16, huff_code, sizeof(uint16_t)*nb_codes); memset(huff_size, 0, sizeof(uint8_t)*16); memset(huff_code, 0, sizeof(uint16_t)*16); nb_codes += 16; } return init_vlc(vlc, 9, nb_codes, huff_size, 1, 1, huff_code, 2, 2, use_static); } static void build_basic_mjpeg_vlc(MJpegDecodeContext * s) { build_vlc(&s->vlcs[0][0], ff_mjpeg_bits_dc_luminance, ff_mjpeg_val_dc, 12, 0, 0); build_vlc(&s->vlcs[0][1], ff_mjpeg_bits_dc_chrominance, ff_mjpeg_val_dc, 12, 0, 0); build_vlc(&s->vlcs[1][0], ff_mjpeg_bits_ac_luminance, ff_mjpeg_val_ac_luminance, 251, 0, 1); build_vlc(&s->vlcs[1][1], ff_mjpeg_bits_ac_chrominance, ff_mjpeg_val_ac_chrominance, 251, 0, 1); } av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx) { MJpegDecodeContext *s = avctx->priv_data; s->avctx = avctx; dsputil_init(&s->dsp, avctx); ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct); s->buffer_size = 0; s->buffer = NULL; s->start_code = -1; s->first_picture = 1; s->org_height = avctx->coded_height; avctx->chroma_sample_location = AVCHROMA_LOC_CENTER; build_basic_mjpeg_vlc(s); if (avctx->flags & CODEC_FLAG_EXTERN_HUFF) { av_log(avctx, AV_LOG_INFO, "mjpeg: using external huffman table\n"); init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size*8); if (ff_mjpeg_decode_dht(s)) { av_log(avctx, AV_LOG_ERROR, "mjpeg: error using external huffman table, switching back to internal\n"); build_basic_mjpeg_vlc(s); } } if (avctx->extradata_size > 9 
&& AV_RL32(avctx->extradata + 4) == MKTAG('f','i','e','l')) { if (avctx->extradata[9] == 6) { /* quicktime icefloe 019 */ s->interlace_polarity = 1; /* bottom field first */ av_log(avctx, AV_LOG_DEBUG, "mjpeg bottom field first\n"); } } if (avctx->codec->id == CODEC_ID_AMV) s->flipped = 1; return 0; } /* quantize tables */ int ff_mjpeg_decode_dqt(MJpegDecodeContext *s) { int len, index, i, j; len = get_bits(&s->gb, 16) - 2; while (len >= 65) { /* only 8 bit precision handled */ if (get_bits(&s->gb, 4) != 0) { av_log(s->avctx, AV_LOG_ERROR, "dqt: 16bit precision\n"); return -1; } index = get_bits(&s->gb, 4); if (index >= 4) return -1; av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index); /* read quant table */ for(i=0;i<64;i++) { j = s->scantable.permutated[i]; s->quant_matrixes[index][j] = get_bits(&s->gb, 8); } //XXX FIXME finetune, and perhaps add dc too s->qscale[index]= FFMAX( s->quant_matrixes[index][s->scantable.permutated[1]], s->quant_matrixes[index][s->scantable.permutated[8]]) >> 1; av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n", index, s->qscale[index]); len -= 65; } return 0; } /* decode huffman tables and build VLC decoders */ int ff_mjpeg_decode_dht(MJpegDecodeContext *s) { int len, index, i, class, n, v, code_max; uint8_t bits_table[17]; uint8_t val_table[256]; len = get_bits(&s->gb, 16) - 2; while (len > 0) { if (len < 17) return -1; class = get_bits(&s->gb, 4); if (class >= 2) return -1; index = get_bits(&s->gb, 4); if (index >= 4) return -1; n = 0; for(i=1;i<=16;i++) { bits_table[i] = get_bits(&s->gb, 8); n += bits_table[i]; } len -= 17; if (len < n || n > 256) return -1; code_max = 0; for(i=0;i<n;i++) { v = get_bits(&s->gb, 8); if (v > code_max) code_max = v; val_table[i] = v; } len -= n; /* build VLC and flush previous vlc if present */ free_vlc(&s->vlcs[class][index]); av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n", class, index, code_max + 1); if(build_vlc(&s->vlcs[class][index], bits_table, val_table, code_max + 1, 0, 
class > 0) < 0){ return -1; } } return 0; } int ff_mjpeg_decode_sof(MJpegDecodeContext *s) { int len, nb_components, i, width, height, pix_fmt_id; /* XXX: verify len field validity */ len = get_bits(&s->gb, 16); s->bits= get_bits(&s->gb, 8); if(s->pegasus_rct) s->bits=9; if(s->bits==9 && !s->pegasus_rct) s->rct=1; //FIXME ugly if (s->bits != 8 && !s->lossless){ av_log(s->avctx, AV_LOG_ERROR, "only 8 bits/component accepted\n"); return -1; } height = get_bits(&s->gb, 16); width = get_bits(&s->gb, 16); //HACK for odd_height.mov if(s->interlaced && s->width == width && s->height == height + 1) height= s->height; av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height); if(avcodec_check_dimensions(s->avctx, width, height)) return -1; nb_components = get_bits(&s->gb, 8); if (nb_components <= 0 || nb_components > MAX_COMPONENTS) return -1; if (s->ls && !(s->bits <= 8 || nb_components == 1)){ av_log(s->avctx, AV_LOG_ERROR, "only <= 8 bits/component or 16-bit gray accepted for JPEG-LS\n"); return -1; } s->nb_components = nb_components; s->h_max = 1; s->v_max = 1; for(i=0;i<nb_components;i++) { /* component id */ s->component_id[i] = get_bits(&s->gb, 8) - 1; s->h_count[i] = get_bits(&s->gb, 4); s->v_count[i] = get_bits(&s->gb, 4); /* compute hmax and vmax (only used in interleaved case) */ if (s->h_count[i] > s->h_max) s->h_max = s->h_count[i]; if (s->v_count[i] > s->v_max) s->v_max = s->v_count[i]; s->quant_index[i] = get_bits(&s->gb, 8); if (s->quant_index[i] >= 4) return -1; av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n", i, s->h_count[i], s->v_count[i], s->component_id[i], s->quant_index[i]); } if(s->ls && (s->h_max > 1 || s->v_max > 1)) { av_log(s->avctx, AV_LOG_ERROR, "Subsampling in JPEG-LS is not supported.\n"); return -1; } if(s->v_max==1 && s->h_max==1 && s->lossless==1) s->rgb=1; /* if different size, realloc/alloc picture */ /* XXX: also check h_count and v_count */ if (width != s->width || height != s->height) { 
av_freep(&s->qscale_table); s->width = width; s->height = height; s->interlaced = 0; /* test interlaced mode */ if (s->first_picture && s->org_height != 0 && s->height < ((s->org_height * 3) / 4)) { s->interlaced = 1; s->bottom_field = s->interlace_polarity; s->picture.interlaced_frame = 1; s->picture.top_field_first = !s->interlace_polarity; height *= 2; } avcodec_set_dimensions(s->avctx, width, height); s->qscale_table= av_mallocz((s->width+15)/16); s->first_picture = 0; } if(s->interlaced && (s->bottom_field == !s->interlace_polarity)) return 0; /* XXX: not complete test ! */ pix_fmt_id = (s->h_count[0] << 28) | (s->v_count[0] << 24) | (s->h_count[1] << 20) | (s->v_count[1] << 16) | (s->h_count[2] << 12) | (s->v_count[2] << 8) | (s->h_count[3] << 4) | s->v_count[3]; av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id); //NOTE we do not allocate pictures large enough for the possible padding of h/v_count being 4 if(!(pix_fmt_id & 0xD0D0D0D0)) pix_fmt_id-= (pix_fmt_id & 0xF0F0F0F0)>>1; if(!(pix_fmt_id & 0x0D0D0D0D)) pix_fmt_id-= (pix_fmt_id & 0x0F0F0F0F)>>1; switch(pix_fmt_id){ case 0x11111100: if(s->rgb){ s->avctx->pix_fmt = PIX_FMT_BGRA; }else s->avctx->pix_fmt = s->cs_itu601 ? PIX_FMT_YUV444P : PIX_FMT_YUVJ444P; assert(s->nb_components==3); break; case 0x11000000: s->avctx->pix_fmt = PIX_FMT_GRAY8; break; case 0x12111100: s->avctx->pix_fmt = s->cs_itu601 ? PIX_FMT_YUV440P : PIX_FMT_YUVJ440P; break; case 0x21111100: s->avctx->pix_fmt = s->cs_itu601 ? PIX_FMT_YUV422P : PIX_FMT_YUVJ422P; break; case 0x22111100: s->avctx->pix_fmt = s->cs_itu601 ? 
PIX_FMT_YUV420P : PIX_FMT_YUVJ420P; break; default: av_log(s->avctx, AV_LOG_ERROR, "Unhandled pixel format 0x%x\n", pix_fmt_id); return -1; } if(s->ls){ if(s->nb_components > 1) s->avctx->pix_fmt = PIX_FMT_RGB24; else if(s->bits <= 8) s->avctx->pix_fmt = PIX_FMT_GRAY8; else s->avctx->pix_fmt = PIX_FMT_GRAY16; } if(s->picture.data[0]) s->avctx->release_buffer(s->avctx, &s->picture); s->picture.reference= 0; if(s->avctx->get_buffer(s->avctx, &s->picture) < 0){ av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } s->picture.pict_type= FF_I_TYPE; s->picture.key_frame= 1; s->got_picture = 1; for(i=0; i<3; i++){ s->linesize[i]= s->picture.linesize[i] << s->interlaced; } // printf("%d %d %d %d %d %d\n", s->width, s->height, s->linesize[0], s->linesize[1], s->interlaced, s->avctx->height); if (len != (8+(3*nb_components))) { av_log(s->avctx, AV_LOG_DEBUG, "decode_sof0: error, len(%d) mismatch\n", len); } /* totally blank picture as progressive JPEG will only add details to it */ if(s->progressive){ int bw = (width + s->h_max*8-1) / (s->h_max*8); int bh = (height + s->v_max*8-1) / (s->v_max*8); for(i=0; i<s->nb_components; i++) { int size = bw * bh * s->h_count[i] * s->v_count[i]; av_freep(&s->blocks[i]); av_freep(&s->last_nnz[i]); s->blocks[i] = av_malloc(size * sizeof(**s->blocks)); s->last_nnz[i] = av_mallocz(size * sizeof(**s->last_nnz)); s->block_stride[i] = bw * s->h_count[i]; } memset(s->coefs_finished, 0, sizeof(s->coefs_finished)); } return 0; } static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index) { int code; code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2); if (code < 0) { av_log(s->avctx, AV_LOG_WARNING, "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n", 0, dc_index, &s->vlcs[0][dc_index]); return 0xffff; } if(code) return get_xbits(&s->gb, code); else return 0; } /* decode block and dequantize */ static int decode_block(MJpegDecodeContext *s, DCTELEM *block, int component, int dc_index, int ac_index, int16_t *quant_matrix) { 
int code, i, j, level, val; /* DC coef */ val = mjpeg_decode_dc(s, dc_index); if (val == 0xffff) { av_log(s->avctx, AV_LOG_ERROR, "error dc\n"); return -1; } val = val * quant_matrix[0] + s->last_dc[component]; s->last_dc[component] = val; block[0] = val; /* AC coefs */ i = 0; {OPEN_READER(re, &s->gb) for(;;) { UPDATE_CACHE(re, &s->gb); GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2) /* EOB */ if (code == 0x10) break; i += ((unsigned)code) >> 4; if(code != 0x100){ code &= 0xf; if(code > MIN_CACHE_BITS - 16){ UPDATE_CACHE(re, &s->gb) } { int cache=GET_CACHE(re,&s->gb); int sign=(~cache)>>31; level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign; } LAST_SKIP_BITS(re, &s->gb, code) if (i >= 63) { if(i == 63){ j = s->scantable.permutated[63]; block[j] = level * quant_matrix[j]; break; } av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); return -1; } j = s->scantable.permutated[i]; block[j] = level * quant_matrix[j]; } } CLOSE_READER(re, &s->gb)} return 0; } static int decode_dc_progressive(MJpegDecodeContext *s, DCTELEM *block, int component, int dc_index, int16_t *quant_matrix, int Al) { int val; s->dsp.clear_block(block); val = mjpeg_decode_dc(s, dc_index); if (val == 0xffff) { av_log(s->avctx, AV_LOG_ERROR, "error dc\n"); return -1; } val = (val * quant_matrix[0] << Al) + s->last_dc[component]; s->last_dc[component] = val; block[0] = val; return 0; } /* decode block and dequantize - progressive JPEG version */ static int decode_block_progressive(MJpegDecodeContext *s, DCTELEM *block, uint8_t *last_nnz, int ac_index, int16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN) { int code, i, j, level, val, run; if(*EOBRUN){ (*EOBRUN)--; return 0; } {OPEN_READER(re, &s->gb) for(i=ss;;i++) { UPDATE_CACHE(re, &s->gb); GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2) /* Progressive JPEG use AC coeffs from zero and this decoder sets offset 16 by default */ code -= 16; if(code & 0xF) { i += ((unsigned) code) >> 4; code &= 0xf; if(code > 
MIN_CACHE_BITS - 16){ UPDATE_CACHE(re, &s->gb) } { int cache=GET_CACHE(re,&s->gb); int sign=(~cache)>>31; level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign; } LAST_SKIP_BITS(re, &s->gb, code) if (i >= se) { if(i == se){ j = s->scantable.permutated[se]; block[j] = level * quant_matrix[j] << Al; break; } av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); return -1; } j = s->scantable.permutated[i]; block[j] = level * quant_matrix[j] << Al; }else{ run = ((unsigned) code) >> 4; if(run == 0xF){// ZRL - skip 15 coefficients i += 15; }else{ val = run; run = (1 << run); UPDATE_CACHE(re, &s->gb); run += (GET_CACHE(re, &s->gb) >> (32 - val)) & (run - 1); if(val) LAST_SKIP_BITS(re, &s->gb, val); *EOBRUN = run - 1; break; } } } CLOSE_READER(re, &s->gb)} if(i > *last_nnz) *last_nnz = i; return 0; } #define REFINE_BIT(j) {\ UPDATE_CACHE(re, &s->gb);\ sign = block[j]>>15;\ block[j] += SHOW_UBITS(re, &s->gb, 1) * ((quant_matrix[j]^sign)-sign) << Al;\ LAST_SKIP_BITS(re, &s->gb, 1);\ } #define ZERO_RUN \ for(;;i++) {\ if(i > last) {\ i += run;\ if(i > se) {\ av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);\ return -1;\ }\ break;\ }\ j = s->scantable.permutated[i];\ if(block[j])\ REFINE_BIT(j)\ else if(run-- == 0)\ break;\ } /* decode block and dequantize - progressive JPEG refinement pass */ static int decode_block_refinement(MJpegDecodeContext *s, DCTELEM *block, uint8_t *last_nnz, int ac_index, int16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN) { int code, i=ss, j, sign, val, run; int last = FFMIN(se, *last_nnz); OPEN_READER(re, &s->gb); if(*EOBRUN) (*EOBRUN)--; else { for(;;i++) { UPDATE_CACHE(re, &s->gb); GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2) /* Progressive JPEG use AC coeffs from zero and this decoder sets offset 16 by default */ code -= 16; if(code & 0xF) { run = ((unsigned) code) >> 4; UPDATE_CACHE(re, &s->gb); val = SHOW_UBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); ZERO_RUN; j = s->scantable.permutated[i]; val--; block[j] 
= ((quant_matrix[j]^val)-val) << Al; if(i == se) { if(i > *last_nnz) *last_nnz = i; CLOSE_READER(re, &s->gb) return 0; } }else{ run = ((unsigned) code) >> 4; if(run == 0xF){ ZERO_RUN; }else{ val = run; run = (1 << run); if(val) { UPDATE_CACHE(re, &s->gb); run += SHOW_UBITS(re, &s->gb, val); LAST_SKIP_BITS(re, &s->gb, val); } *EOBRUN = run - 1; break; } } } if(i > *last_nnz) *last_nnz = i; } for(;i<=last;i++) { j = s->scantable.permutated[i]; if(block[j]) REFINE_BIT(j) } CLOSE_READER(re, &s->gb); return 0; } #undef REFINE_BIT #undef ZERO_RUN static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int predictor, int point_transform){ int i, mb_x, mb_y; uint16_t (*buffer)[4]; int left[3], top[3], topleft[3]; const int linesize= s->linesize[0]; const int mask= (1<<s->bits)-1; av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, (unsigned)s->mb_width * 4 * sizeof(s->ljpeg_buffer[0][0])); buffer= s->ljpeg_buffer; for(i=0; i<3; i++){ buffer[0][i]= 1 << (s->bits + point_transform - 1); } for(mb_y = 0; mb_y < s->mb_height; mb_y++) { const int modified_predictor= mb_y ? 
predictor : 1; uint8_t *ptr = s->picture.data[0] + (linesize * mb_y); if (s->interlaced && s->bottom_field) ptr += linesize >> 1; for(i=0; i<3; i++){ top[i]= left[i]= topleft[i]= buffer[0][i]; } for(mb_x = 0; mb_x < s->mb_width; mb_x++) { if (s->restart_interval && !s->restart_count) s->restart_count = s->restart_interval; for(i=0;i<3;i++) { int pred; topleft[i]= top[i]; top[i]= buffer[mb_x][i]; PREDICT(pred, topleft[i], top[i], left[i], modified_predictor); left[i]= buffer[mb_x][i]= mask & (pred + (mjpeg_decode_dc(s, s->dc_index[i]) << point_transform)); } if (s->restart_interval && !--s->restart_count) { align_get_bits(&s->gb); skip_bits(&s->gb, 16); /* skip RSTn */ } } if(s->rct){ for(mb_x = 0; mb_x < s->mb_width; mb_x++) { ptr[4*mb_x+1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200)>>2); ptr[4*mb_x+0] = buffer[mb_x][1] + ptr[4*mb_x+1]; ptr[4*mb_x+2] = buffer[mb_x][2] + ptr[4*mb_x+1]; } }else if(s->pegasus_rct){ for(mb_x = 0; mb_x < s->mb_width; mb_x++) { ptr[4*mb_x+1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2])>>2); ptr[4*mb_x+0] = buffer[mb_x][1] + ptr[4*mb_x+1]; ptr[4*mb_x+2] = buffer[mb_x][2] + ptr[4*mb_x+1]; } }else{ for(mb_x = 0; mb_x < s->mb_width; mb_x++) { ptr[4*mb_x+0] = buffer[mb_x][2]; ptr[4*mb_x+1] = buffer[mb_x][1]; ptr[4*mb_x+2] = buffer[mb_x][0]; } } } return 0; } static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform){ int i, mb_x, mb_y; const int nb_components=3; for(mb_y = 0; mb_y < s->mb_height; mb_y++) { for(mb_x = 0; mb_x < s->mb_width; mb_x++) { if (s->restart_interval && !s->restart_count) s->restart_count = s->restart_interval; if(mb_x==0 || mb_y==0 || s->interlaced){ for(i=0;i<nb_components;i++) { uint8_t *ptr; int n, h, v, x, y, c, j, linesize; n = s->nb_blocks[i]; c = s->comp_index[i]; h = s->h_scount[i]; v = s->v_scount[i]; x = 0; y = 0; linesize= s->linesize[c]; for(j=0; j<n; j++) { int pred; ptr = s->picture.data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); 
//FIXME optimize this crap if(y==0 && mb_y==0){ if(x==0 && mb_x==0){ pred= 128 << point_transform; }else{ pred= ptr[-1]; } }else{ if(x==0 && mb_x==0){ pred= ptr[-linesize]; }else{ PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor); } } if (s->interlaced && s->bottom_field) ptr += linesize >> 1; *ptr= pred + (mjpeg_decode_dc(s, s->dc_index[i]) << point_transform); if (++x == h) { x = 0; y++; } } } }else{ for(i=0;i<nb_components;i++) { uint8_t *ptr; int n, h, v, x, y, c, j, linesize; n = s->nb_blocks[i]; c = s->comp_index[i]; h = s->h_scount[i]; v = s->v_scount[i]; x = 0; y = 0; linesize= s->linesize[c]; for(j=0; j<n; j++) { int pred; ptr = s->picture.data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor); *ptr= pred + (mjpeg_decode_dc(s, s->dc_index[i]) << point_transform); if (++x == h) { x = 0; y++; } } } } if (s->restart_interval && !--s->restart_count) { align_get_bits(&s->gb); skip_bits(&s->gb, 16); /* skip RSTn */ } } } return 0; } static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al){ int i, mb_x, mb_y; uint8_t* data[MAX_COMPONENTS]; int linesize[MAX_COMPONENTS]; if(s->flipped && s->avctx->flags & CODEC_FLAG_EMU_EDGE) { av_log(s->avctx, AV_LOG_ERROR, "Can not flip image with CODEC_FLAG_EMU_EDGE set!\n"); s->flipped = 0; } for(i=0; i < nb_components; i++) { int c = s->comp_index[i]; data[c] = s->picture.data[c]; linesize[c]=s->linesize[c]; s->coefs_finished[c] |= 1; if(s->flipped) { //picture should be flipped upside-down for this codec data[c] += (linesize[c] * (s->v_scount[i] * (8 * s->mb_height -((s->height/s->v_max)&7)) - 1 )); linesize[c] *= -1; } } for(mb_y = 0; mb_y < s->mb_height; mb_y++) { for(mb_x = 0; mb_x < s->mb_width; mb_x++) { if (s->restart_interval && !s->restart_count) s->restart_count = s->restart_interval; for(i=0;i<nb_components;i++) { uint8_t *ptr; int n, h, v, x, y, c, j; n = s->nb_blocks[i]; c 
= s->comp_index[i]; h = s->h_scount[i]; v = s->v_scount[i]; x = 0; y = 0; for(j=0;j<n;j++) { ptr = data[c] + (((linesize[c] * (v * mb_y + y) * 8) + (h * mb_x + x) * 8) >> s->avctx->lowres); if(s->interlaced && s->bottom_field) ptr += linesize[c] >> 1; if(!s->progressive) { s->dsp.clear_block(s->block); if(decode_block(s, s->block, i, s->dc_index[i], s->ac_index[i], s->quant_matrixes[ s->quant_index[c] ]) < 0) { av_log(s->avctx, AV_LOG_ERROR, "error y=%d x=%d\n", mb_y, mb_x); return -1; } s->dsp.idct_put(ptr, linesize[c], s->block); } else { int block_idx = s->block_stride[c] * (v * mb_y + y) + (h * mb_x + x); DCTELEM *block = s->blocks[c][block_idx]; if(Ah) block[0] += get_bits1(&s->gb) * s->quant_matrixes[ s->quant_index[c] ][0] << Al; else if(decode_dc_progressive(s, block, i, s->dc_index[i], s->quant_matrixes[ s->quant_index[c] ], Al) < 0) { av_log(s->avctx, AV_LOG_ERROR, "error y=%d x=%d\n", mb_y, mb_x); return -1; } } // av_log(s->avctx, AV_LOG_DEBUG, "mb: %d %d processed\n", mb_y, mb_x); //av_log(NULL, AV_LOG_DEBUG, "%d %d %d %d %d %d %d %d \n", mb_x, mb_y, x, y, c, s->bottom_field, (v * mb_y + y) * 8, (h * mb_x + x) * 8); if (++x == h) { x = 0; y++; } } } if (s->restart_interval && !--s->restart_count) { align_get_bits(&s->gb); skip_bits(&s->gb, 16); /* skip RSTn */ for (i=0; i<nb_components; i++) /* reset dc */ s->last_dc[i] = 1024; } } } return 0; } static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al){ int mb_x, mb_y; int EOBRUN = 0; int c = s->comp_index[0]; uint8_t* data = s->picture.data[c]; int linesize = s->linesize[c]; int last_scan = 0; int16_t *quant_matrix = s->quant_matrixes[ s->quant_index[c] ]; if(!Al) { s->coefs_finished[c] |= (1LL<<(se+1))-(1LL<<ss); last_scan = !~s->coefs_finished[c]; } if(s->interlaced && s->bottom_field) data += linesize >> 1; for(mb_y = 0; mb_y < s->mb_height; mb_y++) { uint8_t *ptr = data + (mb_y*linesize*8 >> s->avctx->lowres); int block_idx = mb_y * s->block_stride[c]; 
DCTELEM (*block)[64] = &s->blocks[c][block_idx]; uint8_t *last_nnz = &s->last_nnz[c][block_idx]; for(mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) { int ret; if(Ah) ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0], quant_matrix, ss, se, Al, &EOBRUN); else ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0], quant_matrix, ss, se, Al, &EOBRUN); if(ret < 0) { av_log(s->avctx, AV_LOG_ERROR, "error y=%d x=%d\n", mb_y, mb_x); return -1; } if(last_scan) { s->dsp.idct_put(ptr, linesize, *block); ptr += 8 >> s->avctx->lowres; } } } return 0; } int ff_mjpeg_decode_sos(MJpegDecodeContext *s) { int len, nb_components, i, h, v, predictor, point_transform; int index, id; const int block_size= s->lossless ? 1 : 8; int ilv, prev_shift; /* XXX: verify len field validity */ len = get_bits(&s->gb, 16); nb_components = get_bits(&s->gb, 8); if (nb_components == 0 || nb_components > MAX_COMPONENTS){ av_log(s->avctx, AV_LOG_ERROR, "decode_sos: nb_components (%d) unsupported\n", nb_components); return -1; } if (len != 6+2*nb_components) { av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len); return -1; } for(i=0;i<nb_components;i++) { id = get_bits(&s->gb, 8) - 1; av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id); /* find component index */ for(index=0;index<s->nb_components;index++) if (id == s->component_id[index]) break; if (index == s->nb_components) { av_log(s->avctx, AV_LOG_ERROR, "decode_sos: index(%d) out of components\n", index); return -1; } /* Metasoft MJPEG codec has Cb and Cr swapped */ if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J') && nb_components == 3 && s->nb_components == 3 && i) index = 3 - i; s->comp_index[i] = index; s->nb_blocks[i] = s->h_count[index] * s->v_count[index]; s->h_scount[i] = s->h_count[index]; s->v_scount[i] = s->v_count[index]; s->dc_index[i] = get_bits(&s->gb, 4); s->ac_index[i] = get_bits(&s->gb, 4); if (s->dc_index[i] < 0 || s->ac_index[i] < 0 || s->dc_index[i] >= 4 || 
s->ac_index[i] >= 4) goto out_of_range; if (!s->vlcs[0][s->dc_index[i]].table || !s->vlcs[1][s->ac_index[i]].table) goto out_of_range; } predictor= get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */ ilv= get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */ prev_shift = get_bits(&s->gb, 4); /* Ah */ point_transform= get_bits(&s->gb, 4); /* Al */ for(i=0;i<nb_components;i++) s->last_dc[i] = 1024; if (nb_components > 1) { /* interleaved stream */ s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size); s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size); } else if(!s->ls) { /* skip this for JPEG-LS */ h = s->h_max / s->h_scount[0]; v = s->v_max / s->v_scount[0]; s->mb_width = (s->width + h * block_size - 1) / (h * block_size); s->mb_height = (s->height + v * block_size - 1) / (v * block_size); s->nb_blocks[0] = 1; s->h_scount[0] = 1; s->v_scount[0] = 1; } if(s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d %s\n", s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "", predictor, point_transform, ilv, s->bits, s->pegasus_rct ? "PRCT" : (s->rct ? 
"RCT" : "")); /* mjpeg-b can have padding bytes between sos and image data, skip them */ for (i = s->mjpb_skiptosod; i > 0; i--) skip_bits(&s->gb, 8); if(s->lossless){ if(CONFIG_JPEGLS_DECODER && s->ls){ // for(){ // reset_ls_coding_parameters(s, 0); if(ff_jpegls_decode_picture(s, predictor, point_transform, ilv) < 0) return -1; }else{ if(s->rgb){ if(ljpeg_decode_rgb_scan(s, predictor, point_transform) < 0) return -1; }else{ if(ljpeg_decode_yuv_scan(s, predictor, point_transform) < 0) return -1; } } }else{ if(s->progressive && predictor) { if(mjpeg_decode_scan_progressive_ac(s, predictor, ilv, prev_shift, point_transform) < 0) return -1; } else { if(mjpeg_decode_scan(s, nb_components, prev_shift, point_transform) < 0) return -1; } } emms_c(); return 0; out_of_range: av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n"); return -1; } static int mjpeg_decode_dri(MJpegDecodeContext *s) { if (get_bits(&s->gb, 16) != 4) return -1; s->restart_interval = get_bits(&s->gb, 16); s->restart_count = 0; av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n", s->restart_interval); return 0; } static int mjpeg_decode_app(MJpegDecodeContext *s) { int len, id, i; len = get_bits(&s->gb, 16); if (len < 5) return -1; if(8*len + get_bits_count(&s->gb) > s->gb.size_in_bits) return -1; id = (get_bits(&s->gb, 16) << 16) | get_bits(&s->gb, 16); id = be2me_32(id); len -= 6; if(s->avctx->debug & FF_DEBUG_STARTCODE){ av_log(s->avctx, AV_LOG_DEBUG, "APPx %8X\n", id); } /* buggy AVID, it puts EOI only at every 10th frame */ /* also this fourcc is used by non-avid files too, it holds some informations, but it's always present in AVID creates files */ if (id == AV_RL32("AVI1")) { /* structure: 4bytes AVI1 1bytes polarity 1bytes always zero 4bytes field_size 4bytes field_size_less_padding */ s->buggy_avid = 1; // if (s->first_picture) // printf("mjpeg: workarounding buggy AVID\n"); i = get_bits(&s->gb, 8); if (i==2) s->bottom_field= 1; else if(i==1) s->bottom_field= 0; #if 0 
skip_bits(&s->gb, 8); skip_bits(&s->gb, 32); skip_bits(&s->gb, 32); len -= 10; #endif // if (s->interlace_polarity) // printf("mjpeg: interlace polarity: %d\n", s->interlace_polarity); goto out; } // len -= 2; if (id == AV_RL32("JFIF")) { int t_w, t_h, v1, v2; skip_bits(&s->gb, 8); /* the trailing zero-byte */ v1= get_bits(&s->gb, 8); v2= get_bits(&s->gb, 8); skip_bits(&s->gb, 8); s->avctx->sample_aspect_ratio.num= get_bits(&s->gb, 16); s->avctx->sample_aspect_ratio.den= get_bits(&s->gb, 16); if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n", v1, v2, s->avctx->sample_aspect_ratio.num, s->avctx->sample_aspect_ratio.den ); t_w = get_bits(&s->gb, 8); t_h = get_bits(&s->gb, 8); if (t_w && t_h) { /* skip thumbnail */ if (len-10-(t_w*t_h*3) > 0) len -= t_w*t_h*3; } len -= 10; goto out; } if (id == AV_RL32("Adob") && (get_bits(&s->gb, 8) == 'e')) { if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found\n"); skip_bits(&s->gb, 16); /* version */ skip_bits(&s->gb, 16); /* flags0 */ skip_bits(&s->gb, 16); /* flags1 */ skip_bits(&s->gb, 8); /* transform */ len -= 7; goto out; } if (id == AV_RL32("LJIF")){ if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, "Pegasus lossless jpeg header found\n"); skip_bits(&s->gb, 16); /* version ? */ skip_bits(&s->gb, 16); /* unknwon always 0? */ skip_bits(&s->gb, 16); /* unknwon always 0? */ skip_bits(&s->gb, 16); /* unknwon always 0? 
*/ switch( get_bits(&s->gb, 8)){ case 1: s->rgb= 1; s->pegasus_rct=0; break; case 2: s->rgb= 1; s->pegasus_rct=1; break; default: av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace\n"); } len -= 9; goto out; } /* Apple MJPEG-A */ if ((s->start_code == APP1) && (len > (0x28 - 8))) { id = (get_bits(&s->gb, 16) << 16) | get_bits(&s->gb, 16); id = be2me_32(id); len -= 4; if (id == AV_RL32("mjpg")) /* Apple MJPEG-A */ { #if 0 skip_bits(&s->gb, 32); /* field size */ skip_bits(&s->gb, 32); /* pad field size */ skip_bits(&s->gb, 32); /* next off */ skip_bits(&s->gb, 32); /* quant off */ skip_bits(&s->gb, 32); /* huff off */ skip_bits(&s->gb, 32); /* image off */ skip_bits(&s->gb, 32); /* scan off */ skip_bits(&s->gb, 32); /* data off */ #endif if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n"); } } out: /* slow but needed for extreme adobe jpegs */ if (len < 0) av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error, decode_app parser read over the end\n"); while(--len > 0) skip_bits(&s->gb, 8); return 0; } static int mjpeg_decode_com(MJpegDecodeContext *s) { int len = get_bits(&s->gb, 16); if (len >= 2 && 8*len - 16 + get_bits_count(&s->gb) <= s->gb.size_in_bits) { char *cbuf = av_malloc(len - 1); if (cbuf) { int i; for (i = 0; i < len - 2; i++) cbuf[i] = get_bits(&s->gb, 8); if (i > 0 && cbuf[i-1] == '\n') cbuf[i-1] = 0; else cbuf[i] = 0; if(s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, "mjpeg comment: '%s'\n", cbuf); /* buggy avid, it puts EOI only at every 10th frame */ if (!strcmp(cbuf, "AVID")) { s->buggy_avid = 1; // if (s->first_picture) // printf("mjpeg: workarounding buggy AVID\n"); } else if(!strcmp(cbuf, "CS=ITU601")){ s->cs_itu601= 1; } else if((len > 20 && !strncmp(cbuf, "Intel(R) JPEG Library", 21)) || (len > 19 && !strncmp(cbuf, "Metasoft MJPEG Codec", 20))){ s->flipped = 1; } av_free(cbuf); } } return 0; } #if 0 static int valid_marker_list[] = { /* 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, a, b, 
c, d, e, f */ /* 0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* b */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* c */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* d */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* e */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* f */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, } #endif /* return the 8 bit start code value and update the search state. Return -1 if no start code found */ static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end) { const uint8_t *buf_ptr; unsigned int v, v2; int val; #ifdef DEBUG int skipped=0; #endif buf_ptr = *pbuf_ptr; while (buf_ptr < buf_end) { v = *buf_ptr++; v2 = *buf_ptr; if ((v == 0xff) && (v2 >= 0xc0) && (v2 <= 0xfe) && buf_ptr < buf_end) { val = *buf_ptr++; goto found; } #ifdef DEBUG skipped++; #endif } val = -1; found: dprintf(NULL, "find_marker skipped %d bytes\n", skipped); *pbuf_ptr = buf_ptr; return val; } int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MJpegDecodeContext *s = avctx->priv_data; const uint8_t *buf_end, *buf_ptr; int start_code; AVFrame *picture = data; s->got_picture = 0; // picture from previous image can not be reused buf_ptr = buf; buf_end = buf + buf_size; while (buf_ptr < buf_end) { /* find start next marker */ start_code = find_marker(&buf_ptr, buf_end); { 
/* EOF */ if (start_code < 0) { goto the_end; } else { av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%td\n", start_code, buf_end - buf_ptr); if ((buf_end - buf_ptr) > s->buffer_size) { av_free(s->buffer); s->buffer_size = buf_end-buf_ptr; s->buffer = av_malloc(s->buffer_size + FF_INPUT_BUFFER_PADDING_SIZE); av_log(avctx, AV_LOG_DEBUG, "buffer too small, expanding to %d bytes\n", s->buffer_size); } /* unescape buffer of SOS, use special treatment for JPEG-LS */ if (start_code == SOS && !s->ls) { const uint8_t *src = buf_ptr; uint8_t *dst = s->buffer; while (src<buf_end) { uint8_t x = *(src++); *(dst++) = x; if (avctx->codec_id != CODEC_ID_THP) { if (x == 0xff) { while (src < buf_end && x == 0xff) x = *(src++); if (x >= 0xd0 && x <= 0xd7) *(dst++) = x; else if (x) break; } } } init_get_bits(&s->gb, s->buffer, (dst - s->buffer)*8); av_log(avctx, AV_LOG_DEBUG, "escaping removed %td bytes\n", (buf_end - buf_ptr) - (dst - s->buffer)); } else if(start_code == SOS && s->ls){ const uint8_t *src = buf_ptr; uint8_t *dst = s->buffer; int bit_count = 0; int t = 0, b = 0; PutBitContext pb; s->cur_scan++; /* find marker */ while (src + t < buf_end){ uint8_t x = src[t++]; if (x == 0xff){ while((src + t < buf_end) && x == 0xff) x = src[t++]; if (x & 0x80) { t -= 2; break; } } } bit_count = t * 8; init_put_bits(&pb, dst, t); /* unescape bitstream */ while(b < t){ uint8_t x = src[b++]; put_bits(&pb, 8, x); if(x == 0xFF){ x = src[b++]; put_bits(&pb, 7, x); bit_count--; } } flush_put_bits(&pb); init_get_bits(&s->gb, dst, bit_count); } else init_get_bits(&s->gb, buf_ptr, (buf_end - buf_ptr)*8); s->start_code = start_code; if(s->avctx->debug & FF_DEBUG_STARTCODE){ av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code); } /* process markers */ if (start_code >= 0xd0 && start_code <= 0xd7) { av_log(avctx, AV_LOG_DEBUG, "restart marker: %d\n", start_code&0x0f); /* APP fields */ } else if (start_code >= APP0 && start_code <= APP15) { mjpeg_decode_app(s); /* Comment */ } else 
if (start_code == COM){ mjpeg_decode_com(s); } switch(start_code) { case SOI: s->restart_interval = 0; s->restart_count = 0; /* nothing to do on SOI */ break; case DQT: ff_mjpeg_decode_dqt(s); break; case DHT: if(ff_mjpeg_decode_dht(s) < 0){ av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n"); return -1; } break; case SOF0: case SOF1: s->lossless=0; s->ls=0; s->progressive=0; if (ff_mjpeg_decode_sof(s) < 0) return -1; break; case SOF2: s->lossless=0; s->ls=0; s->progressive=1; if (ff_mjpeg_decode_sof(s) < 0) return -1; break; case SOF3: s->lossless=1; s->ls=0; s->progressive=0; if (ff_mjpeg_decode_sof(s) < 0) return -1; break; case SOF48: s->lossless=1; s->ls=1; s->progressive=0; if (ff_mjpeg_decode_sof(s) < 0) return -1; break; case LSE: if (!CONFIG_JPEGLS_DECODER || ff_jpegls_decode_lse(s) < 0) return -1; break; case EOI: s->cur_scan = 0; if ((s->buggy_avid && !s->interlaced) || s->restart_interval) break; eoi_parser: if (!s->got_picture) { av_log(avctx, AV_LOG_WARNING, "Found EOI before any SOF, ignoring\n"); break; } { if (s->interlaced) { s->bottom_field ^= 1; /* if not bottom field, do not output image yet */ if (s->bottom_field == !s->interlace_polarity) goto not_the_end; } *picture = s->picture; *data_size = sizeof(AVFrame); if(!s->lossless){ picture->quality= FFMAX3(s->qscale[0], s->qscale[1], s->qscale[2]); picture->qstride= 0; picture->qscale_table= s->qscale_table; memset(picture->qscale_table, picture->quality, (s->width+15)/16); if(avctx->debug & FF_DEBUG_QP) av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", picture->quality); picture->quality*= FF_QP2LAMBDA; } goto the_end; } break; case SOS: if (!s->got_picture) { av_log(avctx, AV_LOG_WARNING, "Can not process SOS before SOF, skipping\n"); break; } ff_mjpeg_decode_sos(s); /* buggy avid puts EOI every 10-20th frame */ /* if restart period is over process EOI */ if ((s->buggy_avid && !s->interlaced) || s->restart_interval) goto eoi_parser; break; case DRI: mjpeg_decode_dri(s); break; case SOF5: case 
SOF6: case SOF7: case SOF9: case SOF10: case SOF11: case SOF13: case SOF14: case SOF15: case JPG: av_log(avctx, AV_LOG_ERROR, "mjpeg: unsupported coding type (%x)\n", start_code); break; // default: // printf("mjpeg: unsupported marker (%x)\n", start_code); // break; } not_the_end: /* eof process start code */ buf_ptr += (get_bits_count(&s->gb)+7)/8; av_log(avctx, AV_LOG_DEBUG, "marker parser used %d bytes (%d bits)\n", (get_bits_count(&s->gb)+7)/8, get_bits_count(&s->gb)); } } } if (s->got_picture) { av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n"); goto eoi_parser; } av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n"); return -1; the_end: av_log(avctx, AV_LOG_DEBUG, "mjpeg decode frame unused %td bytes\n", buf_end - buf_ptr); // return buf_end - buf_ptr; return buf_ptr - buf; } av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx) { MJpegDecodeContext *s = avctx->priv_data; int i, j; if (s->picture.data[0]) avctx->release_buffer(avctx, &s->picture); av_free(s->buffer); av_free(s->qscale_table); av_freep(&s->ljpeg_buffer); s->ljpeg_buffer_size=0; for(i=0;i<2;i++) { for(j=0;j<4;j++) free_vlc(&s->vlcs[i][j]); } for(i=0; i<MAX_COMPONENTS; i++) { av_freep(&s->blocks[i]); av_freep(&s->last_nnz[i]); } return 0; } AVCodec mjpeg_decoder = { "mjpeg", AVMEDIA_TYPE_VIDEO, CODEC_ID_MJPEG, sizeof(MJpegDecodeContext), ff_mjpeg_decode_init, NULL, ff_mjpeg_decode_end, ff_mjpeg_decode_frame, CODEC_CAP_DR1, NULL, .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"), }; AVCodec thp_decoder = { "thp", AVMEDIA_TYPE_VIDEO, CODEC_ID_THP, sizeof(MJpegDecodeContext), ff_mjpeg_decode_init, NULL, ff_mjpeg_decode_end, ff_mjpeg_decode_frame, CODEC_CAP_DR1, NULL, .long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"), };
123linslouis-android-video-cutter
jni/libavcodec/mjpegdec.c
C
asf20
53,163
/* * AC-3 and E-AC-3 decoder tables * Copyright (c) 2007 Bartlomiej Wolowiec <bartek.wolowiec@gmail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_AC3DEC_DATA_H #define AVCODEC_AC3DEC_DATA_H #include <stdint.h> extern const uint8_t ff_ac3_ungroup_3_in_5_bits_tab[32][3]; extern const uint8_t ff_ac3_rematrix_band_tab[5]; extern const uint8_t ff_eac3_hebap_tab[64]; extern const uint8_t ff_eac3_default_cpl_band_struct[18]; extern const uint8_t ff_eac3_default_spx_band_struct[17]; #endif /* AVCODEC_AC3DEC_DATA_H */
123linslouis-android-video-cutter
jni/libavcodec/ac3dec_data.h
C
asf20
1,260
/* * Copyright (c) 2008 BBC, Anuradha Suraparaju <asuraparaju at gmail dot com > * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * functions common to libdirac and libschroedinger */ #include "libdirac_libschro.h" static const FfmpegDiracSchroVideoFormatInfo ff_dirac_schro_video_format_info[] = { { 640, 480, 24000, 1001}, { 176, 120, 15000, 1001}, { 176, 144, 25, 2 }, { 352, 240, 15000, 1001}, { 352, 288, 25, 2 }, { 704, 480, 15000, 1001}, { 704, 576, 25, 2 }, { 720, 480, 30000, 1001}, { 720, 576, 25, 1 }, { 1280, 720, 60000, 1001}, { 1280, 720, 50, 1 }, { 1920, 1080, 30000, 1001}, { 1920, 1080, 25, 1 }, { 1920, 1080, 60000, 1001}, { 1920, 1080, 50, 1 }, { 2048, 1080, 24, 1 }, { 4096, 2160, 24, 1 }, }; unsigned int ff_dirac_schro_get_video_format_idx(AVCodecContext *avccontext) { unsigned int ret_idx = 0; unsigned int idx; unsigned int num_formats = sizeof(ff_dirac_schro_video_format_info) / sizeof(ff_dirac_schro_video_format_info[0]); for (idx = 1; idx < num_formats; ++idx) { const FfmpegDiracSchroVideoFormatInfo *vf = &ff_dirac_schro_video_format_info[idx]; if (avccontext->width == vf->width && avccontext->height == vf->height) { ret_idx = idx; if (avccontext->time_base.den == vf->frame_rate_num && avccontext->time_base.num == vf->frame_rate_denom) return idx; } } 
return ret_idx; } void ff_dirac_schro_queue_init(FfmpegDiracSchroQueue *queue) { queue->p_head = queue->p_tail = NULL; queue->size = 0; } void ff_dirac_schro_queue_free(FfmpegDiracSchroQueue *queue, void (*free_func)(void *)) { while (queue->p_head) free_func(ff_dirac_schro_queue_pop(queue)); } int ff_dirac_schro_queue_push_back(FfmpegDiracSchroQueue *queue, void *p_data) { FfmpegDiracSchroQueueElement *p_new = av_mallocz(sizeof(FfmpegDiracSchroQueueElement)); if (!p_new) return -1; p_new->data = p_data; if (!queue->p_head) queue->p_head = p_new; else queue->p_tail->next = p_new; queue->p_tail = p_new; ++queue->size; return 0; } void *ff_dirac_schro_queue_pop(FfmpegDiracSchroQueue *queue) { FfmpegDiracSchroQueueElement *top = queue->p_head; if (top) { void *data = top->data; queue->p_head = queue->p_head->next; --queue->size; av_freep(&top); return data; } return NULL; }
123linslouis-android-video-cutter
jni/libavcodec/libdirac_libschro.c
C
asf20
3,358