code stringlengths 1 2.01M | repo_name stringlengths 3 62 | path stringlengths 1 267 | language stringclasses 231 values | license stringclasses 13 values | size int64 1 2.01M |
|---|---|---|---|---|---|
/*
* Sierra SOL demuxer
* Copyright Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* Based on documents from Game Audio Player and own research
*/
#include "libavutil/bswap.h"
#include "avformat.h"
#include "raw.h"
/* if we don't know the size in advance */
#define AU_UNKNOWN_SIZE ((uint32_t)(~0))
static int sol_probe(AVProbeData *p)
{
    /* Check the SOL file header: a little-endian magic word followed by the
     * "SOL\0" tag.  The magic is assembled byte-by-byte instead of casting
     * p->buf to uint16_t*, which was an unaligned, strict-aliasing-violating
     * access (undefined behavior on some targets). */
    uint16_t magic = p->buf[0] | (p->buf[1] << 8);

    if ((magic == 0x0B8D || magic == 0x0C0D || magic == 0x0C8D) &&
        p->buf[2] == 'S' && p->buf[3] == 'O' &&
        p->buf[4] == 'L' && p->buf[5] == 0)
        return AVPROBE_SCORE_MAX;
    return 0;
}
#define SOL_DPCM    1
#define SOL_16BIT   4
#define SOL_STEREO 16

/**
 * Map a SOL magic word and type flags to a codec ID.
 * Old-style files (magic 0x0B8D) are either DPCM or unsigned 8-bit PCM.
 * The original implementation had three DPCM sub-branches that all
 * returned CODEC_ID_SOL_DPCM; they are collapsed here (dead branches).
 */
static enum CodecID sol_codec_id(int magic, int type)
{
    if (magic == 0x0B8D)
        return (type & SOL_DPCM) ? CODEC_ID_SOL_DPCM : CODEC_ID_PCM_U8;
    if (type & SOL_DPCM)
        return CODEC_ID_SOL_DPCM;
    return (type & SOL_16BIT) ? CODEC_ID_PCM_S16LE : CODEC_ID_PCM_U8;
}
/* Return the SOL DPCM sub-format tag for the given magic/type, or -1 when
 * the stream is not DPCM at all. */
static int sol_codec_type(int magic, int type)
{
    if (magic == 0x0B8D)
        return 1; /* SOL_DPCM_OLD */
    if (!(type & SOL_DPCM))
        return -1;
    if (type & SOL_16BIT)
        return 3; /* SOL_DPCM_NEW16 */
    if (magic == 0x0C8D)
        return 1; /* SOL_DPCM_OLD */
    return 2;     /* SOL_DPCM_NEW8 */
}
/* Old-style SOL files are always mono; otherwise the stereo bit decides. */
static int sol_channels(int magic, int type)
{
    if (magic == 0x0B8D)
        return 1;
    return (type & SOL_STEREO) ? 2 : 1;
}
/**
 * Read the SOL file header and create the single audio stream.
 * Layout: le16 magic, "SOL\0" tag, le16 sample rate, u8 type flags,
 * le32 data size, then one padding byte for new-style files.
 * @return 0 on success, -1 on bad tag or invalid sample rate
 */
static int sol_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    unsigned int magic, tag;
    ByteIOContext *pb = s->pb;
    unsigned int id, channels, rate, type;
    enum CodecID codec;
    AVStream *st;

    /* check "SOL\0" header tag */
    magic = get_le16(pb);
    tag = get_le32(pb);
    if (tag != MKTAG('S', 'O', 'L', 0))
        return -1;
    rate = get_le16(pb);
    type = get_byte(pb);
    get_le32(pb); /* data size: unused, the demuxer reads until EOF */
    if (magic != 0x0B8D)
        get_byte(pb); /* newer SOLs contain a padding byte */

    if (!rate) /* a zero sample rate would yield an invalid time base below */
        return -1;

    codec = sol_codec_id(magic, type);
    channels = sol_channels(magic, type);

    /* the DPCM sub-format is carried in the codec tag */
    if (codec == CODEC_ID_SOL_DPCM)
        id = sol_codec_type(magic, type);
    else id = 0;

    /* now we are ready: build format streams */
    st = av_new_stream(s, 0);
    if (!st)
        return -1;
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_tag = id;
    st->codec->codec_id = codec;
    st->codec->channels = channels;
    st->codec->sample_rate = rate;
    av_set_pts_info(st, 64, 1, rate);
    return 0;
}
#define MAX_SIZE 4096

/**
 * Read one packet of raw audio data (up to MAX_SIZE bytes).
 * @return 0 on success, a negative AVERROR code on EOF or read failure
 */
static int sol_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    int ret;

    if (url_feof(s->pb))
        return AVERROR(EIO);
    ret = av_get_packet(s->pb, pkt, MAX_SIZE);
    if (ret < 0)
        /* propagate the error instead of returning success with a
         * negative pkt->size, as the old code did */
        return ret;
    pkt->stream_index = 0;

    /* we need to modify the packet size here to handle the last (short)
       packet */
    pkt->size = ret;
    return 0;
}
AVInputFormat sol_demuxer = {
"sol",
NULL_IF_CONFIG_SMALL("Sierra SOL format"),
0,
sol_probe,
sol_read_header,
sol_read_packet,
NULL,
pcm_read_seek,
};
| 123linslouis-android-video-cutter | jni/libavformat/sol.c | C | asf20 | 4,008 |
/**
Copyright (C) 2005 Michael Ahlberg, Måns Rullgård
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
**/
#include <ctype.h>
#include <stdlib.h>

#include "libavutil/avstring.h"
#include "libavutil/bswap.h"
#include "libavcodec/get_bits.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "oggdec.h"
/**
 * Handle an OGM-style chapter VorbisComment tag.
 * "CHAPTERxx" defines a chapter start time, "CHAPTERxxNAME" its title.
 *
 * Ownership: returns 1 when the tag was consumed — key is freed here and
 * val is either freed or handed to the chapter metadata (DONT_STRDUP_VAL).
 * Returns 0 when the tag was not recognized; the caller keeps ownership of
 * both key and val.
 */
static int ogm_chapter(AVFormatContext *as, uint8_t *key, uint8_t *val)
{
    int i, cnum, h, m, s, ms, keylen = strlen(key);
    AVChapter *chapter = NULL;

    if (keylen < 9 || sscanf(key, "CHAPTER%02d", &cnum) != 1)
        return 0;

    if (keylen == 9) {
        /* bare "CHAPTERxx": value is a "HH:MM:SS.mmm" start timestamp */
        if (sscanf(val, "%02d:%02d:%02d.%03d", &h, &m, &s, &ms) < 4)
            return 0;

        /* chapter times are expressed in a fixed 1/1000 time base */
        ff_new_chapter(as, cnum, (AVRational){1,1000},
                       ms + 1000*(s + 60*(m + 60*h)),
                       AV_NOPTS_VALUE, NULL);
        av_free(val);
    } else if (!strcmp(key+9, "NAME")) {
        /* "CHAPTERxxNAME": attach the title to the matching chapter id */
        for(i = 0; i < as->nb_chapters; i++)
            if (as->chapters[i]->id == cnum) {
                chapter = as->chapters[i];
                break;
            }
        if (!chapter)
            return 0;

        av_metadata_set2(&chapter->metadata, "title", val,
                         AV_METADATA_DONT_STRDUP_VAL);
    } else
        return 0;

    av_free(key);
    return 1;
}
/**
 * Parse a Vorbis comment (metadata) block into an AVMetadata dictionary.
 * Layout per the Vorbis I spec: le32 vendor_length, vendor string,
 * le32 comment count, then count x (le32 length, "KEY=value" entries).
 *
 * Keys are uppercased; chapter tags are routed through ogm_chapter().
 * @return 0 on success (tolerant of truncation), -1 on a malformed prefix
 */
int
ff_vorbis_comment(AVFormatContext * as, AVMetadata **m, const uint8_t *buf, int size)
{
    const uint8_t *p = buf;
    const uint8_t *end = buf + size;
    unsigned n, j;
    int s;

    if (size < 8) /* must have vendor_length and user_comment_list_length */
        return -1;

    s = bytestream_get_le32(&p);

    if (end - p - 4 < s || s < 0)
        return -1;

    p += s; /* skip the vendor string */

    n = bytestream_get_le32(&p);

    while (end - p >= 4 && n > 0) {
        const char *t, *v;
        int tl, vl;

        s = bytestream_get_le32(&p);

        if (end - p < s || s < 0)
            break;

        t = p;
        p += s;
        n--;

        v = memchr(t, '=', s);
        if (!v)
            continue;

        tl = v - t;
        vl = s - tl - 1;
        v++;

        if (tl && vl) {
            char *tt, *ct;

            tt = av_malloc(tl + 1);
            ct = av_malloc(vl + 1);
            if (!tt || !ct) {
                av_freep(&tt);
                av_freep(&ct);
                av_log(as, AV_LOG_WARNING, "out-of-memory error. skipping VorbisComment tag.\n");
                continue;
            }

            for (j = 0; j < tl; j++)
                /* the cast avoids undefined behavior when a key byte is
                 * >= 0x80 and plain char is signed (toupper's argument
                 * must be representable as unsigned char) */
                tt[j] = toupper((unsigned char)t[j]);
            tt[tl] = 0;

            memcpy(ct, v, vl);
            ct[vl] = 0;

            /* ogm_chapter() takes ownership when it returns 1; otherwise
             * the metadata dictionary takes both strings */
            if (!ogm_chapter(as, tt, ct))
                av_metadata_set2(m, tt, ct,
                                 AV_METADATA_DONT_STRDUP_KEY |
                                 AV_METADATA_DONT_STRDUP_VAL);
        }
    }

    if (p != end)
        av_log(as, AV_LOG_INFO, "%ti bytes of comment header remain\n", end-p);
    if (n > 0)
        av_log(as, AV_LOG_INFO,
               "truncated comment header, %i comments not found\n", n);

    return 0;
}
/** Parse the vorbis header
* Vorbis Identification header from Vorbis_I_spec.html#vorbis-spec-codec
* [vorbis_version] = read 32 bits as unsigned integer | Not used
* [audio_channels] = read 8 bit integer as unsigned | Used
* [audio_sample_rate] = read 32 bits as unsigned integer | Used
* [bitrate_maximum] = read 32 bits as signed integer | Not used yet
* [bitrate_nominal] = read 32 bits as signed integer | Not used yet
* [bitrate_minimum] = read 32 bits as signed integer | Used as bitrate
* [blocksize_0] = read 4 bits as unsigned integer | Not Used
* [blocksize_1] = read 4 bits as unsigned integer | Not Used
* [framing_flag] = read one bit | Not Used
* */
/* Per-stream storage for the three Vorbis header packets (identification,
 * comment, setup) until they are merged into codec extradata. */
struct oggvorbis_private {
    unsigned int len[3];      // size of each header packet in bytes
    unsigned char *packet[3]; // copies of the header packets
};
/**
 * Concatenate the three saved Vorbis header packets into a single
 * Xiph-laced extradata buffer (count byte, laced lengths of the first two
 * packets, then the packet payloads).
 * The three packet copies in priv are freed in the process.
 * @return the extradata size, or 0 on allocation failure (*buf is NULL then)
 */
static unsigned int
fixup_vorbis_headers(AVFormatContext * as, struct oggvorbis_private *priv,
                     uint8_t **buf)
{
    int i, offset, len;
    unsigned char *ptr;

    len = priv->len[0] + priv->len[1] + priv->len[2];
    /* len/255 + 64 leaves room for the lacing bytes and padding */
    ptr = *buf = av_mallocz(len + len/255 + 64);
    if (!ptr) /* was dereferenced unchecked before */
        return 0;

    ptr[0] = 2; /* number of laced packets preceding the last one */
    offset = 1;
    offset += av_xiphlacing(&ptr[offset], priv->len[0]);
    offset += av_xiphlacing(&ptr[offset], priv->len[1]);
    for (i = 0; i < 3; i++) {
        memcpy(&ptr[offset], priv->packet[i], priv->len[i]);
        offset += priv->len[i];
        av_freep(&priv->packet[i]);
    }
    /* shrink to the actual size; on failure keep the (larger) valid buffer
     * instead of leaking it as the old overwrite pattern did */
    ptr = av_realloc(*buf, offset + FF_INPUT_BUFFER_PADDING_SIZE);
    if (ptr)
        *buf = ptr;
    return offset;
}
/**
 * Parse one Vorbis header packet for stream idx.
 * Odd packet types 1/3/5 are the identification/comment/setup headers;
 * the identification header fills the codec parameters, the comment
 * header the metadata, and the setup header triggers extradata assembly.
 * @return 1 for a consumed header packet, 0 for a data packet, -1 on error
 */
static int
vorbis_header (AVFormatContext * s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    AVStream *st = s->streams[idx];
    struct oggvorbis_private *priv;
    int pkt_type;

    /* validate the size before touching the packet data; the old code read
     * os->buf[os->pstart] before checking psize */
    if (os->psize < 1)
        return -1;
    pkt_type = os->buf[os->pstart];

    if (!(pkt_type & 1))
        return 0; /* even type: an audio packet, not a header */
    if (pkt_type > 5)
        return -1;

    if (!os->private) {
        os->private = av_mallocz(sizeof(struct oggvorbis_private));
        if (!os->private)
            return 0;
    }

    priv = os->private;
    priv->len[pkt_type >> 1] = os->psize;
    priv->packet[pkt_type >> 1] = av_mallocz(os->psize);
    if (!priv->packet[pkt_type >> 1]) /* was memcpy'd into unchecked */
        return -1;
    memcpy(priv->packet[pkt_type >> 1], os->buf + os->pstart, os->psize);
    if (os->buf[os->pstart] == 1) {
        const uint8_t *p = os->buf + os->pstart + 7; /* skip "\001vorbis" tag */
        unsigned blocksize, bs0, bs1;

        if (os->psize != 30)
            return -1;

        if (bytestream_get_le32(&p) != 0) /* vorbis_version */
            return -1;

        st->codec->channels = bytestream_get_byte(&p);
        st->codec->sample_rate = bytestream_get_le32(&p);
        p += 4; // skip maximum bitrate
        st->codec->bit_rate = bytestream_get_le32(&p); // nominal bitrate
        p += 4; // skip minimum bitrate

        blocksize = bytestream_get_byte(&p);
        bs0 = blocksize & 15;
        bs1 = blocksize >> 4;

        /* blocksizes are log2 values; spec requires bs0 <= bs1, 64..8192 */
        if (bs0 > bs1)
            return -1;
        if (bs0 < 6 || bs1 > 13)
            return -1;

        if (bytestream_get_byte(&p) != 1) /* framing_flag */
            return -1;

        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = CODEC_ID_VORBIS;

        st->time_base.num = 1;
        st->time_base.den = st->codec->sample_rate;
    } else if (os->buf[os->pstart] == 3) {
        /* comment header: 7-byte magic, then the comment block; the final
         * byte is the framing flag, hence psize - 8 */
        if (os->psize > 8)
            ff_vorbis_comment (s, &st->metadata, os->buf + os->pstart + 7, os->psize - 8);
    } else {
        /* setup header (type 5): all three headers are now available */
        st->codec->extradata_size =
            fixup_vorbis_headers(s, priv, &st->codec->extradata);
    }

    return 1;
}
/* Ogg codec descriptor: packets starting with "\001vorbis" are dispatched
 * to vorbis_header() by the generic Ogg demuxer. */
const struct ogg_codec ff_vorbis_codec = {
    .magic = "\001vorbis",
    .magicsize = 7,
    .header = vorbis_header
};
| 123linslouis-android-video-cutter | jni/libavformat/oggparsevorbis.c | C | asf20 | 7,724 |
/*
* ID3v1 header parser
* Copyright (c) 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "id3v1.h"
#include "libavcodec/avcodec.h"
#include "libavutil/avstring.h"
/* ID3v1 genre names, indexed by the genre byte of the tag.
 * 0-79 are the original ID3v1 genres; 80 and above are the Winamp
 * extensions.  Entries beyond ID3v1_GENRE_MAX are treated as unknown. */
const char * const ff_id3v1_genre_str[ID3v1_GENRE_MAX + 1] = {
    [0] = "Blues",
    [1] = "Classic Rock",
    [2] = "Country",
    [3] = "Dance",
    [4] = "Disco",
    [5] = "Funk",
    [6] = "Grunge",
    [7] = "Hip-Hop",
    [8] = "Jazz",
    [9] = "Metal",
    [10] = "New Age",
    [11] = "Oldies",
    [12] = "Other",
    [13] = "Pop",
    [14] = "R&B",
    [15] = "Rap",
    [16] = "Reggae",
    [17] = "Rock",
    [18] = "Techno",
    [19] = "Industrial",
    [20] = "Alternative",
    [21] = "Ska",
    [22] = "Death Metal",
    [23] = "Pranks",
    [24] = "Soundtrack",
    [25] = "Euro-Techno",
    [26] = "Ambient",
    [27] = "Trip-Hop",
    [28] = "Vocal",
    [29] = "Jazz+Funk",
    [30] = "Fusion",
    [31] = "Trance",
    [32] = "Classical",
    [33] = "Instrumental",
    [34] = "Acid",
    [35] = "House",
    [36] = "Game",
    [37] = "Sound Clip",
    [38] = "Gospel",
    [39] = "Noise",
    [40] = "AlternRock",
    [41] = "Bass",
    [42] = "Soul",
    [43] = "Punk",
    [44] = "Space",
    [45] = "Meditative",
    [46] = "Instrumental Pop",
    [47] = "Instrumental Rock",
    [48] = "Ethnic",
    [49] = "Gothic",
    [50] = "Darkwave",
    [51] = "Techno-Industrial",
    [52] = "Electronic",
    [53] = "Pop-Folk",
    [54] = "Eurodance",
    [55] = "Dream",
    [56] = "Southern Rock",
    [57] = "Comedy",
    [58] = "Cult",
    [59] = "Gangsta",
    [60] = "Top 40",
    [61] = "Christian Rap",
    [62] = "Pop/Funk",
    [63] = "Jungle",
    [64] = "Native American",
    [65] = "Cabaret",
    [66] = "New Wave",
    [67] = "Psychadelic",
    [68] = "Rave",
    [69] = "Showtunes",
    [70] = "Trailer",
    [71] = "Lo-Fi",
    [72] = "Tribal",
    [73] = "Acid Punk",
    [74] = "Acid Jazz",
    [75] = "Polka",
    [76] = "Retro",
    [77] = "Musical",
    [78] = "Rock & Roll",
    [79] = "Hard Rock",
    [80] = "Folk",
    [81] = "Folk-Rock",
    [82] = "National Folk",
    [83] = "Swing",
    [84] = "Fast Fusion",
    [85] = "Bebob",
    [86] = "Latin",
    [87] = "Revival",
    [88] = "Celtic",
    [89] = "Bluegrass",
    [90] = "Avantgarde",
    [91] = "Gothic Rock",
    [92] = "Progressive Rock",
    [93] = "Psychedelic Rock",
    [94] = "Symphonic Rock",
    [95] = "Slow Rock",
    [96] = "Big Band",
    [97] = "Chorus",
    [98] = "Easy Listening",
    [99] = "Acoustic",
    [100] = "Humour",
    [101] = "Speech",
    [102] = "Chanson",
    [103] = "Opera",
    [104] = "Chamber Music",
    [105] = "Sonata",
    [106] = "Symphony",
    [107] = "Booty Bass",
    [108] = "Primus",
    [109] = "Porn Groove",
    [110] = "Satire",
    [111] = "Slow Jam",
    [112] = "Club",
    [113] = "Tango",
    [114] = "Samba",
    [115] = "Folklore",
    [116] = "Ballad",
    [117] = "Power Ballad",
    [118] = "Rhythmic Soul",
    [119] = "Freestyle",
    [120] = "Duet",
    [121] = "Punk Rock",
    [122] = "Drum Solo",
    [123] = "A capella",
    [124] = "Euro-House",
    [125] = "Dance Hall",
    [126] = "Goa",
    [127] = "Drum & Bass",
    [128] = "Club-House",
    [129] = "Hardcore",
    [130] = "Terror",
    [131] = "Indie",
    [132] = "BritPop",
    [133] = "Negerpunk",
    [134] = "Polsk Punk",
    [135] = "Beat",
    [136] = "Christian Gangsta",
    [137] = "Heavy Metal",
    [138] = "Black Metal",
    [139] = "Crossover",
    [140] = "Contemporary Christian",
    [141] = "Christian Rock",
    [142] = "Merengue",
    [143] = "Salsa",
    [144] = "Thrash Metal",
    [145] = "Anime",
    [146] = "JPop",
    [147] = "SynthPop",
};
/* Copy a fixed-width, NUL-padded tag field into a temporary buffer and, if
 * it is non-empty, store it under `key` in the context metadata. */
static void get_string(AVFormatContext *s, const char *key,
                       const uint8_t *buf, int buf_size)
{
    char str[512];
    char *dst = str;
    int i;

    for (i = 0; i < buf_size; i++) {
        int ch = buf[i];
        /* stop at the terminator or when the local buffer is full */
        if (ch == '\0' || dst - str >= sizeof(str) - 1)
            break;
        *dst++ = ch;
    }
    *dst = '\0';

    if (str[0])
        av_metadata_set2(&s->metadata, key, str, 0);
}
/**
 * Parse an ID3v1 tag
 *
 * @param buf ID3v1_TAG_SIZE long buffer containing the tag
 */
static int parse_tag(AVFormatContext *s, const uint8_t *buf)
{
    int genre;

    /* every ID3v1 tag starts with the literal bytes "TAG" */
    if (buf[0] != 'T' || buf[1] != 'A' || buf[2] != 'G')
        return -1;

    get_string(s, "title",   buf +  3, 30);
    get_string(s, "artist",  buf + 33, 30);
    get_string(s, "album",   buf + 63, 30);
    get_string(s, "date",    buf + 93,  4);
    get_string(s, "comment", buf + 97, 30);

    /* ID3v1.1: a zero byte at 125 marks byte 126 as the track number */
    if (buf[125] == 0 && buf[126] != 0)
        av_metadata_set2(&s->metadata, "track", av_d2str(buf[126]), AV_METADATA_DONT_STRDUP_VAL);

    genre = buf[127];
    if (genre <= ID3v1_GENRE_MAX)
        av_metadata_set2(&s->metadata, "genre", ff_id3v1_genre_str[genre], 0);
    return 0;
}
/**
 * Look for an ID3v1 tag in the last ID3v1_TAG_SIZE bytes of the input and
 * parse it into s->metadata.  Restores the read position afterwards.
 * Only possible on seekable (non-streamed) inputs.
 */
void ff_id3v1_read(AVFormatContext *s)
{
    int ret;
    /* url_fsize() returns int64_t; the previous int would truncate for
     * files larger than 2 GiB and seek to the wrong offset */
    int64_t filesize;
    uint8_t buf[ID3v1_TAG_SIZE];

    if (!url_is_streamed(s->pb)) {
        /* XXX: change that */
        filesize = url_fsize(s->pb);
        if (filesize > ID3v1_TAG_SIZE) {
            url_fseek(s->pb, filesize - ID3v1_TAG_SIZE, SEEK_SET);
            ret = get_buffer(s->pb, buf, ID3v1_TAG_SIZE);
            if (ret == ID3v1_TAG_SIZE) {
                parse_tag(s, buf);
            }
            url_fseek(s->pb, 0, SEEK_SET);
        }
    }
}
| 123linslouis-android-video-cutter | jni/libavformat/id3v1.c | C | asf20 | 6,243 |
/*
* copyright (c) 2009 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_METADATA_H
#define AVFORMAT_METADATA_H
/**
* @file
* internal metadata API header
* see avformat.h or the public API!
*/
#include "avformat.h"
/* Internal representation of a metadata dictionary: a flat array of tags. */
struct AVMetadata{
    int count;            // number of valid entries in elems
    AVMetadataTag *elems; // key/value tag array
};

/* One native<->generic key-name mapping used when converting container
 * specific metadata keys to the generic FFmpeg names and back. */
struct AVMetadataConv{
    const char *native;  // container-specific key name
    const char *generic; // generic FFmpeg key name
};

#if LIBAVFORMAT_VERSION_MAJOR < 53
/* Compatibility shims between the old fixed metadata fields and the
 * dictionary API; only built for the pre-53 ABI. */
void ff_metadata_demux_compat(AVFormatContext *s);
void ff_metadata_mux_compat(AVFormatContext *s);
#endif

/* Convert the keys of *pm from the s_conv naming to the d_conv naming. */
void metadata_conv(AVMetadata **pm, const AVMetadataConv *d_conv,
                                    const AVMetadataConv *s_conv);
#endif /* AVFORMAT_METADATA_H */
| 123linslouis-android-video-cutter | jni/libavformat/metadata.h | C | asf20 | 1,427 |
/*
* "NUT" Container Format demuxer
* Copyright (c) 2004-2006 Michael Niedermayer
* Copyright (c) 2003 Alex Beregszaszi
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <strings.h>
#include "libavutil/avstring.h"
#include "libavutil/bswap.h"
#include "libavutil/tree.h"
#include "nut.h"
#undef NDEBUG
#include <assert.h>
/* Read a length-prefixed string into `string`, truncating to maxlen-1
 * characters plus NUL; excess input bytes are drained from the stream.
 * Returns -1 when the string did not fit, 0 otherwise. */
static int get_str(ByteIOContext *bc, char *string, unsigned int maxlen){
    unsigned int len= ff_get_v(bc);

    if(len && maxlen)
        get_buffer(bc, string, FFMIN(len, maxlen));
    /* consume whatever did not fit into the caller's buffer */
    while(len > maxlen){
        get_byte(bc);
        len--;
    }

    if(maxlen)
        string[FFMIN(len, maxlen-1)]= 0;

    return maxlen == len ? -1 : 0;
}
/* Read a signed coded value: odd maps to negative, even to positive. */
static int64_t get_s(ByteIOContext *bc){
    int64_t v = ff_get_v(bc) + 1;
    return (v & 1) ? -(v >> 1) : (v >> 1);
}
/* Read a length-prefixed codec tag: 2 or 4 bytes, little endian.
 * Any other length is invalid and yields (uint64_t)-1. */
static uint64_t get_fourcc(ByteIOContext *bc){
    unsigned int len = ff_get_v(bc);

    switch (len) {
    case 2:  return get_le16(bc);
    case 4:  return get_le32(bc);
    default: return -1;
    }
}
#ifdef TRACE
/* Debug builds: wrappers that log every coded-value read together with the
 * call site; the macros below shadow the plain readers when TRACE is set. */
static inline uint64_t get_v_trace(ByteIOContext *bc, char *file, char *func, int line){
    uint64_t v= ff_get_v(bc);

    av_log(NULL, AV_LOG_DEBUG, "get_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
    return v;
}

static inline int64_t get_s_trace(ByteIOContext *bc, char *file, char *func, int line){
    int64_t v= get_s(bc);

    av_log(NULL, AV_LOG_DEBUG, "get_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
    return v;
}

static inline uint64_t get_vb_trace(ByteIOContext *bc, char *file, char *func, int line){
    uint64_t v= get_vb(bc);

    av_log(NULL, AV_LOG_DEBUG, "get_vb %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
    return v;
}
/* redirect the readers through the tracing wrappers */
#define ff_get_v(bc)  get_v_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define get_s(bc)  get_s_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define get_vb(bc)  get_vb_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#endif
/* Read the packet header that follows a startcode: the forward size plus,
 * for packets larger than 4096 bytes, a header checksum.  Afterwards the
 * running CRC is re-armed so that get_checksum() can verify the packet
 * body.  Returns the packet's forward size, or -1 on checksum mismatch. */
static int get_packetheader(NUTContext *nut, ByteIOContext *bc, int calculate_checksum, uint64_t startcode)
{
    int64_t size;
//    start= url_ftell(bc) - 8;

    /* the header CRC also covers the startcode, so feed its big-endian
     * byte representation into the CRC first */
    startcode= be2me_64(startcode);
    startcode= ff_crc04C11DB7_update(0, &startcode, 8);

    init_checksum(bc, ff_crc04C11DB7_update, startcode);
    size= ff_get_v(bc);
    if(size > 4096)
        get_be32(bc); /* header_checksum, folded into the running CRC */
    if(get_checksum(bc) && size > 4096)
        return -1;

    /* re-arm the CRC for the packet body (or disable it) */
    init_checksum(bc, calculate_checksum ? ff_crc04C11DB7_update : NULL, 0);

    return size;
}
/* Scan forward from pos (or the current position when pos < 0) for any of
 * the known NUT startcodes and return it, or 0 at EOF.  The stream is left
 * positioned just after the startcode. */
static uint64_t find_any_startcode(ByteIOContext *bc, int64_t pos){
    uint64_t state=0;

    if(pos >= 0)
        url_fseek(bc, pos, SEEK_SET); //note, this may fail if the stream is not seekable, but that should not matter, as in this case we simply start where we currently are

    while(!url_feof(bc)){
        /* slide a 64-bit window over the stream; all startcodes have 'N'
         * as their most significant byte */
        state= (state<<8) | get_byte(bc);
        if((state>>56) != 'N')
            continue;
        switch(state){
        case MAIN_STARTCODE:
        case STREAM_STARTCODE:
        case SYNCPOINT_STARTCODE:
        case INFO_STARTCODE:
        case INDEX_STARTCODE:
            return state;
        }
    }

    return 0;
}
/**
 * Find the given startcode.
 * @param code the startcode
 * @param pos the start position of the search, or -1 if the current position
 * @return the position of the startcode or -1 if not found
 */
static int64_t find_startcode(ByteIOContext *bc, uint64_t code, int64_t pos){
    uint64_t found;

    while ((found = find_any_startcode(bc, pos)) != 0) {
        if (found == code)
            return url_ftell(bc) - 8;
        pos = -1; /* continue scanning from the current position */
    }
    return -1; /* EOF reached without seeing the requested code */
}
/* Probe: slide a 64-bit window over the buffer looking for the NUT main
 * startcode anywhere in the probe data. */
static int nut_probe(AVProbeData *p){
    uint64_t window = 0;
    int i;

    for (i = 0; i < p->buf_size; i++) {
        window = (window << 8) | p->buf[i];
        if (window == MAIN_STARTCODE)
            return AVPROBE_SCORE_MAX;
    }
    return 0;
}
/* Read a coded value into the local `tmp` and validate it with `check`;
 * on failure log the offending value and return -1 from the enclosing
 * function, otherwise assign it to dst.  Requires `tmp`, `bc` and `s` in
 * the caller's scope. */
#define GET_V(dst, check) \
    tmp= ff_get_v(bc);\
    if(!(check)){\
        av_log(s, AV_LOG_ERROR, "Error " #dst " is (%"PRId64")\n", tmp);\
        return -1;\
    }\
    dst= tmp;
/* Advance the stream to absolute position pos, consuming reserved bytes.
 * If pos lies before the current position, seek back and return -1. */
static int skip_reserved(ByteIOContext *bc, int64_t pos){
    pos -= url_ftell(bc);

    if (pos < 0) {
        url_fseek(bc, pos, SEEK_CUR);
        return -1;
    }

    while (pos--)
        get_byte(bc);
    return 0;
}
/* Parse the NUT main header: version, stream count, time bases, the
 * 256-entry frame-code table and optional elision headers.  Creates one
 * AVStream per announced stream.  Returns 0 on success, -1 on any
 * validation or checksum failure. */
static int decode_main_header(NUTContext *nut){
    AVFormatContext *s= nut->avf;
    ByteIOContext *bc = s->pb;
    uint64_t tmp, end;
    unsigned int stream_count;
    int i, j, tmp_stream, tmp_mul, tmp_pts, tmp_size, count, tmp_res, tmp_head_idx;
    int64_t tmp_match;

    end= get_packetheader(nut, bc, 1, MAIN_STARTCODE);
    end += url_ftell(bc);

    GET_V(tmp              , tmp >=2 && tmp <= 3) /* container version */
    GET_V(stream_count     , tmp > 0 && tmp <=MAX_STREAMS)
    nut->max_distance = ff_get_v(bc);
    if(nut->max_distance > 65536){
        av_log(s, AV_LOG_DEBUG, "max_distance %d\n", nut->max_distance);
        nut->max_distance= 65536;
    }

    GET_V(nut->time_base_count, tmp>0 && tmp<INT_MAX / sizeof(AVRational))
    nut->time_base= av_malloc(nut->time_base_count * sizeof(AVRational));

    for(i=0; i<nut->time_base_count; i++){
        GET_V(nut->time_base[i].num, tmp>0 && tmp<(1ULL<<31))
        GET_V(nut->time_base[i].den, tmp>0 && tmp<(1ULL<<31))
        /* time bases must be stored in reduced form */
        if(av_gcd(nut->time_base[i].num, nut->time_base[i].den) != 1){
            av_log(s, AV_LOG_ERROR, "time base invalid\n");
            return -1;
        }
    }

    /* defaults carried from one frame-code table entry to the next */
    tmp_pts=0;
    tmp_mul=1;
    tmp_stream=0;
    tmp_match= 1-(1LL<<62);
    tmp_head_idx= 0;
    for(i=0; i<256;){
        int tmp_flags = ff_get_v(bc);
        int tmp_fields= ff_get_v(bc);
        /* each field is optional; absent fields keep their previous value
         * (or the documented default) */
        if(tmp_fields>0) tmp_pts   = get_s(bc);
        if(tmp_fields>1) tmp_mul   = ff_get_v(bc);
        if(tmp_fields>2) tmp_stream= ff_get_v(bc);
        if(tmp_fields>3) tmp_size  = ff_get_v(bc);
        else             tmp_size  = 0;
        if(tmp_fields>4) tmp_res   = ff_get_v(bc);
        else             tmp_res   = 0;
        if(tmp_fields>5) count     = ff_get_v(bc);
        else             count     = tmp_mul - tmp_size;
        if(tmp_fields>6) tmp_match = get_s(bc);
        if(tmp_fields>7) tmp_head_idx= ff_get_v(bc);
        while(tmp_fields-- > 8)
            ff_get_v(bc); /* skip unknown future fields */

        if(count == 0 || i+count > 256){
            av_log(s, AV_LOG_ERROR, "illegal count %d at %d\n", count, i);
            return -1;
        }
        if(tmp_stream >= stream_count){
            av_log(s, AV_LOG_ERROR, "illegal stream number\n");
            return -1;
        }

        for(j=0; j<count; j++,i++){
            /* frame code 'N' is reserved: it is the first byte of every
             * startcode and must never appear as a frame header */
            if (i == 'N') {
                nut->frame_code[i].flags= FLAG_INVALID;
                j--;
                continue;
            }
            nut->frame_code[i].flags           = tmp_flags ;
            nut->frame_code[i].pts_delta       = tmp_pts   ;
            nut->frame_code[i].stream_id       = tmp_stream;
            nut->frame_code[i].size_mul        = tmp_mul   ;
            nut->frame_code[i].size_lsb        = tmp_size+j;
            nut->frame_code[i].reserved_count  = tmp_res   ;
            nut->frame_code[i].header_idx      = tmp_head_idx;
        }
    }
    assert(nut->frame_code['N'].flags == FLAG_INVALID);

    /* optional elision headers: common frame prefixes stored once here */
    if(end > url_ftell(bc) + 4){
        int rem= 1024; /* total elision header budget in bytes */
        GET_V(nut->header_count, tmp<128U)
        nut->header_count++;
        for(i=1; i<nut->header_count; i++){
            GET_V(nut->header_len[i], tmp>0 && tmp<256);
            rem -= nut->header_len[i];
            if(rem < 0){
                av_log(s, AV_LOG_ERROR, "invalid elision header\n");
                return -1;
            }
            nut->header[i]= av_malloc(nut->header_len[i]);
            get_buffer(bc, nut->header[i], nut->header_len[i]);
        }
        assert(nut->header_len[0]==0);
    }

    if(skip_reserved(bc, end) || get_checksum(bc)){
        av_log(s, AV_LOG_ERROR, "main header checksum mismatch\n");
        return -1;
    }

    nut->stream = av_mallocz(sizeof(StreamContext)*stream_count);
    for(i=0; i<stream_count; i++){
        av_new_stream(s, i);
    }

    return 0;
}
/* Parse one stream header packet: codec class and tag, timing parameters,
 * extradata and class-specific (video/audio) properties.
 * Returns 0 on success, -1 on validation/checksum failure. */
static int decode_stream_header(NUTContext *nut){
    AVFormatContext *s= nut->avf;
    ByteIOContext *bc = s->pb;
    StreamContext *stc;
    int class, stream_id;
    uint64_t tmp, end;
    AVStream *st;

    end= get_packetheader(nut, bc, 1, STREAM_STARTCODE);
    end += url_ftell(bc);

    /* each stream may have only one header: time_base is still unset */
    GET_V(stream_id, tmp < s->nb_streams && !nut->stream[tmp].time_base);
    stc= &nut->stream[stream_id];

    st = s->streams[stream_id];
    if (!st)
        return AVERROR(ENOMEM);

    class = ff_get_v(bc);
    tmp = get_fourcc(bc);
    st->codec->codec_tag= tmp;
    switch(class)
    {
        case 0:
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, tmp);
            break;
        case 1:
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            st->codec->codec_id = ff_codec_get_id(ff_codec_wav_tags, tmp);
            break;
        case 2:
            st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
            st->codec->codec_id = ff_codec_get_id(ff_nut_subtitle_tags, tmp);
            break;
        case 3:
            st->codec->codec_type = AVMEDIA_TYPE_DATA;
            break;
        default:
            av_log(s, AV_LOG_ERROR, "unknown stream class (%d)\n", class);
            return -1;
    }
    if(class<3 && st->codec->codec_id == CODEC_ID_NONE)
        av_log(s, AV_LOG_ERROR, "Unknown codec tag '0x%04x' for stream number %d\n",
               (unsigned int)tmp, stream_id);

    GET_V(stc->time_base_id    , tmp < nut->time_base_count);
    GET_V(stc->msb_pts_shift   , tmp < 16);
    stc->max_pts_distance= ff_get_v(bc);
    GET_V(stc->decode_delay    , tmp < 1000); //sanity limit, raise this if Moore's law is true
    st->codec->has_b_frames= stc->decode_delay;
    ff_get_v(bc); //stream flags

    GET_V(st->codec->extradata_size, tmp < (1<<30));
    if(st->codec->extradata_size){
        st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        get_buffer(bc, st->codec->extradata, st->codec->extradata_size);
    }

    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
        GET_V(st->codec->width , tmp > 0)
        GET_V(st->codec->height, tmp > 0)
        st->sample_aspect_ratio.num= ff_get_v(bc);
        st->sample_aspect_ratio.den= ff_get_v(bc);
        /* num and den must be both zero (unknown) or both nonzero */
        if((!st->sample_aspect_ratio.num) != (!st->sample_aspect_ratio.den)){
            av_log(s, AV_LOG_ERROR, "invalid aspect ratio %d/%d\n", st->sample_aspect_ratio.num, st->sample_aspect_ratio.den);
            return -1;
        }
        ff_get_v(bc); /* csp type */
    }else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO){
        GET_V(st->codec->sample_rate , tmp > 0)
        ff_get_v(bc); // samplerate_den
        GET_V(st->codec->channels, tmp > 0)
    }
    if(skip_reserved(bc, end) || get_checksum(bc)){
        av_log(s, AV_LOG_ERROR, "stream header %d checksum mismatch\n", stream_id);
        return -1;
    }
    stc->time_base= &nut->time_base[stc->time_base_id];
    av_set_pts_info(s->streams[stream_id], 63, stc->time_base->num, stc->time_base->den);
    return 0;
}
/* Translate a disposition name into its flag and OR it into the matching
 * stream (stream_id == -1 applies the flag to every stream). */
static void set_disposition_bits(AVFormatContext* avf, char* value, int stream_id){
    int flag = 0, i;

    for (i = 0; ff_nut_dispositions[i].flag; ++i)
        if (!strcmp(ff_nut_dispositions[i].str, value))
            flag = ff_nut_dispositions[i].flag;

    if (!flag)
        av_log(avf, AV_LOG_INFO, "unknown disposition type '%s'\n", value);

    for (i = 0; i < avf->nb_streams; ++i)
        if (stream_id == i || stream_id == -1)
            avf->streams[i]->disposition |= flag;
}
/* Parse an info packet: chapter definitions plus metadata key/value pairs,
 * attached globally, to one stream, or to a chapter.
 * Returns 0 on success, -1 on checksum failure. */
static int decode_info_header(NUTContext *nut){
    AVFormatContext *s= nut->avf;
    ByteIOContext *bc = s->pb;
    uint64_t tmp, chapter_start, chapter_len;
    unsigned int stream_id_plus1, count;
    int chapter_id, i;
    int64_t value, end;
    char name[256], str_value[1024], type_str[256];
    const char *type;
    AVChapter *chapter= NULL;
    AVStream *st= NULL;

    end= get_packetheader(nut, bc, 1, INFO_STARTCODE);
    end += url_ftell(bc);

    GET_V(stream_id_plus1, tmp <= s->nb_streams)
    chapter_id   = get_s(bc);
    chapter_start= ff_get_v(bc);
    chapter_len  = ff_get_v(bc);
    count        = ff_get_v(bc);

    /* a nonzero chapter id without a stream attachment defines a chapter;
     * chapter_start multiplexes the time-base id with the start time */
    if(chapter_id && !stream_id_plus1){
        int64_t start= chapter_start / nut->time_base_count;
        chapter= ff_new_chapter(s, chapter_id,
                                nut->time_base[chapter_start % nut->time_base_count],
                                start, start + chapter_len, NULL);
    } else if(stream_id_plus1)
        st= s->streams[stream_id_plus1 - 1];

    for(i=0; i<count; i++){
        get_str(bc, name, sizeof(name));
        value= get_s(bc);
        /* negative "values" are type selectors for the payload that follows */
        if(value == -1){
            type= "UTF-8";
            get_str(bc, str_value, sizeof(str_value));
        }else if(value == -2){
            get_str(bc, type_str, sizeof(type_str));
            type= type_str;
            get_str(bc, str_value, sizeof(str_value));
        }else if(value == -3){
            type= "s";
            value= get_s(bc);
        }else if(value == -4){
            type= "t";
            value= ff_get_v(bc);
        }else if(value < -4){
            type= "r";
            get_s(bc);
        }else{
            type= "v";
        }

        if (stream_id_plus1 > s->nb_streams) {
            av_log(s, AV_LOG_ERROR, "invalid stream id for info packet\n");
            continue;
        }

        /* only UTF-8 string values are turned into metadata entries */
        if(!strcmp(type, "UTF-8")){
            AVMetadata **metadata = NULL;
            if(chapter_id==0 && !strcmp(name, "Disposition"))
                set_disposition_bits(s, str_value, stream_id_plus1 - 1);
            else if(chapter)         metadata= &chapter->metadata;
            else if(stream_id_plus1) metadata= &st->metadata;
            else                     metadata= &s->metadata;

            /* relational keys are structural, not user-visible metadata */
            if(metadata && strcasecmp(name,"Uses")
               && strcasecmp(name,"Depends") && strcasecmp(name,"Replaces"))
                av_metadata_set2(metadata, name, str_value, 0);
        }
    }

    if(skip_reserved(bc, end) || get_checksum(bc)){
        av_log(s, AV_LOG_ERROR, "info header checksum mismatch\n");
        return -1;
    }
    return 0;
}
/**
 * Parse a syncpoint packet: the global timestamp (pts multiplexed with the
 * time-base id) and the back pointer to the previous syncpoint.
 * @return 0 on success, -1 on a bad back pointer or checksum mismatch
 */
static int decode_syncpoint(NUTContext *nut, int64_t *ts, int64_t *back_ptr){
    AVFormatContext *s= nut->avf;
    ByteIOContext *bc = s->pb;
    int64_t end, tmp;

    nut->last_syncpoint_pos= url_ftell(bc)-8;

    end= get_packetheader(nut, bc, 1, SYNCPOINT_STARTCODE);
    end += url_ftell(bc);

    tmp= ff_get_v(bc);
    *back_ptr= nut->last_syncpoint_pos - 16*ff_get_v(bc);
    if(*back_ptr < 0)
        return -1;

    ff_nut_reset_ts(nut, nut->time_base[tmp % nut->time_base_count], tmp / nut->time_base_count);
    if(skip_reserved(bc, end) || get_checksum(bc)){
        av_log(s, AV_LOG_ERROR, "sync point checksum mismatch\n");
        return -1;
    }

    /* the global timestamp is coded as pts * time_base_count + time_base_id
     * (see ff_nut_reset_ts above); the old code wrongly split it with
     * s->nb_streams */
    *ts= tmp / nut->time_base_count * av_q2d(nut->time_base[tmp % nut->time_base_count]) * AV_TIME_BASE;
    ff_nut_add_sp(nut, nut->last_syncpoint_pos, *back_ptr, *ts);

    return 0;
}
/**
 * Locate the index packet through the pointer stored in the last 12 bytes
 * of the file and build AVIndexEntry tables for every stream from it.
 * @return 0 on success, -1 when no usable index was found
 */
static int find_and_decode_index(NUTContext *nut){
    AVFormatContext *s= nut->avf;
    ByteIOContext *bc = s->pb;
    uint64_t tmp, end;
    int i, j, syncpoint_count;
    int64_t filesize= url_fsize(bc);
    int64_t *syncpoints= NULL;
    int8_t *has_keyframe= NULL;
    int ret= -1;

    /* the last 8 bytes before the trailing 4 hold the index position */
    url_fseek(bc, filesize-12, SEEK_SET);
    url_fseek(bc, filesize-get_be64(bc), SEEK_SET);
    if(get_be64(bc) != INDEX_STARTCODE){
        av_log(s, AV_LOG_ERROR, "no index at the end\n");
        return -1;
    }

    end= get_packetheader(nut, bc, 1, INDEX_STARTCODE);
    end += url_ftell(bc);

    ff_get_v(bc); //max_pts
    GET_V(syncpoint_count, tmp < INT_MAX/8 && tmp > 0)
    syncpoints= av_malloc(sizeof(int64_t)*syncpoint_count);
    has_keyframe= av_malloc(sizeof(int8_t)*(syncpoint_count+1));
    if(!syncpoints || !has_keyframe) /* allocations were used unchecked before */
        goto fail;
    /* syncpoint positions are delta-coded */
    for(i=0; i<syncpoint_count; i++){
        syncpoints[i] = ff_get_v(bc);
        if(syncpoints[i] <= 0)
            goto fail;
        if(i)
            syncpoints[i] += syncpoints[i-1];
    }

    for(i=0; i<s->nb_streams; i++){
        int64_t last_pts= -1;
        for(j=0; j<syncpoint_count;){
            /* run-length (type 1) or bitmask (type 0) coded keyframe flags */
            uint64_t x= ff_get_v(bc);
            int type= x&1;
            int n= j;
            x>>=1;
            if(type){
                int flag= x&1;
                x>>=1;
                if(n+x >= syncpoint_count + 1){
                    av_log(s, AV_LOG_ERROR, "index overflow A\n");
                    goto fail;
                }
                while(x--)
                    has_keyframe[n++]= flag;
                has_keyframe[n++]= !flag;
            }else{
                while(x != 1){
                    if(n>=syncpoint_count + 1){
                        av_log(s, AV_LOG_ERROR, "index overflow B\n");
                        goto fail;
                    }
                    has_keyframe[n++]= x&1;
                    x>>=1;
                }
            }
            if(has_keyframe[0]){
                av_log(s, AV_LOG_ERROR, "keyframe before first syncpoint in index\n");
                goto fail;
            }
            assert(n<=syncpoint_count+1);
            for(; j<n && j<syncpoint_count; j++){
                if(has_keyframe[j]){
                    /* A==0 escapes to an explicit (A, B) pts/eor pair */
                    uint64_t B, A= ff_get_v(bc);
                    if(!A){
                        A= ff_get_v(bc);
                        B= ff_get_v(bc);
                        //eor_pts[j][i] = last_pts + A + B
                    }else
                        B= 0;
                    av_add_index_entry(
                        s->streams[i],
                        16*syncpoints[j-1],
                        last_pts + A,
                        0,
                        0,
                        AVINDEX_KEYFRAME);
                    last_pts += A + B;
                }
            }
        }
    }

    if(skip_reserved(bc, end) || get_checksum(bc)){
        av_log(s, AV_LOG_ERROR, "index checksum mismatch\n");
        goto fail;
    }
    ret= 0;
fail:
    av_free(syncpoints);
    av_free(has_keyframe);
    return ret;
}
/**
 * Parse all NUT headers up to (but not consuming) the first syncpoint.
 *
 * Order: main header, then one stream header per declared stream, then
 * any info headers. Damaged copies are skipped by rescanning for the
 * next matching startcode. Finally, if the input is seekable, the index
 * at the end of the file is loaded.
 *
 * @return 0 on success, -1 if a required header cannot be found.
 */
static int nut_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    NUTContext *nut = s->priv_data;
    ByteIOContext *bc = s->pb;
    int64_t pos;
    int initialized_stream_count;
    nut->avf= s;
    /* main header */
    pos=0;
    do{
        /* find_startcode() returns a negative position on failure, so
           after the +1 a failure shows up as pos < 1 */
        pos= find_startcode(bc, MAIN_STARTCODE, pos)+1;
        if (pos<0+1){
            av_log(s, AV_LOG_ERROR, "No main startcode found.\n");
            return -1;
        }
    }while(decode_main_header(nut) < 0);
    /* stream headers */
    pos=0;
    for(initialized_stream_count=0; initialized_stream_count < s->nb_streams;){
        pos= find_startcode(bc, STREAM_STARTCODE, pos)+1;
        if (pos<0+1){
            av_log(s, AV_LOG_ERROR, "Not all stream headers found.\n");
            return -1;
        }
        if(decode_stream_header(nut) >= 0)
            initialized_stream_count++;
    }
    /* info headers */
    pos=0;
    for(;;){
        uint64_t startcode= find_any_startcode(bc, pos);
        pos= url_ftell(bc);
        if(startcode==0){
            av_log(s, AV_LOG_ERROR, "EOF before video frames\n");
            return -1;
        }else if(startcode == SYNCPOINT_STARTCODE){
            /* remember the syncpoint so nut_read_packet() re-handles it */
            nut->next_startcode= startcode;
            break;
        }else if(startcode != INFO_STARTCODE){
            continue;
        }
        decode_info_header(nut);
    }
    /* pos is just past the 8-byte syncpoint startcode */
    s->data_offset= pos-8;
    if(!url_is_streamed(bc)){
        int64_t orig_pos= url_ftell(bc);
        find_and_decode_index(nut);
        url_fseek(bc, orig_pos, SEEK_SET);
    }
    assert(nut->next_startcode == SYNCPOINT_STARTCODE);
    return 0;
}
/**
 * Decode the header of the frame starting at the current file position.
 *
 * The frame_code byte indexes a table (from the main header) supplying
 * default flags/size/stream/pts values; FLAG_CODED* bits say which of
 * those are overridden by explicitly coded fields that follow.
 *
 * @param pts        receives the decoded presentation timestamp
 * @param stream_id  receives the stream the frame belongs to
 * @param header_idx receives the elision-header index to prepend
 * @return payload size still to be read (headers subtracted), or -1 on error
 */
static int decode_frame_header(NUTContext *nut, int64_t *pts, int *stream_id, uint8_t *header_idx, int frame_code){
    AVFormatContext *s= nut->avf;
    ByteIOContext *bc = s->pb;
    StreamContext *stc;
    int size, flags, size_mul, pts_delta, i, reserved_count;
    uint64_t tmp;
    /* frames may not span further than max_distance past a syncpoint */
    if(url_ftell(bc) > nut->last_syncpoint_pos + nut->max_distance){
        av_log(s, AV_LOG_ERROR, "Last frame must have been damaged %"PRId64" > %"PRId64" + %d\n", url_ftell(bc), nut->last_syncpoint_pos, nut->max_distance);
        return -1;
    }
    /* table defaults for this frame_code */
    flags = nut->frame_code[frame_code].flags;
    size_mul = nut->frame_code[frame_code].size_mul;
    size = nut->frame_code[frame_code].size_lsb;
    *stream_id = nut->frame_code[frame_code].stream_id;
    pts_delta = nut->frame_code[frame_code].pts_delta;
    reserved_count = nut->frame_code[frame_code].reserved_count;
    *header_idx = nut->frame_code[frame_code].header_idx;
    if(flags & FLAG_INVALID)
        return -1;
    if(flags & FLAG_CODED)
        flags ^= ff_get_v(bc);   /* coded flags XOR the table defaults */
    if(flags & FLAG_STREAM_ID){
        GET_V(*stream_id, tmp < s->nb_streams)
    }
    stc= &nut->stream[*stream_id];
    if(flags&FLAG_CODED_PTS){
        int coded_pts= ff_get_v(bc);
        //FIXME check last_pts validity?
        /* small values are lsb-coded relative to last_pts, larger ones
           carry the full timestamp offset by 1<<msb_pts_shift */
        if(coded_pts < (1<<stc->msb_pts_shift)){
            *pts=ff_lsb2full(stc, coded_pts);
        }else
            *pts=coded_pts - (1<<stc->msb_pts_shift);
    }else
        *pts= stc->last_pts + pts_delta;
    if(flags&FLAG_SIZE_MSB){
        size += size_mul*ff_get_v(bc);
    }
    if(flags&FLAG_MATCH_TIME)
        get_s(bc);
    if(flags&FLAG_HEADER_IDX)
        *header_idx= ff_get_v(bc);
    if(flags&FLAG_RESERVED)
        reserved_count= ff_get_v(bc);
    for(i=0; i<reserved_count; i++)
        ff_get_v(bc);
    if(*header_idx >= (unsigned)nut->header_count){
        av_log(s, AV_LOG_ERROR, "header_idx invalid\n");
        return -1;
    }
    /* header elision only applies to small frames */
    if(size > 4096)
        *header_idx=0;
    size -= nut->header_len[*header_idx];
    if(flags&FLAG_CHECKSUM){
        get_be32(bc); //FIXME check this
    }else if(size > 2*nut->max_distance || FFABS(stc->last_pts - *pts) > stc->max_pts_distance){
        /* large or far-jumping frames must carry a checksum */
        av_log(s, AV_LOG_ERROR, "frame size > 2max_distance and no checksum\n");
        return -1;
    }
    stc->last_pts= *pts;
    stc->last_flags= flags;
    return size;
}
/**
 * Decode one frame into pkt, honouring the stream's discard setting.
 *
 * Fix vs. original: the av_new_packet() result was ignored, so on
 * allocation failure the following memcpy()/get_buffer() would write
 * through a NULL pkt->data pointer.
 *
 * @return 0 on success, 1 if the frame was valid but discarded,
 *         -1 on error (caller resyncs)
 */
static int decode_frame(NUTContext *nut, AVPacket *pkt, int frame_code){
    AVFormatContext *s= nut->avf;
    ByteIOContext *bc = s->pb;
    int size, stream_id, discard;
    int64_t pts, last_IP_pts;
    StreamContext *stc;
    uint8_t header_idx;
    size= decode_frame_header(nut, &pts, &stream_id, &header_idx, frame_code);
    if(size < 0)
        return -1;
    stc= &nut->stream[stream_id];
    if (stc->last_flags & FLAG_KEY)
        stc->skip_until_key_frame=0;
    /* apply the per-stream discard policy without allocating a packet */
    discard= s->streams[ stream_id ]->discard;
    last_IP_pts= s->streams[ stream_id ]->last_IP_pts;
    if(  (discard >= AVDISCARD_NONKEY && !(stc->last_flags & FLAG_KEY))
       ||(discard >= AVDISCARD_BIDIR && last_IP_pts != AV_NOPTS_VALUE && last_IP_pts > pts)
       || discard >= AVDISCARD_ALL
       || stc->skip_until_key_frame){
        url_fskip(bc, size);
        return 1;
    }
    /* elided header bytes are prepended in front of the payload */
    if (av_new_packet(pkt, size + nut->header_len[header_idx]) < 0)
        return -1;
    memcpy(pkt->data, nut->header[header_idx], nut->header_len[header_idx]);
    pkt->pos= url_ftell(bc); //FIXME
    get_buffer(bc, pkt->data + nut->header_len[header_idx], size);
    pkt->stream_index = stream_id;
    if (stc->last_flags & FLAG_KEY)
        pkt->flags |= AV_PKT_FLAG_KEY;
    pkt->pts = pts;
    return 0;
}
/**
 * Read the next demuxed packet.
 *
 * Transparently skips/handles in-band main/stream/index/info/syncpoint
 * packets, and resynchronizes by scanning for the next startcode when
 * damaged data is encountered.
 *
 * @return 0 with pkt filled on success, -1 on EOF/unrecoverable error
 */
static int nut_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    NUTContext *nut = s->priv_data;
    ByteIOContext *bc = s->pb;
    int i, frame_code=0, ret, skip;
    int64_t ts, back_ptr;
    for(;;){
        int64_t pos= url_ftell(bc);
        uint64_t tmp= nut->next_startcode;
        nut->next_startcode=0;
        if(tmp){
            pos-=8;   /* the queued startcode was already consumed from bc */
        }else{
            frame_code = get_byte(bc);
            if(url_feof(bc))
                return -1;
            if(frame_code == 'N'){
                /* 'N' may begin an 8-byte startcode; read the rest */
                tmp= frame_code;
                for(i=1; i<8; i++)
                    tmp = (tmp<<8) + get_byte(bc);
            }
        }
        switch(tmp){
        case MAIN_STARTCODE:
        case STREAM_STARTCODE:
        case INDEX_STARTCODE:
            /* already parsed during nut_read_header(); just skip over */
            skip= get_packetheader(nut, bc, 0, tmp);
            url_fseek(bc, skip, SEEK_CUR);
            break;
        case INFO_STARTCODE:
            if(decode_info_header(nut)<0)
                goto resync;
            break;
        case SYNCPOINT_STARTCODE:
            if(decode_syncpoint(nut, &ts, &back_ptr)<0)
                goto resync;
            frame_code = get_byte(bc);
            /* fall through: a syncpoint is immediately followed by a frame */
        case 0:
            ret= decode_frame(nut, pkt, frame_code);
            if(ret==0)
                return 0;
            else if(ret==1) //ok but discard packet
                break;
            /* fall through to resync on frame decode error */
        default:
resync:
            av_log(s, AV_LOG_DEBUG, "syncing from %"PRId64"\n", pos);
            tmp= find_any_startcode(bc, nut->last_syncpoint_pos+1);
            if(tmp==0)
                return -1;
            av_log(s, AV_LOG_DEBUG, "sync\n");
            nut->next_startcode= tmp;
        }
    }
}
/**
 * Seek-search callback: decode the first valid syncpoint at/after
 * *pos_arg and report either its timestamp or its back pointer.
 *
 * Fix vs. original: with NDEBUG defined, assert(0) compiles away and
 * execution fell off the end of a value-returning function — undefined
 * behavior; an explicit return is now provided.
 *
 * @param stream_index -1 to get the syncpoint pts, -2 to get back_ptr
 *                     (no other values are valid)
 * @param pos_arg in: scan start; out: position of the found syncpoint
 * @return pts/back_ptr, or AV_NOPTS_VALUE if none found
 */
static int64_t nut_read_timestamp(AVFormatContext *s, int stream_index, int64_t *pos_arg, int64_t pos_limit){
    NUTContext *nut = s->priv_data;
    ByteIOContext *bc = s->pb;
    int64_t pos, pts, back_ptr;
    av_log(s, AV_LOG_DEBUG, "read_timestamp(X,%d,%"PRId64",%"PRId64")\n", stream_index, *pos_arg, pos_limit);
    pos= *pos_arg;
    do{
        pos= find_startcode(bc, SYNCPOINT_STARTCODE, pos)+1;
        if(pos < 1){
            assert(nut->next_startcode == 0);
            av_log(s, AV_LOG_ERROR, "read_timestamp failed.\n");
            return AV_NOPTS_VALUE;
        }
    }while(decode_syncpoint(nut, &pts, &back_ptr) < 0);
    *pos_arg = pos-1;
    assert(nut->last_syncpoint_pos == *pos_arg);
    av_log(s, AV_LOG_DEBUG, "return %"PRId64" %"PRId64"\n", pts,back_ptr );
    if (stream_index == -1) return pts;
    else if(stream_index == -2) return back_ptr;
    assert(0);
    return AV_NOPTS_VALUE; /* not reached for valid stream_index values */
}
/**
 * Seek to the given timestamp.
 *
 * Uses the loaded index if present; otherwise performs a binary search
 * over syncpoints (av_gen_search with nut_read_timestamp) — first by
 * pts, then optionally by position for forward seeks — and finally
 * follows the syncpoint back pointer. All streams are flagged to skip
 * data until their next keyframe.
 *
 * @return 0 on success, -1 if the timestamp is not in the index
 */
static int read_seek(AVFormatContext *s, int stream_index, int64_t pts, int flags){
    NUTContext *nut = s->priv_data;
    AVStream *st= s->streams[stream_index];
    /* target timestamp converted to AV_TIME_BASE units */
    Syncpoint dummy={.ts= pts*av_q2d(st->time_base)*AV_TIME_BASE};
    Syncpoint nopts_sp= {.ts= AV_NOPTS_VALUE, .back_ptr= AV_NOPTS_VALUE};
    Syncpoint *sp, *next_node[2]= {&nopts_sp, &nopts_sp};
    int64_t pos, pos2, ts;
    int i;
    if(st->index_entries){
        int index= av_index_search_timestamp(st, pts, flags);
        if(index<0)
            return -1;
        pos2= st->index_entries[index].pos;
        ts = st->index_entries[index].timestamp;
    }else{
        /* narrow the search interval using the cached syncpoint tree */
        av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pts_cmp,
                     (void **) next_node);
        av_log(s, AV_LOG_DEBUG, "%"PRIu64"-%"PRIu64" %"PRId64"-%"PRId64"\n", next_node[0]->pos, next_node[1]->pos,
               next_node[0]->ts , next_node[1]->ts);
        pos= av_gen_search(s, -1, dummy.ts, next_node[0]->pos, next_node[1]->pos, next_node[1]->pos,
                           next_node[0]->ts , next_node[1]->ts, AVSEEK_FLAG_BACKWARD, &ts, nut_read_timestamp);
        if(!(flags & AVSEEK_FLAG_BACKWARD)){
            /* forward seek: refine by position/back_ptr */
            dummy.pos= pos+16;
            next_node[1]= &nopts_sp;
            av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp,
                         (void **) next_node);
            pos2= av_gen_search(s, -2, dummy.pos, next_node[0]->pos , next_node[1]->pos, next_node[1]->pos,
                                next_node[0]->back_ptr, next_node[1]->back_ptr, flags, &ts, nut_read_timestamp);
            if(pos2>=0)
                pos= pos2;
            //FIXME dir but I think it does not matter
        }
        dummy.pos= pos;
        sp= av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp,
                         NULL);
        assert(sp);
        pos2= sp->back_ptr - 15;
    }
    av_log(NULL, AV_LOG_DEBUG, "SEEKTO: %"PRId64"\n", pos2);
    pos= find_startcode(s->pb, SYNCPOINT_STARTCODE, pos2);
    url_fseek(s->pb, pos, SEEK_SET);
    av_log(NULL, AV_LOG_DEBUG, "SP: %"PRId64"\n", pos);
    if(pos2 > pos || pos2 + 15 < pos){
        av_log(NULL, AV_LOG_ERROR, "no syncpoint at backptr pos\n");
    }
    /* drop data until each stream's next keyframe */
    for(i=0; i<s->nb_streams; i++)
        nut->stream[i].skip_until_key_frame=1;
    return 0;
}
/**
 * Free all demuxer-private allocations.
 * Note: elision header slot 0 is deliberately not freed — the loop has
 * always started at index 1.
 */
static int nut_read_close(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    int idx;
    for (idx = 1; idx < nut->header_count; idx++)
        av_freep(&nut->header[idx]);
    ff_nut_free_sp(nut);
    av_freep(&nut->time_base);
    av_freep(&nut->stream);
    return 0;
}
#if CONFIG_NUT_DEMUXER
/* demuxer registration: the positional initializers are the probe,
   header, packet-read, close and seek callbacks defined in this file */
AVInputFormat nut_demuxer = {
    "nut",
    NULL_IF_CONFIG_SMALL("NUT format"),
    sizeof(NUTContext),
    nut_probe,
    nut_read_header,
    nut_read_packet,
    nut_read_close,
    read_seek,
    .extensions = "nut",
    .metadata_conv = ff_nut_metadata_conv,
};
#endif
| 123linslouis-android-video-cutter | jni/libavformat/nutdec.c | C | asf20 | 29,707 |
/*
* RTP network protocol
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* RTP protocol
*/
#include "libavutil/avstring.h"
#include "avformat.h"
#include "rtpdec.h"
#include <unistd.h>
#include <stdarg.h>
#include "internal.h"
#include "network.h"
#include "os_support.h"
#include <fcntl.h>
#if HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#include <sys/time.h>
#define RTP_TX_BUF_SIZE (64 * 1024)
#define RTP_RX_BUF_SIZE (128 * 1024)
/* per-connection state: one UDP handle for RTP and one for RTCP */
typedef struct RTPContext {
    URLContext *rtp_hd, *rtcp_hd;  /* underlying udp: protocol contexts */
    int rtp_fd, rtcp_fd;           /* raw socket fds, used by select() in rtp_read() */
} RTPContext;
/**
 * If no filename is given to av_open_input_file because you want to
 * get the local port first, then you must call this function to set
 * the remote server address.
 *
 * @param h   URL context of an already opened rtp: connection
 * @param uri uri of the remote server
 * @return zero if no error.
 */
int rtp_set_remote_url(URLContext *h, const char *uri)
{
    RTPContext *s = h->priv_data;
    char host[256], resource[1024], url[1024];
    int port;

    ff_url_split(NULL, 0, NULL, 0, host, sizeof(host), &port,
                 resource, sizeof(resource), uri);

    /* retarget the RTP socket, then RTCP on the next port up */
    ff_url_join(url, sizeof(url), "udp", NULL, host, port, "%s", resource);
    udp_set_remote_url(s->rtp_hd, url);
    ff_url_join(url, sizeof(url), "udp", NULL, host, port + 1, "%s", resource);
    udp_set_remote_url(s->rtcp_hd, url);
    return 0;
}
/**
 * Append a printf-formatted option to a url of the form:
 * "http://host:port/path?option1=val1&option2=val2...
 * The first option is introduced with '?', later ones with '&'.
 */
static void url_add_option(char *buf, int buf_size, const char *fmt, ...)
{
    char option[1024];
    va_list ap;

    va_start(ap, fmt);
    vsnprintf(option, sizeof(option), fmt, ap);
    va_end(ap);

    av_strlcat(buf, strchr(buf, '?') ? "&" : "?", buf_size);
    av_strlcat(buf, option, buf_size);
}
/* Build a "udp://hostname:port" url into buf, appending the optional
   localport/ttl/pkt_size parameters only when the caller supplied a
   non-negative value (-1 means "unset" throughout this file). */
static void build_udp_url(char *buf, int buf_size,
                          const char *hostname, int port,
                          int local_port, int ttl,
                          int max_packet_size)
{
    ff_url_join(buf, buf_size, "udp", NULL, hostname, port, NULL);
    if (local_port >= 0)
        url_add_option(buf, buf_size, "localport=%d", local_port);
    if (ttl >= 0)
        url_add_option(buf, buf_size, "ttl=%d", ttl);
    if (max_packet_size >=0)
        url_add_option(buf, buf_size, "pkt_size=%d", max_packet_size);
}
/**
* url syntax: rtp://host:port[?option=val...]
* option: 'ttl=n' : set the ttl value (for multicast only)
* 'rtcpport=n' : set the remote rtcp port to n
* 'localrtpport=n' : set the local rtp port to n
* 'localrtcpport=n' : set the local rtcp port to n
* 'pkt_size=n' : set max packet size
* deprecated option:
* 'localport=n' : set the local port to n
*
* if rtcpport isn't set the rtcp port will be the rtp port + 1
* if local rtp port isn't set any available port will be used for the local
* rtp and rtcp ports
* if the local rtcp port is not set it will be the local rtp port + 1
*/
/**
 * Open an rtp:// URL: sets up one UDP connection for RTP and a second
 * one for RTCP (remote port defaults to rtp_port+1). See the comment
 * block above for the accepted url options.
 * @return 0 on success, AVERROR(ENOMEM)/AVERROR(EIO) on failure
 */
static int rtp_open(URLContext *h, const char *uri, int flags)
{
    RTPContext *s;
    int rtp_port, rtcp_port,
        is_output, ttl,
        local_rtp_port, local_rtcp_port, max_packet_size;
    char hostname[256];
    char buf[1024];
    char path[1024];
    const char *p;
    is_output = (flags & URL_WRONLY);
    s = av_mallocz(sizeof(RTPContext));
    if (!s)
        return AVERROR(ENOMEM);
    h->priv_data = s;
    ff_url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &rtp_port,
                 path, sizeof(path), uri);
    /* extract parameters; -1 means "not specified" */
    ttl = -1;
    rtcp_port = rtp_port+1;
    local_rtp_port = -1;
    local_rtcp_port = -1;
    max_packet_size = -1;
    p = strchr(uri, '?');
    if (p) {
        if (find_info_tag(buf, sizeof(buf), "ttl", p)) {
            ttl = strtol(buf, NULL, 10);
        }
        if (find_info_tag(buf, sizeof(buf), "rtcpport", p)) {
            rtcp_port = strtol(buf, NULL, 10);
        }
        /* "localport" is the deprecated spelling of "localrtpport" */
        if (find_info_tag(buf, sizeof(buf), "localport", p)) {
            local_rtp_port = strtol(buf, NULL, 10);
        }
        if (find_info_tag(buf, sizeof(buf), "localrtpport", p)) {
            local_rtp_port = strtol(buf, NULL, 10);
        }
        if (find_info_tag(buf, sizeof(buf), "localrtcpport", p)) {
            local_rtcp_port = strtol(buf, NULL, 10);
        }
        if (find_info_tag(buf, sizeof(buf), "pkt_size", p)) {
            max_packet_size = strtol(buf, NULL, 10);
        }
    }
    build_udp_url(buf, sizeof(buf),
                  hostname, rtp_port, local_rtp_port, ttl, max_packet_size);
    if (url_open(&s->rtp_hd, buf, flags) < 0)
        goto fail;
    /* a local rtp port was requested but no rtcp one: derive it */
    if (local_rtp_port>=0 && local_rtcp_port<0)
        local_rtcp_port = udp_get_local_port(s->rtp_hd) + 1;
    build_udp_url(buf, sizeof(buf),
                  hostname, rtcp_port, local_rtcp_port, ttl, max_packet_size);
    if (url_open(&s->rtcp_hd, buf, flags) < 0)
        goto fail;
    /* just to ease handle access. XXX: need to suppress direct handle
       access */
    s->rtp_fd = url_get_file_handle(s->rtp_hd);
    s->rtcp_fd = url_get_file_handle(s->rtcp_hd);
    h->max_packet_size = url_get_max_packet_size(s->rtp_hd);
    h->is_streamed = 1;
    return 0;
 fail:
    if (s->rtp_hd)
        url_close(s->rtp_hd);
    if (s->rtcp_hd)
        url_close(s->rtcp_hd);
    av_free(s);
    return AVERROR(EIO);
}
/**
 * Read one datagram from either the RTCP or the RTP socket (RTCP is
 * polled first). select() is used with a 100ms timeout so the interrupt
 * callback stays responsive while waiting for data.
 *
 * Fix vs. original: removed the dead "#if 0" alternative implementation
 * that blocked on the RTP socket only.
 *
 * @return number of bytes read, AVERROR(EINTR) if interrupted,
 *         AVERROR(EIO) on socket errors
 */
static int rtp_read(URLContext *h, uint8_t *buf, int size)
{
    RTPContext *s = h->priv_data;
    struct sockaddr_in from;
    socklen_t from_len;
    int len, fd_max, n;
    fd_set rfds;
    struct timeval tv;
    for(;;) {
        if (url_interrupt_cb())
            return AVERROR(EINTR);
        /* build fdset to listen to RTP and RTCP packets */
        FD_ZERO(&rfds);
        fd_max = s->rtp_fd;
        FD_SET(s->rtp_fd, &rfds);
        if (s->rtcp_fd > fd_max)
            fd_max = s->rtcp_fd;
        FD_SET(s->rtcp_fd, &rfds);
        tv.tv_sec = 0;
        tv.tv_usec = 100 * 1000;
        n = select(fd_max + 1, &rfds, NULL, NULL, &tv);
        if (n > 0) {
            /* first try RTCP */
            if (FD_ISSET(s->rtcp_fd, &rfds)) {
                from_len = sizeof(from);
                len = recvfrom (s->rtcp_fd, buf, size, 0,
                                (struct sockaddr *)&from, &from_len);
                if (len < 0) {
                    if (ff_neterrno() == FF_NETERROR(EAGAIN) ||
                        ff_neterrno() == FF_NETERROR(EINTR))
                        continue;
                    return AVERROR(EIO);
                }
                break;
            }
            /* then RTP */
            if (FD_ISSET(s->rtp_fd, &rfds)) {
                from_len = sizeof(from);
                len = recvfrom (s->rtp_fd, buf, size, 0,
                                (struct sockaddr *)&from, &from_len);
                if (len < 0) {
                    if (ff_neterrno() == FF_NETERROR(EAGAIN) ||
                        ff_neterrno() == FF_NETERROR(EINTR))
                        continue;
                    return AVERROR(EIO);
                }
                break;
            }
        } else if (n < 0) {
            if (ff_neterrno() == FF_NETERROR(EINTR))
                continue;
            return AVERROR(EIO);
        }
    }
    return len;
}
/**
 * Write one packet, demultiplexed on the second byte: RTCP packet types
 * 200-204 (SR/RR/SDES/BYE/APP per RFC 3550) go out on the RTCP socket,
 * everything else on the RTP socket.
 *
 * Fix vs. original: removed the dead "#if 0" nanosleep() debug block.
 *
 * @return the url_write() result for the chosen socket
 */
static int rtp_write(URLContext *h, uint8_t *buf, int size)
{
    RTPContext *s = h->priv_data;
    int ret;
    URLContext *hd;
    if (buf[1] >= 200 && buf[1] <= 204) {
        /* RTCP payload type */
        hd = s->rtcp_hd;
    } else {
        /* RTP payload type */
        hd = s->rtp_hd;
    }
    ret = url_write(hd, buf, size);
    return ret;
}
/** Close both underlying UDP connections and free the private context. */
static int rtp_close(URLContext *h)
{
    RTPContext *ctx = h->priv_data;
    url_close(ctx->rtp_hd);
    url_close(ctx->rtcp_hd);
    av_free(ctx);
    return 0;
}
/**
 * Return the local rtp port used by the RTP connection.
 * @param h URL context
 * @return the local port number
 */
int rtp_get_local_rtp_port(URLContext *h)
{
    RTPContext *ctx = h->priv_data;
    return udp_get_local_port(ctx->rtp_hd);
}
/**
 * Return the local rtp port used by the RTP connection.
 * Identical to rtp_get_local_rtp_port(); apparently retained for
 * backward compatibility.
 * @param h URL context
 * @return the local port number
 */
int rtp_get_local_port(URLContext *h)
{
    RTPContext *ctx = h->priv_data;
    return udp_get_local_port(ctx->rtp_hd);
}
/**
 * Return the local rtcp port used by the RTP connection.
 * @param h URL context
 * @return the local port number
 */
int rtp_get_local_rtcp_port(URLContext *h)
{
    RTPContext *ctx = h->priv_data;
    return udp_get_local_port(ctx->rtcp_hd);
}
#if (LIBAVFORMAT_VERSION_MAJOR <= 52)
/**
 * Return the rtp and rtcp file handles for select() usage to wait for
 * several RTP streams at the same time.
 * Only compiled for major version <= 52 (superseded by
 * rtp_get_file_handle()).
 * @param h media file context
 * @param prtp_fd receives the RTP socket descriptor
 * @param prtcp_fd receives the RTCP socket descriptor
 */
void rtp_get_file_handles(URLContext *h, int *prtp_fd, int *prtcp_fd)
{
    RTPContext *s = h->priv_data;
    *prtp_fd = s->rtp_fd;
    *prtcp_fd = s->rtcp_fd;
}
#endif
/** URLProtocol callback: expose the RTP socket descriptor. */
static int rtp_get_file_handle(URLContext *h)
{
    RTPContext *ctx = h->priv_data;
    return ctx->rtp_fd;
}
/* protocol registration; no seek callback — RTP is a streamed protocol
   (h->is_streamed is set in rtp_open()) */
URLProtocol rtp_protocol = {
    "rtp",
    rtp_open,
    rtp_read,
    rtp_write,
    NULL, /* seek */
    rtp_close,
    .url_get_file_handle = rtp_get_file_handle,
};
| 123linslouis-android-video-cutter | jni/libavformat/rtpproto.c | C | asf20 | 10,551 |
/*
* copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_AVI_H
#define AVFORMAT_AVI_H
#include "metadata.h"
/* dwFlags bits of the AVI main header ('avih' chunk) */
#define AVIF_HASINDEX 0x00000010 // Index at end of file?
#define AVIF_MUSTUSEINDEX 0x00000020 // Index must be used to determine presentation order
#define AVIF_ISINTERLEAVED 0x00000100 // File is interleaved
#define AVIF_TRUSTCKTYPE 0x00000800 // Use CKType to find key frames?
#define AVIF_WASCAPTUREFILE 0x00010000
#define AVIF_COPYRIGHTED 0x00020000
#define AVI_MAX_RIFF_SIZE 0x40000000LL // 1 GiB RIFF chunk size limit
#define AVI_MASTER_INDEX_SIZE 256 // master ('indx') index entry count
/* index flags */
#define AVIIF_INDEX 0x10
extern const AVMetadataConv ff_avi_metadata_conv[];
/**
 * A list of AVI info tags.
 */
extern const char ff_avi_tags[][5];
#endif /* AVFORMAT_AVI_H */
| 123linslouis-android-video-cutter | jni/libavformat/avi.h | C | asf20 | 1,499 |
/**
Copyright (C) 2005 Michael Ahlberg, Måns Rullgård
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
**/
#ifndef AVFORMAT_OGGDEC_H
#define AVFORMAT_OGGDEC_H
#include "avformat.h"
#include "metadata.h"
/**
 * Codec-specific demuxing hooks for one kind of Ogg logical stream.
 */
struct ogg_codec {
    const int8_t *magic;   ///< byte pattern identifying this codec's streams
    uint8_t magicsize;     ///< length of magic in bytes
    const int8_t *name;    ///< human-readable codec name
    /**
     * Attempt to process a packet as a header
     * @return 1 if the packet was a valid header,
     *         0 if the packet was not a header (was a data packet)
     *         -1 if an error occurred or for unsupported stream
     */
    int (*header)(AVFormatContext *, int);
    int (*packet)(AVFormatContext *, int);
    /**
     * Translate a granule into a timestamp.
     * Will set dts if non-null and known.
     * @return pts
     */
    uint64_t (*gptopts)(AVFormatContext *, int, uint64_t, int64_t *dts);
    /**
     * 1 if granule is the start time of the associated packet.
     * 0 if granule is the end time of the associated packet.
     */
    int granule_is_start;
};
/**
 * Demuxing state for a single Ogg logical bitstream.
 */
struct ogg_stream {
    uint8_t *buf;
    unsigned int bufsize;
    unsigned int bufpos;
    unsigned int pstart;
    unsigned int psize;
    unsigned int pflags;
    unsigned int pduration;
    uint32_t serial;       ///< serial number identifying this logical stream
    uint64_t granule;
    int64_t lastpts;
    int64_t lastdts;
    int64_t sync_pos;   ///< file offset of the first page needed to reconstruct the current packet
    int64_t page_pos;   ///< file offset of the current page
    int flags;
    const struct ogg_codec *codec;  ///< codec hooks for this stream; may be NULL
    int header;
    int nsegs, segp;
    uint8_t segments[255];
    int incomplete; ///< whether we're expecting a continuation in the next page
    int page_end;   ///< current packet is the last one completed in the page
    int keyframe_seek;
    void *private;  ///< codec-private state
};
/**
 * Saved snapshot of the demuxer state; `next` chains older snapshots so
 * a previous position can be restored.
 */
struct ogg_state {
    uint64_t pos;           ///< file position the snapshot was taken at
    int curidx;
    struct ogg_state *next;
    int nstreams;
    struct ogg_stream streams[1];  ///< variable-length: nstreams entries
};
/**
 * Top-level private data of the Ogg demuxer.
 */
struct ogg {
    struct ogg_stream *streams;  ///< array of nstreams entries
    int nstreams;
    int headers;
    int curidx;
    struct ogg_state *state;     ///< saved state snapshot (see struct ogg_state)
};
#define OGG_FLAG_CONT 1
#define OGG_FLAG_BOS 2
#define OGG_FLAG_EOS 4
extern const struct ogg_codec ff_dirac_codec;
extern const struct ogg_codec ff_flac_codec;
extern const struct ogg_codec ff_ogm_audio_codec;
extern const struct ogg_codec ff_ogm_old_codec;
extern const struct ogg_codec ff_ogm_text_codec;
extern const struct ogg_codec ff_ogm_video_codec;
extern const struct ogg_codec ff_old_dirac_codec;
extern const struct ogg_codec ff_old_flac_codec;
extern const struct ogg_codec ff_skeleton_codec;
extern const struct ogg_codec ff_speex_codec;
extern const struct ogg_codec ff_theora_codec;
extern const struct ogg_codec ff_vorbis_codec;
int ff_vorbis_comment(AVFormatContext *ms, AVMetadata **m, const uint8_t *buf, int size);
/**
 * Map an Ogg serial number to its stream index.
 * @return stream index, or -1 if the serial is unknown
 */
static inline int
ogg_find_stream (struct ogg * ogg, int serial)
{
    int idx;

    for (idx = 0; idx < ogg->nstreams; idx++) {
        if (ogg->streams[idx].serial == serial)
            return idx;
    }
    return -1;
}
/**
 * Convert a granule position of stream i into a pts, delegating to the
 * codec's gptopts hook when one exists; otherwise the granule is used
 * as-is for both pts and (if requested) dts.
 */
static inline uint64_t
ogg_gptopts (AVFormatContext * s, int i, uint64_t gp, int64_t *dts)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + i;
    uint64_t pts;

    if (os->codec && os->codec->gptopts)
        return os->codec->gptopts(s, i, gp, dts);

    pts = gp;
    if (dts)
        *dts = pts;
    return pts;
}
#endif /* AVFORMAT_OGGDEC_H */
| 123linslouis-android-video-cutter | jni/libavformat/oggdec.h | C | asf20 | 4,447 |
/*
* Flash Compatible Streaming Format muxer
* Copyright (c) 2000 Fabrice Bellard
* Copyright (c) 2003 Tinic Uro
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/put_bits.h"
#include "avformat.h"
#include "swf.h"
/**
 * Begin a SWF tag: remember its start offset and write placeholder
 * header bytes (2 for a short tag, 2+4 for a long one) that
 * put_swf_end_tag() will patch with the real length.
 */
static void put_swf_tag(AVFormatContext *s, int tag)
{
    SWFContext *swf = s->priv_data;
    ByteIOContext *pb = s->pb;

    swf->tag_pos = url_ftell(pb);
    swf->tag = tag;
    put_le16(pb, 0);
    if (tag & TAG_LONG)
        put_le32(pb, 0);
}
/**
 * Finish the tag opened by put_swf_tag(): seek back to the reserved
 * header, write the (tag_code << 6 | length) word — with 0x3f and a
 * 32-bit length for long tags — and return to the end of the body.
 */
static void put_swf_end_tag(AVFormatContext *s)
{
    SWFContext *swf = s->priv_data;
    ByteIOContext *pb = s->pb;
    int64_t tag_end;
    int body_len, tag;

    tag_end  = url_ftell(pb);
    body_len = tag_end - swf->tag_pos - 2;
    tag      = swf->tag;
    url_fseek(pb, swf->tag_pos, SEEK_SET);
    if (tag & TAG_LONG) {
        tag &= ~TAG_LONG;
        put_le16(pb, (tag << 6) | 0x3f);
        put_le32(pb, body_len - 4);
    } else {
        assert(body_len < 0x3f);
        put_le16(pb, (tag << 6) | body_len);
    }
    url_fseek(pb, tag_end, SEEK_SET);
}
/**
 * Raise *nbits_ptr to the number of bits needed to store val as a
 * signed SWF bit field (magnitude bits plus one sign bit). Leaves
 * *nbits_ptr unchanged if it is already large enough, or if val is 0.
 *
 * Fix vs. original: abs(INT_MIN) is undefined behavior; the magnitude
 * is now computed in unsigned arithmetic.
 */
static inline void max_nbits(int *nbits_ptr, int val)
{
    unsigned mag;
    int n;
    if (val == 0)
        return;
    mag = val < 0 ? -(unsigned)val : (unsigned)val;
    n = 1;   /* the extra bit is the sign bit */
    while (mag != 0) {
        n++;
        mag >>= 1;
    }
    if (n > *nbits_ptr)
        *nbits_ptr = n;
}
/**
 * Write a SWF RECT record: a 5-bit field width followed by the four
 * edges, each coded on that many bits.
 */
static void put_swf_rect(ByteIOContext *pb,
                         int xmin, int xmax, int ymin, int ymax)
{
    PutBitContext bits;
    uint8_t tmp[256];
    int n = 0, mask;

    /* field width must fit the widest of the four coordinates */
    max_nbits(&n, xmin);
    max_nbits(&n, xmax);
    max_nbits(&n, ymin);
    max_nbits(&n, ymax);
    mask = (1 << n) - 1;

    init_put_bits(&bits, tmp, sizeof(tmp));
    put_bits(&bits, 5, n);
    put_bits(&bits, n, xmin & mask);
    put_bits(&bits, n, xmax & mask);
    put_bits(&bits, n, ymin & mask);
    put_bits(&bits, n, ymax & mask);
    flush_put_bits(&bits);
    put_buffer(pb, tmp, put_bits_ptr(&bits) - bits.buf);
}
/**
 * Write a SWF straight-edge record for a line delta (dx, dy) from the
 * current drawing position. Purely vertical or horizontal lines use the
 * shorter single-delta encoding.
 */
static void put_swf_line_edge(PutBitContext *pb, int dx, int dy)
{
    int nbits, mask;
    put_bits(pb, 1, 1); /* edge */
    put_bits(pb, 1, 1); /* line select */
    nbits = 2;
    max_nbits(&nbits, dx);
    max_nbits(&nbits, dy);
    mask = (1 << nbits) - 1;
    put_bits(pb, 4, nbits - 2); /* 16 bits precision */
    if (dx == 0) {
        /* vertical line: only dy is coded */
        put_bits(pb, 1, 0);
        put_bits(pb, 1, 1);
        put_bits(pb, nbits, dy & mask);
    } else if (dy == 0) {
        /* horizontal line: only dx is coded */
        put_bits(pb, 1, 0);
        put_bits(pb, 1, 0);
        put_bits(pb, nbits, dx & mask);
    } else {
        /* general line: both deltas coded */
        put_bits(pb, 1, 1);
        put_bits(pb, nbits, dx & mask);
        put_bits(pb, nbits, dy & mask);
    }
}
#define FRAC_BITS 16
/**
 * Write a SWF MATRIX record: scale pair (a, d), rotate/skew pair (c, b)
 * and translation (tx, ty); each group is prefixed by its own 5-bit
 * field width. Scale/rotate values are fixed point with FRAC_BITS
 * fractional bits.
 */
static void put_swf_matrix(ByteIOContext *pb,
                           int a, int b, int c, int d, int tx, int ty)
{
    PutBitContext p;
    uint8_t buf[256];
    int nbits;
    init_put_bits(&p, buf, sizeof(buf));
    put_bits(&p, 1, 1); /* a, d present */
    nbits = 1;
    max_nbits(&nbits, a);
    max_nbits(&nbits, d);
    put_bits(&p, 5, nbits); /* nb bits */
    put_bits(&p, nbits, a);
    put_bits(&p, nbits, d);
    put_bits(&p, 1, 1); /* b, c present */
    nbits = 1;
    max_nbits(&nbits, c);
    max_nbits(&nbits, b);
    put_bits(&p, 5, nbits); /* nb bits */
    put_bits(&p, nbits, c);
    put_bits(&p, nbits, b);
    nbits = 1;
    max_nbits(&nbits, tx);
    max_nbits(&nbits, ty);
    put_bits(&p, 5, nbits); /* nb bits */
    put_bits(&p, nbits, tx);
    put_bits(&p, nbits, ty);
    flush_put_bits(&p);
    put_buffer(pb, buf, put_bits_ptr(&p) - p.buf);
}
/**
 * Write the SWF file header and the static preamble tags:
 * - "FWS" signature, version (chosen from the codecs in use), dummy
 *   file size and duration (patched later when the output is seekable),
 * - a FileAttributes tag for v9 (AVM2) files,
 * - for MJPEG video, a DefineShape with a clipped-bitmap fill that the
 *   per-frame JPEG bitmaps are drawn through,
 * - for MP3 audio, a SoundStreamHead2 tag.
 *
 * Accepted streams: at most one MP3 audio and one VP6F/FLV1/MJPEG video
 * stream; anything else fails with -1.
 */
static int swf_write_header(AVFormatContext *s)
{
    SWFContext *swf = s->priv_data;
    ByteIOContext *pb = s->pb;
    PutBitContext p;
    uint8_t buf1[256];
    int i, width, height, rate, rate_base;
    int version;
    swf->sound_samples = 0;
    swf->swf_frame_number = 0;
    swf->video_frame_number = 0;
    for(i=0;i<s->nb_streams;i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (enc->codec_id == CODEC_ID_MP3) {
                if (!enc->frame_size) {
                    av_log(s, AV_LOG_ERROR, "audio frame size not set\n");
                    return -1;
                }
                swf->audio_enc = enc;
                swf->audio_fifo= av_fifo_alloc(AUDIO_FIFO_SIZE);
                if (!swf->audio_fifo)
                    return AVERROR(ENOMEM);
            } else {
                av_log(s, AV_LOG_ERROR, "SWF muxer only supports MP3\n");
                return -1;
            }
        } else {
            if (enc->codec_id == CODEC_ID_VP6F ||
                enc->codec_id == CODEC_ID_FLV1 ||
                enc->codec_id == CODEC_ID_MJPEG) {
                swf->video_enc = enc;
            } else {
                av_log(s, AV_LOG_ERROR, "SWF muxer only supports VP6, FLV1 and MJPEG\n");
                return -1;
            }
        }
    }
    if (!swf->video_enc) {
        /* currently, cannot work correctly if audio only */
        width = 320;
        height = 200;
        rate = 10;
        rate_base= 1;
    } else {
        width = swf->video_enc->width;
        height = swf->video_enc->height;
        rate = swf->video_enc->time_base.den;
        rate_base = swf->video_enc->time_base.num;
    }
    if (!swf->audio_enc)
        swf->samples_per_frame = (44100. * rate_base) / rate;
    else
        swf->samples_per_frame = (swf->audio_enc->sample_rate * rate_base) / rate;
    put_tag(pb, "FWS");
    if (!strcmp("avm2", s->oformat->name))
        version = 9;
    else if (swf->video_enc && swf->video_enc->codec_id == CODEC_ID_VP6F)
        version = 8; /* version 8 and above support VP6 codec */
    else if (swf->video_enc && swf->video_enc->codec_id == CODEC_ID_FLV1)
        version = 6; /* version 6 and above support FLV1 codec */
    else
        version = 4; /* version 4 for mpeg audio support */
    put_byte(pb, version);
    put_le32(pb, DUMMY_FILE_SIZE); /* dummy size
                                      (will be patched if not streamed) */
    /* stage size is in twips: 20 twips per pixel */
    put_swf_rect(pb, 0, width * 20, 0, height * 20);
    put_le16(pb, (rate * 256) / rate_base); /* frame rate (8.8 fixed point) */
    swf->duration_pos = url_ftell(pb);
    put_le16(pb, (uint16_t)(DUMMY_DURATION * (int64_t)rate / rate_base)); /* frame count */
    /* avm2/swf v9 (also v8?) files require a file attribute tag */
    if (version == 9) {
        put_swf_tag(s, TAG_FILEATTRIBUTES);
        put_le32(pb, 1<<3); /* set ActionScript v3/AVM2 flag */
        put_swf_end_tag(s);
    }
    /* define a shape with the jpeg inside */
    if (swf->video_enc && swf->video_enc->codec_id == CODEC_ID_MJPEG) {
        put_swf_tag(s, TAG_DEFINESHAPE);
        put_le16(pb, SHAPE_ID); /* ID of shape */
        /* bounding rectangle */
        put_swf_rect(pb, 0, width, 0, height);
        /* style info */
        put_byte(pb, 1); /* one fill style */
        put_byte(pb, 0x41); /* clipped bitmap fill */
        put_le16(pb, BITMAP_ID); /* bitmap ID */
        /* position of the bitmap */
        put_swf_matrix(pb, (int)(1.0 * (1 << FRAC_BITS)), 0,
                       0, (int)(1.0 * (1 << FRAC_BITS)), 0, 0);
        put_byte(pb, 0); /* no line style */
        /* shape drawing */
        init_put_bits(&p, buf1, sizeof(buf1));
        put_bits(&p, 4, 1); /* one fill bit */
        put_bits(&p, 4, 0); /* zero line bit */
        put_bits(&p, 1, 0); /* not an edge */
        put_bits(&p, 5, FLAG_MOVETO | FLAG_SETFILL0);
        put_bits(&p, 5, 1); /* nbits */
        put_bits(&p, 1, 0); /* X */
        put_bits(&p, 1, 0); /* Y */
        put_bits(&p, 1, 1); /* set fill style 1 */
        /* draw the rectangle ! */
        put_swf_line_edge(&p, width, 0);
        put_swf_line_edge(&p, 0, height);
        put_swf_line_edge(&p, -width, 0);
        put_swf_line_edge(&p, 0, -height);
        /* end of shape */
        put_bits(&p, 1, 0); /* not an edge */
        put_bits(&p, 5, 0);
        flush_put_bits(&p);
        put_buffer(pb, buf1, put_bits_ptr(&p) - p.buf);
        put_swf_end_tag(s);
    }
    if (swf->audio_enc && swf->audio_enc->codec_id == CODEC_ID_MP3) {
        int v = 0;
        /* start sound */
        put_swf_tag(s, TAG_STREAMHEAD2);
        switch(swf->audio_enc->sample_rate) {
        case 11025: v |= 1 << 2; break;
        case 22050: v |= 2 << 2; break;
        case 44100: v |= 3 << 2; break;
        default:
            /* not supported */
            av_log(s, AV_LOG_ERROR, "swf does not support that sample rate, choose from (44100, 22050, 11025).\n");
            return -1;
        }
        v |= 0x02; /* 16 bit playback */
        if (swf->audio_enc->channels == 2)
            v |= 0x01; /* stereo playback */
        put_byte(s->pb, v);
        v |= 0x20; /* mp3 compressed */
        put_byte(s->pb, v);
        put_le16(s->pb, swf->samples_per_frame); /* avg samples per frame */
        put_le16(s->pb, 0);
        put_swf_end_tag(s);
    }
    put_flush_packet(s->pb);
    return 0;
}
/**
 * Write one video frame (plus any buffered audio) to the SWF stream.
 *
 * For VP6F/FLV1 the frame is wrapped in a VIDEOFRAME tag (the video
 * stream object is created and placed on the first call); for MJPEG the
 * previous shape and bitmap are removed and the image is re-embedded as
 * a JPEG2 tag.  Audio buffered in the FIFO is emitted as a STREAMBLOCK
 * just before the SHOWFRAME tag.
 *
 * @param s    muxer context
 * @param enc  codec context of the stream being written
 * @param buf  encoded frame data (may be NULL when size is 0, e.g. for
 *             audio-only files driven by swf_write_audio())
 * @param size size of buf in bytes
 * @return 0 on success
 */
static int swf_write_video(AVFormatContext *s,
                           AVCodecContext *enc, const uint8_t *buf, int size)
{
    SWFContext *swf = s->priv_data;
    ByteIOContext *pb = s->pb;

    /* Flash Player limit */
    if (swf->swf_frame_number == 16000)
        av_log(enc, AV_LOG_INFO, "warning: Flash Player limit of 16000 frames reached\n");

    if (enc->codec_id == CODEC_ID_VP6F ||
        enc->codec_id == CODEC_ID_FLV1) {
        if (swf->video_frame_number == 0) {
            /* create a new video object */
            put_swf_tag(s, TAG_VIDEOSTREAM);
            put_le16(pb, VIDEO_ID);
            /* remember where the frame count is stored so the trailer
               can patch in the real value */
            swf->vframes_pos = url_ftell(pb);
            put_le16(pb, 15000); /* hard flash player limit */
            put_le16(pb, enc->width);
            put_le16(pb, enc->height);
            put_byte(pb, 0);
            put_byte(pb,ff_codec_get_tag(swf_codec_tags,enc->codec_id));
            put_swf_end_tag(s);

            /* place the video object for the first time */
            put_swf_tag(s, TAG_PLACEOBJECT2);
            put_byte(pb, 0x36);
            put_le16(pb, 1);
            put_le16(pb, VIDEO_ID);
            put_swf_matrix(pb, 1 << FRAC_BITS, 0, 0, 1 << FRAC_BITS, 0, 0);
            put_le16(pb, swf->video_frame_number);
            put_tag(pb, "video");
            put_byte(pb, 0x00);
            put_swf_end_tag(s);
        } else {
            /* mark the character for update */
            put_swf_tag(s, TAG_PLACEOBJECT2);
            put_byte(pb, 0x11);
            put_le16(pb, 1);
            put_le16(pb, swf->video_frame_number);
            put_swf_end_tag(s);
        }

        /* set video frame data */
        put_swf_tag(s, TAG_VIDEOFRAME | TAG_LONG);
        put_le16(pb, VIDEO_ID);
        put_le16(pb, swf->video_frame_number++);
        put_buffer(pb, buf, size);
        put_swf_end_tag(s);
    } else if (enc->codec_id == CODEC_ID_MJPEG) {
        if (swf->swf_frame_number > 0) {
            /* remove the shape */
            put_swf_tag(s, TAG_REMOVEOBJECT);
            put_le16(pb, SHAPE_ID); /* shape ID */
            put_le16(pb, 1); /* depth */
            put_swf_end_tag(s);

            /* free the bitmap */
            put_swf_tag(s, TAG_FREECHARACTER);
            put_le16(pb, BITMAP_ID);
            put_swf_end_tag(s);
        }

        put_swf_tag(s, TAG_JPEG2 | TAG_LONG);
        put_le16(pb, BITMAP_ID); /* ID of the image */

        /* a dummy jpeg header seems to be required */
        put_be32(pb, 0xffd8ffd9);
        /* write the jpeg image */
        put_buffer(pb, buf, size);
        put_swf_end_tag(s);

        /* draw the shape */
        put_swf_tag(s, TAG_PLACEOBJECT);
        put_le16(pb, SHAPE_ID); /* shape ID */
        put_le16(pb, 1); /* depth */
        put_swf_matrix(pb, 20 << FRAC_BITS, 0, 0, 20 << FRAC_BITS, 0, 0);
        put_swf_end_tag(s);
    }

    swf->swf_frame_number++;

    /* streaming sound always should be placed just before showframe tags */
    if (swf->audio_enc && av_fifo_size(swf->audio_fifo)) {
        int frame_size = av_fifo_size(swf->audio_fifo);
        put_swf_tag(s, TAG_STREAMBLOCK | TAG_LONG);
        put_le16(pb, swf->sound_samples);
        put_le16(pb, 0); // seek samples
        av_fifo_generic_read(swf->audio_fifo, pb, frame_size, &put_buffer);
        put_swf_end_tag(s);

        /* update FIFO */
        swf->sound_samples = 0;
    }

    /* output the frame */
    put_swf_tag(s, TAG_SHOWFRAME);
    put_swf_end_tag(s);
    put_flush_packet(s->pb);

    return 0;
}
/**
 * Buffer one encoded audio packet in the FIFO.
 *
 * The data is not written immediately: swf_write_video() drains the
 * FIFO into a STREAMBLOCK tag right before each SHOWFRAME.  For
 * audio-only files a dummy video call is issued so frames still get
 * emitted.
 *
 * @return 0 on success, -1 if the FIFO would overflow
 */
static int swf_write_audio(AVFormatContext *s,
                           AVCodecContext *enc, uint8_t *buf, int size)
{
    SWFContext *swf = s->priv_data;
    int buffered = av_fifo_size(swf->audio_fifo);

    /* Flash Player limit */
    if (swf->swf_frame_number == 16000)
        av_log(enc, AV_LOG_INFO, "warning: Flash Player limit of 16000 frames reached\n");

    if (buffered + size > AUDIO_FIFO_SIZE) {
        av_log(s, AV_LOG_ERROR, "audio fifo too small to mux audio essence\n");
        return -1;
    }

    av_fifo_generic_write(swf->audio_fifo, buf, size, NULL);
    swf->sound_samples += enc->frame_size;

    /* if audio only stream make sure we add swf frames */
    if (!swf->video_enc)
        swf_write_video(s, enc, 0, 0);

    return 0;
}
/**
 * Dispatch an incoming packet to the audio or video writer based on the
 * codec type of its stream.
 */
static int swf_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *codec = s->streams[pkt->stream_index]->codec;

    switch (codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        return swf_write_audio(s, codec, pkt->data, pkt->size);
    default:
        return swf_write_video(s, codec, pkt->data, pkt->size);
    }
}
/**
 * Finalize the SWF file: free the audio FIFO, emit the END tag and, for
 * seekable output, patch the file size and frame counts written as
 * placeholders by the header/first video frame.
 *
 * Fix: the original freed the audio FIFO once per non-video stream
 * inside the stream loop, which double-frees the single FIFO when more
 * than one non-video stream is present.  It is now freed at most once.
 *
 * @return 0 on success
 */
static int swf_write_trailer(AVFormatContext *s)
{
    SWFContext *swf = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVCodecContext *enc, *video_enc;
    int file_size, i;
    int has_non_video = 0;

    video_enc = NULL;
    for(i=0;i<s->nb_streams;i++) {
        enc = s->streams[i]->codec;
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO)
            video_enc = enc;
        else
            has_non_video = 1;
    }
    /* free the audio FIFO exactly once, no matter how many non-video
       streams were muxed */
    if (has_non_video)
        av_fifo_free(swf->audio_fifo);

    put_swf_tag(s, TAG_END);
    put_swf_end_tag(s);
    put_flush_packet(s->pb);

    /* patch file size and number of frames if not streamed */
    if (!url_is_streamed(s->pb) && video_enc) {
        file_size = url_ftell(pb);
        url_fseek(pb, 4, SEEK_SET);
        put_le32(pb, file_size);
        url_fseek(pb, swf->duration_pos, SEEK_SET);
        put_le16(pb, swf->video_frame_number);
        url_fseek(pb, swf->vframes_pos, SEEK_SET);
        put_le16(pb, swf->video_frame_number);
        url_fseek(pb, file_size, SEEK_SET);
    }
    return 0;
}
#if CONFIG_SWF_MUXER
/* Positional initializer -- order must match the AVOutputFormat
   declaration in avformat.h: name, description, mime type, extensions,
   priv_data_size, default audio codec, default video codec, then the
   write_header/write_packet/write_trailer callbacks. */
AVOutputFormat swf_muxer = {
    "swf",
    NULL_IF_CONFIG_SMALL("Flash format"),
    "application/x-shockwave-flash",
    "swf",
    sizeof(SWFContext),
    CODEC_ID_MP3,   /* default audio codec */
    CODEC_ID_FLV1,  /* default video codec */
    swf_write_header,
    swf_write_packet,
    swf_write_trailer,
};
#endif
#if CONFIG_AVM2_MUXER
/* Same callbacks as the swf muxer; the AVM2 variant differs only in
   swf_write_header emitting the ActionScript v3/AVM2 file attribute. */
AVOutputFormat avm2_muxer = {
    "avm2",
    NULL_IF_CONFIG_SMALL("Flash 9 (AVM2) format"),
    "application/x-shockwave-flash",
    NULL,           /* no default extension */
    sizeof(SWFContext),
    CODEC_ID_MP3,
    CODEC_ID_FLV1,
    swf_write_header,
    swf_write_packet,
    swf_write_trailer,
};
#endif
| 123linslouis-android-video-cutter | jni/libavformat/swfenc.c | C | asf20 | 15,943 |
/*
* Delphine Software International CIN File Demuxer
* Copyright (c) 2006 Gregory Montoir (cyx@users.sourceforge.net)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Delphine Software International CIN file demuxer
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
/* File-level header, parsed once by cin_read_file_header(). */
typedef struct CinFileHeader {
    int video_frame_size;   /* read as le32 */
    int video_frame_width;  /* read as le16 */
    int video_frame_height; /* read as le16 */
    int audio_frequency;    /* read as le32; must be 22050 */
    int audio_bits;         /* one byte; must be 16 */
    int audio_stereo;       /* one byte; must be 0 (mono) */
    int audio_frame_size;   /* read as le16 */
} CinFileHeader;
/* Per-frame header, parsed by cin_read_frame_header(). */
typedef struct CinFrameHeader {
    int audio_frame_type;
    int video_frame_type;
    int pal_colors_count;   /* sign bit (as int16) flags an indexed palette */
    int audio_frame_size;
    int video_frame_size;
} CinFrameHeader;
/* Demuxer state shared between read_header and read_packet. */
typedef struct CinDemuxContext {
    int audio_stream_index;
    int video_stream_index;
    CinFileHeader file_header;
    int64_t audio_stream_pts;
    int64_t video_stream_pts;
    CinFrameHeader frame_header;
    int audio_buffer_size;  /* pending audio bytes for the current frame;
                               0 means the next read parses a new frame */
} CinDemuxContext;
/**
 * Probe for a Delphine CIN file: the stream must begin with the CIN
 * signature, and for extra confidence the fixed audio parameters
 * (22050 Hz, 16 bit, mono) in the header must match as well.
 */
static int cin_probe(AVProbeData *p)
{
    if (AV_RL32(&p->buf[0])  == 0x55AA0000 &&
        AV_RL32(&p->buf[12]) == 22050      &&
        p->buf[16] == 16 && p->buf[17] == 0)
        return AVPROBE_SCORE_MAX;
    return 0;
}
/**
 * Parse the fixed CIN file header.  The get_*() calls consume the
 * stream sequentially, so their order must not change.
 * @return 0 on success, AVERROR_INVALIDDATA on bad magic or parameters
 */
static int cin_read_file_header(CinDemuxContext *cin, ByteIOContext *pb) {
    CinFileHeader *hdr = &cin->file_header;

    /* signature */
    if (get_le32(pb) != 0x55AA0000)
        return AVERROR_INVALIDDATA;

    hdr->video_frame_size   = get_le32(pb);
    hdr->video_frame_width  = get_le16(pb);
    hdr->video_frame_height = get_le16(pb);
    hdr->audio_frequency    = get_le32(pb);
    hdr->audio_bits         = get_byte(pb);
    hdr->audio_stereo       = get_byte(pb);
    hdr->audio_frame_size   = get_le16(pb);

    /* the format only ever carries 22050 Hz 16-bit mono audio */
    if (hdr->audio_frequency != 22050 || hdr->audio_bits != 16 || hdr->audio_stereo != 0)
        return AVERROR_INVALIDDATA;

    return 0;
}
/**
 * Read the file header and create one video and one audio stream with
 * the fixed parameters the format prescribes.
 * @return 0 on success, negative AVERROR code otherwise
 */
static int cin_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    int rc;
    CinDemuxContext *cin = s->priv_data;
    CinFileHeader *hdr = &cin->file_header;
    ByteIOContext *pb = s->pb;
    AVStream *st;

    rc = cin_read_file_header(cin, pb);
    if (rc)
        return rc;

    cin->video_stream_pts = 0;
    cin->audio_stream_pts = 0;
    cin->audio_buffer_size = 0;

    /* initialize the video decoder stream */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    av_set_pts_info(st, 32, 1, 12); /* 32-bit pts, timebase 1/12 */
    cin->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_DSICINVIDEO;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = hdr->video_frame_width;
    st->codec->height = hdr->video_frame_height;

    /* initialize the audio decoder stream */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    av_set_pts_info(st, 32, 1, 22050); /* pts in audio samples */
    cin->audio_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = CODEC_ID_DSICINAUDIO;
    st->codec->codec_tag = 0;  /* no tag */
    st->codec->channels = 1;
    st->codec->sample_rate = 22050;
    st->codec->bits_per_coded_sample = 16;
    st->codec->bit_rate = st->codec->sample_rate * st->codec->bits_per_coded_sample * st->codec->channels;
    st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;

    return 0;
}
/**
 * Parse one per-frame header; the reads consume the stream in order.
 * A trailing 0xAA55AA55 marker validates the header.
 * @return 0 on success, AVERROR(EIO) on EOF/IO error,
 *         AVERROR_INVALIDDATA on a bad end marker
 */
static int cin_read_frame_header(CinDemuxContext *cin, ByteIOContext *pb) {
    CinFrameHeader *hdr = &cin->frame_header;

    hdr->video_frame_type = get_byte(pb);
    hdr->audio_frame_type = get_byte(pb);
    hdr->pal_colors_count = get_le16(pb);
    hdr->video_frame_size = get_le32(pb);
    hdr->audio_frame_size = get_le32(pb);

    if (url_feof(pb) || url_ferror(pb))
        return AVERROR(EIO);

    if (get_le32(pb) != 0xAA55AA55)
        return AVERROR_INVALIDDATA;

    return 0;
}
/**
 * Demux one packet.  Each file frame produces two packets: first the
 * video packet (4-byte private header + palette + frame data), then, on
 * the following call, the frame's audio data whose size was remembered
 * in audio_buffer_size.
 */
static int cin_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    CinDemuxContext *cin = s->priv_data;
    ByteIOContext *pb = s->pb;
    CinFrameHeader *hdr = &cin->frame_header;
    int rc, palette_type, pkt_size;
    int ret;

    if (cin->audio_buffer_size == 0) {
        rc = cin_read_frame_header(cin, pb);
        if (rc)
            return rc;

        /* a negative 16-bit color count flags an indexed palette */
        if ((int16_t)hdr->pal_colors_count < 0) {
            hdr->pal_colors_count = -(int16_t)hdr->pal_colors_count;
            palette_type = 1;
        } else {
            palette_type = 0;
        }

        /* palette and video packet */
        pkt_size = (palette_type + 3) * hdr->pal_colors_count + hdr->video_frame_size;

        ret = av_new_packet(pkt, 4 + pkt_size);
        if (ret < 0)
            return ret;
        pkt->stream_index = cin->video_stream_index;
        pkt->pts = cin->video_stream_pts++;

        /* 4-byte private header consumed by the video decoder */
        pkt->data[0] = palette_type;
        pkt->data[1] = hdr->pal_colors_count & 0xFF;
        pkt->data[2] = hdr->pal_colors_count >> 8;
        pkt->data[3] = hdr->video_frame_type;

        ret = get_buffer(pb, &pkt->data[4], pkt_size);
        if (ret < 0) {
            av_free_packet(pkt);
            return ret;
        }
        /* short read: shrink the packet to what was actually read */
        if (ret < pkt_size)
            av_shrink_packet(pkt, 4 + ret);

        /* sound buffer will be processed on next read_packet() call */
        cin->audio_buffer_size = hdr->audio_frame_size;
        return 0;
    }

    /* audio packet */
    ret = av_get_packet(pb, pkt, cin->audio_buffer_size);
    if (ret < 0)
        return ret;

    pkt->stream_index = cin->audio_stream_index;
    pkt->pts = cin->audio_stream_pts;
    /* advance pts proportionally to the consumed audio data */
    cin->audio_stream_pts += cin->audio_buffer_size * 2 / cin->file_header.audio_frame_size;
    cin->audio_buffer_size = 0;
    return 0;
}
/* Positional initializer -- order must match the AVInputFormat
   declaration in avformat.h: name, description, priv_data_size,
   read_probe, read_header, read_packet. */
AVInputFormat dsicin_demuxer = {
    "dsicin",
    NULL_IF_CONFIG_SMALL("Delphine Software International CIN format"),
    sizeof(CinDemuxContext),
    cin_probe,
    cin_read_header,
    cin_read_packet,
};
| 123linslouis-android-video-cutter | jni/libavformat/dsicin.c | C | asf20 | 6,638 |
/*
* RTP H264 Protocol (RFC3984)
* Copyright (c) 2006 Ryan Martell
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_RTPDEC_H264_H
#define AVFORMAT_RTPDEC_H264_H
#include "rtpdec.h"
extern RTPDynamicProtocolHandler ff_h264_dynamic_handler;
#endif /* AVFORMAT_RTPDEC_H264_H */
| 123linslouis-android-video-cutter | jni/libavformat/rtpdec_h264.h | C | asf20 | 1,012 |
/*
* ID3v1 header parser
* Copyright (c) 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_ID3V1_H
#define AVFORMAT_ID3V1_H
#include "avformat.h"
#define ID3v1_TAG_SIZE 128
#define ID3v1_GENRE_MAX 147
/**
* ID3v1 genres
*/
extern const char * const ff_id3v1_genre_str[ID3v1_GENRE_MAX + 1];
/**
* Read an ID3v1 tag
*/
void ff_id3v1_read(AVFormatContext *s);
#endif /* AVFORMAT_ID3V1_H */
| 123linslouis-android-video-cutter | jni/libavformat/id3v1.h | C | asf20 | 1,152 |
/*
* AVC helper functions for muxers
* Copyright (c) 2008 Aurelien Jacobs <aurel@gnuage.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_AVC_H
#define AVFORMAT_AVC_H
#include <stdint.h>
#include "avio.h"
int ff_avc_parse_nal_units(ByteIOContext *s, const uint8_t *buf, int size);
int ff_avc_parse_nal_units_buf(const uint8_t *buf_in, uint8_t **buf, int *size);
int ff_isom_write_avcc(ByteIOContext *pb, const uint8_t *data, int len);
const uint8_t *ff_avc_find_startcode(const uint8_t *p, const uint8_t *end);
#endif /* AVFORMAT_AVC_H */
| 123linslouis-android-video-cutter | jni/libavformat/avc.h | C | asf20 | 1,280 |
/*
* Various simple utilities for ffmpeg system
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "internal.h"
/* add one element to a dynamic array */
/**
 * Append elem to *tab_ptr and increment *nb_ptr.  The array is grown
 * whenever the current count is a power of two, doubling the capacity
 * (starting at 1 element).
 *
 * NOTE(review): the av_realloc() result is stored without a NULL check,
 * so an allocation failure makes the following store dereference NULL
 * -- confirm whether callers rely on crash-on-OOM semantics before
 * changing this.
 */
void ff_dynarray_add(intptr_t **tab_ptr, int *nb_ptr, intptr_t elem)
{
    int nb, nb_alloc;
    intptr_t *tab;

    nb = *nb_ptr;
    tab = *tab_ptr;
    /* (nb & (nb - 1)) == 0 is true exactly when nb is 0 or a power of
       two, i.e. when the current capacity is exhausted */
    if ((nb & (nb - 1)) == 0) {
        if (nb == 0)
            nb_alloc = 1;
        else
            nb_alloc = nb * 2;
        tab = av_realloc(tab, nb_alloc * sizeof(intptr_t));
        *tab_ptr = tab;
    }
    tab[nb++] = elem;
    *nb_ptr = nb;
}
/**
 * Convert a broken-down UTC time to seconds since the Unix epoch,
 * without consulting the local timezone (i.e. a timegm() replacement).
 *
 * Uses the classic civil-date day-count formula with March treated as
 * the first month of the year, so leap days fall at the end.
 */
time_t mktimegm(struct tm *tm)
{
    int year  = tm->tm_year + 1900;
    int month = tm->tm_mon + 1;
    int day   = tm->tm_mday;
    time_t result;

    /* count January and February as months 13/14 of the previous year */
    if (month < 3) {
        month += 12;
        year--;
    }

    /* days since the epoch, then add the time of day */
    result = 86400 *
        (day + (153 * month - 457) / 5 + 365 * year + year / 4 - year / 100 + year / 400 - 719469);
    result += 3600 * tm->tm_hour + 60 * tm->tm_min + tm->tm_sec;

    return result;
}
#define ISLEAP(y) (((y) % 4 == 0) && (((y) % 100) != 0 || ((y) % 400) == 0))
#define LEAPS_COUNT(y) ((y)/4 - (y)/100 + (y)/400)

/* This is our own gmtime_r. It differs from its POSIX counterpart in a
   couple of places: tm_year holds the complete year (not year - 1900)
   and tm_mon runs from 1 to 12 (not 0 to 11). */
struct tm *brktimegm(time_t secs, struct tm *tm)
{
    int month_days[] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
    int remaining = secs / 86400;       /* whole days since the epoch */
    int today     = secs % 86400;       /* seconds within the day */
    int year, guess, month;

    tm->tm_hour = today / 3600;
    tm->tm_min  = today % 3600 / 60;
    tm->tm_sec  = today % 60;

    /* iteratively converge on the year: overshoot with a 366-day guess,
       then subtract the exact day count of the skipped years */
    year = 1970;
    while (remaining > 365) {
        guess = year + remaining / 366;
        remaining -= (guess - year) * 365
                     + LEAPS_COUNT(guess - 1) - LEAPS_COUNT(year - 1);
        year = guess;
    }
    /* day 365 of a non-leap year is really January 1st of the next one */
    if (remaining == 365 && !ISLEAP(year)) {
        remaining = 0;
        year++;
    }

    month_days[1] = ISLEAP(year) ? 29 : 28;
    for (month = 0; remaining >= month_days[month]; month++)
        remaining -= month_days[month];

    tm->tm_year = year;       /* unlike gmtime_r we store complete year here */
    tm->tm_mon  = month + 1;  /* unlike gmtime_r tm_mon is from 1 to 12 */
    tm->tm_mday = remaining + 1;

    return tm;
}
/* get a positive number between n_min and n_max, for a maximum length
   of len_max. Return -1 if error. */
/**
 * Parse an unsigned decimal number from *pp.
 * On success *pp is advanced past the digits and the value returned;
 * on failure (no digits, or value out of [n_min, n_max]) *pp is left
 * unchanged and -1 is returned.
 */
static int date_get_num(const char **pp,
                        int n_min, int n_max, int len_max)
{
    int i, val, c;
    const char *p;

    p = *pp;
    val = 0;
    for (i = 0; i < len_max; i++) {
        c = *p;
        /* cast to unsigned char: passing a negative plain char to
           isdigit() is undefined behavior */
        if (!isdigit((unsigned char)c))
            break;
        val = (val * 10) + c - '0';
        p++;
    }
    /* no number read ? */
    if (p == *pp)
        return -1;
    if (val < n_min || val > n_max)
        return -1;
    *pp = p;
    return val;
}

/* small strptime for ffmpeg */
/**
 * Minimal strptime() replacement supporting the conversions
 * %H %M %S %Y %m %d and the literal "%%".  Any other character in fmt
 * must match the input exactly.
 *
 * @return pointer just past the last consumed input character on
 *         success, NULL on any mismatch or out-of-range field
 */
const char *small_strptime(const char *p, const char *fmt,
                           struct tm *dt)
{
    int spec, val;

    while ((spec = *fmt++) != '\0') {
        if (spec != '%') {
            /* literal character: must match the input exactly */
            if (spec != *p)
                return NULL;
            p++;
            continue;
        }
        spec = *fmt++;
        switch (spec) {
        case 'H':
            val = date_get_num(&p, 0, 23, 2);
            if (val == -1)
                return NULL;
            dt->tm_hour = val;
            break;
        case 'M':
            val = date_get_num(&p, 0, 59, 2);
            if (val == -1)
                return NULL;
            dt->tm_min = val;
            break;
        case 'S':
            val = date_get_num(&p, 0, 59, 2);
            if (val == -1)
                return NULL;
            dt->tm_sec = val;
            break;
        case 'Y':
            val = date_get_num(&p, 0, 9999, 4);
            if (val == -1)
                return NULL;
            dt->tm_year = val - 1900;
            break;
        case 'm':
            val = date_get_num(&p, 1, 12, 2);
            if (val == -1)
                return NULL;
            dt->tm_mon = val - 1;
            break;
        case 'd':
            val = date_get_num(&p, 1, 31, 2);
            if (val == -1)
                return NULL;
            dt->tm_mday = val;
            break;
        case '%':
            /* "%%" matches a single literal '%' (a trailing '%' in fmt
               hits the default case via the NUL byte and fails) */
            if (*p != '%')
                return NULL;
            p++;
            break;
        default:
            return NULL;
        }
    }
    return p;
}
| 123linslouis-android-video-cutter | jni/libavformat/cutils.c | C | asf20 | 5,125 |
/*
* NuppelVideo demuxer.
* Copyright (c) 2006 Reimar Doeffinger
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "riff.h"
/* Demuxer state: stream ids assigned while parsing the file header. */
typedef struct {
    int v_id;        /* video stream id, -1 when the file has no video */
    int a_id;        /* audio stream id, -1 when the file has no audio */
    int rtjpg_video; /* set for RTjpeg video (CODEC_ID_NUV): the 12-byte
                        chunk header is then prepended to each packet */
} NUVContext;
/* frame type byte at the start of every chunk */
typedef enum {
    NUV_VIDEO     = 'V',
    NUV_EXTRADATA = 'D',
    NUV_AUDIO     = 'A',
    NUV_SEEKP     = 'R',
    NUV_MYTHEXT   = 'X'
} nuv_frametype;
/**
 * Probe for a NuppelVideo/MythTV file by its signature.  The compare
 * covers 12 bytes, i.e. the 11-character id plus its terminating NUL.
 */
static int nuv_probe(AVProbeData *p) {
    if (!memcmp(p->buf, "NuppelVideo", 12) ||
        !memcmp(p->buf, "MythTVVideo", 12))
        return AVPROBE_SCORE_MAX;
    return 0;
}
//! little macro to sanitize packet size (upper byte is not part of it)
#define PKTSIZE(s) (s & 0xffffff)
/**
 * \brief read until we found all data needed for decoding
 * \param vst video stream of which to change parameters
 * \param ast audio stream of which to change parameters
 * \param myth set if this is a MythTVVideo format file
 * \return 1 if all required codec data was found
 *
 * Consumes chunks sequentially until an 'R' extradata chunk (plain NUV)
 * or an 'X' MythTV extension chunk (MythTV) provides the codec setup.
 */
static int get_codec_data(ByteIOContext *pb, AVStream *vst,
                          AVStream *ast, int myth) {
    nuv_frametype frametype;

    if (!vst && !myth)
        return 1; // no codec data needed
    while (!url_feof(pb)) {
        int size, subtype;

        frametype = get_byte(pb);
        switch (frametype) {
        case NUV_EXTRADATA:
            subtype = get_byte(pb);
            url_fskip(pb, 6);
            size = PKTSIZE(get_le32(pb));
            if (vst && subtype == 'R') {
                /* NOTE(review): av_malloc() result is not checked and
                   no FF_INPUT_BUFFER_PADDING is added -- confirm */
                vst->codec->extradata_size = size;
                vst->codec->extradata = av_malloc(size);
                get_buffer(pb, vst->codec->extradata, size);
                size = 0; /* already consumed; skip nothing below */
                if (!myth)
                    return 1;
            }
            break;
        case NUV_MYTHEXT:
            url_fskip(pb, 7);
            size = PKTSIZE(get_le32(pb));
            if (size != 128 * 4)
                break;
            get_le32(pb); // version
            if (vst) {
                vst->codec->codec_tag = get_le32(pb);
                vst->codec->codec_id =
                    ff_codec_get_id(ff_codec_bmp_tags, vst->codec->codec_tag);
                if (vst->codec->codec_tag == MKTAG('R', 'J', 'P', 'G'))
                    vst->codec->codec_id = CODEC_ID_NUV;
            } else
                url_fskip(pb, 4);
            if (ast) {
                ast->codec->codec_tag = get_le32(pb);
                ast->codec->sample_rate = get_le32(pb);
                ast->codec->bits_per_coded_sample = get_le32(pb);
                ast->codec->channels = get_le32(pb);
                ast->codec->codec_id =
                    ff_wav_codec_get_id(ast->codec->codec_tag,
                                        ast->codec->bits_per_coded_sample);
                ast->need_parsing = AVSTREAM_PARSE_FULL;
            } else
                url_fskip(pb, 4 * 4);
            /* skip the remainder of the 128*4 byte extension block */
            size -= 6 * 4;
            url_fskip(pb, size);
            return 1;
        case NUV_SEEKP:
            /* seek-point chunks carry a fixed 11 bytes of payload */
            size = 11;
            break;
        default:
            url_fskip(pb, 7);
            size = PKTSIZE(get_le32(pb));
            break;
        }
        url_fskip(pb, size);
    }
    return 0;
}
/**
 * Parse the NuppelVideo file header and create the video and/or audio
 * streams; the reads consume the stream sequentially.
 */
static int nuv_header(AVFormatContext *s, AVFormatParameters *ap) {
    NUVContext *ctx = s->priv_data;
    ByteIOContext *pb = s->pb;
    char id_string[12];
    double aspect, fps;
    int is_mythtv, width, height, v_packs, a_packs;
    int stream_nr = 0;
    AVStream *vst = NULL, *ast = NULL;

    get_buffer(pb, id_string, 12);
    is_mythtv = !memcmp(id_string, "MythTVVideo", 12);
    url_fskip(pb, 5); // version string
    url_fskip(pb, 3); // padding
    width = get_le32(pb);
    height = get_le32(pb);
    get_le32(pb); // unused, "desiredwidth"
    get_le32(pb); // unused, "desiredheight"
    get_byte(pb); // 'P' == progressive, 'I' == interlaced
    url_fskip(pb, 3); // padding
    aspect = av_int2dbl(get_le64(pb));
    /* treat (almost exactly) 1.0 as the common 4:3 case */
    if (aspect > 0.9999 && aspect < 1.0001)
        aspect = 4.0 / 3.0;
    fps = av_int2dbl(get_le64(pb));

    // number of packets per stream type, -1 means unknown, e.g. streaming
    v_packs = get_le32(pb);
    a_packs = get_le32(pb);
    get_le32(pb); // text
    get_le32(pb); // keyframe distance (?)

    if (v_packs) {
        ctx->v_id = stream_nr++;
        vst = av_new_stream(s, ctx->v_id);
        if (!vst)
            return AVERROR(ENOMEM);
        vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        vst->codec->codec_id = CODEC_ID_NUV;
        vst->codec->width = width;
        vst->codec->height = height;
        vst->codec->bits_per_coded_sample = 10;
        vst->sample_aspect_ratio = av_d2q(aspect * height / width, 10000);
        vst->r_frame_rate = av_d2q(fps, 60000);
        av_set_pts_info(vst, 32, 1, 1000); /* pts in milliseconds */
    } else
        ctx->v_id = -1;

    if (a_packs) {
        ctx->a_id = stream_nr++;
        ast = av_new_stream(s, ctx->a_id);
        if (!ast)
            return AVERROR(ENOMEM);
        /* defaults; may be overridden by the MythTV extension chunk */
        ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        ast->codec->codec_id = CODEC_ID_PCM_S16LE;
        ast->codec->channels = 2;
        ast->codec->sample_rate = 44100;
        ast->codec->bit_rate = 2 * 2 * 44100 * 8;
        ast->codec->block_align = 2 * 2;
        ast->codec->bits_per_coded_sample = 16;
        av_set_pts_info(ast, 32, 1, 1000); /* pts in milliseconds */
    } else
        ctx->a_id = -1;

    get_codec_data(pb, vst, ast, is_mythtv);
    ctx->rtjpg_video = vst && vst->codec->codec_id == CODEC_ID_NUV;

    return 0;
}
/* size of the per-chunk header */
#define HDRSIZE 12
/**
 * Read one packet, skipping chunk types that do not map to a stream.
 * For RTjpeg video the 12-byte chunk header is kept at the start of the
 * packet data (the decoder needs it).
 */
static int nuv_packet(AVFormatContext *s, AVPacket *pkt) {
    NUVContext *ctx = s->priv_data;
    ByteIOContext *pb = s->pb;
    uint8_t hdr[HDRSIZE];
    nuv_frametype frametype;
    int ret, size;

    while (!url_feof(pb)) {
        int copyhdrsize = ctx->rtjpg_video ? HDRSIZE : 0;
        uint64_t pos = url_ftell(pb);

        ret = get_buffer(pb, hdr, HDRSIZE);
        if (ret < HDRSIZE)
            return ret < 0 ? ret : AVERROR(EIO);

        frametype = hdr[0];
        size = PKTSIZE(AV_RL32(&hdr[8]));

        switch (frametype) {
        case NUV_EXTRADATA:
            if (!ctx->rtjpg_video) {
                url_fskip(pb, size);
                break;
            }
            /* fallthrough: RTjpeg extradata is delivered to the decoder
               as a video packet */
        case NUV_VIDEO:
            if (ctx->v_id < 0) {
                av_log(s, AV_LOG_ERROR, "Video packet in file without video stream!\n");
                url_fskip(pb, size);
                break;
            }
            ret = av_new_packet(pkt, copyhdrsize + size);
            if (ret < 0)
                return ret;

            // HACK: we have no idea if it is a keyframe,
            // but if we mark none seeking will not work at all.
            pkt->flags |= AV_PKT_FLAG_KEY;
            pkt->pos = pos;
            pkt->pts = AV_RL32(&hdr[4]);
            pkt->stream_index = ctx->v_id;
            memcpy(pkt->data, hdr, copyhdrsize);
            ret = get_buffer(pb, pkt->data + copyhdrsize, size);
            if (ret < 0) {
                av_free_packet(pkt);
                return ret;
            }
            /* short read: shrink the packet to what was actually read */
            if (ret < size)
                av_shrink_packet(pkt, copyhdrsize + ret);
            return 0;
        case NUV_AUDIO:
            if (ctx->a_id < 0) {
                av_log(s, AV_LOG_ERROR, "Audio packet in file without audio stream!\n");
                url_fskip(pb, size);
                break;
            }
            ret = av_get_packet(pb, pkt, size);

            pkt->flags |= AV_PKT_FLAG_KEY;
            pkt->pos = pos;
            pkt->pts = AV_RL32(&hdr[4]);
            pkt->stream_index = ctx->a_id;
            if (ret < 0) return ret;
            return 0;
        case NUV_SEEKP:
            // contains no data, size value is invalid
            break;
        default:
            url_fskip(pb, size);
            break;
        }
    }
    return AVERROR(EIO);
}
/* Positional initializer -- order must match the AVInputFormat
   declaration in avformat.h: name, description, priv_data_size,
   read_probe, read_header, read_packet, then two unused callbacks. */
AVInputFormat nuv_demuxer = {
    "nuv",
    NULL_IF_CONFIG_SMALL("NuppelVideo format"),
    sizeof(NUVContext),
    nuv_probe,
    nuv_header,
    nuv_packet,
    NULL,
    NULL,
    .flags = AVFMT_GENERIC_INDEX,
};
| 123linslouis-android-video-cutter | jni/libavformat/nuv.c | C | asf20 | 9,146 |
/*
* ISO Media common code
* Copyright (c) 2001 Fabrice Bellard
* Copyright (c) 2002 Francois Revol <revol@free.fr>
* Copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "riff.h"
#include "isom.h"
/* http://www.mp4ra.org */
/* ordered by muxing preference */
/* Maps CODEC_IDs to the MPEG-4 ObjectTypeIndication values used in the
   esds box; terminated by a CODEC_ID_NONE entry. */
const AVCodecTag ff_mp4_obj_type[] = {
    { CODEC_ID_MOV_TEXT  , 0x08 },
    { CODEC_ID_MPEG4     , 0x20 },
    { CODEC_ID_H264      , 0x21 },
    { CODEC_ID_AAC       , 0x40 },
    { CODEC_ID_MP4ALS    , 0x40 }, /* 14496-3 ALS */
    { CODEC_ID_MPEG2VIDEO, 0x61 }, /* MPEG2 Main */
    { CODEC_ID_MPEG2VIDEO, 0x60 }, /* MPEG2 Simple */
    { CODEC_ID_MPEG2VIDEO, 0x62 }, /* MPEG2 SNR */
    { CODEC_ID_MPEG2VIDEO, 0x63 }, /* MPEG2 Spatial */
    { CODEC_ID_MPEG2VIDEO, 0x64 }, /* MPEG2 High */
    { CODEC_ID_MPEG2VIDEO, 0x65 }, /* MPEG2 422 */
    { CODEC_ID_AAC       , 0x66 }, /* MPEG2 AAC Main */
    { CODEC_ID_AAC       , 0x67 }, /* MPEG2 AAC Low */
    { CODEC_ID_AAC       , 0x68 }, /* MPEG2 AAC SSR */
    { CODEC_ID_MP3       , 0x69 }, /* 13818-3 */
    { CODEC_ID_MP2       , 0x69 }, /* 11172-3 */
    { CODEC_ID_MPEG1VIDEO, 0x6A }, /* 11172-2 */
    { CODEC_ID_MP3       , 0x6B }, /* 11172-3 */
    { CODEC_ID_MJPEG     , 0x6C }, /* 10918-1 */
    { CODEC_ID_PNG       , 0x6D },
    { CODEC_ID_JPEG2000  , 0x6E }, /* 15444-1 */
    { CODEC_ID_VC1       , 0xA3 },
    { CODEC_ID_DIRAC     , 0xA4 },
    { CODEC_ID_AC3       , 0xA5 },
    { CODEC_ID_VORBIS    , 0xDD }, /* non standard, gpac uses it */
    { CODEC_ID_DVD_SUBTITLE, 0xE0 }, /* non standard, see unsupported-embedded-subs-2.mp4 */
    { CODEC_ID_QCELP     , 0xE1 },
    { CODEC_ID_NONE      ,    0 },
};
/* Maps CODEC_IDs to QuickTime/MOV video sample description fourccs;
   terminated by a CODEC_ID_NONE entry. */
const AVCodecTag codec_movvideo_tags[] = {
/*  { CODEC_ID_, MKTAG('I', 'V', '5', '0') }, *//* Indeo 5.0 */
    { CODEC_ID_RAWVIDEO, MKTAG('r', 'a', 'w', ' ') }, /* Uncompressed RGB */
    { CODEC_ID_RAWVIDEO, MKTAG('y', 'u', 'v', '2') }, /* Uncompressed YUV422 */
    { CODEC_ID_RAWVIDEO, MKTAG('A', 'V', 'U', 'I') }, /* YUV with alpha-channel (AVID Uncompressed) */
    { CODEC_ID_RAWVIDEO, MKTAG('2', 'v', 'u', 'y') }, /* UNCOMPRESSED 8BIT 4:2:2 */
    { CODEC_ID_RAWVIDEO, MKTAG('y', 'u', 'v', 's') }, /* same as 2vuy but byte swapped */
    { CODEC_ID_R210, MKTAG('r', '2', '1', '0') }, /* UNCOMPRESSED 10BIT RGB */
    { CODEC_ID_V210, MKTAG('v', '2', '1', '0') }, /* UNCOMPRESSED 10BIT 4:2:2 */
    { CODEC_ID_MJPEG, MKTAG('j', 'p', 'e', 'g') }, /* PhotoJPEG */
    { CODEC_ID_MJPEG, MKTAG('m', 'j', 'p', 'a') }, /* Motion-JPEG (format A) */
    { CODEC_ID_MJPEG, MKTAG('A', 'V', 'D', 'J') }, /* MJPEG with alpha-channel (AVID JFIF meridien compressed) */
/*  { CODEC_ID_MJPEG, MKTAG('A', 'V', 'R', 'n') }, *//* MJPEG with alpha-channel (AVID ABVB/Truevision NuVista) */
    { CODEC_ID_MJPEG, MKTAG('d', 'm', 'b', '1') }, /* Motion JPEG OpenDML */
    { CODEC_ID_MJPEGB, MKTAG('m', 'j', 'p', 'b') }, /* Motion-JPEG (format B) */
    { CODEC_ID_SVQ1, MKTAG('S', 'V', 'Q', '1') }, /* Sorenson Video v1 */
    { CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', '1') }, /* Sorenson Video v1 */
    { CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', 'i') }, /* Sorenson Video v1 (from QT specs)*/
    { CODEC_ID_SVQ3, MKTAG('S', 'V', 'Q', '3') }, /* Sorenson Video v3 */
    { CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') },
    { CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', 'X') }, /* OpenDiVX *//* sample files at http://heroinewarrior.com/xmovie.php3 use this tag */
    { CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D') },
    { CODEC_ID_MPEG4, MKTAG('3', 'I', 'V', '2') }, /* experimental: 3IVX files before ivx D4 4.5.1 */
    { CODEC_ID_H263, MKTAG('h', '2', '6', '3') }, /* H263 */
    { CODEC_ID_H263, MKTAG('s', '2', '6', '3') }, /* H263 ?? works */
    { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', 'p') }, /* DV PAL */
    { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', ' ') }, /* DV NTSC */
    { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'p', 'p') }, /* DVCPRO PAL produced by FCP */
    { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'p') }, /* DVCPRO50 PAL produced by FCP */
    { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'n') }, /* DVCPRO50 NTSC produced by FCP */
    { CODEC_ID_DVVIDEO, MKTAG('A', 'V', 'd', 'v') }, /* AVID DV */
    { CODEC_ID_DVVIDEO, MKTAG('A', 'V', 'd', '1') }, /* AVID DV100 */
    { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', 'q') }, /* DVCPRO HD 720p50 */
    { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', 'p') }, /* DVCPRO HD 720p60 */
    { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '1') },
    { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '2') },
    { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '4') },
    { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '5') }, /* DVCPRO HD 50i produced by FCP */
    { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '6') }, /* DVCPRO HD 60i produced by FCP */
    { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '3') }, /* DVCPRO HD 30p produced by FCP */
    { CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') }, /* On2 VP3 */
    { CODEC_ID_RPZA, MKTAG('r', 'p', 'z', 'a') }, /* Apple Video (RPZA) */
    { CODEC_ID_CINEPAK, MKTAG('c', 'v', 'i', 'd') }, /* Cinepak */
    { CODEC_ID_8BPS, MKTAG('8', 'B', 'P', 'S') }, /* Planar RGB (8BPS) */
    { CODEC_ID_SMC, MKTAG('s', 'm', 'c', ' ') }, /* Apple Graphics (SMC) */
    { CODEC_ID_QTRLE, MKTAG('r', 'l', 'e', ' ') }, /* Apple Animation (RLE) */
    { CODEC_ID_MSRLE, MKTAG('W', 'R', 'L', 'E') },
    { CODEC_ID_QDRAW, MKTAG('q', 'd', 'r', 'w') }, /* QuickDraw */
    { CODEC_ID_RAWVIDEO, MKTAG('W', 'R', 'A', 'W') },
    { CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') }, /* AVC-1/H.264 */
    { CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'e', 'g') }, /* MPEG */
    { CODEC_ID_MPEG1VIDEO, MKTAG('m', '1', 'v', '1') },
    { CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '1') }, /* MPEG2 HDV 720p30 */
    { CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '2') }, /* MPEG2 HDV 1080i60 */
    { CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '3') }, /* MPEG2 HDV 1080i50 */
    { CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '5') }, /* MPEG2 HDV 720p25 */
    { CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '6') }, /* MPEG2 HDV 1080p24 */
    { CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '7') }, /* MPEG2 HDV 1080p25 */
    { CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '8') }, /* MPEG2 HDV 1080p30 */
    { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '5', 'n') }, /* MPEG2 IMX NTSC 525/60 50mb/s produced by FCP */
    { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '5', 'p') }, /* MPEG2 IMX PAL 625/50 50mb/s produced by FCP */
    { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '4', 'n') }, /* MPEG2 IMX NTSC 525/60 40mb/s produced by FCP */
    { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '4', 'p') }, /* MPEG2 IMX PAL 625/50 40mb/s produced by FCP */
    { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '3', 'n') }, /* MPEG2 IMX NTSC 525/60 30mb/s produced by FCP */
    { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '3', 'p') }, /* MPEG2 IMX PAL 625/50 30mb/s produced by FCP */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', '5', '9') }, /* XDCAM HD422 720p60 CBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', '5', 'a') }, /* XDCAM HD422 720p50 CBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', '5', 'b') }, /* XDCAM HD422 1080i60 CBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', '5', 'c') }, /* XDCAM HD422 1080i50 CBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', '5', 'd') }, /* XDCAM HD422 1080p24 CBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', '5', 'e') }, /* XDCAM HD422 1080p25 CBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', '5', 'f') }, /* XDCAM HD422 1080p30 CBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '1') }, /* XDCAM EX 720p30 VBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '2') }, /* XDCAM HD 1080i60 */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '3') }, /* XDCAM HD 1080i50 VBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '4') }, /* XDCAM EX 720p24 VBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '5') }, /* XDCAM EX 720p25 VBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '6') }, /* XDCAM HD 1080p24 VBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '7') }, /* XDCAM HD 1080p25 VBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '8') }, /* XDCAM HD 1080p30 VBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', '9') }, /* XDCAM EX 720p60 VBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', 'a') }, /* XDCAM EX 720p50 VBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', 'b') }, /* XDCAM EX 1080i60 VBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', 'c') }, /* XDCAM EX 1080i50 VBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', 'd') }, /* XDCAM EX 1080p24 VBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', 'e') }, /* XDCAM EX 1080p25 VBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('x', 'd', 'v', 'f') }, /* XDCAM EX 1080p30 VBR */
    { CODEC_ID_MPEG2VIDEO, MKTAG('A', 'V', 'm', 'p') }, /* AVID IMX PAL */
    { CODEC_ID_MPEG2VIDEO, MKTAG('m', '2', 'v', '1') },
    { CODEC_ID_JPEG2000, MKTAG('m', 'j', 'p', '2') }, /* JPEG 2000 produced by FCP */
    { CODEC_ID_TARGA, MKTAG('t', 'g', 'a', ' ') }, /* Truevision Targa */
    { CODEC_ID_TIFF, MKTAG('t', 'i', 'f', 'f') }, /* TIFF embedded in MOV */
    { CODEC_ID_GIF, MKTAG('g', 'i', 'f', ' ') }, /* embedded gif files as frames (usually one "click to play movie" frame) */
    { CODEC_ID_PNG, MKTAG('p', 'n', 'g', ' ') },
    { CODEC_ID_VC1, MKTAG('v', 'c', '-', '1') }, /* SMPTE RP 2025 */
    { CODEC_ID_CAVS, MKTAG('a', 'v', 's', '2') },
    { CODEC_ID_DIRAC, MKTAG('d', 'r', 'a', 'c') },
    { CODEC_ID_DNXHD, MKTAG('A', 'V', 'd', 'n') }, /* AVID DNxHD */
    { CODEC_ID_RAWVIDEO, MKTAG('A', 'V', '1', 'x') }, /* AVID 1:1x */
    { CODEC_ID_RAWVIDEO, MKTAG('A', 'V', 'u', 'p') },
    { CODEC_ID_SGI, MKTAG('s', 'g', 'i', ' ') }, /* SGI */
    { CODEC_ID_DPX, MKTAG('d', 'p', 'x', ' ') }, /* DPX */
    { CODEC_ID_NONE, 0 },
};
/* MOV/QuickTime audio sample-description fourcc -> codec id mapping.
 * Several PCM variants share one tag (e.g. 'in32' for both endiannesses);
 * the actual format is resolved from the sample description elsewhere.
 * Terminated by a CODEC_ID_NONE entry. */
const AVCodecTag codec_movaudio_tags[] = {
    { CODEC_ID_PCM_S32BE, MKTAG('i', 'n', '3', '2') },
    { CODEC_ID_PCM_S32LE, MKTAG('i', 'n', '3', '2') },
    { CODEC_ID_PCM_S24BE, MKTAG('i', 'n', '2', '4') },
    { CODEC_ID_PCM_S24LE, MKTAG('i', 'n', '2', '4') },
    { CODEC_ID_PCM_S16BE, MKTAG('t', 'w', 'o', 's') }, /* 16 bits */
    { CODEC_ID_PCM_S16LE, MKTAG('s', 'o', 'w', 't') }, /*  */
    { CODEC_ID_PCM_S16LE, MKTAG('l', 'p', 'c', 'm') },
    { CODEC_ID_PCM_F32BE, MKTAG('f', 'l', '3', '2') },
    { CODEC_ID_PCM_F32LE, MKTAG('f', 'l', '3', '2') },
    { CODEC_ID_PCM_F64BE, MKTAG('f', 'l', '6', '4') },
    { CODEC_ID_PCM_F64LE, MKTAG('f', 'l', '6', '4') },
    { CODEC_ID_PCM_S8,    MKTAG('s', 'o', 'w', 't') },
    { CODEC_ID_PCM_U8,    MKTAG('r', 'a', 'w', ' ') }, /* 8 bits unsigned */
    { CODEC_ID_PCM_U8,    MKTAG('N', 'O', 'N', 'E') }, /* uncompressed */
    { CODEC_ID_PCM_MULAW, MKTAG('u', 'l', 'a', 'w') }, /*  */
    { CODEC_ID_PCM_ALAW,  MKTAG('a', 'l', 'a', 'w') }, /*  */
    { CODEC_ID_ADPCM_IMA_QT, MKTAG('i', 'm', 'a', '4') }, /* IMA-4 ADPCM */
    { CODEC_ID_MACE3, MKTAG('M', 'A', 'C', '3') }, /* Macintosh Audio Compression and Expansion 3:1 */
    { CODEC_ID_MACE6, MKTAG('M', 'A', 'C', '6') }, /* Macintosh Audio Compression and Expansion 6:1 */
    { CODEC_ID_MP1, MKTAG('.', 'm', 'p', '1') }, /* MPEG layer 1 */
    { CODEC_ID_MP2, MKTAG('.', 'm', 'p', '2') }, /* MPEG layer 2 */
    { CODEC_ID_MP3, MKTAG('.', 'm', 'p', '3') }, /* MPEG layer 3 */ /* sample files at http://www.3ivx.com/showcase.html use this tag */
    { CODEC_ID_MP3, 0x6D730055 }, /* MPEG layer 3 */
/*    { CODEC_ID_OGG_VORBIS, MKTAG('O', 'g', 'g', 'S') }, *//* sample files at http://heroinewarrior.com/xmovie.php3 use this tag */
    { CODEC_ID_AAC, MKTAG('m', 'p', '4', 'a') }, /* MPEG-4 AAC */
    { CODEC_ID_AC3, MKTAG('a', 'c', '-', '3') }, /* ETSI TS 102 366 Annex F */
    { CODEC_ID_AC3, MKTAG('s', 'a', 'c', '3') }, /* Nero Recode */
    { CODEC_ID_AMR_NB, MKTAG('s', 'a', 'm', 'r') }, /* AMR-NB 3gp */
    { CODEC_ID_AMR_WB, MKTAG('s', 'a', 'w', 'b') }, /* AMR-WB 3gp */
    { CODEC_ID_GSM,  MKTAG('a', 'g', 's', 'm') },
    { CODEC_ID_ALAC, MKTAG('a', 'l', 'a', 'c') }, /* Apple Lossless */
    { CODEC_ID_QCELP, MKTAG('Q','c','l','p') },
    { CODEC_ID_QCELP, MKTAG('Q','c','l','q') },
    { CODEC_ID_QCELP, MKTAG('s','q','c','p') }, /* ISO Media fourcc */
    { CODEC_ID_QDM2, MKTAG('Q', 'D', 'M', '2') }, /* QDM2 */
    { CODEC_ID_DVAUDIO, MKTAG('v', 'd', 'v', 'a') },
    { CODEC_ID_DVAUDIO, MKTAG('d', 'v', 'c', 'a') },
    { CODEC_ID_WMAV2, MKTAG('W', 'M', 'A', '2') },
    { CODEC_ID_NONE, 0 },
};
/* MOV/MP4 subtitle sample-description fourcc -> codec id mapping
 * ('text' is the classic QuickTime form, 'tx3g' the 3GPP timed text). */
const AVCodecTag ff_codec_movsubtitle_tags[] = {
    { CODEC_ID_MOV_TEXT, MKTAG('t', 'e', 'x', 't') },
    { CODEC_ID_MOV_TEXT, MKTAG('t', 'x', '3', 'g') },
    { CODEC_ID_NONE, 0 },
};
/* map numeric codes from mdhd atom to ISO 639 */
/* cf. QTFileFormat.pdf p253, qtff.pdf p205 */
/* http://developer.apple.com/documentation/mac/Text/Text-368.html */
/* deprecated by putting the code as 3*5bit ascii */
/* The array index IS the legacy mdhd numeric code; empty strings mark
 * codes with no known ISO 639 equivalent. */
static const char mov_mdhd_language_map[][4] = {
    /* 0-9 */
    "eng",    "fra",    "ger",    "ita",    "dut",    "sve",    "spa",    "dan",    "por",    "nor",
    "heb",    "jpn",    "ara",    "fin",    "gre",    "ice",    "mlt",    "tur",    "hr "/*scr*/, "chi"/*ace?*/,
    "urd",    "hin",    "tha",    "kor",    "lit",    "pol",    "hun",    "est",    "lav",       "",
    "fo ",    "",       "rus",    "chi",    "",       "iri",    "alb",    "ron",    "ces",    "slk",
    "slv",    "yid",    "sr ",    "mac",    "bul",    "ukr",    "bel",    "uzb",    "kaz",    "aze",
    /*?*/
    "aze",    "arm",    "geo",    "mol",    "kir",    "tgk",    "tuk",    "mon",    "",       "pus",
    "kur",    "kas",    "snd",    "tib",    "nep",    "san",    "mar",    "ben",    "asm",    "guj",
    "pa ",    "ori",    "mal",    "kan",    "tam",    "tel",    "",       "bur",    "khm",    "lao",
    /* roman? arabic? */
    "vie",    "ind",    "tgl",    "may",    "may",    "amh",    "tir",    "orm",    "som",    "swa",
    /*==rundi?*/
    "",       "run",    "",       "mlg",    "epo",    "",       "",       "",       "",       "",
    /* 100 */
    "",       "",       "",       "",       "",       "",       "",       "",       "",       "",
    "",       "",       "",       "",       "",       "",       "",       "",       "",       "",
    "",       "",       "",       "",       "",       "",       "",       "",       "wel",    "baq",
    "cat",    "lat",    "que",    "grn",    "aym",    "tat",    "uig",    "dzo",    "jav"
};
/**
 * Convert an ISO 639 language string to an mdhd language code.
 * For QuickTime-style files (mp4 == 0) look the code up in the legacy
 * numeric table; for MP4-style files pack it as three 5-bit ASCII letters.
 * Returns the code, or -1 if the language cannot be represented.
 */
int ff_mov_iso639_to_lang(const char lang[4], int mp4)
{
    int i;

    /* Legacy QuickTime path: search the numeric table. */
    if (!mp4) {
        if (lang[0])
            for (i = 0; i < FF_ARRAY_ELEMS(mov_mdhd_language_map); i++)
                if (!strcmp(lang, mov_mdhd_language_map[i]))
                    return i;
        return -1;
    }

    /* MP4 path: treat an empty string as "undetermined". */
    {
        const char *src  = lang[0] ? lang : "und";
        int         code = 0;

        /* Pack three letters as 5-bit values offset by 0x60. */
        for (i = 0; i < 3; i++) {
            unsigned c = (uint8_t)(src[i] - 0x60);
            if (c > 0x1f)
                return -1;
            code = (code << 5) | c;
        }
        return code;
    }
}
/**
 * Convert an mdhd language code back to an ISO 639 string.
 * Writes a NUL-terminated string into to[4].
 * Returns 1 on success, 0 if the code is unknown.
 */
int ff_mov_lang_to_iso639(unsigned code, char to[4])
{
    memset(to, 0, 4);

    /* is it the mangled iso code? */
    /* see http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt */
    if (code > 138) {
        /* three 5-bit letters, big-endian, offset by 0x60 */
        to[0] = 0x60 + ((code >> 10) & 0x1f);
        to[1] = 0x60 + ((code >>  5) & 0x1f);
        to[2] = 0x60 + ( code        & 0x1f);
        return 1;
    }

    /* old fashion apple lang code: translate via the table if known */
    if (code >= FF_ARRAY_ELEMS(mov_mdhd_language_map) ||
        !mov_mdhd_language_map[code][0])
        return 0;
    memcpy(to, mov_mdhd_language_map[code], 4);
    return 1;
}
| 123linslouis-android-video-cutter | jni/libavformat/isom.c | C | asf20 | 16,003 |
/*
* RTP H.263 Depacketizer, RFC 4629
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_RTPDEC_H263_H
#define AVFORMAT_RTPDEC_H263_H
#include "rtpdec.h"
/* Depacketizer handlers for the RFC 4629 RTP H.263 payload formats
 * ("H263-1998" and "H263-2000" dynamic payload types). */
extern RTPDynamicProtocolHandler ff_h263_1998_dynamic_handler;
extern RTPDynamicProtocolHandler ff_h263_2000_dynamic_handler;
| 123linslouis-android-video-cutter | jni/libavformat/rtpdec_h263.h | C | asf20 | 1,087 |
/*
* Matroska file demuxer
* Copyright (c) 2003-2008 The FFmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Matroska file demuxer
* by Ronald Bultje <rbultje@ronald.bitfreak.net>
* with a little help from Moritz Bunkus <moritz@bunkus.org>
* totally reworked by Aurelien Jacobs <aurel@gnuage.org>
* Specs available on the Matroska project page: http://www.matroska.org/.
*/
#include <stdio.h>
#include "avformat.h"
#include "internal.h"
/* For ff_codec_get_id(). */
#include "riff.h"
#include "isom.h"
#include "rm.h"
#include "matroska.h"
#include "libavcodec/mpeg4audio.h"
#include "libavutil/intfloat_readwrite.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/avstring.h"
#include "libavutil/lzo.h"
#if CONFIG_ZLIB
#include <zlib.h>
#endif
#if CONFIG_BZLIB
#include <bzlib.h>
#endif
/* Element value classes understood by the table-driven EBML parser. */
typedef enum {
    EBML_NONE,
    EBML_UINT,
    EBML_FLOAT,
    EBML_STR,
    EBML_UTF8,
    EBML_BIN,
    EBML_NEST,
    EBML_PASS,
    EBML_STOP,
} EbmlType;
/* One row of a syntax table: maps an EBML id to a value type, an offset
 * into the destination struct, and an optional default / nested table. */
typedef const struct EbmlSyntax {
    uint32_t id;
    EbmlType type;
    int list_elem_size;        /* nonzero: element is appended to an EbmlList */
    int data_offset;           /* byte offset of the field inside the struct */
    union {
        uint64_t u;
        double f;
        const char *s;
        const struct EbmlSyntax *n; /* child table for EBML_NEST/EBML_PASS */
    } def;
} EbmlSyntax;
/* Growable array of parsed elements (element size comes from the syntax). */
typedef struct {
    int nb_elem;
    void *elem;
} EbmlList;
/* Raw binary payload plus its byte offset in the input stream. */
typedef struct {
    int size;
    uint8_t *data;
    int64_t pos;
} EbmlBin;
/* Parsed EBML file header values. */
typedef struct {
    uint64_t version;
    uint64_t max_size;
    uint64_t id_length;
    char *doctype;
    uint64_t doctype_version;
} Ebml;
/* ContentCompression: algorithm id and its private settings blob. */
typedef struct {
    uint64_t algo;
    EbmlBin settings;
} MatroskaTrackCompression;
/* ContentEncoding: which part of the track data is encoded, and how. */
typedef struct {
    uint64_t scope;
    uint64_t type;
    MatroskaTrackCompression compression;
} MatroskaTrackEncoding;
/* Video-specific TrackEntry parameters. */
typedef struct {
    double   frame_rate;
    uint64_t display_width;
    uint64_t display_height;
    uint64_t pixel_width;
    uint64_t pixel_height;
    uint64_t fourcc;
} MatroskaTrackVideo;
/* Audio-specific TrackEntry parameters. */
typedef struct {
    double   samplerate;
    double   out_samplerate;
    uint64_t bitdepth;
    uint64_t channels;
    /* real audio header (extracted from extradata) */
    int      coded_framesize;
    int      sub_packet_h;
    int      frame_size;
    int      sub_packet_size;
    int      sub_packet_cnt;
    int      pkt_cnt;
    uint8_t *buf;
} MatroskaTrackAudio;
/* One TrackEntry, linked to the AVStream created for it. */
typedef struct {
    uint64_t num;
    uint64_t uid;
    uint64_t type;
    char    *name;
    char    *codec_id;
    EbmlBin  codec_priv;
    char    *language;
    double time_scale;
    uint64_t default_duration;
    uint64_t flag_default;
    MatroskaTrackVideo video;
    MatroskaTrackAudio audio;
    EbmlList encodings;        /* MatroskaTrackEncoding */
    AVStream *stream;
    int64_t end_timecode;
    int ms_compat;
} MatroskaTrack;
/* AttachedFile element, exposed to the user as an attachment stream. */
typedef struct {
    uint64_t uid;
    char *filename;
    char *mime;
    EbmlBin bin;
    AVStream *stream;
} MatroskaAttachement;
/* ChapterAtom, mapped to an AVChapter. */
typedef struct {
    uint64_t start;
    uint64_t end;
    uint64_t uid;
    char    *title;
    AVChapter *chapter;
} MatroskaChapter;
/* CueTrackPositions: cluster position of a cue point for one track. */
typedef struct {
    uint64_t track;
    uint64_t pos;
} MatroskaIndexPos;
/* CuePoint: timestamp plus its per-track positions. */
typedef struct {
    uint64_t time;
    EbmlList pos;              /* MatroskaIndexPos */
} MatroskaIndex;
/* SimpleTag: name/value metadata pair, possibly with nested sub-tags. */
typedef struct {
    char *name;
    char *string;
    char *lang;
    uint64_t def;
    EbmlList sub;              /* MatroskaTag */
} MatroskaTag;
/* Targets element: what entity a tag applies to. */
typedef struct {
    char    *type;
    uint64_t typevalue;
    uint64_t trackuid;
    uint64_t chapteruid;
    uint64_t attachuid;
} MatroskaTagTarget;
/* Tag element: target descriptor plus its SimpleTags. */
typedef struct {
    MatroskaTagTarget target;
    EbmlList tag;              /* MatroskaTag */
} MatroskaTags;
/* Seek element: id and position of a top-level element in the segment. */
typedef struct {
    uint64_t id;
    uint64_t pos;
} MatroskaSeekhead;
/* One nesting level of the EBML hierarchy (start offset and length). */
typedef struct {
    uint64_t start;
    uint64_t length;
} MatroskaLevel;
/* Demuxer state: parsed segment metadata plus current parsing position. */
typedef struct {
    AVFormatContext *ctx;
    /* EBML stuff */
    int num_levels;
    MatroskaLevel levels[EBML_MAX_DEPTH];
    int level_up;
    uint64_t time_scale;
    double   duration;
    char    *title;
    EbmlList tracks;
    EbmlList attachments;
    EbmlList chapters;
    EbmlList index;
    EbmlList tags;
    EbmlList seekhead;
    /* byte position of the segment inside the stream */
    int64_t segment_start;
    /* the packet queue */
    AVPacket **packets;
    int num_packets;
    AVPacket *prev_pkt;
    int done;
    int has_cluster_id;
    /* What to skip before effectively reading a packet. */
    int skip_to_keyframe;
    uint64_t skip_to_timecode;
} MatroskaDemuxContext;
/* (Simple)Block payload with its duration and reference info. */
typedef struct {
    uint64_t duration;
    int64_t  reference;
    uint64_t non_simple;       /* set for BlockGroup-wrapped blocks */
    EbmlBin  bin;
} MatroskaBlock;
/* Cluster: base timecode plus the blocks it contains. */
typedef struct {
    uint64_t timecode;
    EbmlList blocks;           /* MatroskaBlock */
} MatroskaCluster;
/* Syntax tables: each maps the child ids of one master element to fields
 * of the corresponding struct; a {0} row terminates a table and acts as
 * the catch-all for unknown ids. */
static EbmlSyntax ebml_header[] = {
    { EBML_ID_EBMLREADVERSION,        EBML_UINT, 0, offsetof(Ebml,version), {.u=EBML_VERSION} },
    { EBML_ID_EBMLMAXSIZELENGTH,      EBML_UINT, 0, offsetof(Ebml,max_size), {.u=8} },
    { EBML_ID_EBMLMAXIDLENGTH,        EBML_UINT, 0, offsetof(Ebml,id_length), {.u=4} },
    { EBML_ID_DOCTYPE,                EBML_STR,  0, offsetof(Ebml,doctype), {.s="(none)"} },
    { EBML_ID_DOCTYPEREADVERSION,     EBML_UINT, 0, offsetof(Ebml,doctype_version), {.u=1} },
    { EBML_ID_EBMLVERSION,            EBML_NONE },
    { EBML_ID_DOCTYPEVERSION,         EBML_NONE },
    { 0 }
};
static EbmlSyntax ebml_syntax[] = {
    { EBML_ID_HEADER,                 EBML_NEST, 0, 0, {.n=ebml_header} },
    { 0 }
};
static EbmlSyntax matroska_info[] = {
    { MATROSKA_ID_TIMECODESCALE,      EBML_UINT,  0, offsetof(MatroskaDemuxContext,time_scale), {.u=1000000} },
    { MATROSKA_ID_DURATION,           EBML_FLOAT, 0, offsetof(MatroskaDemuxContext,duration) },
    { MATROSKA_ID_TITLE,              EBML_UTF8,  0, offsetof(MatroskaDemuxContext,title) },
    { MATROSKA_ID_WRITINGAPP,         EBML_NONE },
    { MATROSKA_ID_MUXINGAPP,          EBML_NONE },
    { MATROSKA_ID_DATEUTC,            EBML_NONE },
    { MATROSKA_ID_SEGMENTUID,         EBML_NONE },
    { 0 }
};
static EbmlSyntax matroska_track_video[] = {
    { MATROSKA_ID_VIDEOFRAMERATE,     EBML_FLOAT,0, offsetof(MatroskaTrackVideo,frame_rate) },
    { MATROSKA_ID_VIDEODISPLAYWIDTH,  EBML_UINT, 0, offsetof(MatroskaTrackVideo,display_width) },
    { MATROSKA_ID_VIDEODISPLAYHEIGHT, EBML_UINT, 0, offsetof(MatroskaTrackVideo,display_height) },
    { MATROSKA_ID_VIDEOPIXELWIDTH,    EBML_UINT, 0, offsetof(MatroskaTrackVideo,pixel_width) },
    { MATROSKA_ID_VIDEOPIXELHEIGHT,   EBML_UINT, 0, offsetof(MatroskaTrackVideo,pixel_height) },
    { MATROSKA_ID_VIDEOCOLORSPACE,    EBML_UINT, 0, offsetof(MatroskaTrackVideo,fourcc) },
    { MATROSKA_ID_VIDEOPIXELCROPB,    EBML_NONE },
    { MATROSKA_ID_VIDEOPIXELCROPT,    EBML_NONE },
    { MATROSKA_ID_VIDEOPIXELCROPL,    EBML_NONE },
    { MATROSKA_ID_VIDEOPIXELCROPR,    EBML_NONE },
    { MATROSKA_ID_VIDEODISPLAYUNIT,   EBML_NONE },
    { MATROSKA_ID_VIDEOFLAGINTERLACED,EBML_NONE },
    { MATROSKA_ID_VIDEOSTEREOMODE,    EBML_NONE },
    { MATROSKA_ID_VIDEOASPECTRATIO,   EBML_NONE },
    { 0 }
};
static EbmlSyntax matroska_track_audio[] = {
    { MATROSKA_ID_AUDIOSAMPLINGFREQ,  EBML_FLOAT,0, offsetof(MatroskaTrackAudio,samplerate), {.f=8000.0} },
    { MATROSKA_ID_AUDIOOUTSAMPLINGFREQ,EBML_FLOAT,0,offsetof(MatroskaTrackAudio,out_samplerate) },
    { MATROSKA_ID_AUDIOBITDEPTH,      EBML_UINT, 0, offsetof(MatroskaTrackAudio,bitdepth) },
    { MATROSKA_ID_AUDIOCHANNELS,      EBML_UINT, 0, offsetof(MatroskaTrackAudio,channels), {.u=1} },
    { 0 }
};
static EbmlSyntax matroska_track_encoding_compression[] = {
    { MATROSKA_ID_ENCODINGCOMPALGO,   EBML_UINT, 0, offsetof(MatroskaTrackCompression,algo), {.u=0} },
    { MATROSKA_ID_ENCODINGCOMPSETTINGS,EBML_BIN, 0, offsetof(MatroskaTrackCompression,settings) },
    { 0 }
};
static EbmlSyntax matroska_track_encoding[] = {
    { MATROSKA_ID_ENCODINGSCOPE,      EBML_UINT, 0, offsetof(MatroskaTrackEncoding,scope), {.u=1} },
    { MATROSKA_ID_ENCODINGTYPE,       EBML_UINT, 0, offsetof(MatroskaTrackEncoding,type), {.u=0} },
    { MATROSKA_ID_ENCODINGCOMPRESSION,EBML_NEST, 0, offsetof(MatroskaTrackEncoding,compression), {.n=matroska_track_encoding_compression} },
    { MATROSKA_ID_ENCODINGORDER,      EBML_NONE },
    { 0 }
};
static EbmlSyntax matroska_track_encodings[] = {
    { MATROSKA_ID_TRACKCONTENTENCODING, EBML_NEST, sizeof(MatroskaTrackEncoding), offsetof(MatroskaTrack,encodings), {.n=matroska_track_encoding} },
    { 0 }
};
/* TrackEntry and its container, attachments, chapters and cue tables. */
static EbmlSyntax matroska_track[] = {
    { MATROSKA_ID_TRACKNUMBER,          EBML_UINT, 0, offsetof(MatroskaTrack,num) },
    { MATROSKA_ID_TRACKNAME,            EBML_UTF8, 0, offsetof(MatroskaTrack,name) },
    { MATROSKA_ID_TRACKUID,             EBML_UINT, 0, offsetof(MatroskaTrack,uid) },
    { MATROSKA_ID_TRACKTYPE,            EBML_UINT, 0, offsetof(MatroskaTrack,type) },
    { MATROSKA_ID_CODECID,              EBML_STR,  0, offsetof(MatroskaTrack,codec_id) },
    { MATROSKA_ID_CODECPRIVATE,         EBML_BIN,  0, offsetof(MatroskaTrack,codec_priv) },
    { MATROSKA_ID_TRACKLANGUAGE,        EBML_UTF8, 0, offsetof(MatroskaTrack,language), {.s="eng"} },
    { MATROSKA_ID_TRACKDEFAULTDURATION, EBML_UINT, 0, offsetof(MatroskaTrack,default_duration) },
    { MATROSKA_ID_TRACKTIMECODESCALE,   EBML_FLOAT,0, offsetof(MatroskaTrack,time_scale), {.f=1.0} },
    { MATROSKA_ID_TRACKFLAGDEFAULT,     EBML_UINT, 0, offsetof(MatroskaTrack,flag_default), {.u=1} },
    { MATROSKA_ID_TRACKVIDEO,           EBML_NEST, 0, offsetof(MatroskaTrack,video), {.n=matroska_track_video} },
    { MATROSKA_ID_TRACKAUDIO,           EBML_NEST, 0, offsetof(MatroskaTrack,audio), {.n=matroska_track_audio} },
    { MATROSKA_ID_TRACKCONTENTENCODINGS,EBML_NEST, 0, 0, {.n=matroska_track_encodings} },
    { MATROSKA_ID_TRACKFLAGENABLED,     EBML_NONE },
    { MATROSKA_ID_TRACKFLAGFORCED,      EBML_NONE },
    { MATROSKA_ID_TRACKFLAGLACING,      EBML_NONE },
    { MATROSKA_ID_CODECNAME,            EBML_NONE },
    { MATROSKA_ID_CODECDECODEALL,       EBML_NONE },
    { MATROSKA_ID_CODECINFOURL,         EBML_NONE },
    { MATROSKA_ID_CODECDOWNLOADURL,     EBML_NONE },
    { MATROSKA_ID_TRACKMINCACHE,        EBML_NONE },
    { MATROSKA_ID_TRACKMAXCACHE,        EBML_NONE },
    { MATROSKA_ID_TRACKMAXBLKADDID,     EBML_NONE },
    { 0 }
};
static EbmlSyntax matroska_tracks[] = {
    { MATROSKA_ID_TRACKENTRY,         EBML_NEST, sizeof(MatroskaTrack), offsetof(MatroskaDemuxContext,tracks), {.n=matroska_track} },
    { 0 }
};
static EbmlSyntax matroska_attachment[] = {
    { MATROSKA_ID_FILEUID,            EBML_UINT, 0, offsetof(MatroskaAttachement,uid) },
    { MATROSKA_ID_FILENAME,           EBML_UTF8, 0, offsetof(MatroskaAttachement,filename) },
    { MATROSKA_ID_FILEMIMETYPE,       EBML_STR,  0, offsetof(MatroskaAttachement,mime) },
    { MATROSKA_ID_FILEDATA,           EBML_BIN,  0, offsetof(MatroskaAttachement,bin) },
    { MATROSKA_ID_FILEDESC,           EBML_NONE },
    { 0 }
};
static EbmlSyntax matroska_attachments[] = {
    { MATROSKA_ID_ATTACHEDFILE,       EBML_NEST, sizeof(MatroskaAttachement), offsetof(MatroskaDemuxContext,attachments), {.n=matroska_attachment} },
    { 0 }
};
static EbmlSyntax matroska_chapter_display[] = {
    { MATROSKA_ID_CHAPSTRING,         EBML_UTF8, 0, offsetof(MatroskaChapter,title) },
    { MATROSKA_ID_CHAPLANG,           EBML_NONE },
    { 0 }
};
static EbmlSyntax matroska_chapter_entry[] = {
    { MATROSKA_ID_CHAPTERTIMESTART,   EBML_UINT, 0, offsetof(MatroskaChapter,start), {.u=AV_NOPTS_VALUE} },
    { MATROSKA_ID_CHAPTERTIMEEND,     EBML_UINT, 0, offsetof(MatroskaChapter,end), {.u=AV_NOPTS_VALUE} },
    { MATROSKA_ID_CHAPTERUID,         EBML_UINT, 0, offsetof(MatroskaChapter,uid) },
    { MATROSKA_ID_CHAPTERDISPLAY,     EBML_NEST, 0, 0, {.n=matroska_chapter_display} },
    { MATROSKA_ID_CHAPTERFLAGHIDDEN,  EBML_NONE },
    { MATROSKA_ID_CHAPTERFLAGENABLED, EBML_NONE },
    { MATROSKA_ID_CHAPTERPHYSEQUIV,   EBML_NONE },
    { MATROSKA_ID_CHAPTERATOM,        EBML_NONE },
    { 0 }
};
static EbmlSyntax matroska_chapter[] = {
    { MATROSKA_ID_CHAPTERATOM,        EBML_NEST, sizeof(MatroskaChapter), offsetof(MatroskaDemuxContext,chapters), {.n=matroska_chapter_entry} },
    { MATROSKA_ID_EDITIONUID,         EBML_NONE },
    { MATROSKA_ID_EDITIONFLAGHIDDEN,  EBML_NONE },
    { MATROSKA_ID_EDITIONFLAGDEFAULT, EBML_NONE },
    { MATROSKA_ID_EDITIONFLAGORDERED, EBML_NONE },
    { 0 }
};
static EbmlSyntax matroska_chapters[] = {
    { MATROSKA_ID_EDITIONENTRY,       EBML_NEST, 0, 0, {.n=matroska_chapter} },
    { 0 }
};
static EbmlSyntax matroska_index_pos[] = {
    { MATROSKA_ID_CUETRACK,           EBML_UINT, 0, offsetof(MatroskaIndexPos,track) },
    { MATROSKA_ID_CUECLUSTERPOSITION, EBML_UINT, 0, offsetof(MatroskaIndexPos,pos) },
    { MATROSKA_ID_CUEBLOCKNUMBER,     EBML_NONE },
    { 0 }
};
static EbmlSyntax matroska_index_entry[] = {
    { MATROSKA_ID_CUETIME,            EBML_UINT, 0, offsetof(MatroskaIndex,time) },
    { MATROSKA_ID_CUETRACKPOSITION,   EBML_NEST, sizeof(MatroskaIndexPos), offsetof(MatroskaIndex,pos), {.n=matroska_index_pos} },
    { 0 }
};
static EbmlSyntax matroska_index[] = {
    { MATROSKA_ID_POINTENTRY,         EBML_NEST, sizeof(MatroskaIndex), offsetof(MatroskaDemuxContext,index), {.n=matroska_index_entry} },
    { 0 }
};
/* Tags, seekhead, top-level segment layout and cluster tables. */
static EbmlSyntax matroska_simpletag[] = {
    { MATROSKA_ID_TAGNAME,            EBML_UTF8, 0, offsetof(MatroskaTag,name) },
    { MATROSKA_ID_TAGSTRING,          EBML_UTF8, 0, offsetof(MatroskaTag,string) },
    { MATROSKA_ID_TAGLANG,            EBML_STR,  0, offsetof(MatroskaTag,lang), {.s="und"} },
    { MATROSKA_ID_TAGDEFAULT,         EBML_UINT, 0, offsetof(MatroskaTag,def) },
    { MATROSKA_ID_SIMPLETAG,          EBML_NEST, sizeof(MatroskaTag), offsetof(MatroskaTag,sub), {.n=matroska_simpletag} },
    { 0 }
};
static EbmlSyntax matroska_tagtargets[] = {
    { MATROSKA_ID_TAGTARGETS_TYPE,    EBML_STR,  0, offsetof(MatroskaTagTarget,type) },
    { MATROSKA_ID_TAGTARGETS_TYPEVALUE, EBML_UINT, 0, offsetof(MatroskaTagTarget,typevalue), {.u=50} },
    { MATROSKA_ID_TAGTARGETS_TRACKUID, EBML_UINT, 0, offsetof(MatroskaTagTarget,trackuid) },
    { MATROSKA_ID_TAGTARGETS_CHAPTERUID,EBML_UINT, 0, offsetof(MatroskaTagTarget,chapteruid) },
    { MATROSKA_ID_TAGTARGETS_ATTACHUID, EBML_UINT, 0, offsetof(MatroskaTagTarget,attachuid) },
    { 0 }
};
static EbmlSyntax matroska_tag[] = {
    { MATROSKA_ID_SIMPLETAG,          EBML_NEST, sizeof(MatroskaTag), offsetof(MatroskaTags,tag), {.n=matroska_simpletag} },
    { MATROSKA_ID_TAGTARGETS,         EBML_NEST, 0, offsetof(MatroskaTags,target), {.n=matroska_tagtargets} },
    { 0 }
};
static EbmlSyntax matroska_tags[] = {
    { MATROSKA_ID_TAG,                EBML_NEST, sizeof(MatroskaTags), offsetof(MatroskaDemuxContext,tags), {.n=matroska_tag} },
    { 0 }
};
static EbmlSyntax matroska_seekhead_entry[] = {
    { MATROSKA_ID_SEEKID,             EBML_UINT, 0, offsetof(MatroskaSeekhead,id) },
    { MATROSKA_ID_SEEKPOSITION,       EBML_UINT, 0, offsetof(MatroskaSeekhead,pos), {.u=-1} },
    { 0 }
};
static EbmlSyntax matroska_seekhead[] = {
    { MATROSKA_ID_SEEKENTRY,          EBML_NEST, sizeof(MatroskaSeekhead), offsetof(MatroskaDemuxContext,seekhead), {.n=matroska_seekhead_entry} },
    { 0 }
};
/* Top-level segment: parsing stops at the first cluster (EBML_STOP). */
static EbmlSyntax matroska_segment[] = {
    { MATROSKA_ID_INFO,           EBML_NEST, 0, 0, {.n=matroska_info       } },
    { MATROSKA_ID_TRACKS,         EBML_NEST, 0, 0, {.n=matroska_tracks     } },
    { MATROSKA_ID_ATTACHMENTS,    EBML_NEST, 0, 0, {.n=matroska_attachments} },
    { MATROSKA_ID_CHAPTERS,       EBML_NEST, 0, 0, {.n=matroska_chapters   } },
    { MATROSKA_ID_CUES,           EBML_NEST, 0, 0, {.n=matroska_index      } },
    { MATROSKA_ID_TAGS,           EBML_NEST, 0, 0, {.n=matroska_tags       } },
    { MATROSKA_ID_SEEKHEAD,       EBML_NEST, 0, 0, {.n=matroska_seekhead   } },
    { MATROSKA_ID_CLUSTER,        EBML_STOP, 0, offsetof(MatroskaDemuxContext,has_cluster_id) },
    { 0 }
};
static EbmlSyntax matroska_segments[] = {
    { MATROSKA_ID_SEGMENT,        EBML_NEST, 0, 0, {.n=matroska_segment    } },
    { 0 }
};
/* The id==1 catch-all row stores whether the block came from a BlockGroup
 * (i.e. the dispatched id itself) rather than a SimpleBlock. */
static EbmlSyntax matroska_blockgroup[] = {
    { MATROSKA_ID_BLOCK,          EBML_BIN,  0, offsetof(MatroskaBlock,bin) },
    { MATROSKA_ID_SIMPLEBLOCK,    EBML_BIN,  0, offsetof(MatroskaBlock,bin) },
    { MATROSKA_ID_BLOCKDURATION,  EBML_UINT, 0, offsetof(MatroskaBlock,duration), {.u=AV_NOPTS_VALUE} },
    { MATROSKA_ID_BLOCKREFERENCE, EBML_UINT, 0, offsetof(MatroskaBlock,reference) },
    { 1,                          EBML_UINT, 0, offsetof(MatroskaBlock,non_simple), {.u=1} },
    { 0 }
};
static EbmlSyntax matroska_cluster[] = {
    { MATROSKA_ID_CLUSTERTIMECODE,EBML_UINT,0, offsetof(MatroskaCluster,timecode) },
    { MATROSKA_ID_BLOCKGROUP,     EBML_NEST, sizeof(MatroskaBlock), offsetof(MatroskaCluster,blocks), {.n=matroska_blockgroup} },
    { MATROSKA_ID_SIMPLEBLOCK,    EBML_PASS, sizeof(MatroskaBlock), offsetof(MatroskaCluster,blocks), {.n=matroska_blockgroup} },
    { MATROSKA_ID_CLUSTERPOSITION,EBML_NONE },
    { MATROSKA_ID_CLUSTERPREVSIZE,EBML_NONE },
    { 0 }
};
static EbmlSyntax matroska_clusters[] = {
    { MATROSKA_ID_CLUSTER,        EBML_NEST, 0, 0, {.n=matroska_cluster} },
    { MATROSKA_ID_INFO,           EBML_NONE },
    { MATROSKA_ID_CUES,           EBML_NONE },
    { MATROSKA_ID_TAGS,           EBML_NONE },
    { MATROSKA_ID_SEEKHEAD,       EBML_NONE },
    { 0 }
};
/* Doctype strings accepted by the probe function. */
static const char *matroska_doctypes[] = { "matroska", "webm" };
/*
 * Check whether the current stream position has reached the end of the
 * innermost open EBML level; pop that level if so.
 * Return: 1 if a level ended (and was popped), 0 otherwise.
 */
static int ebml_level_end(MatroskaDemuxContext *matroska)
{
    MatroskaLevel *top;
    int64_t offset = url_ftell(matroska->ctx->pb);

    if (matroska->num_levels <= 0)
        return 0;

    top = &matroska->levels[matroska->num_levels - 1];
    if (offset - top->start < top->length)
        return 0;

    matroska->num_levels--;
    return 1;
}
/*
 * Read: an "EBML number", which is defined as a variable-length
 * array of bytes. The first byte indicates the length by giving a
 * number of 0-bits followed by a one. The position of the first
 * "one" bit inside the first byte indicates the length of this
 * number.
 * Returns: number of bytes read, < 0 on error
 * Note: the length-marker bit is stripped from the returned value.
 */
static int ebml_read_num(MatroskaDemuxContext *matroska, ByteIOContext *pb,
                         int max_size, uint64_t *number)
{
    int len_mask = 0x80, read = 1, n = 1;
    int64_t total = 0;
    /* The first byte tells us the length in bytes - get_byte() can normally
     * return 0, but since that's not a valid first ebmlID byte, we can
     * use it safely here to catch EOS. */
    if (!(total = get_byte(pb))) {
        /* we might encounter EOS here */
        if (!url_feof(pb)) {
            int64_t pos = url_ftell(pb);
            av_log(matroska->ctx, AV_LOG_ERROR,
                   "Read error at pos. %"PRIu64" (0x%"PRIx64")\n",
                   pos, pos);
        }
        return AVERROR(EIO); /* EOS or actual I/O error */
    }
    /* get the length of the EBML number: count leading zero bits by
     * sliding the mask right until it hits the first set bit */
    while (read <= max_size && !(total & len_mask)) {
        read++;
        len_mask >>= 1;
    }
    if (read > max_size) {
        int64_t pos = url_ftell(pb) - 1;
        av_log(matroska->ctx, AV_LOG_ERROR,
               "Invalid EBML number size tag 0x%02x at pos %"PRIu64" (0x%"PRIx64")\n",
               (uint8_t) total, pos, pos);
        return AVERROR_INVALIDDATA;
    }
    /* read out length: drop the marker bit, then append the remaining
     * big-endian bytes */
    total &= ~len_mask;
    while (n++ < read)
        total = (total << 8) | get_byte(pb);
    *number = total;
    return read;
}
/*
 * Read the next element as an unsigned int of 1..8 big-endian bytes.
 * 0 is success, < 0 is failure.
 */
static int ebml_read_uint(ByteIOContext *pb, int size, uint64_t *num)
{
    uint64_t value = 0;
    int i;

    if (size < 1 || size > 8)
        return AVERROR_INVALIDDATA;

    /* accumulate the big-endian bytes */
    for (i = 0; i < size; i++)
        value = (value << 8) | get_byte(pb);

    *num = value;
    return 0;
}
/*
 * Read the next element as an IEEE float (4 or 8 bytes, big-endian).
 * 0 is success, < 0 is failure.
 */
static int ebml_read_float(ByteIOContext *pb, int size, double *num)
{
    switch (size) {
    case 4:
        *num = av_int2flt(get_be32(pb));
        return 0;
    case 8:
        *num = av_int2dbl(get_be64(pb));
        return 0;
    default:
        return AVERROR_INVALIDDATA;
    }
}
/*
 * Read the next element as an ASCII string.
 * On success *str holds a newly allocated NUL-terminated copy; any
 * previous value is freed. On failure *str is NULL.
 * 0 is success, < 0 is failure.
 */
static int ebml_read_ascii(ByteIOContext *pb, int size, char **str)
{
    av_free(*str);
    /* EBML strings are usually not 0-terminated, so we allocate one
     * byte more, read the string and NULL-terminate it ourselves. */
    if (!(*str = av_malloc(size + 1)))
        return AVERROR(ENOMEM);
    if (get_buffer(pb, (uint8_t *) *str, size) != size) {
        /* av_freep, not av_free: leaving *str dangling here would make
         * ebml_free() free the same pointer a second time later on. */
        av_freep(str);
        return AVERROR(EIO);
    }
    (*str)[size] = '\0';
    return 0;
}
/*
 * Read the next element as binary data into bin, recording its size and
 * stream position. Any previously held buffer is released first.
 * 0 is success, < 0 is failure.
 */
static int ebml_read_binary(ByteIOContext *pb, int length, EbmlBin *bin)
{
    av_free(bin->data);
    bin->data = av_malloc(length);
    if (!bin->data)
        return AVERROR(ENOMEM);

    bin->size = length;
    bin->pos  = url_ftell(pb);
    if (get_buffer(pb, bin->data, length) != length)
        return AVERROR(EIO);

    return 0;
}
/*
 * Read the next element, but only the header. The contents
 * are supposed to be sub-elements which can be read separately.
 * Pushes a new level (start offset + declared length) onto the stack.
 * 0 is success, < 0 is failure.
 */
static int ebml_read_master(MatroskaDemuxContext *matroska, int length)
{
    MatroskaLevel *level;

    if (matroska->num_levels >= EBML_MAX_DEPTH) {
        av_log(matroska->ctx, AV_LOG_ERROR,
               "File moves beyond max. allowed depth (%d)\n", EBML_MAX_DEPTH);
        return AVERROR(ENOSYS);
    }

    level         = &matroska->levels[matroska->num_levels++];
    level->start  = url_ftell(matroska->ctx->pb);
    level->length = length;
    return 0;
}
/*
 * Read signed/unsigned "EBML" numbers.
 * Return: number of bytes processed, < 0 on error
 */
static int matroska_ebmlnum_uint(MatroskaDemuxContext *matroska,
                                 uint8_t *data, uint32_t size, uint64_t *num)
{
    ByteIOContext pb;
    /* wrap the in-memory buffer so the stream-based reader can be reused
     * on data already extracted from a block */
    init_put_byte(&pb, data, size, 0, NULL, NULL, NULL, NULL);
    return ebml_read_num(matroska, &pb, 8, num);
}
/*
 * Same as above, but signed.
 */
static int matroska_ebmlnum_sint(MatroskaDemuxContext *matroska,
                                 uint8_t *data, uint32_t size, int64_t *num)
{
    uint64_t unum;
    int res;
    /* read as unsigned number first */
    if ((res = matroska_ebmlnum_uint(matroska, data, size, &unum)) < 0)
        return res;
    /* make signed (weird way): bias by half the range of an n-byte EBML
     * number so the midpoint maps to zero, as the Matroska spec defines */
    *num = unum - ((1LL << (7*res - 1)) - 1);
    return res;
}
static int ebml_parse_elem(MatroskaDemuxContext *matroska,
EbmlSyntax *syntax, void *data);
/*
 * Dispatch an already-read element id through the syntax table.
 * Unknown ids hit the {0} terminator row (whose EBML_NONE type makes the
 * element get skipped); Void and CRC32 elements are not reported.
 */
static int ebml_parse_id(MatroskaDemuxContext *matroska, EbmlSyntax *syntax,
                         uint32_t id, void *data)
{
    int i = 0;

    while (syntax[i].id && syntax[i].id != id)
        i++;

    if (!syntax[i].id && id != EBML_ID_VOID && id != EBML_ID_CRC32)
        av_log(matroska->ctx, AV_LOG_INFO, "Unknown entry 0x%X\n", id);

    return ebml_parse_elem(matroska, &syntax[i], data);
}
/*
 * Read the next element id from the stream and dispatch it through the
 * given syntax table.
 * Returns 0 on success, 1 on EBML_STOP, < 0 on error.
 */
static int ebml_parse(MatroskaDemuxContext *matroska, EbmlSyntax *syntax,
                      void *data)
{
    uint64_t id;
    int res = ebml_read_num(matroska, matroska->ctx->pb, 4, &id);
    if (res < 0)
        return res;
    /* Re-add the length-marker bit that ebml_read_num stripped, so the id
     * matches the MATROSKA_ID_* constants. This must happen only after
     * the error check: with a negative res the shift amount would be
     * negative, which is undefined behavior. */
    id |= 1 << 7*res;
    return ebml_parse_id(matroska, syntax, id, data);
}
/*
 * Parse all children of the current master element, after seeding the
 * destination struct with the per-field defaults from the syntax table.
 */
static int ebml_parse_nest(MatroskaDemuxContext *matroska, EbmlSyntax *syntax,
                           void *data)
{
    int i, res = 0;
    /* apply default values before any child element is read */
    for (i=0; syntax[i].id; i++)
        switch (syntax[i].type) {
        case EBML_UINT:
            *(uint64_t *)((char *)data+syntax[i].data_offset) = syntax[i].def.u;
            break;
        case EBML_FLOAT:
            *(double *)((char *)data+syntax[i].data_offset) = syntax[i].def.f;
            break;
        case EBML_STR:
        case EBML_UTF8:
            *(char **)((char *)data+syntax[i].data_offset) = av_strdup(syntax[i].def.s);
            break;
        }
    /* consume children until this level's declared length is exhausted */
    while (!res && !ebml_level_end(matroska))
        res = ebml_parse(matroska, syntax, data);
    return res;
}
/*
 * Read one element according to its syntax entry: append a fresh slot if
 * the element belongs to a list, read its length, then decode the payload
 * (or recurse for master elements). Elements with no known type are
 * skipped over.
 * Returns 0 on success, 1 on EBML_STOP, < 0 on error.
 */
static int ebml_parse_elem(MatroskaDemuxContext *matroska,
                           EbmlSyntax *syntax, void *data)
{
    ByteIOContext *pb = matroska->ctx->pb;
    uint32_t id = syntax->id;
    uint64_t length;
    int res;

    data = (char *)data + syntax->data_offset;
    if (syntax->list_elem_size) {
        EbmlList *list = data;
        /* check the realloc result: assigning it straight to list->elem
         * would both dereference NULL below and leak the old array */
        void *newelem = av_realloc(list->elem, (list->nb_elem+1)*syntax->list_elem_size);
        if (!newelem)
            return AVERROR(ENOMEM);
        list->elem = newelem;
        data = (char*)list->elem + list->nb_elem*syntax->list_elem_size;
        memset(data, 0, syntax->list_elem_size);
        list->nb_elem++;
    }

    /* EBML_PASS re-dispatches an id that was already consumed and
     * EBML_STOP has no payload, so neither carries a length field */
    if (syntax->type != EBML_PASS && syntax->type != EBML_STOP)
        if ((res = ebml_read_num(matroska, pb, 8, &length)) < 0)
            return res;

    switch (syntax->type) {
    case EBML_UINT:  res = ebml_read_uint  (pb, length, data); break;
    case EBML_FLOAT: res = ebml_read_float (pb, length, data); break;
    case EBML_STR:
    case EBML_UTF8:  res = ebml_read_ascii (pb, length, data); break;
    case EBML_BIN:   res = ebml_read_binary(pb, length, data); break;
    case EBML_NEST:  if ((res=ebml_read_master(matroska, length)) < 0)
                         return res;
                     if (id == MATROSKA_ID_SEGMENT)
                         matroska->segment_start = url_ftell(matroska->ctx->pb);
                     return ebml_parse_nest(matroska, syntax->def.n, data);
    case EBML_PASS:  return ebml_parse_id(matroska, syntax->def.n, id, data);
    case EBML_STOP:  *(int *)data = 1;      return 1;
    default:         return url_fseek(pb,length,SEEK_CUR)<0 ? AVERROR(EIO) : 0;
    }
    if (res == AVERROR_INVALIDDATA)
        av_log(matroska->ctx, AV_LOG_ERROR, "Invalid element\n");
    else if (res == AVERROR(EIO))
        av_log(matroska->ctx, AV_LOG_ERROR, "Read error\n");
    return res;
}
/*
 * Recursively free every dynamically allocated field (strings, binary
 * payloads, nested lists) described by the given syntax table.
 */
static void ebml_free(EbmlSyntax *syntax, void *data)
{
    int i, j;
    for (i=0; syntax[i].id; i++) {
        void *data_off = (char *)data + syntax[i].data_offset;
        switch (syntax[i].type) {
        case EBML_STR:
        case EBML_UTF8: av_freep(data_off); break;
        case EBML_BIN: av_freep(&((EbmlBin *)data_off)->data); break;
        case EBML_NEST:
            if (syntax[i].list_elem_size) {
                /* free each list entry recursively, then the array itself */
                EbmlList *list = data_off;
                char *ptr = list->elem;
                for (j=0; j<list->nb_elem; j++, ptr+=syntax[i].list_elem_size)
                    ebml_free(syntax[i].def.n, ptr);
                av_free(list->elem);
            } else
                ebml_free(syntax[i].def.n, data_off);
        default: break;
        }
    }
}
/*
 * Autodetecting...
 */
static int matroska_probe(AVProbeData *p)
{
    uint64_t total = 0;
    int len_mask = 0x80, size = 1, n = 1, i;
    /* EBML header? */
    if (AV_RB32(p->buf) != EBML_ID_HEADER)
        return 0;
    /* length of header: decode the EBML variable-length integer that
     * follows the id, same scheme as ebml_read_num() */
    total = p->buf[4];
    while (size <= 8 && !(total & len_mask)) {
        size++;
        len_mask >>= 1;
    }
    if (size > 8)
      return 0;
    total &= (len_mask - 1);
    while (n < size)
        total = (total << 8) | p->buf[4 + n++];
    /* Does the probe data contain the whole header? */
    if (p->buf_size < 4 + size + total)
      return 0;
    /* The header should contain a known document type. For now,
     * we don't parse the whole header but simply check for the
     * availability of that array of characters inside the header.
     * Not fully fool-proof, but good enough. */
    for (i = 0; i < FF_ARRAY_ELEMS(matroska_doctypes); i++) {
        int probelen = strlen(matroska_doctypes[i]);
        for (n = 4+size; n <= 4+size+total-probelen; n++)
            if (!memcmp(p->buf+n, matroska_doctypes[i], probelen))
                return AVPROBE_SCORE_MAX;
    }
    // probably valid EBML header but no recognized doctype
    return AVPROBE_SCORE_MAX/2;
}
/*
 * Look up a track by its Matroska track number.
 * Returns NULL (and logs an error) if no such track exists.
 */
static MatroskaTrack *matroska_find_track_by_num(MatroskaDemuxContext *matroska,
                                                 int num)
{
    MatroskaTrack *track = matroska->tracks.elem;
    MatroskaTrack *end   = track + matroska->tracks.nb_elem;

    for (; track < end; track++)
        if (track->num == num)
            return track;

    av_log(matroska->ctx, AV_LOG_ERROR, "Invalid track number %d\n", num);
    return NULL;
}
/*
 * Undo the ContentEncoding applied to a block's payload.
 * On success *buf/*buf_size are replaced by a newly allocated,
 * decompressed buffer (ownership passes to the caller); the input buffer
 * is left untouched.
 * Returns 0 on success, the number of header bytes to prepend for
 * HEADERSTRIP encodings, or a negative value on error.
 */
static int matroska_decode_buffer(uint8_t** buf, int* buf_size,
                                  MatroskaTrack *track)
{
    MatroskaTrackEncoding *encodings = track->encodings.elem;
    uint8_t* data = *buf;
    int isize = *buf_size;
    uint8_t* pkt_data = NULL;
    uint8_t* newpktdata;
    int pkt_size = isize;
    int result = 0;
    int olen;

    switch (encodings[0].compression.algo) {
    case MATROSKA_TRACK_ENCODING_COMP_HEADERSTRIP:
        /* nothing to decode; the caller prepends the stored header bytes */
        return encodings[0].compression.settings.size;
    case MATROSKA_TRACK_ENCODING_COMP_LZO:
        do {
            /* grow the output buffer geometrically until the frame fits */
            olen = pkt_size *= 3;
            /* never overwrite pkt_data with the realloc result directly:
             * on failure we would leak the old buffer and write to NULL */
            newpktdata = av_realloc(pkt_data, pkt_size+AV_LZO_OUTPUT_PADDING);
            if (!newpktdata)
                goto failed;
            pkt_data = newpktdata;
            result = av_lzo1x_decode(pkt_data, &olen, data, &isize);
        } while (result==AV_LZO_OUTPUT_FULL && pkt_size<10000000);
        if (result)
            goto failed;
        pkt_size -= olen;
        break;
#if CONFIG_ZLIB
    case MATROSKA_TRACK_ENCODING_COMP_ZLIB: {
        z_stream zstream = {0};
        if (inflateInit(&zstream) != Z_OK)
            return -1;
        zstream.next_in = data;
        zstream.avail_in = isize;
        do {
            pkt_size *= 3;
            newpktdata = av_realloc(pkt_data, pkt_size);
            if (!newpktdata) {
                inflateEnd(&zstream);
                goto failed;
            }
            pkt_data = newpktdata;
            zstream.avail_out = pkt_size - zstream.total_out;
            zstream.next_out = pkt_data + zstream.total_out;
            result = inflate(&zstream, Z_NO_FLUSH);
        } while (result==Z_OK && pkt_size<10000000);
        pkt_size = zstream.total_out;
        inflateEnd(&zstream);
        if (result != Z_STREAM_END)
            goto failed;
        break;
    }
#endif
#if CONFIG_BZLIB
    case MATROSKA_TRACK_ENCODING_COMP_BZLIB: {
        bz_stream bzstream = {0};
        if (BZ2_bzDecompressInit(&bzstream, 0, 0) != BZ_OK)
            return -1;
        bzstream.next_in = data;
        bzstream.avail_in = isize;
        do {
            pkt_size *= 3;
            newpktdata = av_realloc(pkt_data, pkt_size);
            if (!newpktdata) {
                BZ2_bzDecompressEnd(&bzstream);
                goto failed;
            }
            pkt_data = newpktdata;
            bzstream.avail_out = pkt_size - bzstream.total_out_lo32;
            bzstream.next_out = pkt_data + bzstream.total_out_lo32;
            result = BZ2_bzDecompress(&bzstream);
        } while (result==BZ_OK && pkt_size<10000000);
        pkt_size = bzstream.total_out_lo32;
        BZ2_bzDecompressEnd(&bzstream);
        if (result != BZ_STREAM_END)
            goto failed;
        break;
    }
#endif
    default:
        return -1;
    }

    *buf = pkt_data;
    *buf_size = pkt_size;
    return 0;
failed:
    av_free(pkt_data);
    return -1;
}
/**
 * Rewrite a Matroska-style ASS subtitle packet into a standard SSA
 * "Dialogue:" line with explicit start/end timestamps, replacing
 * pkt->data in place (old buffer is freed, a new one is allocated).
 * Timestamps are derived from pkt->pts and display_duration, scaled by
 * matroska->time_scale into centiseconds.
 * NOTE(review): if the packet contains no comma at all, `layer` is read
 * uninitialized only if the second comma test passes, which cannot happen
 * then — but this relies on the two scans stopping at the same position;
 * confirm against malformed input.
 */
static void matroska_fix_ass_packet(MatroskaDemuxContext *matroska,
                                    AVPacket *pkt, uint64_t display_duration)
{
    char *line, *layer, *ptr = pkt->data, *end = ptr+pkt->size;
    /* skip the leading ReadOrder field up to the first comma */
    for (; *ptr!=',' && ptr<end-1; ptr++);
    if (*ptr == ',')
        layer = ++ptr;
    /* skip the Layer field up to the second comma */
    for (; *ptr!=',' && ptr<end-1; ptr++);
    if (*ptr == ',') {
        int64_t end_pts = pkt->pts + display_duration;
        /* convert pts (in time_scale units of ns) to centiseconds */
        int sc = matroska->time_scale * pkt->pts / 10000000;
        int ec = matroska->time_scale * end_pts / 10000000;
        int sh, sm, ss, eh, em, es, len;
        /* split start/end centiseconds into h:mm:ss.cc components */
        sh = sc/360000;  sc -= 360000*sh;
        sm = sc/  6000;  sc -=   6000*sm;
        ss = sc/   100;  sc -=    100*ss;
        eh = ec/360000;  ec -= 360000*eh;
        em = ec/  6000;  ec -=   6000*em;
        es = ec/   100;  ec -=    100*es;
        *ptr++ = '\0';      /* terminate the layer field */
        len = 50 + end-ptr + FF_INPUT_BUFFER_PADDING_SIZE;
        if (!(line = av_malloc(len)))
            return;         /* keep the original packet on OOM */
        snprintf(line,len,"Dialogue: %s,%d:%02d:%02d.%02d,%d:%02d:%02d.%02d,%s\r\n",
                 layer, sh, sm, ss, sc, eh, em, es, ec, ptr);
        av_free(pkt->data);
        pkt->data = line;
        pkt->size = strlen(line);
    }
}
/**
 * Append the payload of @p in to @p out (used to merge packets that share
 * the same pts and stream), then destroy and free @p in.
 * NOTE(review): the av_realloc() result is not checked; on allocation
 * failure out->data becomes NULL and the memcpy crashes — worth fixing
 * via a temporary pointer once the call sites can report errors.
 */
static void matroska_merge_packets(AVPacket *out, AVPacket *in)
{
    out->data = av_realloc(out->data, out->size+in->size);
    memcpy(out->data+out->size, in->data, in->size);
    out->size += in->size;
    av_destruct_packet(in);
    av_free(in);
}
/**
 * Recursively export a list of Matroska tags into an AVMetadata dict.
 * Nested tags get keys of the form "parent/child"; language-specific
 * variants are stored under "key-lang" (language "und" is treated as
 * no language).
 */
static void matroska_convert_tag(AVFormatContext *s, EbmlList *list,
                                 AVMetadata **metadata, char *prefix)
{
    MatroskaTag *tags = list->elem;
    char key[1024];
    int i;

    for (i = 0; i < list->nb_elem; i++) {
        const char *lang = NULL;

        if (strcmp(tags[i].lang, "und"))
            lang = tags[i].lang;

        /* build the metadata key, nesting under the parent tag if any */
        if (prefix)
            snprintf(key, sizeof(key), "%s/%s", prefix, tags[i].name);
        else
            av_strlcpy(key, tags[i].name, sizeof(key));

        /* default (or language-less) entry under the plain key */
        if (tags[i].def || !lang) {
            av_metadata_set2(metadata, key, tags[i].string, 0);
            if (tags[i].sub.nb_elem)
                matroska_convert_tag(s, &tags[i].sub, metadata, key);
        }

        /* language-specific entry under "key-lang" */
        if (lang) {
            av_strlcat(key, "-", sizeof(key));
            av_strlcat(key, lang, sizeof(key));
            av_metadata_set2(metadata, key, tags[i].string, 0);
            if (tags[i].sub.nb_elem)
                matroska_convert_tag(s, &tags[i].sub, metadata, key);
        }
    }
}
/**
 * Dispatch every parsed tag set to the metadata dictionary of its target:
 * an attachment stream, a chapter, a track stream, or — when no explicit
 * target UID is present — the container itself.
 */
static void matroska_convert_tags(AVFormatContext *s)
{
    MatroskaDemuxContext *matroska = s->priv_data;
    MatroskaTags *tags = matroska->tags.elem;
    int i, j;

    for (i = 0; i < matroska->tags.nb_elem; i++) {
        if (tags[i].target.attachuid) {
            /* tag targets an attachment: match on attachment UID */
            MatroskaAttachement *attachment = matroska->attachments.elem;
            for (j = 0; j < matroska->attachments.nb_elem; j++) {
                if (attachment[j].uid != tags[i].target.attachuid)
                    continue;
                matroska_convert_tag(s, &tags[i].tag,
                                     &attachment[j].stream->metadata, NULL);
            }
        } else if (tags[i].target.chapteruid) {
            /* tag targets a chapter: match on chapter UID */
            MatroskaChapter *chapter = matroska->chapters.elem;
            for (j = 0; j < matroska->chapters.nb_elem; j++) {
                if (chapter[j].uid != tags[i].target.chapteruid)
                    continue;
                matroska_convert_tag(s, &tags[i].tag,
                                     &chapter[j].chapter->metadata, NULL);
            }
        } else if (tags[i].target.trackuid) {
            /* tag targets a track: match on track UID */
            MatroskaTrack *track = matroska->tracks.elem;
            for (j = 0; j < matroska->tracks.nb_elem; j++) {
                if (track[j].uid != tags[i].target.trackuid)
                    continue;
                matroska_convert_tag(s, &tags[i].tag,
                                     &track[j].stream->metadata, NULL);
            }
        } else {
            /* no UID: global tag, optionally scoped by target type */
            matroska_convert_tag(s, &tags[i].tag, &s->metadata,
                                 tags[i].target.type);
        }
    }
}
static void matroska_execute_seekhead(MatroskaDemuxContext *matroska)
{
EbmlList *seekhead_list = &matroska->seekhead;
MatroskaSeekhead *seekhead = seekhead_list->elem;
uint32_t level_up = matroska->level_up;
int64_t before_pos = url_ftell(matroska->ctx->pb);
MatroskaLevel level;
int i;
for (i=0; i<seekhead_list->nb_elem; i++) {
int64_t offset = seekhead[i].pos + matroska->segment_start;
if (seekhead[i].pos <= before_pos
|| seekhead[i].id == MATROSKA_ID_SEEKHEAD
|| seekhead[i].id == MATROSKA_ID_CLUSTER)
continue;
/* seek */
if (url_fseek(matroska->ctx->pb, offset, SEEK_SET) != offset)
continue;
/* We don't want to lose our seekhead level, so we add
* a dummy. This is a crude hack. */
if (matroska->num_levels == EBML_MAX_DEPTH) {
av_log(matroska->ctx, AV_LOG_INFO,
"Max EBML element depth (%d) reached, "
"cannot parse further.\n", EBML_MAX_DEPTH);
break;
}
level.start = 0;
level.length = (uint64_t)-1;
matroska->levels[matroska->num_levels] = level;
matroska->num_levels++;
ebml_parse(matroska, matroska_segment, matroska);
/* remove dummy level */
while (matroska->num_levels) {
uint64_t length = matroska->levels[--matroska->num_levels].length;
if (length == (uint64_t)-1)
break;
}
}
/* seek back */
url_fseek(matroska->ctx->pb, before_pos, SEEK_SET);
matroska->level_up = level_up;
}
/**
 * Map an AAC codec-id string to an MPEG-4 audio object type:
 * 1 = MAIN, 2 = LC, 3 = SSR; 4 when none of these substrings match.
 */
static int matroska_aac_profile(char *codec_id)
{
    static const char * const aac_profiles[] = { "MAIN", "LC", "SSR" };
    const int nb_profiles = sizeof(aac_profiles) / sizeof(aac_profiles[0]);
    int profile = 0;

    while (profile < nb_profiles && !strstr(codec_id, aac_profiles[profile]))
        profile++;
    /* object types are 1-based */
    return profile + 1;
}
/**
 * Return the MPEG-4 sampling-rate index for @p samplerate, i.e. its
 * position in ff_mpeg4audio_sample_rates; returns the table length when
 * the rate is not listed.
 */
static int matroska_aac_sri(int samplerate)
{
    int sri = 0;

    while (sri < FF_ARRAY_ELEMS(ff_mpeg4audio_sample_rates) &&
           ff_mpeg4audio_sample_rates[sri] != samplerate)
        sri++;
    return sri;
}
/**
 * Read and validate the EBML header and the Matroska segment, then create
 * one AVStream per supported track (plus attachment streams), convert
 * chapters, the cue index and tags, and leave the demuxer positioned for
 * cluster parsing.
 *
 * Fixes vs. original:
 *  - per-track content encodings were read from &tracks->encodings (the
 *    FIRST track) instead of &track->encodings (the current one);
 *  - guard the default_duration computation against a zero frame rate;
 *  - corrected the "Multiple combined encodings" error message grammar.
 */
static int matroska_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    MatroskaDemuxContext *matroska = s->priv_data;
    EbmlList *attachements_list = &matroska->attachments;
    MatroskaAttachement *attachements;
    EbmlList *chapters_list = &matroska->chapters;
    MatroskaChapter *chapters;
    MatroskaTrack *tracks;
    EbmlList *index_list;
    MatroskaIndex *index;
    int index_scale = 1;
    uint64_t max_start = 0;
    Ebml ebml = { 0 };
    AVStream *st;
    int i, j;

    matroska->ctx = s;

    /* First read the EBML header. */
    if (ebml_parse(matroska, ebml_syntax, &ebml)
        || ebml.version > EBML_VERSION || ebml.max_size > sizeof(uint64_t)
        || ebml.id_length > sizeof(uint32_t) || ebml.doctype_version > 2) {
        av_log(matroska->ctx, AV_LOG_ERROR,
               "EBML header using unsupported features\n"
               "(EBML version %"PRIu64", doctype %s, doc version %"PRIu64")\n",
               ebml.version, ebml.doctype, ebml.doctype_version);
        ebml_free(ebml_syntax, &ebml);
        return AVERROR_PATCHWELCOME;
    }
    for (i = 0; i < FF_ARRAY_ELEMS(matroska_doctypes); i++)
        if (!strcmp(ebml.doctype, matroska_doctypes[i]))
            break;
    if (i >= FF_ARRAY_ELEMS(matroska_doctypes)) {
        av_log(s, AV_LOG_WARNING, "Unknown EBML doctype '%s'\n", ebml.doctype);
    }
    av_metadata_set2(&s->metadata, "doctype", ebml.doctype, 0);
    ebml_free(ebml_syntax, &ebml);

    /* The next thing is a segment. */
    if (ebml_parse(matroska, matroska_segments, matroska) < 0)
        return -1;
    matroska_execute_seekhead(matroska);

    if (matroska->duration)
        matroska->ctx->duration = matroska->duration * matroska->time_scale
                                  * 1000 / AV_TIME_BASE;
    av_metadata_set2(&s->metadata, "title", matroska->title, 0);

    tracks = matroska->tracks.elem;
    for (i=0; i < matroska->tracks.nb_elem; i++) {
        MatroskaTrack *track = &tracks[i];
        enum CodecID codec_id = CODEC_ID_NONE;
        /* use the CURRENT track's encodings, not the first track's */
        EbmlList *encodings_list = &track->encodings;
        MatroskaTrackEncoding *encodings = encodings_list->elem;
        uint8_t *extradata = NULL;
        int extradata_size = 0;
        int extradata_offset = 0;
        ByteIOContext b;

        /* Apply some sanity checks. */
        if (track->type != MATROSKA_TRACK_TYPE_VIDEO &&
            track->type != MATROSKA_TRACK_TYPE_AUDIO &&
            track->type != MATROSKA_TRACK_TYPE_SUBTITLE) {
            av_log(matroska->ctx, AV_LOG_INFO,
                   "Unknown or unsupported track type %"PRIu64"\n",
                   track->type);
            continue;
        }
        if (track->codec_id == NULL)
            continue;

        if (track->type == MATROSKA_TRACK_TYPE_VIDEO) {
            /* avoid division by zero when no frame rate was stored */
            if (!track->default_duration && track->video.frame_rate > 0)
                track->default_duration = 1000000000/track->video.frame_rate;
            if (!track->video.display_width)
                track->video.display_width = track->video.pixel_width;
            if (!track->video.display_height)
                track->video.display_height = track->video.pixel_height;
        } else if (track->type == MATROSKA_TRACK_TYPE_AUDIO) {
            if (!track->audio.out_samplerate)
                track->audio.out_samplerate = track->audio.samplerate;
        }
        if (encodings_list->nb_elem > 1) {
            av_log(matroska->ctx, AV_LOG_ERROR,
                   "Multiple combined encodings not supported");
        } else if (encodings_list->nb_elem == 1) {
            if (encodings[0].type ||
                (encodings[0].compression.algo != MATROSKA_TRACK_ENCODING_COMP_HEADERSTRIP &&
#if CONFIG_ZLIB
                 encodings[0].compression.algo != MATROSKA_TRACK_ENCODING_COMP_ZLIB &&
#endif
#if CONFIG_BZLIB
                 encodings[0].compression.algo != MATROSKA_TRACK_ENCODING_COMP_BZLIB &&
#endif
                 encodings[0].compression.algo != MATROSKA_TRACK_ENCODING_COMP_LZO)) {
                encodings[0].scope = 0;
                av_log(matroska->ctx, AV_LOG_ERROR,
                       "Unsupported encoding type");
            } else if (track->codec_priv.size && encodings[0].scope&2) {
                /* scope bit 2: the codec private data itself is encoded */
                uint8_t *codec_priv = track->codec_priv.data;
                int offset = matroska_decode_buffer(&track->codec_priv.data,
                                                    &track->codec_priv.size,
                                                    track);
                if (offset < 0) {
                    track->codec_priv.data = NULL;
                    track->codec_priv.size = 0;
                    av_log(matroska->ctx, AV_LOG_ERROR,
                           "Failed to decode codec private data\n");
                } else if (offset > 0) {
                    /* header stripping: re-prepend the stripped bytes */
                    track->codec_priv.data = av_malloc(track->codec_priv.size + offset);
                    memcpy(track->codec_priv.data,
                           encodings[0].compression.settings.data, offset);
                    memcpy(track->codec_priv.data+offset, codec_priv,
                           track->codec_priv.size);
                    track->codec_priv.size += offset;
                }
                if (codec_priv != track->codec_priv.data)
                    av_free(codec_priv);
            }
        }

        /* map the Matroska codec-id string to a CodecID */
        for(j=0; ff_mkv_codec_tags[j].id != CODEC_ID_NONE; j++){
            if(!strncmp(ff_mkv_codec_tags[j].str, track->codec_id,
                        strlen(ff_mkv_codec_tags[j].str))){
                codec_id= ff_mkv_codec_tags[j].id;
                break;
            }
        }

        st = track->stream = av_new_stream(s, 0);
        if (st == NULL)
            return AVERROR(ENOMEM);

        if (!strcmp(track->codec_id, "V_MS/VFW/FOURCC")
            && track->codec_priv.size >= 40
            && track->codec_priv.data != NULL) {
            /* codec private is a BITMAPINFOHEADER; fourcc at offset 16 */
            track->ms_compat = 1;
            track->video.fourcc = AV_RL32(track->codec_priv.data + 16);
            codec_id = ff_codec_get_id(ff_codec_bmp_tags, track->video.fourcc);
            extradata_offset = 40;
        } else if (!strcmp(track->codec_id, "A_MS/ACM")
                   && track->codec_priv.size >= 14
                   && track->codec_priv.data != NULL) {
            /* codec private is a WAVEFORMATEX */
            init_put_byte(&b, track->codec_priv.data, track->codec_priv.size,
                          URL_RDONLY, NULL, NULL, NULL, NULL);
            ff_get_wav_header(&b, st->codec, track->codec_priv.size);
            codec_id = st->codec->codec_id;
            extradata_offset = FFMIN(track->codec_priv.size, 18);
        } else if (!strcmp(track->codec_id, "V_QUICKTIME")
                   && (track->codec_priv.size >= 86)
                   && (track->codec_priv.data != NULL)) {
            track->video.fourcc = AV_RL32(track->codec_priv.data);
            codec_id=ff_codec_get_id(codec_movvideo_tags, track->video.fourcc);
        } else if (codec_id == CODEC_ID_PCM_S16BE) {
            /* refine PCM codec from the declared bit depth */
            switch (track->audio.bitdepth) {
            case  8:  codec_id = CODEC_ID_PCM_U8;     break;
            case 24:  codec_id = CODEC_ID_PCM_S24BE;  break;
            case 32:  codec_id = CODEC_ID_PCM_S32BE;  break;
            }
        } else if (codec_id == CODEC_ID_PCM_S16LE) {
            switch (track->audio.bitdepth) {
            case  8:  codec_id = CODEC_ID_PCM_U8;     break;
            case 24:  codec_id = CODEC_ID_PCM_S24LE;  break;
            case 32:  codec_id = CODEC_ID_PCM_S32LE;  break;
            }
        } else if (codec_id==CODEC_ID_PCM_F32LE && track->audio.bitdepth==64) {
            codec_id = CODEC_ID_PCM_F64LE;
        } else if (codec_id == CODEC_ID_AAC && !track->codec_priv.size) {
            /* synthesize AudioSpecificConfig extradata from the codec id */
            int profile = matroska_aac_profile(track->codec_id);
            int sri = matroska_aac_sri(track->audio.samplerate);
            extradata = av_malloc(5);
            if (extradata == NULL)
                return AVERROR(ENOMEM);
            extradata[0] = (profile << 3) | ((sri&0x0E) >> 1);
            extradata[1] = ((sri&0x01) << 7) | (track->audio.channels<<3);
            if (strstr(track->codec_id, "SBR")) {
                /* explicit SBR signalling with the output sample rate */
                sri = matroska_aac_sri(track->audio.out_samplerate);
                extradata[2] = 0x56;
                extradata[3] = 0xE5;
                extradata[4] = 0x80 | (sri<<3);
                extradata_size = 5;
            } else
                extradata_size = 2;
        } else if (codec_id == CODEC_ID_TTA) {
            /* build a minimal TTA1 header as extradata */
            extradata_size = 30;
            extradata = av_mallocz(extradata_size);
            if (extradata == NULL)
                return AVERROR(ENOMEM);
            init_put_byte(&b, extradata, extradata_size, 1,
                          NULL, NULL, NULL, NULL);
            put_buffer(&b, "TTA1", 4);
            put_le16(&b, 1);
            put_le16(&b, track->audio.channels);
            put_le16(&b, track->audio.bitdepth);
            put_le32(&b, track->audio.out_samplerate);
            put_le32(&b, matroska->ctx->duration * track->audio.out_samplerate);
        } else if (codec_id == CODEC_ID_RV10 || codec_id == CODEC_ID_RV20 ||
                   codec_id == CODEC_ID_RV30 || codec_id == CODEC_ID_RV40) {
            extradata_offset = 26;
        } else if (codec_id == CODEC_ID_RA_144) {
            track->audio.out_samplerate = 8000;
            track->audio.channels = 1;
        } else if (codec_id == CODEC_ID_RA_288 || codec_id == CODEC_ID_COOK ||
                   codec_id == CODEC_ID_ATRAC3 || codec_id == CODEC_ID_SIPR) {
            /* parse the RealAudio interleaver parameters from codec priv */
            int flavor;
            init_put_byte(&b, track->codec_priv.data,track->codec_priv.size,
                          0, NULL, NULL, NULL, NULL);
            url_fskip(&b, 22);
            flavor                       = get_be16(&b);
            track->audio.coded_framesize = get_be32(&b);
            url_fskip(&b, 12);
            track->audio.sub_packet_h    = get_be16(&b);
            track->audio.frame_size      = get_be16(&b);
            track->audio.sub_packet_size = get_be16(&b);
            track->audio.buf = av_malloc(track->audio.frame_size * track->audio.sub_packet_h);
            if (codec_id == CODEC_ID_RA_288) {
                st->codec->block_align = track->audio.coded_framesize;
                track->codec_priv.size = 0;
            } else {
                if (codec_id == CODEC_ID_SIPR && flavor < 4) {
                    const int sipr_bit_rate[4] = { 6504, 8496, 5000, 16000 };
                    track->audio.sub_packet_size = ff_sipr_subpk_size[flavor];
                    st->codec->bit_rate = sipr_bit_rate[flavor];
                }
                st->codec->block_align = track->audio.sub_packet_size;
                extradata_offset = 78;
            }
        }
        track->codec_priv.size -= extradata_offset;

        if (codec_id == CODEC_ID_NONE)
            av_log(matroska->ctx, AV_LOG_INFO,
                   "Unknown/unsupported CodecID %s.\n", track->codec_id);

        if (track->time_scale < 0.01)
            track->time_scale = 1.0;
        av_set_pts_info(st, 64, matroska->time_scale*track->time_scale, 1000*1000*1000); /* 64 bit pts in ns */

        st->codec->codec_id = codec_id;
        st->start_time = 0;
        if (strcmp(track->language, "und"))
            av_metadata_set2(&st->metadata, "language", track->language, 0);
        av_metadata_set2(&st->metadata, "title", track->name, 0);

        if (track->flag_default)
            st->disposition |= AV_DISPOSITION_DEFAULT;

        if (track->default_duration)
            av_reduce(&st->codec->time_base.num, &st->codec->time_base.den,
                      track->default_duration, 1000000000, 30000);

        if (!st->codec->extradata) {
            if(extradata){
                st->codec->extradata = extradata;
                st->codec->extradata_size = extradata_size;
            } else if(track->codec_priv.data && track->codec_priv.size > 0){
                st->codec->extradata = av_mallocz(track->codec_priv.size +
                                                  FF_INPUT_BUFFER_PADDING_SIZE);
                if(st->codec->extradata == NULL)
                    return AVERROR(ENOMEM);
                st->codec->extradata_size = track->codec_priv.size;
                memcpy(st->codec->extradata,
                       track->codec_priv.data + extradata_offset,
                       track->codec_priv.size);
            }
        }

        if (track->type == MATROSKA_TRACK_TYPE_VIDEO) {
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_tag  = track->video.fourcc;
            st->codec->width  = track->video.pixel_width;
            st->codec->height = track->video.pixel_height;
            av_reduce(&st->sample_aspect_ratio.num,
                      &st->sample_aspect_ratio.den,
                      st->codec->height * track->video.display_width,
                      st->codec-> width * track->video.display_height,
                      255);
            if (st->codec->codec_id != CODEC_ID_H264)
                st->need_parsing = AVSTREAM_PARSE_HEADERS;
            if (track->default_duration)
                st->avg_frame_rate = av_d2q(1000000000.0/track->default_duration, INT_MAX);
        } else if (track->type == MATROSKA_TRACK_TYPE_AUDIO) {
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            st->codec->sample_rate = track->audio.out_samplerate;
            st->codec->channels = track->audio.channels;
            if (st->codec->codec_id != CODEC_ID_AAC)
                st->need_parsing = AVSTREAM_PARSE_HEADERS;
        } else if (track->type == MATROSKA_TRACK_TYPE_SUBTITLE) {
            st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
        }
    }

    attachements = attachements_list->elem;
    for (j=0; j<attachements_list->nb_elem; j++) {
        if (!(attachements[j].filename && attachements[j].mime &&
              attachements[j].bin.data && attachements[j].bin.size > 0)) {
            av_log(matroska->ctx, AV_LOG_ERROR, "incomplete attachment\n");
        } else {
            AVStream *st = av_new_stream(s, 0);
            if (st == NULL)
                break;
            av_metadata_set2(&st->metadata, "filename",attachements[j].filename, 0);
            st->codec->codec_id = CODEC_ID_NONE;
            st->codec->codec_type = AVMEDIA_TYPE_ATTACHMENT;
            st->codec->extradata  = av_malloc(attachements[j].bin.size);
            if(st->codec->extradata == NULL)
                break;
            st->codec->extradata_size = attachements[j].bin.size;
            memcpy(st->codec->extradata, attachements[j].bin.data, attachements[j].bin.size);

            for (i=0; ff_mkv_mime_tags[i].id != CODEC_ID_NONE; i++) {
                if (!strncmp(ff_mkv_mime_tags[i].str, attachements[j].mime,
                             strlen(ff_mkv_mime_tags[i].str))) {
                    st->codec->codec_id = ff_mkv_mime_tags[i].id;
                    break;
                }
            }
            attachements[j].stream = st;
        }
    }

    chapters = chapters_list->elem;
    for (i=0; i<chapters_list->nb_elem; i++)
        if (chapters[i].start != AV_NOPTS_VALUE && chapters[i].uid
            && (max_start==0 || chapters[i].start > max_start)) {
            chapters[i].chapter =
            ff_new_chapter(s, chapters[i].uid, (AVRational){1, 1000000000},
                           chapters[i].start, chapters[i].end,
                           chapters[i].title);
            av_metadata_set2(&chapters[i].chapter->metadata,
                             "title", chapters[i].title, 0);
            max_start = chapters[i].start;
        }

    index_list = &matroska->index;
    index = index_list->elem;
    if (index_list->nb_elem
        && index[0].time > 100000000000000/matroska->time_scale) {
        /* some muxers wrote cue times in ns instead of time_scale units */
        av_log(matroska->ctx, AV_LOG_WARNING, "Working around broken index.\n");
        index_scale = matroska->time_scale;
    }
    for (i=0; i<index_list->nb_elem; i++) {
        EbmlList *pos_list = &index[i].pos;
        MatroskaIndexPos *pos = pos_list->elem;
        for (j=0; j<pos_list->nb_elem; j++) {
            MatroskaTrack *track = matroska_find_track_by_num(matroska,
                                                              pos[j].track);
            if (track && track->stream)
                av_add_index_entry(track->stream,
                                   pos[j].pos + matroska->segment_start,
                                   index[i].time/index_scale, 0, 0,
                                   AVINDEX_KEYFRAME);
        }
    }

    matroska_convert_tags(s);

    return 0;
}
/*
* Put one packet in an application-supplied AVPacket struct.
* Returns 0 on success or -1 on failure.
*/
/*
 * Put one packet in an application-supplied AVPacket struct.
 * Returns 0 on success or -1 when the internal queue is empty.
 *
 * Fix vs. original: the shrinking av_realloc() of the queue array is now
 * checked; on failure the (still valid) old array is kept instead of
 * overwriting the pointer with NULL and losing the queued packets.
 */
static int matroska_deliver_packet(MatroskaDemuxContext *matroska,
                                   AVPacket *pkt)
{
    if (matroska->num_packets > 0) {
        void *newpackets;
        memcpy(pkt, matroska->packets[0], sizeof(AVPacket));
        av_free(matroska->packets[0]);
        if (matroska->num_packets > 1) {
            /* shift the remaining queue entries down by one */
            memmove(&matroska->packets[0], &matroska->packets[1],
                    (matroska->num_packets - 1) * sizeof(AVPacket *));
            newpackets = av_realloc(matroska->packets,
                                    (matroska->num_packets - 1) * sizeof(AVPacket *));
            if (newpackets)
                matroska->packets = newpackets;
        } else {
            av_freep(&matroska->packets);
        }
        matroska->num_packets--;
        return 0;
    }

    return -1;
}
/*
* Free all packets in our internal queue.
*/
/*
 * Free all packets in our internal queue and reset it to empty.
 */
static void matroska_clear_queue(MatroskaDemuxContext *matroska)
{
    int n;

    if (!matroska->packets)
        return;
    for (n = 0; n < matroska->num_packets; n++) {
        av_free_packet(matroska->packets[n]);
        av_free(matroska->packets[n]);
    }
    av_freep(&matroska->packets);
    matroska->num_packets = 0;
}
/**
 * Parse one Matroska (Simple)Block: resolve its track, timestamps and
 * keyframe flag, undo lacing (none/Xiph/fixed/EBML), optionally reverse
 * content encoding, de-interleave RealAudio sub-packets, and queue the
 * resulting AVPacket(s) on the demuxer's internal queue.
 * Returns 0 on (possibly partial) success, negative on hard errors.
 */
static int matroska_parse_block(MatroskaDemuxContext *matroska, uint8_t *data,
                                int size, int64_t pos, uint64_t cluster_time,
                                uint64_t duration, int is_keyframe,
                                int64_t cluster_pos)
{
    uint64_t timecode = AV_NOPTS_VALUE;
    MatroskaTrack *track;
    int res = 0;
    AVStream *st;
    AVPacket *pkt;
    int16_t block_time;
    uint32_t *lace_size = NULL;
    int n, flags, laces = 0;
    uint64_t num;

    /* first field of a block: EBML-coded track number */
    if ((n = matroska_ebmlnum_uint(matroska, data, size, &num)) < 0) {
        av_log(matroska->ctx, AV_LOG_ERROR, "EBML block data error\n");
        return res;
    }
    data += n;
    size -= n;

    track = matroska_find_track_by_num(matroska, num);
    if (size <= 3 || !track || !track->stream) {
        av_log(matroska->ctx, AV_LOG_INFO,
               "Invalid stream %"PRIu64" or size %u\n", num, size);
        return res;
    }
    st = track->stream;
    if (st->discard >= AVDISCARD_ALL)
        return res;
    if (duration == AV_NOPTS_VALUE)
        duration = track->default_duration / matroska->time_scale;

    /* signed 16-bit timecode relative to the cluster, then the flags byte */
    block_time = AV_RB16(data);
    data += 2;
    flags = *data++;
    size -= 3;
    if (is_keyframe == -1)
        is_keyframe = flags & 0x80 ? AV_PKT_FLAG_KEY : 0;

    if (cluster_time != (uint64_t)-1
        && (block_time >= 0 || cluster_time >= -block_time)) {
        timecode = cluster_time + block_time;
        if (track->type == MATROSKA_TRACK_TYPE_SUBTITLE
            && timecode < track->end_timecode)
            is_keyframe = 0;  /* overlapping subtitles are not key frame */
        if (is_keyframe)
            av_add_index_entry(st, cluster_pos, timecode, 0,0,AVINDEX_KEYFRAME);
        track->end_timecode = FFMAX(track->end_timecode, timecode+duration);
    }

    /* after a seek, drop data until the requested keyframe is reached */
    if (matroska->skip_to_keyframe && track->type != MATROSKA_TRACK_TYPE_SUBTITLE) {
        if (!is_keyframe || timecode < matroska->skip_to_timecode)
            return res;
        matroska->skip_to_keyframe = 0;
    }

    /* lacing mode is in bits 1-2 of the flags byte */
    switch ((flags & 0x06) >> 1) {
    case 0x0: /* no lacing */
        laces = 1;
        lace_size = av_mallocz(sizeof(int));
        lace_size[0] = size;
        break;

    case 0x1: /* Xiph lacing */
    case 0x2: /* fixed-size lacing */
    case 0x3: /* EBML lacing */
        assert(size>0); // size <=3 is checked before size-=3 above
        laces = (*data) + 1;
        data += 1;
        size -= 1;
        lace_size = av_mallocz(laces * sizeof(int));

        switch ((flags & 0x06) >> 1) {
        case 0x1: /* Xiph lacing */ {
            uint8_t temp;
            uint32_t total = 0;
            /* each size is a run of 0xff bytes plus a final byte */
            for (n = 0; res == 0 && n < laces - 1; n++) {
                while (1) {
                    if (size == 0) {
                        res = -1;
                        break;
                    }
                    temp = *data;
                    lace_size[n] += temp;
                    data += 1;
                    size -= 1;
                    if (temp != 0xff)
                        break;
                }
                total += lace_size[n];
            }
            /* last lace takes whatever remains */
            lace_size[n] = size - total;
            break;
        }

        case 0x2: /* fixed-size lacing */
            for (n = 0; n < laces; n++)
                lace_size[n] = size / laces;
            break;

        case 0x3: /* EBML lacing */ {
            uint32_t total;
            /* first size is absolute, the rest are signed deltas */
            n = matroska_ebmlnum_uint(matroska, data, size, &num);
            if (n < 0) {
                av_log(matroska->ctx, AV_LOG_INFO,
                       "EBML block data error\n");
                break;
            }
            data += n;
            size -= n;
            total = lace_size[0] = num;
            for (n = 1; res == 0 && n < laces - 1; n++) {
                int64_t snum;
                int r;
                r = matroska_ebmlnum_sint(matroska, data, size, &snum);
                if (r < 0) {
                    av_log(matroska->ctx, AV_LOG_INFO,
                           "EBML block data error\n");
                    break;
                }
                data += r;
                size -= r;
                lace_size[n] = lace_size[n - 1] + snum;
                total += lace_size[n];
            }
            lace_size[n] = size - total;
            break;
        }
        }
        break;
    }

    if (res == 0) {
        for (n = 0; n < laces; n++) {
            if ((st->codec->codec_id == CODEC_ID_RA_288 ||
                 st->codec->codec_id == CODEC_ID_COOK ||
                 st->codec->codec_id == CODEC_ID_SIPR ||
                 st->codec->codec_id == CODEC_ID_ATRAC3) &&
                 st->codec->block_align && track->audio.sub_packet_size) {
                /* RealAudio: accumulate sub-packets into the interleave
                 * buffer, then emit block_align-sized packets once a full
                 * frame group of sub_packet_h rows has been gathered */
                int a = st->codec->block_align;
                int sps = track->audio.sub_packet_size;
                int cfs = track->audio.coded_framesize;
                int h = track->audio.sub_packet_h;
                int y = track->audio.sub_packet_cnt;
                int w = track->audio.frame_size;
                int x;

                if (!track->audio.pkt_cnt) {
                    if (st->codec->codec_id == CODEC_ID_RA_288)
                        for (x=0; x<h/2; x++)
                            memcpy(track->audio.buf+x*2*w+y*cfs,
                                   data+x*cfs, cfs);
                    else if (st->codec->codec_id == CODEC_ID_SIPR)
                        memcpy(track->audio.buf + y*w, data, w);
                    else
                        for (x=0; x<w/sps; x++)
                            memcpy(track->audio.buf+sps*(h*x+((h+1)/2)*(y&1)+(y>>1)), data+x*sps, sps);

                    if (++track->audio.sub_packet_cnt >= h) {
                        if (st->codec->codec_id == CODEC_ID_SIPR)
                            ff_rm_reorder_sipr_data(track->audio.buf, h, w);
                         track->audio.sub_packet_cnt = 0;
                         track->audio.pkt_cnt = h*w / a;
                    }
                }
                while (track->audio.pkt_cnt) {
                    pkt = av_mallocz(sizeof(AVPacket));
                    av_new_packet(pkt, a);
                    memcpy(pkt->data, track->audio.buf
                           + a * (h*w / a - track->audio.pkt_cnt--), a);
                    pkt->pos = pos;
                    pkt->stream_index = st->index;
                    dynarray_add(&matroska->packets,&matroska->num_packets,pkt);
                }
            } else {
                MatroskaTrackEncoding *encodings = track->encodings.elem;
                int offset = 0, pkt_size = lace_size[n];
                uint8_t *pkt_data = data;

                if (lace_size[n] > size) {
                    av_log(matroska->ctx, AV_LOG_ERROR, "Invalid packet size\n");
                    break;
                }

                /* scope bit 1: block data itself is content-encoded */
                if (encodings && encodings->scope & 1) {
                    offset = matroska_decode_buffer(&pkt_data,&pkt_size, track);
                    if (offset < 0)
                        continue;
                }

                pkt = av_mallocz(sizeof(AVPacket));
                /* XXX: prevent data copy... */
                if (av_new_packet(pkt, pkt_size+offset) < 0) {
                    av_free(pkt);
                    res = AVERROR(ENOMEM);
                    break;
                }
                if (offset)
                    memcpy (pkt->data, encodings->compression.settings.data, offset);
                memcpy (pkt->data+offset, pkt_data, pkt_size);

                if (pkt_data != data)
                    av_free(pkt_data);

                if (n == 0)
                    pkt->flags = is_keyframe;
                pkt->stream_index = st->index;

                if (track->ms_compat)
                    pkt->dts = timecode;
                else
                    pkt->pts = timecode;
                pkt->pos = pos;
                if (st->codec->codec_id == CODEC_ID_TEXT)
                    pkt->convergence_duration = duration;
                else if (track->type != MATROSKA_TRACK_TYPE_SUBTITLE)
                    pkt->duration = duration;

                if (st->codec->codec_id == CODEC_ID_SSA)
                    matroska_fix_ass_packet(matroska, pkt, duration);

                /* merge consecutive packets of the same stream and pts */
                if (matroska->prev_pkt &&
                    timecode != AV_NOPTS_VALUE &&
                    matroska->prev_pkt->pts == timecode &&
                    matroska->prev_pkt->stream_index == st->index)
                    matroska_merge_packets(matroska->prev_pkt, pkt);
                else {
                    dynarray_add(&matroska->packets,&matroska->num_packets,pkt);
                    matroska->prev_pkt = pkt;
                }
            }

            if (timecode != AV_NOPTS_VALUE)
                timecode = duration ? timecode + duration : AV_NOPTS_VALUE;
            data += lace_size[n];
            size -= lace_size[n];
        }
    }

    av_free(lace_size);
    return res;
}
/**
 * Parse the next cluster from the input and push each of its blocks
 * through matroska_parse_block(), queueing the resulting packets.
 * Sets matroska->done on error so the read loop terminates.
 */
static int matroska_parse_cluster(MatroskaDemuxContext *matroska)
{
    MatroskaCluster cluster = { 0 };
    EbmlList *blocks_list;
    MatroskaBlock *blocks;
    int i, res;
    int64_t pos = url_ftell(matroska->ctx->pb);
    matroska->prev_pkt = NULL;
    if (matroska->has_cluster_id){
        /* For the first cluster we parse, its ID was already read as
           part of matroska_read_header(), so don't read it again */
        res = ebml_parse_id(matroska, matroska_clusters,
                            MATROSKA_ID_CLUSTER, &cluster);
        pos -= 4;  /* sizeof the ID which was already read */
        matroska->has_cluster_id = 0;
    } else
        res = ebml_parse(matroska, matroska_clusters, &cluster);
    blocks_list = &cluster.blocks;
    blocks = blocks_list->elem;
    for (i=0; i<blocks_list->nb_elem; i++)
        if (blocks[i].bin.size > 0) {
            /* SimpleBlocks carry their own keyframe flag (-1 = from flags);
             * regular Blocks are keyframes iff they have no reference */
            int is_keyframe = blocks[i].non_simple ? !blocks[i].reference : -1;
            res=matroska_parse_block(matroska,
                                     blocks[i].bin.data, blocks[i].bin.size,
                                     blocks[i].bin.pos,  cluster.timecode,
                                     blocks[i].duration, is_keyframe,
                                     pos);
        }
    ebml_free(matroska_cluster, &cluster);
    if (res < 0)  matroska->done = 1;
    return res;
}
/**
 * AVInputFormat.read_packet: keep parsing clusters until a packet is
 * available in the internal queue, then deliver it.
 */
static int matroska_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MatroskaDemuxContext *matroska = s->priv_data;

    for (;;) {
        if (!matroska_deliver_packet(matroska, pkt))
            return 0;
        if (matroska->done)
            return AVERROR_EOF;
        matroska_parse_cluster(matroska);
    }
}
/**
 * AVInputFormat.read_seek: seek to the index entry matching @p timestamp,
 * parsing further clusters if the index does not cover it yet.  For
 * non-discarded subtitle tracks, the seek target may be moved back to a
 * slightly earlier position so overlapping subtitles are re-read.
 *
 * Fix vs. original: the subtitle condition read
 * "!tracks[i].stream->discard != AVDISCARD_ALL", which (due to `!`
 * binding tighter than `!=`) is always true; the intended test is that
 * the track is not fully discarded.
 */
static int matroska_read_seek(AVFormatContext *s, int stream_index,
                              int64_t timestamp, int flags)
{
    MatroskaDemuxContext *matroska = s->priv_data;
    MatroskaTrack *tracks = matroska->tracks.elem;
    AVStream *st = s->streams[stream_index];
    int i, index, index_sub, index_min;

    if (!st->nb_index_entries)
        return 0;
    timestamp = FFMAX(timestamp, st->index_entries[0].timestamp);

    if ((index = av_index_search_timestamp(st, timestamp, flags)) < 0) {
        /* not indexed yet: parse clusters from the last known position
         * until the index covers the requested timestamp */
        url_fseek(s->pb, st->index_entries[st->nb_index_entries-1].pos, SEEK_SET);
        while ((index = av_index_search_timestamp(st, timestamp, flags)) < 0) {
            matroska_clear_queue(matroska);
            if (matroska_parse_cluster(matroska) < 0)
                break;
        }
    }

    matroska_clear_queue(matroska);
    if (index < 0)
        return 0;

    index_min = index;
    for (i=0; i < matroska->tracks.nb_elem; i++) {
        tracks[i].end_timecode = 0;
        if (tracks[i].type == MATROSKA_TRACK_TYPE_SUBTITLE
            && tracks[i].stream->discard != AVDISCARD_ALL) {
            index_sub = av_index_search_timestamp(tracks[i].stream, st->index_entries[index].timestamp, AVSEEK_FLAG_BACKWARD);
            /* only move back if the subtitle entry is earlier in the file
             * and no more than 30 s before the video seek point */
            if (index_sub >= 0
                && st->index_entries[index_sub].pos < st->index_entries[index_min].pos
                && st->index_entries[index].timestamp - st->index_entries[index_sub].timestamp < 30000000000/matroska->time_scale)
                index_min = index_sub;
        }
    }

    url_fseek(s->pb, st->index_entries[index_min].pos, SEEK_SET);
    matroska->skip_to_keyframe = !(flags & AVSEEK_FLAG_ANY);
    matroska->skip_to_timecode = st->index_entries[index].timestamp;
    matroska->done = 0;
    av_update_cur_dts(s, st, st->index_entries[index].timestamp);
    return 0;
}
/**
 * AVInputFormat.read_close: release the packet queue, per-track audio
 * interleave buffers, and all parsed segment data.
 */
static int matroska_read_close(AVFormatContext *s)
{
    MatroskaDemuxContext *matroska = s->priv_data;
    MatroskaTrack *tracks = matroska->tracks.elem;
    int i;

    matroska_clear_queue(matroska);

    for (i = 0; i < matroska->tracks.nb_elem; i++) {
        if (tracks[i].type != MATROSKA_TRACK_TYPE_AUDIO)
            continue;
        av_free(tracks[i].audio.buf);
    }
    ebml_free(matroska_segment, matroska);

    return 0;
}
/* Demuxer registration: positional AVInputFormat initializer
 * (name, long_name, priv_data_size, probe, header, packet, close, seek). */
AVInputFormat matroska_demuxer = {
    "matroska",
    NULL_IF_CONFIG_SMALL("Matroska file format"),
    sizeof(MatroskaDemuxContext),
    matroska_probe,
    matroska_read_header,
    matroska_read_packet,
    matroska_read_close,
    matroska_read_seek,
    .metadata_conv = ff_mkv_metadata_conv,
};
/* 123linslouis-android-video-cutter | jni/libavformat/matroskadec.c | C | asf20 | 69,340 */
/*
* Maxis XA (.xa) File Demuxer
* Copyright (c) 2008 Robert Marston
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Maxis XA File Demuxer
* by Robert Marston (rmarston@gmail.com)
* for more information on the XA audio format see
* http://wiki.multimedia.cx/index.php?title=Maxis_XA
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#define XA00_TAG MKTAG('X', 'A', 0, 0)
#define XAI0_TAG MKTAG('X', 'A', 'I', 0)
#define XAJ0_TAG MKTAG('X', 'A', 'J', 0)
/* Per-file demuxer state for Maxis XA streams. */
typedef struct MaxisXADemuxContext {
    uint32_t out_size;            /* total decoded payload size from the header */
    uint32_t sent_bytes;          /* payload bytes delivered so far */
    uint32_t audio_frame_counter; /* running sample count, used as pts */
} MaxisXADemuxContext;
/**
 * Probe for a Maxis XA file: check for one of the known XA tags and
 * sanity-check the channel count, sample rate and bit depth fields.
 */
static int xa_probe(AVProbeData *p)
{
    uint32_t tag;
    int channels, srate, bits_per_sample;

    if (p->buf_size < 24)
        return 0;

    tag = AV_RL32(p->buf);
    if (tag != XA00_TAG && tag != XAI0_TAG && tag != XAJ0_TAG)
        return 0;

    channels        = AV_RL16(p->buf + 10);
    srate           = AV_RL32(p->buf + 12);
    bits_per_sample = AV_RL16(p->buf + 22);
    if (!channels || channels > 8 || !srate || srate > 192000 ||
        bits_per_sample < 4 || bits_per_sample > 32)
        return 0;

    return AVPROBE_SCORE_MAX/2;
}
/**
 * Read the 24-byte Maxis XA header and create the single ADPCM audio
 * stream.  The get_le* calls below consume the header fields in file
 * order, so their sequence must not change.
 */
static int xa_read_header(AVFormatContext *s,
               AVFormatParameters *ap)
{
    MaxisXADemuxContext *xa = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st;

    /*Set up the XA Audio Decoder*/
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = CODEC_ID_ADPCM_EA_MAXIS_XA;
    url_fskip(pb, 4);       /* Skip the XA ID */
    xa->out_size =  get_le32(pb);
    url_fskip(pb, 2);       /* Skip the tag */
    st->codec->channels = get_le16(pb);
    st->codec->sample_rate = get_le32(pb);
    /* Value in file is average byte rate*/
    st->codec->bit_rate = get_le32(pb) * 8;
    st->codec->block_align = get_le16(pb);
    st->codec->bits_per_coded_sample = get_le16(pb);

    /* pts are expressed in samples */
    av_set_pts_info(st, 64, 1, st->codec->sample_rate);

    return 0;
}
/**
 * Deliver one 15*channels-byte XA block as a packet, tracking the running
 * sample count for pts.
 *
 * Fix vs. original: stop once sent_bytes reaches out_size (>=); the old
 * strict '>' comparison read one block beyond the payload size declared
 * in the header.
 */
static int xa_read_packet(AVFormatContext *s,
                          AVPacket *pkt)
{
    MaxisXADemuxContext *xa = s->priv_data;
    AVStream *st = s->streams[0];
    ByteIOContext *pb = s->pb;
    unsigned int packet_size;
    int ret;

    if(xa->sent_bytes >= xa->out_size)
        return AVERROR(EIO);
    /* 1 byte header and 14 bytes worth of samples * number channels per block */
    packet_size = 15*st->codec->channels;

    ret = av_get_packet(pb, pkt, packet_size);
    if(ret < 0)
        return ret;

    pkt->stream_index = st->index;
    xa->sent_bytes += packet_size;
    pkt->pts = xa->audio_frame_counter;
    /* 14 bytes Samples per channel with 2 samples per byte */
    xa->audio_frame_counter += 28 * st->codec->channels;

    return ret;
}
/* Demuxer registration; positional initializers are: short name,
 * long name, priv_data size, probe, read_header, read_packet. */
AVInputFormat xa_demuxer = {
    "xa",
    NULL_IF_CONFIG_SMALL("Maxis XA File Format"),
    sizeof(MaxisXADemuxContext),
    xa_probe,
    xa_read_header,
    xa_read_packet,
};
| 123linslouis-android-video-cutter | jni/libavformat/xa.c | C | asf20 | 3,758 |
/*
* Sega FILM Format (CPK) Demuxer
* Copyright (c) 2003 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Sega FILM (.cpk) file demuxer
* by Mike Melanson (melanson@pcisys.net)
* For more information regarding the Sega FILM file format, visit:
* http://www.pcisys.net/~melanson/codecs/
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#define FILM_TAG MKBETAG('F', 'I', 'L', 'M')
#define FDSC_TAG MKBETAG('F', 'D', 'S', 'C')
#define STAB_TAG MKBETAG('S', 'T', 'A', 'B')
#define CVID_TAG MKBETAG('c', 'v', 'i', 'd')
/* One entry of the FILM sample (chunk) table. */
typedef struct {
    int stream;               /* index of the AVStream this sample belongs to */
    int64_t sample_offset;    /* absolute file offset of the sample data */
    unsigned int sample_size; /* size of the sample data in bytes */
    int64_t pts;              /* presentation timestamp in base_clock units */
    int keyframe;             /* 1 if the sample is a keyframe */
} film_sample;
/* Per-file demuxer state for Sega FILM/CPK files. */
typedef struct FilmDemuxContext {
    int video_stream_index;
    int audio_stream_index;
    enum CodecID audio_type;       /* CODEC_ID_NONE if no audio */
    unsigned int audio_samplerate;
    unsigned int audio_bits;
    unsigned int audio_channels;
    enum CodecID video_type;       /* CODEC_ID_NONE if no video */
    unsigned int sample_count;     /* number of entries in sample_table */
    film_sample *sample_table;
    unsigned int current_sample;   /* next sample_table index to demux */
    unsigned int base_clock;       /* timestamp time base denominator */
    unsigned int version;          /* 0 = Lemmings variant, else Saturn CPK */
    /* buffer used for interleaving stereo PCM data */
    unsigned char *stereo_buffer;
    int stereo_buffer_size;
} FilmDemuxContext;
/* Probe: a Sega FILM file starts with the big-endian 'FILM' tag. */
static int film_probe(AVProbeData *p)
{
    return AV_RB32(&p->buf[0]) == FILM_TAG ? AVPROBE_SCORE_MAX : 0;
}
/**
 * Parse the FILM, FDSC and STAB chunks, create the audio/video streams
 * and build the in-memory sample table used by film_read_packet().
 * @return 0 on success, a negative AVERROR on malformed input or OOM.
 */
static int film_read_header(AVFormatContext *s,
                            AVFormatParameters *ap)
{
    FilmDemuxContext *film = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st;
    unsigned char scratch[256];
    int i;
    unsigned int data_offset;
    unsigned int audio_frame_counter;

    film->sample_table = NULL;
    film->stereo_buffer = NULL;
    film->stereo_buffer_size = 0;

    /* load the main FILM header */
    if (get_buffer(pb, scratch, 16) != 16)
        return AVERROR(EIO);
    data_offset = AV_RB32(&scratch[4]);
    film->version = AV_RB32(&scratch[8]);

    /* load the FDSC chunk */
    if (film->version == 0) {
        /* special case for Lemmings .film files; 20-byte header */
        if (get_buffer(pb, scratch, 20) != 20)
            return AVERROR(EIO);
        /* make some assumptions about the audio parameters */
        film->audio_type = CODEC_ID_PCM_S8;
        film->audio_samplerate = 22050;
        film->audio_channels = 1;
        film->audio_bits = 8;
    } else {
        /* normal Saturn .cpk files; 32-byte header */
        if (get_buffer(pb, scratch, 32) != 32)
            return AVERROR(EIO);
        film->audio_samplerate = AV_RB16(&scratch[24]);
        film->audio_channels = scratch[21];
        film->audio_bits = scratch[22];
        if (film->audio_bits == 8)
            film->audio_type = CODEC_ID_PCM_S8;
        else if (film->audio_bits == 16)
            film->audio_type = CODEC_ID_PCM_S16BE;
        else
            film->audio_type = CODEC_ID_NONE;
    }

    if (AV_RB32(&scratch[0]) != FDSC_TAG)
        return AVERROR_INVALIDDATA;

    if (AV_RB32(&scratch[8]) == CVID_TAG) {
        film->video_type = CODEC_ID_CINEPAK;
    } else
        film->video_type = CODEC_ID_NONE;

    /* initialize the decoder streams */
    if (film->video_type) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        film->video_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id = film->video_type;
        st->codec->codec_tag = 0;  /* no fourcc */
        st->codec->width = AV_RB32(&scratch[16]);
        st->codec->height = AV_RB32(&scratch[12]);
    }

    if (film->audio_type) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        film->audio_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = film->audio_type;
        st->codec->codec_tag = 1;
        st->codec->channels = film->audio_channels;
        st->codec->bits_per_coded_sample = film->audio_bits;
        st->codec->sample_rate = film->audio_samplerate;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_coded_sample;
        st->codec->block_align = st->codec->channels *
            st->codec->bits_per_coded_sample / 8;
    }

    /* load the sample table */
    if (get_buffer(pb, scratch, 16) != 16)
        return AVERROR(EIO);
    if (AV_RB32(&scratch[0]) != STAB_TAG)
        return AVERROR_INVALIDDATA;
    film->base_clock = AV_RB32(&scratch[8]);
    film->sample_count = AV_RB32(&scratch[12]);
    if(film->sample_count >= UINT_MAX / sizeof(film_sample))
        return -1;
    film->sample_table = av_malloc(film->sample_count * sizeof(film_sample));
    /* FIX: the allocation result was previously used unchecked */
    if (!film->sample_table)
        return AVERROR(ENOMEM);

    for(i=0; i<s->nb_streams; i++)
        av_set_pts_info(s->streams[i], 33, 1, film->base_clock);

    audio_frame_counter = 0;
    for (i = 0; i < film->sample_count; i++) {
        /* load the next sample record and transfer it to an internal struct */
        if (get_buffer(pb, scratch, 16) != 16) {
            av_free(film->sample_table);
            return AVERROR(EIO);
        }
        film->sample_table[i].sample_offset =
            data_offset + AV_RB32(&scratch[0]);
        film->sample_table[i].sample_size = AV_RB32(&scratch[4]);
        if (AV_RB32(&scratch[8]) == 0xFFFFFFFF) {
            /* audio sample: pts is derived from the decoded frame count */
            film->sample_table[i].stream = film->audio_stream_index;
            film->sample_table[i].pts = audio_frame_counter;
            film->sample_table[i].pts *= film->base_clock;
            film->sample_table[i].pts /= film->audio_samplerate;
            /* FIX: keyframe was left uninitialized for audio samples;
             * every audio chunk is independently decodable */
            film->sample_table[i].keyframe = 1;
            audio_frame_counter += (film->sample_table[i].sample_size /
                (film->audio_channels * film->audio_bits / 8));
        } else {
            /* video sample: pts is stored in the record, high bit = delta frame */
            film->sample_table[i].stream = film->video_stream_index;
            film->sample_table[i].pts = AV_RB32(&scratch[8]) & 0x7FFFFFFF;
            film->sample_table[i].keyframe = (scratch[8] & 0x80) ? 0 : 1;
        }
    }

    film->current_sample = 0;

    return 0;
}
/**
 * Demux the next sample from the table. Stereo PCM samples are stored
 * planar (all left samples, then all right samples) and are interleaved
 * here through a scratch buffer before being returned.
 * @return bytes read (>=0) on success, negative AVERROR on failure/EOF.
 */
static int film_read_packet(AVFormatContext *s,
                            AVPacket *pkt)
{
    FilmDemuxContext *film = s->priv_data;
    ByteIOContext *pb = s->pb;
    film_sample *sample;
    int ret = 0;
    int i;
    int left, right;

    if (film->current_sample >= film->sample_count)
        return AVERROR(EIO);

    sample = &film->sample_table[film->current_sample];

    /* position the stream (will probably be there anyway) */
    url_fseek(pb, sample->sample_offset, SEEK_SET);

    /* do a special song and dance when loading FILM Cinepak chunks */
    if ((sample->stream == film->video_stream_index) &&
        (film->video_type == CODEC_ID_CINEPAK)) {
        pkt->pos= url_ftell(pb);
        if (av_new_packet(pkt, sample->sample_size))
            return AVERROR(ENOMEM);
        get_buffer(pb, pkt->data, sample->sample_size);
    } else if ((sample->stream == film->audio_stream_index) &&
        (film->audio_channels == 2)) {
        /* stereo PCM needs to be interleaved */
        if (av_new_packet(pkt, sample->sample_size))
            return AVERROR(ENOMEM);

        /* make sure the interleave buffer is large enough */
        if (sample->sample_size > film->stereo_buffer_size) {
            av_free(film->stereo_buffer);
            film->stereo_buffer_size = sample->sample_size;
            film->stereo_buffer = av_malloc(film->stereo_buffer_size);
            /* FIX: allocation was previously used unchecked, crashing in
             * the interleave loop below on OOM */
            if (!film->stereo_buffer) {
                film->stereo_buffer_size = 0;
                av_free_packet(pkt);
                return AVERROR(ENOMEM);
            }
        }

        pkt->pos= url_ftell(pb);
        ret = get_buffer(pb, film->stereo_buffer, sample->sample_size);
        if (ret != sample->sample_size)
            ret = AVERROR(EIO);

        /* interleave planar left/right halves into the packet */
        left = 0;
        right = sample->sample_size / 2;
        for (i = 0; i < sample->sample_size; ) {
            if (film->audio_bits == 8) {
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[right++];
            } else {
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[left++];
                pkt->data[i++] = film->stereo_buffer[right++];
                pkt->data[i++] = film->stereo_buffer[right++];
            }
        }
    } else {
        ret= av_get_packet(pb, pkt, sample->sample_size);
        if (ret != sample->sample_size)
            ret = AVERROR(EIO);
    }

    pkt->stream_index = sample->stream;
    pkt->pts = sample->pts;

    film->current_sample++;

    return ret;
}
/* Release demuxer-owned buffers (sample table, stereo scratch buffer). */
static int film_read_close(AVFormatContext *s)
{
    FilmDemuxContext *film = s->priv_data;

    av_free(film->stereo_buffer);
    av_free(film->sample_table);
    return 0;
}
/* Demuxer registration; positional initializers are: short name,
 * long name, priv_data size, probe, read_header, read_packet, read_close. */
AVInputFormat segafilm_demuxer = {
    "film_cpk",
    NULL_IF_CONFIG_SMALL("Sega FILM/CPK format"),
    sizeof(FilmDemuxContext),
    film_probe,
    film_read_header,
    film_read_packet,
    film_read_close,
};
| 123linslouis-android-video-cutter | jni/libavformat/segafilm.c | C | asf20 | 9,548 |
/*
* 4X Technologies .4xm File Demuxer (no muxer)
* Copyright (c) 2003 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* 4X Technologies file demuxer
* by Mike Melanson (melanson@pcisys.net)
* for more information on the .4xm file format, visit:
* http://www.pcisys.net/~melanson/codecs/
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#define RIFF_TAG MKTAG('R', 'I', 'F', 'F')
#define FOURXMV_TAG MKTAG('4', 'X', 'M', 'V')
#define LIST_TAG MKTAG('L', 'I', 'S', 'T')
#define HEAD_TAG MKTAG('H', 'E', 'A', 'D')
#define TRK__TAG MKTAG('T', 'R', 'K', '_')
#define MOVI_TAG MKTAG('M', 'O', 'V', 'I')
#define VTRK_TAG MKTAG('V', 'T', 'R', 'K')
#define STRK_TAG MKTAG('S', 'T', 'R', 'K')
#define std__TAG MKTAG('s', 't', 'd', '_')
#define name_TAG MKTAG('n', 'a', 'm', 'e')
#define vtrk_TAG MKTAG('v', 't', 'r', 'k')
#define strk_TAG MKTAG('s', 't', 'r', 'k')
#define ifrm_TAG MKTAG('i', 'f', 'r', 'm')
#define pfrm_TAG MKTAG('p', 'f', 'r', 'm')
#define cfrm_TAG MKTAG('c', 'f', 'r', 'm')
#define ifr2_TAG MKTAG('i', 'f', 'r', '2')
#define pfr2_TAG MKTAG('p', 'f', 'r', '2')
#define cfr2_TAG MKTAG('c', 'f', 'r', '2')
#define snd__TAG MKTAG('s', 'n', 'd', '_')
#define vtrk_SIZE 0x44
#define strk_SIZE 0x28
#define GET_LIST_HEADER() \
fourcc_tag = get_le32(pb); \
size = get_le32(pb); \
if (fourcc_tag != LIST_TAG) \
return AVERROR_INVALIDDATA; \
fourcc_tag = get_le32(pb);
/* Parameters of one audio track declared by a 'strk' chunk. */
typedef struct AudioTrack {
    int sample_rate;
    int bits;          /* bits per coded sample */
    int channels;
    int stream_index;  /* AVStream index allocated for this track */
    int adpcm;         /* nonzero if the track is 4X ADPCM rather than PCM */
    int64_t audio_pts; /* running pts in samples for this track */
} AudioTrack;
/* Per-file demuxer state for 4X Technologies .4xm files. */
typedef struct FourxmDemuxContext {
    int width;
    int height;
    int video_stream_index;
    int track_count;     /* number of entries in tracks[] */
    AudioTrack *tracks;  /* indexed by track number from the file */
    int64_t video_pts;   /* current video pts, bumped on each LIST chunk */
    float fps;           /* frame rate from the 'std_' chunk, default 1.0 */
} FourxmDemuxContext;
/* Probe: a .4xm file is a RIFF container whose form type is '4XMV'. */
static int fourxm_probe(AVProbeData *p)
{
    if (AV_RL32(&p->buf[0]) == RIFF_TAG &&
        AV_RL32(&p->buf[8]) == FOURXMV_TAG)
        return AVPROBE_SCORE_MAX;
    return 0;
}
/**
 * Parse the LIST-HEAD header chunk: load the whole header block into
 * memory and byte-scan it for 'std_' (frame rate), 'vtrk' (video track)
 * and 'strk' (audio track) chunks, creating one AVStream per track.
 * @return 0 on success, negative AVERROR on malformed input or OOM.
 */
static int fourxm_read_header(AVFormatContext *s,
                              AVFormatParameters *ap)
{
    ByteIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int header_size;
    FourxmDemuxContext *fourxm = s->priv_data;
    unsigned char *header;
    int i, ret;
    AVStream *st;
    fourxm->track_count = 0;
    fourxm->tracks = NULL;
    fourxm->fps = 1.0;
    /* skip the first 3 32-bit numbers */
    url_fseek(pb, 12, SEEK_CUR);
    /* check for LIST-HEAD */
    GET_LIST_HEADER();
    header_size = size - 4;
    if (fourcc_tag != HEAD_TAG || header_size < 0)
        return AVERROR_INVALIDDATA;
    /* allocate space for the header and load the whole thing */
    header = av_malloc(header_size);
    if (!header)
        return AVERROR(ENOMEM);
    if (get_buffer(pb, header, header_size) != header_size){
        av_free(header);
        return AVERROR(EIO);
    }
    /* take the lazy approach and search for any and all vtrk and strk chunks */
    for (i = 0; i < header_size - 8; i++) {
        fourcc_tag = AV_RL32(&header[i]);
        size = AV_RL32(&header[i + 4]);
        if (fourcc_tag == std__TAG) {
            /* NOTE(review): fps is read as a float; if the stored value is 0
             * the av_set_pts_info() call below gets a 0 denominator — TODO confirm
             * upstream validation */
            fourxm->fps = av_int2flt(AV_RL32(&header[i + 12]));
        } else if (fourcc_tag == vtrk_TAG) {
            /* check that there is enough data */
            if (size != vtrk_SIZE) {
                ret= AVERROR_INVALIDDATA;
                goto fail;
            }
            fourxm->width = AV_RL32(&header[i + 36]);
            fourxm->height = AV_RL32(&header[i + 40]);
            /* allocate a new AVStream */
            st = av_new_stream(s, 0);
            if (!st){
                ret= AVERROR(ENOMEM);
                goto fail;
            }
            /* NOTE(review): fps is a float but av_set_pts_info() takes an
             * integer denominator, so fractional rates are truncated — TODO confirm */
            av_set_pts_info(st, 60, 1, fourxm->fps);
            fourxm->video_stream_index = st->index;
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_id = CODEC_ID_4XM;
            st->codec->extradata_size = 4;
            /* NOTE(review): this allocation is used unchecked on the next
             * line; a NULL return would crash — TODO add ENOMEM handling */
            st->codec->extradata = av_malloc(4);
            AV_WL32(st->codec->extradata, AV_RL32(&header[i + 16]));
            st->codec->width = fourxm->width;
            st->codec->height = fourxm->height;
            i += 8 + size;
        } else if (fourcc_tag == strk_TAG) {
            int current_track;
            /* check that there is enough data */
            if (size != strk_SIZE) {
                ret= AVERROR_INVALIDDATA;
                goto fail;
            }
            current_track = AV_RL32(&header[i + 8]);
            if((unsigned)current_track >= UINT_MAX / sizeof(AudioTrack) - 1){
                av_log(s, AV_LOG_ERROR, "current_track too large\n");
                ret= -1;
                goto fail;
            }
            /* grow the track array so it covers this track number */
            if (current_track + 1 > fourxm->track_count) {
                fourxm->track_count = current_track + 1;
                fourxm->tracks = av_realloc(fourxm->tracks,
                    fourxm->track_count * sizeof(AudioTrack));
                if (!fourxm->tracks) {
                    ret= AVERROR(ENOMEM);
                    goto fail;
                }
            }
            fourxm->tracks[current_track].adpcm       = AV_RL32(&header[i + 12]);
            fourxm->tracks[current_track].channels    = AV_RL32(&header[i + 36]);
            fourxm->tracks[current_track].sample_rate = AV_RL32(&header[i + 40]);
            fourxm->tracks[current_track].bits        = AV_RL32(&header[i + 44]);
            fourxm->tracks[current_track].audio_pts   = 0;
            i += 8 + size;
            /* allocate a new AVStream */
            st = av_new_stream(s, current_track);
            if (!st){
                ret= AVERROR(ENOMEM);
                goto fail;
            }
            av_set_pts_info(st, 60, 1, fourxm->tracks[current_track].sample_rate);
            fourxm->tracks[current_track].stream_index = st->index;
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            st->codec->codec_tag = 0;
            st->codec->channels              = fourxm->tracks[current_track].channels;
            st->codec->sample_rate           = fourxm->tracks[current_track].sample_rate;
            st->codec->bits_per_coded_sample = fourxm->tracks[current_track].bits;
            st->codec->bit_rate              = st->codec->channels * st->codec->sample_rate *
                st->codec->bits_per_coded_sample;
            st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
            if (fourxm->tracks[current_track].adpcm){
                st->codec->codec_id = CODEC_ID_ADPCM_4XM;
            }else if (st->codec->bits_per_coded_sample == 8){
                st->codec->codec_id = CODEC_ID_PCM_U8;
            }else
                st->codec->codec_id = CODEC_ID_PCM_S16LE;
        }
    }
    /* skip over the LIST-MOVI chunk (which is where the stream should be */
    GET_LIST_HEADER();
    if (fourcc_tag != MOVI_TAG){
        ret= AVERROR_INVALIDDATA;
        goto fail;
    }
    av_free(header);
    /* initialize context members */
    fourxm->video_pts = -1;  /* first frame will push to 0 */
    return 0;
fail:
    av_freep(&fourxm->tracks);
    av_free(header);
    return ret;
}
/**
 * Walk chunks in the MOVI list until one packet is produced: LIST chunks
 * bump the video pts, video frame chunks ('ifrm'/'pfrm'/... ) are returned
 * with their 8-byte chunk header prepended, 'snd_' chunks are returned as
 * audio packets with a sample-count-derived pts; everything else is skipped.
 * @return bytes read (>=0) on success, negative AVERROR on failure/EOF.
 */
static int fourxm_read_packet(AVFormatContext *s,
                              AVPacket *pkt)
{
    FourxmDemuxContext *fourxm = s->priv_data;
    ByteIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size, out_size;
    int ret = 0;
    unsigned int track_number;
    int packet_read = 0;
    unsigned char header[8];
    int audio_frame_count;
    while (!packet_read) {
        if ((ret = get_buffer(s->pb, header, 8)) < 0)
            return ret;
        fourcc_tag = AV_RL32(&header[0]);
        size = AV_RL32(&header[4]);
        if (url_feof(pb))
            return AVERROR(EIO);
        switch (fourcc_tag) {
        case LIST_TAG:
            /* this is a good time to bump the video pts */
            fourxm->video_pts ++;
            /* skip the LIST-* tag and move on to the next fourcc */
            get_le32(pb);
            break;
        case ifrm_TAG:
        case pfrm_TAG:
        case cfrm_TAG:
        case ifr2_TAG:
        case pfr2_TAG:
        case cfr2_TAG:
            /* allocate 8 more bytes than 'size' to account for fourcc
             * and size; the size+8 < size test catches unsigned overflow */
            if (size + 8 < size || av_new_packet(pkt, size + 8))
                return AVERROR(EIO);
            pkt->stream_index = fourxm->video_stream_index;
            pkt->pts = fourxm->video_pts;
            pkt->pos = url_ftell(s->pb);
            memcpy(pkt->data, header, 8);
            ret = get_buffer(s->pb, &pkt->data[8], size);
            if (ret < 0){
                av_free_packet(pkt);
            }else
                packet_read = 1;
            break;
        case snd__TAG:
            /* 8-byte sub-header: track number + uncompressed output size */
            track_number = get_le32(pb);
            out_size= get_le32(pb);
            size-=8;
            if (track_number < fourxm->track_count) {
                ret= av_get_packet(s->pb, pkt, size);
                if(ret<0)
                    return AVERROR(EIO);
                pkt->stream_index =
                    fourxm->tracks[track_number].stream_index;
                pkt->pts = fourxm->tracks[track_number].audio_pts;
                packet_read = 1;
                /* pts accounting: convert payload bytes to sample count.
                 * NOTE(review): divides by channels and bits from the file
                 * header; a zero value would fault — TODO confirm validated
                 * upstream */
                audio_frame_count = size;
                if (fourxm->tracks[track_number].adpcm)
                    audio_frame_count -=
                        2 * (fourxm->tracks[track_number].channels);
                audio_frame_count /=
                    fourxm->tracks[track_number].channels;
                if (fourxm->tracks[track_number].adpcm){
                    audio_frame_count *= 2;
                }else
                    audio_frame_count /=
                    (fourxm->tracks[track_number].bits / 8);
                fourxm->tracks[track_number].audio_pts += audio_frame_count;
            } else {
                url_fseek(pb, size, SEEK_CUR);
            }
            break;
        default:
            url_fseek(pb, size, SEEK_CUR);
            break;
        }
    }
    return ret;
}
/* Release the audio track table owned by the demuxer context. */
static int fourxm_read_close(AVFormatContext *s)
{
    FourxmDemuxContext *ctx = s->priv_data;

    av_freep(&ctx->tracks);
    return 0;
}
/* Demuxer registration; positional initializers are: short name,
 * long name, priv_data size, probe, read_header, read_packet, read_close. */
AVInputFormat fourxm_demuxer = {
    "4xm",
    NULL_IF_CONFIG_SMALL("4X Technologies format"),
    sizeof(FourxmDemuxContext),
    fourxm_probe,
    fourxm_read_header,
    fourxm_read_packet,
    fourxm_read_close,
};
| 123linslouis-android-video-cutter | jni/libavformat/4xm.c | C | asf20 | 11,308 |
/*
* MPEG2 transport stream defines
* Copyright (c) 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_MPEGTS_H
#define AVFORMAT_MPEGTS_H
#include "avformat.h"
/* Transport-stream packet sizes for the supported container variants. */
#define TS_FEC_PACKET_SIZE 204
#define TS_DVHS_PACKET_SIZE 192
#define TS_PACKET_SIZE 188
#define TS_MAX_PACKET_SIZE 204
#define NB_PID_MAX 8192
#define MAX_SECTION_SIZE 4096
/* pids */
#define PAT_PID                 0x0000
#define SDT_PID                 0x0011
/* table ids */
#define PAT_TID   0x00
#define PMT_TID   0x02
#define SDT_TID   0x42
/* PMT elementary stream_type values */
#define STREAM_TYPE_VIDEO_MPEG1     0x01
#define STREAM_TYPE_VIDEO_MPEG2     0x02
#define STREAM_TYPE_AUDIO_MPEG1     0x03
#define STREAM_TYPE_AUDIO_MPEG2     0x04
#define STREAM_TYPE_PRIVATE_SECTION 0x05
#define STREAM_TYPE_PRIVATE_DATA    0x06
#define STREAM_TYPE_AUDIO_AAC       0x0f
#define STREAM_TYPE_VIDEO_MPEG4     0x10
#define STREAM_TYPE_VIDEO_H264      0x1b
#define STREAM_TYPE_VIDEO_VC1       0xea
#define STREAM_TYPE_VIDEO_DIRAC     0xd1
#define STREAM_TYPE_AUDIO_AC3       0x81
#define STREAM_TYPE_AUDIO_DTS       0x8a
/* Opaque parser handle; definition lives in mpegts.c. */
typedef struct MpegTSContext MpegTSContext;
/* Create a raw TS parser bound to the given format context. */
MpegTSContext *ff_mpegts_parse_open(AVFormatContext *s);
/* Feed raw TS bytes; returns >0 when a packet was produced in *pkt. */
int ff_mpegts_parse_packet(MpegTSContext *ts, AVPacket *pkt,
                           const uint8_t *buf, int len);
/* Free the parser created by ff_mpegts_parse_open(). */
void ff_mpegts_parse_close(MpegTSContext *ts);
#endif /* AVFORMAT_MPEGTS_H */
| 123linslouis-android-video-cutter | jni/libavformat/mpegts.h | C | asf20 | 2,102 |
/*
* Microsoft RTP/ASF support.
* Copyright (c) 2008 Ronald S. Bultje
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* @brief Microsoft RTP/ASF support
* @author Ronald S. Bultje <rbultje@ronald.bitfreak.net>
*/
#include <libavutil/base64.h>
#include <libavutil/avstring.h>
#include <libavutil/intreadwrite.h>
#include "rtp.h"
#include "rtpdec_asf.h"
#include "rtsp.h"
#include "asf.h"
/**
* From MSDN 2.2.1.4, we learn that ASF data packets over RTP should not
* contain any padding. Unfortunately, the header min/max_pktsize are not
* updated (thus making min_pktsize invalid). Here, we "fix" these faulty
* min_pktsize values in the ASF file header.
* @return 0 on success, <0 on failure (currently -1).
*/
static int rtp_asf_fix_header(uint8_t *buf, int len)
{
    uint8_t *p = buf, *end = buf + len;
    /* must start with the ASF header GUID and be long enough for one
     * sub-object header */
    if (len < sizeof(ff_asf_guid) * 2 + 22 ||
        memcmp(p, ff_asf_header, sizeof(ff_asf_guid))) {
        return -1;
    }
    p += sizeof(ff_asf_guid) + 14;
    do {
        uint64_t chunksize = AV_RL64(p + sizeof(ff_asf_guid));
        if (memcmp(p, ff_asf_file_header, sizeof(ff_asf_guid))) {
            /* not the file header object: skip to the next one */
            if (chunksize > end - p)
                return -1;
            p += chunksize;
            continue;
        }
        /* skip most of the file header, to min_pktsize */
        p += 6 * 8 + 3 * 4 + sizeof(ff_asf_guid) * 2;
        if (p + 8 <= end && AV_RL32(p) == AV_RL32(p + 4)) {
            /* min_pktsize == max_pktsize: and set that to zero */
            AV_WL32(p, 0);
            return 0;
        }
        break;
    } while (end - p >= sizeof(ff_asf_guid) + 8);
    return -1;
}
/**
* The following code is basically a buffered ByteIOContext,
* with the added benefit of returning -EAGAIN (instead of 0)
* on packet boundaries, such that the ASF demuxer can return
* safely and resume business at the next packet.
*/
/* Read callback that always reports "no more data yet": lets the ASF
 * demuxer return at packet boundaries instead of blocking. */
static int packetizer_read(void *opaque, uint8_t *buf, int buf_size)
{
    return AVERROR(EAGAIN);
}
/* Wrap an in-memory buffer in a ByteIOContext whose read callback only
 * ever returns EAGAIN, so consumers stop exactly at the buffer end. */
static void init_packetizer(ByteIOContext *pb, uint8_t *buf, int len)
{
    init_put_byte(pb, buf, len, 0, NULL, packetizer_read, NULL, NULL);

    /* this "fills" the buffer with its current content */
    pb->pos     = len;
    pb->buf_end = buf + len;
}
/**
 * Handle the WMS SDP "a=" line carrying a base64-encoded ASF header:
 * decode it, patch the faulty min_pktsize, and open an internal ASF
 * demuxer context (rt->asf_ctx) on the decoded header bytes.
 */
void ff_wms_parse_sdp_a_line(AVFormatContext *s, const char *p)
{
    if (av_strstart(p, "pgmpu:data:application/vnd.ms.wms-hdr.asfv1;base64,", &p)) {
        ByteIOContext pb;
        RTSPState *rt = s->priv_data;
        int len = strlen(p) * 6 / 8;
        char *buf = av_mallocz(len);
        /* FIX: allocation was previously passed to av_base64_decode unchecked */
        if (!buf)
            return;
        av_base64_decode(buf, p, len);

        if (rtp_asf_fix_header(buf, len) < 0)
            av_log(s, AV_LOG_ERROR,
                   "Failed to fix invalid RTSP-MS/ASF min_pktsize\n");
        init_packetizer(&pb, buf, len);
        if (rt->asf_ctx) {
            av_close_input_stream(rt->asf_ctx);
            rt->asf_ctx = NULL;
        }
        /* FIX: on failure rt->asf_ctx stays NULL and was dereferenced below */
        if (av_open_input_stream(&rt->asf_ctx, &pb, "", &asf_demuxer, NULL) < 0) {
            av_free(buf);
            return;
        }
        rt->asf_pb_pos = url_ftell(&pb);
        av_free(buf);
        rt->asf_ctx->pb = NULL;
    }
}
/**
 * Handle a per-stream "stream:" SDP line: map the RTSP stream to the
 * matching stream of the embedded ASF context by id, and copy that
 * stream's codec parameters over (taking ownership of its extradata).
 * @return always 0.
 */
static int asfrtp_parse_sdp_line(AVFormatContext *s, int stream_index,
                                 PayloadContext *asf, const char *line)
{
    if (av_strstart(line, "stream:", &line)) {
        RTSPState *rt = s->priv_data;

        s->streams[stream_index]->id = strtol(line, NULL, 10);

        if (rt->asf_ctx) {
            int i;

            for (i = 0; i < rt->asf_ctx->nb_streams; i++) {
                if (s->streams[stream_index]->id == rt->asf_ctx->streams[i]->id) {
                    /* struct copy; extradata pointer moves to our stream, so
                     * clear it on the source to avoid a double free */
                    *s->streams[stream_index]->codec =
                        *rt->asf_ctx->streams[i]->codec;
                    rt->asf_ctx->streams[i]->codec->extradata_size = 0;
                    rt->asf_ctx->streams[i]->codec->extradata = NULL;
                    av_set_pts_info(s->streams[stream_index], 32, 1, 1000);
                }
            }
        }
    }

    return 0;
}
/* Per-stream RTP/ASF reassembly state. */
struct PayloadContext {
    ByteIOContext *pktbuf, pb; /* pktbuf: dynamic buffer collecting split packets; pb: packetizer over buf */
    char *buf;                 /* last reassembled ASF packet, owned here */
};
/**
* @return 0 when a packet was written into /p pkt, and no more data is left;
* 1 when a packet was written into /p pkt, and more packets might be left;
* <0 when not enough data was provided to return a full packet, or on error.
*/
static int asfrtp_parse_packet(AVFormatContext *s, PayloadContext *asf,
                               AVStream *st, AVPacket *pkt,
                               uint32_t *timestamp,
                               const uint8_t *buf, int len, int flags)
{
    ByteIOContext *pb = &asf->pb;
    int res, mflags, len_off;
    RTSPState *rt = s->priv_data;

    if (!rt->asf_ctx)
        return -1;

    if (len > 0) {
        int off, out_len;

        if (len < 4)
            return -1;

        /* parse the RTP payload header: flag byte + 24-bit length/offset,
         * followed by optional 32-bit fields selected by the flags */
        init_put_byte(pb, buf, len, 0, NULL, NULL, NULL, NULL);
        mflags = get_byte(pb);
        if (mflags & 0x80)
            flags |= RTP_FLAG_KEY;
        len_off = get_be24(pb);
        if (mflags & 0x20)   /**< relative timestamp */
            url_fskip(pb, 4);
        if (mflags & 0x10)   /**< has duration */
            url_fskip(pb, 4);
        if (mflags & 0x8)    /**< has location ID */
            url_fskip(pb, 4);
        off = url_ftell(pb);

        av_freep(&asf->buf);

        if (!(mflags & 0x40)) {
            /**
             * If 0x40 is not set, the len_off field specifies an offset of this
             * packet's payload data in the complete (reassembled) ASF packet.
             * This is used to spread one ASF packet over multiple RTP packets.
             */
            if (asf->pktbuf && len_off != url_ftell(asf->pktbuf)) {
                /* offset mismatch: drop the partially collected packet */
                uint8_t *p;
                url_close_dyn_buf(asf->pktbuf, &p);
                asf->pktbuf = NULL;
                av_free(p);
            }
            if (!len_off && !asf->pktbuf &&
                (res = url_open_dyn_buf(&asf->pktbuf)) < 0)
                return res;
            if (!asf->pktbuf)
                return AVERROR(EIO);

            put_buffer(asf->pktbuf, buf + off, len - off);
            if (!(flags & RTP_FLAG_MARKER))
                return -1;   /* more fragments to come */
            out_len     = url_close_dyn_buf(asf->pktbuf, &asf->buf);
            asf->pktbuf = NULL;
        } else {
            /**
             * If 0x40 is set, the len_off field specifies the length of the
             * next ASF packet that can be read from this payload data alone.
             * This is commonly the same as the payload size, but could be
             * less in case of packet splitting (i.e. multiple ASF packets in
             * one RTP packet).
             */
            if (len_off != len) {
                av_log_missing_feature(s,
                    "RTSP-MS packet splitting", 1);
                return -1;
            }
            /* NOTE(review): this allocation is used unchecked by the memcpy
             * below — TODO add ENOMEM handling */
            asf->buf = av_malloc(len - off);
            out_len  = len - off;
            memcpy(asf->buf, buf + off, len - off);
        }

        /* hand the reassembled ASF packet to the embedded ASF demuxer,
         * resuming at the saved position within the virtual stream */
        init_packetizer(pb, asf->buf, out_len);
        pb->pos += rt->asf_pb_pos;
        pb->eof_reached = 0;
        rt->asf_ctx->pb = pb;
    }

    for (;;) {
        int i;

        res = av_read_packet(rt->asf_ctx, pkt);
        rt->asf_pb_pos = url_ftell(pb);
        if (res != 0)
            break;
        /* map the ASF stream back to the corresponding RTSP stream by id */
        for (i = 0; i < s->nb_streams; i++) {
            if (s->streams[i]->id == rt->asf_ctx->streams[pkt->stream_index]->id) {
                pkt->stream_index = i;
                return 1; // FIXME: return 0 if last packet
            }
        }
        av_free_packet(pkt);
    }

    return res == 1 ? -1 : res;
}
/* Allocate a zero-initialized per-stream RTP/ASF payload context. */
static PayloadContext *asfrtp_new_context(void)
{
    PayloadContext *ctx = av_mallocz(sizeof(*ctx));
    return ctx;
}
/* Release a payload context: drop any half-assembled dynamic buffer,
 * the last reassembled packet, and the context itself. */
static void asfrtp_free_context(PayloadContext *asf)
{
    uint8_t *dyn_data = NULL;

    if (asf->pktbuf) {
        url_close_dyn_buf(asf->pktbuf, &dyn_data);
        asf->pktbuf = NULL;
        av_free(dyn_data);
    }
    av_freep(&asf->buf);
    av_free(asf);
}
/* Declare an RTPDynamicProtocolHandler for one x-asf-pf media type;
 * both audio and video share the same parsing callbacks. */
#define RTP_ASF_HANDLER(n, s, t) \
RTPDynamicProtocolHandler ff_ms_rtp_ ## n ## _handler = { \
    .enc_name         = s, \
    .codec_type       = t, \
    .codec_id         = CODEC_ID_NONE, \
    .parse_sdp_a_line = asfrtp_parse_sdp_line, \
    .open             = asfrtp_new_context, \
    .close            = asfrtp_free_context, \
    .parse_packet     = asfrtp_parse_packet, \
};
RTP_ASF_HANDLER(asf_pfv, "x-asf-pf", AVMEDIA_TYPE_VIDEO);
RTP_ASF_HANDLER(asf_pfa, "x-asf-pf", AVMEDIA_TYPE_AUDIO);
| 123linslouis-android-video-cutter | jni/libavformat/rtpdec_asf.c | C | asf20 | 9,094 |
/*
* Copyright (C) 2005 Matthieu CASTET
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdlib.h>
#include "libavcodec/get_bits.h"
#include "libavcodec/flac.h"
#include "avformat.h"
#include "oggdec.h"
#define OGG_FLAC_METADATA_TYPE_STREAMINFO 0x7F
/**
 * Parse an Ogg-FLAC header packet. STREAMINFO metadata configures the
 * stream (codec params, extradata, time base); a Vorbis comment block is
 * parsed into stream metadata.
 * @return 1 while header packets are being consumed, 0 once audio data
 *         starts (first byte 0xff), -1 on invalid/unsupported headers.
 */
static int
flac_header (AVFormatContext * s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    AVStream *st = s->streams[idx];
    GetBitContext gb;
    FLACStreaminfo si;
    int mdt;

    /* 0xff marks the start of an audio frame: headers are done */
    if (os->buf[os->pstart] == 0xff)
        return 0;

    init_get_bits(&gb, os->buf + os->pstart, os->psize*8);
    skip_bits1(&gb); /* metadata_last */
    mdt = get_bits(&gb, 7);

    if (mdt == OGG_FLAC_METADATA_TYPE_STREAMINFO) {
        uint8_t *streaminfo_start = os->buf + os->pstart + 5 + 4 + 4 + 4;
        skip_bits_long(&gb, 4*8); /* "FLAC" */
        if(get_bits(&gb, 8) != 1) /* unsupported major version */
            return -1;
        skip_bits_long(&gb, 8 + 16); /* minor version + header count */
        skip_bits_long(&gb, 4*8); /* "fLaC" */

        /* METADATA_BLOCK_HEADER */
        if (get_bits_long(&gb, 32) != FLAC_STREAMINFO_SIZE)
            return -1;

        ff_flac_parse_streaminfo(st->codec, &si, streaminfo_start);

        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = CODEC_ID_FLAC;

        st->codec->extradata =
            av_malloc(FLAC_STREAMINFO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
        /* FIX: allocation was previously passed to memcpy unchecked */
        if (!st->codec->extradata)
            return -1;
        memcpy(st->codec->extradata, streaminfo_start, FLAC_STREAMINFO_SIZE);
        st->codec->extradata_size = FLAC_STREAMINFO_SIZE;

        st->time_base.num = 1;
        st->time_base.den = st->codec->sample_rate;
    } else if (mdt == FLAC_METADATA_TYPE_VORBIS_COMMENT) {
        ff_vorbis_comment (s, &st->metadata, os->buf + os->pstart + 4, os->psize - 4);
    }

    return 1;
}
/* Header parser for the old "fLaC"-magic Ogg mapping: just tag the
 * stream as FLAC audio; there is no in-band STREAMINFO to read here. */
static int old_flac_header(AVFormatContext *s, int idx)
{
    AVCodecContext *codec = s->streams[idx]->codec;

    codec->codec_type = AVMEDIA_TYPE_AUDIO;
    codec->codec_id   = CODEC_ID_FLAC;
    return 0;
}
/* Ogg mapping for native FLAC-in-Ogg ("\177FLAC" magic). */
const struct ogg_codec ff_flac_codec = {
    .magic = "\177FLAC",
    .magicsize = 5,
    .header = flac_header
};
/* Ogg mapping for the legacy encapsulation ("fLaC" magic). */
const struct ogg_codec ff_old_flac_codec = {
    .magic = "fLaC",
    .magicsize = 4,
    .header = old_flac_header
};
| 123linslouis-android-video-cutter | jni/libavformat/oggparseflac.c | C | asf20 | 3,006 |
/*
* RTP packetization for MPEG video
* Copyright (c) 2002 Fabrice Bellard
* Copyright (c) 2007 Luca Abeni
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/mpegvideo.h"
#include "avformat.h"
#include "rtpenc.h"
/* NOTE: a single frame must be passed with sequence header if
needed. XXX: use slices. */
/* NOTE: a single frame must be passed with sequence header if
   needed. XXX: use slices. */
/**
 * Packetize one MPEG-1/2 video frame into RTP payloads (RFC 2250):
 * scan for slice start codes so fragments split on slice boundaries
 * where possible, and prepend the 4-byte MPEG video-specific header
 * (temporal reference, begin/end-of-slice and frame type bits).
 */
void ff_rtp_send_mpegvideo(AVFormatContext *s1, const uint8_t *buf1, int size)
{
    RTPMuxContext *s = s1->priv_data;
    int len, h, max_packet_size;
    uint8_t *q;
    const uint8_t *end = buf1 + size;
    int begin_of_slice, end_of_slice, frame_type, temporal_reference;

    max_packet_size = s->max_payload_size;
    begin_of_slice = 1;
    end_of_slice = 0;
    frame_type = 0;
    temporal_reference = 0;

    while (size > 0) {
        int begin_of_sequence;

        begin_of_sequence = 0;
        len = max_packet_size - 4;   /* reserve 4 bytes for the RFC 2250 header */

        if (len >= size) {
            /* whole remainder fits into one packet */
            len = size;
            end_of_slice = 1;
        } else {
            const uint8_t *r, *r1;
            int start_code;

            /* walk the start codes to find the best split point */
            r1 = buf1;
            while (1) {
                start_code = -1;
                r = ff_find_start_code(r1, end, &start_code);
                if((start_code & 0xFFFFFF00) == 0x100) {
                    /* New start code found */
                    if (start_code == 0x100) {
                        /* picture start code: extract frame type and
                         * temporal reference from the picture header */
                        frame_type = (r[1] & 0x38) >> 3;
                        temporal_reference = (int)r[0] << 2 | r[1] >> 6;
                    }
                    if (start_code == 0x1B8) {
                        /* GOP start code */
                        begin_of_sequence = 1;
                    }

                    if (r - buf1 - 4 <= len) {
                        /* The current slice fits in the packet */
                        if (begin_of_slice == 0) {
                            /* no slice at the beginning of the packet... */
                            end_of_slice = 1;
                            len = r - buf1 - 4;
                            break;
                        }
                        r1 = r;
                    } else {
                        if ((r1 - buf1 > 4) && (r - r1 < max_packet_size)) {
                            len = r1 - buf1 - 4;
                            end_of_slice = 1;
                        }
                        break;
                    }
                } else {
                    break;
                }
            }
        }

        /* build the 32-bit MPEG video-specific header */
        h = 0;
        h |= temporal_reference << 16;
        h |= begin_of_sequence << 13;
        h |= begin_of_slice << 12;
        h |= end_of_slice << 11;
        h |= frame_type << 8;

        q = s->buf;
        *q++ = h >> 24;
        *q++ = h >> 16;
        *q++ = h >> 8;
        *q++ = h;

        memcpy(q, buf1, len);
        q += len;

        /* 90kHz time stamp */
        s->timestamp = s->cur_timestamp;
        ff_rtp_send_data(s1, s->buf, q - s->buf, (len == size));

        buf1 += len;
        size -= len;

        begin_of_slice = end_of_slice;
        end_of_slice = 0;
    }
}
| 123linslouis-android-video-cutter | jni/libavformat/rtpenc_mpv.c | C | asf20 | 3,715 |
/*
* Musepack SV8 demuxer
* Copyright (c) 2007 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/get_bits.h"
#include "libavcodec/unary.h"
#include "avformat.h"
/// Two-byte MPC tag
#define MKMPCTAG(a, b) (a | (b << 8))
#define TAG_MPCK MKTAG('M','P','C','K')
/// Reserved MPC tags
enum MPCPacketTags{
TAG_STREAMHDR = MKMPCTAG('S','H'),
TAG_STREAMEND = MKMPCTAG('S','E'),
TAG_AUDIOPACKET = MKMPCTAG('A','P'),
TAG_SEEKTBLOFF = MKMPCTAG('S','O'),
TAG_SEEKTABLE = MKMPCTAG('S','T'),
TAG_REPLAYGAIN = MKMPCTAG('R','G'),
TAG_ENCINFO = MKMPCTAG('E','I'),
};
static const int mpc8_rate[8] = { 44100, 48000, 37800, 32000, -1, -1, -1, -1 };
typedef struct {
int ver;
int frame;
int64_t header_pos;
int64_t samples;
} MPCContext;
/**
 * Decode a byte-oriented variable-length value: each byte contributes its
 * low 7 bits, MSB first; a set high bit means "more bytes follow".
 * Advances *bs past the consumed bytes.
 * @return decoded value minus the number of bytes read, or -1 if more than
 *         10 bytes carry a continuation flag (invalid)
 */
static inline int64_t bs_get_v(uint8_t **bs)
{
    int64_t value  = 0;
    int     nbytes = 0;
    int     byte;
    for (;;) {
        byte  = *(*bs)++;
        value = (value << 7) | (byte & 0x7F);
        nbytes++;
        if (nbytes > 10)          /* longer than any valid encoding */
            return -1;
        if (!(byte & 0x80))       /* continuation bit clear: done */
            break;
    }
    return value - nbytes;
}
/**
 * Probe for a Musepack SV8 container: the "MPCK" tag followed by a chain
 * of chunks tagged with two uppercase ASCII letters.
 * @return probe score, 0 if the buffer is not MPC8
 */
static int mpc8_probe(AVProbeData *p)
{
    /* FIX: the previous bounds (bs_end past the real buffer end, loop
     * condition "bs < bs_end + 3") allowed reads beyond the probe buffer. */
    uint8_t *bs = p->buf + 4;
    uint8_t *bs_end = p->buf + p->buf_size;
    int64_t size;
    if (p->buf_size < 16)
        return 0;
    if (AV_RL32(p->buf) != TAG_MPCK)
        return 0;
    /* need at least a 2-byte tag plus a size byte ahead */
    while (bs + 3 < bs_end) {
        int header_found = (bs[0] == 'S' && bs[1] == 'H');
        if (bs[0] < 'A' || bs[0] > 'Z' || bs[1] < 'A' || bs[1] > 'Z')
            return 0;
        bs += 2;
        size = bs_get_v(&bs);
        if (size < 2)
            return 0;
        if (bs + size - 2 >= bs_end)
            return AVPROBE_SCORE_MAX / 4 - 1; //seems to be valid MPC but no header yet
        if (header_found) {
            /* stream header (SH) chunk: sanity-check size and CRC */
            if (size < 11 || size > 28)
                return 0;
            if (!AV_RL32(bs)) //zero CRC is invalid
                return 0;
            return AVPROBE_SCORE_MAX;
        } else {
            bs += size - 2;
        }
    }
    return 0;
}
/**
 * Decode a bit-oriented variable-length value: each 7-bit group is preceded
 * by a continuation flag bit; the group after the first 0 flag terminates
 * the value. Accumulation is capped below 64 bits.
 */
static inline int64_t gb_get_v(GetBitContext *gb)
{
    int64_t v = 0;
    int bits = 0;
    while(get_bits1(gb) && bits < 64-7){
        v <<= 7;
        v |= get_bits(gb, 7);
        bits += 7;
    }
    /* final 7-bit group (follows the terminating 0 flag) */
    v <<= 7;
    v |= get_bits(gb, 7);
    return v;
}
/**
 * Read an MPC8 chunk header: a 2-byte tag and a variable-length size.
 * The stored size covers the header itself, so the header length is
 * subtracted and *size reports the payload size only.
 */
static void mpc8_get_chunk_header(ByteIOContext *pb, int *tag, int64_t *size)
{
    int64_t chunk_start = url_ftell(pb);
    int64_t stored_size;
    *tag        = get_le16(pb);
    stored_size = ff_get_v(pb);
    *size       = stored_size - (url_ftell(pb) - chunk_start);
}
/**
 * Parse the MPC8 seek table chunk at file offset @p off and populate the
 * stream's index. The first two entries are absolute positions; the rest
 * are signed deltas predicted from the previous two entries.
 */
static void mpc8_parse_seektable(AVFormatContext *s, int64_t off)
{
    MPCContext *c = s->priv_data;
    int tag;
    int64_t size, pos, ppos[2];
    uint8_t *buf;
    int i, t, seekd;
    GetBitContext gb;
    url_fseek(s->pb, off, SEEK_SET);
    mpc8_get_chunk_header(s->pb, &tag, &size);
    if(tag != TAG_SEEKTABLE){
        av_log(s, AV_LOG_ERROR, "No seek table at given position\n");
        return;
    }
    if(!(buf = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE)))
        return;
    get_buffer(s->pb, buf, size);
    init_get_bits(&gb, buf, size * 8);
    size = gb_get_v(&gb);      /* number of table entries */
    if(size > UINT_MAX/4 || size > c->samples/1152){
        av_log(s, AV_LOG_ERROR, "Seek table is too big\n");
        av_free(buf);          /* FIX: buf was leaked on this error path */
        return;
    }
    seekd = get_bits(&gb, 4);  /* log2 of frames per seek-table entry */
    /* first two entries: absolute positions relative to the stream start */
    for(i = 0; i < 2; i++){
        pos = gb_get_v(&gb) + c->header_pos;
        ppos[1 - i] = pos;
        av_add_index_entry(s->streams[0], pos, i, 0, 0, AVINDEX_KEYFRAME);
    }
    /* remaining entries: zigzag-coded deltas from a linear prediction of
     * the previous two positions */
    for(; i < size; i++){
        t = get_unary(&gb, 1, 33) << 12;
        t += get_bits(&gb, 12);
        if(t & 1)
            t = -(t & ~1);
        pos = (t >> 1) + ppos[0]*2 - ppos[1];
        av_add_index_entry(s->streams[0], pos, i << seekd, 0, 0, AVINDEX_KEYFRAME);
        ppos[1] = ppos[0];
        ppos[0] = pos;
    }
    av_free(buf);
}
/**
 * Consume a non-audio chunk. A seek-table-offset chunk triggers parsing of
 * the referenced seek table (restoring the read position afterwards);
 * anything else is skipped.
 */
static void mpc8_handle_chunk(AVFormatContext *s, int tag, int64_t chunk_pos, int64_t size)
{
    ByteIOContext *pb = s->pb;
    if (tag == TAG_SEEKTBLOFF) {
        int64_t next_chunk = url_ftell(pb) + size;
        int64_t table_off  = ff_get_v(pb);
        mpc8_parse_seektable(s, chunk_pos + table_off);
        url_fseek(pb, next_chunk, SEEK_SET);
    } else {
        url_fskip(pb, size);
    }
}
/**
 * Read the MPC8 file header: verify the "MPCK" tag, walk chunks until the
 * stream header (SH) is found, and set up the single audio stream from it.
 * @return 0 on success, negative error code otherwise
 */
static int mpc8_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    MPCContext *c = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st;
    int tag = 0;
    int64_t size, pos;
    c->header_pos = url_ftell(pb);
    if(get_le32(pb) != TAG_MPCK){
        av_log(s, AV_LOG_ERROR, "Not a Musepack8 file\n");
        return -1;
    }
    /* scan chunks until the stream header */
    while(!url_feof(pb)){
        pos = url_ftell(pb);
        mpc8_get_chunk_header(pb, &tag, &size);
        if(tag == TAG_STREAMHDR)
            break;
        mpc8_handle_chunk(s, tag, pos, size);
    }
    if(tag != TAG_STREAMHDR){
        av_log(s, AV_LOG_ERROR, "Stream header not found\n");
        return -1;
    }
    pos = url_ftell(pb);
    url_fskip(pb, 4); //CRC
    c->ver = get_byte(pb);
    if(c->ver != 8){
        av_log(s, AV_LOG_ERROR, "Unknown stream version %d\n", c->ver);
        return -1;
    }
    c->samples = ff_get_v(pb);
    ff_get_v(pb); //silence samples at the beginning
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = CODEC_ID_MUSEPACK8;
    st->codec->bits_per_coded_sample = 16;
    st->codec->extradata_size = 2;
    st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!st->codec->extradata)  /* FIX: was dereferenced below without a check */
        return AVERROR(ENOMEM);
    get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
    /* extradata byte 0 bits 5-7: sample rate index; byte 1 bits 4-7:
     * channel count - 1; byte 1 bits 0-1: log4 of frame length factor */
    st->codec->channels = (st->codec->extradata[1] >> 4) + 1;
    st->codec->sample_rate = mpc8_rate[st->codec->extradata[0] >> 5];
    av_set_pts_info(st, 32, 1152 << (st->codec->extradata[1]&3)*2, st->codec->sample_rate);
    st->duration = c->samples / (1152 << (st->codec->extradata[1]&3)*2);
    size -= url_ftell(pb) - pos;
    return 0;
}
/**
 * Return the next audio packet, transparently handling any non-audio
 * chunks encountered on the way. pts is the current frame counter.
 */
static int mpc8_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MPCContext *c = s->priv_data;
    int tag;
    int64_t chunk_pos, chunk_size;
    while (!url_feof(s->pb)) {
        chunk_pos = url_ftell(s->pb);
        mpc8_get_chunk_header(s->pb, &tag, &chunk_size);
        if (chunk_size < 0)
            return -1;
        switch (tag) {
        case TAG_AUDIOPACKET:
            if (av_get_packet(s->pb, pkt, chunk_size) < 0)
                return AVERROR(ENOMEM);
            pkt->stream_index = 0;
            pkt->pts          = c->frame;
            return 0;
        case TAG_STREAMEND:
            return AVERROR(EIO);
        default:
            mpc8_handle_chunk(s, tag, chunk_pos, chunk_size);
        }
    }
    return 0;
}
/**
 * Seek using the index built from the seek table: jump to the indexed
 * chunk position and resynchronize the frame counter used for pts.
 */
static int mpc8_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    AVStream *st = s->streams[stream_index];
    MPCContext *c = s->priv_data;
    int idx = av_index_search_timestamp(st, timestamp, flags);
    if (idx < 0)
        return -1;
    url_fseek(s->pb, st->index_entries[idx].pos, SEEK_SET);
    c->frame = st->index_entries[idx].timestamp;
    return 0;
}
AVInputFormat mpc8_demuxer = {
    "mpc8",                               /* short name */
    NULL_IF_CONFIG_SMALL("Musepack SV8"), /* long name */
    sizeof(MPCContext),                   /* priv_data size */
    mpc8_probe,
    mpc8_read_header,
    mpc8_read_packet,
    NULL,                                 /* no close callback needed */
    mpc8_read_seek,
};
| 123linslouis-android-video-cutter | jni/libavformat/mpc8.c | C | asf20 | 7,880 |
/*
* Bethsoft VID format Demuxer
* Copyright (c) 2007 Nicholas Tung
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* @brief Bethesda Softworks VID (.vid) file demuxer
* @author Nicholas Tung [ntung (at. ntung com] (2007-03)
* @sa http://wiki.multimedia.cx/index.php?title=Bethsoft_VID
* @sa http://www.svatopluk.com/andux/docs/dfvid.html
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "libavcodec/bethsoftvideo.h"
/** Demuxer state for Bethesda Softworks VID files. */
typedef struct BVID_DemuxContext
{
    int nframes;                 ///< frame count from the file header; decremented per video frame read
    /** delay value between frames, added to individual frame delay.
     * custom units, which will be added to other custom units (~=16ms according
     * to free, unofficial documentation) */
    int bethsoft_global_delay;
    /** video presentation time stamp.
     * delay = 16 milliseconds * (global_delay + per_frame_delay) */
    int video_pts;
    int is_finished;             ///< set when the EOF block is seen; further reads fail
} BVID_DemuxContext;
/** Probe: a Bethsoft VID file starts with the little-endian tag "VID\0". */
static int vid_probe(AVProbeData *p)
{
    if (AV_RL32(p->buf) == MKTAG('V', 'I', 'D', 0))
        return AVPROBE_SCORE_MAX;
    return 0;
}
/**
 * Read the VID file header and create the fixed pair of streams:
 * stream 0 = Bethsoft video, stream 1 = mono unsigned 8-bit PCM audio
 * (the true sample rate arrives later in the first audio block).
 */
static int vid_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    BVID_DemuxContext *vid = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *stream;
    /* load main header. Contents:
     *   bytes:  'V' 'I' 'D'
     *   int16s: always_512, nframes, width, height, delay, always_14 */
    url_fseek(pb, 5, SEEK_CUR);
    vid->nframes = get_le16(pb);
    stream = av_new_stream(s, 0);
    if (!stream)
        return AVERROR(ENOMEM);
    av_set_pts_info(stream, 32, 1, 60);     // 16 ms increments, i.e. 60 fps
    stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    stream->codec->codec_id = CODEC_ID_BETHSOFTVID;
    stream->codec->width = get_le16(pb);
    stream->codec->height = get_le16(pb);
    stream->codec->pix_fmt = PIX_FMT_PAL8;
    vid->bethsoft_global_delay = get_le16(pb);
    get_le16(pb);                           // skip the always_14 field
    // done with video codec, set up audio codec
    stream = av_new_stream(s, 0);
    if (!stream)
        return AVERROR(ENOMEM);
    stream->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    stream->codec->codec_id = CODEC_ID_PCM_U8;
    stream->codec->channels = 1;
    stream->codec->sample_rate = 11025;     // placeholder; refined by FIRST_AUDIO_BLOCK
    stream->codec->bits_per_coded_sample = 8;
    stream->codec->bit_rate = stream->codec->channels * stream->codec->sample_rate * stream->codec->bits_per_coded_sample;
    return 0;
}
#define BUFFER_PADDING_SIZE 1000
/**
 * Read one RLE-coded video frame into a growable buffer and wrap it in a
 * packet for stream 0. The block type byte and (for VIDEO_YOFF_P_FRAME)
 * the 2-byte y offset are kept at the front of the packet for the decoder.
 * @return number of bytes packetized, or a negative error code
 */
static int read_frame(BVID_DemuxContext *vid, ByteIOContext *pb, AVPacket *pkt,
                      uint8_t block_type, AVFormatContext *s, int npixels)
{
    uint8_t * vidbuf_start = NULL;
    uint8_t * new_buf;
    int vidbuf_nbytes = 0;
    int code;
    int bytes_copied = 0;
    int position;
    unsigned int vidbuf_capacity;
    vidbuf_start = av_malloc(vidbuf_capacity = BUFFER_PADDING_SIZE);
    if(!vidbuf_start)
        return AVERROR(ENOMEM);
    // save the file position for the packet, include block type
    position = url_ftell(pb) - 1;
    vidbuf_start[vidbuf_nbytes++] = block_type;
    // get the video delay (next int16), and set the presentation time
    vid->video_pts += vid->bethsoft_global_delay + get_le16(pb);
    // set the y offset if it exists (decoder header data should be in data section)
    if(block_type == VIDEO_YOFF_P_FRAME){
        if(get_buffer(pb, &vidbuf_start[vidbuf_nbytes], 2) != 2)
            goto fail;
        vidbuf_nbytes += 2;
    }
    do{
        /* FIX: assigning av_fast_realloc() straight back to vidbuf_start
         * leaked the old buffer when reallocation failed (the old pointer
         * stays valid but was overwritten with NULL). */
        new_buf = av_fast_realloc(vidbuf_start, &vidbuf_capacity, vidbuf_nbytes + BUFFER_PADDING_SIZE);
        if(!new_buf){
            av_free(vidbuf_start);
            return AVERROR(ENOMEM);
        }
        vidbuf_start = new_buf;
        code = get_byte(pb);
        vidbuf_start[vidbuf_nbytes++] = code;
        if(code >= 0x80){ // rle sequence
            if(block_type == VIDEO_I_FRAME)
                vidbuf_start[vidbuf_nbytes++] = get_byte(pb);
        } else if(code){ // plain sequence
            if(get_buffer(pb, &vidbuf_start[vidbuf_nbytes], code) != code)
                goto fail;
            vidbuf_nbytes += code;
        }
        bytes_copied += code & 0x7F;
        if(bytes_copied == npixels){ // sometimes no stop character is given, need to keep track of bytes copied
            // may contain a 0 byte even if read all pixels
            if(get_byte(pb))
                url_fseek(pb, -1, SEEK_CUR);
            break;
        }
        if(bytes_copied > npixels)  // corrupt frame: wrote past the image
            goto fail;
    } while(code);
    // copy data into packet
    if(av_new_packet(pkt, vidbuf_nbytes) < 0)
        goto fail;
    memcpy(pkt->data, vidbuf_start, vidbuf_nbytes);
    av_free(vidbuf_start);
    pkt->pos = position;
    pkt->stream_index = 0;  // use the video decoder, which was initialized as the first stream
    pkt->pts = vid->video_pts;
    vid->nframes--;  // used to check if all the frames were read
    return vidbuf_nbytes;
fail:
    av_free(vidbuf_start);
    return -1;
}
/**
 * Demux the next block: palette, audio, or video. Block type is given by
 * the first byte. Palette packets go to stream 0 (consumed by the video
 * decoder), audio to stream 1.
 */
static int vid_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    BVID_DemuxContext *vid = s->priv_data;
    ByteIOContext *pb = s->pb;
    unsigned char block_type;
    int audio_length;
    int ret_value;
    if(vid->is_finished || url_feof(pb))
        return AVERROR(EIO);
    block_type = get_byte(pb);
    switch(block_type){
        case PALETTE_BLOCK:
            url_fseek(pb, -1, SEEK_CUR);     // include block type
            ret_value = av_get_packet(pb, pkt, 3 * 256 + 1);  // 256 RGB triplets + type byte
            if(ret_value != 3 * 256 + 1){
                av_free_packet(pkt);
                return AVERROR(EIO);
            }
            pkt->stream_index = 0;
            return ret_value;
        case FIRST_AUDIO_BLOCK:
            get_le16(pb);
            // soundblaster DAC used for sample rate, as on specification page (link above)
            s->streams[1]->codec->sample_rate = 1000000 / (256 - get_byte(pb));
            s->streams[1]->codec->bit_rate = s->streams[1]->codec->channels * s->streams[1]->codec->sample_rate * s->streams[1]->codec->bits_per_coded_sample;
            /* fall through: the first audio block carries sample data too */
        case AUDIO_BLOCK:
            audio_length = get_le16(pb);
            ret_value = av_get_packet(pb, pkt, audio_length);
            pkt->stream_index = 1;
            return ret_value != audio_length ? AVERROR(EIO) : ret_value;
        case VIDEO_P_FRAME:
        case VIDEO_YOFF_P_FRAME:
        case VIDEO_I_FRAME:
            return read_frame(vid, pb, pkt, block_type, s,
                              s->streams[0]->codec->width * s->streams[0]->codec->height);
        case EOF_BLOCK:
            if(vid->nframes != 0)
                av_log(s, AV_LOG_VERBOSE, "reached terminating character but not all frames read.\n");
            vid->is_finished = 1;
            return AVERROR(EIO);
        default:
            av_log(s, AV_LOG_ERROR, "unknown block (character = %c, decimal = %d, hex = %x)!!!\n",
                   block_type, block_type, block_type); return -1;
    }
    return 0;
}
AVInputFormat bethsoftvid_demuxer = {
    "bethsoftvid",                                         /* short name */
    NULL_IF_CONFIG_SMALL("Bethesda Softworks VID format"), /* long name */
    sizeof(BVID_DemuxContext),                             /* priv_data size */
    vid_probe,
    vid_read_header,
    vid_read_packet,
};
| 123linslouis-android-video-cutter | jni/libavformat/bethsoftvid.c | C | asf20 | 7,860 |
/*
* IEC958 muxer
* Copyright (c) 2009 Bartlomiej Wolowiec
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* IEC-61937 encapsulation of various formats, used by S/PDIF
* @author Bartlomiej Wolowiec
*/
/*
* Terminology used in specification:
* data-burst - IEC958 frame, contains header and encapsuled frame
* burst-preambule - IEC958 frame header, contains 16-bits words named Pa, Pb, Pc and Pd
* burst-payload - encapsuled frame
* Pa, Pb - syncword - 0xF872, 0x4E1F
* Pc - burst-info, contains data-type (bits 0-6), error flag (bit 7), data-type-dependent info (bits 8-12)
* and bitstream number (bits 13-15)
* data-type - determines type of encapsuled frames
* Pd - length code (number of bits or bytes of encapsuled frame - according to data_type)
*
* IEC958 frames at normal usage start every specific count of bytes,
* dependent from data-type (spaces between packets are filled by zeros)
*/
#include "avformat.h"
#include "libavcodec/ac3.h"
#include "libavcodec/dca.h"
#include "libavcodec/aac_parser.h"
#define SYNCWORD1 0xF872
#define SYNCWORD2 0x4E1F
#define BURST_HEADER_SIZE 0x8
/* Data-type codes carried in the low bits of the Pc burst-info word. */
enum IEC958DataType {
    IEC958_AC3                = 0x01,          ///< AC-3 data
    IEC958_MPEG1_LAYER1       = 0x04,          ///< MPEG-1 layer 1
    IEC958_MPEG1_LAYER23      = 0x05,          ///< MPEG-1 layer 2 or 3 data or MPEG-2 without extension
    IEC958_MPEG2_EXT          = 0x06,          ///< MPEG-2 data with extension
    IEC958_MPEG2_AAC          = 0x07,          ///< MPEG-2 AAC ADTS
    IEC958_MPEG2_LAYER1_LSF   = 0x08,          ///< MPEG-2, layer-1 low sampling frequency
    IEC958_MPEG2_LAYER2_LSF   = 0x09,          ///< MPEG-2, layer-2 low sampling frequency
    IEC958_MPEG2_LAYER3_LSF   = 0x0A,          ///< MPEG-2, layer-3 low sampling frequency
    IEC958_DTS1               = 0x0B,          ///< DTS type I (512 samples)
    IEC958_DTS2               = 0x0C,          ///< DTS type II (1024 samples)
    IEC958_DTS3               = 0x0D,          ///< DTS type III (2048 samples)
    IEC958_MPEG2_AAC_LSF_2048 = 0x13,          ///< MPEG-2 AAC ADTS half-rate low sampling frequency
    IEC958_MPEG2_AAC_LSF_4096 = 0x13 | 0x20,   ///< MPEG-2 AAC ADTS quarter-rate low sampling frequency
};
/* Per-muxer state; filled in by the codec-specific header_info callback. */
typedef struct IEC958Context {
    enum IEC958DataType data_type;  ///< burst info - reference to type of payload of the data-burst
    int pkt_size;                   ///< length code in bits
    int pkt_offset;                 ///< data burst repetition period in bytes
    uint8_t *buffer;                ///< allocated buffer, used for swap bytes
    int buffer_size;                ///< size of allocated buffer
    /// function, which generates codec dependent header information.
    /// Sets data_type and data_offset
    int (*header_info) (AVFormatContext *s, AVPacket *pkt);
} IEC958Context;
//TODO move to DSP
/* Byte-swap w 16-bit words from src into dst (assumes the buffers do not
 * overlap — TODO confirm at call sites). The original's manual eight-way
 * unroll is folded into one loop; the output is identical word for word. */
static void bswap_buf16(uint16_t *dst, const uint16_t *src, int w)
{
    int i;
    for (i = 0; i < w; i++)
        dst[i] = bswap_16(src[i]);
}
/**
 * Burst info for AC-3: data type 0x01, with the frame's bitstream mode
 * placed in the data-type-dependent bits (8+) of Pc.
 */
static int spdif_header_ac3(AVFormatContext *s, AVPacket *pkt)
{
    IEC958Context *ctx = s->priv_data;
    const int bsmod = pkt->data[6] & 0x07;   /* bitstream mode from the frame header */
    ctx->data_type  = IEC958_AC3 | (bsmod << 8);
    ctx->pkt_offset = AC3_FRAME_SIZE << 2;
    return 0;
}
/**
 * Burst info for DTS: the IEC958 data type depends on the number of PCM
 * samples per DTS frame (512/1024/2048). The sample-block count field sits
 * at different byte/bit offsets depending on the framing variant, which is
 * identified by the sync word.
 */
static int spdif_header_dts(AVFormatContext *s, AVPacket *pkt)
{
    IEC958Context *ctx = s->priv_data;
    uint32_t syncword_dts = AV_RB32(pkt->data);
    int blocks;
    switch (syncword_dts) {
    case DCA_MARKER_RAW_BE:
        blocks = (AV_RB16(pkt->data + 4) >> 2) & 0x7f;
        break;
    case DCA_MARKER_RAW_LE:
        blocks = (AV_RL16(pkt->data + 4) >> 2) & 0x7f;
        break;
    case DCA_MARKER_14B_BE:
        blocks =
            (((pkt->data[5] & 0x07) << 4) | ((pkt->data[6] & 0x3f) >> 2));
        break;
    case DCA_MARKER_14B_LE:
        blocks =
            (((pkt->data[4] & 0x07) << 4) | ((pkt->data[7] & 0x3f) >> 2));
        break;
    default:
        av_log(s, AV_LOG_ERROR, "bad DTS syncword 0x%x\n", syncword_dts);
        return -1;
    }
    blocks++;   /* the field stores (block count - 1) */
    /* each block is 32 samples; map the total sample count to a data type */
    switch (blocks) {
    case 512 >> 5: ctx->data_type = IEC958_DTS1; break;
    case 1024 >> 5: ctx->data_type = IEC958_DTS2; break;
    case 2048 >> 5: ctx->data_type = IEC958_DTS3; break;
    default:
        av_log(s, AV_LOG_ERROR, "%i samples in DTS frame not supported\n",
               blocks << 5);
        return -1;
    }
    ctx->pkt_offset = blocks << 7;   /* (blocks * 32) samples * 4 bytes */
    return 0;
}
/* Pc data-type codes, indexed by [is_mpeg1][layer index 0..2 = layers I..III]
 * (see spdif_header_mpeg for how the indices are derived). */
static const enum IEC958DataType mpeg_data_type[2][3] = {
    //     LAYER1                    LAYER2                    LAYER3
    { IEC958_MPEG2_LAYER1_LSF, IEC958_MPEG2_LAYER2_LSF, IEC958_MPEG2_LAYER3_LSF }, //MPEG2 LSF
    { IEC958_MPEG1_LAYER1,     IEC958_MPEG1_LAYER23,    IEC958_MPEG1_LAYER23 },    //MPEG1
};
/* Data-burst repetition period in bytes, same indexing as mpeg_data_type. */
static const uint16_t mpeg_pkt_offset[2][3] = {
    //LAYER1 LAYER2 LAYER3
    { 3072, 9216, 4608 }, // MPEG2 LSF
    { 1536, 4608, 4608 }, // MPEG1
};
/**
 * Burst info for MPEG audio: the data type and repetition period are
 * selected by the version/layer bits of the frame header, with MPEG-2 +
 * extension as a special case.
 */
static int spdif_header_mpeg(AVFormatContext *s, AVPacket *pkt)
{
    IEC958Context *ctx = s->priv_data;
    int version   = (pkt->data[1] >> 3) & 3;
    int layer     = 3 - ((pkt->data[1] >> 1) & 3);
    int extension = pkt->data[2] & 1;
    /* layer == 3 and version == 1 correspond to reserved header bits */
    if (version == 1 || layer == 3) {
        av_log(s, AV_LOG_ERROR, "Wrong MPEG file format\n");
        return -1;
    }
    av_log(s, AV_LOG_DEBUG, "version: %i layer: %i extension: %i\n", version, layer, extension);
    if (version != 2 || !extension) {
        ctx->data_type  = mpeg_data_type [version & 1][layer];
        ctx->pkt_offset = mpeg_pkt_offset[version & 1][layer];
    } else {
        ctx->data_type  = IEC958_MPEG2_EXT;
        ctx->pkt_offset = 4608;
    }
    // TODO Data type dependant info (normal/karaoke, dynamic range control)
    return 0;
}
/**
 * Burst info for AAC: parse the ADTS header to learn the number of samples
 * (sets the repetition period) and the number of raw AAC frames per ADTS
 * frame (selects the data type).
 */
static int spdif_header_aac(AVFormatContext *s, AVPacket *pkt)
{
    IEC958Context *ctx = s->priv_data;
    AACADTSHeaderInfo hdr;
    GetBitContext gbc;
    int ret;
    init_get_bits(&gbc, pkt->data, AAC_ADTS_HEADER_SIZE * 8);
    ret = ff_aac_parse_header(&gbc, &hdr);
    if (ret < 0) {
        av_log(s, AV_LOG_ERROR, "Wrong AAC file format\n");
        return -1;
    }
    ctx->pkt_offset = hdr.samples << 2;   /* samples * 4 bytes */
    switch (hdr.num_aac_frames) {
    case 1:
        ctx->data_type = IEC958_MPEG2_AAC;
        break;
    case 2:
        ctx->data_type = IEC958_MPEG2_AAC_LSF_2048;
        break;
    case 4:
        ctx->data_type = IEC958_MPEG2_AAC_LSF_4096;
        break;
    default:
        av_log(s, AV_LOG_ERROR, "%i samples in AAC frame not supported\n",
               hdr.samples);
        return -1;
    }
    //TODO Data type dependent info (LC profile/SBR)
    return 0;
}
/**
 * Select the codec-specific burst-info callback for the single input
 * stream; fail for codecs this muxer cannot encapsulate.
 */
static int spdif_write_header(AVFormatContext *s)
{
    IEC958Context *ctx = s->priv_data;
    enum CodecID id = s->streams[0]->codec->codec_id;
    if (id == CODEC_ID_AC3) {
        ctx->header_info = spdif_header_ac3;
    } else if (id == CODEC_ID_MP1 || id == CODEC_ID_MP2 || id == CODEC_ID_MP3) {
        ctx->header_info = spdif_header_mpeg;
    } else if (id == CODEC_ID_DTS) {
        ctx->header_info = spdif_header_dts;
    } else if (id == CODEC_ID_AAC) {
        ctx->header_info = spdif_header_aac;
    } else {
        av_log(s, AV_LOG_ERROR, "codec not supported\n");
        return -1;
    }
    return 0;
}
/* Free the byte-swap scratch buffer allocated lazily in spdif_write_packet(). */
static int spdif_write_trailer(AVFormatContext *s)
{
    IEC958Context *ctx = s->priv_data;
    av_freep(&ctx->buffer);
    return 0;
}
/**
 * Wrap one encoded frame into an IEC958 data-burst: 4-word preamble
 * (Pa Pb Pc Pd), big-endian payload, then zero padding up to the burst
 * repetition period set by the codec's header_info callback.
 */
static int spdif_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    IEC958Context *ctx = s->priv_data;
    int ret, padding;
    ctx->pkt_size = FFALIGN(pkt->size, 2) << 3;   /* Pd: payload length in bits */
    ret = ctx->header_info(s, pkt);
    if (ret < 0)
        return -1;
    /* number of 16-bit zero words needed to fill the repetition period */
    padding = (ctx->pkt_offset - BURST_HEADER_SIZE - pkt->size) >> 1;
    if (padding < 0) {
        av_log(s, AV_LOG_ERROR, "bitrate is too high\n");
        return -1;
    }
    put_le16(s->pb, SYNCWORD1); //Pa
    put_le16(s->pb, SYNCWORD2); //Pb
    put_le16(s->pb, ctx->data_type); //Pc
    put_le16(s->pb, ctx->pkt_size); //Pd
#if HAVE_BIGENDIAN
    put_buffer(s->pb, pkt->data, pkt->size & ~1);
#else
    /* payload words must be big-endian: swap through a scratch buffer */
    av_fast_malloc(&ctx->buffer, &ctx->buffer_size, pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!ctx->buffer)
        return AVERROR(ENOMEM);
    bswap_buf16((uint16_t *)ctx->buffer, (uint16_t *)pkt->data, pkt->size >> 1);
    put_buffer(s->pb, ctx->buffer, pkt->size & ~1);
#endif
    /* odd-sized payload: emit the trailing byte in the high half of a word */
    if (pkt->size & 1)
        put_be16(s->pb, pkt->data[pkt->size - 1]);
    for (; padding > 0; padding--)
        put_be16(s->pb, 0);
    av_log(s, AV_LOG_DEBUG, "type=%x len=%i pkt_offset=%i\n",
           ctx->data_type, pkt->size, ctx->pkt_offset);
    put_flush_packet(s->pb);
    return 0;
}
AVOutputFormat spdif_muxer = {
    "spdif",                                             /* short name */
    NULL_IF_CONFIG_SMALL("IEC958 - S/PDIF (IEC-61937)"), /* long name */
    NULL,                                                /* mime type */
    "spdif",                                             /* extensions */
    sizeof(IEC958Context),                               /* priv_data size */
    CODEC_ID_AC3,                                        /* default audio codec */
    CODEC_ID_NONE,                                       /* no video */
    spdif_write_header,
    spdif_write_packet,
    spdif_write_trailer,
};
| 123linslouis-android-video-cutter | jni/libavformat/spdif.c | C | asf20 | 10,076 |
/*
* ADTS muxer.
* Copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@smartjog.com>
* Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/get_bits.h"
#include "libavcodec/put_bits.h"
#include "libavcodec/avcodec.h"
#include "avformat.h"
#include "adts.h"
/**
 * Parse MPEG-4 AudioSpecificConfig extradata and fill the ADTSContext
 * fields (object type, sample rate index, channel configuration, optional
 * PCE) needed to emit ADTS frame headers; rejects configurations that
 * ADTS cannot represent.
 * @return 0 on success, -1 on an unrepresentable configuration
 */
int ff_adts_decode_extradata(AVFormatContext *s, ADTSContext *adts, uint8_t *buf, int size)
{
    GetBitContext gb;
    PutBitContext pb;
    init_get_bits(&gb, buf, size * 8);
    adts->objecttype = get_bits(&gb, 5) - 1;
    adts->sample_rate_index = get_bits(&gb, 4);
    adts->channel_conf = get_bits(&gb, 4);
    /* ADTS has a 2-bit profile field, so only AOTs 1-4 fit */
    if (adts->objecttype > 3U) {
        av_log(s, AV_LOG_ERROR, "MPEG-4 AOT %d is not allowed in ADTS\n", adts->objecttype+1);
        return -1;
    }
    if (adts->sample_rate_index == 15) {
        av_log(s, AV_LOG_ERROR, "Escape sample rate index illegal in ADTS\n");
        return -1;
    }
    if (get_bits(&gb, 1)) {
        av_log(s, AV_LOG_ERROR, "960/120 MDCT window is not allowed in ADTS\n");
        return -1;
    }
    if (get_bits(&gb, 1)) {
        av_log(s, AV_LOG_ERROR, "Scalable configurations are not allowed in ADTS\n");
        return -1;
    }
    if (get_bits(&gb, 1)) {
        av_log_missing_feature(s, "Signaled SBR or PS", 0);
        return -1;
    }
    /* channel_conf == 0: channel layout comes from an in-band PCE; copy it
     * so it can be written after the first frame header */
    if (!adts->channel_conf) {
        init_put_bits(&pb, adts->pce_data, MAX_PCE_SIZE);
        put_bits(&pb, 3, 5); //ID_PCE
        adts->pce_size = (ff_copy_pce_data(&pb, &gb) + 3) / 8;
        flush_put_bits(&pb);
    }
    adts->write_adts = 1;
    return 0;
}
/**
 * Decode the stream's AudioSpecificConfig extradata (when present) so the
 * per-frame ADTS headers can be generated later.
 */
static int adts_write_header(AVFormatContext *s)
{
    ADTSContext *adts = s->priv_data;
    AVCodecContext *avc = s->streams[0]->codec;
    if (avc->extradata_size > 0)
        return ff_adts_decode_extradata(s, adts, avc->extradata, avc->extradata_size) < 0 ? -1 : 0;
    return 0;
}
/**
 * Write an ADTS frame header for a raw AAC frame of @p size bytes (plus
 * @p pce_size bytes of PCE data counted in the frame length) into @p buf.
 * @return 0
 */
int ff_adts_write_frame_header(ADTSContext *ctx,
                               uint8_t *buf, int size, int pce_size)
{
    PutBitContext pb;
    init_put_bits(&pb, buf, ADTS_HEADER_SIZE);
    /* adts_fixed_header */
    put_bits(&pb, 12, 0xfff);   /* syncword */
    put_bits(&pb, 1, 0);        /* ID */
    put_bits(&pb, 2, 0);        /* layer */
    put_bits(&pb, 1, 1);        /* protection_absent */
    put_bits(&pb, 2, ctx->objecttype); /* profile_objecttype */
    put_bits(&pb, 4, ctx->sample_rate_index);
    put_bits(&pb, 1, 0);        /* private_bit */
    put_bits(&pb, 3, ctx->channel_conf); /* channel_configuration */
    put_bits(&pb, 1, 0);        /* original_copy */
    put_bits(&pb, 1, 0);        /* home */
    /* adts_variable_header */
    put_bits(&pb, 1, 0);        /* copyright_identification_bit */
    put_bits(&pb, 1, 0);        /* copyright_identification_start */
    put_bits(&pb, 13, ADTS_HEADER_SIZE + size + pce_size); /* aac_frame_length */
    put_bits(&pb, 11, 0x7ff);   /* adts_buffer_fullness */
    put_bits(&pb, 2, 0);        /* number_of_raw_data_blocks_in_frame */
    flush_put_bits(&pb);
    return 0;
}
/**
 * Write one AAC frame: when ADTS headers are enabled, prefix the raw frame
 * with a freshly generated header and, once, the pending PCE data.
 */
static int adts_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ADTSContext *adts = s->priv_data;
    ByteIOContext *pb = s->pb;
    uint8_t hdr[ADTS_HEADER_SIZE];
    if (!pkt->size)
        return 0;
    if (adts->write_adts) {
        ff_adts_write_frame_header(adts, hdr, pkt->size, adts->pce_size);
        put_buffer(pb, hdr, ADTS_HEADER_SIZE);
        if (adts->pce_size) {
            /* the PCE is emitted exactly once, after the first header */
            put_buffer(pb, adts->pce_data, adts->pce_size);
            adts->pce_size = 0;
        }
    }
    put_buffer(pb, pkt->data, pkt->size);
    put_flush_packet(pb);
    return 0;
}
AVOutputFormat adts_muxer = {
    "adts",                          /* short name */
    NULL_IF_CONFIG_SMALL("ADTS AAC"),/* long name */
    "audio/aac",                     /* mime type */
    "aac,adts",                      /* extensions */
    sizeof(ADTSContext),             /* priv_data size */
    CODEC_ID_AAC,                    /* default audio codec */
    CODEC_ID_NONE,                   /* no video */
    adts_write_header,
    adts_write_packet,
};
| 123linslouis-android-video-cutter | jni/libavformat/adtsenc.c | C | asf20 | 4,577 |
/*
* WAV muxer and demuxer
* Copyright (c) 2001, 2002 Fabrice Bellard
*
* Sony Wave64 demuxer
* RF64 demuxer
* Copyright (c) 2009 Daniel Verkamp
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "raw.h"
#include "riff.h"
/* Shared state for the WAV muxer and the WAV/W64/RF64 demuxers. */
typedef struct {
    int64_t data;           ///< muxer: offset of the "data" tag, patched in the trailer
    int64_t data_end;       ///< demuxer: file offset where the data chunk ends
    int64_t minpts;         ///< muxer: smallest pts seen
    int64_t maxpts;         ///< muxer: largest pts seen
    int last_duration;      ///< muxer: duration of the last packet written
    int w64;                ///< demuxer: nonzero for Sony Wave64 input (presumably set by the w64 demuxer — not visible here)
} WAVContext;
#if CONFIG_WAV_MUXER
/**
 * Write the RIFF/WAVE header: "fmt " chunk, an empty "fact" chunk for
 * non-PCM streams (filled in by the trailer), and open the "data" chunk.
 * @return 0 on success, -1 if the codec cannot be stored in WAVE
 */
static int wav_write_header(AVFormatContext *s)
{
    WAVContext *wav = s->priv_data;
    ByteIOContext *pb = s->pb;
    int64_t fmt, fact;
    put_tag(pb, "RIFF");
    put_le32(pb, 0); /* file length: patched in wav_write_trailer() */
    put_tag(pb, "WAVE");
    /* format header */
    fmt = ff_start_tag(pb, "fmt ");
    if (ff_put_wav_header(pb, s->streams[0]->codec) < 0) {
        av_log(s, AV_LOG_ERROR, "%s codec not supported in WAVE format\n",
               s->streams[0]->codec->codec ? s->streams[0]->codec->codec->name : "NONE");
        /* FIX: do not av_free(wav) here — priv_data is owned and freed by
         * libavformat; freeing it in the muxer caused a double free. */
        return -1;
    }
    ff_end_tag(pb, fmt);
    if (s->streams[0]->codec->codec_tag != 0x01 /* hence for all other than PCM */
        && !url_is_streamed(s->pb)) {
        /* placeholder sample count, updated in the trailer */
        fact = ff_start_tag(pb, "fact");
        put_le32(pb, 0);
        ff_end_tag(pb, fact);
    }
    av_set_pts_info(s->streams[0], 64, 1, s->streams[0]->codec->sample_rate);
    wav->maxpts = wav->last_duration = 0;
    wav->minpts = INT64_MAX;
    /* data header */
    wav->data = ff_start_tag(pb, "data");
    put_flush_packet(pb);
    return 0;
}
/**
 * Append packet data to the open "data" chunk, tracking pts extremes and
 * the last packet duration so the trailer can derive the total sample
 * count for the "fact" chunk.
 */
static int wav_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    WAVContext *wav = s->priv_data;
    ByteIOContext *pb = s->pb;
    put_buffer(pb, pkt->data, pkt->size);
    if (pkt->pts == AV_NOPTS_VALUE) {
        av_log(s, AV_LOG_ERROR, "wav_write_packet: NOPTS\n");
    } else {
        wav->minpts        = FFMIN(wav->minpts, pkt->pts);
        wav->maxpts        = FFMAX(wav->maxpts, pkt->pts);
        wav->last_duration = pkt->duration;
    }
    return 0;
}
/**
 * Close the "data" chunk, patch the RIFF file size, and for non-PCM
 * streams fill the sample count into the "fact" chunk written earlier.
 * No-op on non-seekable output.
 */
static int wav_write_trailer(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    WAVContext *wav = s->priv_data;
    int64_t file_size;
    if (!url_is_streamed(s->pb)) {
        ff_end_tag(pb, wav->data);
        /* update file size */
        file_size = url_ftell(pb);
        url_fseek(pb, 4, SEEK_SET);
        put_le32(pb, (uint32_t)(file_size - 8));
        url_fseek(pb, file_size, SEEK_SET);
        put_flush_packet(pb);
        if(s->streams[0]->codec->codec_tag != 0x01) {
            /* Update num_samps in fact chunk */
            int number_of_samples;
            /* convert the observed pts span (plus last duration) from the
             * stream time base to samples */
            number_of_samples = av_rescale(wav->maxpts - wav->minpts + wav->last_duration,
                                           s->streams[0]->codec->sample_rate * (int64_t)s->streams[0]->time_base.num,
                                           s->streams[0]->time_base.den);
            url_fseek(pb, wav->data-12, SEEK_SET);  /* fact payload precedes the data tag */
            put_le32(pb, number_of_samples);
            url_fseek(pb, file_size, SEEK_SET);
            put_flush_packet(pb);
        }
    }
    return 0;
}
AVOutputFormat wav_muxer = {
    "wav",                              /* short name */
    NULL_IF_CONFIG_SMALL("WAV format"), /* long name */
    "audio/x-wav",                      /* mime type */
    "wav",                              /* extensions */
    sizeof(WAVContext),                 /* priv_data size */
    CODEC_ID_PCM_S16LE,                 /* default audio codec */
    CODEC_ID_NONE,                      /* no video */
    wav_write_header,
    wav_write_packet,
    wav_write_trailer,
    .codec_tag= (const AVCodecTag* const []){ff_codec_wav_tags, 0},
};
#endif /* CONFIG_WAV_MUXER */
#if CONFIG_WAV_DEMUXER
/* return the size of the found tag */
/**
 * Skip RIFF chunks until one tagged @p tag1 is found, leaving the stream
 * positioned at its payload.
 * @return the payload size of the found chunk, or -1 on EOF
 */
static int64_t find_tag(ByteIOContext *pb, uint32_t tag1)
{
    while (!url_feof(pb)) {
        unsigned int tag = get_le32(pb);
        int64_t size     = get_le32(pb);
        if (tag == tag1)
            return size;
        url_fseek(pb, size, SEEK_CUR);
    }
    return -1;
}
/** Probe for RIFF/WAVE and RF64/WAVE (with mandatory "ds64" chunk) files. */
static int wav_probe(AVProbeData *p)
{
    if (p->buf_size <= 32)
        return 0;
    if (memcmp(p->buf + 8, "WAVE", 4))
        return 0;
    if (!memcmp(p->buf, "RIFF", 4)) {
        /* The ACT demuxer carries a standard WAV header at the top of its
         * own format, so return a slightly decreased score to avoid a
         * probe conflict between ACT and WAV. */
        return AVPROBE_SCORE_MAX - 1;
    }
    if (!memcmp(p->buf, "RF64", 4) && !memcmp(p->buf + 12, "ds64", 4))
        return AVPROBE_SCORE_MAX;
    return 0;
}
/* wav input */
/* wav input */
/**
 * Read a RIFF/WAVE or RF64/WAVE header: for RF64 the 64-bit data size comes
 * from the mandatory "ds64" chunk; then parse "fmt " and locate "data".
 */
static int wav_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    int64_t size, av_uninit(data_size);
    int rf64;
    unsigned int tag;
    ByteIOContext *pb = s->pb;
    AVStream *st;
    WAVContext *wav = s->priv_data;
    /* check RIFF header */
    tag = get_le32(pb);
    rf64 = tag == MKTAG('R', 'F', '6', '4');
    if (!rf64 && tag != MKTAG('R', 'I', 'F', 'F'))
        return -1;
    get_le32(pb); /* file size */
    tag = get_le32(pb);
    if (tag != MKTAG('W', 'A', 'V', 'E'))
        return -1;
    if (rf64) {
        /* the ds64 chunk must immediately follow and carries the real sizes */
        if (get_le32(pb) != MKTAG('d', 's', '6', '4'))
            return -1;
        size = get_le32(pb);
        if (size < 16)
            return -1;
        get_le64(pb); /* RIFF size */
        data_size = get_le64(pb);
        url_fskip(pb, size - 16); /* skip rest of ds64 chunk */
    }
    /* parse fmt header */
    size = find_tag(pb, MKTAG('f', 'm', 't', ' '));
    if (size < 0)
        return -1;
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    ff_get_wav_header(pb, st->codec, size);
    st->need_parsing = AVSTREAM_PARSE_FULL;
    av_set_pts_info(st, 64, 1, st->codec->sample_rate);
    size = find_tag(pb, MKTAG('d', 'a', 't', 'a'));
    if (rf64)
        size = data_size;   /* RF64: the 32-bit chunk size is a placeholder */
    if (size < 0)
        return -1;
    if (!size) {
        /* zero-sized data chunk: stream until EOF */
        wav->data_end = INT64_MAX;
    } else
        wav->data_end= url_ftell(pb) + size;
    return 0;
}
/**
 * Find a Wave64 chunk by its 16-byte GUID, skipping over other chunks.
 * @return the size of the found chunk (including its 24-byte header),
 *         or -1 on EOF or on a malformed chunk size
 */
static int64_t find_guid(ByteIOContext *pb, const uint8_t guid1[16])
{
    for (;;) {
        uint8_t chunk_guid[16];
        int64_t size;

        if (url_feof(pb))
            return -1;
        get_buffer(pb, chunk_guid, 16);
        size = get_le64(pb);
        /* a valid w64 chunk size includes its own 24-byte header */
        if (size <= 24)
            return -1;
        if (memcmp(chunk_guid, guid1, 16) == 0)
            return size;
        /* w64 chunks are aligned on 8-byte boundaries */
        url_fskip(pb, FFALIGN(size, INT64_C(8)) - 24);
    }
}
/* Wave64 'data' chunk GUID: 4 ASCII tag bytes + fixed 12-byte w64 suffix */
static const uint8_t guid_data[16] = { 'd', 'a', 't', 'a',
    0xF3, 0xAC, 0xD3, 0x11, 0x8C, 0xD1, 0x00, 0xC0, 0x4F, 0x8E, 0xDB, 0x8A };
#define MAX_SIZE 4096
/**
 * Read up to MAX_SIZE bytes of audio payload as one packet, rounded to a
 * whole number of codec blocks and clipped to the end of the current data
 * chunk.  When the current chunk is exhausted, searches for the next
 * 'data' chunk (or its GUID equivalent for Wave64 input).
 */
static int wav_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    int ret, size;
    int64_t left;
    AVStream *st;
    WAVContext *wav = s->priv_data;

    st = s->streams[0];

    left = wav->data_end - url_ftell(s->pb);
    if (left <= 0){
        /* current data chunk exhausted: look for another one */
        if (CONFIG_W64_DEMUXER && wav->w64)
            left = find_guid(s->pb, guid_data) - 24;
        else
            left = find_tag(s->pb, MKTAG('d', 'a', 't', 'a'));
        if (left < 0)
            return AVERROR_EOF;
        wav->data_end= url_ftell(s->pb) + left;
    }

    size = MAX_SIZE;
    if (st->codec->block_align > 1) {
        /* read a whole number of codec blocks, at least one */
        if (size < st->codec->block_align)
            size = st->codec->block_align;
        size = (size / st->codec->block_align) * st->codec->block_align;
    }
    size = FFMIN(size, left);
    ret = av_get_packet(s->pb, pkt, size);
    if (ret < 0)
        return ret;
    pkt->stream_index = 0;

    return ret;
}
/**
 * Seek in the WAV stream.  Packetized codecs fall back to the generic
 * index built via AVFMT_GENERIC_INDEX; raw PCM-style data uses pcm seeking.
 */
static int wav_read_seek(AVFormatContext *s,
                         int stream_index, int64_t timestamp, int flags)
{
    enum CodecID id = s->streams[0]->codec->codec_id;

    if (id == CODEC_ID_MP2 || id == CODEC_ID_MP3 ||
        id == CODEC_ID_AC3 || id == CODEC_ID_DTS)
        /* use generic seeking with dynamically generated indexes */
        return -1;

    return pcm_read_seek(s, stream_index, timestamp, flags);
}
/* WAV demuxer registration; fields are positional per the AVInputFormat layout */
AVInputFormat wav_demuxer = {
    "wav",                              /* name */
    NULL_IF_CONFIG_SMALL("WAV format"), /* long_name */
    sizeof(WAVContext),                 /* priv_data_size */
    wav_probe,
    wav_read_header,
    wav_read_packet,
    NULL,                               /* no read_close needed */
    wav_read_seek,
    .flags= AVFMT_GENERIC_INDEX,
    .codec_tag= (const AVCodecTag* const []){ff_codec_wav_tags, 0},
};
#endif /* CONFIG_WAV_DEMUXER */
#if CONFIG_W64_DEMUXER
/* Wave64 chunk GUIDs: 4 ASCII tag bytes followed by a fixed 12-byte suffix */
static const uint8_t guid_riff[16] = { 'r', 'i', 'f', 'f',
    0x2E, 0x91, 0xCF, 0x11, 0xA5, 0xD6, 0x28, 0xDB, 0x04, 0xC1, 0x00, 0x00 };

static const uint8_t guid_wave[16] = { 'w', 'a', 'v', 'e',
    0xF3, 0xAC, 0xD3, 0x11, 0x8C, 0xD1, 0x00, 0xC0, 0x4F, 0x8E, 0xDB, 0x8A };

static const uint8_t guid_fmt [16] = { 'f', 'm', 't', ' ',
    0xF3, 0xAC, 0xD3, 0x11, 0x8C, 0xD1, 0x00, 0xC0, 0x4F, 0x8E, 0xDB, 0x8A };
/**
 * Probe for a Sony Wave64 file: riff GUID at offset 0 and wave GUID at
 * offset 24, written as guard clauses.
 */
static int w64_probe(AVProbeData *p)
{
    if (p->buf_size <= 40)
        return 0;
    if (memcmp(p->buf, guid_riff, 16))
        return 0;
    if (memcmp(p->buf + 24, guid_wave, 16))
        return 0;
    return AVPROBE_SCORE_MAX;
}
/**
 * Parse the Wave64 file header: validate the riff/wave GUIDs, read the
 * fmt chunk into the codec context, and locate the data chunk.  On success
 * marks the context as w64 so packet reading uses GUID-based chunk lookup.
 */
static int w64_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    int64_t size;
    ByteIOContext *pb = s->pb;
    WAVContext *wav = s->priv_data;
    AVStream *st;
    uint8_t guid[16];

    get_buffer(pb, guid, 16);
    if (memcmp(guid, guid_riff, 16))
        return -1;

    if (get_le64(pb) < 16 + 8 + 16 + 8 + 16 + 8) /* riff + wave + fmt + sizes */
        return -1;

    get_buffer(pb, guid, 16);
    if (memcmp(guid, guid_wave, 16)) {
        av_log(s, AV_LOG_ERROR, "could not find wave guid\n");
        return -1;
    }

    size = find_guid(pb, guid_fmt);
    if (size < 0) {
        av_log(s, AV_LOG_ERROR, "could not find fmt guid\n");
        return -1;
    }

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    /* subtract chunk header size - normal wav file doesn't count it */
    ff_get_wav_header(pb, st->codec, size - 24);
    /* skip 8-byte alignment padding after the fmt chunk */
    url_fskip(pb, FFALIGN(size, INT64_C(8)) - size);

    st->need_parsing = AVSTREAM_PARSE_FULL;

    av_set_pts_info(st, 64, 1, st->codec->sample_rate);

    size = find_guid(pb, guid_data);
    if (size < 0) {
        av_log(s, AV_LOG_ERROR, "could not find data guid\n");
        return -1;
    }
    /* size includes the 24-byte chunk header; payload starts here */
    wav->data_end = url_ftell(pb) + size - 24;
    wav->w64 = 1;

    return 0;
}
/* Wave64 demuxer registration; shares packet/seek code with the WAV demuxer */
AVInputFormat w64_demuxer = {
    "w64",                                        /* name */
    NULL_IF_CONFIG_SMALL("Sony Wave64 format"),   /* long_name */
    sizeof(WAVContext),                           /* priv_data_size */
    w64_probe,
    w64_read_header,
    wav_read_packet,
    NULL,                                         /* no read_close needed */
    wav_read_seek,
    .flags = AVFMT_GENERIC_INDEX,
    .codec_tag = (const AVCodecTag* const []){ff_codec_wav_tags, 0},
};
#endif /* CONFIG_W64_DEMUXER */
| 123linslouis-android-video-cutter | jni/libavformat/wav.c | C | asf20 | 11,263 |
/*
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_ASF_H
#define AVFORMAT_ASF_H
#include <stdint.h>
#include "avformat.h"
#include "metadata.h"
#define PACKET_SIZE 3200
/** per-stream demuxing/muxing state for one ASF stream */
typedef struct {
    int num;                  ///< ASF stream number (1-based)
    unsigned char seq;        ///< payload sequence number
    /* use for reading */
    AVPacket pkt;             ///< packet currently being assembled from fragments
    int frag_offset;          ///< byte offset of the next fragment within pkt
    int timestamp;
    int64_t duration;

    int ds_span;              /* descrambling */
    int ds_packet_size;
    int ds_chunk_size;

    int64_t packet_pos;       ///< file position of the data packet this payload came from

    uint16_t stream_language_index; ///< index into the language list object
} ASFStream;
typedef uint8_t ff_asf_guid[16];
/** fields of the ASF File Properties Object */
typedef struct {
    ff_asf_guid guid;        ///< generated by client computer
    uint64_t file_size;      /**< in bytes
                              *   invalid if broadcasting */
    uint64_t create_time;    /**< time of creation, in 100-nanosecond units since 1.1.1601
                              *   invalid if broadcasting */
    uint64_t play_time;      /**< play time, in 100-nanosecond units
                              *   invalid if broadcasting */
    uint64_t send_time;      /**< time to send file, in 100-nanosecond units
                              *   invalid if broadcasting (could be ignored) */
    uint32_t preroll;        /**< timestamp of the first packet, in milliseconds
                              *   if nonzero - subtract from time */
    uint32_t ignore;         ///< preroll is 64bit - but let's just ignore it
    uint32_t flags;          /**< 0x01 - broadcast
                              *   0x02 - seekable
                              *   rest is reserved should be 0 */
    uint32_t min_pktsize;    /**< size of a data packet
                              *   invalid if broadcasting */
    uint32_t max_pktsize;    /**< shall be the same as for min_pktsize
                              *   invalid if broadcasting */
    uint32_t max_bitrate;    /**< bandwidth of stream in bps
                              *   should be the sum of bitrates of the
                              *   individual media streams */
} ASFMainHeader;
/** one entry of the ASF simple index object */
typedef struct {
    uint32_t packet_number;  ///< data packet containing the key frame
    uint16_t packet_count;   ///< number of packets the entry spans
} ASFIndex;
/** demuxer/muxer private context for the ASF container */
typedef struct {
    uint32_t seqno;
    int is_streamed;
    int asfid2avid[128];                 ///< conversion table from asf ID 2 AVStream ID
    ASFStream streams[128];              ///< it's max number and it's not that big
    uint32_t stream_bitrates[128];       ///< max number of streams, bitrate for each (for streaming)
    char stream_languages[128][6];       ///< max number of streams, language for each (RFC1766, e.g. en-US)
    /* non streamed additional info */
    uint64_t nb_packets;                 ///< how many packets are there in the file, invalid if broadcasting
    int64_t duration;                    ///< in 100ns units
    /* packet filling */
    unsigned char multi_payloads_present;
    int packet_size_left;
    int packet_timestamp_start;
    int packet_timestamp_end;
    unsigned int packet_nb_payloads;
    int packet_nb_frames;
    uint8_t packet_buf[PACKET_SIZE];
    ByteIOContext pb;
    /* only for reading */
    uint64_t data_offset;                ///< beginning of the first data packet
    uint64_t data_object_offset;         ///< data object offset (excl. GUID & size)
    uint64_t data_object_size;           ///< size of the data object
    int index_read;                      ///< nonzero once the simple index object has been read
    ASFMainHeader hdr;
    /* state of the data-packet/payload parser */
    int packet_flags;
    int packet_property;
    int packet_timestamp;
    int packet_segsizetype;
    int packet_segments;
    int packet_seq;
    int packet_replic_size;
    int packet_key_frame;
    int packet_padsize;
    unsigned int packet_frag_offset;
    unsigned int packet_frag_size;
    int64_t packet_frag_timestamp;
    int packet_multi_size;
    int packet_obj_size;
    int packet_time_delta;
    int packet_time_start;
    int64_t packet_pos;

    int stream_index;

    /* seek-index building state */
    int64_t last_indexed_pts;
    ASFIndex* index_ptr;
    uint32_t nb_index_count;
    uint32_t nb_index_memory_alloc;
    uint16_t maximum_packet;

    ASFStream* asf_st;                   ///< currently decoded stream
} ASFContext;
extern const ff_asf_guid ff_asf_header;
extern const ff_asf_guid ff_asf_file_header;
extern const ff_asf_guid ff_asf_stream_header;
extern const ff_asf_guid ff_asf_ext_stream_header;
extern const ff_asf_guid ff_asf_audio_stream;
extern const ff_asf_guid ff_asf_audio_conceal_none;
extern const ff_asf_guid ff_asf_audio_conceal_spread;
extern const ff_asf_guid ff_asf_video_stream;
extern const ff_asf_guid ff_asf_video_conceal_none;
extern const ff_asf_guid ff_asf_command_stream;
extern const ff_asf_guid ff_asf_comment_header;
extern const ff_asf_guid ff_asf_codec_comment_header;
extern const ff_asf_guid ff_asf_codec_comment1_header;
extern const ff_asf_guid ff_asf_data_header;
extern const ff_asf_guid ff_asf_head1_guid;
extern const ff_asf_guid ff_asf_head2_guid;
extern const ff_asf_guid ff_asf_extended_content_header;
extern const ff_asf_guid ff_asf_simple_index_header;
extern const ff_asf_guid ff_asf_ext_stream_embed_stream_header;
extern const ff_asf_guid ff_asf_ext_stream_audio_stream;
extern const ff_asf_guid ff_asf_metadata_header;
extern const ff_asf_guid ff_asf_marker_header;
extern const ff_asf_guid ff_asf_my_guid;
extern const ff_asf_guid ff_asf_language_guid;
extern const ff_asf_guid ff_asf_content_encryption;
extern const ff_asf_guid ff_asf_ext_content_encryption;
extern const ff_asf_guid ff_asf_digital_signature;
extern const AVMetadataConv ff_asf_metadata_conv[];
#define ASF_PACKET_FLAG_ERROR_CORRECTION_PRESENT 0x80 //1000 0000
// ASF data packet structure
// =========================
//
//
// -----------------------------------
// | Error Correction Data | Optional
// -----------------------------------
// | Payload Parsing Information (PPI) |
// -----------------------------------
// | Payload Data |
// -----------------------------------
// | Padding Data |
// -----------------------------------
// PPI_FLAG - Payload parsing information flags
#define ASF_PPI_FLAG_MULTIPLE_PAYLOADS_PRESENT 1
#define ASF_PPI_FLAG_SEQUENCE_FIELD_IS_BYTE 0x02 //0000 0010
#define ASF_PPI_FLAG_SEQUENCE_FIELD_IS_WORD 0x04 //0000 0100
#define ASF_PPI_FLAG_SEQUENCE_FIELD_IS_DWORD 0x06 //0000 0110
#define ASF_PPI_MASK_SEQUENCE_FIELD_SIZE 0x06 //0000 0110
#define ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_BYTE 0x08 //0000 1000
#define ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_WORD 0x10 //0001 0000
#define ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_DWORD 0x18 //0001 1000
#define ASF_PPI_MASK_PADDING_LENGTH_FIELD_SIZE 0x18 //0001 1000
#define ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_BYTE 0x20 //0010 0000
#define ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_WORD 0x40 //0100 0000
#define ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_DWORD 0x60 //0110 0000
#define ASF_PPI_MASK_PACKET_LENGTH_FIELD_SIZE 0x60 //0110 0000
// PL_FLAG - Payload flags
#define ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_BYTE 0x01 //0000 0001
#define ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_WORD 0x02 //0000 0010
#define ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_DWORD 0x03 //0000 0011
#define ASF_PL_MASK_REPLICATED_DATA_LENGTH_FIELD_SIZE 0x03 //0000 0011
#define ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_BYTE 0x04 //0000 0100
#define ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_WORD 0x08 //0000 1000
#define ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_DWORD 0x0c //0000 1100
#define ASF_PL_MASK_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_SIZE 0x0c //0000 1100
#define ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_BYTE 0x10 //0001 0000
#define ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_WORD 0x20 //0010 0000
#define ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_DWORD 0x30 //0011 0000
#define ASF_PL_MASK_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_SIZE 0x30 //0011 0000
#define ASF_PL_FLAG_STREAM_NUMBER_LENGTH_FIELD_IS_BYTE 0x40 //0100 0000
#define ASF_PL_MASK_STREAM_NUMBER_LENGTH_FIELD_SIZE 0xc0 //1100 0000
#define ASF_PL_FLAG_PAYLOAD_LENGTH_FIELD_IS_BYTE 0x40 //0100 0000
#define ASF_PL_FLAG_PAYLOAD_LENGTH_FIELD_IS_WORD 0x80 //1000 0000
#define ASF_PL_MASK_PAYLOAD_LENGTH_FIELD_SIZE 0xc0 //1100 0000
#define ASF_PL_FLAG_KEY_FRAME 0x80 //1000 0000
extern AVInputFormat asf_demuxer;
int ff_put_str16_nolen(ByteIOContext *s, const char *tag);
#endif /* AVFORMAT_ASF_H */
| 123linslouis-android-video-cutter | jni/libavformat/asf.h | C | asf20 | 9,220 |
/*
* Creative Voice File muxer.
* Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "voc.h"
/* muxer private state */
typedef struct voc_enc_context {
    int param_written;  ///< nonzero once the first, parameter-carrying block was emitted
} VocEncContext;
/**
 * Write the 26-byte Creative Voice file header.
 * Exactly one audio stream is supported; anything else is rejected.
 */
static int voc_write_header(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    const int header_size = 26;
    const int version = 0x0114;

    if (s->nb_streams != 1
        || s->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
        return AVERROR_PATCHWELCOME;

    put_buffer(pb, ff_voc_magic, sizeof(ff_voc_magic) - 1);
    put_le16(pb, header_size);
    put_le16(pb, version);
    /* header checksum field as defined by the VOC format: ~version + 0x1234 */
    put_le16(pb, ~version + 0x1234);

    return 0;
}
/**
 * Write one audio packet as a VOC data block.
 *
 * The first packet is prefixed with a block carrying the codec parameters:
 * a "new voice data" block (type 9) when the codec tag does not fit in one
 * byte, otherwise a classic "voice data" block (type 1), preceded by an
 * "extended" block (type 8) for multichannel audio.  All subsequent packets
 * become "continuation" blocks (type 2).
 *
 * Consistency fix: use the existing `enc` alias everywhere instead of the
 * lone `s->streams[0]->codec->channels` access (same object, same value).
 */
static int voc_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VocEncContext *voc = s->priv_data;
    AVCodecContext *enc = s->streams[0]->codec;
    ByteIOContext *pb = s->pb;

    if (!voc->param_written) {
        if (enc->codec_tag > 0xFF) {
            put_byte(pb, VOC_TYPE_NEW_VOICE_DATA);
            put_le24(pb, pkt->size + 12);           /* block size: 12-byte header + payload */
            put_le32(pb, enc->sample_rate);
            put_byte(pb, enc->bits_per_coded_sample);
            put_byte(pb, enc->channels);
            put_le16(pb, enc->codec_tag);
            put_le32(pb, 0);                        /* reserved */
        } else {
            if (enc->channels > 1) {
                put_byte(pb, VOC_TYPE_EXTENDED);
                put_le24(pb, 4);
                /* extended time constant folds the channel count into the rate */
                put_le16(pb, 65536-256000000/(enc->sample_rate*enc->channels));
                put_byte(pb, enc->codec_tag);
                put_byte(pb, enc->channels - 1);
            }
            put_byte(pb, VOC_TYPE_VOICE_DATA);
            put_le24(pb, pkt->size + 2);            /* block size: 2-byte header + payload */
            /* classic VOC time constant: 256 - 1e6 / sample_rate */
            put_byte(pb, 256 - 1000000 / enc->sample_rate);
            put_byte(pb, enc->codec_tag);
        }
        voc->param_written = 1;
    } else {
        put_byte(pb, VOC_TYPE_VOICE_DATA_CONT);
        put_le24(pb, pkt->size);
    }

    put_buffer(pb, pkt->data, pkt->size);
    return 0;
}
/* terminate the file with the VOC end-of-data marker byte (0) */
static int voc_write_trailer(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;

    put_byte(pb, 0);
    return 0;
}
/* VOC muxer registration; fields are positional per the AVOutputFormat layout */
AVOutputFormat voc_muxer = {
    "voc",                                              /* name */
    NULL_IF_CONFIG_SMALL("Creative Voice file format"), /* long_name */
    "audio/x-voc",                                      /* mime_type */
    "voc",                                              /* extensions */
    sizeof(VocEncContext),                              /* priv_data_size */
    CODEC_ID_PCM_U8,                                    /* default audio codec */
    CODEC_ID_NONE,                                      /* no video codec */
    voc_write_header,
    voc_write_packet,
    voc_write_trailer,
    .codec_tag=(const AVCodecTag* const []){ff_voc_codec_tags, 0},
};
| 123linslouis-android-video-cutter | jni/libavformat/vocenc.c | C | asf20 | 3,110 |
/*
* MXF muxer
* Copyright (c) 2008 GUCAS, Zhentan Feng <spyfeng at gmail dot com>
* Copyright (c) 2008 Baptiste Coudurier <baptiste dot coudurier at gmail dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* References
* SMPTE 336M KLV Data Encoding Protocol Using Key-Length-Value
* SMPTE 377M MXF File Format Specifications
* SMPTE 379M MXF Generic Container
* SMPTE 381M Mapping MPEG Streams into the MXF Generic Container
* SMPTE RP210: SMPTE Metadata Dictionary
* SMPTE RP224: Registry of SMPTE Universal Labels
*/
//#define DEBUG
#include <math.h>
#include <time.h>
#include "libavutil/random_seed.h"
#include "libavcodec/bytestream.h"
#include "audiointerleave.h"
#include "avformat.h"
#include "mxf.h"
/* 48 kHz audio samples per video frame, 0-terminated:
 * NTSC (29.97 fps) uses a 5-frame 1602/1601 cadence, PAL (25 fps) is constant */
static const int NTSC_samples_per_frame[] = { 1602, 1601, 1602, 1601, 1602, 0 };
static const int PAL_samples_per_frame[]  = { 1920, 0 };
AVOutputFormat mxf_d10_muxer;
#define EDIT_UNITS_PER_BODY 250
#define KAG_SIZE 512
/** mapping of a 2-byte local tag to its full 16-byte UL, for the primer pack */
typedef struct {
    int local_tag;
    UID uid;
} MXFLocalTagPair;
/** one edit-unit entry of the index table */
typedef struct {
    uint8_t flags;          ///< index entry flags (e.g. random access)
    uint64_t offset;        ///< stream offset of the edit unit
    unsigned slice_offset;  ///< offset of audio slice
} MXFIndexEntry;
/** per-stream muxing state */
typedef struct {
    AudioInterleaveContext aic;
    UID track_essence_element_key;
    int index;                  ///< index in mxf_essence_container_uls table
    const UID *codec_ul;
    int order;                  ///< interleaving order if dts are equal
    int interlaced;             ///< whether picture is interlaced
    int temporal_reordering;
    AVRational aspect_ratio;    ///< display aspect ratio
    int closed_gop;             ///< gop is closed, used in mpeg-2 frame parsing
} MXFStreamContext;
typedef struct {
UID container_ul;
UID element_ul;
UID codec_ul;
void (*write_desc)();
} MXFContainerEssenceEntry;
/* maps a CodecID to its first index in mxf_essence_container_uls[];
 * terminated by CODEC_ID_NONE */
static const struct {
    enum CodecID id;
    int index;
} mxf_essence_mappings[] = {
    { CODEC_ID_MPEG2VIDEO, 0 },
    { CODEC_ID_PCM_S24LE,  1 },
    { CODEC_ID_PCM_S16LE,  1 },
    { CODEC_ID_NONE }
};
static void mxf_write_wav_desc(AVFormatContext *s, AVStream *st);
static void mxf_write_aes3_desc(AVFormatContext *s, AVStream *st);
static void mxf_write_mpegvideo_desc(AVFormatContext *s, AVStream *st);
static void mxf_write_cdci_desc(AVFormatContext *s, AVStream *st);
static void mxf_write_generic_sound_desc(AVFormatContext *s, AVStream *st);
static const MXFContainerEssenceEntry mxf_essence_container_uls[] = {
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x02,0x0D,0x01,0x03,0x01,0x02,0x04,0x60,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x15,0x01,0x05,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x00,0x00,0x00 },
mxf_write_mpegvideo_desc },
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x06,0x03,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x16,0x01,0x03,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 },
mxf_write_aes3_desc },
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x06,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x16,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 },
mxf_write_wav_desc },
// D-10 625/50 PAL 50mb/s
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x01,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x05,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x01 },
mxf_write_cdci_desc },
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x01,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x06,0x01,0x10,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 },
mxf_write_generic_sound_desc },
// D-10 525/60 NTSC 50mb/s
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x02,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x05,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x02 },
mxf_write_cdci_desc },
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x02,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x06,0x01,0x10,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 },
mxf_write_generic_sound_desc },
// D-10 625/50 PAL 40mb/s
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x03,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x05,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x03 },
mxf_write_cdci_desc },
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x03,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x06,0x01,0x10,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 },
mxf_write_generic_sound_desc },
// D-10 525/60 NTSC 40mb/s
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x04,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x05,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x04 },
mxf_write_cdci_desc },
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x04,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x06,0x01,0x10,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 },
mxf_write_generic_sound_desc },
// D-10 625/50 PAL 30mb/s
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x05,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x05,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x05 },
mxf_write_cdci_desc },
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x05,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x06,0x01,0x10,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 },
mxf_write_generic_sound_desc },
// D-10 525/60 NTSC 30mb/s
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x06,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x05,0x01,0x01,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x06 },
mxf_write_cdci_desc },
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x06,0x01 },
{ 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x06,0x01,0x10,0x00 },
{ 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 },
mxf_write_generic_sound_desc },
{ { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 },
{ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 },
{ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 },
NULL },
};
/** muxer private context */
typedef struct MXFContext {
    int64_t footer_partition_offset;
    int essence_container_count;
    AVRational time_base;
    int header_written;
    MXFIndexEntry *index_entries;
    unsigned edit_units_count;
    uint64_t timestamp;         ///< timestamp, as year(16),month(8),day(8),hour(8),minutes(8),msec/4(8)
    uint8_t slice_count;        ///< index slice count minus 1 (1 if no audio, 0 otherwise)
    int last_indexed_edit_unit;
    uint64_t *body_partition_offset;
    unsigned body_partitions_count;
    int last_key_index;         ///< index of last key frame
    uint64_t duration;
    AVStream *timecode_track;
    int timecode_base;          ///< rounded time code base (25 or 30)
    int timecode_start;         ///< frame number computed from mpeg-2 gop header timecode
    int timecode_drop_frame;    ///< time code use drop frame method from mpeg-2 essence gop header
    int edit_unit_byte_count;   ///< fixed edit unit byte count
    uint64_t body_offset;
    uint32_t instance_number;
    uint8_t umid[16];           ///< unique material identifier
} MXFContext;
static const uint8_t uuid_base[] = { 0xAD,0xAB,0x44,0x24,0x2f,0x25,0x4d,0xc7,0x92,0xff,0x29,0xbd };
static const uint8_t umid_ul[] = { 0x06,0x0A,0x2B,0x34,0x01,0x01,0x01,0x05,0x01,0x01,0x0D,0x00,0x13 };
/**
* complete key for operation pattern, partitions, and primer pack
*/
static const uint8_t op1a_ul[] = { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x02,0x01,0x01,0x01,0x09,0x00 };
static const uint8_t footer_partition_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0D,0x01,0x02,0x01,0x01,0x04,0x04,0x00 }; // ClosedComplete
static const uint8_t primer_pack_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0D,0x01,0x02,0x01,0x01,0x05,0x01,0x00 };
static const uint8_t index_table_segment_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x10,0x01,0x00 };
static const uint8_t random_index_pack_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0D,0x01,0x02,0x01,0x01,0x11,0x01,0x00 };
static const uint8_t header_open_partition_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0D,0x01,0x02,0x01,0x01,0x02,0x01,0x00 }; // OpenIncomplete
static const uint8_t header_closed_partition_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0D,0x01,0x02,0x01,0x01,0x02,0x04,0x00 }; // ClosedComplete
static const uint8_t klv_fill_key[] = { 0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x03,0x01,0x02,0x10,0x01,0x00,0x00,0x00 };
static const uint8_t body_partition_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0D,0x01,0x02,0x01,0x01,0x03,0x04,0x00 }; // ClosedComplete
/**
* partial key for header metadata
*/
static const uint8_t header_metadata_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0D,0x01,0x01,0x01,0x01 };
static const uint8_t multiple_desc_ul[] = { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x0D,0x01,0x03,0x01,0x02,0x7F,0x01,0x00 };
/**
* SMPTE RP210 http://www.smpte-ra.org/mdd/index.html
*/
static const MXFLocalTagPair mxf_local_tag_batch[] = {
// preface set
{ 0x3C0A, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x01,0x01,0x15,0x02,0x00,0x00,0x00,0x00}}, /* Instance UID */
{ 0x3B02, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x07,0x02,0x01,0x10,0x02,0x04,0x00,0x00}}, /* Last Modified Date */
{ 0x3B05, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x03,0x01,0x02,0x01,0x05,0x00,0x00,0x00}}, /* Version */
{ 0x3B06, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x06,0x04,0x00,0x00}}, /* Identifications reference */
{ 0x3B03, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x02,0x01,0x00,0x00}}, /* Content Storage reference */
{ 0x3B09, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x01,0x02,0x02,0x03,0x00,0x00,0x00,0x00}}, /* Operational Pattern UL */
{ 0x3B0A, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x01,0x02,0x02,0x10,0x02,0x01,0x00,0x00}}, /* Essence Containers UL batch */
{ 0x3B0B, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x01,0x02,0x02,0x10,0x02,0x02,0x00,0x00}}, /* DM Schemes UL batch */
// Identification
{ 0x3C09, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x05,0x20,0x07,0x01,0x01,0x00,0x00,0x00}}, /* This Generation UID */
{ 0x3C01, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x05,0x20,0x07,0x01,0x02,0x01,0x00,0x00}}, /* Company Name */
{ 0x3C02, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x05,0x20,0x07,0x01,0x03,0x01,0x00,0x00}}, /* Product Name */
{ 0x3C04, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x05,0x20,0x07,0x01,0x05,0x01,0x00,0x00}}, /* Version String */
{ 0x3C05, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x05,0x20,0x07,0x01,0x07,0x00,0x00,0x00}}, /* Product ID */
{ 0x3C06, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x07,0x02,0x01,0x10,0x02,0x03,0x00,0x00}}, /* Modification Date */
// Content Storage
{ 0x1901, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x05,0x01,0x00,0x00}}, /* Package strong reference batch */
{ 0x1902, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x05,0x02,0x00,0x00}}, /* Package strong reference batch */
// Essence Container Data
{ 0x2701, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x06,0x01,0x00,0x00,0x00}}, /* Linked Package UID */
{ 0x3F07, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x04,0x01,0x03,0x04,0x04,0x00,0x00,0x00,0x00}}, /* BodySID */
// Package
{ 0x4401, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x01,0x01,0x15,0x10,0x00,0x00,0x00,0x00}}, /* Package UID */
{ 0x4405, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x07,0x02,0x01,0x10,0x01,0x03,0x00,0x00}}, /* Package Creation Date */
{ 0x4404, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x07,0x02,0x01,0x10,0x02,0x05,0x00,0x00}}, /* Package Modified Date */
{ 0x4403, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x06,0x05,0x00,0x00}}, /* Tracks Strong reference array */
{ 0x4701, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x02,0x03,0x00,0x00}}, /* Descriptor */
// Track
{ 0x4801, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x01,0x07,0x01,0x01,0x00,0x00,0x00,0x00}}, /* Track ID */
{ 0x4804, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x01,0x04,0x01,0x03,0x00,0x00,0x00,0x00}}, /* Track Number */
{ 0x4B01, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x05,0x30,0x04,0x05,0x00,0x00,0x00,0x00}}, /* Edit Rate */
{ 0x4B02, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x07,0x02,0x01,0x03,0x01,0x03,0x00,0x00}}, /* Origin */
{ 0x4803, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x02,0x04,0x00,0x00}}, /* Sequence reference */
// Sequence
{ 0x0201, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x04,0x07,0x01,0x00,0x00,0x00,0x00,0x00}}, /* Data Definition UL */
{ 0x0202, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x07,0x02,0x02,0x01,0x01,0x03,0x00,0x00}}, /* Duration */
{ 0x1001, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x06,0x09,0x00,0x00}}, /* Structural Components reference array */
// Source Clip
{ 0x1201, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x07,0x02,0x01,0x03,0x01,0x04,0x00,0x00}}, /* Start position */
{ 0x1101, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x03,0x01,0x00,0x00,0x00}}, /* SourcePackageID */
{ 0x1102, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x03,0x02,0x00,0x00,0x00}}, /* SourceTrackID */
// Timecode Component
{ 0x1501, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x07,0x02,0x01,0x03,0x01,0x05,0x00,0x00}}, /* Start Time Code */
{ 0x1502, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x04,0x04,0x01,0x01,0x02,0x06,0x00,0x00}}, /* Rounded Time Code Base */
{ 0x1503, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x04,0x01,0x01,0x05,0x00,0x00,0x00}}, /* Drop Frame */
// File Descriptor
{ 0x3F01, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x04,0x06,0x01,0x01,0x04,0x06,0x0B,0x00,0x00}}, /* Sub Descriptors reference array */
{ 0x3006, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x06,0x01,0x01,0x03,0x05,0x00,0x00,0x00}}, /* Linked Track ID */
{ 0x3001, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x06,0x01,0x01,0x00,0x00,0x00,0x00}}, /* SampleRate */
{ 0x3004, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x06,0x01,0x01,0x04,0x01,0x02,0x00,0x00}}, /* Essence Container */
// Generic Picture Essence Descriptor
{ 0x320C, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x01,0x03,0x01,0x04,0x00,0x00,0x00}}, /* Frame Layout */
{ 0x320D, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x04,0x01,0x03,0x02,0x05,0x00,0x00,0x00}}, /* Video Line Map */
{ 0x3203, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x01,0x05,0x02,0x02,0x00,0x00,0x00}}, /* Stored Width */
{ 0x3202, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x01,0x05,0x02,0x01,0x00,0x00,0x00}}, /* Stored Height */
{ 0x3209, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x01,0x05,0x01,0x0C,0x00,0x00,0x00}}, /* Display Width */
{ 0x3208, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x01,0x05,0x01,0x0B,0x00,0x00,0x00}}, /* Display Height */
{ 0x320E, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x01,0x01,0x01,0x01,0x00,0x00,0x00}}, /* Aspect Ratio */
{ 0x3201, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x04,0x01,0x06,0x01,0x00,0x00,0x00,0x00}}, /* Picture Essence Coding */
// CDCI Picture Essence Descriptor
{ 0x3301, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x04,0x01,0x05,0x03,0x0A,0x00,0x00,0x00}}, /* Component Depth */
{ 0x3302, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x01,0x04,0x01,0x05,0x01,0x05,0x00,0x00,0x00}}, /* Horizontal Subsampling */
// Generic Sound Essence Descriptor
{ 0x3D02, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x04,0x04,0x02,0x03,0x01,0x04,0x00,0x00,0x00}}, /* Locked/Unlocked */
{ 0x3D03, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x04,0x02,0x03,0x01,0x01,0x01,0x00,0x00}}, /* Audio sampling rate */
{ 0x3D07, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x04,0x02,0x01,0x01,0x04,0x00,0x00,0x00}}, /* ChannelCount */
{ 0x3D01, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x04,0x04,0x02,0x03,0x03,0x04,0x00,0x00,0x00}}, /* Quantization bits */
{ 0x3D06, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x02,0x04,0x02,0x04,0x02,0x00,0x00,0x00,0x00}}, /* Sound Essence Compression */
// Index Table Segment
{ 0x3F0B, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x05,0x30,0x04,0x06,0x00,0x00,0x00,0x00}}, /* Index Edit Rate */
{ 0x3F0C, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x07,0x02,0x01,0x03,0x01,0x0A,0x00,0x00}}, /* Index Start Position */
{ 0x3F0D, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x07,0x02,0x02,0x01,0x01,0x02,0x00,0x00}}, /* Index Duration */
{ 0x3F05, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x04,0x04,0x06,0x02,0x01,0x00,0x00,0x00,0x00}}, /* Edit Unit Byte Count */
{ 0x3F06, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x04,0x01,0x03,0x04,0x05,0x00,0x00,0x00,0x00}}, /* IndexSID */
{ 0x3F08, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x04,0x04,0x04,0x04,0x01,0x01,0x00,0x00,0x00}}, /* Slice Count */
{ 0x3F09, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x04,0x04,0x04,0x01,0x06,0x00,0x00,0x00}}, /* Delta Entry Array */
{ 0x3F0A, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x04,0x04,0x04,0x02,0x05,0x00,0x00,0x00}}, /* Index Entry Array */
// MPEG video Descriptor
{ 0x8000, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x04,0x01,0x06,0x02,0x01,0x0B,0x00,0x00}}, /* BitRate */
{ 0x8007, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x04,0x01,0x06,0x02,0x01,0x0A,0x00,0x00}}, /* ProfileAndLevel */
// Wave Audio Essence Descriptor
{ 0x3D09, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x04,0x02,0x03,0x03,0x05,0x00,0x00,0x00}}, /* Average Bytes Per Second */
{ 0x3D0A, {0x06,0x0E,0x2B,0x34,0x01,0x01,0x01,0x05,0x04,0x02,0x03,0x02,0x01,0x00,0x00,0x00}}, /* Block Align */
};
/**
 * Write a 16-byte UUID: the 12-byte muxer base, then the metadata set
 * type and a per-set value, so each (type, value) pair yields a unique,
 * reproducible identifier for cross-referencing header metadata sets.
 */
static void mxf_write_uuid(ByteIOContext *pb, enum MXFMetadataSetType type, int value)
{
    put_buffer(pb, uuid_base, 12);
    put_be16(pb, type);
    put_be16(pb, value);
}
/**
 * Write a 32-byte UMID: 13-byte UMID prefix UL, 3-byte instance number,
 * 15 bytes of the file's material number, and a final type byte that
 * distinguishes e.g. material from source package references.
 */
static void mxf_write_umid(AVFormatContext *s, int type)
{
    MXFContext *mxf = s->priv_data;
    put_buffer(s->pb, umid_ul, 13);
    put_be24(s->pb, mxf->instance_number);
    put_buffer(s->pb, mxf->umid, 15);
    put_byte(s->pb, type);
}
/**
 * Write the 8-byte header of a strong-reference batch:
 * the number of entries followed by the size of each entry
 * (always 16, since each reference is a UUID).
 */
static void mxf_write_refs_count(ByteIOContext *pb, int ref_count)
{
    put_be32(pb, ref_count);
    put_be32(pb, 16);
}
/**
 * Number of bytes a BER-encoded length field occupies:
 * one byte for the short form (len < 128), otherwise one
 * length-of-length byte plus one byte per octet of len.
 */
static int klv_ber_length(uint64_t len)
{
    return len < 128 ? 1 : (av_log2(len) >> 3) + 2;
}
/**
 * Write a BER-encoded length field.
 * @return the number of bytes written: 1 for the short form,
 *         1 + number of length octets for the long form
 *         (matches klv_ber_length()).
 */
static int klv_encode_ber_length(ByteIOContext *pb, uint64_t len)
{
    // Determine the best BER size
    int size, bytes_written;
    if (len < 128) {
        // short form: the byte itself is the length
        put_byte(pb, len);
        return 1;
    }

    size = (av_log2(len) >> 3) + 1;
    bytes_written = size + 1;

    // long form: 0x80 | octet count, then the length big-endian
    put_byte(pb, 0x80 + size);
    while(size) {
        size--;
        put_byte(pb, len >> 8 * size & 0xff);
    }
    // the long form used to return 0, disagreeing with both the short
    // form above and klv_ber_length(); return the true byte count
    return bytes_written;
}
/**
 * Write a fixed-size 4-byte BER length (long form with 3 length octets).
 * Used where the length must be patchable or of known constant size.
 */
static void klv_encode_ber4_length(ByteIOContext *pb, int len)
{
    put_byte(pb, 0x80 + 3);
    put_be24(pb, len);
}
/*
 * Look up the essence container UL index for a codec id.
 * Returns -1 when the codec has no mapping; the mapping table
 * is terminated by an entry with id == 0.
 */
static int mxf_get_essence_container_ul_index(enum CodecID id)
{
    int i = 0;
    while (mxf_essence_mappings[i].id) {
        if (mxf_essence_mappings[i].id == id)
            return mxf_essence_mappings[i].index;
        i++;
    }
    return -1;
}
/**
 * Write the primer pack, which maps every 2-byte local tag used in the
 * header metadata to its full 16-byte UL, as required by SMPTE 377M.
 */
static void mxf_write_primer_pack(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    int local_tag_number, i = 0;

    local_tag_number = FF_ARRAY_ELEMS(mxf_local_tag_batch);

    put_buffer(pb, primer_pack_key, 16);
    // 8 bytes of batch header plus 18 bytes per (tag, UL) entry
    klv_encode_ber_length(pb, local_tag_number * 18 + 8);

    put_be32(pb, local_tag_number); // local_tag num
    put_be32(pb, 18); // item size, always 18 according to the specs

    for (i = 0; i < local_tag_number; i++) {
        put_be16(pb, mxf_local_tag_batch[i].local_tag);
        put_buffer(pb, mxf_local_tag_batch[i].uid, 16);
    }
}
/**
 * Write a local set item header: 2-byte tag followed by the
 * 2-byte size of the value that the caller writes next.
 */
static void mxf_write_local_tag(ByteIOContext *pb, int size, int tag)
{
    put_be16(pb, tag);
    put_be16(pb, size);
}
/**
 * Write a header metadata set key: the common 13-byte prefix
 * plus a 3-byte value identifying the particular set type.
 */
static void mxf_write_metadata_key(ByteIOContext *pb, unsigned int value)
{
    put_buffer(pb, header_metadata_key, 13);
    put_be24(pb, value);
}
/** Release the per-stream muxer context attached to each stream. */
static void mxf_free(AVFormatContext *s)
{
    int n;
    for (n = s->nb_streams - 1; n >= 0; n--)
        av_freep(&s->streams[n]->priv_data);
}
/**
 * Find the data definition UL for a codec type (picture/sound/...).
 * The table ends with an entry whose UL starts with 0; that terminator
 * is what gets returned when no entry matches.
 */
static const MXFCodecUL *mxf_get_data_definition_ul(int type)
{
    const MXFCodecUL *ul;
    for (ul = ff_mxf_data_definition_uls; ul->uid[0]; ul++)
        if (ul->id == type)
            break;
    return ul;
}
/**
 * Write the batch of essence container ULs, one per container in use.
 * Relies on the first essence_container_count streams carrying the
 * distinct container indices (set up during header writing).
 */
static void mxf_write_essence_container_refs(AVFormatContext *s)
{
    MXFContext *c = s->priv_data;
    ByteIOContext *pb = s->pb;
    int i;

    mxf_write_refs_count(pb, c->essence_container_count);
    av_log(s,AV_LOG_DEBUG, "essence container count:%d\n", c->essence_container_count);
    for (i = 0; i < c->essence_container_count; i++) {
        MXFStreamContext *sc = s->streams[i]->priv_data;
        put_buffer(pb, mxf_essence_container_uls[sc->index].container_ul, 16);
    }
}
/**
 * Write the Preface set: the root of the header metadata, referencing
 * the identification, content storage and essence container sets.
 * The 130-byte base length must match the fixed items written below.
 */
static void mxf_write_preface(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    ByteIOContext *pb = s->pb;

    mxf_write_metadata_key(pb, 0x012f00);
    PRINT_KEY(s, "preface key", pb->buf_ptr - 16);
    klv_encode_ber_length(pb, 130 + 16 * mxf->essence_container_count);

    // write preface set uid
    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, Preface, 0);
    PRINT_KEY(s, "preface uid", pb->buf_ptr - 16);

    // last modified date
    mxf_write_local_tag(pb, 8, 0x3B02);
    put_be64(pb, mxf->timestamp);

    // write version
    mxf_write_local_tag(pb, 2, 0x3B05);
    put_be16(pb, 258); // v1.2

    // write identification_refs
    mxf_write_local_tag(pb, 16 + 8, 0x3B06);
    mxf_write_refs_count(pb, 1);
    mxf_write_uuid(pb, Identification, 0);

    // write content_storage_refs
    mxf_write_local_tag(pb, 16, 0x3B03);
    mxf_write_uuid(pb, ContentStorage, 0);

    // operational pattern
    mxf_write_local_tag(pb, 16, 0x3B09);
    put_buffer(pb, op1a_ul, 16);

    // write essence_container_refs
    mxf_write_local_tag(pb, 8 + 16 * mxf->essence_container_count, 0x3B0A);
    mxf_write_essence_container_refs(s);

    // write dm_scheme_refs (empty batch: no descriptive metadata schemes)
    mxf_write_local_tag(pb, 8, 0x3B0B);
    put_be64(pb, 0);
}
/*
 * Write a local tag containing an ascii string as utf-16
 * (each input byte is widened to a big-endian 16-bit code unit,
 * so only 7-bit ASCII input round-trips correctly).
 */
static void mxf_write_local_tag_utf16(ByteIOContext *pb, int tag, const char *value)
{
    int i, size = strlen(value);

    mxf_write_local_tag(pb, size*2, tag);
    for (i = 0; i < size; i++)
        put_be16(pb, value[i]);
}
/**
 * Write the Identification set describing the muxing application.
 * The version string is pinned to "0.0.0" in bitexact mode so that
 * output is reproducible across libavformat versions.
 */
static void mxf_write_identification(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    const char *company = "FFmpeg";
    const char *product = "OP1a Muxer";
    const char *version;
    int length;

    mxf_write_metadata_key(pb, 0x013000);
    PRINT_KEY(s, "identification key", pb->buf_ptr - 16);

    version = s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT ?
        "0.0.0" : AV_STRINGIFY(LIBAVFORMAT_VERSION);
    length = 84 + (strlen(company)+strlen(product)+strlen(version))*2; // utf-16
    klv_encode_ber_length(pb, length);

    // write uid
    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, Identification, 0);
    PRINT_KEY(s, "identification uid", pb->buf_ptr - 16);

    // write generation uid
    mxf_write_local_tag(pb, 16, 0x3C09);
    mxf_write_uuid(pb, Identification, 1);

    mxf_write_local_tag_utf16(pb, 0x3C01, company); // Company Name
    mxf_write_local_tag_utf16(pb, 0x3C02, product); // Product Name
    mxf_write_local_tag_utf16(pb, 0x3C04, version); // Version String

    // write product uid
    mxf_write_local_tag(pb, 16, 0x3C05);
    mxf_write_uuid(pb, Identification, 2);

    // modification date
    mxf_write_local_tag(pb, 8, 0x3C06);
    put_be64(pb, mxf->timestamp);
}
/**
 * Write the ContentStorage set, referencing the two packages
 * (material + source) and the essence container data set.
 */
static void mxf_write_content_storage(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;

    mxf_write_metadata_key(pb, 0x011800);
    PRINT_KEY(s, "content storage key", pb->buf_ptr - 16);
    klv_encode_ber_length(pb, 92);

    // write uid
    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, ContentStorage, 0);
    PRINT_KEY(s, "content storage uid", pb->buf_ptr - 16);

    // write package reference
    mxf_write_local_tag(pb, 16 * 2 + 8, 0x1901);
    mxf_write_refs_count(pb, 2);
    mxf_write_uuid(pb, MaterialPackage, 0);
    mxf_write_uuid(pb, SourcePackage, 0);

    // write essence container data
    mxf_write_local_tag(pb, 8 + 16, 0x1902);
    mxf_write_refs_count(pb, 1);
    mxf_write_uuid(pb, EssenceContainerData, 0);
}
/**
 * Write a Track set for one stream inside either the material or
 * source package; the TypeBottom offset keeps the two packages'
 * generated UUIDs distinct for the same stream index.
 */
static void mxf_write_track(AVFormatContext *s, AVStream *st, enum MXFMetadataSetType type)
{
    MXFContext *mxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    MXFStreamContext *sc = st->priv_data;

    mxf_write_metadata_key(pb, 0x013b00);
    PRINT_KEY(s, "track key", pb->buf_ptr - 16);
    klv_encode_ber_length(pb, 80);

    // write track uid
    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, type == MaterialPackage ? Track : Track + TypeBottom, st->index);
    PRINT_KEY(s, "track uid", pb->buf_ptr - 16);

    // write track id; +2 because id 1 is reserved for the timecode track
    mxf_write_local_tag(pb, 4, 0x4801);
    put_be32(pb, st->index+2);

    // write track number
    mxf_write_local_tag(pb, 4, 0x4804);
    if (type == MaterialPackage)
        put_be32(pb, 0); // track number of material package is 0
    else
        put_buffer(pb, sc->track_essence_element_key + 12, 4);

    // edit rate (numerator/denominator of the container time base)
    mxf_write_local_tag(pb, 8, 0x4B01);
    put_be32(pb, mxf->time_base.den);
    put_be32(pb, mxf->time_base.num);

    // write origin
    mxf_write_local_tag(pb, 8, 0x4B02);
    put_be64(pb, 0);

    // write sequence refs
    mxf_write_local_tag(pb, 16, 0x4803);
    mxf_write_uuid(pb, type == MaterialPackage ? Sequence: Sequence + TypeBottom, st->index);
}
static const uint8_t smpte_12m_timecode_track_data_ul[] = { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x01,0x03,0x02,0x01,0x01,0x00,0x00,0x00 };
/**
 * Write the fields shared by Sequence and StructuralComponent sets:
 * the data definition UL (timecode or per-codec-type) and the duration.
 */
static void mxf_write_common_fields(AVFormatContext *s, AVStream *st)
{
    MXFContext *mxf = s->priv_data;
    ByteIOContext *pb = s->pb;

    // find data define uls
    mxf_write_local_tag(pb, 16, 0x0201);
    if (st == mxf->timecode_track)
        put_buffer(pb, smpte_12m_timecode_track_data_ul, 16);
    else {
        const MXFCodecUL *data_def_ul = mxf_get_data_definition_ul(st->codec->codec_type);
        put_buffer(pb, data_def_ul->uid, 16);
    }

    // write duration
    mxf_write_local_tag(pb, 8, 0x0202);
    put_be64(pb, mxf->duration);
}
/**
 * Write a Sequence set holding exactly one structural component:
 * a TimecodeComponent for the timecode track, otherwise a SourceClip.
 */
static void mxf_write_sequence(AVFormatContext *s, AVStream *st, enum MXFMetadataSetType type)
{
    MXFContext *mxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    enum MXFMetadataSetType component;

    mxf_write_metadata_key(pb, 0x010f00);
    PRINT_KEY(s, "sequence key", pb->buf_ptr - 16);
    klv_encode_ber_length(pb, 80);

    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, type == MaterialPackage ? Sequence: Sequence + TypeBottom, st->index);

    PRINT_KEY(s, "sequence uid", pb->buf_ptr - 16);
    mxf_write_common_fields(s, st);

    // write structural component
    mxf_write_local_tag(pb, 16 + 8, 0x1001);
    mxf_write_refs_count(pb, 1);
    if (st == mxf->timecode_track)
        component = TimecodeComponent;
    else
        component = SourceClip;
    if (type == SourcePackage)
        component += TypeBottom;  // keep source-package UUIDs distinct
    mxf_write_uuid(pb, component, st->index);
}
/**
 * Write a TimecodeComponent set: start timecode (in edit units),
 * rounded timecode base (fps) and the drop-frame flag.
 */
static void mxf_write_timecode_component(AVFormatContext *s, AVStream *st, enum MXFMetadataSetType type)
{
    MXFContext *mxf = s->priv_data;
    ByteIOContext *pb = s->pb;

    mxf_write_metadata_key(pb, 0x011400);
    klv_encode_ber_length(pb, 75);

    // UID
    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, type == MaterialPackage ? TimecodeComponent :
                   TimecodeComponent + TypeBottom, st->index);

    mxf_write_common_fields(s, st);

    // Start Time Code
    mxf_write_local_tag(pb, 8, 0x1501);
    put_be64(pb, mxf->timecode_start);

    // Rounded Time Code Base
    mxf_write_local_tag(pb, 2, 0x1502);
    put_be16(pb, mxf->timecode_base);

    // Drop Frame
    mxf_write_local_tag(pb, 1, 0x1503);
    put_byte(pb, mxf->timecode_drop_frame);
}
/**
 * Write a SourceClip (structural component) set. A material-package
 * clip points at the source package via its UMID and track id; a
 * source-package clip terminates the reference chain with a zero UMID.
 */
static void mxf_write_structural_component(AVFormatContext *s, AVStream *st, enum MXFMetadataSetType type)
{
    ByteIOContext *pb = s->pb;
    int i;

    mxf_write_metadata_key(pb, 0x011100);
    // fix typo in debug message: "sturctural" -> "structural"
    PRINT_KEY(s, "structural component key", pb->buf_ptr - 16);
    klv_encode_ber_length(pb, 108);

    // write uid
    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, type == MaterialPackage ? SourceClip: SourceClip + TypeBottom, st->index);
    PRINT_KEY(s, "structural component uid", pb->buf_ptr - 16);

    mxf_write_common_fields(s, st);

    // write start_position
    mxf_write_local_tag(pb, 8, 0x1201);
    put_be64(pb, 0);

    // write source package uid, end of the reference
    mxf_write_local_tag(pb, 32, 0x1101);
    if (type == SourcePackage) {
        for (i = 0; i < 4; i++)
            put_be64(pb, 0);  // zero UMID: chain ends here
    } else
        mxf_write_umid(s, 1);

    // write source track id
    mxf_write_local_tag(pb, 4, 0x1102);
    if (type == SourcePackage)
        put_be32(pb, 0);
    else
        put_be32(pb, st->index+2);
}
/**
 * Write the MultipleDescriptor set that groups one sub-descriptor per
 * stream; its container UL is the generic "multiple essence" UL when
 * more than one essence container type is present.
 */
static void mxf_write_multi_descriptor(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    const uint8_t *ul;
    int i;

    mxf_write_metadata_key(pb, 0x014400);
    PRINT_KEY(s, "multiple descriptor key", pb->buf_ptr - 16);
    klv_encode_ber_length(pb, 64 + 16 * s->nb_streams);

    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, MultipleDescriptor, 0);
    PRINT_KEY(s, "multi_desc uid", pb->buf_ptr - 16);

    // write sample rate
    mxf_write_local_tag(pb, 8, 0x3001);
    put_be32(pb, mxf->time_base.den);
    put_be32(pb, mxf->time_base.num);

    // write essence container ul
    mxf_write_local_tag(pb, 16, 0x3004);
    if (mxf->essence_container_count > 1)
        ul = multiple_desc_ul;
    else {
        MXFStreamContext *sc = s->streams[0]->priv_data;
        ul = mxf_essence_container_uls[sc->index].container_ul;
    }
    put_buffer(pb, ul, 16);

    // write sub descriptor refs
    mxf_write_local_tag(pb, s->nb_streams * 16 + 8, 0x3F01);
    mxf_write_refs_count(pb, s->nb_streams);
    for (i = 0; i < s->nb_streams; i++)
        mxf_write_uuid(pb, SubDescriptor, i);
}
/**
 * Write the key, length and fields common to all file descriptors:
 * instance UID, linked track id, sample rate and essence container UL.
 * @param size  byte count of the fields the CALLER will append; the
 *              constants added here cover this function's own fields.
 */
static void mxf_write_generic_desc(AVFormatContext *s, AVStream *st, const UID key, unsigned size)
{
    MXFContext *mxf = s->priv_data;
    MXFStreamContext *sc = st->priv_data;
    ByteIOContext *pb = s->pb;

    put_buffer(pb, key, 16);
    klv_encode_ber4_length(pb, size+20+8+12+20);

    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, SubDescriptor, st->index);

    mxf_write_local_tag(pb, 4, 0x3006);
    put_be32(pb, st->index+2);  // track ids are offset by 2 (1 = timecode)

    mxf_write_local_tag(pb, 8, 0x3001);
    put_be32(pb, mxf->time_base.den);
    put_be32(pb, mxf->time_base.num);

    mxf_write_local_tag(pb, 16, 0x3004);
    put_buffer(pb, mxf_essence_container_uls[sc->index].container_ul, 16);
}
// header metadata set keys for the various essence descriptors
static const UID mxf_mpegvideo_descriptor_key = { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x51,0x00 };
static const UID mxf_wav_descriptor_key       = { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x48,0x00 };
static const UID mxf_aes3_descriptor_key      = { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x47,0x00 };
static const UID mxf_cdci_descriptor_key      = { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0D,0x01,0x01,0x01,0x01,0x01,0x28,0x00 };
static const UID mxf_generic_sound_descriptor_key = { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0D,0x01,0x01,0x01,0x01,0x01,0x42,0x00 };
/**
 * Write the CDCI (color-difference component image) descriptor fields:
 * stored/display geometry, subsampling, frame layout, video line map
 * and picture essence coding UL. Interlaced material stores per-field
 * heights (>>1) and two line-map entries.
 * @param size  extra byte count for fields the caller appends after us.
 */
static void mxf_write_cdci_common(AVFormatContext *s, AVStream *st, const UID key, unsigned size)
{
    MXFStreamContext *sc = st->priv_data;
    ByteIOContext *pb = s->pb;
    int stored_height = (st->codec->height+15)/16*16;  // rounded up to macroblock rows
    int display_height;
    int f1, f2;

    mxf_write_generic_desc(s, st, key, size+8+8+8+8+8+8+5+16+sc->interlaced*4+12+20);

    mxf_write_local_tag(pb, 4, 0x3203);
    put_be32(pb, st->codec->width);

    mxf_write_local_tag(pb, 4, 0x3202);
    put_be32(pb, stored_height>>sc->interlaced);

    mxf_write_local_tag(pb, 4, 0x3209);
    put_be32(pb, st->codec->width);

    // VBI lines are excluded from the display rectangle
    if (st->codec->height == 608) // PAL + VBI
        display_height = 576;
    else if (st->codec->height == 512) // NTSC + VBI
        display_height = 486;
    else
        display_height = st->codec->height;

    mxf_write_local_tag(pb, 4, 0x3208);
    put_be32(pb, display_height>>sc->interlaced);

    // component depth
    mxf_write_local_tag(pb, 4, 0x3301);
    put_be32(pb, 8);

    // horizontal subsampling
    mxf_write_local_tag(pb, 4, 0x3302);
    put_be32(pb, 2);

    // frame layout
    mxf_write_local_tag(pb, 1, 0x320C);
    put_byte(pb, sc->interlaced);

    // video line map: first active line of each field per standard
    switch (st->codec->height) {
    case  576: f1 = 23; f2 = 336; break;
    case  608: f1 =  7; f2 = 320; break;
    case  480: f1 = 20; f2 = 283; break;
    case  512: f1 =  7; f2 = 270; break;
    case  720: f1 = 26; f2 =   0; break; // progressive
    case 1080: f1 = 21; f2 = 584; break;
    default:   f1 =  0; f2 =   0; break;
    }

    if (!sc->interlaced) {
        f2  = 0;      // single "field" for progressive
        f1 *= 2;      // frame line numbering is twice the field numbering
    }

    mxf_write_local_tag(pb, 12+sc->interlaced*4, 0x320D);
    put_be32(pb, sc->interlaced ? 2 : 1);
    put_be32(pb, 4);
    put_be32(pb, f1);
    if (sc->interlaced)
        put_be32(pb, f2);

    mxf_write_local_tag(pb, 8, 0x320E);
    put_be32(pb, sc->aspect_ratio.num);
    put_be32(pb, sc->aspect_ratio.den);

    mxf_write_local_tag(pb, 16, 0x3201);
    put_buffer(pb, *sc->codec_ul, 16);
}
/** Write a plain CDCI descriptor with no codec-specific extra fields. */
static void mxf_write_cdci_desc(AVFormatContext *s, AVStream *st)
{
    mxf_write_cdci_common(s, st, mxf_cdci_descriptor_key, 0);
}
/**
 * Write an MPEG video descriptor: CDCI fields plus bit rate and the
 * MPEG-2 profile_and_level_indication byte.
 */
static void mxf_write_mpegvideo_desc(AVFormatContext *s, AVStream *st)
{
    ByteIOContext *pb = s->pb;
    int profile_and_level = (st->codec->profile<<4) | st->codec->level;

    mxf_write_cdci_common(s, st, mxf_mpegvideo_descriptor_key, 8+5);

    // bit rate
    mxf_write_local_tag(pb, 4, 0x8000);
    put_be32(pb, st->codec->bit_rate);

    // profile and level
    mxf_write_local_tag(pb, 1, 0x8007);
    if (!st->codec->profile)
        profile_and_level |= 0x80; // escape bit
    put_byte(pb, profile_and_level);
}
/**
 * Write the generic sound descriptor fields: locked flag, sampling
 * rate (as a rational), channel count and quantization bits.
 * @param size  extra byte count for fields the caller appends after us.
 */
static void mxf_write_generic_sound_common(AVFormatContext *s, AVStream *st, const UID key, unsigned size)
{
    ByteIOContext *pb = s->pb;

    mxf_write_generic_desc(s, st, key, size+5+12+8+8);

    // audio locked
    mxf_write_local_tag(pb, 1, 0x3D02);
    put_byte(pb, 1);

    // write audio sampling rate
    mxf_write_local_tag(pb, 8, 0x3D03);
    put_be32(pb, st->codec->sample_rate);
    put_be32(pb, 1);

    mxf_write_local_tag(pb, 4, 0x3D07);
    put_be32(pb, st->codec->channels);

    mxf_write_local_tag(pb, 4, 0x3D01);
    put_be32(pb, av_get_bits_per_sample(st->codec->codec_id));
}
/**
 * Write the WAVE-specific descriptor fields on top of the generic
 * sound fields: block alignment and average bytes per second.
 */
static void mxf_write_wav_common(AVFormatContext *s, AVStream *st, const UID key, unsigned size)
{
    ByteIOContext *pb = s->pb;

    mxf_write_generic_sound_common(s, st, key, size+6+8);

    mxf_write_local_tag(pb, 2, 0x3D0A);
    put_be16(pb, st->codec->block_align);

    // avg bytes per sec
    mxf_write_local_tag(pb, 4, 0x3D09);
    put_be32(pb, st->codec->block_align*st->codec->sample_rate);
}
/** Write a WAVE audio essence descriptor. */
static void mxf_write_wav_desc(AVFormatContext *s, AVStream *st)
{
    mxf_write_wav_common(s, st, mxf_wav_descriptor_key, 0);
}
/** Write an AES3 audio essence descriptor (same fields as WAVE). */
static void mxf_write_aes3_desc(AVFormatContext *s, AVStream *st)
{
    mxf_write_wav_common(s, st, mxf_aes3_descriptor_key, 0);
}
/** Write a generic sound essence descriptor with no extra fields. */
static void mxf_write_generic_sound_desc(AVFormatContext *s, AVStream *st)
{
    mxf_write_generic_sound_common(s, st, mxf_generic_sound_descriptor_key, 0);
}
/**
 * Write a complete package (material or source): the package set itself,
 * its timecode track, and one track/sequence/clip triple per stream.
 * The source package additionally carries the essence descriptors.
 */
static void mxf_write_package(AVFormatContext *s, enum MXFMetadataSetType type)
{
    MXFContext *mxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    int i, track_count = s->nb_streams+1; // +1 for the timecode track

    if (type == MaterialPackage) {
        mxf_write_metadata_key(pb, 0x013600);
        PRINT_KEY(s, "Material Package key", pb->buf_ptr - 16);
        klv_encode_ber_length(pb, 92 + 16*track_count);
    } else {
        mxf_write_metadata_key(pb, 0x013700);
        PRINT_KEY(s, "Source Package key", pb->buf_ptr - 16);
        klv_encode_ber_length(pb, 112 + 16*track_count); // 20 bytes length for descriptor reference
    }

    // write uid
    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, type, 0);
    av_log(s,AV_LOG_DEBUG, "package type:%d\n", type);
    PRINT_KEY(s, "package uid", pb->buf_ptr - 16);

    // write package umid
    mxf_write_local_tag(pb, 32, 0x4401);
    mxf_write_umid(s, type == SourcePackage);
    PRINT_KEY(s, "package umid second part", pb->buf_ptr - 16);

    // package creation date
    mxf_write_local_tag(pb, 8, 0x4405);
    put_be64(pb, mxf->timestamp);

    // package modified date
    mxf_write_local_tag(pb, 8, 0x4404);
    put_be64(pb, mxf->timestamp);

    // write track refs
    mxf_write_local_tag(pb, track_count*16 + 8, 0x4403);
    mxf_write_refs_count(pb, track_count);
    mxf_write_uuid(pb, type == MaterialPackage ? Track :
                   Track + TypeBottom, -1); // timecode track
    for (i = 0; i < s->nb_streams; i++)
        mxf_write_uuid(pb, type == MaterialPackage ? Track : Track + TypeBottom, i);

    // write multiple descriptor reference
    if (type == SourcePackage) {
        mxf_write_local_tag(pb, 16, 0x4701);
        if (s->nb_streams > 1) {
            mxf_write_uuid(pb, MultipleDescriptor, 0);
            mxf_write_multi_descriptor(s);
        } else
            mxf_write_uuid(pb, SubDescriptor, 0);
    }

    // write timecode track
    mxf_write_track(s, mxf->timecode_track, type);
    mxf_write_sequence(s, mxf->timecode_track, type);
    mxf_write_timecode_component(s, mxf->timecode_track, type);

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        mxf_write_track(s, st, type);
        mxf_write_sequence(s, st, type);
        mxf_write_structural_component(s, st, type);

        if (type == SourcePackage) {
            MXFStreamContext *sc = st->priv_data;
            mxf_essence_container_uls[sc->index].write_desc(s, st);
        }
    }
}
/**
 * Write the EssenceContainerData set linking the source package UMID
 * to the body (SID 1) and index (SID 2) streams.
 * @return always 0
 */
static int mxf_write_essence_container_data(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;

    mxf_write_metadata_key(pb, 0x012300);
    klv_encode_ber_length(pb, 72);

    mxf_write_local_tag(pb, 16, 0x3C0A); // Instance UID
    mxf_write_uuid(pb, EssenceContainerData, 0);

    mxf_write_local_tag(pb, 32, 0x2701); // Linked Package UID
    mxf_write_umid(s, 1);

    mxf_write_local_tag(pb, 4, 0x3F07); // BodySID
    put_be32(pb, 1);

    mxf_write_local_tag(pb, 4, 0x3F06); // IndexSID
    put_be32(pb, 2);

    return 0;
}
/**
 * Write all header metadata sets in the order the spec expects
 * (preface first, packages before essence container data).
 * @return always 0
 */
static int mxf_write_header_metadata_sets(AVFormatContext *s)
{
    mxf_write_preface(s);
    mxf_write_identification(s);
    mxf_write_content_storage(s);
    mxf_write_package(s, MaterialPackage);
    mxf_write_package(s, SourcePackage);
    mxf_write_essence_container_data(s);
    return 0;
}
/**
 * Number of padding bytes needed to align `size` to the next KAG
 * boundary. A KLV fill item needs at least 20 bytes (16-byte key plus
 * BER length), so a smaller gap is widened by one full KAG; an already
 * aligned position needs no padding at all.
 */
static unsigned klv_fill_size(uint64_t size)
{
    unsigned gap = KAG_SIZE - ((unsigned)size & (KAG_SIZE-1));
    if (gap >= 20) // smallest fill item possible
        return gap & (KAG_SIZE-1);
    return gap + KAG_SIZE;
}
/**
 * Write an index table segment. With a constant edit_unit_byte_count
 * (CBR) only the fixed 80-byte form is written; otherwise a full VBR
 * segment with delta entry array and per-edit-unit index entries is
 * emitted and the accumulated entries are consumed.
 *
 * Index entry flag bits used below (per SMPTE 377M):
 *   0x80 random access, 0x40 sequence header, 0x20 forward prediction,
 *   0x10 backward prediction.
 */
static void mxf_write_index_table_segment(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    int i, j;
    int temporal_reordering = 0;
    int key_index = mxf->last_key_index;

    av_log(s, AV_LOG_DEBUG, "edit units count %d\n", mxf->edit_units_count);

    if (!mxf->edit_units_count && !mxf->edit_unit_byte_count)
        return;

    put_buffer(pb, index_table_segment_key, 16);

    if (mxf->edit_unit_byte_count) {
        klv_encode_ber_length(pb, 80);
    } else {
        klv_encode_ber_length(pb, 85 + 12+(s->nb_streams+1)*6 +
                              12+mxf->edit_units_count*(11+mxf->slice_count*4));
    }

    // instance id
    mxf_write_local_tag(pb, 16, 0x3C0A);
    mxf_write_uuid(pb, IndexTableSegment, 0);

    // index edit rate
    mxf_write_local_tag(pb, 8, 0x3F0B);
    put_be32(pb, mxf->time_base.den);
    put_be32(pb, mxf->time_base.num);

    // index start position
    mxf_write_local_tag(pb, 8, 0x3F0C);
    put_be64(pb, mxf->last_indexed_edit_unit);

    // index duration
    mxf_write_local_tag(pb, 8, 0x3F0D);
    if (mxf->edit_unit_byte_count)
        put_be64(pb, 0); // index table covers whole container
    else
        put_be64(pb, mxf->edit_units_count);

    // edit unit byte count
    mxf_write_local_tag(pb, 4, 0x3F05);
    put_be32(pb, mxf->edit_unit_byte_count);

    // index sid
    mxf_write_local_tag(pb, 4, 0x3F06);
    put_be32(pb, 2);

    // body sid
    mxf_write_local_tag(pb, 4, 0x3F07);
    put_be32(pb, 1);

    if (!mxf->edit_unit_byte_count) {
        // real slice count - 1
        mxf_write_local_tag(pb, 1, 0x3F08);
        put_byte(pb, mxf->slice_count);

        // delta entry array: element byte offsets within one edit unit
        mxf_write_local_tag(pb, 8 + (s->nb_streams+1)*6, 0x3F09);
        put_be32(pb, s->nb_streams+1); // num of entries
        put_be32(pb, 6);               // size of one entry

        // write system item delta entry
        put_byte(pb, 0);
        put_byte(pb, 0); // slice entry
        put_be32(pb, 0); // element delta

        for (i = 0; i < s->nb_streams; i++) {
            AVStream *st = s->streams[i];
            MXFStreamContext *sc = st->priv_data;
            put_byte(pb, sc->temporal_reordering);
            if (sc->temporal_reordering)
                temporal_reordering = 1;
            if (i == 0) { // video track
                put_byte(pb, 0); // slice number
                put_be32(pb, KAG_SIZE); // system item size including klv fill
            } else { // audio track
                unsigned audio_frame_size = sc->aic.samples[0]*sc->aic.sample_size;
                audio_frame_size += klv_fill_size(audio_frame_size);
                put_byte(pb, 1);
                put_be32(pb, (i-1)*audio_frame_size); // element delta
            }
        }

        // index entry array: one entry per edit unit buffered so far
        mxf_write_local_tag(pb, 8 + mxf->edit_units_count*(11+mxf->slice_count*4), 0x3F0A);
        put_be32(pb, mxf->edit_units_count);  // num of entries
        put_be32(pb, 11+mxf->slice_count*4);  // size of one entry

        for (i = 0; i < mxf->edit_units_count; i++) {
            int temporal_offset = 0;

            if (temporal_reordering) {
                // distance to the next non-B frame decides how far this
                // display position was shifted by reordering
                for (j = i+1; j < mxf->edit_units_count; j++) {
                    temporal_offset++;
                    if (mxf->index_entries[j].flags & 0x10) { // backward prediction
                        // next is not b, so is reordered
                        if (!(mxf->index_entries[i+1].flags & 0x10)) {
                            if ((mxf->index_entries[i].flags & 0x11) == 0) // I frame
                                temporal_offset = 0;
                            else
                                temporal_offset = -temporal_offset;
                        }
                        break;
                    }
                }
            }
            put_byte(pb, temporal_offset);

            if (!(mxf->index_entries[i].flags & 0x33)) { // I frame
                if (mxf->index_entries[i].flags & 0x40 && // seq header
                    (!temporal_reordering || !temporal_offset))
                    mxf->index_entries[i].flags |= 0x80; // random access
                mxf->last_key_index = key_index;
                key_index = i;
            }

            if ((mxf->index_entries[i].flags & 0x30) == 0x30) { // back and forward prediction
                put_byte(pb, mxf->last_key_index - i);
            } else {
                put_byte(pb, key_index - i); // key frame offset
                if ((mxf->index_entries[i].flags & 0x20) == 0x20) // only forward
                    mxf->last_key_index = key_index;
            }
            put_byte(pb, mxf->index_entries[i].flags);

            // stream offset
            put_be64(pb, mxf->index_entries[i].offset);
            if (s->nb_streams > 1)
                put_be32(pb, mxf->index_entries[i].slice_offset);
        }

        // make key_index relative to the next segment's numbering
        mxf->last_key_index = key_index - mxf->edit_units_count;
        mxf->last_indexed_edit_unit += mxf->edit_units_count;
        mxf->edit_units_count = 0;
    }
}
/**
 * Emit a KLV fill item padding the output up to the next KAG boundary.
 * The 16-byte key and 4-byte BER length are part of the padding, hence
 * the pad -= 16 + 4 before writing the zero bytes.
 */
static void mxf_write_klv_fill(AVFormatContext *s)
{
    unsigned pad = klv_fill_size(url_ftell(s->pb));
    if (pad) {
        put_buffer(s->pb, klv_fill_key, 16);
        pad -= 16 + 4;
        klv_encode_ber4_length(s->pb, pad);
        for (; pad; pad--)
            put_byte(s->pb, 0);
        assert(!(url_ftell(s->pb) & (KAG_SIZE-1)));
    }
}
/**
 * Write a partition pack (header, body or footer, selected by `key`)
 * and, when requested, the header metadata that follows it. Body
 * partition offsets are recorded so later partitions can back-link
 * via PreviousPartition.
 */
static void mxf_write_partition(AVFormatContext *s, int bodysid,
                                int indexsid,
                                const uint8_t *key, int write_metadata)
{
    MXFContext *mxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    int64_t header_byte_count_offset;
    unsigned index_byte_count = 0;
    uint64_t partition_offset = url_ftell(pb);

    // size of the index segment this partition will carry, if any;
    // must mirror the lengths used in mxf_write_index_table_segment()
    if (!mxf->edit_unit_byte_count && mxf->edit_units_count)
        index_byte_count = 85 + 12+(s->nb_streams+1)*6 +
            12+mxf->edit_units_count*(11+mxf->slice_count*4);
    else if (mxf->edit_unit_byte_count && indexsid)
        index_byte_count = 80;

    if (index_byte_count) {
        // add encoded ber length
        index_byte_count += 16 + klv_ber_length(index_byte_count);
        index_byte_count += klv_fill_size(index_byte_count);
    }

    if (!memcmp(key, body_partition_key, 16)) {
        // NOTE(review): av_realloc result is not checked; on allocation
        // failure the next line dereferences NULL — worth hardening.
        mxf->body_partition_offset =
            av_realloc(mxf->body_partition_offset,
                       (mxf->body_partitions_count+1)*
                       sizeof(*mxf->body_partition_offset));
        mxf->body_partition_offset[mxf->body_partitions_count++] = partition_offset;
    }

    // write klv
    put_buffer(pb, key, 16);
    klv_encode_ber_length(pb, 88 + 16 * mxf->essence_container_count);

    // write partition value
    put_be16(pb, 1); // majorVersion
    put_be16(pb, 2); // minorVersion
    put_be32(pb, KAG_SIZE); // KAGSize

    put_be64(pb, partition_offset); // ThisPartition

    // PreviousPartition: last body partition for body/footer, else 0
    if (!memcmp(key, body_partition_key, 16) && mxf->body_partitions_count > 1)
        put_be64(pb, mxf->body_partition_offset[mxf->body_partitions_count-2]); // PreviousPartition
    else if (!memcmp(key, footer_partition_key, 16) && mxf->body_partitions_count)
        put_be64(pb, mxf->body_partition_offset[mxf->body_partitions_count-1]); // PreviousPartition
    else
        put_be64(pb, 0);

    put_be64(pb, mxf->footer_partition_offset); // footerPartition

    // set offset
    header_byte_count_offset = url_ftell(pb);
    put_be64(pb, 0); // headerByteCount, update later

    // indexTable
    put_be64(pb, index_byte_count); // indexByteCount
    put_be32(pb, index_byte_count ? indexsid : 0); // indexSID

    // BodyOffset
    if (bodysid && mxf->edit_units_count && mxf->body_partitions_count) {
        put_be64(pb, mxf->body_offset);
    } else
        put_be64(pb, 0);

    put_be32(pb, bodysid); // bodySID

    // operational pattern
    put_buffer(pb, op1a_ul, 16);

    // essence container
    mxf_write_essence_container_refs(s);

    if (write_metadata) {
        // mark the start of the headermetadata and calculate metadata size
        int64_t pos, start;
        unsigned header_byte_count;

        mxf_write_klv_fill(s);
        start = url_ftell(s->pb);
        mxf_write_primer_pack(s);
        mxf_write_header_metadata_sets(s);
        pos = url_ftell(s->pb);
        header_byte_count = pos - start + klv_fill_size(pos);

        // update header_byte_count
        url_fseek(pb, header_byte_count_offset, SEEK_SET);
        put_be64(pb, header_byte_count);
        url_fseek(pb, pos, SEEK_SET);
    }

    put_flush_packet(pb);
}
// MPEG-2 picture coding ULs, laid out as {I-frame-only, long GOP} pairs
// per profile/level so mxf_get_mpeg2_codec_ul() can index base+long_gop
static const UID mxf_mpeg2_codec_uls[] = {
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x01,0x10,0x00 }, // MP-ML I-Frame
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x01,0x11,0x00 }, // MP-ML Long GOP
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x02,0x02,0x00 }, // 422P-ML I-Frame
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x02,0x03,0x00 }, // 422P-ML Long GOP
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x03,0x02,0x00 }, // MP-HL I-Frame
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x03,0x03,0x00 }, // MP-HL Long GOP
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x04,0x02,0x00 }, // 422P-HL I-Frame
    { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x04,0x03,0x00 }, // 422P-HL Long GOP
};
/**
 * Select the MPEG-2 picture coding UL matching the stream's
 * profile/level and GOP structure, or NULL for an unsupported
 * profile/level combination.
 */
static const UID *mxf_get_mpeg2_codec_ul(AVCodecContext *avctx)
{
    int long_gop = avctx->gop_size > 1 || avctx->has_b_frames;
    int base = -1;

    if (avctx->profile == 4) {            // Main Profile
        if      (avctx->level == 8) base = 0; // Main Level
        else if (avctx->level == 4) base = 4; // High Level
    } else if (avctx->profile == 0) {     // 4:2:2 Profile
        if      (avctx->level == 5) base = 2; // Main Level
        else if (avctx->level == 2) base = 6; // High Level
    }
    return base < 0 ? NULL : &mxf_mpeg2_codec_uls[base + long_gop];
}
/**
 * Scan an MPEG-2 video packet for start codes, extracting profile/level,
 * interlacing, aspect ratio, GOP timecode and picture type, and filling
 * the index-entry *flags (0x40 seq header, 0x20 forward pred, 0x10
 * backward pred, 0x80 random access).
 * @return nonzero when a picture coding UL could be determined
 */
static int mxf_parse_mpeg2_frame(AVFormatContext *s, AVStream *st, AVPacket *pkt, int *flags)
{
    MXFStreamContext *sc = st->priv_data;
    MXFContext *mxf = s->priv_data;
    uint32_t c = -1;
    int i;

    *flags = 0;

    for(i = 0; i < pkt->size - 4; i++) {
        c = (c<<8) + pkt->data[i];
        if (c == 0x1b5) {
            if ((pkt->data[i+1] & 0xf0) == 0x10) { // seq ext
                st->codec->profile = pkt->data[i+1] & 0x07;
                st->codec->level   = pkt->data[i+2] >> 4;
            } else if (i + 5 < pkt->size && (pkt->data[i+1] & 0xf0) == 0x80) { // pict coding ext
                sc->interlaced = !(pkt->data[i+5] & 0x80); // progressive frame
                break;
            }
        } else if (c == 0x1b8) { // gop
            if (pkt->data[i+4]>>6 & 0x01) { // closed
                sc->closed_gop = 1;
                if (*flags & 0x40) // sequence header present
                    *flags |= 0x80; // random access
            }
            if (!mxf->header_written) {
                // seed the file timecode from the first GOP header
                unsigned hours   =  (pkt->data[i+1]>>2) & 0x1f;
                unsigned minutes = ((pkt->data[i+1] & 0x03) << 4) | (pkt->data[i+2]>>4);
                unsigned seconds = ((pkt->data[i+2] & 0x07) << 3) | (pkt->data[i+3]>>5);
                unsigned frames  = ((pkt->data[i+3] & 0x1f) << 1) | (pkt->data[i+4]>>7);
                mxf->timecode_drop_frame = !!(pkt->data[i+1] & 0x80);
                mxf->timecode_start = (hours*3600 + minutes*60 + seconds) *
                    mxf->timecode_base + frames;
                if (mxf->timecode_drop_frame) {
                    // drop-frame: 2 frames skipped every minute except each 10th
                    unsigned tminutes = 60 * hours + minutes;
                    mxf->timecode_start -= 2 * (tminutes - tminutes / 10);
                }
                av_log(s, AV_LOG_DEBUG, "frame %d %d:%d:%d%c%d\n", mxf->timecode_start,
                       hours, minutes, seconds, mxf->timecode_drop_frame ? ';':':', frames);
            }
        } else if (c == 0x1b3) { // seq
            *flags |= 0x40;
            switch ((pkt->data[i+4]>>4) & 0xf) {
            case 2:  sc->aspect_ratio = (AVRational){  4,  3}; break;
            case 3:  sc->aspect_ratio = (AVRational){ 16,  9}; break;
            case 4:  sc->aspect_ratio = (AVRational){221,100}; break;
            default:
                // square pixels: derive DAR from the frame dimensions
                av_reduce(&sc->aspect_ratio.num, &sc->aspect_ratio.den,
                          st->codec->width, st->codec->height, 1024*1024);
            }
        } else if (c == 0x100) { // pic
            int pict_type = (pkt->data[i+2]>>3) & 0x07;
            if (pict_type == 2) { // P frame
                *flags |= 0x22;
                sc->closed_gop = 0; // reset closed gop, don't matter anymore
            } else if (pict_type == 3) { // B frame
                if (sc->closed_gop)
                    *flags |= 0x13; // only backward prediction
                else
                    *flags |= 0x33;
                sc->temporal_reordering = -1;
            } else if (!pict_type) {
                av_log(s, AV_LOG_ERROR, "error parsing mpeg2 frame\n");
                return 0;
            }
        }
    }
    if (s->oformat != &mxf_d10_muxer)
        sc->codec_ul = mxf_get_mpeg2_codec_ul(st->codec);
    return !!sc->codec_ul;
}
/**
 * Convert a UNIX timestamp into the packed binary timestamp layout used by
 * MXF: year(16) | month(8) | day(8) | hour(8) | minute(8) | second(8) | 0(8).
 *
 * Fixes a mojibake-corrupted call: "gmtime(&times;tamp)" was an HTML-entity
 * mangling of "gmtime(&timestamp)" and did not compile. Also guards against
 * gmtime() returning NULL for out-of-range inputs.
 */
static uint64_t mxf_parse_timestamp(time_t timestamp)
{
    struct tm *t = gmtime(&timestamp);
    if (!t)
        return 0; // unrepresentable time: emit the zero (unknown) timestamp
    return (uint64_t)(t->tm_year+1900) << 48 |
           (uint64_t)(t->tm_mon+1)     << 40 |
           (uint64_t) t->tm_mday       << 32 |
                      t->tm_hour       << 24 |
                      t->tm_min        << 16 |
                      t->tm_sec        << 8;
}
/* Generate a pseudo-random 32-byte UMID (two big-endian 64-bit halves written
 * into mxf->umid) and reuse the same seed as the partition instance number.
 * Only called when CODEC_FLAG_BITEXACT is not set, so bitexact output stays
 * reproducible. */
static void mxf_gen_umid(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    uint32_t seed = ff_random_get_seed();
    /* constant prefix keeps the UMID in a recognizable, non-zero range */
    uint64_t umid = seed + 0x5294713400000000LL;
    AV_WB64(mxf->umid  , umid);
    AV_WB64(mxf->umid+8, umid>>8);
    mxf->instance_number = seed;
}
/* Muxer init: validates stream layout (one leading video track, 48 kHz audio),
 * derives the container time base / timecode base from the video frame rate,
 * selects the essence container UL index per stream, and for D-10 computes the
 * fixed (CBR) edit unit byte count. Header partition itself is written lazily
 * on the first packet (see mxf_write_packet). */
static int mxf_write_header(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    int i;
    /* per-UL usage counters, used to assign element key numbers below */
    uint8_t present[FF_ARRAY_ELEMS(mxf_essence_container_uls)] = {0};
    const int *samples_per_frame = NULL;
    if (!s->nb_streams)
        return -1;
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        MXFStreamContext *sc = av_mallocz(sizeof(*sc));
        if (!sc)
            return AVERROR(ENOMEM);
        st->priv_data = sc;
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (i != 0) {
                av_log(s, AV_LOG_ERROR, "video stream must be first track\n");
                return -1;
            }
            /* only exact PAL (25) and NTSC (30000/1001) rates are supported */
            if (fabs(av_q2d(st->codec->time_base) - 1/25.0) < 0.0001) {
                samples_per_frame = PAL_samples_per_frame;
                mxf->time_base = (AVRational){ 1, 25 };
                mxf->timecode_base = 25;
            } else if (fabs(av_q2d(st->codec->time_base) - 1001/30000.0) < 0.0001) {
                samples_per_frame = NTSC_samples_per_frame;
                mxf->time_base = (AVRational){ 1001, 30000 };
                mxf->timecode_base = 30;
            } else {
                av_log(s, AV_LOG_ERROR, "unsupported video frame rate\n");
                return -1;
            }
            av_set_pts_info(st, 64, mxf->time_base.num, mxf->time_base.den);
            if (s->oformat == &mxf_d10_muxer) {
                /* map bit rate + frame rate to the D-10 essence index;
                 * each inner if/else is complete, so the outer else-if chain
                 * binds to the bit_rate tests as intended */
                if (st->codec->bit_rate == 50000000)
                    if (mxf->time_base.den == 25) sc->index = 3;
                    else                          sc->index = 5;
                else if (st->codec->bit_rate == 40000000)
                    if (mxf->time_base.den == 25) sc->index = 7;
                    else                          sc->index = 9;
                else if (st->codec->bit_rate == 30000000)
                    if (mxf->time_base.den == 25) sc->index = 11;
                    else                          sc->index = 13;
                else {
                    av_log(s, AV_LOG_ERROR, "error MXF D-10 only support 30/40/50 mbit/s\n");
                    return -1;
                }
                mxf->edit_unit_byte_count = KAG_SIZE; // system element
                /* KL (16+4) + one CBR video frame worth of bytes */
                mxf->edit_unit_byte_count += 16 + 4 + (uint64_t)st->codec->bit_rate *
                    mxf->time_base.num / (8*mxf->time_base.den);
                mxf->edit_unit_byte_count += klv_fill_size(mxf->edit_unit_byte_count);
                /* KL + element header + 8 channels x 4 bytes per sample */
                mxf->edit_unit_byte_count += 16 + 4 + 4 + samples_per_frame[0]*8*4;
                mxf->edit_unit_byte_count += klv_fill_size(mxf->edit_unit_byte_count);
            }
        } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (st->codec->sample_rate != 48000) {
                av_log(s, AV_LOG_ERROR, "only 48khz is implemented\n");
                return -1;
            }
            av_set_pts_info(st, 64, 1, st->codec->sample_rate);
            if (s->oformat == &mxf_d10_muxer) {
                if (st->index != 1) {
                    av_log(s, AV_LOG_ERROR, "MXF D-10 only support one audio track\n");
                    return -1;
                }
                /* NOTE(review): this logs but does not return an error for
                 * unsupported sample formats — confirm that is intentional */
                if (st->codec->codec_id != CODEC_ID_PCM_S16LE &&
                    st->codec->codec_id != CODEC_ID_PCM_S24LE) {
                    av_log(s, AV_LOG_ERROR, "MXF D-10 only support 16 or 24 bits le audio\n");
                }
                sc->index = ((MXFStreamContext*)s->streams[0]->priv_data)->index + 1;
            } else
                mxf->slice_count = 1;
        }
        if (!sc->index) {
            sc->index = mxf_get_essence_container_ul_index(st->codec->codec_id);
            if (sc->index == -1) {
                av_log(s, AV_LOG_ERROR, "track %d: could not find essence container ul, "
                       "codec not currently supported in container\n", i);
                return -1;
            }
        }
        sc->codec_ul = &mxf_essence_container_uls[sc->index].codec_ul;
        /* last byte of the element key numbers tracks of the same type */
        memcpy(sc->track_essence_element_key, mxf_essence_container_uls[sc->index].element_ul, 15);
        sc->track_essence_element_key[15] = present[sc->index];
        PRINT_KEY(s, "track essence element key", sc->track_essence_element_key);
        if (!present[sc->index])
            mxf->essence_container_count++;
        present[sc->index]++;
    }
    if (s->oformat == &mxf_d10_muxer) {
        mxf->essence_container_count = 1;
    }
    if (!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT))
        mxf_gen_umid(s);
    for (i = 0; i < s->nb_streams; i++) {
        MXFStreamContext *sc = s->streams[i]->priv_data;
        // update element count
        sc->track_essence_element_key[13] = present[sc->index];
        sc->order = AV_RB32(sc->track_essence_element_key+12);
    }
    if (s->timestamp)
        mxf->timestamp = mxf_parse_timestamp(s->timestamp);
    mxf->duration = -1;
    /* dedicated pseudo-track for the material package timecode */
    mxf->timecode_track = av_mallocz(sizeof(*mxf->timecode_track));
    if (!mxf->timecode_track)
        return AVERROR(ENOMEM);
    mxf->timecode_track->priv_data = av_mallocz(sizeof(MXFStreamContext));
    if (!mxf->timecode_track->priv_data)
        return AVERROR(ENOMEM);
    mxf->timecode_track->index = -1;
    if (!samples_per_frame)
        samples_per_frame = PAL_samples_per_frame;
    if (ff_audio_interleave_init(s, samples_per_frame, mxf->time_base) < 0)
        return -1;
    return 0;
}
/* SMPTE ULs for the system item written before each content package
 * (see mxf_write_system_item): the system metadata pack and the package
 * metadata set that carries the UMID. */
static const uint8_t system_metadata_pack_key[]        = { 0x06,0x0E,0x2B,0x34,0x02,0x05,0x01,0x01,0x0D,0x01,0x03,0x01,0x04,0x01,0x01,0x00 };
static const uint8_t system_metadata_package_set_key[] = { 0x06,0x0E,0x2B,0x34,0x02,0x43,0x01,0x01,0x0D,0x01,0x03,0x01,0x04,0x01,0x02,0x01 };
/**
 * Pack a frame number into a SMPTE 12M binary-coded-decimal time code word:
 * HH:MM:SS:FF, one BCD digit per nibble, frames in the top byte.
 *
 * Fix: the function received a @p drop argument but always emitted 0 for the
 * drop-frame flag (bit 30); the flag is now set from the argument so NTSC
 * drop-frame material is labelled correctly.
 *
 * @param frame edit-unit (frame) count since time code zero
 * @param drop  non-zero if the time code is drop-frame
 * @param fps   integer time code base (25 or 30)
 */
static uint32_t ff_framenum_to_12m_time_code(unsigned frame, int drop, int fps)
{
    return (0                                    << 31) | // color frame flag
           ((!!drop)                             << 30) | // drop frame flag
           ( ((frame % fps) / 10)                << 28) | // tens of frames
           ( ((frame % fps) % 10)                << 24) | // units of frames
           (0                                    << 23) | // field phase (NTSC), b0 (PAL)
           ((((frame / fps) % 60) / 10)          << 20) | // tens of seconds
           ((((frame / fps) % 60) % 10)          << 16) | // units of seconds
           (0                                    << 15) | // b0 (NTSC), b2 (PAL)
           ((((frame / (fps * 60)) % 60) / 10)   << 12) | // tens of minutes
           ((((frame / (fps * 60)) % 60) % 10)   <<  8) | // units of minutes
           (0                                    <<  7) | // b1
           (0                                    <<  6) | // b2 (NTSC), field phase (PAL)
           ((((frame / (fps * 3600) % 24)) / 10) <<  4) | // tens of hours
           (  (frame / (fps * 3600) % 24)) % 10;          // units of hours
}
/* Write the per-edit-unit system item: a 57-byte system metadata pack
 * (time code, essence container UL, continuity count) followed by a 35-byte
 * package set carrying the UMID. One system item precedes every content
 * package in the body. */
static void mxf_write_system_item(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    unsigned frame;
    uint32_t time_code;
    /* absolute edit unit index since the start of the file */
    frame = mxf->timecode_start + mxf->last_indexed_edit_unit + mxf->edit_units_count;
    // write system metadata pack
    put_buffer(pb, system_metadata_pack_key, 16);
    klv_encode_ber4_length(pb, 57);
    put_byte(pb, 0x5c); // UL, user date/time stamp, picture and sound item present
    put_byte(pb, 0x04); // content package rate
    put_byte(pb, 0x00); // content package type
    put_be16(pb, 0x00); // channel handle
    put_be16(pb, frame); // continuity count (low 16 bits of the frame index)
    if (mxf->essence_container_count > 1)
        put_buffer(pb, multiple_desc_ul, 16);
    else {
        /* single essence: use the container UL of the first (only) stream */
        MXFStreamContext *sc = s->streams[0]->priv_data;
        put_buffer(pb, mxf_essence_container_uls[sc->index].container_ul, 16);
    }
    put_byte(pb, 0);
    put_be64(pb, 0);
    put_be64(pb, 0); // creation date/time stamp
    put_byte(pb, 0x81); // SMPTE 12M time code
    time_code = ff_framenum_to_12m_time_code(frame, mxf->timecode_drop_frame, mxf->timecode_base);
    put_be32(pb, time_code);
    put_be32(pb, 0); // binary group data
    put_be64(pb, 0);
    // write system metadata package set
    put_buffer(pb, system_metadata_package_set_key, 16);
    klv_encode_ber4_length(pb, 35);
    put_byte(pb, 0x83); // UMID
    put_be16(pb, 0x20);
    mxf_write_umid(s, 1);
}
/* Write one D-10 video frame, padding up to the fixed CBR frame size with a
 * KLV fill item so every edit unit occupies exactly the same number of bytes.
 * Assumes the KL (16+4) for the essence element has already been written by
 * the caller (mxf_write_packet writes the key before dispatching here). */
static void mxf_write_d10_video_packet(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    MXFContext *mxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    /* nominal CBR frame size derived from the declared bit rate */
    int packet_size = (uint64_t)st->codec->bit_rate*mxf->time_base.num /
        (8*mxf->time_base.den); // frame size
    int pad;
    packet_size += 16 + 4;
    packet_size += klv_fill_size(packet_size);
    klv_encode_ber4_length(pb, pkt->size);
    put_buffer(pb, pkt->data, pkt->size);
    // ensure CBR muxing by padding to correct video frame size
    pad = packet_size - pkt->size - 16 - 4;
    if (pad > 20) {
        /* enough room for a proper KLV fill item (16-byte key + BER4 length) */
        put_buffer(s->pb, klv_fill_key, 16);
        pad -= 16 + 4;
        klv_encode_ber4_length(s->pb, pad);
        for (; pad; pad--)
            put_byte(s->pb, 0);
        assert(!(url_ftell(s->pb) & (KAG_SIZE-1)));
    } else {
        /* frame too large to fit a fill item: emit raw zero padding and warn,
         * KAG alignment cannot be guaranteed in this case */
        av_log(s, AV_LOG_WARNING, "cannot fill d-10 video packet\n");
        for (; pad > 0; pad--)
            put_byte(s->pb, 0);
    }
}
/* Write one D-10 (SMPTE 386M) AES3 audio element: a 4-byte element header
 * followed by 8 channels x 32-bit AES3 sub-frames per sample period. Input
 * channels are expanded to the fixed 8-channel layout; unused channels carry
 * only their channel index. */
static void mxf_write_d10_audio_packet(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    MXFContext *mxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    int frame_size = pkt->size / st->codec->block_align;
    uint8_t *samples = pkt->data;
    uint8_t *end = pkt->data + pkt->size;
    int i;
    klv_encode_ber4_length(pb, 4 + frame_size*4*8);
    /* FC byte: 0 for PAL (constant 1920 samples), else the NTSC 5-frame
     * sequence counter (frame sizes alternate in the 1601/1602 pattern) */
    put_byte(pb, (frame_size == 1920 ? 0 : (mxf->edit_units_count-1) % 5 + 1));
    put_le16(pb, frame_size);
    /* channel valid flags: one bit per populated channel */
    put_byte(pb, (1<<st->codec->channels)-1);
    while (samples < end) {
        for (i = 0; i < st->codec->channels; i++) {
            uint32_t sample;
            /* shift the PCM payload into AES3 bit positions 8..31 */
            if (st->codec->codec_id == CODEC_ID_PCM_S24LE) {
                sample = AV_RL24(samples)<< 4;
                samples += 3;
            } else {
                sample = AV_RL16(samples)<<12;
                samples += 2;
            }
            put_le32(pb, sample | i); // low bits carry the channel number
        }
        /* pad the remaining channels of the fixed 8-channel layout */
        for (; i < 8; i++)
            put_le32(pb, i);
    }
}
/* Write one packet: lazily emits the header partition on the first call,
 * starts a new body partition at GOP boundaries in VBR mode, prepends the
 * per-edit-unit system item before the video element, and records index
 * entries for the footer index table.
 *
 * Fix: the index-entry growth used the `p = av_realloc(p, ...)` anti-pattern,
 * which on failure overwrites the only pointer with NULL and leaks the old
 * array (and leaves mxf->index_entries NULL for the footer). A temporary now
 * preserves the original buffer on failure. */
static int mxf_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    MXFContext *mxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st = s->streams[pkt->stream_index];
    MXFStreamContext *sc = st->priv_data;
    int flags = 0;
    if (!mxf->edit_unit_byte_count && !(mxf->edit_units_count % EDIT_UNITS_PER_BODY)) {
        /* VBR mode: grow the index entry array one body-partition at a time */
        void *entries = av_realloc(mxf->index_entries,
            (mxf->edit_units_count + EDIT_UNITS_PER_BODY)*sizeof(*mxf->index_entries));
        if (!entries) {
            av_log(s, AV_LOG_ERROR, "could not allocate index entries\n");
            return -1;
        }
        mxf->index_entries = entries;
    }
    if (st->codec->codec_id == CODEC_ID_MPEG2VIDEO) {
        if (!mxf_parse_mpeg2_frame(s, st, pkt, &flags)) {
            av_log(s, AV_LOG_ERROR, "could not get mpeg2 profile and level\n");
            return -1;
        }
    }
    if (!mxf->header_written) {
        /* CBR (D-10) files can carry the index in the header partition;
         * VBR files index in body/footer partitions instead */
        if (mxf->edit_unit_byte_count) {
            mxf_write_partition(s, 1, 2, header_open_partition_key, 1);
            mxf_write_klv_fill(s);
            mxf_write_index_table_segment(s);
        } else {
            mxf_write_partition(s, 0, 0, header_open_partition_key, 1);
        }
        mxf->header_written = 1;
    }
    if (st->index == 0) {
        /* start a new body partition on I frames once the current one is full */
        if (!mxf->edit_unit_byte_count &&
            (!mxf->edit_units_count || mxf->edit_units_count > EDIT_UNITS_PER_BODY) &&
            !(flags & 0x33)) { // I frame, Gop start
            mxf_write_klv_fill(s);
            mxf_write_partition(s, 1, 2, body_partition_key, 0);
            mxf_write_klv_fill(s);
            mxf_write_index_table_segment(s);
        }
        mxf_write_klv_fill(s);
        mxf_write_system_item(s);
        if (!mxf->edit_unit_byte_count) {
            mxf->index_entries[mxf->edit_units_count].offset = mxf->body_offset;
            mxf->index_entries[mxf->edit_units_count].flags = flags;
            mxf->body_offset += KAG_SIZE; // size of system element
        }
        mxf->edit_units_count++;
    } else if (!mxf->edit_unit_byte_count && st->index == 1) {
        /* first audio element: record the slice offset within the edit unit */
        mxf->index_entries[mxf->edit_units_count-1].slice_offset =
            mxf->body_offset - mxf->index_entries[mxf->edit_units_count-1].offset;
    }
    mxf_write_klv_fill(s);
    put_buffer(pb, sc->track_essence_element_key, 16); // write key
    if (s->oformat == &mxf_d10_muxer) {
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            mxf_write_d10_video_packet(s, st, pkt);
        else
            mxf_write_d10_audio_packet(s, st, pkt);
    } else {
        klv_encode_ber4_length(pb, pkt->size); // write length
        put_buffer(pb, pkt->data, pkt->size);
        mxf->body_offset += 16+4+pkt->size + klv_fill_size(16+4+pkt->size);
    }
    put_flush_packet(pb);
    return 0;
}
/* Write the Random Index Pack: the (BodySID, byte offset) pair of every
 * partition in the file, ending with the footer partition and the total pack
 * length, so readers can locate partitions without scanning. */
static void mxf_write_random_index_pack(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    uint64_t pos = url_ftell(pb);
    int i;
    put_buffer(pb, random_index_pack_key, 16);
    /* fixed part (28 bytes) + 12 bytes per body partition entry */
    klv_encode_ber_length(pb, 28 + 12*mxf->body_partitions_count);
    if (mxf->edit_unit_byte_count)
        put_be32(pb, 1); // BodySID of header partition
    else
        put_be32(pb, 0); // header partition carries no essence in VBR mode
    put_be64(pb, 0); // offset of header partition
    for (i = 0; i < mxf->body_partitions_count; i++) {
        put_be32(pb, 1); // BodySID
        put_be64(pb, mxf->body_partition_offset[i]);
    }
    put_be32(pb, 0); // BodySID of footer partition
    put_be64(pb, mxf->footer_partition_offset);
    /* overall pack length including this trailing length field */
    put_be32(pb, url_ftell(pb) - pos + 4);
}
/* Finalize the file: write the footer partition (with the index table in VBR
 * mode), append the Random Index Pack, then — if the output is seekable —
 * rewrite the header partition as "closed" with the now-known duration.
 * Frees all muxer-owned buffers. */
static int mxf_write_footer(AVFormatContext *s)
{
    MXFContext *mxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    mxf->duration = mxf->last_indexed_edit_unit + mxf->edit_units_count;
    mxf_write_klv_fill(s);
    mxf->footer_partition_offset = url_ftell(pb);
    if (mxf->edit_unit_byte_count) { // no need to repeat index
        mxf_write_partition(s, 0, 0, footer_partition_key, 0);
    } else {
        mxf_write_partition(s, 0, 2, footer_partition_key, 0);
        mxf_write_klv_fill(s);
        mxf_write_index_table_segment(s);
    }
    mxf_write_klv_fill(s);
    mxf_write_random_index_pack(s);
    if (!url_is_streamed(s->pb)) {
        /* seekable output: go back and close the header partition */
        url_fseek(pb, 0, SEEK_SET);
        if (mxf->edit_unit_byte_count) {
            mxf_write_partition(s, 1, 2, header_closed_partition_key, 1);
            mxf_write_klv_fill(s);
            mxf_write_index_table_segment(s);
        } else {
            mxf_write_partition(s, 0, 0, header_closed_partition_key, 1);
        }
    }
    put_flush_packet(pb);
    ff_audio_interleave_close(s);
    av_freep(&mxf->index_entries);
    av_freep(&mxf->body_partition_offset);
    av_freep(&mxf->timecode_track->priv_data);
    av_freep(&mxf->timecode_track);
    mxf_free(s);
    return 0;
}
/* Pop the next packet from the interleaving queue once a complete edit unit
 * (one packet per stream) is buffered, or on flush. On flush with an
 * incomplete edit unit, the trailing partial unit is truncated: the queue is
 * cut at the last packet belonging to a complete unit and the remainder is
 * freed, so the file never ends mid edit unit.
 *
 * Returns 1 with *out filled, or 0 (out initialized empty) when nothing can
 * be emitted yet. */
static int mxf_interleave_get_packet(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush)
{
    int i, stream_count = 0;
    /* count streams that still have buffered packets */
    for (i = 0; i < s->nb_streams; i++)
        stream_count += !!s->streams[i]->last_in_packet_buffer;
    if (stream_count && (s->nb_streams == stream_count || flush)) {
        AVPacketList *pktl = s->packet_buffer;
        if (s->nb_streams != stream_count) {
            AVPacketList *last = NULL;
            // find last packet in edit unit
            while (pktl) {
                if (!stream_count || pktl->pkt.stream_index == 0)
                    break;
                last = pktl;
                pktl = pktl->next;
                stream_count--;
            }
            // purge packet queue
            while (pktl) {
                AVPacketList *next = pktl->next;
                /* keep per-stream tail pointers consistent while freeing */
                if(s->streams[pktl->pkt.stream_index]->last_in_packet_buffer == pktl)
                    s->streams[pktl->pkt.stream_index]->last_in_packet_buffer= NULL;
                av_free_packet(&pktl->pkt);
                av_freep(&pktl);
                pktl = next;
            }
            if (last)
                last->next = NULL;
            else {
                /* whole queue purged: nothing left to emit */
                s->packet_buffer = NULL;
                s->packet_buffer_end= NULL;
                goto out;
            }
            pktl = s->packet_buffer;
        }
        /* hand ownership of the head packet to the caller */
        *out = pktl->pkt;
        //av_log(s, AV_LOG_DEBUG, "out st:%d dts:%lld\n", (*out).stream_index, (*out).dts);
        s->packet_buffer = pktl->next;
        if(s->streams[pktl->pkt.stream_index]->last_in_packet_buffer == pktl)
            s->streams[pktl->pkt.stream_index]->last_in_packet_buffer= NULL;
        if(!s->packet_buffer)
            s->packet_buffer_end= NULL;
        av_freep(&pktl);
        return 1;
    } else {
    out:
        av_init_packet(out);
        return 0;
    }
}
/* Ordering predicate for interleaving: returns non-zero when `next` must be
 * emitted after `pkt` — later dts first, element key order as tie-breaker. */
static int mxf_compare_timestamps(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
{
    MXFStreamContext *sc_pkt  = s->streams[pkt ->stream_index]->priv_data;
    MXFStreamContext *sc_next = s->streams[next->stream_index]->priv_data;
    if (next->dts != pkt->dts)
        return next->dts > pkt->dts;
    return sc_pkt->order < sc_next->order;
}
/* Interleave callback: delegate to the shared audio re-chunking interleaver,
 * using the MXF edit-unit extraction and dts/order comparison above. */
static int mxf_interleave(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush)
{
    return ff_audio_rechunk_interleave(s, out, pkt, flush,
                               mxf_interleave_get_packet, mxf_compare_timestamps);
}
/* Generic (OP1a-style) MXF muxer registration. Fields are positional:
 * name, long_name, mime_type, extensions, priv_data_size, default audio and
 * video codecs, callbacks, flags, codec_tag, interleave. */
AVOutputFormat mxf_muxer = {
    "mxf",
    NULL_IF_CONFIG_SMALL("Material eXchange Format"),
    "application/mxf",
    "mxf",
    sizeof(MXFContext),
    CODEC_ID_PCM_S16LE,
    CODEC_ID_MPEG2VIDEO,
    mxf_write_header,
    mxf_write_packet,
    mxf_write_footer,
    AVFMT_NOTIMESTAMPS,
    NULL,
    mxf_interleave,
};
/* D-10 (SMPTE 386M / IMX) MXF muxer: shares all callbacks with the generic
 * muxer; the D-10-specific CBR paths key off `s->oformat == &mxf_d10_muxer`.
 * No default extension — selected explicitly by format name. */
AVOutputFormat mxf_d10_muxer = {
    "mxf_d10",
    NULL_IF_CONFIG_SMALL("Material eXchange Format, D-10 Mapping"),
    "application/mxf",
    NULL,
    sizeof(MXFContext),
    CODEC_ID_PCM_S16LE,
    CODEC_ID_MPEG2VIDEO,
    mxf_write_header,
    mxf_write_packet,
    mxf_write_footer,
    AVFMT_NOTIMESTAMPS,
    NULL,
    mxf_interleave,
};
| 123linslouis-android-video-cutter | jni/libavformat/mxfenc.c | C | asf20 | 73,709 |
/*
* Adobe Filmstrip demuxer
* Copyright (c) 2010 Peter Ross
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Adobe Filmstrip demuxer
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
/* 'Rand' tag marks the 36-byte trailer at the end of a Filmstrip file. */
#define RAND_TAG MKBETAG('R','a','n','d')
typedef struct {
    int leading; // extra leading lines per frame, skipped after each picture
} FilmstripDemuxContext;
/* Parse the 36-byte trailer at end-of-file (Filmstrip stores its header
 * there), then rewind to the start of the raw RGBA frames.
 *
 * Fix: width, height and frame rate come straight from the file and were
 * never validated. A zero width/height makes read_packet() divide by zero
 * when computing dts, and a zero rate is an invalid time base — reject such
 * files as invalid data instead. */
static int read_header(AVFormatContext *s,
                       AVFormatParameters *ap)
{
    FilmstripDemuxContext *film = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st;
    int fps;
    if (url_is_streamed(s->pb))
        return AVERROR(EIO); // trailer-based header requires seeking
    url_fseek(pb, url_fsize(pb) - 36, SEEK_SET);
    if (get_be32(pb) != RAND_TAG) {
        av_log(s, AV_LOG_ERROR, "magic number not found");
        return AVERROR_INVALIDDATA;
    }
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    st->nb_frames = get_be32(pb);
    if (get_be16(pb) != 0) {
        av_log_ask_for_sample(s, "unsupported packing method\n");
        return AVERROR_INVALIDDATA;
    }
    url_fskip(pb, 2); // reserved
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = CODEC_ID_RAWVIDEO;
    st->codec->pix_fmt    = PIX_FMT_RGBA;
    st->codec->codec_tag  = 0; /* no fourcc */
    st->codec->width  = get_be16(pb);
    st->codec->height = get_be16(pb);
    film->leading     = get_be16(pb);
    fps               = get_be16(pb);
    if (!st->codec->width || !st->codec->height || !fps) {
        av_log(s, AV_LOG_ERROR, "invalid dimensions or frame rate\n");
        return AVERROR_INVALIDDATA;
    }
    av_set_pts_info(st, 64, 1, fps);
    url_fseek(pb, 0, SEEK_SET);
    return 0;
}
/* Read one raw RGBA frame. Frames are stored back to back, each followed by
 * `leading` extra lines that are skipped. dts is derived from the current
 * byte offset divided by the full on-disk frame stride. */
static int read_packet(AVFormatContext *s,
                       AVPacket *pkt)
{
    FilmstripDemuxContext *film = s->priv_data;
    AVStream *st = s->streams[0];
    if (url_feof(s->pb))
        return AVERROR(EIO);
    /* frame index = offset / (picture bytes + leading-line bytes) */
    pkt->dts = url_ftell(s->pb) / (st->codec->width * (st->codec->height + film->leading) * 4);
    pkt->size = av_get_packet(s->pb, pkt, st->codec->width * st->codec->height * 4);
    url_fskip(s->pb, st->codec->width * film->leading * 4);
    if (pkt->size < 0)
        return pkt->size;
    pkt->flags |= AV_PKT_FLAG_KEY; // raw video: every frame is a key frame
    return 0;
}
/* Seek by byte offset: frames are fixed-size raw RGBA, so the target offset
 * is simply frame_index * width * height * 4 (negative timestamps clamp to 0). */
static int read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    AVStream *st = s->streams[stream_index];
    int64_t frame = FFMAX(timestamp, 0);
    url_fseek(s->pb, frame * st->codec->width * st->codec->height * 4, SEEK_SET);
    return 0;
}
/* Demuxer registration. Positional fields: name, long_name, priv_data_size,
 * read_probe (none — matched by .flm extension), read_header, read_packet,
 * read_close, read_seek. */
AVInputFormat filmstrip_demuxer = {
    "filmstrip",
    NULL_IF_CONFIG_SMALL("Adobe Filmstrip"),
    sizeof(FilmstripDemuxContext),
    NULL,
    read_header,
    read_packet,
    NULL,
    read_seek,
    .extensions = "flm",
};
| 123linslouis-android-video-cutter | jni/libavformat/filmstripdec.c | C | asf20 | 3,191 |
/*
* SoX native format demuxer
* Copyright (c) 2009 Daniel Verkamp <daniel@drv.nu>
*
* Based on libSoX sox-fmt.c
* Copyright (c) 2008 robs@users.sourceforge.net
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* SoX native format demuxer
* @file
* @author Daniel Verkamp
* @sa http://wiki.multimedia.cx/index.php?title=SoX_native_intermediate_format
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "raw.h"
#include "sox.h"
/* Probe: a SoX file starts with the ".SoX" magic in either byte order
 * (little-endian or big-endian native files). */
static int sox_probe(AVProbeData *p)
{
    const uint8_t *buf = p->buf;
    if (AV_RL32(buf) != SOX_TAG && AV_RB32(buf) != SOX_TAG)
        return 0;
    return AVPROBE_SCORE_MAX;
}
/* Parse the SoX native header. Endianness of the whole header (and of the
 * 32-bit PCM payload) is detected from the magic; the optional comment is
 * exported as "comment" metadata.
 *
 * Fix: the result of av_malloc() for the comment buffer was used without a
 * NULL check — get_buffer() would have written through a NULL pointer on
 * allocation failure. Now returns AVERROR(ENOMEM) instead. */
static int sox_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    ByteIOContext *pb = s->pb;
    unsigned header_size, comment_size;
    double sample_rate, sample_rate_frac;
    AVStream *st;
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    if (get_le32(pb) == SOX_TAG) {
        /* little-endian file */
        st->codec->codec_id = CODEC_ID_PCM_S32LE;
        header_size         = get_le32(pb);
        url_fskip(pb, 8); /* sample count */
        sample_rate         = av_int2dbl(get_le64(pb));
        st->codec->channels = get_le32(pb);
        comment_size        = get_le32(pb);
    } else {
        /* big-endian file */
        st->codec->codec_id = CODEC_ID_PCM_S32BE;
        header_size         = get_be32(pb);
        url_fskip(pb, 8); /* sample count */
        sample_rate         = av_int2dbl(get_be64(pb));
        st->codec->channels = get_be32(pb);
        comment_size        = get_be32(pb);
    }
    if (comment_size > 0xFFFFFFFFU - SOX_FIXED_HDR - 4U) {
        av_log(s, AV_LOG_ERROR, "invalid comment size (%u)\n", comment_size);
        return -1;
    }
    if (sample_rate <= 0 || sample_rate > INT_MAX) {
        av_log(s, AV_LOG_ERROR, "invalid sample rate (%f)\n", sample_rate);
        return -1;
    }
    sample_rate_frac = sample_rate - floor(sample_rate);
    if (sample_rate_frac)
        av_log(s, AV_LOG_WARNING,
               "truncating fractional part of sample rate (%f)\n",
               sample_rate_frac);
    /* header (+4 for the magic) must be 8-byte aligned and large enough */
    if ((header_size + 4) & 7 || header_size < SOX_FIXED_HDR + comment_size
        || st->codec->channels > 65535) /* Reserve top 16 bits */ {
        av_log(s, AV_LOG_ERROR, "invalid header\n");
        return -1;
    }
    if (comment_size && comment_size < UINT_MAX) {
        char *comment = av_malloc(comment_size+1);
        if (!comment)
            return AVERROR(ENOMEM);
        if (get_buffer(pb, comment, comment_size) != comment_size) {
            av_freep(&comment);
            return AVERROR(EIO);
        }
        comment[comment_size] = 0;
        /* metadata takes ownership of the allocated string */
        av_metadata_set2(&s->metadata, "comment", comment,
                         AV_METADATA_DONT_STRDUP_VAL);
    }
    url_fskip(pb, header_size - SOX_FIXED_HDR - comment_size);
    st->codec->sample_rate           = sample_rate;
    st->codec->bits_per_coded_sample = 32;
    st->codec->bit_rate              = st->codec->sample_rate *
                                       st->codec->bits_per_coded_sample *
                                       st->codec->channels;
    st->codec->block_align           = st->codec->bits_per_coded_sample *
                                       st->codec->channels / 8;
    av_set_pts_info(st, 64, 1, st->codec->sample_rate);
    return 0;
}
#define SOX_SAMPLES 1024
/* Read up to SOX_SAMPLES sample frames of raw 32-bit PCM into one packet.
 * Short reads at end-of-file still produce a (shorter) packet. */
static int sox_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    int ret, size;
    if (url_feof(s->pb))
        return AVERROR_EOF;
    size = SOX_SAMPLES*s->streams[0]->codec->block_align;
    ret = av_get_packet(s->pb, pkt, size);
    if (ret < 0)
        return AVERROR(EIO);
    pkt->stream_index = 0;
    pkt->size = ret; // actual bytes read (may be less than requested)
    return 0;
}
/* Demuxer registration. Positional fields: name, long_name, priv_data_size,
 * read_probe, read_header, read_packet, read_close, read_seek (generic PCM
 * seek, valid because the payload is fixed-size raw samples). */
AVInputFormat sox_demuxer = {
    "sox",
    NULL_IF_CONFIG_SMALL("SoX native format"),
    0,
    sox_probe,
    sox_read_header,
    sox_read_packet,
    NULL,
    pcm_read_seek,
};
| 123linslouis-android-video-cutter | jni/libavformat/soxdec.c | C | asf20 | 4,637 |
/*
* GXF muxer.
* Copyright (c) 2006 SmartJog S.A., Baptiste Coudurier <baptiste dot coudurier at smartjog dot com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "gxf.h"
#include "riff.h"
#include "audiointerleave.h"
/* Fixed payload size of one GXF audio media packet. */
#define GXF_AUDIO_PACKET_SIZE 65536
/* Per-stream muxing state. */
typedef struct GXFStreamContext {
    AudioInterleaveContext aic;
    uint32_t track_type;     ///< GXF track type code (e.g. 3 = timecode, 4/9 = MPEG)
    uint32_t sample_size;
    uint32_t sample_rate;
    uint16_t media_type;     ///< GXF media type from gxf_media_types
    uint16_t media_info;     ///< two-character media file name suffix
    int frame_rate_index;
    int lines_index;         ///< lines-per-frame code from gxf_lines_tab
    int fields;
    int iframes;             ///< running I/P/B frame counters for GOP statistics
    int pframes;
    int bframes;
    int p_per_gop;
    int b_per_i_or_p; ///< number of B frames per I frame or P frame
    int first_gop_closed;
    unsigned order;   ///< interleaving order
} GXFStreamContext;
/* Whole-file muxing state. */
typedef struct GXFContext {
    uint32_t nb_fields;        ///< total fields written so far
    uint16_t audio_tracks;
    uint16_t mpeg_tracks;
    int64_t creation_time;
    uint32_t umf_start_offset; ///< offsets/sizes of the UMF packet sections
    uint32_t umf_track_offset;
    uint32_t umf_media_offset;
    uint32_t umf_length;
    uint16_t umf_track_size;
    uint16_t umf_media_size;
    AVRational time_base;
    int flags;
    GXFStreamContext timecode_track;
    unsigned *flt_entries;    ///< offsets of packets /1024, starts after 2nd video field
    unsigned flt_entries_nb;
    uint64_t *map_offsets;    ///< offset of map packets
    unsigned map_offsets_nb;
    unsigned packet_count;
} GXFContext;
/* Map of supported picture heights to the GXF lines-per-frame code. */
static const struct {
    int height, index;
} gxf_lines_tab[] = {
    { 480,  1 }, /* NTSC */
    { 512,  1 }, /* NTSC + VBI */
    { 576,  2 }, /* PAL */
    { 608,  2 }, /* PAL + VBI */
    { 1080, 4 },
    { 720,  6 },
};
/* Codec -> GXF media type code. */
static const AVCodecTag gxf_media_types[] = {
    { CODEC_ID_MJPEG     ,   3 }, /* NTSC */
    { CODEC_ID_MJPEG     ,   4 }, /* PAL */
    { CODEC_ID_PCM_S24LE ,   9 },
    { CODEC_ID_PCM_S16LE ,  10 },
    { CODEC_ID_MPEG2VIDEO,  11 }, /* NTSC */
    { CODEC_ID_MPEG2VIDEO,  12 }, /* PAL */
    { CODEC_ID_DVVIDEO   ,  13 }, /* NTSC */
    { CODEC_ID_DVVIDEO   ,  14 }, /* PAL */
    { CODEC_ID_DVVIDEO   ,  15 }, /* 50M NTSC */
    { CODEC_ID_DVVIDEO   ,  16 }, /* 50M PAL */
    { CODEC_ID_AC3       ,  17 },
    //{ CODEC_ID_NONE,  ,   18 }, /* Non compressed 24 bit audio */
    { CODEC_ID_MPEG2VIDEO,  20 }, /* MPEG HD */
    { CODEC_ID_MPEG1VIDEO,  22 }, /* NTSC */
    { CODEC_ID_MPEG1VIDEO,  23 }, /* PAL */
    { CODEC_ID_NONE,         0 },
};
/* Server-side path prefixes embedded in material and track names. */
#define SERVER_PATH "EXT:/PDR/default/"
#define ES_NAME_PATTERN "EXT:/PDR/default/ES."
/* Look up the GXF lines-per-frame code for the stream's picture height and
 * store it in the stream context. Returns 0 on success, -1 if the height is
 * not a supported standard.
 *
 * Improvement: iterate over the actual table size instead of the hard-coded
 * magic count 6, so the loop stays correct if gxf_lines_tab grows. */
static int gxf_find_lines_index(AVStream *st)
{
    GXFStreamContext *sc = st->priv_data;
    int i;
    const int nb_entries = sizeof(gxf_lines_tab) / sizeof(gxf_lines_tab[0]);
    for (i = 0; i < nb_entries; ++i) {
        if (st->codec->height == gxf_lines_tab[i].height) {
            sc->lines_index = gxf_lines_tab[i].index;
            return 0;
        }
    }
    return -1;
}
/* Emit `to_pad` zero bytes; a non-positive count writes nothing. */
static void gxf_write_padding(ByteIOContext *pb, int64_t to_pad)
{
    int64_t remaining = to_pad;
    while (remaining-- > 0)
        put_byte(pb, 0);
}
/* Pad the packet that started at `pos` to a 4-byte boundary, then patch its
 * 32-bit size field (at pos+6, inside the packet header) in place.
 * Returns the final packet size; leaves the write position at packet end. */
static int64_t updatePacketSize(ByteIOContext *pb, int64_t pos)
{
    int64_t curpos;
    int size;
    size = url_ftell(pb) - pos;
    if (size % 4) {
        gxf_write_padding(pb, 4 - size % 4);
        size = url_ftell(pb) - pos;
    }
    curpos = url_ftell(pb);
    url_fseek(pb, pos + 6, SEEK_SET);
    put_be32(pb, size);
    url_fseek(pb, curpos, SEEK_SET);
    return curpos - pos;
}
/* Patch the 16-bit size placeholder written at `pos` with the number of
 * bytes emitted since (excluding the 2-byte size field itself), then restore
 * the write position. Returns the section size including the size field. */
static int64_t updateSize(ByteIOContext *pb, int64_t pos)
{
    int64_t end_pos = url_ftell(pb);
    url_fseek(pb, pos, SEEK_SET);
    put_be16(pb, end_pos - pos - 2);
    url_fseek(pb, end_pos, SEEK_SET);
    return end_pos - pos;
}
/* Write the fixed 16-byte GXF packet header: sync leader, type, a size field
 * that is patched later by updatePacketSize(), and the two trailer bytes. */
static void gxf_write_packet_header(ByteIOContext *pb, GXFPktType type)
{
    put_be32(pb, 0);  /* packet leader for synchro */
    put_byte(pb, 1);
    put_byte(pb, type); /* map packet */
    put_be32(pb, 0); /* size (placeholder, patched in updatePacketSize) */
    put_be32(pb, 0); /* reserved */
    put_byte(pb, 0xE1); /* trailer 1 */
    put_byte(pb, 0xE2); /* trailer 2 */
}
/* Write the MPEG auxiliary track tag: a textual description of the stream
 * (bit rate, GOP structure, chroma format, starting line) derived from the
 * I/P/B counters accumulated while muxing. Returns bytes written. */
static int gxf_write_mpeg_auxiliary(ByteIOContext *pb, AVStream *st)
{
    GXFStreamContext *sc = st->priv_data;
    char buffer[1024];
    int size, starting_line;
    if (sc->iframes) {
        /* derive rounded-up P-per-GOP and B-per-anchor ratios */
        sc->p_per_gop = sc->pframes / sc->iframes;
        if (sc->pframes % sc->iframes)
            sc->p_per_gop++;
        if (sc->pframes) {
            sc->b_per_i_or_p = sc->bframes / sc->pframes;
            if (sc->bframes % sc->pframes)
                sc->b_per_i_or_p++;
        }
        if (sc->p_per_gop > 9)
            sc->p_per_gop = 9; /* ensure value won't take more than one char */
        if (sc->b_per_i_or_p > 9)
            sc->b_per_i_or_p = 9; /* ensure value won't take more than one char */
    }
    if (st->codec->height == 512 || st->codec->height == 608)
        starting_line = 7; // VBI
    else if (st->codec->height == 480)
        starting_line = 20;
    else
        starting_line = 23; // default PAL
    /* NOTE(review): size is snprintf's would-be length; the format is short
     * enough in practice, but if it ever reached 1024 the put_buffer below
     * would over-read — confirm the bound holds for all parameter ranges. */
    size = snprintf(buffer, 1024, "Ver 1\nBr %.6f\nIpg 1\nPpi %d\nBpiop %d\n"
                    "Pix 0\nCf %d\nCg %d\nSl %d\nnl16 %d\nVi 1\nf1 1\n",
                    (float)st->codec->bit_rate, sc->p_per_gop, sc->b_per_i_or_p,
                    st->codec->pix_fmt == PIX_FMT_YUV422P ? 2 : 1, sc->first_gop_closed == 1,
                    starting_line, st->codec->height / 16);
    put_byte(pb, TRACK_MPG_AUX);
    put_byte(pb, size + 1);
    put_buffer(pb, (uint8_t *)buffer, size + 1); // include the terminating NUL
    return size + 3;
}
/* Write the 8-byte timecode auxiliary data, currently all zero (time code
 * starts at 00:00:00:00). Returns the number of bytes written. */
static int gxf_write_timecode_auxiliary(ByteIOContext *pb, GXFStreamContext *sc)
{
    put_byte(pb, 0); /* fields */
    put_byte(pb, 0); /* seconds */
    put_byte(pb, 0); /* minutes */
    put_byte(pb, 0); /* flags + hours */
    /* reserved */
    put_be32(pb, 0);
    return 8;
}
/* Write one track description section of the MAP packet: media type + track
 * id, media file name, auxiliary data (timecode or MPEG text), file system
 * version, frame rate, lines per frame, and fields per frame.
 * Returns the section size (patched in via updateSize). */
static int gxf_write_track_description(AVFormatContext *s, GXFStreamContext *sc, int index)
{
    ByteIOContext *pb = s->pb;
    int64_t pos;
    /* track types 4 and 9 are the MPEG video tracks */
    int mpeg = sc->track_type == 4 || sc->track_type == 9;
    /* track description section */
    put_byte(pb, sc->media_type + 0x80);
    put_byte(pb, index + 0xC0);
    pos = url_ftell(pb);
    put_be16(pb, 0); /* size */
    /* media file name */
    put_byte(pb, TRACK_NAME);
    put_byte(pb, strlen(ES_NAME_PATTERN) + 3);
    put_tag(pb, ES_NAME_PATTERN);
    put_be16(pb, sc->media_info);
    put_byte(pb, 0);
    if (!mpeg) {
        /* auxiliary information */
        put_byte(pb, TRACK_AUX);
        put_byte(pb, 8);
        if (sc->track_type == 3)
            gxf_write_timecode_auxiliary(pb, sc);
        else
            put_le64(pb, 0);
    }
    /* file system version */
    put_byte(pb, TRACK_VER);
    put_byte(pb, 4);
    put_be32(pb, 0);
    if (mpeg)
        gxf_write_mpeg_auxiliary(pb, s->streams[index]);
    /* frame rate */
    put_byte(pb, TRACK_FPS);
    put_byte(pb, 4);
    put_be32(pb, sc->frame_rate_index);
    /* lines per frame */
    put_byte(pb, TRACK_LINES);
    put_byte(pb, 4);
    put_be32(pb, sc->lines_index);
    /* fields per frame */
    put_byte(pb, TRACK_FPF);
    put_byte(pb, 4);
    put_be32(pb, sc->fields);
    return updateSize(pb, pos);
}
/* Write the material data section of the MAP packet: material name (server
 * path + basename of the output file), first/last field, mark in/out, and an
 * estimated size in KiB. Returns the section size. */
static int gxf_write_material_data_section(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    int64_t pos;
    const char *filename = strrchr(s->filename, '/');
    pos = url_ftell(pb);
    put_be16(pb, 0); /* size */
    /* name: strip any directory components from the output path */
    if (filename)
        filename++;
    else
        filename = s->filename;
    put_byte(pb, MAT_NAME);
    put_byte(pb, strlen(SERVER_PATH) + strlen(filename) + 1);
    put_tag(pb, SERVER_PATH);
    put_tag(pb, filename);
    put_byte(pb, 0);
    /* first field */
    put_byte(pb, MAT_FIRST_FIELD);
    put_byte(pb, 4);
    put_be32(pb, 0);
    /* last field */
    put_byte(pb, MAT_LAST_FIELD);
    put_byte(pb, 4);
    put_be32(pb, gxf->nb_fields);
    /* reserved */
    put_byte(pb, MAT_MARK_IN);
    put_byte(pb, 4);
    put_be32(pb, 0);
    put_byte(pb, MAT_MARK_OUT);
    put_byte(pb, 4);
    put_be32(pb, gxf->nb_fields);
    /* estimated size */
    put_byte(pb, MAT_SIZE);
    put_byte(pb, 4);
    put_be32(pb, url_fsize(pb) / 1024);
    return updateSize(pb, pos);
}
/* Write the track description section of the MAP packet: one description per
 * media stream plus the internal timecode track. Returns the section size. */
static int gxf_write_track_description_section(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    int64_t section_start = url_ftell(pb);
    int n;
    put_be16(pb, 0); /* size placeholder, patched by updateSize */
    for (n = 0; n < s->nb_streams; n++)
        gxf_write_track_description(s, s->streams[n]->priv_data, n);
    /* the timecode track is appended after all media tracks */
    gxf_write_track_description(s, &gxf->timecode_track, s->nb_streams);
    return updateSize(pb, section_start);
}
/* Write a MAP packet (material + track descriptions). Unless rewriting an
 * existing one, the packet's byte offset is recorded so the end-of-file MAP
 * list can reference every MAP packet.
 *
 * Fix: the offset array grew via `p = av_realloc(p, ...)`, which on failure
 * overwrites the only pointer with NULL and leaks the old array. A temporary
 * now preserves the original buffer on failure. */
static int gxf_write_map_packet(AVFormatContext *s, int rewrite)
{
    GXFContext *gxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    int64_t pos = url_ftell(pb);
    if (!rewrite) {
        /* grow the offset array in chunks of 30 entries */
        if (!(gxf->map_offsets_nb % 30)) {
            void *offsets = av_realloc(gxf->map_offsets,
                                       (gxf->map_offsets_nb+30)*sizeof(*gxf->map_offsets));
            if (!offsets) {
                av_log(s, AV_LOG_ERROR, "could not realloc map offsets\n");
                return -1;
            }
            gxf->map_offsets = offsets;
        }
        gxf->map_offsets[gxf->map_offsets_nb++] = pos; // do not increment here
    }
    gxf_write_packet_header(pb, PKT_MAP);
    /* preamble */
    put_byte(pb, 0xE0); /* version */
    put_byte(pb, 0xFF); /* reserved */
    gxf_write_material_data_section(s);
    gxf_write_track_description_section(s);
    return updatePacketSize(pb, pos);
}
/* Write the FLT (field locator table) packet: up to 1000 packet offsets
 * (in units of 1024 bytes), sampled every `fields_per_flt` fields so the
 * whole file fits in the fixed-size table. Unused slots are zero-filled. */
static int gxf_write_flt_packet(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    int64_t pos = url_ftell(pb);
    /* choose a sampling interval that keeps the entry count under 1000 */
    int fields_per_flt = (gxf->nb_fields+1) / 1000 + 1;
    int flt_entries = gxf->nb_fields / fields_per_flt - 1;
    int i = 0;
    gxf_write_packet_header(pb, PKT_FLT);
    put_le32(pb, fields_per_flt); /* number of fields */
    put_le32(pb, flt_entries); /* number of active flt entries */
    if (gxf->flt_entries) {
        /* flt_entries[] is indexed by frame; >>1 converts fields to frames */
        for (i = 0; i < flt_entries; i++)
            put_le32(pb, gxf->flt_entries[(i*fields_per_flt)>>1]);
    }
    for (; i < 1000; i++)
        put_le32(pb, 0);
    return updatePacketSize(pb, pos);
}
/* Write the 48-byte UMF material description: track lengths, marks, the
 * end-of-material time code (packed BCD-free HH:MM:SS:FF bytes), timestamps,
 * and per-type track counts. */
static int gxf_write_umf_material_description(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    int timecode_base = gxf->time_base.den == 60000 ? 60 : 50;
    // XXX drop frame
    uint32_t timecode =
        gxf->nb_fields / (timecode_base * 3600) % 24 << 24 | // hours
        gxf->nb_fields / (timecode_base * 60)   % 60 << 16 | // minutes
        gxf->nb_fields /  timecode_base         % 60 <<  8 | // seconds
        gxf->nb_fields %  timecode_base;                     // fields
    put_le32(pb, gxf->flags);
    put_le32(pb, gxf->nb_fields); /* length of the longest track */
    put_le32(pb, gxf->nb_fields); /* length of the shortest track */
    put_le32(pb, 0); /* mark in */
    put_le32(pb, gxf->nb_fields); /* mark out */
    put_le32(pb, 0); /* timecode mark in */
    put_le32(pb, timecode); /* timecode mark out */
    put_le64(pb, s->timestamp); /* modification time */
    put_le64(pb, s->timestamp); /* creation time */
    put_le16(pb, 0); /* reserved */
    put_le16(pb, 0); /* reserved */
    put_le16(pb, gxf->audio_tracks);
    put_le16(pb, 1); /* timecode track count */
    put_le16(pb, 0); /* reserved */
    put_le16(pb, gxf->mpeg_tracks);
    return 48;
}
/*
 * Write the 48-byte UMF payload description: total length plus offsets
 * (relative to umf_start_offset) and sizes of the track and media
 * sections that follow. Track/media counts include the timecode track,
 * hence nb_streams+1.
 */
static int gxf_write_umf_payload(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    ByteIOContext *pb = s->pb;

    put_le32(pb, gxf->umf_length); /* total length of the umf data */
    put_le32(pb, 3); /* version */
    put_le32(pb, s->nb_streams+1); /* track count, incl. timecode track */
    put_le32(pb, gxf->umf_track_offset); /* umf track section offset */
    put_le32(pb, gxf->umf_track_size);
    put_le32(pb, s->nb_streams+1); /* media description count */
    put_le32(pb, gxf->umf_media_offset);
    put_le32(pb, gxf->umf_media_size);
    put_le32(pb, gxf->umf_length); /* user data offset */
    put_le32(pb, 0); /* user data size */
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */
    return 48;
}
/*
 * Write the UMF track section: one (media_info, id) pair per stream,
 * plus one pair for the internal timecode track. Records the section's
 * offset in the context and returns the number of bytes written.
 */
static int gxf_write_umf_track_description(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    int64_t start = url_ftell(pb);
    int n = 0;

    gxf->umf_track_offset = start - gxf->umf_start_offset;

    /* one entry per elementary stream ... */
    while (n < s->nb_streams) {
        GXFStreamContext *track = s->streams[n]->priv_data;
        put_le16(pb, track->media_info);
        put_le16(pb, 1);
        n++;
    }
    /* ... plus one for the timecode track */
    put_le16(pb, gxf->timecode_track.media_info);
    put_le16(pb, 1);

    return url_ftell(pb) - start;
}
/*
 * Write the 32-byte UMF media trailer for an MPEG video track:
 * chroma format, GOP structure and MPEG version.
 */
static int gxf_write_umf_media_mpeg(ByteIOContext *pb, AVStream *st)
{
    GXFStreamContext *sc = st->priv_data;

    if (st->codec->pix_fmt == PIX_FMT_YUV422P)
        put_le32(pb, 2); /* 4:2:2 */
    else
        put_le32(pb, 1); /* default to 420 */
    put_le32(pb, sc->first_gop_closed == 1); /* closed = 1, open = 0, unknown = 255 */
    put_le32(pb, 3); /* top = 1, bottom = 2, frame = 3, unknown = 0 */
    put_le32(pb, 1); /* I picture per GOP */
    put_le32(pb, sc->p_per_gop);
    put_le32(pb, sc->b_per_i_or_p);
    if (st->codec->codec_id == CODEC_ID_MPEG2VIDEO)
        put_le32(pb, 2); /* MPEG-2 */
    else if (st->codec->codec_id == CODEC_ID_MPEG1VIDEO)
        put_le32(pb, 1); /* MPEG-1 */
    else
        put_le32(pb, 0); /* unknown */
    put_le32(pb, 0); /* reserved */
    return 32;
}
/*
 * Write the 32-byte UMF media trailer for the timecode track:
 * a non-drop-frame marker followed by seven reserved zero words.
 */
static int gxf_write_umf_media_timecode(ByteIOContext *pb, GXFStreamContext *sc)
{
    int k;

    put_le32(pb, 1); /* non drop frame */
    for (k = 0; k < 7; k++)
        put_le32(pb, 0); /* reserved */
    return 32;
}
/*
 * Write the 32-byte UMF media trailer for a DV track:
 * the whole section is reserved (eight big-endian zero words).
 */
static int gxf_write_umf_media_dv(ByteIOContext *pb, GXFStreamContext *sc)
{
    int remaining = 8;

    while (remaining--)
        put_be32(pb, 0);
    return 32;
}
/*
 * Write the 32-byte UMF media trailer for a PCM audio track:
 * unity sound levels (encoded as IEEE doubles via av_dbl2int) and
 * no ramp up/down.
 */
static int gxf_write_umf_media_audio(ByteIOContext *pb, GXFStreamContext *sc)
{
    put_le64(pb, av_dbl2int(1)); /* sound level to begin to */
    put_le64(pb, av_dbl2int(1)); /* sound level to begin to */
    put_le32(pb, 0); /* number of fields over which to ramp up sound level */
    put_le32(pb, 0); /* number of fields over which to ramp down sound level */
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */
    return 32;
}
#if 0
/* Disabled: UMF media trailer for MJPEG tracks; the float quant levels
 * were never implemented (see FIXMEs below). */
static int gxf_write_umf_media_mjpeg(ByteIOContext *pb, GXFStreamContext *sc)
{
    put_be64(pb, 0); /* FIXME FLOAT max chroma quant level */
    put_be64(pb, 0); /* FIXME FLOAT max luma quant level */
    put_be64(pb, 0); /* FIXME FLOAT min chroma quant level */
    put_be64(pb, 0); /* FIXME FLOAT min luma quant level */
    return 32;
}
#endif
/*
 * Write one length-prefixed UMF media description record per stream,
 * plus one for the internal timecode track. Each record carries the
 * track identity, field range, ES name and a codec-specific trailer;
 * the length prefix is back-patched after the record is written.
 * Returns the number of bytes written.
 */
static int gxf_write_umf_media_description(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    int64_t pos;
    int i, j;

    pos = url_ftell(pb);
    gxf->umf_media_offset = pos - gxf->umf_start_offset;
    /* iterate one past nb_streams: the extra pass is the timecode track */
    for (i = 0; i <= s->nb_streams; ++i) {
        GXFStreamContext *sc;
        int64_t startpos, curpos;

        if (i == s->nb_streams)
            sc = &gxf->timecode_track;
        else
            sc = s->streams[i]->priv_data;

        startpos = url_ftell(pb);
        put_le16(pb, 0); /* length, patched below */
        put_le16(pb, sc->media_info);
        put_le16(pb, 0); /* reserved */
        put_le16(pb, 0); /* reserved */
        put_le32(pb, gxf->nb_fields);
        put_le32(pb, 0); /* attributes rw, ro */
        put_le32(pb, 0); /* mark in */
        put_le32(pb, gxf->nb_fields); /* mark out */
        put_buffer(pb, ES_NAME_PATTERN, sizeof(ES_NAME_PATTERN));
        put_be16(pb, sc->media_info);
        /* zero-pad the ES name field to 88 bytes */
        for (j = sizeof(ES_NAME_PATTERN)+2; j < 88; j++)
            put_byte(pb, 0);
        put_le32(pb, sc->track_type);
        put_le32(pb, sc->sample_rate);
        put_le32(pb, sc->sample_size);
        put_le32(pb, 0); /* reserved */

        if (sc == &gxf->timecode_track)
            gxf_write_umf_media_timecode(pb, sc); /* 8 0bytes */
        else {
            AVStream *st = s->streams[i];
            switch (st->codec->codec_id) {
            case CODEC_ID_MPEG2VIDEO:
                gxf_write_umf_media_mpeg(pb, st);
                break;
            case CODEC_ID_PCM_S16LE:
                gxf_write_umf_media_audio(pb, sc);
                break;
            case CODEC_ID_DVVIDEO:
                gxf_write_umf_media_dv(pb, sc);
                break;
            }
        }

        /* back-patch this record's length prefix */
        curpos = url_ftell(pb);
        url_fseek(pb, startpos, SEEK_SET);
        put_le16(pb, curpos - startpos);
        url_fseek(pb, curpos, SEEK_SET);
    }

    return url_ftell(pb) - pos;
}
/*
 * Write a complete UMF packet: preamble, payload description, material,
 * track and media sections. umf_length is only known after writing, so
 * the first pass emits the previous (stale/zero) value in the preamble;
 * NOTE(review): correctness relies on the trailer rewriting this packet
 * with the final sizes — confirm in gxf_write_trailer().
 */
static int gxf_write_umf_packet(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    int64_t pos = url_ftell(pb);

    gxf_write_packet_header(pb, PKT_UMF);

    /* preamble */
    put_byte(pb, 3); /* first and last (only) packet */
    put_be32(pb, gxf->umf_length); /* data length */

    gxf->umf_start_offset = url_ftell(pb);
    gxf_write_umf_payload(s);
    gxf_write_umf_material_description(s);
    gxf->umf_track_size = gxf_write_umf_track_description(s);
    gxf->umf_media_size = gxf_write_umf_media_description(s);
    gxf->umf_length = url_ftell(pb) - gxf->umf_start_offset;
    return updatePacketSize(pb, pos);
}
/* audio chunk size handed to the audio interleaver (zero-terminated list) */
static const int GXF_samples_per_frame[] = { 32768, 0 };

/*
 * Derive the internal timecode track's parameters from the video track's
 * stream context. No-op when there is no video stream (vsc == NULL).
 */
static void gxf_init_timecode_track(GXFStreamContext *sc, GXFStreamContext *vsc)
{
    if (!vsc)
        return;

    sc->media_type = vsc->sample_rate == 60 ? 7 : 8; /* 60 = NTSC rate, else PAL */
    sc->sample_rate = vsc->sample_rate;
    sc->media_info = ('T'<<8) | '0';
    sc->track_type = 3;
    sc->frame_rate_index = vsc->frame_rate_index;
    sc->lines_index = vsc->lines_index;
    sc->sample_size = 16;
    sc->fields = vsc->fields;
}
/*
 * Muxer init: validate the stream layout (at most one video track, which
 * must come first; audio must be mono 48kHz s16le), fill the per-stream
 * GXF contexts, then emit the initial MAP, FLT and UMF packets.
 * Returns 0 on success, a negative value on unsupported input.
 */
static int gxf_write_header(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    GXFContext *gxf = s->priv_data;
    GXFStreamContext *vsc = NULL;
    uint8_t tracks[255] = {0}; /* per-media-letter track number counters */
    int i, media_info = 0;

    if (url_is_streamed(pb)) {
        av_log(s, AV_LOG_ERROR, "gxf muxer does not support streamed output, patch welcome");
        return -1;
    }

    gxf->flags |= 0x00080000; /* material is simple clip */
    for (i = 0; i < s->nb_streams; ++i) {
        AVStream *st = s->streams[i];
        GXFStreamContext *sc = av_mallocz(sizeof(*sc));
        if (!sc)
            return AVERROR(ENOMEM);
        st->priv_data = sc;

        sc->media_type = ff_codec_get_tag(gxf_media_types, st->codec->codec_id);
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (st->codec->codec_id != CODEC_ID_PCM_S16LE) {
                av_log(s, AV_LOG_ERROR, "only 16 BIT PCM LE allowed for now\n");
                return -1;
            }
            if (st->codec->sample_rate != 48000) {
                av_log(s, AV_LOG_ERROR, "only 48000hz sampling rate is allowed\n");
                return -1;
            }
            if (st->codec->channels != 1) {
                av_log(s, AV_LOG_ERROR, "only mono tracks are allowed\n");
                return -1;
            }
            sc->track_type = 2;
            sc->sample_rate = st->codec->sample_rate;
            av_set_pts_info(st, 64, 1, sc->sample_rate);
            sc->sample_size = 16;
            /* -2 marks "not applicable" for audio tracks */
            sc->frame_rate_index = -2;
            sc->lines_index = -2;
            sc->fields = -2;
            gxf->audio_tracks++;
            gxf->flags |= 0x04000000; /* audio is 16 bit pcm */
            media_info = 'A';
        } else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (i != 0) {
                av_log(s, AV_LOG_ERROR, "video stream must be the first track\n");
                return -1;
            }
            /* FIXME check from time_base ? */
            if (st->codec->height == 480 || st->codec->height == 512) { /* NTSC or NTSC+VBI */
                sc->frame_rate_index = 5;
                sc->sample_rate = 60;
                gxf->flags |= 0x00000080;
                gxf->time_base = (AVRational){ 1001, 60000 };
            } else { /* assume PAL */
                sc->frame_rate_index = 6;
                sc->media_type++;
                sc->sample_rate = 50;
                gxf->flags |= 0x00000040;
                gxf->time_base = (AVRational){ 1, 50 };
            }
            av_set_pts_info(st, 64, gxf->time_base.num, gxf->time_base.den);
            if (gxf_find_lines_index(st) < 0)
                sc->lines_index = -1;
            sc->sample_size = st->codec->bit_rate;
            sc->fields = 2; /* interlaced */
            vsc = sc;

            switch (st->codec->codec_id) {
            case CODEC_ID_MJPEG:
                sc->track_type = 1;
                gxf->flags |= 0x00004000;
                media_info = 'J';
                break;
            case CODEC_ID_MPEG1VIDEO:
                sc->track_type = 9;
                gxf->mpeg_tracks++;
                media_info = 'L';
                break;
            case CODEC_ID_MPEG2VIDEO:
                sc->first_gop_closed = -1; /* unknown until the first frame is parsed */
                sc->track_type = 4;
                gxf->mpeg_tracks++;
                gxf->flags |= 0x00008000;
                media_info = 'M';
                break;
            case CODEC_ID_DVVIDEO:
                if (st->codec->pix_fmt == PIX_FMT_YUV422P) {
                    sc->media_type += 2;
                    sc->track_type = 6;
                    gxf->flags |= 0x00002000;
                    media_info = 'E';
                } else {
                    sc->track_type = 5;
                    gxf->flags |= 0x00001000;
                    media_info = 'D';
                }
                break;
            default:
                av_log(s, AV_LOG_ERROR, "video codec not supported\n");
                return -1;
            }
        }
        /* FIXME first 10 audio tracks are 0 to 9 next 22 are A to V */
        sc->media_info = media_info<<8 | ('0'+tracks[media_info]++);
        sc->order = s->nb_streams - st->index;
    }

    if (ff_audio_interleave_init(s, GXF_samples_per_frame, (AVRational){ 1, 48000 }) < 0)
        return -1;

    gxf_init_timecode_track(&gxf->timecode_track, vsc);
    gxf->flags |= 0x200000; // time code track is non-drop frame

    gxf_write_map_packet(s, 0);
    gxf_write_flt_packet(s);
    gxf_write_umf_packet(s);

    gxf->packet_count = 3;

    put_flush_packet(pb);
    return 0;
}
/* An EOS packet is a bare packet header with no payload. */
static int gxf_write_eos_packet(ByteIOContext *pb)
{
    int64_t start = url_ftell(pb);

    gxf_write_packet_header(pb, PKT_EOS);
    return updatePacketSize(pb, start);
}
/*
 * Finalize the file: flush the audio interleaver, write the EOS packet,
 * then seek back and rewrite the leading MAP/FLT/UMF packets — and every
 * interior MAP packet recorded in map_offsets — with the now-known
 * durations and offsets. Frees the context's index tables.
 */
static int gxf_write_trailer(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    int64_t end;
    int i;

    ff_audio_interleave_close(s);

    gxf_write_eos_packet(pb);
    end = url_ftell(pb);
    url_fseek(pb, 0, SEEK_SET);
    /* overwrite map, flt and umf packets with new values */
    gxf_write_map_packet(s, 1);
    gxf_write_flt_packet(s);
    gxf_write_umf_packet(s);
    put_flush_packet(pb);
    /* update duration in all map packets */
    for (i = 1; i < gxf->map_offsets_nb; i++) {
        url_fseek(pb, gxf->map_offsets[i], SEEK_SET);
        gxf_write_map_packet(s, 1);
        put_flush_packet(pb);
    }

    /* restore the file position to the true end of file */
    url_fseek(pb, end, SEEK_SET);

    av_freep(&gxf->flt_entries);
    av_freep(&gxf->map_offsets);

    return 0;
}
/*
 * Scan an MPEG bitstream for the first picture start code (0x00000100)
 * and return the picture_coding_type (I/P/B). Also records, once, whether
 * the first GOP is closed (closed_gop bit of the GOP start code 0x1B8).
 * NOTE(review): reads buf[i+1] (and buf[i+4] on a GOP code) beyond the
 * scan position without an explicit bounds check against size — assumes
 * the caller always provides complete frames; confirm.
 */
static int gxf_parse_mpeg_frame(GXFStreamContext *sc, const uint8_t *buf, int size)
{
    uint32_t c=-1;
    int i;

    for(i=0; i<size-4 && c!=0x100; i++){
        c = (c<<8) + buf[i];
        if(c == 0x1B8 && sc->first_gop_closed == -1) /* GOP start code */
            sc->first_gop_closed= (buf[i+4]>>6)&1;
    }
    return (buf[i+1]>>3)&7; /* picture_coding_type bits */
}
/*
 * Write the 16-byte media packet preamble: media type, track index,
 * field number, a codec-dependent size/type word, and flags.
 * `size` is the padded payload size. Returns 16 (bytes written).
 */
static int gxf_write_media_preamble(AVFormatContext *s, AVPacket *pkt, int size)
{
    GXFContext *gxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st = s->streams[pkt->stream_index];
    GXFStreamContext *sc = st->priv_data;
    unsigned field_nb;
    /* If the video is frame-encoded, the frame numbers shall be represented by
     * even field numbers.
     * see SMPTE360M-2004  6.4.2.1.3 Media field number */
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        field_nb = gxf->nb_fields;
    } else {
        /* rescale the 48kHz audio dts to field numbers */
        field_nb = av_rescale_rnd(pkt->dts, gxf->time_base.den,
                                  (int64_t)48000*gxf->time_base.num, AV_ROUND_UP);
    }

    put_byte(pb, sc->media_type);
    put_byte(pb, st->index);
    put_be32(pb, field_nb);
    if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        put_be16(pb, 0);
        put_be16(pb, size / 2); /* size in 16-bit words */
    } else if (st->codec->codec_id == CODEC_ID_MPEG2VIDEO) {
        int frame_type = gxf_parse_mpeg_frame(sc, pkt->data, pkt->size);
        if (frame_type == FF_I_TYPE) {
            put_byte(pb, 0x0d);
            sc->iframes++;
        } else if (frame_type == FF_B_TYPE) {
            put_byte(pb, 0x0f);
            sc->bframes++;
        } else {
            put_byte(pb, 0x0e);
            sc->pframes++;
        }
        put_be24(pb, size);
    } else if (st->codec->codec_id == CODEC_ID_DVVIDEO) {
        put_byte(pb, size / 4096); /* size in 4096-byte units */
        put_be24(pb, 0);
    } else
        put_be32(pb, size);
    put_be32(pb, field_nb);
    put_byte(pb, 1); /* flags */
    put_byte(pb, 0); /* reserved */
    return 16;
}
/*
 * Write one media packet. MPEG-2 frames are padded to a multiple of 4
 * bytes and audio packets to the fixed GXF audio packet size. For video
 * packets a seek-table (FLT) entry is recorded and the field counter
 * advanced; a fresh MAP packet is emitted every 100 packets.
 * Returns 0 on success, -1 on allocation failure.
 */
static int gxf_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    GXFContext *gxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st = s->streams[pkt->stream_index];
    int64_t pos = url_ftell(pb);
    int padding = 0;

    gxf_write_packet_header(pb, PKT_MEDIA);
    if (st->codec->codec_id == CODEC_ID_MPEG2VIDEO && pkt->size % 4) /* MPEG-2 frames must be padded */
        padding = 4 - pkt->size % 4;
    else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        padding = GXF_AUDIO_PACKET_SIZE - pkt->size;
    gxf_write_media_preamble(s, pkt, pkt->size + padding);
    put_buffer(pb, pkt->data, pkt->size);
    gxf_write_padding(pb, padding);

    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* grow the FLT entry table in chunks of 500 entries; go through a
         * temporary pointer so the old buffer is not leaked when
         * av_realloc() fails (the original overwrote flt_entries directly) */
        if (!(gxf->flt_entries_nb % 500)) {
            void *tmp = av_realloc(gxf->flt_entries,
                                   (gxf->flt_entries_nb+500)*sizeof(*gxf->flt_entries));
            if (!tmp) {
                av_log(s, AV_LOG_ERROR, "could not reallocate flt entries\n");
                return -1;
            }
            gxf->flt_entries = tmp;
        }
        gxf->flt_entries[gxf->flt_entries_nb++] = url_ftell(pb) / 1024;
        gxf->nb_fields += 2; // count fields
    }

    updatePacketSize(pb, pos);

    gxf->packet_count++;
    if (gxf->packet_count == 100) {
        gxf_write_map_packet(s, 0);
        gxf->packet_count = 0;
    }

    put_flush_packet(pb);

    return 0;
}
/*
 * Interleave comparator: order packets by field number. Audio dts (48kHz
 * sample units) is rescaled to fields and rounded down to an even value
 * so audio sorts before the video of the same frame; ties break on the
 * per-stream order value. Returns nonzero when `next` sorts after `cur`.
 */
static int gxf_compare_field_nb(AVFormatContext *s, AVPacket *next, AVPacket *cur)
{
    GXFContext *gxf = s->priv_data;
    AVPacket *pkt[2] = { cur, next };
    int i, field_nb[2];
    GXFStreamContext *sc[2];

    for (i = 0; i < 2; i++) {
        AVStream *st = s->streams[pkt[i]->stream_index];
        sc[i] = st->priv_data;
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            field_nb[i] = av_rescale_rnd(pkt[i]->dts, gxf->time_base.den,
                                         (int64_t)48000*gxf->time_base.num, AV_ROUND_UP);
            field_nb[i] &= ~1; // compare against even field number because audio must be before video
        } else
            field_nb[i] = pkt[i]->dts; // dts are field based
    }

    return field_nb[1] > field_nb[0] ||
        (field_nb[1] == field_nb[0] && sc[1]->order > sc[0]->order);
}
/*
 * Interleave callback: force every video packet to span two fields,
 * then delegate to the audio-rechunking interleaver with the GXF
 * field-number comparator.
 */
static int gxf_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush)
{
    if (pkt) {
        AVStream *st = s->streams[pkt->stream_index];
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            pkt->duration = 2; // enforce 2 fields
    }
    return ff_audio_rechunk_interleave(s, out, pkt, flush,
                                       av_interleave_packet_per_dts, gxf_compare_field_nb);
}
/* GXF (SMPTE 360M General eXchange Format) muxer definition.
 * Fields are positional; comments name the less obvious ones. */
AVOutputFormat gxf_muxer = {
    "gxf",
    NULL_IF_CONFIG_SMALL("GXF format"),
    NULL,                 /* mime_type */
    "gxf",                /* extensions */
    sizeof(GXFContext),   /* priv_data_size */
    CODEC_ID_PCM_S16LE,   /* default audio codec */
    CODEC_ID_MPEG2VIDEO,  /* default video codec */
    gxf_write_header,
    gxf_write_packet,
    gxf_write_trailer,
    0,                    /* flags */
    NULL,
    gxf_interleave_packet,
};
| 123linslouis-android-video-cutter | jni/libavformat/gxfenc.c | C | asf20 | 28,920 |
/*
* RTP packetization for AMR audio
* Copyright (c) 2007 Luca Abeni
* Copyright (c) 2009 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "rtpenc.h"
/**
* Packetize AMR frames into RTP packets according to RFC 3267,
* in octet-aligned mode.
*/
void ff_rtp_send_amr(AVFormatContext *s1, const uint8_t *buff, int size)
{
    RTPMuxContext *s = s1->priv_data;
    /* worst case header: 1 CMR byte + one TOC byte per frame */
    int max_header_toc_size = 1 + s->max_frames_per_packet;
    uint8_t *p;
    int len;

    /* Test if the packet must be sent. */
    len = s->buf_ptr - s->buf;
    if (s->num_frames == s->max_frames_per_packet || (len && len + size - 1 > s->max_payload_size)) {
        int header_size = s->num_frames + 1;
        /* move the CMR+TOC header flush against the frame data */
        p = s->buf + max_header_toc_size - header_size;
        if (p != s->buf)
            memmove(p, s->buf, header_size);

        ff_rtp_send_data(s1, p, s->buf_ptr - p, 1);

        s->num_frames = 0;
    }

    if (!s->num_frames) {
        s->buf[0] = 0xf0; /* CMR byte for a fresh packet */
        s->buf_ptr = s->buf + max_header_toc_size;
        s->timestamp = s->cur_timestamp;
    } else {
        /* Mark the previous TOC entry as having more entries following. */
        s->buf[1 + s->num_frames - 1] |= 0x80;
    }

    /* Copy the frame type and quality bits. */
    s->buf[1 + s->num_frames++] = buff[0] & 0x7C;
    buff++;
    size--;
    memcpy(s->buf_ptr, buff, size);
    s->buf_ptr += size;
}
| 123linslouis-android-video-cutter | jni/libavformat/rtpenc_amr.c | C | asf20 | 2,139 |
/*
* copyright (c) 2007 Luca Abeni
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "rtpenc.h"
/*
 * Packetize AAC frames into RTP packets (AAC-hbr style: a 16-bit
 * AU-headers-length bit count, then 2-byte AU headers, then AU data).
 * Small AUs are aggregated; an AU larger than the payload size is
 * fragmented across several packets.
 */
void ff_rtp_send_aac(AVFormatContext *s1, const uint8_t *buff, int size)
{
    RTPMuxContext *s = s1->priv_data;
    int len, max_packet_size;
    uint8_t *p;
    const int max_frames_per_packet = s->max_frames_per_packet ? s->max_frames_per_packet : 5;
    const int max_au_headers_size = 2 + 2 * max_frames_per_packet;

    /* skip ADTS header, if present (no extradata means in-band headers) */
    if ((s1->streams[0]->codec->extradata_size) == 0) {
        size -= 7;
        buff += 7;
    }
    max_packet_size = s->max_payload_size - max_au_headers_size;

    /* test if the packet must be sent */
    len = (s->buf_ptr - s->buf);
    if ((s->num_frames == max_frames_per_packet) || (len && (len + size) > s->max_payload_size)) {
        int au_size = s->num_frames * 2;

        /* move the AU headers flush against the AU data */
        p = s->buf + max_au_headers_size - au_size - 2;
        if (p != s->buf) {
            memmove(p + 2, s->buf + 2, au_size);
        }
        /* Write the AU-headers-length: a 16-bit big-endian count in BITS.
         * BUG FIX: the old high-byte expression ((au_size * 8) & 0xFF) >> 8
         * masked the low byte first and therefore always wrote 0; mask
         * with 0xFF00 before shifting. */
        p[0] = ((au_size * 8) & 0xFF00) >> 8;
        p[1] = (au_size * 8) & 0xFF;

        ff_rtp_send_data(s1, p, s->buf_ptr - p, 1);

        s->num_frames = 0;
    }
    if (s->num_frames == 0) {
        s->buf_ptr = s->buf + max_au_headers_size;
        s->timestamp = s->cur_timestamp;
    }

    if (size <= max_packet_size) {
        /* queue the AU: 13-bit AU-size, low 3 bits (AU-Index/delta) zero */
        p = s->buf + s->num_frames++ * 2 + 2;
        *p++ = size >> 5;
        *p = (size & 0x1F) << 3;
        memcpy(s->buf_ptr, buff, size);
        s->buf_ptr += size;
    } else {
        int au_size = size;

        max_packet_size = s->max_payload_size - 4;
        p = s->buf;
        /* single AU header: AU-headers-length = 16 bits */
        p[0] = 0;
        p[1] = 16;
        while (size > 0) {
            len = FFMIN(size, max_packet_size);
            p[2] = au_size >> 5;
            p[3] = (au_size & 0x1F) << 3;
            memcpy(p + 4, buff, len);
            /* marker set on the final fragment only */
            ff_rtp_send_data(s1, p, len + 4, len == size);

            size -= len;
            buff += len;
        }
    }
}
| 123linslouis-android-video-cutter | jni/libavformat/rtpenc_aac.c | C | asf20 | 2,738 |
/*
* Beam Software SIFF demuxer
* Copyright (c) 2007 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
/* chunk tags used by the SIFF container */
enum SIFFTags{
    TAG_SIFF = MKTAG('S', 'I', 'F', 'F'),
    TAG_BODY = MKTAG('B', 'O', 'D', 'Y'),
    TAG_VBHD = MKTAG('V', 'B', 'H', 'D'),
    TAG_SHDR = MKTAG('S', 'H', 'D', 'R'),
    TAG_VBV1 = MKTAG('V', 'B', 'V', '1'),
    TAG_SOUN = MKTAG('S', 'O', 'U', 'N'),
};

/* per-frame flag bits read from each VBV1 frame header */
enum VBFlags{
    VB_HAS_GMC     = 0x01,
    VB_HAS_AUDIO   = 0x04,
    VB_HAS_VIDEO   = 0x08,
    VB_HAS_PALETTE = 0x10,
    VB_HAS_LENGTH  = 0x20
};

typedef struct SIFFContext{
    int frames;      /* total frame count from the VBHD header */
    int cur_frame;   /* next frame to deliver */

    int rate;        /* audio sample rate */
    int bits;        /* audio bits per sample */
    int block_align; /* bytes per second of audio (rate * bits/8) */

    int has_video;
    int has_audio;

    int curstrm;     /* substream the next packet comes from; -1 = read a new frame header */
    int pktsize;     /* remaining size of the current frame */
    int gmcsize;     /* size of global motion data in the frame header (0 or 4) */
    int sndsize;     /* size of the audio part of the current frame */

    int flags;       /* VBFlags of the current frame */
    uint8_t gmc[4];  /* stashed GMC bytes, prepended to the video packet */
}SIFFContext;
/* Probe: require the 'SIFF' magic plus a known body tag at offset 8. */
static int siff_probe(AVProbeData *p)
{
    uint32_t body = AV_RL32(p->buf + 8);

    if (AV_RL32(p->buf) == TAG_SIFF && (body == TAG_VBV1 || body == TAG_SOUN))
        return AVPROBE_SCORE_MAX;
    return 0;
}
/*
 * Create the unsigned 8-bit PCM audio stream from the parameters already
 * stored in the context (rate/bits/block_align).
 * Returns 0 on success, -1 if the stream cannot be allocated.
 */
static int create_audio_stream(AVFormatContext *s, SIFFContext *c)
{
    AVStream *ast;
    ast = av_new_stream(s, 0);
    if (!ast)
        return -1;
    ast->codec->codec_type            = AVMEDIA_TYPE_AUDIO;
    ast->codec->codec_id              = CODEC_ID_PCM_U8;
    ast->codec->channels              = 1;
    ast->codec->bits_per_coded_sample = c->bits;
    ast->codec->sample_rate           = c->rate;
    ast->codec->frame_size            = c->block_align;
    av_set_pts_info(ast, 16, 1, c->rate);
    return 0;
}
/*
 * Parse the VBHD header of a VBV1 (video) SIFF file: validate the chunk,
 * read dimensions, frame count and audio parameters, then create the
 * video stream (and an audio stream when a sample rate is present).
 * Returns 0 on success, -1 on malformed input.
 */
static int siff_parse_vbv1(AVFormatContext *s, SIFFContext *c, ByteIOContext *pb)
{
    AVStream *st;
    int width, height;

    if (get_le32(pb) != TAG_VBHD){
        av_log(s, AV_LOG_ERROR, "Header chunk is missing\n");
        return -1;
    }
    if(get_be32(pb) != 32){
        av_log(s, AV_LOG_ERROR, "Header chunk size is incorrect\n");
        return -1;
    }
    if(get_le16(pb) != 1){
        av_log(s, AV_LOG_ERROR, "Incorrect header version\n");
        return -1;
    }
    width = get_le16(pb);
    height = get_le16(pb);
    url_fskip(pb, 4);
    c->frames = get_le16(pb);
    if(!c->frames){
        av_log(s, AV_LOG_ERROR, "File contains no frames ???\n");
        return -1;
    }
    c->bits = get_le16(pb);
    c->rate = get_le16(pb);
    c->block_align = c->rate * (c->bits >> 3); /* one second of audio */
    url_fskip(pb, 16); //zeroes

    st = av_new_stream(s, 0);
    if (!st)
        return -1;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = CODEC_ID_VB;
    st->codec->codec_tag  = MKTAG('V', 'B', 'V', '1');
    st->codec->width      = width;
    st->codec->height     = height;
    st->codec->pix_fmt    = PIX_FMT_PAL8;
    av_set_pts_info(st, 16, 1, 12); /* time base 1/12 */

    c->cur_frame = 0;
    c->has_video = 1;
    c->has_audio = !!c->rate; /* nonzero sample rate implies audio */
    c->curstrm   = -1;
    if (c->has_audio && create_audio_stream(s, c) < 0)
        return -1;
    return 0;
}
/*
 * Parse the SHDR header of an audio-only (SOUN) SIFF file and create
 * the audio stream. Returns 0 on success, -1 on malformed input.
 */
static int siff_parse_soun(AVFormatContext *s, SIFFContext *c, ByteIOContext *pb)
{
    if (get_le32(pb) != TAG_SHDR){
        av_log(s, AV_LOG_ERROR, "Header chunk is missing\n");
        return -1;
    }
    if(get_be32(pb) != 8){
        av_log(s, AV_LOG_ERROR, "Header chunk size is incorrect\n");
        return -1;
    }
    url_fskip(pb, 4); //unknown value
    c->rate = get_le16(pb);
    c->bits = get_le16(pb);
    c->block_align = c->rate * (c->bits >> 3); /* one second of audio */
    return create_audio_stream(s, c);
}
/*
 * Read the SIFF container header: validate the 'SIFF' magic, dispatch to
 * the VBV1 (video) or SOUN (audio-only) sub-header parser, then position
 * the stream just inside the 'BODY' chunk.
 * Returns 0 on success, -1 on malformed input.
 */
static int siff_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    ByteIOContext *pb = s->pb;
    SIFFContext *c = s->priv_data;
    uint32_t tag;

    if (get_le32(pb) != TAG_SIFF)
        return -1;
    url_fskip(pb, 4); //ignore size
    tag = get_le32(pb);

    if (tag != TAG_VBV1 && tag != TAG_SOUN){
        av_log(s, AV_LOG_ERROR, "Not a VBV file\n");
        return -1;
    }

    if (tag == TAG_VBV1 && siff_parse_vbv1(s, c, pb) < 0)
        return -1;
    if (tag == TAG_SOUN && siff_parse_soun(s, c, pb) < 0)
        return -1;
    /* consistency fix: use the TAG_BODY constant declared with the other
     * SIFFTags instead of respelling MKTAG('B','O','D','Y') inline */
    if (get_le32(pb) != TAG_BODY){
        av_log(s, AV_LOG_ERROR, "'BODY' chunk is missing\n");
        return -1;
    }
    url_fskip(pb, 4); //ignore size

    return 0;
}
/*
 * Deliver the next packet. In video files each frame starts with a small
 * header carrying the packet size, flags, optional GMC data and the size
 * of the interleaved audio part; the audio substream (if any) is emitted
 * before the video of the same frame. Audio-only files are simply read
 * in block_align-sized chunks.
 */
static int siff_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    SIFFContext *c = s->priv_data;
    int size;

    if (c->has_video){
        if (c->cur_frame >= c->frames)
            return AVERROR(EIO);
        if (c->curstrm == -1){
            /* read the per-frame header */
            c->pktsize = get_le32(s->pb) - 4;
            c->flags = get_le16(s->pb);
            c->gmcsize = (c->flags & VB_HAS_GMC) ? 4 : 0;
            if (c->gmcsize)
                get_buffer(s->pb, c->gmc, c->gmcsize);
            c->sndsize = (c->flags & VB_HAS_AUDIO) ? get_le32(s->pb): 0;
            c->curstrm = !!(c->flags & VB_HAS_AUDIO); /* audio substream first */
        }

        if (!c->curstrm){
            /* video part: packet begins with the flags word + GMC bytes */
            size = c->pktsize - c->sndsize;
            if (av_new_packet(pkt, size) < 0)
                return AVERROR(ENOMEM);
            AV_WL16(pkt->data, c->flags);
            if (c->gmcsize)
                memcpy(pkt->data + 2, c->gmc, c->gmcsize);
            get_buffer(s->pb, pkt->data + 2 + c->gmcsize, size - c->gmcsize - 2);
            pkt->stream_index = 0;
            c->curstrm = -1;
        }else{
            if (av_get_packet(s->pb, pkt, c->sndsize - 4) < 0)
                return AVERROR(EIO);
            pkt->stream_index = 1;
            c->curstrm = 0; /* this frame's video comes next */
        }
        /* first frame and all audio packets are key frames */
        if(!c->cur_frame || c->curstrm)
            pkt->flags |= AV_PKT_FLAG_KEY;
        if (c->curstrm == -1)
            c->cur_frame++;
    }else{
        size = av_get_packet(s->pb, pkt, c->block_align);
        if(size <= 0)
            return AVERROR(EIO);
    }
    return pkt->size;
}
/* Beam Software SIFF demuxer definition (fields are positional). */
AVInputFormat siff_demuxer = {
    "siff",
    NULL_IF_CONFIG_SMALL("Beam Software SIFF"),
    sizeof(SIFFContext),  /* priv_data_size */
    siff_probe,
    siff_read_header,
    siff_read_packet,
    .extensions = "vb,son"
};
| 123linslouis-android-video-cutter | jni/libavformat/siff.c | C | asf20 | 6,691 |
/*
* Sierra VMD Format Demuxer
* Copyright (c) 2004 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Sierra VMD file demuxer
* by Vladimir "VAG" Gneushev (vagsoft at mail.ru)
* for more information on the Sierra VMD file format, visit:
* http://www.pcisys.net/~melanson/codecs/
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#define VMD_HEADER_SIZE 0x0330        /* fixed size of the raw VMD file header */
#define BYTES_PER_FRAME_RECORD 16     /* size of one TOC frame record */

/* one entry of the demuxer's flattened frame index */
typedef struct {
    int stream_index;      /* audio or video stream the chunk belongs to */
    int64_t frame_offset;  /* absolute file offset of the chunk payload */
    unsigned int frame_size;
    int64_t pts;
    int keyframe;
    unsigned char frame_record[BYTES_PER_FRAME_RECORD]; /* raw 16-byte TOC record */
} vmd_frame;

typedef struct VmdDemuxContext {
    int video_stream_index;
    int audio_stream_index;

    unsigned int frame_count;       /* entries in frame_table after parsing */
    unsigned int frames_per_block;
    vmd_frame *frame_table;
    unsigned int current_frame;     /* next entry to deliver */
    int is_indeo3;                  /* video codec is Indeo 3, not VMD video */

    int sample_rate;
    int64_t audio_sample_counter;
    int skiphdr;

    unsigned char vmd_header[VMD_HEADER_SIZE]; /* raw header, reused as video extradata */
} VmdDemuxContext;
/*
 * Probe: the first 16-bit LE word must equal the VMD header payload size
 * and the dimensions at offsets 12/14 must be plausible (1..2048).
 */
static int vmd_probe(AVProbeData *p)
{
    int width, height;

    if (p->buf_size < 16)
        return 0;
    /* check if the first 2 bytes of the file contain the appropriate size
     * of a VMD header chunk */
    if (AV_RL16(&p->buf[0]) != VMD_HEADER_SIZE - 2)
        return 0;

    width  = AV_RL16(&p->buf[12]);
    height = AV_RL16(&p->buf[14]);
    if (width == 0 || width > 2048 || height == 0 || height > 2048)
        return 0;

    /* weak signature, so only claim half certainty */
    return AVPROBE_SCORE_MAX / 2;
}
/*
 * Read the VMD header, create the video (VMD or Indeo 3) stream and an
 * optional audio stream, then parse the table of contents into a flat
 * frame_table of audio/video chunks with precomputed pts values.
 * Returns 0 on success or a negative AVERROR code.
 *
 * Fix: the extradata buffer from av_mallocz() was previously passed to
 * memcpy() without a NULL check (crash on OOM); it is now checked.
 */
static int vmd_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    VmdDemuxContext *vmd = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st = NULL, *vst;
    unsigned int toc_offset;
    unsigned char *raw_frame_table;
    int raw_frame_table_size;
    int64_t current_offset;
    int i, j;
    unsigned int total_frames;
    int64_t current_audio_pts = 0;
    unsigned char chunk[BYTES_PER_FRAME_RECORD];
    int num, den;
    int sound_buffers;

    /* fetch the main header, including the 2 header length bytes */
    url_fseek(pb, 0, SEEK_SET);
    if (get_buffer(pb, vmd->vmd_header, VMD_HEADER_SIZE) != VMD_HEADER_SIZE)
        return AVERROR(EIO);

    /* "iv3" at offset 16 marks an Indeo 3 video stream */
    if(vmd->vmd_header[16] == 'i' && vmd->vmd_header[17] == 'v' && vmd->vmd_header[18] == '3')
        vmd->is_indeo3 = 1;
    else
        vmd->is_indeo3 = 0;
    /* start up the decoders */
    vst = av_new_stream(s, 0);
    if (!vst)
        return AVERROR(ENOMEM);
    av_set_pts_info(vst, 33, 1, 10);
    vmd->video_stream_index = vst->index;
    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id = vmd->is_indeo3 ? CODEC_ID_INDEO3 : CODEC_ID_VMDVIDEO;
    vst->codec->codec_tag = 0;  /* no fourcc */
    vst->codec->width = AV_RL16(&vmd->vmd_header[12]);
    vst->codec->height = AV_RL16(&vmd->vmd_header[14]);
    if(vmd->is_indeo3 && vst->codec->width > 320){
        vst->codec->width >>= 1;
        vst->codec->height >>= 1;
    }
    vst->codec->extradata = av_mallocz(VMD_HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!vst->codec->extradata)
        return AVERROR(ENOMEM); /* was dereferenced unchecked before */
    vst->codec->extradata_size = VMD_HEADER_SIZE;
    memcpy(vst->codec->extradata, vmd->vmd_header, VMD_HEADER_SIZE);

    /* if sample rate is 0, assume no audio */
    vmd->sample_rate = AV_RL16(&vmd->vmd_header[804]);
    if (vmd->sample_rate) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        vmd->audio_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = CODEC_ID_VMDAUDIO;
        st->codec->codec_tag = 0;  /* no fourcc */
        st->codec->channels = (vmd->vmd_header[811] & 0x80) ? 2 : 1;
        st->codec->sample_rate = vmd->sample_rate;
        st->codec->block_align = AV_RL16(&vmd->vmd_header[806]);
        /* the top bit of block_align selects 16-bit samples */
        if (st->codec->block_align & 0x8000) {
            st->codec->bits_per_coded_sample = 16;
            st->codec->block_align = -(st->codec->block_align - 0x10000);
        } else {
            st->codec->bits_per_coded_sample = 8;
        }
        st->codec->bit_rate = st->codec->sample_rate *
            st->codec->bits_per_coded_sample * st->codec->channels;

        /* calculate pts */
        num = st->codec->block_align;
        den = st->codec->sample_rate * st->codec->channels;
        av_reduce(&den, &num, den, num, (1UL<<31)-1);
        av_set_pts_info(vst, 33, num, den);
        av_set_pts_info(st, 33, num, den);
    }

    toc_offset = AV_RL32(&vmd->vmd_header[812]);
    vmd->frame_count = AV_RL16(&vmd->vmd_header[6]);
    vmd->frames_per_block = AV_RL16(&vmd->vmd_header[18]);
    url_fseek(pb, toc_offset, SEEK_SET);

    raw_frame_table = NULL;
    vmd->frame_table = NULL;
    sound_buffers = AV_RL16(&vmd->vmd_header[808]);
    raw_frame_table_size = vmd->frame_count * 6;
    /* guard the frame_table allocation size against integer overflow */
    if(vmd->frame_count * vmd->frames_per_block >= UINT_MAX / sizeof(vmd_frame) - sound_buffers){
        av_log(s, AV_LOG_ERROR, "vmd->frame_count * vmd->frames_per_block too large\n");
        return -1;
    }
    raw_frame_table = av_malloc(raw_frame_table_size);
    vmd->frame_table = av_malloc((vmd->frame_count * vmd->frames_per_block + sound_buffers) * sizeof(vmd_frame));
    if (!raw_frame_table || !vmd->frame_table) {
        av_free(raw_frame_table);
        av_free(vmd->frame_table);
        return AVERROR(ENOMEM);
    }
    if (get_buffer(pb, raw_frame_table, raw_frame_table_size) !=
        raw_frame_table_size) {
        av_free(raw_frame_table);
        av_free(vmd->frame_table);
        return AVERROR(EIO);
    }

    total_frames = 0;
    for (i = 0; i < vmd->frame_count; i++) {
        current_offset = AV_RL32(&raw_frame_table[6 * i + 2]);
        /* handle each entry in index block */
        for (j = 0; j < vmd->frames_per_block; j++) {
            int type;
            uint32_t size;

            get_buffer(pb, chunk, BYTES_PER_FRAME_RECORD);
            type = chunk[0];
            size = AV_RL32(&chunk[2]);
            if(!size && type != 1)
                continue;
            switch(type) {
            case 1: /* Audio Chunk */
                if (!st) break;
                /* first audio chunk contains several audio buffers */
                vmd->frame_table[total_frames].frame_offset = current_offset;
                vmd->frame_table[total_frames].stream_index = vmd->audio_stream_index;
                vmd->frame_table[total_frames].frame_size = size;
                memcpy(vmd->frame_table[total_frames].frame_record, chunk, BYTES_PER_FRAME_RECORD);
                vmd->frame_table[total_frames].pts = current_audio_pts;
                total_frames++;
                if(!current_audio_pts)
                    current_audio_pts += sound_buffers;
                else
                    current_audio_pts++;
                break;
            case 2: /* Video Chunk */
                vmd->frame_table[total_frames].frame_offset = current_offset;
                vmd->frame_table[total_frames].stream_index = vmd->video_stream_index;
                vmd->frame_table[total_frames].frame_size = size;
                memcpy(vmd->frame_table[total_frames].frame_record, chunk, BYTES_PER_FRAME_RECORD);
                vmd->frame_table[total_frames].pts = i;
                total_frames++;
                break;
            }
            current_offset += size;
        }
    }

    av_free(raw_frame_table);

    vmd->current_frame = 0;
    vmd->frame_count = total_frames;

    return 0;
}
/*
 * Deliver the next chunk from the frame table built in vmd_read_header().
 * The packet buffer is prefixed with the 16-byte TOC record; for Indeo 3
 * the payload overwrites the start of the buffer instead (the allocation
 * still includes the record size).
 */
static int vmd_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    VmdDemuxContext *vmd = s->priv_data;
    ByteIOContext *pb = s->pb;
    int ret = 0;
    vmd_frame *frame;

    if (vmd->current_frame >= vmd->frame_count)
        return AVERROR(EIO);

    frame = &vmd->frame_table[vmd->current_frame];
    /* position the stream (will probably be there already) */
    url_fseek(pb, frame->frame_offset, SEEK_SET);

    if (av_new_packet(pkt, frame->frame_size + BYTES_PER_FRAME_RECORD))
        return AVERROR(ENOMEM);
    pkt->pos= url_ftell(pb);
    memcpy(pkt->data, frame->frame_record, BYTES_PER_FRAME_RECORD);
    if(vmd->is_indeo3)
        ret = get_buffer(pb, pkt->data, frame->frame_size);
    else
        ret = get_buffer(pb, pkt->data + BYTES_PER_FRAME_RECORD,
            frame->frame_size);

    if (ret != frame->frame_size) {
        av_free_packet(pkt);
        ret = AVERROR(EIO);
    }

    pkt->stream_index = frame->stream_index;
    pkt->pts = frame->pts;
    av_log(s, AV_LOG_DEBUG, " dispatching %s frame with %d bytes and pts %"PRId64"\n",
            (frame->frame_record[0] == 0x02) ? "video" : "audio",
            frame->frame_size + BYTES_PER_FRAME_RECORD,
            pkt->pts);

    vmd->current_frame++;

    return ret;
}
/* Release the frame table built during vmd_read_header(). */
static int vmd_read_close(AVFormatContext *s)
{
    VmdDemuxContext *ctx = s->priv_data;

    av_free(ctx->frame_table);

    return 0;
}
/* Sierra VMD demuxer registration; fields follow the positional
 * AVInputFormat layout of this libavformat version
 * (name, long name, private context size, probe/read callbacks). */
AVInputFormat vmd_demuxer = {
    "vmd",                                  /* short format name */
    NULL_IF_CONFIG_SMALL("Sierra VMD format"),
    sizeof(VmdDemuxContext),                /* demuxer private data size */
    vmd_probe,
    vmd_read_header,
    vmd_read_packet,
    vmd_read_close,
};
| 123linslouis-android-video-cutter | jni/libavformat/sierravmd.c | C | asf20 | 9,788 |
/*
* "Real" compatible muxer.
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "rm.h"
/* Per-stream muxing state: packet statistics accumulated while writing
 * and flushed into the file header by rv10_write_header(). */
typedef struct {
    int nb_packets;         /* packets written so far */
    int packet_total_size;  /* sum of packet payload sizes */
    int packet_max_size;    /* largest payload seen */
    /* codec related output */
    int bit_rate;
    float frame_rate;       /* fps for video, frames/s for audio */
    int nb_frames;    /* current frame number */
    int total_frames; /* total number of frames */
    int num;                /* stream number inside the file */
    AVCodecContext *enc;    /* codec context of the stream (not owned) */
} StreamInfo;

/* Muxer private context: the format supports at most one audio and one
 * video stream. */
typedef struct {
    StreamInfo streams[2];
    StreamInfo *audio_stream, *video_stream;
    int data_pos; /* position of the data after the header */
} RMMuxContext;
/* in ms */
#define BUFFER_DURATION 0
/* Write a string as a 16-bit big-endian length followed by its bytes
 * (no NUL terminator). */
static void put_str(ByteIOContext *s, const char *tag)
{
    size_t len = strlen(tag);
    size_t i;

    put_be16(s, len);
    for (i = 0; i < len; i++)
        put_byte(s, tag[i]);
}
/* Write a string as an 8-bit length followed by its bytes
 * (no NUL terminator; strings longer than 255 bytes are truncated
 * only in the length field, as in the original). */
static void put_str8(ByteIOContext *s, const char *tag)
{
    size_t len = strlen(tag);
    size_t i;

    put_byte(s, len);
    for (i = 0; i < len; i++)
        put_byte(s, tag[i]);
}
/**
 * Write the complete .RMF file header: file header, PROP chunk with
 * global statistics, CONT metadata chunk, one MDPR chunk per stream
 * (including the opaque codec-specific blob), and the DATA chunk header.
 *
 * Called once from rm_write_header() with zero sizes and again from
 * rm_write_trailer() (after seeking back to offset 0) with the real
 * data_size so the statistics get patched in.
 *
 * NOTE(review): start_ptr/data_offset_ptr point into the raw IO buffer;
 * this assumes the whole header fits in s->buf without a flush in
 * between — TODO confirm for very large stream counts.
 *
 * @param ctx        format context
 * @param data_size  size of the DATA payload (0 on the first pass)
 * @param index_pos  file offset of the index chunk (0 = none)
 */
static void rv10_write_header(AVFormatContext *ctx,
                              int data_size, int index_pos)
{
    RMMuxContext *rm = ctx->priv_data;
    ByteIOContext *s = ctx->pb;
    StreamInfo *stream;
    unsigned char *data_offset_ptr, *start_ptr;
    const char *desc, *mimetype;
    int nb_packets, packet_total_size, packet_max_size, size, packet_avg_size, i;
    int bit_rate, v, duration, flags, data_pos;
    AVMetadataTag *tag;

    start_ptr = s->buf_ptr;

    put_tag(s, ".RMF");
    put_be32(s,18); /* header size */
    put_be16(s,0);
    put_be32(s,0);
    put_be32(s,4 + ctx->nb_streams); /* num headers */

    put_tag(s,"PROP");
    put_be32(s, 50);
    put_be16(s, 0);
    packet_max_size = 0;
    packet_total_size = 0;
    nb_packets = 0;
    bit_rate = 0;
    duration = 0;
    /* aggregate per-stream statistics for the PROP chunk */
    for(i=0;i<ctx->nb_streams;i++) {
        StreamInfo *stream = &rm->streams[i]; /* shadows outer 'stream' on purpose */
        bit_rate += stream->bit_rate;
        if (stream->packet_max_size > packet_max_size)
            packet_max_size = stream->packet_max_size;
        nb_packets += stream->nb_packets;
        packet_total_size += stream->packet_total_size;
        /* select maximum duration */
        v = (int) (1000.0 * (float)stream->total_frames / stream->frame_rate);
        if (v > duration)
            duration = v;
    }
    put_be32(s, bit_rate); /* max bit rate */
    put_be32(s, bit_rate); /* avg bit rate */
    put_be32(s, packet_max_size); /* max packet size */
    if (nb_packets > 0)
        packet_avg_size = packet_total_size / nb_packets;
    else
        packet_avg_size = 0;
    put_be32(s, packet_avg_size); /* avg packet size */
    put_be32(s, nb_packets); /* num packets */
    put_be32(s, duration); /* duration */
    put_be32(s, BUFFER_DURATION); /* preroll */
    put_be32(s, index_pos); /* index offset */
    /* computation of data the data offset */
    data_offset_ptr = s->buf_ptr;
    put_be32(s, 0); /* data offset : will be patched after */
    put_be16(s, ctx->nb_streams); /* num streams */
    flags = 1 | 2; /* save allowed & perfect play */
    if (url_is_streamed(s))
        flags |= 4; /* live broadcast */
    put_be16(s, flags);

    /* comments */

    put_tag(s,"CONT");
    size = 4 * 2 + 10; /* fixed CONT overhead before the four strings */
    for(i=0; i<FF_ARRAY_ELEMS(ff_rm_metadata); i++) {
        tag = av_metadata_get(ctx->metadata, ff_rm_metadata[i], NULL, 0);
        if(tag) size += strlen(tag->value);
    }
    put_be32(s,size);
    put_be16(s,0);
    for(i=0; i<FF_ARRAY_ELEMS(ff_rm_metadata); i++) {
        tag = av_metadata_get(ctx->metadata, ff_rm_metadata[i], NULL, 0);
        put_str(s, tag ? tag->value : "");
    }

    /* one MDPR (media properties) chunk per stream */
    for(i=0;i<ctx->nb_streams;i++) {
        int codec_data_size;

        stream = &rm->streams[i];

        if (stream->enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            desc = "The Audio Stream";
            mimetype = "audio/x-pn-realaudio";
            codec_data_size = 73;
        } else {
            desc = "The Video Stream";
            mimetype = "video/x-pn-realvideo";
            codec_data_size = 34;
        }

        put_tag(s,"MDPR");
        size = 10 + 9 * 4 + strlen(desc) + strlen(mimetype) + codec_data_size;
        put_be32(s, size);
        put_be16(s, 0);

        put_be16(s, i); /* stream number */
        put_be32(s, stream->bit_rate); /* max bit rate */
        put_be32(s, stream->bit_rate); /* avg bit rate */
        put_be32(s, stream->packet_max_size); /* max packet size */
        if (stream->nb_packets > 0)
            packet_avg_size = stream->packet_total_size /
                stream->nb_packets;
        else
            packet_avg_size = 0;
        put_be32(s, packet_avg_size); /* avg packet size */
        put_be32(s, 0); /* start time */
        put_be32(s, BUFFER_DURATION); /* preroll */
        /* duration */
        if (url_is_streamed(s) || !stream->total_frames)
            put_be32(s, (int)(3600 * 1000)); /* unknown: claim one hour */
        else
            put_be32(s, (int)(stream->total_frames * 1000 / stream->frame_rate));
        put_str8(s, desc);
        put_str8(s, mimetype);
        put_be32(s, codec_data_size);

        if (stream->enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            int coded_frame_size, fscode, sample_rate;
            sample_rate = stream->enc->sample_rate;
            coded_frame_size = (stream->enc->bit_rate *
                                stream->enc->frame_size) / (8 * sample_rate);
            /* audio codec info */
            put_tag(s, ".ra");
            put_byte(s, 0xfd);
            put_be32(s, 0x00040000); /* version */
            put_tag(s, ".ra4");
            put_be32(s, 0x01b53530); /* stream length */
            put_be16(s, 4); /* unknown */
            put_be32(s, 0x39); /* header size */

            switch(sample_rate) {
            case 48000:
            case 24000:
            case 12000:
                fscode = 1;
                break;
            default:
            case 44100:
            case 22050:
            case 11025:
                fscode = 2;
                break;
            case 32000:
            case 16000:
            case 8000:
                fscode = 3;
            }
            put_be16(s, fscode); /* codec additional info, for AC-3, seems
                                    to be a frequency code */
            /* special hack to compensate rounding errors... */
            if (coded_frame_size == 557)
                coded_frame_size--;
            put_be32(s, coded_frame_size); /* frame length */
            put_be32(s, 0x51540); /* unknown */
            put_be32(s, 0x249f0); /* unknown */
            put_be32(s, 0x249f0); /* unknown */
            put_be16(s, 0x01);
            /* frame length : seems to be very important */
            put_be16(s, coded_frame_size);
            put_be32(s, 0); /* unknown */
            put_be16(s, stream->enc->sample_rate); /* sample rate */
            put_be32(s, 0x10); /* unknown */
            put_be16(s, stream->enc->channels);
            put_str8(s, "Int0"); /* codec name */
            put_str8(s, "dnet"); /* codec name */
            put_be16(s, 0); /* title length */
            put_be16(s, 0); /* author length */
            put_be16(s, 0); /* copyright length */
            put_byte(s, 0); /* end of header */
        } else {
            /* video codec info */
            put_be32(s,34); /* size */
            if(stream->enc->codec_id == CODEC_ID_RV10)
                put_tag(s,"VIDORV10");
            else
                put_tag(s,"VIDORV20");
            put_be16(s, stream->enc->width);
            put_be16(s, stream->enc->height);
            put_be16(s, (int) stream->frame_rate); /* frames per seconds ? */
            put_be32(s,0); /* unknown meaning */
            put_be16(s, (int) stream->frame_rate); /* unknown meaning */
            put_be32(s,0); /* unknown meaning */
            put_be16(s, 8); /* unknown meaning */
            /* Seems to be the codec version: only use basic H263. The next
               versions seems to add a diffential DC coding as in
               MPEG... nothing new under the sun */
            if(stream->enc->codec_id == CODEC_ID_RV10)
                put_be32(s,0x10000000);
            else
                put_be32(s,0x20103001);
            //put_be32(s,0x10003000);
        }
    }

    /* patch data offset field */
    data_pos = s->buf_ptr - start_ptr;
    rm->data_pos = data_pos;
    data_offset_ptr[0] = data_pos >> 24;
    data_offset_ptr[1] = data_pos >> 16;
    data_offset_ptr[2] = data_pos >> 8;
    data_offset_ptr[3] = data_pos;

    /* data stream */
    put_tag(s,"DATA");
    put_be32(s,data_size + 10 + 8);
    put_be16(s,0);
    put_be32(s, nb_packets); /* number of packets */
    put_be32(s,0); /* next data header */
}
/* Emit the 12-byte per-packet header and update the per-stream
 * statistics that rv10_write_header() consumes on the second pass. */
static void write_packet_header(AVFormatContext *ctx, StreamInfo *stream,
                                int length, int key_frame)
{
    ByteIOContext *pb = ctx->pb;
    int ts = (1000 * (float)stream->nb_frames) / stream->frame_rate;

    stream->nb_packets++;
    stream->packet_total_size += length;
    if (stream->packet_max_size < length)
        stream->packet_max_size = length;

    put_be16(pb, 0);                 /* version */
    put_be16(pb, length + 12);       /* packet size including this header */
    put_be16(pb, stream->num);       /* stream number */
    put_be32(pb, ts);                /* timestamp in ms */
    put_byte(pb, 0);                 /* reserved */
    put_byte(pb, key_frame ? 2 : 0); /* flags: 2 = keyframe */
}
/* Initialize per-stream state and emit the initial (zero-sized) file
 * header; real sizes are patched in by rm_write_trailer().
 * Returns 0 on success, -1 for unsupported media types. */
static int rm_write_header(AVFormatContext *s)
{
    RMMuxContext *rm = s->priv_data;
    int n;

    for (n = 0; n < s->nb_streams; n++) {
        AVCodecContext *codec = s->streams[n]->codec;
        StreamInfo *stream = &rm->streams[n];

        s->streams[n]->id = n;
        memset(stream, 0, sizeof(StreamInfo));
        stream->num      = n;
        stream->bit_rate = codec->bit_rate;
        stream->enc      = codec;

        switch (codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            rm->audio_stream = stream;
            /* one "frame" per encoded audio frame */
            stream->frame_rate = (float)codec->sample_rate / (float)codec->frame_size;
            /* XXX: dummy values */
            stream->packet_max_size = 1024;
            stream->nb_packets      = 0;
            stream->total_frames    = stream->nb_packets;
            break;
        case AVMEDIA_TYPE_VIDEO:
            rm->video_stream = stream;
            stream->frame_rate = (float)codec->time_base.den / (float)codec->time_base.num;
            /* XXX: dummy values */
            stream->packet_max_size = 4096;
            stream->nb_packets      = 0;
            stream->total_frames    = stream->nb_packets;
            break;
        default:
            /* only audio and video are representable in this format */
            return -1;
        }
    }

    rv10_write_header(s, 0, 0);
    put_flush_packet(s->pb);
    return 0;
}
/**
 * Write one audio packet, byte-swapping 16-bit words as the "dnet"
 * (byte-reversed AC-3) payload format requires.
 *
 * Fixes vs. original: the av_malloc() result was never checked (NULL
 * dereference on OOM), and an odd `size` made the swap loop read
 * buf[size] and write buf1[size] — one byte past both buffers.
 *
 * @return 0 on success, AVERROR(ENOMEM) if the bounce buffer cannot
 *         be allocated
 */
static int rm_write_audio(AVFormatContext *s, const uint8_t *buf, int size, int flags)
{
    uint8_t *buf1;
    RMMuxContext *rm = s->priv_data;
    ByteIOContext *pb = s->pb;
    StreamInfo *stream = rm->audio_stream;
    int i;

    /* XXX: suppress this malloc */
    buf1 = av_malloc(size * sizeof(uint8_t));
    if (!buf1)
        return AVERROR(ENOMEM);

    write_packet_header(s, stream, size, !!(flags & AV_PKT_FLAG_KEY));

    /* for AC-3, the words seem to be reversed */
    for (i = 0; i + 1 < size; i += 2) {
        buf1[i]     = buf[i + 1];
        buf1[i + 1] = buf[i];
    }
    if (size & 1)
        buf1[size - 1] = buf[size - 1]; /* trailing odd byte: copy unswapped */

    put_buffer(pb, buf1, size);
    put_flush_packet(pb);

    stream->nb_frames++;
    av_free(buf1);
    return 0;
}
/**
 * Write one video packet, prefixed with the (reverse-engineered)
 * RealVideo fragment header: a flags byte, a sequence byte, the frame
 * size and offset (16-bit fields with bit 14 set for payloads below
 * 0x4000, 32-bit fields otherwise), and a frame counter byte.
 * Always emits the frame as a single fragment.
 *
 * @return always 0 (write errors are not reported here)
 */
static int rm_write_video(AVFormatContext *s, const uint8_t *buf, int size, int flags)
{
    RMMuxContext *rm = s->priv_data;
    ByteIOContext *pb = s->pb;
    StreamInfo *stream = rm->video_stream;
    int key_frame = !!(flags & AV_PKT_FLAG_KEY);

    /* XXX: this is incorrect: should be a parameter */

    /* Well, I spent some time finding the meaning of these bits. I am
       not sure I understood everything, but it works !! */
#if 1
    write_packet_header(s, stream, size + 7 + (size >= 0x4000)*4, key_frame);
    /* bit 7: '1' if final packet of a frame converted in several packets */
    put_byte(pb, 0x81);
    /* bit 7: '1' if I frame. bits 6..0 : sequence number in current
       frame starting from 1 */
    if (key_frame) {
        put_byte(pb, 0x81);
    } else {
        put_byte(pb, 0x01);
    }
    if(size >= 0x4000){
        put_be32(pb, size); /* total frame size */
        put_be32(pb, size); /* offset from the start or the end */
    }else{
        put_be16(pb, 0x4000 | size); /* total frame size */
        put_be16(pb, 0x4000 | size); /* offset from the start or the end */
    }
#else
    /* full frame */
    write_packet_header(s, size + 6);
    put_byte(pb, 0xc0);
    put_be16(pb, 0x4000 + size); /* total frame size */
    put_be16(pb, 0x4000 + packet_number * 126); /* position in stream */
#endif
    put_byte(pb, stream->nb_frames & 0xff); /* low byte of the frame counter */

    put_buffer(pb, buf, size);
    put_flush_packet(pb);

    stream->nb_frames++;
    return 0;
}
/* Dispatch a packet to the audio or video writer based on the media
 * type of its stream. */
static int rm_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    enum AVMediaType type = s->streams[pkt->stream_index]->codec->codec_type;

    return (type == AVMEDIA_TYPE_AUDIO)
        ? rm_write_audio(s, pkt->data, pkt->size, pkt->flags)
        : rm_write_video(s, pkt->data, pkt->size, pkt->flags);
}
/* Finish the file: append the end marker and, when the output is
 * seekable, rewind and rewrite the header with the real statistics. */
static int rm_write_trailer(AVFormatContext *s)
{
    RMMuxContext *rm = s->priv_data;
    ByteIOContext *pb = s->pb;
    int i;

    if (url_is_streamed(s->pb)) {
        /* live output: only the undocumented end header can be written */
        put_be32(pb, 0);
        put_be32(pb, 0);
    } else {
        /* end of file: finish to write header */
        int index_pos = url_fseek(pb, 0, SEEK_CUR);
        int data_size = index_pos - rm->data_pos;

        /* FIXME: write index */
        /* undocumented end header */
        put_be32(pb, 0);
        put_be32(pb, 0);

        url_fseek(pb, 0, SEEK_SET);
        for (i = 0; i < s->nb_streams; i++)
            rm->streams[i].total_frames = rm->streams[i].nb_frames;
        rv10_write_header(s, data_size, 0);
    }
    put_flush_packet(pb);
    return 0;
}
/* RealMedia muxer registration; fields follow the positional
 * AVOutputFormat layout of this libavformat version (name, long name,
 * mime type, extensions, private context size, default codecs,
 * callbacks). */
AVOutputFormat rm_muxer = {
    "rm",
    NULL_IF_CONFIG_SMALL("RealMedia format"),
    "application/vnd.rn-realmedia",
    "rm,ra",
    sizeof(RMMuxContext),
    CODEC_ID_AC3,   /* default audio codec */
    CODEC_ID_RV10,  /* default video codec */
    rm_write_header,
    rm_write_packet,
    rm_write_trailer,
};
| 123linslouis-android-video-cutter | jni/libavformat/rmenc.c | C | asf20 | 14,971 |
/*
* RTP muxer definitions
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_RTPENC_H
#define AVFORMAT_RTPENC_H
#include "avformat.h"
#include "rtp.h"
/* State shared by all RTP packetizers: RTP header fields, sender
 * statistics for RTCP reports, and the outgoing packet buffer. */
struct RTPMuxContext {
    AVFormatContext *ic;
    AVStream *st;
    int payload_type;           /* RTP 'PT' header field */
    uint32_t ssrc;              /* synchronization source identifier */
    uint16_t seq;               /* next RTP sequence number */
    uint32_t timestamp;
    uint32_t base_timestamp;
    uint32_t cur_timestamp;
    int max_payload_size;       /* payload bytes per RTP packet */
    int num_frames;
    /* rtcp sender statistics receive */
    int64_t last_rtcp_ntp_time;    // TODO: move into statistics
    int64_t first_rtcp_ntp_time;   // TODO: move into statistics
    /* rtcp sender statistics */
    unsigned int packet_count;     // TODO: move into statistics (outgoing)
    unsigned int octet_count;      // TODO: move into statistics (outgoing)
    unsigned int last_octet_count; // TODO: move into statistics (outgoing)
    int first_packet;
    /* buffer for output */
    uint8_t *buf;       /* packet assembly buffer */
    uint8_t *buf_ptr;   /* current write position inside buf */
    int max_frames_per_packet;
};

typedef struct RTPMuxContext RTPMuxContext;

/* Send a raw RTP packet (m = RTP marker bit). */
void ff_rtp_send_data(AVFormatContext *s1, const uint8_t *buf1, int len, int m);

/* Codec-specific packetizers: split one encoded frame into RTP packets. */
void ff_rtp_send_h264(AVFormatContext *s1, const uint8_t *buf1, int size);
void ff_rtp_send_h263(AVFormatContext *s1, const uint8_t *buf1, int size);
void ff_rtp_send_aac(AVFormatContext *s1, const uint8_t *buff, int size);
void ff_rtp_send_amr(AVFormatContext *s1, const uint8_t *buff, int size);
void ff_rtp_send_mpegvideo(AVFormatContext *s1, const uint8_t *buf1, int size);
#endif /* AVFORMAT_RTPENC_H */
| 123linslouis-android-video-cutter | jni/libavformat/rtpenc.h | C | asf20 | 2,271 |
/*
* RTP definitions
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_RTP_H
#define AVFORMAT_RTP_H
#include "libavcodec/avcodec.h"
/**
* Return the payload type for a given codec.
*
* @param codec The context of the codec
* @return In case of unknown payload type or dynamic payload type, a
* negative value is returned; otherwise, the payload type (the 'PT' field
* in the RTP header) is returned.
*/
int ff_rtp_get_payload_type(AVCodecContext *codec);
/**
* Initialize a codec context based on the payload type.
*
* Fill the codec_type and codec_id fields of a codec context with
* information depending on the payload type; for audio codecs, the
* channels and sample_rate fields are also filled.
*
* @param codec The context of the codec
* @param payload_type The payload type (the 'PT' field in the RTP header)
* @return In case of unknown payload type or dynamic payload type, a
* negative value is returned; otherwise, 0 is returned
*/
int ff_rtp_get_codec_info(AVCodecContext *codec, int payload_type);
/**
* Return the encoding name (as defined in
* http://www.iana.org/assignments/rtp-parameters) for a given payload type.
*
* @param payload_type The payload type (the 'PT' field in the RTP header)
* @return In case of unknown payload type or dynamic payload type, a pointer
* to an empty string is returned; otherwise, a pointer to a string containing
* the encoding name is returned
*/
const char *ff_rtp_enc_name(int payload_type);
/**
* Return the codec id for the given encoding name and codec type.
*
* @param buf A pointer to the string containing the encoding name
* @param codec_type The codec type
* @return In case of unknown encoding name, CODEC_ID_NONE is returned;
* otherwise, the codec id is returned
*/
enum CodecID ff_rtp_codec_id(const char *buf, enum AVMediaType codec_type);
#define RTP_PT_PRIVATE 96
#define RTP_VERSION 2
#define RTP_MAX_SDES 256 /**< maximum text length for SDES */
/* RTCP packets use 0.5 % of the bandwidth */
#define RTCP_TX_RATIO_NUM 5
#define RTCP_TX_RATIO_DEN 1000
#endif /* AVFORMAT_RTP_H */
| 123linslouis-android-video-cutter | jni/libavformat/rtp.h | C | asf20 | 2,863 |
/*
* Copyright (c) 2009 Aurelien Jacobs <aurel@gnuage.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <strings.h>
#include "avformat.h"
#include "metadata.h"
#include "libavutil/avstring.h"
#if LIBAVFORMAT_VERSION_MAJOR < 53
/* Expands to the size and offset of a legacy AVFormatContext field,
 * used to fill the table below. */
#define SIZE_OFFSET(x) sizeof(((AVFormatContext*)0)->x),offsetof(AVFormatContext,x)

/* Maps metadata key names (matched case-insensitively) onto the legacy
 * AVFormatContext fields they populate; several aliases map to the same
 * field (e.g. "artist"/"composer" -> author). */
static const struct {
    const char name[16];  /* metadata key */
    int size;             /* destination field size in bytes */
    int offset;           /* destination field offset in AVFormatContext */
} compat_tab[] = {
    { "title", SIZE_OFFSET(title) },
    { "author", SIZE_OFFSET(author) },
    { "copyright", SIZE_OFFSET(copyright) },
    { "comment", SIZE_OFFSET(comment) },
    { "album", SIZE_OFFSET(album) },
    { "year", SIZE_OFFSET(year) },
    { "track", SIZE_OFFSET(track) },
    { "genre", SIZE_OFFSET(genre) },
    { "artist", SIZE_OFFSET(author) },
    { "creator", SIZE_OFFSET(author) },
    { "written_by", SIZE_OFFSET(author) },
    { "lead_performer", SIZE_OFFSET(author) },
    { "composer", SIZE_OFFSET(author) },
    { "performer", SIZE_OFFSET(author) },
    { "description", SIZE_OFFSET(comment) },
    { "albumtitle", SIZE_OFFSET(album) },
    { "date", SIZE_OFFSET(year) },
    { "date_written", SIZE_OFFSET(year) },
    { "date_released", SIZE_OFFSET(year) },
    { "tracknumber", SIZE_OFFSET(track) },
    { "part_number", SIZE_OFFSET(track) },
};
/**
 * Copy demuxed AVMetadata into the deprecated pre-53 AVFormatContext /
 * AVChapter / AVProgram / AVStream fields so old API users still see
 * the values. Existing (non-zero/non-empty) legacy fields are left
 * untouched.
 */
void ff_metadata_demux_compat(AVFormatContext *ctx)
{
    AVMetadata *m;
    int i, j;

    if ((m = ctx->metadata))
        for (j=0; j<m->count; j++)
            for (i=0; i<FF_ARRAY_ELEMS(compat_tab); i++)
                if (!strcasecmp(m->elems[j].key, compat_tab[i].name)) {
                    /* compute the legacy field address from the table offset;
                     * fields are either char arrays or plain ints */
                    int *ptr = (int *)((char *)ctx+compat_tab[i].offset);
                    if (*ptr)  continue; /* already set, don't overwrite */
                    if (compat_tab[i].size > sizeof(int))
                        av_strlcpy((char *)ptr, m->elems[j].value, compat_tab[i].size);
                    else
                        *ptr = atoi(m->elems[j].value);
                }

    /* mirror chapter titles */
    for (i=0; i<ctx->nb_chapters; i++)
        if ((m = ctx->chapters[i]->metadata))
            for (j=0; j<m->count; j++)
                if (!strcasecmp(m->elems[j].key, "title")) {
                    av_free(ctx->chapters[i]->title);
                    ctx->chapters[i]->title = av_strdup(m->elems[j].value);
                }

    /* mirror program name / provider */
    for (i=0; i<ctx->nb_programs; i++)
        if ((m = ctx->programs[i]->metadata))
            for (j=0; j<m->count; j++) {
                if (!strcasecmp(m->elems[j].key, "name")) {
                    av_free(ctx->programs[i]->name);
                    ctx->programs[i]->name = av_strdup(m->elems[j].value);
                }
                if (!strcasecmp(m->elems[j].key, "provider_name")) {
                    av_free(ctx->programs[i]->provider_name);
                    ctx->programs[i]->provider_name = av_strdup(m->elems[j].value);
                }
            }

    /* mirror per-stream language / filename */
    for (i=0; i<ctx->nb_streams; i++)
        if ((m = ctx->streams[i]->metadata))
            for (j=0; j<m->count; j++) {
                if (!strcasecmp(m->elems[j].key, "language"))
                    av_strlcpy(ctx->streams[i]->language, m->elems[j].value, 4);
                if (!strcasecmp(m->elems[j].key, "filename")) {
                    av_free(ctx->streams[i]->filename);
                    ctx->streams[i]->filename= av_strdup(m->elems[j].value);
                }
            }
}
/* Set metadata entry #key to `value` unless value is empty or the key
 * already exists in s->metadata. */
#define FILL_METADATA(s, key, value) {                                        \
    if (value && *value && !av_metadata_get(s->metadata, #key, NULL, 0))      \
        av_metadata_set2(&s->metadata, #key, value, 0);                       \
    }
/* Copy the legacy string field s->key into metadata under the same name. */
#define FILL_METADATA_STR(s, key)  FILL_METADATA(s, key, s->key)
/* Copy the legacy int field s->key into metadata, formatted as decimal;
 * a zero value is treated as "unset" and skipped. */
#define FILL_METADATA_INT(s, key) {                                           \
    char number[10];                                                          \
    snprintf(number, sizeof(number), "%d", s->key);                           \
    if(s->key)  FILL_METADATA(s, key, number) }
/**
 * Populate AVMetadata from the deprecated pre-53 AVFormatContext /
 * AVChapter / AVProgram / AVStream fields before muxing, for callers
 * that still set the legacy fields. A no-op when the context already
 * carries metadata entries.
 */
void ff_metadata_mux_compat(AVFormatContext *ctx)
{
    int i;

    if (ctx->metadata && ctx->metadata->count > 0)
        return; /* new-style metadata present, nothing to convert */

    FILL_METADATA_STR(ctx, title);
    FILL_METADATA_STR(ctx, author);
    FILL_METADATA_STR(ctx, copyright);
    FILL_METADATA_STR(ctx, comment);
    FILL_METADATA_STR(ctx, album);
    FILL_METADATA_INT(ctx, year);
    FILL_METADATA_INT(ctx, track);
    FILL_METADATA_STR(ctx, genre);
    for (i=0; i<ctx->nb_chapters; i++)
        FILL_METADATA_STR(ctx->chapters[i], title);
    for (i=0; i<ctx->nb_programs; i++) {
        FILL_METADATA_STR(ctx->programs[i], name);
        FILL_METADATA_STR(ctx->programs[i], provider_name);
    }
    for (i=0; i<ctx->nb_streams; i++) {
        FILL_METADATA_STR(ctx->streams[i], language);
        FILL_METADATA_STR(ctx->streams[i], filename);
    }
}
#endif /* LIBAVFORMAT_VERSION_MAJOR < 53 */
| 123linslouis-android-video-cutter | jni/libavformat/metadata_compat.c | C | asf20 | 5,744 |
/*
* seek utility functions for use within format handlers
*
* Copyright (c) 2009 Ivan Schreter
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "seek.h"
#include "libavutil/mem.h"
#include "internal.h"
// NOTE: implementation should be moved here in another patch, to keep patches
// separated.
/**
* helper structure describing keyframe search state of one stream
*/
/* Per-stream search state: the best keyframe found at-or-below (lo) and
 * at-or-above (hi) the target timestamp, plus bookkeeping to terminate
 * each backward-scan iteration. */
typedef struct {
    int64_t pos_lo;          ///< position of the frame with low timestamp in file or INT64_MAX if not found (yet)
    int64_t ts_lo;           ///< frame presentation timestamp or same as pos_lo for byte seeking
    int64_t pos_hi;          ///< position of the frame with high timestamp in file or INT64_MAX if not found (yet)
    int64_t ts_hi;           ///< frame presentation timestamp or same as pos_hi for byte seeking
    int64_t last_pos;        ///< last known position of a frame, for multi-frame packets
    int64_t term_ts;         ///< termination timestamp (which TS we already read)
    AVRational term_ts_tb;   ///< timebase for term_ts
    int64_t first_ts;        ///< first packet timestamp in this iteration (to fill term_ts later)
    AVRational first_ts_tb;  ///< timebase for first_ts
    int terminated;          ///< termination flag for the current iteration
} AVSyncPoint;
/**
* Compute a distance between timestamps.
*
* Distances are only comparable, if same time bases are used for computing
* distances.
*
* @param ts_hi high timestamp
* @param tb_hi high timestamp time base
* @param ts_lo low timestamp
* @param tb_lo low timestamp time base
* @return representation of distance between high and low timestamps
*/
static int64_t ts_distance(int64_t ts_hi,
                           AVRational tb_hi,
                           int64_t ts_lo,
                           AVRational tb_lo)
{
    /* Rescale both stamps onto the common denominator tb_hi.den*tb_lo.den
     * and subtract; results are only comparable between calls that used
     * the same pair of time bases. */
    return ts_hi * tb_hi.num * tb_lo.den - ts_lo * tb_lo.num * tb_hi.den;
}
/**
* Partial search for keyframes in multiple streams.
*
* This routine searches in each stream for the next lower and the next higher
* timestamp compared to the given target timestamp. The search starts at the current
* file position and ends at the file position, where all streams have already been
* examined (or when all higher key frames are found in the first iteration).
*
* This routine is called iteratively with an exponential backoff to find the lower
* timestamp.
*
* @param s format context
* @param timestamp target timestamp (or position, if AVSEEK_FLAG_BYTE)
* @param timebase time base for timestamps
* @param flags seeking flags
* @param sync array with information per stream
* @param keyframes_to_find count of keyframes to find in total
* @param found_lo ptr to the count of already found low timestamp keyframes
* @param found_hi ptr to the count of already found high timestamp keyframes
* @param first_iter flag for first iteration
*/
static void search_hi_lo_keyframes(AVFormatContext *s,
                                   int64_t timestamp,
                                   AVRational timebase,
                                   int flags,
                                   AVSyncPoint *sync,
                                   int keyframes_to_find,
                                   int *found_lo,
                                   int *found_hi,
                                   int first_iter)
{
    AVPacket pkt;
    AVSyncPoint *sp;
    AVStream *st;
    int idx;
    int flg;
    int terminated_count = 0;
    int64_t pos;
    int64_t pts, dts;   // PTS/DTS from stream
    int64_t ts;         // PTS in stream-local time base or position for byte seeking
    AVRational ts_tb;   // Time base of the stream or 1:1 for byte seeking

    for (;;) {
        if (av_read_frame(s, &pkt) < 0) {
            // EOF or error, make sure high flags are set
            for (idx = 0; idx < s->nb_streams; ++idx) {
                if (s->streams[idx]->discard < AVDISCARD_ALL) {
                    sp = &sync[idx];
                    if (sp->pos_hi == INT64_MAX) {
                        // no high frame exists for this stream
                        (*found_hi)++;
                        sp->ts_hi  = INT64_MAX;
                        sp->pos_hi = INT64_MAX - 1;
                    }
                }
            }
            break;
        }

        idx = pkt.stream_index;
        st = s->streams[idx];
        if (st->discard >= AVDISCARD_ALL)
            // this stream is not active, skip packet
            continue;

        sp = &sync[idx];

        flg = pkt.flags;
        pos = pkt.pos;
        pts = pkt.pts;
        dts = pkt.dts;
        if (pts == AV_NOPTS_VALUE)
            // some formats don't provide PTS, only DTS
            pts = dts;

        av_free_packet(&pkt);

        // Multi-frame packets only return position for the very first frame.
        // Other frames are read with position == -1. Therefore, we note down
        // last known position of a frame and use it if a frame without
        // position arrives. In this way, it's possible to seek to proper
        // position. Additionally, for parsers not providing position at all,
        // an approximation will be used (starting position of this iteration).
        if (pos < 0)
            pos = sp->last_pos;
        else
            sp->last_pos = pos;

        // Evaluate key frames with known TS (or any frames, if AVSEEK_FLAG_ANY set).
        if (pts != AV_NOPTS_VALUE &&
            ((flg & AV_PKT_FLAG_KEY) || (flags & AVSEEK_FLAG_ANY))) {
            if (flags & AVSEEK_FLAG_BYTE) {
                // for byte seeking, use position as timestamp
                ts = pos;
                ts_tb.num = 1;
                ts_tb.den = 1;
            } else {
                // otherwise, get stream time_base
                ts = pts;
                ts_tb = st->time_base;
            }

            if (sp->first_ts == AV_NOPTS_VALUE) {
                // Note down termination timestamp for the next iteration - when
                // we encounter a packet with the same timestamp, we will ignore
                // any further packets for this stream in next iteration (as they
                // are already evaluated).
                sp->first_ts = ts;
                sp->first_ts_tb = ts_tb;
            }

            if (sp->term_ts != AV_NOPTS_VALUE &&
                av_compare_ts(ts, ts_tb, sp->term_ts, sp->term_ts_tb) > 0) {
                // past the end position from last iteration, ignore packet
                if (!sp->terminated) {
                    sp->terminated = 1;
                    ++terminated_count;
                    if (sp->pos_hi == INT64_MAX) {
                        // no high frame exists for this stream
                        (*found_hi)++;
                        sp->ts_hi  = INT64_MAX;
                        sp->pos_hi = INT64_MAX - 1;
                    }
                    if (terminated_count == keyframes_to_find)
                        break; // all terminated, iteration done
                }
                continue;
            }

            if (av_compare_ts(ts, ts_tb, timestamp, timebase) <= 0) {
                // keyframe found before target timestamp
                if (sp->pos_lo == INT64_MAX) {
                    // found first keyframe lower than target timestamp
                    (*found_lo)++;
                    sp->ts_lo  = ts;
                    sp->pos_lo = pos;
                } else if (sp->ts_lo < ts) {
                    // found a better match (closer to target timestamp)
                    sp->ts_lo  = ts;
                    sp->pos_lo = pos;
                }
            }
            if (av_compare_ts(ts, ts_tb, timestamp, timebase) >= 0) {
                // keyframe found after target timestamp
                if (sp->pos_hi == INT64_MAX) {
                    // found first keyframe higher than target timestamp
                    (*found_hi)++;
                    sp->ts_hi  = ts;
                    sp->pos_hi = pos;
                    if (*found_hi >= keyframes_to_find && first_iter) {
                        // We found high frame for all. They may get updated
                        // to TS closer to target TS in later iterations (which
                        // will stop at start position of previous iteration).
                        break;
                    }
                } else if (sp->ts_hi > ts) {
                    // found a better match (actually, shouldn't happen)
                    sp->ts_hi  = ts;
                    sp->pos_hi = pos;
                }
            }
        }
    }

    // Clean up the parser.
    ff_read_frame_flush(s);
}
/**
 * Generic multi-stream seek: scan backwards with exponentially growing
 * steps until every active stream has a keyframe on each side of the
 * target, then pick the earliest position whose timestamp falls inside
 * [ts_min, ts_max] and is closest to ts.
 *
 * @param stream_index reference stream (its time base is used), or -1
 *                     for AV_TIME_BASE units
 * @param pos          byte position to start the search around
 * @return the chosen file position (the stream is left seeked there),
 *         or -1 on failure (no active stream, OOM, or no keyframe in range)
 */
int64_t ff_gen_syncpoint_search(AVFormatContext *s,
                                int stream_index,
                                int64_t pos,
                                int64_t ts_min,
                                int64_t ts,
                                int64_t ts_max,
                                int flags)
{
    AVSyncPoint *sync, *sp;
    AVStream *st;
    int i;
    int keyframes_to_find = 0;
    int64_t curpos;
    int64_t step;
    int found_lo = 0, found_hi = 0;
    int64_t min_distance, distance;
    int64_t min_pos = 0;
    int first_iter = 1;
    AVRational time_base;

    if (flags & AVSEEK_FLAG_BYTE) {
        // for byte seeking, we have exact 1:1 "timestamps" - positions
        time_base.num = 1;
        time_base.den = 1;
    } else {
        if (stream_index >= 0) {
            // we have a reference stream, which time base we use
            st = s->streams[stream_index];
            time_base = st->time_base;
        } else {
            // no reference stream, use AV_TIME_BASE as reference time base
            time_base.num = 1;
            time_base.den = AV_TIME_BASE;
        }
    }

    // Initialize syncpoint structures for each stream.
    sync = av_malloc(s->nb_streams * sizeof(AVSyncPoint));
    if (!sync)
        // cannot allocate helper structure
        return -1;

    for (i = 0; i < s->nb_streams; ++i) {
        st = s->streams[i];
        sp = &sync[i];

        sp->pos_lo     = INT64_MAX;
        sp->ts_lo      = INT64_MAX;
        sp->pos_hi     = INT64_MAX;
        sp->ts_hi      = INT64_MAX;
        sp->terminated = 0;
        sp->first_ts   = AV_NOPTS_VALUE;
        sp->term_ts    = ts_max;
        sp->term_ts_tb = time_base;
        sp->last_pos   = pos;

        st->cur_dts    = AV_NOPTS_VALUE;

        if (st->discard < AVDISCARD_ALL)
            ++keyframes_to_find;
    }

    if (!keyframes_to_find) {
        // no stream active, error
        av_free(sync);
        return -1;
    }

    // Find keyframes in all active streams with timestamp/position just before
    // and just after requested timestamp/position.
    step = s->pb->buffer_size;
    curpos = FFMAX(pos - step / 2, 0);
    for (;;) {
        url_fseek(s->pb, curpos, SEEK_SET);
        search_hi_lo_keyframes(s,
                               ts, time_base,
                               flags,
                               sync,
                               keyframes_to_find,
                               &found_lo, &found_hi,
                               first_iter);
        if (found_lo == keyframes_to_find && found_hi == keyframes_to_find)
            break; // have all keyframes we wanted
        if (!curpos)
            break; // cannot go back anymore

        // exponential backoff: double the step each iteration
        curpos = pos - step;
        if (curpos < 0)
            curpos = 0;
        step *= 2;

        // switch termination positions
        for (i = 0; i < s->nb_streams; ++i) {
            st = s->streams[i];
            st->cur_dts = AV_NOPTS_VALUE;

            sp = &sync[i];
            if (sp->first_ts != AV_NOPTS_VALUE) {
                sp->term_ts    = sp->first_ts;
                sp->term_ts_tb = sp->first_ts_tb;
                sp->first_ts   = AV_NOPTS_VALUE;
            }
            sp->terminated = 0;
            sp->last_pos = curpos;
        }
        first_iter = 0;
    }

    // Find actual position to start decoding so that decoder synchronizes
    // closest to ts and between ts_min and ts_max.
    pos = INT64_MAX;

    for (i = 0; i < s->nb_streams; ++i) {
        st = s->streams[i];
        if (st->discard < AVDISCARD_ALL) {
            sp = &sync[i];
            min_distance = INT64_MAX;
            // Find timestamp closest to requested timestamp within min/max limits.
            if (sp->pos_lo != INT64_MAX
                && av_compare_ts(ts_min, time_base, sp->ts_lo, st->time_base) <= 0
                && av_compare_ts(sp->ts_lo, st->time_base, ts_max, time_base) <= 0) {
                // low timestamp is in range
                min_distance = ts_distance(ts, time_base, sp->ts_lo, st->time_base);
                min_pos = sp->pos_lo;
            }
            if (sp->pos_hi != INT64_MAX
                && av_compare_ts(ts_min, time_base, sp->ts_hi, st->time_base) <= 0
                && av_compare_ts(sp->ts_hi, st->time_base, ts_max, time_base) <= 0) {
                // high timestamp is in range, check distance
                distance = ts_distance(sp->ts_hi, st->time_base, ts, time_base);
                if (distance < min_distance) {
                    min_distance = distance;
                    min_pos = sp->pos_hi;
                }
            }
            if (min_distance == INT64_MAX) {
                // no timestamp is in range, cannot seek
                av_free(sync);
                return -1;
            }
            if (min_pos < pos)
                pos = min_pos;
        }
    }

    url_fseek(s->pb, pos, SEEK_SET);
    av_free(sync);
    return pos;
}
/**
 * Detach and save the demuxer's parsing state (buffered packets, parsers
 * and per-stream timestamp bookkeeping) so it can later be put back with
 * ff_restore_parser_state(). The context is left in a freshly reset
 * parsing state. Returns NULL on allocation failure.
 */
AVParserState *ff_store_parser_state(AVFormatContext *s)
{
    int idx;
    AVParserState *state = av_malloc(sizeof(AVParserState));
    if (!state)
        return NULL;
    state->stream_states = av_malloc(sizeof(AVParserStreamState) * s->nb_streams);
    if (!state->stream_states) {
        av_free(state);
        return NULL;
    }
    state->fpos = url_ftell(s->pb);
    /* move the context-level buffers into the saved state */
    state->cur_st                           = s->cur_st;
    state->packet_buffer                    = s->packet_buffer;
    state->raw_packet_buffer                = s->raw_packet_buffer;
    state->raw_packet_buffer_remaining_size = s->raw_packet_buffer_remaining_size;
    s->cur_st                           = NULL;
    s->packet_buffer                    = NULL;
    s->raw_packet_buffer                = NULL;
    s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
    /* move each stream's parsing state out and reset the stream */
    state->nb_streams = s->nb_streams;
    for (idx = 0; idx < s->nb_streams; idx++) {
        AVStream *stream         = s->streams[idx];
        AVParserStreamState *dst = &state->stream_states[idx];
        dst->parser        = stream->parser;
        dst->last_IP_pts   = stream->last_IP_pts;
        dst->cur_dts       = stream->cur_dts;
        dst->reference_dts = stream->reference_dts;
        dst->cur_ptr       = stream->cur_ptr;
        dst->cur_len       = stream->cur_len;
        dst->probe_packets = stream->probe_packets;
        dst->cur_pkt       = stream->cur_pkt;
        stream->parser        = NULL;
        stream->last_IP_pts   = AV_NOPTS_VALUE;
        stream->cur_dts       = AV_NOPTS_VALUE;
        stream->reference_dts = AV_NOPTS_VALUE;
        stream->cur_ptr       = NULL;
        stream->cur_len       = 0;
        stream->probe_packets = MAX_PROBE_PACKETS;
        av_init_packet(&stream->cur_pkt);
    }
    return state;
}
/**
 * Put back parsing state previously saved by ff_store_parser_state().
 * Any state currently held by the context is flushed first; the saved
 * state object itself is consumed and freed.
 */
void ff_restore_parser_state(AVFormatContext *s, AVParserState *state)
{
    int idx;
    ff_read_frame_flush(s);
    if (!state)
        return;
    url_fseek(s->pb, state->fpos, SEEK_SET);
    /* restore the context-level buffers */
    s->cur_st                           = state->cur_st;
    s->packet_buffer                    = state->packet_buffer;
    s->raw_packet_buffer                = state->raw_packet_buffer;
    s->raw_packet_buffer_remaining_size = state->raw_packet_buffer_remaining_size;
    /* restore each stream's parsing state */
    for (idx = 0; idx < state->nb_streams; idx++) {
        AVStream *stream         = s->streams[idx];
        AVParserStreamState *src = &state->stream_states[idx];
        stream->parser        = src->parser;
        stream->last_IP_pts   = src->last_IP_pts;
        stream->cur_dts       = src->cur_dts;
        stream->reference_dts = src->reference_dts;
        stream->cur_ptr       = src->cur_ptr;
        stream->cur_len       = src->cur_len;
        stream->probe_packets = src->probe_packets;
        stream->cur_pkt       = src->cur_pkt;
    }
    av_free(state->stream_states);
    av_free(state);
}
/* Release every packet in the list together with the list nodes themselves. */
static void free_packet_list(AVPacketList *pktl)
{
    while (pktl) {
        AVPacketList *next = pktl->next;
        av_free_packet(&pktl->pkt);
        av_free(pktl);
        pktl = next;
    }
}
/**
 * Dispose of a saved parser state without restoring it: close any open
 * parsers, drop all buffered packets and free the state object itself.
 */
void ff_free_parser_state(AVFormatContext *s, AVParserState *state)
{
    int idx;
    if (!state)
        return;
    for (idx = 0; idx < state->nb_streams; idx++) {
        AVParserStreamState *ss = &state->stream_states[idx];
        if (ss->parser)
            av_parser_close(ss->parser);
        av_free_packet(&ss->cur_pkt);
    }
    free_packet_list(state->packet_buffer);
    free_packet_list(state->raw_packet_buffer);
    av_free(state->stream_states);
    av_free(state);
}
| 123linslouis-android-video-cutter | jni/libavformat/seek.c | C | asf20 | 18,148 |
/*
* various utility functions for use within FFmpeg
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "internal.h"
#include "libavcodec/opt.h"
#include "metadata.h"
#include "libavutil/avstring.h"
#include "riff.h"
#include "audiointerleave.h"
#include <sys/time.h>
#include <time.h>
#include <strings.h>
#include <stdarg.h>
#if CONFIG_NETWORK
#include "network.h"
#endif
#undef NDEBUG
#include <assert.h>
/**
* @file
* various utility functions for use within FFmpeg
*/
/* Return the LIBAVFORMAT_VERSION_INT this library was compiled with. */
unsigned avformat_version(void)
{
    return LIBAVFORMAT_VERSION_INT;
}
/* Return the build-time configuration string of this libavformat. */
const char *avformat_configuration(void)
{
    return FFMPEG_CONFIGURATION;
}
/* Return the license name of this libavformat build. */
const char *avformat_license(void)
{
#define LICENSE_PREFIX "libavformat license: "
    /* The pointer is advanced past the prefix so callers get only the
       license name, while the full prefixed literal stays greppable in
       the compiled binary. */
    return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
}
/* fraction handling */
/**
 * Initialize an exact rational timestamp: f = val + num/den + 0.5
 * (the rounding comes from the half-denominator bias added below).
 * 'num' is normalized so that 0 <= num < den afterwards.
 *
 * @param f   fraction to initialize
 * @param val integer part
 * @param num numerator, must be >= 0
 * @param den denominator, must be >= 1
 */
static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
{
    /* round to nearest: bias by half the denominator before normalizing */
    num += den >> 1;
    if (num >= den) {
        val += num / den;
        num %= den;
    }
    f->val = val;
    f->num = num;
    f->den = den;
}
/**
 * Add incr/f->den to the fraction f, renormalizing so that
 * 0 <= f->num < f->den still holds afterwards.
 *
 * @param f    fraction to update
 * @param incr increment, may be positive or negative
 */
static void av_frac_add(AVFrac *f, int64_t incr)
{
    int64_t den = f->den;
    int64_t num = f->num + incr;
    if (num < 0) {
        /* borrow from the integer part until the numerator is non-negative */
        f->val += num / den;
        num %= den;
        if (num < 0) {
            num += den;
            f->val--;
        }
    } else if (num >= den) {
        /* carry the overflow into the integer part */
        f->val += num / den;
        num %= den;
    }
    f->num = num;
}
/** head of registered input format linked list (appended to by av_register_input_format()) */
AVInputFormat *first_iformat = NULL;
/** head of registered output format linked list (appended to by av_register_output_format()) */
AVOutputFormat *first_oformat = NULL;
/* Iterate over registered demuxers; passing NULL starts at the list head. */
AVInputFormat *av_iformat_next(AVInputFormat *f)
{
    return f ? f->next : first_iformat;
}
/* Iterate over registered muxers; passing NULL starts at the list head. */
AVOutputFormat *av_oformat_next(AVOutputFormat *f)
{
    return f ? f->next : first_oformat;
}
/* Append a demuxer at the tail of the registration list. */
void av_register_input_format(AVInputFormat *format)
{
    AVInputFormat **tail = &first_iformat;
    while (*tail)
        tail = &(*tail)->next;
    format->next = NULL;
    *tail = format;
}
/* Append a muxer at the tail of the registration list. */
void av_register_output_format(AVOutputFormat *format)
{
    AVOutputFormat **tail = &first_oformat;
    while (*tail)
        tail = &(*tail)->next;
    format->next = NULL;
    *tail = format;
}
/**
 * Check whether the filename's extension matches one entry of a
 * comma-separated extension list, case-insensitively.
 * Returns 1 on match, 0 otherwise (including NULL filename or no '.').
 */
int av_match_ext(const char *filename, const char *extensions)
{
    const char *ext;
    if (!filename)
        return 0;
    ext = strrchr(filename, '.');
    if (ext) {
        char candidate[32];
        const char *p = extensions;
        ext++; /* skip the '.' itself */
        for (;;) {
            /* copy the next comma-separated token, truncated to fit */
            char *q = candidate;
            while (*p != '\0' && *p != ',' && q - candidate < sizeof(candidate) - 1)
                *q++ = *p++;
            *q = '\0';
            if (!strcasecmp(candidate, ext))
                return 1;
            if (*p == '\0')
                break;
            p++; /* step over the ',' */
        }
    }
    return 0;
}
/**
 * Check whether 'name' equals (case-insensitively) one entry of the
 * comma-separated list 'names'. Returns 1 on match, 0 otherwise.
 */
static int match_format(const char *name, const char *names)
{
    const char *comma;
    int namelen;
    if (!name || !names)
        return 0;
    namelen = strlen(name);
    /* try each comma-separated entry in turn; compare over the longer of
       the two lengths so prefixes do not match */
    while ((comma = strchr(names, ','))) {
        int len = comma - names;
        if (namelen > len)
            len = namelen;
        if (!strncasecmp(name, names, len))
            return 1;
        names = comma + 1;
    }
    /* last (or only) entry */
    return !strcasecmp(name, names);
}
#if LIBAVFORMAT_VERSION_MAJOR < 53
/* Deprecated pre-53 entry point kept for ABI compatibility; forwards
 * directly to av_guess_format(). */
AVOutputFormat *guess_format(const char *short_name, const char *filename,
                             const char *mime_type)
{
    return av_guess_format(short_name, filename, mime_type);
}
#endif
/**
 * Pick the best-matching registered muxer, scoring each candidate by
 * short name (+100), MIME type (+10) and filename extension (+5).
 * Returns NULL when nothing matches at all.
 */
AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
                                const char *mime_type)
{
    AVOutputFormat *cand, *best = NULL;
    int best_score = 0;
    /* specific test for image sequences */
#if CONFIG_IMAGE2_MUXER
    if (!short_name && filename &&
        av_filename_number_test(filename) &&
        av_guess_image2_codec(filename) != CODEC_ID_NONE) {
        return av_guess_format("image2", NULL, NULL);
    }
#endif
    for (cand = first_oformat; cand; cand = cand->next) {
        int score = 0;
        if (cand->name && short_name && !strcmp(cand->name, short_name))
            score += 100;
        if (cand->mime_type && mime_type && !strcmp(cand->mime_type, mime_type))
            score += 10;
        if (filename && cand->extensions &&
            av_match_ext(filename, cand->extensions))
            score += 5;
        if (score > best_score) {
            best_score = score;
            best = cand;
        }
    }
    return best;
}
#if LIBAVFORMAT_VERSION_MAJOR < 53
/* Deprecated pre-53 helper: like av_guess_format(), but when a muxer
 * named "<name>_stream" is also registered, prefer that streaming
 * variant over the plain one. */
AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
                                    const char *mime_type)
{
    AVOutputFormat *fmt = av_guess_format(short_name, filename, mime_type);
    if (fmt) {
        AVOutputFormat *stream_fmt;
        char stream_format_name[64];
        snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
        stream_fmt = av_guess_format(stream_format_name, NULL, NULL);
        if (stream_fmt)
            fmt = stream_fmt;
    }
    return fmt;
}
#endif
/**
 * Choose the default codec id for a stream of the given media type that
 * is to be muxed into 'fmt'. For video going to image2/image2pipe, the
 * filename is consulted first to pick an image codec.
 */
enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
                            const char *filename, const char *mime_type, enum AVMediaType type){
    switch (type) {
    case AVMEDIA_TYPE_VIDEO: {
        enum CodecID codec_id = CODEC_ID_NONE;
#if CONFIG_IMAGE2_MUXER
        if (!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe"))
            codec_id = av_guess_image2_codec(filename);
#endif
        return codec_id == CODEC_ID_NONE ? fmt->video_codec : codec_id;
    }
    case AVMEDIA_TYPE_AUDIO:
        return fmt->audio_codec;
    default:
        return CODEC_ID_NONE;
    }
}
/* Find a registered demuxer whose (possibly comma-separated) name list
 * contains 'short_name'; NULL if none matches. */
AVInputFormat *av_find_input_format(const char *short_name)
{
    AVInputFormat *fmt = first_iformat;
    while (fmt) {
        if (match_format(short_name, fmt->name))
            return fmt;
        fmt = fmt->next;
    }
    return NULL;
}
#if LIBAVFORMAT_VERSION_MAJOR < 53 && CONFIG_SHARED && HAVE_SYMVER
/* Versioned-symbol compatibility shims: binaries linked against the old
 * LIBAVFORMAT_52 ABI resolved the av_*_packet functions inside
 * libavformat; forward those calls to the current libavcodec
 * implementations so such binaries keep working. */
FF_SYMVER(void, av_destruct_packet_nofree, (AVPacket *pkt), "LIBAVFORMAT_52")
{
    av_destruct_packet_nofree(pkt);
}
FF_SYMVER(void, av_destruct_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
{
    av_destruct_packet(pkt);
}
FF_SYMVER(int, av_new_packet, (AVPacket *pkt, int size), "LIBAVFORMAT_52")
{
    return av_new_packet(pkt, size);
}
FF_SYMVER(int, av_dup_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
{
    return av_dup_packet(pkt);
}
FF_SYMVER(void, av_free_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
{
    av_free_packet(pkt);
}
FF_SYMVER(void, av_init_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
{
    av_log(NULL, AV_LOG_WARNING, "Diverting av_*_packet function calls to libavcodec. Recompile to improve performance\n");
    av_init_packet(pkt);
}
#endif
/**
 * Allocate pkt to 'size' bytes and fill it from the byte stream.
 * On a short read the packet is shrunk to what was actually read; on
 * error or immediate EOF the packet is freed.
 * Returns the number of bytes read, or a negative error code.
 */
int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
{
    int n = av_new_packet(pkt, size);
    if (n < 0)
        return n;
    pkt->pos = url_ftell(s);
    n = get_buffer(s, pkt->data, size);
    if (n <= 0)
        av_free_packet(pkt);
    else
        av_shrink_packet(pkt, n);
    return n;
}
/* Return 1 if 'filename' contains a frame-number pattern usable for
 * image sequences (checked by expanding frame number 1), 0 otherwise. */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
}
/**
 * Probe all registered demuxers and return the one scoring strictly
 * above *score_max; *score_max is updated to the winning score.
 * A tie at the current maximum yields NULL (ambiguous detection).
 */
AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
{
    AVInputFormat *cand, *best = NULL;
    for (cand = first_iformat; cand; cand = cand->next) {
        int score = 0;
        /* NOFILE demuxers only probe unopened inputs and vice versa */
        if (!is_opened == !(cand->flags & AVFMT_NOFILE))
            continue;
        if (cand->read_probe)
            score = cand->read_probe(pd);
        else if (cand->extensions && av_match_ext(pd->filename, cand->extensions))
            score = 50;
        if (score > *score_max) {
            *score_max = score;
            best = cand;
        } else if (score == *score_max)
            best = NULL; /* two formats share the top score: undecidable */
    }
    return best;
}
/* Convenience wrapper: probe with no minimum score requirement. */
AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
    int score=0;
    return av_probe_input_format2(pd, is_opened, &score);
}
/**
 * Re-probe an elementary stream whose codec is still CODEC_ID_PROBE and,
 * if one of the known raw formats is detected, set the stream's codec
 * id and type accordingly. Returns nonzero when a format was detected.
 */
static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
{
    /* mapping from probed demuxer name to the elementary stream codec */
    static const struct {
        const char *name;
        enum CodecID id;
        enum AVMediaType type;
    } fmt_id_type[] = {
        { "mp3",       CODEC_ID_MP3,        AVMEDIA_TYPE_AUDIO },
        { "ac3",       CODEC_ID_AC3,        AVMEDIA_TYPE_AUDIO },
        { "eac3",      CODEC_ID_EAC3,       AVMEDIA_TYPE_AUDIO },
        { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
        { "m4v",       CODEC_ID_MPEG4,      AVMEDIA_TYPE_VIDEO },
        { "h264",      CODEC_ID_H264,       AVMEDIA_TYPE_VIDEO },
        { "dts",       CODEC_ID_DTS,        AVMEDIA_TYPE_AUDIO },
        { "aac",       CODEC_ID_AAC,        AVMEDIA_TYPE_AUDIO },
    };
    AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
    if (fmt) {
        int i;
        av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
               pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
        for (i = 0; i < sizeof(fmt_id_type) / sizeof(fmt_id_type[0]); i++) {
            if (!strcmp(fmt->name, fmt_id_type[i].name)) {
                st->codec->codec_id   = fmt_id_type[i].id;
                st->codec->codec_type = fmt_id_type[i].type;
                break;
            }
        }
    }
    return !!fmt;
}
/************************************************************/
/* input media file */
/**
 * Open a media file from an already-opened IO stream. 'fmt' must be
 * specified; no probing is performed here.
 *
 * @param ic_ptr   receives the format context on success; when
 *                 ap->prealloced_context is set it must already point to a
 *                 caller-allocated context
 * @param pb       byte stream to demux from (may be NULL for AVFMT_NOFILE formats)
 * @param filename stored in the context, used for logging and messages
 * @param fmt      demuxer to use
 * @param ap       optional parameters; NULL selects all-zero defaults
 * @return 0 on success, a negative AVERROR code on failure
 */
int av_open_input_stream(AVFormatContext **ic_ptr,
                         ByteIOContext *pb, const char *filename,
                         AVInputFormat *fmt, AVFormatParameters *ap)
{
    int err;
    AVFormatContext *ic;
    AVFormatParameters default_ap;
    if(!ap){
        ap=&default_ap;
        memset(ap, 0, sizeof(default_ap));
    }
    /* the caller may supply a preallocated, preconfigured context */
    if(!ap->prealloced_context)
        ic = avformat_alloc_context();
    else
        ic = *ic_ptr;
    if (!ic) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ic->iformat = fmt;
    ic->pb = pb;
    ic->duration = AV_NOPTS_VALUE;
    ic->start_time = AV_NOPTS_VALUE;
    av_strlcpy(ic->filename, filename, sizeof(ic->filename));
    /* allocate private data */
    if (fmt->priv_data_size > 0) {
        ic->priv_data = av_mallocz(fmt->priv_data_size);
        if (!ic->priv_data) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
    } else {
        ic->priv_data = NULL;
    }
    if (ic->iformat->read_header) {
        err = ic->iformat->read_header(ic, ap);
        if (err < 0)
            goto fail;
    }
    /* remember where packet data starts unless read_header already did */
    if (pb && !ic->data_offset)
        ic->data_offset = url_ftell(ic->pb);
#if LIBAVFORMAT_VERSION_MAJOR < 53
    ff_metadata_demux_compat(ic);
#endif
    ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
    *ic_ptr = ic;
    return 0;
    /* NOTE(review): on failure the context is freed even when the caller
       preallocated it (ap->prealloced_context) -- confirm callers expect
       *ic_ptr to be invalid after an error before changing this. */
 fail:
    if (ic) {
        int i;
        av_freep(&ic->priv_data);
        for(i=0;i<ic->nb_streams;i++) {
            AVStream *st = ic->streams[i];
            if (st) {
                av_free(st->priv_data);
                av_free(st->codec->extradata);
            }
            av_free(st);
        }
    }
    av_free(ic);
    *ic_ptr = NULL;
    return err;
}
/** size of probe buffer, for guessing file type from file contents */
#define PROBE_BUF_MIN 2048
#define PROBE_BUF_MAX (1<<20)

/**
 * Probe the bytestream to detect the input format, growing the probe
 * buffer geometrically from PROBE_BUF_MIN up to max_probe_size.
 *
 * @param pb             opened bytestream; rewound (by reusing the probe
 *                       buffer) before returning successfully
 * @param fmt            may be preset by the caller; on success holds the
 *                       detected input format
 * @param filename       used for extension-based scoring, may be ""
 * @param logctx         context used for logging only
 * @param offset         number of leading bytes excluded from probing
 * @param max_probe_size maximum probe buffer size, 0 selects the default
 * @return >= 0 on success, a negative AVERROR code otherwise
 */
int ff_probe_input_buffer(ByteIOContext **pb, AVInputFormat **fmt,
                          const char *filename, void *logctx,
                          unsigned int offset, unsigned int max_probe_size)
{
    AVProbeData pd = { filename ? filename : "", NULL, -offset };
    unsigned char *buf = NULL;
    int ret = 0, probe_size;
    if (!max_probe_size) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size > PROBE_BUF_MAX) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size < PROBE_BUF_MIN) {
        return AVERROR(EINVAL);
    }
    if (offset >= max_probe_size) {
        return AVERROR(EINVAL);
    }
    for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt && ret >= 0;
        probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
        /* only the final iteration (full buffer) may accept a low score */
        int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
        int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
        void *new_buf;
        if (probe_size < offset) {
            continue;
        }
        /* grow the probe buffer; check the result so an OOM neither
           dereferences NULL nor leaks the previous buffer */
        new_buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
        if (!new_buf) {
            av_free(buf);
            return AVERROR(ENOMEM);
        }
        buf = new_buf;
        /* read probe data into the newly grown half of the buffer */
        if ((ret = get_buffer(*pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
            /* fail if error was not end of file, otherwise, lower score */
            if (ret != AVERROR_EOF) {
                av_free(buf);
                return ret;
            }
            score = 0;
            ret = 0; /* error was end of file, nothing read */
        }
        pd.buf_size += ret;
        pd.buf = &buf[offset];
        memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
        /* guess file format */
        *fmt = av_probe_input_format2(&pd, 1, &score);
        if(*fmt){
            if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
                av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
            }else
                av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
        }
    }
    if (!*fmt) {
        av_free(buf);
        return AVERROR_INVALIDDATA;
    }
    /* rewind. reuse probe buffer to avoid seeking */
    if ((ret = ff_rewind_with_probe_data(*pb, buf, pd.buf_size)) < 0)
        av_free(buf);
    return ret;
}
/**
 * Open and, if necessary, probe a media file by name.
 *
 * @param ic_ptr   receives the format context; if ap->prealloced_context
 *                 is set it must already point to a caller-allocated context
 * @param filename file to open (unused by AVFMT_NOFILE formats)
 * @param fmt      force this input format, or NULL to autodetect
 * @param buf_size IO buffer size, 0 for the default
 * @param ap       optional extra parameters, may be NULL
 * @return 0 on success, a negative AVERROR code on failure (in which case
 *         *ic_ptr is set to NULL and a preallocated context is freed)
 */
int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
                       AVInputFormat *fmt,
                       int buf_size,
                       AVFormatParameters *ap)
{
    int err;
    AVProbeData probe_data, *pd = &probe_data;
    ByteIOContext *pb = NULL;
    void *logctx= ap && ap->prealloced_context ? *ic_ptr : NULL;
    pd->filename = "";
    if (filename)
        pd->filename = filename;
    pd->buf = NULL;
    pd->buf_size = 0;
    if (!fmt) {
        /* guess format if no file can be opened */
        fmt = av_probe_input_format(pd, 0);
    }
    /* Do not open file if the format does not need it. XXX: specific
       hack needed to handle RTSP/TCP */
    if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
        /* if no file needed do not try to open one */
        if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
            goto fail;
        }
        if (buf_size > 0) {
            url_setbufsize(pb, buf_size);
        }
        /* content-based detection with a growing probe buffer */
        if (!fmt && (err = ff_probe_input_buffer(&pb, &fmt, filename, logctx, 0, logctx ? (*ic_ptr)->probesize : 0)) < 0) {
            goto fail;
        }
    }
    /* if still no format found, error */
    if (!fmt) {
        err = AVERROR_INVALIDDATA;
        goto fail;
    }
    /* check filename in case an image number is expected */
    if (fmt->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(filename)) {
            err = AVERROR_NUMEXPECTED;
            goto fail;
        }
    }
    err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
    if (err)
        goto fail;
    return 0;
 fail:
    av_freep(&pd->buf);
    if (pb)
        url_fclose(pb);
    /* a preallocated context is owned (and freed) by this call on failure */
    if (ap && ap->prealloced_context)
        av_free(*ic_ptr);
    *ic_ptr = NULL;
    return err;
}
/*******************************************************/
/* Append *pkt to the list anchored at *packet_buffer (tail tracked in
 * *plast_pktl) and return a pointer to the stored copy, or NULL on OOM. */
static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
                               AVPacketList **plast_pktl){
    AVPacketList *node = av_mallocz(sizeof(AVPacketList));
    if (!node)
        return NULL;
    node->pkt = *pkt;
    if (*packet_buffer)
        (*plast_pktl)->next = node; /* link after the current tail */
    else
        *packet_buffer = node;      /* first entry becomes the head */
    *plast_pktl = node;
    return &node->pkt;
}
/**
 * Read the next raw packet from the demuxer. While a stream's codec is
 * still CODEC_ID_PROBE, its packets are held in raw_packet_buffer and
 * their payload is fed to the format prober until the codec is
 * identified (or the probe budget runs out), after which buffered
 * packets are handed out in order.
 */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, i;
    AVStream *st;
    for(;;){
        AVPacketList *pktl = s->raw_packet_buffer;
        if (pktl) {
            *pkt = pktl->pkt;
            /* release a buffered packet once its stream stopped probing,
               exhausted its probe packets, or the buffer budget is spent */
            if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
               !s->streams[pkt->stream_index]->probe_packets ||
               s->raw_packet_buffer_remaining_size < pkt->size){
                AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
                av_freep(&pd->buf);
                pd->buf_size = 0;
                s->raw_packet_buffer = pktl->next;
                s->raw_packet_buffer_remaining_size += pkt->size;
                av_free(pktl);
                return 0;
            }
        }
        av_init_packet(pkt);
        ret= s->iformat->read_packet(s, pkt);
        if (ret < 0) {
            if (!pktl || ret == AVERROR(EAGAIN))
                return ret;
            /* demuxer hit EOF/error: stop probing so the buffered packets
               above get flushed out on the next iterations */
            for (i = 0; i < s->nb_streams; i++)
                s->streams[i]->probe_packets = 0;
            continue;
        }
        st= s->streams[pkt->stream_index];
        /* a user-forced codec id overrides whatever the demuxer set */
        switch(st->codec->codec_type){
        case AVMEDIA_TYPE_VIDEO:
            if(s->video_codec_id)   st->codec->codec_id= s->video_codec_id;
            break;
        case AVMEDIA_TYPE_AUDIO:
            if(s->audio_codec_id)   st->codec->codec_id= s->audio_codec_id;
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
            break;
        }
        if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
                     !st->probe_packets))
            return ret;
        add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
        s->raw_packet_buffer_remaining_size -= pkt->size;
        if(st->codec->codec_id == CODEC_ID_PROBE){
            AVProbeData *pd = &st->probe_data;
            av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
            --st->probe_packets;
            pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
            memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
            pd->buf_size += pkt->size;
            memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
            /* only re-probe when the buffer size crosses a power of two,
               to amortize the cost of probing */
            if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
                //FIXME we dont reduce score to 0 for the case of running out of buffer space in bytes
                set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
                if(st->codec->codec_id != CODEC_ID_PROBE){
                    pd->buf_size=0;
                    av_freep(&pd->buf);
                    av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
                }
            }
        }
    }
}
/**********************************************************/
/**
 * Work out how many samples an audio packet of 'size' bytes contains.
 * Returns -1 when it cannot be determined.
 */
static int get_audio_frame_size(AVCodecContext *enc, int size)
{
    int bits_per_sample;
    /* Vorbis has no fixed frame size we could derive here */
    if (enc->codec_id == CODEC_ID_VORBIS)
        return -1;
    /* codecs with a known fixed frame size */
    if (enc->frame_size > 1)
        return enc->frame_size;
    bits_per_sample = av_get_bits_per_sample(enc->codec_id);
    if (bits_per_sample) {
        /* constant bits per sample: derive directly from the byte count */
        if (enc->channels == 0)
            return -1;
        return (size << 3) / (bits_per_sample * enc->channels);
    }
    /* used for example by ADPCM codecs: estimate via the bitrate */
    if (enc->bit_rate == 0)
        return -1;
    return ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
}
/**
 * Compute the duration of one frame as the rational *pnum / *pden
 * seconds. Both are set to 0 when the duration is not available.
 */
static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
                                   AVCodecParserContext *pc, AVPacket *pkt)
{
    int frame_size;
    *pnum = 0;
    *pden = 0;
    switch(st->codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        /* prefer the container time base when it looks like a real frame
           rate (more than ~1ms per tick) */
        if(st->time_base.num*1000LL > st->time_base.den){
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
            *pnum = st->codec->time_base.num;
            *pden = st->codec->time_base.den;
            /* repeated fields/frames (e.g. pulldown) stretch the duration */
            if (pc && pc->repeat_pict) {
                *pnum = (*pnum) * (1 + pc->repeat_pict);
            }
            //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
            //Thus if we have no parser in such case leave duration undefined.
            if(st->codec->ticks_per_frame>1 && !pc){
                *pnum = *pden = 0;
            }
        }
        break;
    case AVMEDIA_TYPE_AUDIO:
        frame_size = get_audio_frame_size(st->codec, pkt->size);
        if (frame_size < 0)
            break;
        *pnum = frame_size;
        *pden = st->codec->sample_rate;
        break;
    default:
        break;
    }
}
/* Return 1 if every frame produced by this codec is independently
 * decodable (all audio, plus the listed intra-only video codecs). */
static int is_intra_only(AVCodecContext *enc){
    if (enc->codec_type == AVMEDIA_TYPE_AUDIO)
        return 1;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        switch (enc->codec_id) {
        case CODEC_ID_MJPEG:
        case CODEC_ID_MJPEGB:
        case CODEC_ID_LJPEG:
        case CODEC_ID_RAWVIDEO:
        case CODEC_ID_DVVIDEO:
        case CODEC_ID_HUFFYUV:
        case CODEC_ID_FFVHUFF:
        case CODEC_ID_ASV1:
        case CODEC_ID_ASV2:
        case CODEC_ID_VCR1:
        case CODEC_ID_DNXHD:
        case CODEC_ID_JPEG2000:
            return 1;
        default:
            break;
        }
    }
    return 0;
}
/**
 * Once the first packet with a known dts arrives, derive st->first_dts
 * and shift the timestamps of the already-buffered packets of this
 * stream by the same amount. No-op if first_dts is already known or
 * the needed timestamps are missing.
 */
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts)
{
    AVStream *st= s->streams[stream_index];
    AVPacketList *pktl= s->packet_buffer;
    if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
        return;
    st->first_dts= dts - st->cur_dts;
    st->cur_dts= dts;
    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != stream_index)
            continue;
        //FIXME think more about this check
        /* only shift pts when it equals dts (i.e. not a reordered frame) */
        if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
            pktl->pkt.pts += st->first_dts;
        if(pktl->pkt.dts != AV_NOPTS_VALUE)
            pktl->pkt.dts += st->first_dts;
        if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
            st->start_time= pktl->pkt.pts;
    }
    if (st->start_time == AV_NOPTS_VALUE)
        st->start_time = pts;
}
/**
 * When the first packet carrying a duration arrives, backfill the
 * dts/pts/duration of the timestampless packets buffered before it,
 * assuming they all share this packet's duration.
 */
static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    AVPacketList *pktl= s->packet_buffer;
    int64_t cur_dts= 0;
    if(st->first_dts != AV_NOPTS_VALUE){
        cur_dts= st->first_dts;
        /* step back over the buffered timestampless packets to find the
           dts the oldest one should get */
        for(; pktl; pktl= pktl->next){
            if(pktl->pkt.stream_index == pkt->stream_index){
                if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
                    break;
                cur_dts -= pkt->duration;
            }
        }
        pktl= s->packet_buffer;
        st->first_dts = cur_dts;
    }else if(st->cur_dts)
        return;
    /* assign increasing timestamps to the leading timestampless packets */
    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != pkt->stream_index)
            continue;
        if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
           && !pktl->pkt.duration){
            pktl->pkt.dts= cur_dts;
            if(!st->codec->has_b_frames)
                pktl->pkt.pts= cur_dts;
            cur_dts += pkt->duration;
            pktl->pkt.duration= pkt->duration;
        }else
            break;
    }
    if(st->first_dts == AV_NOPTS_VALUE)
        st->cur_dts= cur_dts;
}
/**
 * Fill in missing pts/dts/duration fields of pkt using parser info, the
 * stream time base and previously observed timestamps, and set
 * AV_PKT_FLAG_KEY for intra-only codecs or parser-detected keyframes.
 * The heuristics below are order-dependent; statements must not be
 * reordered casually.
 */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
{
    int num, den, presentation_delayed, delay, i;
    int64_t offset;
    if (s->flags & AVFMT_FLAG_NOFILLIN)
        return;
    if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
        pkt->dts= AV_NOPTS_VALUE;
    /* seeing a B-frame proves the codec reorders frames */
    if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == FF_B_TYPE)
        //FIXME Set low_delay = 0 when has_b_frames = 1
        st->codec->has_b_frames = 1;
    /* do we have a video B-frame ? */
    delay= st->codec->has_b_frames;
    presentation_delayed = 0;
    /* XXX: need has_b_frame, but cannot get it if the codec is
       not initialized */
    if (delay &&
        pc && pc->pict_type != FF_B_TYPE)
        presentation_delayed = 1;
    /* dts > pts can only happen after a timestamp wraparound; unwrap dts */
    if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
       /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
        pkt->dts -= 1LL<<st->pts_wrap_bits;
    }
    // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
    // we take the conservative approach and discard both
    // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
    if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
        av_log(s, AV_LOG_WARNING, "invalid dts/pts combination\n");
        pkt->dts= pkt->pts= AV_NOPTS_VALUE;
    }
    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, pc, pkt);
        if (den && num) {
            pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
            if(pkt->duration != 0 && s->packet_buffer)
                update_initial_durations(s, st, pkt);
        }
    }
    /* correct timestamps with byte offset if demuxers only have timestamps
       on packet boundaries */
    if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
        /* this will estimate bitrate based on this frame's duration and size */
        offset = av_rescale(pc->offset, pkt->duration, pkt->size);
        if(pkt->pts != AV_NOPTS_VALUE)
            pkt->pts += offset;
        if(pkt->dts != AV_NOPTS_VALUE)
            pkt->dts += offset;
    }
    if (pc && pc->dts_sync_point >= 0) {
        // we have synchronization info from the parser
        int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
        if (den > 0) {
            int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
            if (pkt->dts != AV_NOPTS_VALUE) {
                // got DTS from the stream, update reference timestamp
                st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
                pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
            } else if (st->reference_dts != AV_NOPTS_VALUE) {
                // compute DTS based on reference timestamp
                pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
                pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
            }
            if (pc->dts_sync_point > 0)
                st->reference_dts = pkt->dts; // new reference
        }
    }
    /* This may be redundant, but it should not hurt. */
    if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
        presentation_delayed = 1;
//    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
    /* interpolate PTS and DTS if they are not present */
    //We skip H264 currently because delay and has_b_frames are not reliably set
    if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
        if (presentation_delayed) {
            /* DTS = decompression timestamp */
            /* PTS = presentation timestamp */
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->last_IP_pts;
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->cur_dts;
            /* this is tricky: the dts must be incremented by the duration
               of the frame we are displaying, i.e. the last I- or P-frame */
            if (st->last_IP_duration == 0)
                st->last_IP_duration = pkt->duration;
            if(pkt->dts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->dts + st->last_IP_duration;
            st->last_IP_duration = pkt->duration;
            st->last_IP_pts= pkt->pts;
            /* cannot compute PTS if not present (we can compute it only
               by knowing the future */
        } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
            if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
                int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
                int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
                if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
                    pkt->pts += pkt->duration;
//                    av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
                }
            }
            /* presentation is not delayed : PTS and DTS are the same */
            if(pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = pkt->dts;
            update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
            if(pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = st->cur_dts;
            pkt->dts = pkt->pts;
            if(pkt->pts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->pts + pkt->duration;
        }
    }
    if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
        /* small reorder buffer of recent pts values used to derive a
           missing dts for codecs with B-frame delay */
        st->pts_buffer[0]= pkt->pts;
        for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
        if(pkt->dts == AV_NOPTS_VALUE)
            pkt->dts= st->pts_buffer[0];
        if(st->codec->codec_id == CODEC_ID_H264){ //we skiped it above so we try here
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
        }
        if(pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;
    }
//    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
    /* update flags */
    if(is_intra_only(st->codec))
        pkt->flags |= AV_PKT_FLAG_KEY;
    else if (pc) {
        pkt->flags = 0;
        /* keyframe computation */
        if (pc->key_frame == 1)
            pkt->flags |= AV_PKT_FLAG_KEY;
        else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
            pkt->flags |= AV_PKT_FLAG_KEY;
    }
    if (pc)
        pkt->convergence_duration = pc->convergence_duration;
}
/**
 * Read the next frame, running the per-stream parser when needed so each
 * returned packet holds exactly one frame. Timestamp fields are filled
 * in via compute_pkt_fields(); index entries are added for keyframes
 * when the demuxer sets AVFMT_GENERIC_INDEX. Returns 0 on success or a
 * negative error code (AVERROR(EAGAIN) means try again later).
 */
static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st;
    int len, ret, i;
    av_init_packet(pkt);
    for(;;) {
        /* select current input stream component */
        st = s->cur_st;
        if (st) {
            if (!st->need_parsing || !st->parser) {
                /* no parsing needed: we just output the packet as is */
                /* raw data support */
                *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
                compute_pkt_fields(s, st, NULL, pkt);
                s->cur_st = NULL;
                if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                    (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                    ff_reduce_index(s, st->index);
                    av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
                }
                break;
            } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
                /* feed the remaining payload of the current packet to the
                   parser; it may emit a complete frame into pkt */
                len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
                                       st->cur_ptr, st->cur_len,
                                       st->cur_pkt.pts, st->cur_pkt.dts,
                                       st->cur_pkt.pos);
                /* the timestamps may only be passed to the parser once */
                st->cur_pkt.pts = AV_NOPTS_VALUE;
                st->cur_pkt.dts = AV_NOPTS_VALUE;
                /* increment read pointer */
                st->cur_ptr += len;
                st->cur_len -= len;
                /* return packet if any */
                if (pkt->size) {
                got_packet:
                    pkt->duration = 0;
                    pkt->stream_index = st->index;
                    pkt->pts = st->parser->pts;
                    pkt->dts = st->parser->dts;
                    pkt->pos = st->parser->pos;
                    pkt->destruct = NULL;
                    compute_pkt_fields(s, st, st->parser, pkt);
                    if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
                        ff_reduce_index(s, st->index);
                        av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
                                           0, 0, AVINDEX_KEYFRAME);
                    }
                    break;
                }
            } else {
                /* free packet */
                av_free_packet(&st->cur_pkt);
                s->cur_st = NULL;
            }
        } else {
            AVPacket cur_pkt;
            /* read next packet */
            ret = av_read_packet(s, &cur_pkt);
            if (ret < 0) {
                if (ret == AVERROR(EAGAIN))
                    return ret;
                /* return the last frames, if any */
                for(i = 0; i < s->nb_streams; i++) {
                    st = s->streams[i];
                    if (st->parser && st->need_parsing) {
                        /* a NULL/0 input flushes the parser */
                        av_parser_parse2(st->parser, st->codec,
                                         &pkt->data, &pkt->size,
                                         NULL, 0,
                                         AV_NOPTS_VALUE, AV_NOPTS_VALUE,
                                         AV_NOPTS_VALUE);
                        if (pkt->size)
                            goto got_packet;
                    }
                }
                /* no more packets: really terminate parsing */
                return ret;
            }
            st = s->streams[cur_pkt.stream_index];
            st->cur_pkt= cur_pkt;
            if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
               st->cur_pkt.dts != AV_NOPTS_VALUE &&
               st->cur_pkt.pts < st->cur_pkt.dts){
                av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
                       st->cur_pkt.stream_index,
                       st->cur_pkt.pts,
                       st->cur_pkt.dts,
                       st->cur_pkt.size);
//                av_free_packet(&st->cur_pkt);
//                return -1;
            }
            if(s->debug & FF_FDEBUG_TS)
                av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
                       st->cur_pkt.stream_index,
                       st->cur_pkt.pts,
                       st->cur_pkt.dts,
                       st->cur_pkt.size,
                       st->cur_pkt.duration,
                       st->cur_pkt.flags);
            s->cur_st = st;
            st->cur_ptr = st->cur_pkt.data;
            st->cur_len = st->cur_pkt.size;
            /* lazily create the parser on the first packet of the stream */
            if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
                st->parser = av_parser_init(st->codec->codec_id);
                if (!st->parser) {
                    /* no parser available: just output the raw packets */
                    st->need_parsing = AVSTREAM_PARSE_NONE;
                }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
                    st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
                }
                if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
                    st->parser->next_frame_offset=
                    st->parser->cur_offset= st->cur_pkt.pos;
                }
            }
        }
    }
    if(s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
               pkt->stream_index,
               pkt->pts,
               pkt->dts,
               pkt->size,
               pkt->duration,
               pkt->flags);
    return 0;
}
/**
 * Return the next frame of a stream to the caller.
 *
 * Without AVFMT_FLAG_GENPTS this is a thin pass-through to
 * av_read_frame_internal(). With GENPTS, packets are staged in
 * s->packet_buffer and a missing PTS is derived from the DTS of a later
 * packet of the same stream before the head packet is handed out.
 *
 * @param s   demuxer context
 * @param pkt receives the next packet (caller frees)
 * @return 0 on success, negative AVERROR on failure/EOF
 */
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
    AVPacketList *pktl;
    int eof=0;
    const int genpts= s->flags & AVFMT_FLAG_GENPTS;

    for(;;){
        pktl = s->packet_buffer;
        if (pktl) {
            AVPacket *next_pkt= &pktl->pkt;

            if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
                /* scan buffered packets of the same stream for a later DTS
                   that can stand in for the head packet's missing PTS */
                while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
                    if(   pktl->pkt.stream_index == next_pkt->stream_index
                       && next_pkt->dts < pktl->pkt.dts
                       && pktl->pkt.pts != pktl->pkt.dts //not b frame
                       /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
                        next_pkt->pts= pktl->pkt.dts;
                    }
                    pktl= pktl->next;
                }
                pktl = s->packet_buffer;
            }

            /* release the head once its PTS is known, can no longer be
               derived, or derivation is not requested at all */
            if(   next_pkt->pts != AV_NOPTS_VALUE
               || next_pkt->dts == AV_NOPTS_VALUE
               || !genpts || eof){
                /* read packet from packet buffer, if there is data */
                *pkt = *next_pkt;
                s->packet_buffer = pktl->next;
                av_free(pktl);
                return 0;
            }
        }
        if(genpts){
            int ret= av_read_frame_internal(s, pkt);
            if(ret<0){
                if(pktl && ret != AVERROR(EAGAIN)){
                    /* on EOF keep draining what is buffered */
                    eof=1;
                    continue;
                }else
                    return ret;
            }
            if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
                                           &s->packet_buffer_end)) < 0)
                return AVERROR(ENOMEM);
        }else{
            assert(!s->packet_buffer);
            return av_read_frame_internal(s, pkt);
        }
    }
}
/* XXX: suppress the packet queue */
/* Free every queued packet (parsed and raw) and reset the queue state. */
static void flush_packet_queue(AVFormatContext *s)
{
    AVPacketList *entry;

    /* drain the parsed-packet queue */
    while ((entry = s->packet_buffer)) {
        s->packet_buffer = entry->next;
        av_free_packet(&entry->pkt);
        av_free(entry);
    }
    /* drain the raw-packet (probing) queue */
    while ((entry = s->raw_packet_buffer)) {
        s->raw_packet_buffer = entry->next;
        av_free_packet(&entry->pkt);
        av_free(entry);
    }
    s->packet_buffer_end     = NULL;
    s->raw_packet_buffer_end = NULL;
    s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
}
/*******************************************************/
/* seek support */

/* Pick the stream seeks should default to: the first video stream,
 * else the first audio stream, else stream 0. Returns -1 when the
 * context has no streams at all. */
int av_find_default_stream_index(AVFormatContext *s)
{
    int i;
    int audio_index = -1;

    if (s->nb_streams <= 0)
        return -1;
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *stream = s->streams[i];
        /* the first video stream always wins */
        if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            return i;
        /* remember the first audio stream as a fallback */
        if (audio_index < 0 && stream->codec->codec_type == AVMEDIA_TYPE_AUDIO)
            audio_index = i;
    }
    return audio_index >= 0 ? audio_index : 0;
}
/**
 * Flush the frame reader.
 *
 * Drops all queued packets and resets every stream's parser and
 * timestamp bookkeeping, e.g. after a seek.
 */
void ff_read_frame_flush(AVFormatContext *s)
{
    int i, j;

    flush_packet_queue(s);

    s->cur_st = NULL;

    /* reset per-stream parsing and timestamp state */
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];

        if (st->parser) {
            av_parser_close(st->parser);
            st->parser = NULL;
            av_free_packet(&st->cur_pkt);
        }
        st->last_IP_pts    = AV_NOPTS_VALUE;
        st->cur_dts        = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
        st->reference_dts  = AV_NOPTS_VALUE;
        /* fail safe */
        st->cur_ptr = NULL;
        st->cur_len = 0;

        st->probe_packets = MAX_PROBE_PACKETS;

        for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
            st->pts_buffer[j] = AV_NOPTS_VALUE;
    }
}
/* Set every stream's cur_dts to 'timestamp' (given in ref_st's time
 * base), rescaled into each stream's own time base. */
void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
    int i;

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *cur = s->streams[i];

        cur->cur_dts = av_rescale(timestamp,
                                  cur->time_base.den * (int64_t)ref_st->time_base.num,
                                  cur->time_base.num * (int64_t)ref_st->time_base.den);
    }
}
/* Keep the seek index within s->max_index_size: once the entry count
 * reaches the budget, halve it by retaining every second entry. */
void ff_reduce_index(AVFormatContext *s, int stream_index)
{
    AVStream *st = s->streams[stream_index];
    unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);

    if ((unsigned)st->nb_index_entries >= max_entries) {
        int dst;
        for (dst = 0; 2 * dst < st->nb_index_entries; dst++)
            st->index_entries[dst] = st->index_entries[2 * dst];
        st->nb_index_entries = dst;
    }
}
/**
 * Add (or update) an entry in the sorted seek index of st.
 *
 * @param pos       byte position of the frame
 * @param timestamp timestamp of the frame, in st->time_base units
 * @param size      frame size in bytes
 * @param distance  distance to the previous keyframe
 * @param flags     AVINDEX_* flags (e.g. AVINDEX_KEYFRAME)
 * @return index of the entry, or -1 on failure
 */
int av_add_index_entry(AVStream *st,
                       int64_t pos, int64_t timestamp, int size, int distance, int flags)
{
    AVIndexEntry *entries, *ie;
    int index;

    /* refuse to grow past the point where the allocation size would overflow */
    if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
        return -1;

    entries = av_fast_realloc(st->index_entries,
                              &st->index_entries_allocated_size,
                              (st->nb_index_entries + 1) *
                              sizeof(AVIndexEntry));
    if(!entries)
        return -1;

    st->index_entries= entries;

    /* locate the first entry with entry.timestamp >= timestamp */
    index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);

    if(index<0){
        /* timestamp is larger than everything present: append */
        index= st->nb_index_entries++;
        ie= &entries[index];
        assert(index==0 || ie[-1].timestamp < timestamp);
    }else{
        ie= &entries[index];
        if(ie->timestamp != timestamp){
            if(ie->timestamp <= timestamp)
                return -1;
            /* insert before 'index', shifting the tail up by one slot
               (the realloc above guaranteed room for it) */
            memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
            st->nb_index_entries++;
        }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
            distance= ie->min_distance;
    }

    ie->pos = pos;
    ie->timestamp = timestamp;
    ie->min_distance= distance;
    ie->size= size;
    ie->flags = flags;

    return index;
}
/**
 * Binary-search the seek index of st for wanted_timestamp.
 *
 * @param flags AVSEEK_FLAG_BACKWARD selects the entry at or before the
 *              timestamp (otherwise at or after); AVSEEK_FLAG_ANY allows
 *              non-keyframe entries.
 * @return index of the matching entry, or -1 if none exists in the
 *         requested direction
 */
int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
                              int flags)
{
    AVIndexEntry *entries= st->index_entries;
    int nb_entries= st->nb_index_entries;
    int a, b, m;
    int64_t timestamp;

    /* invariant: entries[a].timestamp <= wanted <= entries[b].timestamp,
       with a == -1 / b == nb_entries acting as sentinels */
    a = - 1;
    b = nb_entries;

    //optimize appending index entries at the end
    if(b && entries[b-1].timestamp < wanted_timestamp)
        a= b-1;

    while (b - a > 1) {
        m = (a + b) >> 1;
        timestamp = entries[m].timestamp;
        /* on an exact hit both branches fire, collapsing a and b onto m */
        if(timestamp >= wanted_timestamp)
            b = m;
        if(timestamp <= wanted_timestamp)
            a = m;
    }
    m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;

    if(!(flags & AVSEEK_FLAG_ANY)){
        /* step over non-keyframe entries in the requested direction */
        while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
            m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
        }
    }

    /* m == -1 falls through and is returned as-is ("nothing before") */
    if(m == nb_entries)
        return -1;
    return m;
}
/* NOTE(review): DEBUG_SEEK being defined enables the verbose seek logging
   below; presumably a development leftover — confirm before release. */
#define DEBUG_SEEK

/**
 * Seek to target_ts using the format's read_timestamp() callback,
 * narrowing the search with any cached index entries first.
 *
 * @return 0 on success, <0 on failure
 */
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
    AVInputFormat *avif= s->iformat;
    int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
    int64_t ts_min, ts_max, ts;
    int index;
    int64_t ret;
    AVStream *st;

    if (stream_index < 0)
        return -1;

#ifdef DEBUG_SEEK
    av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
#endif

    ts_max=
    ts_min= AV_NOPTS_VALUE;
    pos_limit= -1; //gcc falsely says it may be uninitialized

    st= s->streams[stream_index];
    if(st->index_entries){
        AVIndexEntry *e;

        /* lower bound from the index: last keyframe at or before target */
        index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
        index= FFMAX(index, 0);
        e= &st->index_entries[index];

        if(e->timestamp <= target_ts || e->pos == e->min_distance){
            pos_min= e->pos;
            ts_min= e->timestamp;
#ifdef DEBUG_SEEK
            av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
                   pos_min,ts_min);
#endif
        }else{
            assert(index==0);
        }

        /* upper bound from the index: first entry at or after target */
        index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
        assert(index < st->nb_index_entries);
        if(index >= 0){
            e= &st->index_entries[index];
            assert(e->timestamp >= target_ts);
            pos_max= e->pos;
            ts_max= e->timestamp;
            pos_limit= pos_max - e->min_distance;
#ifdef DEBUG_SEEK
            av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
                   pos_max,pos_limit, ts_max);
#endif
        }
    }

    pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
    if(pos<0)
        return -1;

    /* do the seek */
    if ((ret = url_fseek(s->pb, pos, SEEK_SET)) < 0)
        return ret;

    av_update_cur_dts(s, st, ts);

    return 0;
}
/**
 * Generic timestamp search: locate the byte position whose timestamp
 * brackets target_ts, using interpolation first, then bisection, then
 * linear scan (tracked via 'no_change') when the bounds stop moving.
 *
 * Bounds not supplied by the caller (AV_NOPTS_VALUE) are discovered by
 * probing the start and the tail of the file with read_timestamp().
 *
 * @param ts_ret receives the timestamp found at the returned position
 * @return byte position to seek to, or -1 on failure
 */
int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
    int64_t pos, ts;
    int64_t start_pos, filesize;
    int no_change;

#ifdef DEBUG_SEEK
    av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
#endif

    if(ts_min == AV_NOPTS_VALUE){
        /* no lower bound cached: take the first timestamp in the payload */
        pos_min = s->data_offset;
        ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
        if (ts_min == AV_NOPTS_VALUE)
            return -1;
    }

    if(ts_max == AV_NOPTS_VALUE){
        /* no upper bound cached: probe backwards from EOF in doubling
           steps until a timestamp is found */
        int step= 1024;
        filesize = url_fsize(s->pb);
        pos_max = filesize - 1;
        do{
            pos_max -= step;
            ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
            step += step;
        }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
        if (ts_max == AV_NOPTS_VALUE)
            return -1;

        /* advance to the very last timestamp in the file */
        for(;;){
            int64_t tmp_pos= pos_max + 1;
            int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
            if(tmp_ts == AV_NOPTS_VALUE)
                break;
            ts_max= tmp_ts;
            pos_max= tmp_pos;
            if(tmp_pos >= filesize)
                break;
        }
        pos_limit= pos_max;
    }

    if(ts_min > ts_max){
        return -1;
    }else if(ts_min == ts_max){
        pos_limit= pos_min;
    }

    no_change=0;
    while (pos_min < pos_limit) {
#ifdef DEBUG_SEEK
        av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
               pos_min, pos_max,
               ts_min, ts_max);
#endif
        assert(pos_limit <= pos_max);

        if(no_change==0){
            int64_t approximate_keyframe_distance= pos_max - pos_limit;
            // interpolate position (better than dichotomy)
            pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
                + pos_min - approximate_keyframe_distance;
        }else if(no_change==1){
            // bisection, if interpolation failed to change min or max pos last time
            pos = (pos_min + pos_limit)>>1;
        }else{
            /* linear search if bisection failed, can only happen if there
               are very few or no keyframes between min/max */
            pos=pos_min;
        }
        if(pos <= pos_min)
            pos= pos_min + 1;
        else if(pos > pos_limit)
            pos= pos_limit;
        start_pos= pos;

        ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
        if(pos == pos_max)
            no_change++;
        else
            no_change=0;
#ifdef DEBUG_SEEK
        av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
               pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
               start_pos, no_change);
#endif
        if(ts == AV_NOPTS_VALUE){
            av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
            return -1;
        }
        assert(ts != AV_NOPTS_VALUE);
        /* shrink the bracket; on an exact hit both branches apply */
        if (target_ts <= ts) {
            pos_limit = start_pos - 1;
            pos_max = pos;
            ts_max = ts;
        }
        if (target_ts >= ts) {
            pos_min = pos;
            ts_min = ts;
        }
    }

    pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
    ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min  : ts_max;
#ifdef DEBUG_SEEK
    pos_min = pos;
    ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    pos_min++;
    ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
           pos, ts_min, target_ts, ts_max);
#endif
    *ts_ret= ts;
    return pos;
}
/**
 * Seek to an absolute byte position, clamped to the payload region of
 * the file. stream_index and flags are currently unused.
 *
 * Fix: removed the two dead '#if 0' blocks (unused AVStream lookup and
 * an av_update_cur_dts() call referencing an undeclared 'ts') that were
 * commented-out code kept in the tree.
 *
 * @return 0 (the seek itself is not error-checked, as before)
 */
static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
    int64_t pos_min, pos_max;

    /* clamp the target into [data_offset, filesize - 1] */
    pos_min = s->data_offset;
    pos_max = url_fsize(s->pb) - 1;

    if (pos < pos_min)
        pos = pos_min;
    else if (pos > pos_max)
        pos = pos_max;

    url_fseek(s->pb, pos, SEEK_SET);
    return 0;
}
/**
 * Generic seek for formats without read_timestamp(): use the seek index
 * when it already covers the target, otherwise read frames forward from
 * the last indexed position (growing the index as a side effect) until
 * the target is passed, then retry the index lookup.
 *
 * @return 0 on success, <0 on failure
 */
static int av_seek_frame_generic(AVFormatContext *s,
                                 int stream_index, int64_t timestamp, int flags)
{
    int index;
    int64_t ret;
    AVStream *st;
    AVIndexEntry *ie;

    st = s->streams[stream_index];

    index = av_index_search_timestamp(st, timestamp, flags);

    /* target lies before the first indexed entry: unreachable */
    if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
        return -1;

    if(index < 0 || index==st->nb_index_entries-1){
        int i;
        AVPacket pkt;

        /* resume reading at the last known index position (or the start
           of the payload) so new index entries get generated */
        if(st->nb_index_entries){
            assert(st->index_entries);
            ie= &st->index_entries[st->nb_index_entries-1];
            if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
                return ret;
            av_update_cur_dts(s, st, ie->timestamp);
        }else{
            if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
                return ret;
        }
        for(i=0;; i++) {
            int ret;
            do{
                ret = av_read_frame(s, &pkt);
            }while(ret == AVERROR(EAGAIN));
            if(ret<0)
                break;
            /* only pkt's scalar fields are read below, so the payload can
               be released immediately */
            av_free_packet(&pkt);
            if(stream_index == pkt.stream_index){
                if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
                    break;
            }
        }
        index = av_index_search_timestamp(st, timestamp, flags);
    }
    if (index < 0)
        return -1;

    ff_read_frame_flush(s);
    /* give the demuxer's own seek a chance first */
    if (s->iformat->read_seek){
        if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
            return 0;
    }
    ie = &st->index_entries[index];
    if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
        return ret;
    av_update_cur_dts(s, st, ie->timestamp);

    return 0;
}
/* Public seek entry point: dispatch to byte seek, the demuxer's own
 * read_seek(), binary timestamp search, or the generic index-based seek. */
int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    AVStream *st;
    int ret;

    ff_read_frame_flush(s);

    if (flags & AVSEEK_FLAG_BYTE)
        return av_seek_frame_byte(s, stream_index, timestamp, flags);

    if (stream_index < 0) {
        stream_index = av_find_default_stream_index(s);
        if (stream_index < 0)
            return -1;

        st = s->streams[stream_index];
        /* timestamp for default must be expressed in AV_TIME_BASE units */
        timestamp = av_rescale(timestamp, st->time_base.den,
                               AV_TIME_BASE * (int64_t)st->time_base.num);
    }

    /* first, we try the format specific seek */
    ret = s->iformat->read_seek
        ? s->iformat->read_seek(s, stream_index, timestamp, flags)
        : -1;
    if (ret >= 0)
        return 0;

    if (s->iformat->read_timestamp)
        return av_seek_frame_binary(s, stream_index, timestamp, flags);
    return av_seek_frame_generic(s, stream_index, timestamp, flags);
}
/**
 * New-style seek API: seek to ts within [min_ts, max_ts] on stream_index.
 * Falls back to the old av_seek_frame() semantics when the demuxer does
 * not implement read_seek2().
 */
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    /* reject an empty or inverted seek window */
    if(min_ts > ts || max_ts < ts)
        return -1;

    ff_read_frame_flush(s);

    if (s->iformat->read_seek2)
        return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);

    if(s->iformat->read_timestamp){
        //try to seek via read_timestamp()
    }

    //Fallback to old API if new is not implemented but old is
    //Note the old has somewhat different semantics
    /* direction: seek backward if ts sits closer to max_ts than to min_ts */
    if(s->iformat->read_seek || 1)
        return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));

    // try some generic seek like av_seek_frame_generic() but with new ts semantics
}
/*******************************************************/

/**
 * Returns TRUE if the stream has accurate duration in any stream.
 *
 * @return TRUE if the stream has accurate duration for at least one component.
 */
static int av_has_duration(AVFormatContext *ic)
{
    int i;

    for (i = 0; i < ic->nb_streams; i++)
        if (ic->streams[i]->duration != AV_NOPTS_VALUE)
            return 1;
    return 0;
}
/**
 * Estimate the stream timings from the one of each components.
 *
 * Also computes the global bitrate if possible.
 */
static void av_update_stream_timings(AVFormatContext *ic)
{
    int64_t start_time = INT64_MAX;
    int64_t end_time   = INT64_MIN;
    int64_t duration   = INT64_MIN;
    int i;

    for (i = 0; i < ic->nb_streams; i++) {
        AVStream *st = ic->streams[i];

        if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
            int64_t st_start = av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
            if (st_start < start_time)
                start_time = st_start;
            if (st->duration != AV_NOPTS_VALUE) {
                int64_t st_end = st_start
                               + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
                if (st_end > end_time)
                    end_time = st_end;
            }
        }
        if (st->duration != AV_NOPTS_VALUE) {
            int64_t st_dur = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
            if (st_dur > duration)
                duration = st_dur;
        }
    }
    if (start_time != INT64_MAX) {
        ic->start_time = start_time;
        /* a valid end time may imply a longer duration than any single stream */
        if (end_time != INT64_MIN && end_time - start_time > duration)
            duration = end_time - start_time;
    }
    if (duration != INT64_MIN) {
        ic->duration = duration;
        if (ic->file_size > 0) {
            /* compute the bitrate */
            ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
                           (double)ic->duration;
        }
    }
}
/* Propagate the container-level start_time/duration to every stream
 * that does not have its own start_time yet. */
static void fill_all_stream_timings(AVFormatContext *ic)
{
    int i;

    av_update_stream_timings(ic);
    for (i = 0; i < ic->nb_streams; i++) {
        AVStream *st = ic->streams[i];

        if (st->start_time != AV_NOPTS_VALUE)
            continue;
        if (ic->start_time != AV_NOPTS_VALUE)
            st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
        if (ic->duration != AV_NOPTS_VALUE)
            st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
    }
}
/* Derive a total bitrate from the per-stream bitrates, then estimate a
 * duration for streams lacking one from file size / bitrate. */
static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
{
    int i;

    /* if bit_rate is already set, we believe it */
    if (ic->bit_rate == 0) {
        int total = 0;
        for (i = 0; i < ic->nb_streams; i++)
            total += ic->streams[i]->codec->bit_rate;
        ic->bit_rate = total;
    }

    /* if duration is already set, we believe it */
    if (ic->duration == AV_NOPTS_VALUE &&
        ic->bit_rate != 0 &&
        ic->file_size != 0) {
        int64_t filesize = ic->file_size;
        if (filesize > 0) {
            for (i = 0; i < ic->nb_streams; i++) {
                AVStream *st = ic->streams[i];
                int64_t est = av_rescale(8 * filesize, st->time_base.den,
                                         ic->bit_rate * (int64_t)st->time_base.num);
                if (st->duration == AV_NOPTS_VALUE)
                    st->duration = est;
            }
        }
    }
}
/* how many bytes of the file tail to scan per attempt, and how often to
   widen the window before giving up */
#define DURATION_MAX_READ_SIZE 250000
#define DURATION_MAX_RETRY 3

/* only usable for MPEG-PS streams */
/* Estimate per-stream durations by reading PTSes from a window at the
 * end of the file; the window doubles on each retry until a usable end
 * timestamp is found. Restores the read position afterwards. */
static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
{
    AVPacket pkt1, *pkt = &pkt1;
    AVStream *st;
    int read_size, i, ret;
    int64_t end_time, start_time[MAX_STREAMS];
    int64_t filesize, offset, duration;
    int retry=0;

    ic->cur_st = NULL;

    /* flush packet queue */
    flush_packet_queue(ic);

    /* record each stream's start time and drop any active parser */
    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        if(st->start_time != AV_NOPTS_VALUE){
            start_time[i]= st->start_time;
        }else if(st->first_dts != AV_NOPTS_VALUE){
            start_time[i]= st->first_dts;
        }else
            av_log(st->codec, AV_LOG_WARNING, "start time is not set in av_estimate_timings_from_pts\n");

        if (st->parser) {
            av_parser_close(st->parser);
            st->parser= NULL;
            av_free_packet(&st->cur_pkt);
        }
    }

    /* estimate the end time (duration) */
    /* XXX: may need to support wrapping */
    filesize = ic->file_size;
    end_time = AV_NOPTS_VALUE;
    do{
        offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
        if (offset < 0)
            offset = 0;

        url_fseek(ic->pb, offset, SEEK_SET);
        read_size = 0;
        for(;;) {
            /* only scan the newly-widened part of the window */
            if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
                break;

            do{
                ret = av_read_packet(ic, pkt);
            }while(ret == AVERROR(EAGAIN));
            if (ret != 0)
                break;
            read_size += pkt->size;
            st = ic->streams[pkt->stream_index];
            if (pkt->pts != AV_NOPTS_VALUE &&
                start_time[pkt->stream_index] != AV_NOPTS_VALUE) {
                end_time = pkt->pts;
                duration = end_time - start_time[pkt->stream_index];
                /* negative span means the PTS wrapped once */
                if (duration < 0)
                    duration += 1LL<<st->pts_wrap_bits;
                if (duration > 0) {
                    if (st->duration == AV_NOPTS_VALUE ||
                        st->duration < duration)
                        st->duration = duration;
                }
            }
            av_free_packet(pkt);
        }
    }while(   end_time==AV_NOPTS_VALUE
           && filesize > (DURATION_MAX_READ_SIZE<<retry)
           && ++retry <= DURATION_MAX_RETRY);

    fill_all_stream_timings(ic);

    /* restore the original read position and timestamp state */
    url_fseek(ic->pb, old_offset, SEEK_SET);
    for(i=0; i<ic->nb_streams; i++){
        st= ic->streams[i];
        st->cur_dts= st->first_dts;
        st->last_IP_pts = AV_NOPTS_VALUE;
    }
}
/**
 * Fill in missing start_time/duration/bit_rate for the context and its
 * streams, choosing the best available strategy:
 *  - MPEG-PS/TS on a seekable file: read PTSes from the file tail,
 *  - any stream already has a duration: propagate it,
 *  - otherwise: estimate from bitrate and file size.
 *
 * Fix: removed the dead '#if 0' per-stream timing printf dump that was
 * commented-out debug code kept in the tree.
 */
static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
{
    int64_t file_size;

    /* get the file size, if possible */
    if (ic->iformat->flags & AVFMT_NOFILE) {
        file_size = 0;
    } else {
        file_size = url_fsize(ic->pb);
        if (file_size < 0)
            file_size = 0;
    }
    ic->file_size = file_size;

    if ((!strcmp(ic->iformat->name, "mpeg") ||
         !strcmp(ic->iformat->name, "mpegts")) &&
        file_size && !url_is_streamed(ic->pb)) {
        /* get accurate estimate from the PTSes */
        av_estimate_timings_from_pts(ic, old_offset);
    } else if (av_has_duration(ic)) {
        /* at least one component has timings - we use them for all
           the components */
        fill_all_stream_timings(ic);
    } else {
        av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
        /* less precise: use bitrate info */
        av_estimate_timings_from_bit_rate(ic);
    }
    av_update_stream_timings(ic);
}
/* Return nonzero when enc carries enough parameters to be usable:
 * a known codec id plus the per-media-type essentials. */
static int has_codec_parameters(AVCodecContext *enc)
{
    int ok;

    switch (enc->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        ok = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
        /* these audio codecs additionally need a known frame size */
        switch (enc->codec_id) {
        case CODEC_ID_VORBIS:
        case CODEC_ID_AAC:
        case CODEC_ID_MP1:
        case CODEC_ID_MP2:
        case CODEC_ID_MP3:
        case CODEC_ID_SPEEX:
            if (!enc->frame_size)
                return 0;
            break;
        default:
            break;
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
        ok = enc->width && enc->pix_fmt != PIX_FMT_NONE;
        break;
    default:
        ok = 1;
        break;
    }
    return enc->codec_id != CODEC_ID_NONE && ok != 0;
}
/**
 * Try to decode one packet of st so the decoder can fill in missing
 * codec parameters (used by av_find_stream_info()). Opens a decoder on
 * the stream's context if none is open yet.
 *
 * Fix: on av_malloc() failure the original jumped to 'fail' with ret
 * still 0 (or the >=0 avcodec_open() result), silently reporting
 * success; it now returns AVERROR(ENOMEM).
 *
 * @return >=0 on success (bytes consumed by the decoder), <0 on error
 */
static int try_decode_frame(AVStream *st, AVPacket *avpkt)
{
    int16_t *samples;
    AVCodec *codec;
    int got_picture, data_size, ret = 0;
    AVFrame picture;

    if (!st->codec->codec) {
        codec = avcodec_find_decoder(st->codec->codec_id);
        if (!codec)
            return -1;
        ret = avcodec_open(st->codec, codec);
        if (ret < 0)
            return ret;
    }

    if (!has_codec_parameters(st->codec)) {
        switch (st->codec->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            avcodec_get_frame_defaults(&picture);
            ret = avcodec_decode_video2(st->codec, &picture,
                                        &got_picture, avpkt);
            break;
        case AVMEDIA_TYPE_AUDIO:
            data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
            samples = av_malloc(data_size);
            if (!samples) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            ret = avcodec_decode_audio3(st->codec, samples,
                                        &data_size, avpkt);
            av_free(samples);
            break;
        default:
            break;
        }
    }
 fail:
    return ret;
}
/* Look up the container tag for codec 'id' in a CODEC_ID_NONE-terminated
 * table; returns 0 when the id is not present. */
unsigned int ff_codec_get_tag(const AVCodecTag *tags, int id)
{
    const AVCodecTag *t;

    for (t = tags; t->id != CODEC_ID_NONE; t++)
        if (t->id == id)
            return t->tag;
    return 0;
}
/* Look up the codec id for a container tag: first an exact match, then
 * a case-insensitive byte-by-byte (fourcc) match. */
enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
{
    int i;

    /* first pass: exact tag match */
    for (i = 0; tags[i].id != CODEC_ID_NONE; i++)
        if (tag == tags[i].tag)
            return tags[i].id;

    /* second pass: compare the four tag bytes ignoring case */
    for (i = 0; tags[i].id != CODEC_ID_NONE; i++) {
        int k, same = 1;
        for (k = 0; k < 4; k++)
            if (toupper((tag >> (8 * k)) & 0xFF) != toupper((tags[i].tag >> (8 * k)) & 0xFF))
                same = 0;
        if (same)
            return tags[i].id;
    }
    return CODEC_ID_NONE;
}
/* Search a NULL-terminated list of tag tables for a tag matching 'id'. */
unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
{
    int i;

    for (i = 0; tags && tags[i]; i++) {
        unsigned int tag = ff_codec_get_tag(tags[i], id);
        if (tag)
            return tag;
    }
    return 0;
}
/* Search a NULL-terminated list of tag tables for a codec id matching 'tag'. */
enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
{
    int i;

    for (i = 0; tags && tags[i]; i++) {
        enum CodecID id = ff_codec_get_id(tags[i], tag);
        if (id != CODEC_ID_NONE)
            return id;
    }
    return CODEC_ID_NONE;
}
/**
 * Fill in missing chapter end times: each open-ended chapter ends where
 * the next one starts; the last one ends at the file's end time.
 * Relies on chapters being sorted by start and sharing a time base
 * (asserted below).
 */
static void compute_chapters_end(AVFormatContext *s)
{
    unsigned int i;

    for (i=0; i+1<s->nb_chapters; i++)
        if (s->chapters[i]->end == AV_NOPTS_VALUE) {
            assert(s->chapters[i]->start <= s->chapters[i+1]->start);
            assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
            s->chapters[i]->end = s->chapters[i+1]->start;
        }

    /* after the loop i indexes the last chapter (when any exist) */
    if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
        assert(s->start_time != AV_NOPTS_VALUE);
        assert(s->duration > 0);
        s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
                                           AV_TIME_BASE_Q,
                                           s->chapters[i]->time_base);
    }
}
#define MAX_STD_TIMEBASES (60*12+5)
/* Map index i to a candidate frame rate expressed in units of 1/(12*1001)
 * seconds per tick: i < 720 covers n/1.001 rates, the 5 extra slots cover
 * the common integer rates 24/30/60/12/15. */
static int get_std_framerate(int i){
    static const int extra_rates[] = { 24, 30, 60, 12, 15 };

    if (i < 60*12)
        return i * 1001;
    return extra_rates[i - 60*12] * 1000 * 12;
}
/*
 * Is the time base unreliable.
 * This is a heuristic to balance between quick acceptance of the values in
 * the headers vs. some extra checks.
 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
 * MPEG-2 commonly misuses field repeat flags to store different framerates.
 * And there are "variable" fps files this needs to detect as well.
 */
static int tb_unreliable(AVCodecContext *c){
    /* codecs known to misreport their time base */
    if (c->codec_id == CODEC_ID_MPEG2VIDEO || c->codec_id == CODEC_ID_H264)
        return 1;
    /* || c->codec_tag == AV_RL32("DIVX")
       || c->codec_tag == AV_RL32("XVID") */
    /* implausibly high (>100fps-ish) or low (<5fps-ish) nominal rates */
    if (c->time_base.den >= 101L * c->time_base.num)
        return 1;
    if (c->time_base.den < 5L * c->time_base.num)
        return 1;
    return 0;
}
/**
 * Read packets to determine missing stream parameters (codec params,
 * frame rates, durations). Packets read here are buffered in
 * ic->packet_buffer so demuxing can continue afterwards; the read
 * position is NOT restored for non-seekable inputs.
 *
 * @return >=0 on success (number of packets examined), <0 on error
 */
int av_find_stream_info(AVFormatContext *ic)
{
    int i, count, ret, read_size, j;
    AVStream *st;
    AVPacket pkt1, *pkt;
    int64_t last_dts[MAX_STREAMS];
    int64_t duration_gcd[MAX_STREAMS]={0};
    int duration_count[MAX_STREAMS]={0};
    double (*duration_error)[MAX_STD_TIMEBASES];
    int64_t old_offset = url_ftell(ic->pb);
    int64_t codec_info_duration[MAX_STREAMS]={0};

    duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
    if (!duration_error) return AVERROR(ENOMEM);

    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        /* force AAC parameters to be re-derived by the parser/decoder;
           header values are often wrong for it */
        if (st->codec->codec_id == CODEC_ID_AAC) {
            st->codec->sample_rate = 0;
            st->codec->frame_size = 0;
            st->codec->channels = 0;
        }
        if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
/*            if(!st->time_base.num)
                st->time_base= */
            if(!st->codec->time_base.num)
                st->codec->time_base= st->time_base;
        }
        //only for the split stuff
        if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codec->codec_id);
            if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            }
        }
        assert(!st->codec->codec);
        //try to just open decoders, in case this is enough to get parameters
        if(!has_codec_parameters(st->codec)){
            AVCodec *codec = avcodec_find_decoder(st->codec->codec_id);
            if (codec)
                avcodec_open(st->codec, codec);
        }
    }

    for(i=0;i<MAX_STREAMS;i++){
        last_dts[i]= AV_NOPTS_VALUE;
    }

    count = 0;
    read_size = 0;
    for(;;) {
        if(url_interrupt_cb()){
            ret= AVERROR(EINTR);
            av_log(ic, AV_LOG_DEBUG, "interrupted\n");
            break;
        }

        /* check if one codec still needs to be handled */
        for(i=0;i<ic->nb_streams;i++) {
            st = ic->streams[i];
            if (!has_codec_parameters(st->codec))
                break;
            /* variable fps and no guess at the real fps */
            if(   tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
               && duration_count[i]<20 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
                break;
            if(st->parser && st->parser->parser->split && !st->codec->extradata)
                break;
            if(st->first_dts == AV_NOPTS_VALUE)
                break;
        }
        if (i == ic->nb_streams) {
            /* NOTE: if the format has no header, then we need to read
               some packets to get most of the streams, so we cannot
               stop here */
            if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
                /* if we found the info for all the codecs, we can stop */
                ret = count;
                av_log(ic, AV_LOG_DEBUG, "All info found\n");
                break;
            }
        }
        /* we did not get all the codec info, but we read too much data */
        if (read_size >= ic->probesize) {
            ret = count;
            av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
            break;
        }

        /* NOTE: a new stream can be added there if no header in file
           (AVFMTCTX_NOHEADER) */
        ret = av_read_frame_internal(ic, &pkt1);
        if(ret == AVERROR(EAGAIN))
            continue;
        if (ret < 0) {
            /* EOF or error */
            ret = -1; /* we could not have all the codec parameters before EOF */
            for(i=0;i<ic->nb_streams;i++) {
                st = ic->streams[i];
                if (!has_codec_parameters(st->codec)){
                    char buf[256];
                    avcodec_string(buf, sizeof(buf), st->codec, 0);
                    av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
                } else {
                    ret = 0;
                }
            }
            break;
        }

        /* keep the packet so normal demuxing can replay it later */
        pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
        if(av_dup_packet(pkt) < 0) {
            av_free(duration_error);
            return AVERROR(ENOMEM);
        }

        read_size += pkt->size;

        st = ic->streams[pkt->stream_index];
        if(st->codec_info_nb_frames>1) {
            if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration){
                av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
                break;
            }
            codec_info_duration[st->index] += pkt->duration;
        }
        st->codec_info_nb_frames++;

        {
            /* accumulate per-stream frame-interval statistics used below
               to guess the real frame rate */
            int index= pkt->stream_index;
            int64_t last= last_dts[index];
            int64_t duration= pkt->dts - last;

            if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
                double dur= duration * av_q2d(st->time_base);

//                if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
//                    av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
                if(duration_count[index] < 2)
                    memset(duration_error[index], 0, sizeof(*duration_error));
                /* accumulate, for each candidate standard rate, the squared
                   deviation of the observed interval from that rate */
                for(i=1; i<MAX_STD_TIMEBASES; i++){
                    int framerate= get_std_framerate(i);
                    int ticks= lrintf(dur*framerate/(1001*12));
                    double error= dur - ticks*1001*12/(double)framerate;
                    duration_error[index][i] += error*error;
                }
                duration_count[index]++;
                // ignore the first 4 values, they might have some random jitter
                if (duration_count[index] > 3)
                    duration_gcd[index] = av_gcd(duration_gcd[index], duration);
            }
            if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
                last_dts[pkt->stream_index]= pkt->dts;
        }
        if(st->parser && st->parser->parser->split && !st->codec->extradata){
            int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
            if(i){
                st->codec->extradata_size= i;
                st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
                memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
                memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
            }
        }

        /* if still no information, we try to open the codec and to
           decompress the frame. We try to avoid that in most cases as
           it takes longer and uses more memory. For MPEG-4, we need to
           decompress for QuickTime. */
        if (!has_codec_parameters(st->codec))
            try_decode_frame(st, pkt);

        count++;
    }

    // close codecs which were opened in try_decode_frame()
    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        if(st->codec->codec)
            avcodec_close(st->codec);
    }
    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        /* average frame rate from the observed frame count and duration */
        if(st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && codec_info_duration[i])
            av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
                     (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
                      codec_info_duration[i]    *(int64_t)st->time_base.num, 60000);
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
                st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);

            // the check for tb_unreliable() is not completely correct, since this is not about handling
            // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
            // ipmovie.c produces.
            if (tb_unreliable(st->codec) && duration_count[i] > 15 && duration_gcd[i] > 1 && !st->r_frame_rate.num)
                av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * duration_gcd[i], INT_MAX);
            if(duration_count[i] && !st->r_frame_rate.num
               && tb_unreliable(st->codec) /*&&
               //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
               st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
                int num = 0;
                double best_error= 2*av_q2d(st->time_base);
                best_error= best_error*best_error*duration_count[i]*1000*12*30;

                /* pick the standard rate whose accumulated error is lowest */
                for(j=1; j<MAX_STD_TIMEBASES; j++){
                    double error= duration_error[i][j] * get_std_framerate(j);
//                    if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
//                        av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
                    if(error < best_error){
                        best_error= error;
                        num = get_std_framerate(j);
                    }
                }
                // do not increase frame rate by more than 1 % in order to match a standard rate.
                if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
                    av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
            }

            /* last resort: derive r_frame_rate from the codec or stream time base */
            if (!st->r_frame_rate.num){
                if(    st->codec->time_base.den * (int64_t)st->time_base.num
                    <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
                    st->r_frame_rate.num = st->codec->time_base.den;
                    st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
                }else{
                    st->r_frame_rate.num = st->time_base.den;
                    st->r_frame_rate.den = st->time_base.num;
                }
            }
        }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if(!st->codec->bits_per_coded_sample)
                st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
        }
    }

    av_estimate_timings(ic, old_offset);

    compute_chapters_end(ic);

#if 0
    /* correct DTS for B-frame streams with no timestamps */
    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if(b-frames){
                ppktl = &ic->packet_buffer;
                while(ppkt1){
                    if(ppkt1->stream_index != i)
                        continue;
                    if(ppkt1->pkt->dts < 0)
                        break;
                    if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
                        break;
                    ppkt1->pkt->dts -= delta;
                    ppkt1= ppkt1->next;
                }
                if(ppkt1)
                    continue;
                st->cur_dts -= delta;
            }
        }
    }
#endif

    av_free(duration_error);

    return ret;
}
/*******************************************************/
/**
 * Start playing a network-based stream.
 * Prefers the demuxer's own read_play() callback; otherwise falls back to
 * un-pausing the underlying byte stream.
 * @return 0 on success, AVERROR(ENOSYS) when neither mechanism exists.
 */
int av_read_play(AVFormatContext *s)
{
    if (s->iformat->read_play)
        return s->iformat->read_play(s);
    return s->pb ? av_url_read_fpause(s->pb, 0) : AVERROR(ENOSYS);
}
/**
 * Pause a network-based stream.
 * Prefers the demuxer's own read_pause() callback; otherwise falls back to
 * pausing the underlying byte stream.
 * @return 0 on success, AVERROR(ENOSYS) when neither mechanism exists.
 */
int av_read_pause(AVFormatContext *s)
{
    if (s->iformat->read_pause)
        return s->iformat->read_pause(s);
    return s->pb ? av_url_read_fpause(s->pb, 1) : AVERROR(ENOSYS);
}
/**
 * Free an AVFormatContext and everything it owns (streams, programs,
 * chapters, metadata, queued packets), WITHOUT closing the underlying
 * byte stream.  After this call s is invalid.
 */
void av_close_input_stream(AVFormatContext *s)
{
    int i;
    AVStream *st;

    /* give the demuxer a chance to release its private state first */
    if (s->iformat->read_close)
        s->iformat->read_close(s);
    for(i=0;i<s->nb_streams;i++) {
        /* free all data in a stream component */
        st = s->streams[i];
        if (st->parser) {
            av_parser_close(st->parser);
            /* cur_pkt is only owned while a parser is active */
            av_free_packet(&st->cur_pkt);
        }
        av_metadata_free(&st->metadata);
        av_free(st->index_entries);
        /* extradata must go before the codec context that points at it */
        av_free(st->codec->extradata);
        av_free(st->codec);
#if LIBAVFORMAT_VERSION_INT < (53<<16)
        /* pre-53 ABI kept a separately allocated filename on the stream */
        av_free(st->filename);
#endif
        av_free(st->priv_data);
        av_free(st);
    }
    for(i=s->nb_programs-1; i>=0; i--) {
#if LIBAVFORMAT_VERSION_INT < (53<<16)
        av_freep(&s->programs[i]->provider_name);
        av_freep(&s->programs[i]->name);
#endif
        av_metadata_free(&s->programs[i]->metadata);
        av_freep(&s->programs[i]->stream_index);
        av_freep(&s->programs[i]);
    }
    av_freep(&s->programs);
    /* drop any packets still buffered for interleaving/probing */
    flush_packet_queue(s);
    av_freep(&s->priv_data);
    while(s->nb_chapters--) {
#if LIBAVFORMAT_VERSION_INT < (53<<16)
        av_free(s->chapters[s->nb_chapters]->title);
#endif
        av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
        av_free(s->chapters[s->nb_chapters]);
    }
    av_freep(&s->chapters);
    av_metadata_free(&s->metadata);
    av_free(s);
}
/**
 * Close an input file: free the context and, unless the format is
 * AVFMT_NOFILE, also close the underlying byte stream.
 */
void av_close_input_file(AVFormatContext *s)
{
    ByteIOContext *pb = NULL;

    /* save the byte context before the stream close frees s */
    if (!(s->iformat->flags & AVFMT_NOFILE))
        pb = s->pb;
    av_close_input_stream(s);
    if (pb)
        url_fclose(pb);
}
/**
 * Add a new stream to a media file.
 * @param s  media file handle
 * @param id format-specific stream id
 * @return the newly created stream, or NULL on error (too many streams
 *         or out of memory)
 */
AVStream *av_new_stream(AVFormatContext *s, int id)
{
    AVStream *st;
    int i;

    if (s->nb_streams >= MAX_STREAMS)
        return NULL;

    st = av_mallocz(sizeof(AVStream));
    if (!st)
        return NULL;
    st->codec = avcodec_alloc_context();
    if (!st->codec) {
        /* fix: this allocation was previously unchecked, so an OOM here
           crashed on the st->codec->bit_rate write below */
        av_free(st);
        return NULL;
    }
    if (s->iformat) {
        /* no default bitrate if decoding */
        st->codec->bit_rate = 0;
    }
    st->index = s->nb_streams;
    st->id = id;
    st->start_time = AV_NOPTS_VALUE;
    st->duration = AV_NOPTS_VALUE;
    /* we set the current DTS to 0 so that formats without any timestamps
       but durations get some timestamps, formats with some unknown
       timestamps have their first few packets buffered and the
       timestamps corrected before they are returned to the user */
    st->cur_dts = 0;
    st->first_dts = AV_NOPTS_VALUE;
    st->probe_packets = MAX_PROBE_PACKETS;

    /* default pts setting is MPEG-like */
    av_set_pts_info(st, 33, 1, 90000);
    st->last_IP_pts = AV_NOPTS_VALUE;
    for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
        st->pts_buffer[i] = AV_NOPTS_VALUE;
    st->reference_dts = AV_NOPTS_VALUE;

    st->sample_aspect_ratio = (AVRational){0,1};

    s->streams[s->nb_streams++] = st;
    return st;
}
/**
 * Return the program with the given id, creating it if it does not
 * exist yet.  Returns NULL only on allocation failure.
 */
AVProgram *av_new_program(AVFormatContext *ac, int id)
{
    AVProgram *prog = NULL;
    int i;

#ifdef DEBUG_SI
    av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
#endif

    /* scan backwards: like the forward full scan, this yields the
       last-registered program with a matching id */
    for (i = ac->nb_programs - 1; i >= 0; i--) {
        if (ac->programs[i]->id == id) {
            prog = ac->programs[i];
            break;
        }
    }

    if (!prog) {
        prog = av_mallocz(sizeof(*prog));
        if (!prog)
            return NULL;
        dynarray_add(&ac->programs, &ac->nb_programs, prog);
        prog->discard = AVDISCARD_NONE;
    }
    prog->id = id;

    return prog;
}
/**
 * Add (or update) a chapter with the given id.
 * The title, time base and start/end are always (re)assigned, so calling
 * this twice with the same id updates the existing chapter in place.
 * @return the chapter, or NULL on allocation failure
 */
AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
{
    AVChapter *chapter = NULL;
    int i;

    /* backwards scan == last matching chapter, same as a full forward scan */
    for (i = s->nb_chapters - 1; i >= 0; i--) {
        if (s->chapters[i]->id == id) {
            chapter = s->chapters[i];
            break;
        }
    }
    if (!chapter) {
        chapter = av_mallocz(sizeof(*chapter));
        if (!chapter)
            return NULL;
        dynarray_add(&s->chapters, &s->nb_chapters, chapter);
    }
#if LIBAVFORMAT_VERSION_INT < (53<<16)
    av_free(chapter->title);
#endif
    av_metadata_set2(&chapter->metadata, "title", title, 0);
    chapter->id        = id;
    chapter->time_base = time_base;
    chapter->start     = start;
    chapter->end       = end;

    return chapter;
}
/************************************************************/
/* output media file */
/**
 * Allocate the muxer's private data and forward the (deprecated)
 * AVFormatParameters to the muxer's set_parameters() callback, if any.
 * @return 0 on success, a negative AVERROR code on failure
 */
int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
{
    s->priv_data = NULL;
    if (s->oformat->priv_data_size > 0) {
        s->priv_data = av_mallocz(s->oformat->priv_data_size);
        if (!s->priv_data)
            return AVERROR(ENOMEM);
    }
    if (s->oformat->set_parameters) {
        int ret = s->oformat->set_parameters(s, ap);
        if (ret < 0)
            return ret;
    }
    return 0;
}
/**
 * Validate the streams' codec parameters, write the container header and
 * initialize per-stream PTS generation.
 * @return 0 on success, negative on error
 */
int av_write_header(AVFormatContext *s)
{
    int ret, i;
    AVStream *st;

    // some sanity checks
    if (s->nb_streams == 0) {
        av_log(s, AV_LOG_ERROR, "no streams\n");
        return -1;
    }

    for(i=0;i<s->nb_streams;i++) {
        st = s->streams[i];

        switch (st->codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if(st->codec->sample_rate<=0){
                av_log(s, AV_LOG_ERROR, "sample rate not set\n");
                return -1;
            }
            /* derive block_align from channels * bytes-per-sample when the
               encoder did not set it (PCM-style codecs) */
            if(!st->codec->block_align)
                st->codec->block_align = st->codec->channels *
                    av_get_bits_per_sample(st->codec->codec_id) >> 3;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
                av_log(s, AV_LOG_ERROR, "time base not set\n");
                return -1;
            }
            if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
                av_log(s, AV_LOG_ERROR, "dimensions not set\n");
                return -1;
            }
            if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
                av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
                return -1;
            }
            break;
        }

        /* fill in a codec tag from the muxer's table when the caller
           did not supply one */
        if(s->oformat->codec_tag){
            if(st->codec->codec_tag){
                //FIXME
                //check that tag + id is in the table
                //if neither is in the table -> OK
                //if tag is in the table with another id -> FAIL
                //if id is in the table with another tag -> FAIL unless strict < ?
            }else
                st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
        }

        if(s->oformat->flags & AVFMT_GLOBALHEADER &&
            !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
          av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
    }

    /* allocate private data unless av_set_parameters() already did */
    if (!s->priv_data && s->oformat->priv_data_size > 0) {
        s->priv_data = av_mallocz(s->oformat->priv_data_size);
        if (!s->priv_data)
            return AVERROR(ENOMEM);
    }

#if LIBAVFORMAT_VERSION_MAJOR < 53
    ff_metadata_mux_compat(s);
#endif

    /* set muxer identification string (skipped in bitexact mode so that
       regression test output stays stable) */
    if (!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
        AVMetadata *m;
        AVMetadataTag *t;

        if (!(m = av_mallocz(sizeof(AVMetadata))))
            return AVERROR(ENOMEM);
        av_metadata_set2(&m, "encoder", LIBAVFORMAT_IDENT, 0);
        /* convert through the muxer's metadata naming convention */
        metadata_conv(&m, s->oformat->metadata_conv, NULL);
        if ((t = av_metadata_get(m, "", NULL, AV_METADATA_IGNORE_SUFFIX)))
            av_metadata_set2(&s->metadata, t->key, t->value, 0);
        av_metadata_free(&m);
    }

    if(s->oformat->write_header){
        ret = s->oformat->write_header(s);
        if (ret < 0)
            return ret;
    }

    /* init PTS generation */
    for(i=0;i<s->nb_streams;i++) {
        int64_t den = AV_NOPTS_VALUE;
        st = s->streams[i];

        switch (st->codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            den = (int64_t)st->time_base.num * st->codec->sample_rate;
            break;
        case AVMEDIA_TYPE_VIDEO:
            den = (int64_t)st->time_base.num * st->codec->time_base.den;
            break;
        default:
            break;
        }
        if (den != AV_NOPTS_VALUE) {
            if (den <= 0)
                return AVERROR_INVALIDDATA;
            av_frac_init(&st->pts, 0, 0, den);
        }
    }
    return 0;
}
//FIXME merge with compute_pkt_fields
/**
 * Fill in missing timestamp/duration fields of a packet about to be muxed
 * and enforce timestamp sanity (monotone DTS, pts >= dts).
 * Also advances the stream's fractional PTS counter (st->pts).
 * @return 0 on success, -1 on invalid timestamps
 */
static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
    /* reorder delay in frames: B-frames force dts to lag behind pts */
    int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
    int num, den, frame_size, i;

//    av_log(s, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);

/*    if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
        return -1;*/

    /* duration field: derive it from the frame rate / sample count when
       the caller left it at 0 */
    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, NULL, pkt);
        if (den && num) {
            pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
        }
    }

    /* without reordering, dts is a valid pts */
    if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
        pkt->pts= pkt->dts;

    //XXX/FIXME this is a temporary hack until all encoders output pts
    if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
        pkt->dts=
//        pkt->pts= st->cur_dts;
        pkt->pts= st->pts.val;
    }

    //calculate dts from pts
    if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
        /* keep a sliding window of the last delay+1 pts values; the
           smallest of them is the dts of the current packet */
        st->pts_buffer[0]= pkt->pts;
        for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
            st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
        for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);

        pkt->dts= st->pts_buffer[0];
    }

    if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
        av_log(s, AV_LOG_ERROR,
               "st:%d error, non monotone timestamps %"PRId64" >= %"PRId64"\n",
               st->index, st->cur_dts, pkt->dts);
        return -1;
    }
    if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
        av_log(s, AV_LOG_ERROR, "st:%d error, pts < dts\n", st->index);
        return -1;
    }

//    av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
    st->cur_dts= pkt->dts;
    st->pts.val= pkt->dts;

    /* update pts */
    switch (st->codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        frame_size = get_audio_frame_size(st->codec, pkt->size);

        /* HACK/FIXME, we skip the initial 0 size packets as they are most
           likely equal to the encoder delay, but it would be better if we
           had the real timestamps from the encoder */
        if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
            av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
        av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
        break;
    default:
        break;
    }
    return 0;
}
/**
 * Write a packet to the output without interleaving.  Timestamp problems
 * are fatal unless the muxer declares AVFMT_NOTIMESTAMPS.
 * @return the muxer's error code, or the byte-stream error state
 */
int av_write_frame(AVFormatContext *s, AVPacket *pkt)
{
    int ret;

    ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
    if (ret < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
        return ret;

    ret = s->oformat->write_packet(s, pkt);
    return ret ? ret : url_ferror(s->pb);
}
/**
 * Insert a packet into the context's sorted packet buffer, keeping the
 * buffer ordered according to the supplied compare() function.
 * Ownership of the packet's data passes to the buffer (the original
 * pkt->destruct is cleared so only the buffered copy frees it).
 * NOTE(review): the av_mallocz() result is not checked here — an OOM would
 * dereference NULL on the next line; callers cannot be told since the
 * function returns void.
 */
void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
                              int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
{
    AVPacketList **next_point, *this_pktl;

    this_pktl = av_mallocz(sizeof(AVPacketList));
    this_pktl->pkt= *pkt;
    pkt->destruct= NULL;             // do not free original but only the copy
    av_dup_packet(&this_pktl->pkt);  // duplicate the packet if it uses non-alloced memory

    /* start searching after this stream's last buffered packet: packets of
       one stream arrive in order, so earlier positions cannot match */
    if(s->streams[pkt->stream_index]->last_in_packet_buffer){
        next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
    }else
        next_point = &s->packet_buffer;

    if(*next_point){
        /* fast path: if the new packet sorts after the current tail,
           append without walking the list */
        if(compare(s, &s->packet_buffer_end->pkt, pkt)){
            while(!compare(s, &(*next_point)->pkt, pkt)){
                next_point= &(*next_point)->next;
            }
            goto next_non_null;
        }else{
            next_point = &(s->packet_buffer_end->next);
        }
    }
    assert(!*next_point);

    s->packet_buffer_end= this_pktl;
next_non_null:

    this_pktl->next= *next_point;

    s->streams[pkt->stream_index]->last_in_packet_buffer=
    *next_point= this_pktl;
}
/**
 * Compare two packets by DTS across (possibly different) stream time bases.
 * @return 1 if pkt should be muxed strictly before next, 0 otherwise
 */
int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
{
    AVStream *st_pkt  = s->streams[pkt->stream_index];
    AVStream *st_next = s->streams[next->stream_index];
    /* cross-multiplied time bases used to rescale pkt->dts into
       next's time base (rounding down, as the original did) */
    int64_t mul = st_pkt->time_base.num  * (int64_t)st_next->time_base.den;
    int64_t div = st_next->time_base.num * (int64_t)st_pkt->time_base.den;

    return av_rescale_rnd(pkt->dts, mul, div, AV_ROUND_DOWN) < next->dts;
}
/**
 * Default interleaving: buffer incoming packets and release the one with
 * the lowest DTS once every stream has at least one buffered packet
 * (or unconditionally when flushing).
 * @param pkt   packet to add, or NULL to just drain
 * @param flush nonzero to release packets even if some streams are empty
 * @return 1 if a packet was written into *out, 0 otherwise
 */
int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
    AVPacketList *pktl;
    int stream_count=0;
    int i;

    if(pkt){
        ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
    }

    /* count streams that currently have something buffered */
    for(i=0; i < s->nb_streams; i++)
        stream_count+= !!s->streams[i]->last_in_packet_buffer;

    if(stream_count && (s->nb_streams == stream_count || flush)){
        /* the buffer head has the lowest DTS (kept sorted on insert) */
        pktl= s->packet_buffer;
        *out= pktl->pkt;

        s->packet_buffer= pktl->next;
        if(!s->packet_buffer)
            s->packet_buffer_end= NULL;

        /* clear the per-stream tail pointer if we just consumed it */
        if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
            s->streams[out->stream_index]->last_in_packet_buffer= NULL;
        av_freep(&pktl);
        return 1;
    }else{
        av_init_packet(out);
        return 0;
    }
}
/**
 * Interleave an AVPacket so it can be muxed, dispatching to the muxer's
 * own interleaver when it provides one.
 * @param out   receives the interleaved packet
 * @param in    the input packet (NULL to drain)
 * @param flush 1 if no further packets are available as input and all
 *              remaining packets should be output
 * @return 1 if a packet was output, 0 if no packet could be output,
 *         < 0 if an error occurred
 */
static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
    if (s->oformat->interleave_packet)
        return s->oformat->interleave_packet(s, out, in, flush);
    return av_interleave_packet_per_dts(s, out, in, flush);
}
/**
 * Buffer a packet for correctly-interleaved output and write out whatever
 * interleaved packets become available.  The packet's payload is taken
 * over by the interleaving buffer.
 * @return 0 on success (packet buffered and/or written), negative on error
 */
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
    AVStream *st= s->streams[ pkt->stream_index];

    //FIXME/XXX/HACK drop zero sized packets
    if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
        return 0;

//av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
    if(compute_pkt_fields2(s, st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
        return -1;

    if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
        return -1;

    for(;;){
        AVPacket opkt;
        int ret= av_interleave_packet(s, &opkt, pkt, 0);
        if(ret<=0) //FIXME cleanup needed for ret<0 ?
            return ret;

        ret= s->oformat->write_packet(s, &opkt);

        av_free_packet(&opkt);
        /* only the first iteration feeds the input packet; afterwards we
           just drain what the interleaver releases */
        pkt= NULL;

        if(ret<0)
            return ret;
        if(url_ferror(s->pb))
            return url_ferror(s->pb);
    }
}
/**
 * Flush all buffered interleaved packets, write the container trailer and
 * free the muxer's per-stream and private data.
 * @return 0 on success, negative on error
 */
int av_write_trailer(AVFormatContext *s)
{
    int ret, i;

    /* drain the interleaving buffer with flush=1 */
    for(;;){
        AVPacket pkt;
        ret= av_interleave_packet(s, &pkt, NULL, 1);
        if(ret<0) //FIXME cleanup needed for ret<0 ?
            goto fail;
        if(!ret)
            break;

        ret= s->oformat->write_packet(s, &pkt);

        av_free_packet(&pkt);

        if(ret<0)
            goto fail;
        if(url_ferror(s->pb))
            goto fail;
    }

    if(s->oformat->write_trailer)
        ret = s->oformat->write_trailer(s);
fail:
    /* on the success path also report any pending byte-stream error */
    if(ret == 0)
       ret=url_ferror(s->pb);
    for(i=0;i<s->nb_streams;i++) {
        av_freep(&s->streams[i]->priv_data);
        av_freep(&s->streams[i]->index_entries);
    }
    av_freep(&s->priv_data);
    return ret;
}
/**
 * Add stream index idx to the program identified by progid.
 * Silently does nothing when the index is out of range, the program does
 * not exist, the stream is already registered, or memory runs out.
 */
void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
{
    int i, j;
    AVProgram *program = NULL;
    void *tmp;

    if (idx >= ac->nb_streams) {
        /* fix: idx is unsigned int, so %u is the matching conversion
           specifier (%d on an unsigned argument is undefined behavior) */
        av_log(ac, AV_LOG_ERROR, "stream index %u is not valid\n", idx);
        return;
    }

    for (i = 0; i < ac->nb_programs; i++) {
        if (ac->programs[i]->id != progid)
            continue;
        program = ac->programs[i];
        /* already a member? nothing to do */
        for (j = 0; j < program->nb_stream_indexes; j++)
            if (program->stream_index[j] == idx)
                return;

        tmp = av_realloc(program->stream_index,
                         sizeof(unsigned int) * (program->nb_stream_indexes + 1));
        if (!tmp)
            return;
        program->stream_index = tmp;
        program->stream_index[program->nb_stream_indexes++] = idx;
        return;
    }
}
/**
 * Pretty-print a rate: two decimals when fractional, integer when whole,
 * and "...k" when it is a whole multiple of 1000.
 */
static void print_fps(double d, const char *postfix){
    /* fix: use lrint(), not lrintf() — the argument is a double, and
       lrintf() would round through float, losing precision for large or
       finely-fractional rates */
    uint64_t v= lrint(d*100);
    if     (v% 100      ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
    else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
    else                  av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
}
/**
 * Log every metadata tag except "language" under a "Metadata:" heading.
 * Prints nothing when the dictionary is empty or contains only a
 * "language" entry.
 */
static void dump_metadata(void *ctx, AVMetadata *m, const char *indent)
{
    AVMetadataTag *tag = NULL;

    if (!m)
        return;
    /* a lone "language" tag is shown next to the stream line instead */
    if (m->count == 1 && av_metadata_get(m, "language", NULL, 0))
        return;

    av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
    while ((tag = av_metadata_get(m, "", tag, AV_METADATA_IGNORE_SUFFIX))) {
        if (strcmp("language", tag->key))
            av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value);
    }
}
/* "user interface" functions */
/**
 * Log a one-line human-readable description of stream i ("Stream #x.y ...")
 * followed by its aspect ratio, frame-rate/time-base info and metadata.
 */
static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
{
    char buf[256];
    int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
    AVStream *st = ic->streams[i];
    int g = av_gcd(st->time_base.num, st->time_base.den);
    AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
    avcodec_string(buf, sizeof(buf), st->codec, is_output);
    av_log(NULL, AV_LOG_INFO, "    Stream #%d.%d", index, i);
    /* the pid is an important information, so we display it */
    /* XXX: add a generic system */
    if (flags & AVFMT_SHOW_IDS)
        av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
    if (lang)
        av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
    /* frame count and reduced time base only at debug verbosity */
    av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
    av_log(NULL, AV_LOG_INFO, ": %s", buf);
    if (st->sample_aspect_ratio.num && // default
        av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
        AVRational display_aspect_ratio;
        av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
                  st->codec->width*st->sample_aspect_ratio.num,
                  st->codec->height*st->sample_aspect_ratio.den,
                  1024*1024);
        av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
                 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
                 display_aspect_ratio.num, display_aspect_ratio.den);
    }
    if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
        /* fps = average frame rate, tbr = "real" base frame rate,
           tbn = stream time base, tbc = codec time base */
        if(st->avg_frame_rate.den && st->avg_frame_rate.num)
            print_fps(av_q2d(st->avg_frame_rate), "fps");
        if(st->r_frame_rate.den && st->r_frame_rate.num)
            print_fps(av_q2d(st->r_frame_rate), "tbr");
        if(st->time_base.den && st->time_base.num)
            print_fps(1/av_q2d(st->time_base), "tbn");
        if(st->codec->time_base.den && st->codec->time_base.num)
            print_fps(1/av_q2d(st->codec->time_base), "tbc");
    }
    av_log(NULL, AV_LOG_INFO, "\n");
    dump_metadata(NULL, st->metadata, "    ");
}
/**
 * Log a human-readable summary of the format context: container, duration,
 * bitrate, chapters, programs and all streams.  Streams that belong to a
 * program are listed under it and not repeated afterwards.
 */
void dump_format(AVFormatContext *ic,
                 int index,
                 const char *url,
                 int is_output)
{
    int i;
    /* tracks which streams were already printed under a program */
    uint8_t *printed = av_mallocz(ic->nb_streams);
    if (ic->nb_streams && !printed)
        return;

    av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
            is_output ? "Output" : "Input",
            index,
            is_output ? ic->oformat->name : ic->iformat->name,
            is_output ? "to" : "from", url);
    dump_metadata(NULL, ic->metadata, "  ");
    if (!is_output) {
        av_log(NULL, AV_LOG_INFO, "  Duration: ");
        if (ic->duration != AV_NOPTS_VALUE) {
            int hours, mins, secs, us;
            secs = ic->duration / AV_TIME_BASE;
            us = ic->duration % AV_TIME_BASE;
            mins = secs / 60;
            secs %= 60;
            hours = mins / 60;
            mins %= 60;
            av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
                   (100 * us) / AV_TIME_BASE);
        } else {
            av_log(NULL, AV_LOG_INFO, "N/A");
        }
        if (ic->start_time != AV_NOPTS_VALUE) {
            int secs, us;
            av_log(NULL, AV_LOG_INFO, ", start: ");
            secs = ic->start_time / AV_TIME_BASE;
            us = ic->start_time % AV_TIME_BASE;
            av_log(NULL, AV_LOG_INFO, "%d.%06d",
                   secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
        }
        av_log(NULL, AV_LOG_INFO, ", bitrate: ");
        if (ic->bit_rate) {
            av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
        } else {
            av_log(NULL, AV_LOG_INFO, "N/A");
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }
    for (i = 0; i < ic->nb_chapters; i++) {
        AVChapter *ch = ic->chapters[i];
        av_log(NULL, AV_LOG_INFO, "    Chapter #%d.%d: ", index, i);
        av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
        av_log(NULL, AV_LOG_INFO, "end %f\n",   ch->end   * av_q2d(ch->time_base));

        dump_metadata(NULL, ch->metadata, "    ");
    }
    if(ic->nb_programs) {
        int j, k, total = 0;
        for(j=0; j<ic->nb_programs; j++) {
            AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata,
                                                  "name", NULL, 0);
            av_log(NULL, AV_LOG_INFO, "  Program %d %s\n", ic->programs[j]->id,
                   name ? name->value : "");
            dump_metadata(NULL, ic->programs[j]->metadata, "    ");
            for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
                dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
                printed[ic->programs[j]->stream_index[k]] = 1;
            }
            total += ic->programs[j]->nb_stream_indexes;
        }
        /* some streams may not be assigned to any program */
        if (total < ic->nb_streams)
            av_log(NULL, AV_LOG_INFO, "  No Program\n");
    }
    for(i=0;i<ic->nb_streams;i++)
        if (!printed[i])
            dump_stream_format(ic, i, index, is_output);

    av_free(printed);
}
#if LIBAVFORMAT_VERSION_MAJOR < 53
/**
 * @deprecated compatibility wrapper, use av_parse_video_frame_size()
 * directly; removed with libavformat major version 53.
 */
int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
{
    return av_parse_video_frame_size(width_ptr, height_ptr, str);
}

/**
 * @deprecated compatibility wrapper around av_parse_video_frame_rate();
 * splits the parsed rational into separate num/den outputs.
 */
int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
{
    AVRational frame_rate;
    int ret = av_parse_video_frame_rate(&frame_rate, arg);
    *frame_rate_num= frame_rate.num;
    *frame_rate_den= frame_rate.den;
    return ret;
}
#endif
/**
 * Return the current wall-clock time in microseconds since the Unix epoch.
 */
int64_t av_gettime(void)
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_usec + now.tv_sec * INT64_C(1000000);
}
/**
 * Current time as NTP microseconds (Unix time shifted by the 1900 epoch
 * offset), truncated to whole milliseconds.
 */
uint64_t ff_ntp_time(void)
{
    uint64_t wallclock_ms = av_gettime() / 1000;
    return wallclock_ms * 1000 + NTP_OFFSET_US;
}
/**
 * Parse datestr as either an absolute date/time (duration==0) or a
 * duration (duration!=0) and return it in microseconds.
 *
 * Absolute form: [YYYY-MM-DD|YYYYMMDD][T| ][HH:MM:SS|HHMMSS][.m...][z|Z],
 * or the literal "now"; a trailing z/Z means UTC.  Duration form:
 * [-]HH:MM:SS[.m...] or [-]S+[.m...].
 * @return the time in microseconds, or INT64_MIN on parse failure
 */
int64_t parse_date(const char *datestr, int duration)
{
    const char *p;
    int64_t t;
    struct tm dt;
    int i;
    static const char * const date_fmt[] = {
        "%Y-%m-%d",
        "%Y%m%d",
    };
    static const char * const time_fmt[] = {
        "%H:%M:%S",
        "%H%M%S",
    };
    const char *q;
    int is_utc, len;
    char lastch;
    int negative = 0;

#undef time
    time_t now = time(0);

    len = strlen(datestr);
    if (len > 0)
        lastch = datestr[len - 1];
    else
        lastch = '\0';
    is_utc = (lastch == 'z' || lastch == 'Z');

    memset(&dt, 0, sizeof(dt));

    p = datestr;
    q = NULL;
    if (!duration) {
        /* "now" (or any prefix of datestr matching it) == current time */
        if (!strncasecmp(datestr, "now", len))
            return (int64_t) now * 1000000;

        /* parse the year-month-day part */
        for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
            q = small_strptime(p, date_fmt[i], &dt);
            if (q) {
                break;
            }
        }

        /* if the year-month-day part is missing, then take the
         * current year-month-day time */
        if (!q) {
            if (is_utc) {
                dt = *gmtime(&now);
            } else {
                dt = *localtime(&now);
            }
            dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
        } else {
            p = q;
        }

        if (*p == 'T' || *p == 't' || *p == ' ')
            p++;

        /* parse the hour-minute-second part */
        for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
            q = small_strptime(p, time_fmt[i], &dt);
            if (q) {
                break;
            }
        }
    } else {
        /* parse datestr as a duration */
        if (p[0] == '-') {
            negative = 1;
            ++p;
        }
        /* parse datestr as HH:MM:SS */
        q = small_strptime(p, time_fmt[0], &dt);
        if (!q) {
            /* parse datestr as S+ */
            dt.tm_sec = strtol(p, (char **)&q, 10);
            if (q == p)
                /* the parsing didn't succeed */
                return INT64_MIN;
            dt.tm_min = 0;
            dt.tm_hour = 0;
        }
    }

    /* Now we have all the fields that we can get */
    if (!q) {
        return INT64_MIN;
    }

    if (duration) {
        t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
    } else {
        dt.tm_isdst = -1;       /* unknown */
        if (is_utc) {
            t = mktimegm(&dt);
        } else {
            t = mktime(&dt);
        }
    }

    t *= 1000000;

    /* parse the .m... part: up to six fractional digits (microseconds) */
    if (*q == '.') {
        int val, n;
        q++;
        for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
            if (!isdigit(*q))
                break;
            val += n * (*q - '0');
        }
        t += val;
    }
    return negative ? -t : t;
}
/**
 * Look up tag1 in a URL-style "?a=b&c=d" info string.
 * On a match, the tag's value (with '+' decoded to ' ', truncated to
 * arg_size-1 characters) is stored NUL-terminated in arg.
 * @return 1 if the tag was found, 0 otherwise
 */
int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
{
    const char *p;
    char tag[128], *q;

    p = info;
    if (*p == '?')
        p++;
    for(;;) {
        /* copy the tag name, up to '=', '&' or end of string */
        q = tag;
        while (*p != '\0' && *p != '=' && *p != '&') {
            if ((q - tag) < sizeof(tag) - 1)
                *q++ = *p;
            p++;
        }
        *q = '\0';
        /* copy the value, decoding '+' as space */
        q = arg;
        if (*p == '=') {
            p++;
            while (*p != '&' && *p != '\0') {
                if ((q - arg) < arg_size - 1) {
                    if (*p == '+')
                        *q++ = ' ';
                    else
                        *q++ = *p;
                }
                p++;
            }
        }
        /* fix: terminate arg unconditionally; previously a tag without an
           "=value" part returned 1 with arg left untouched/unterminated */
        *q = '\0';
        if (!strcmp(tag, tag1))
            return 1;
        if (*p != '&')
            break;
        p++;
    }
    return 0;
}
/**
 * Expand a frame-number pattern into a filename: exactly one "%d" or
 * "%0Nd" in path is replaced by number; "%%" emits a literal '%'.
 * @return 0 on success, -1 if the pattern is invalid (no %d, more than
 *         one %d, an unknown % conversion, or the result does not fit)
 */
int av_get_frame_filename(char *buf, int buf_size,
                          const char *path, int number)
{
    const char *p;
    char *q, buf1[20], c;
    int nd, len, percentd_found;

    q = buf;
    p = path;
    percentd_found = 0;
    for(;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            do {
                /* collect the zero-pad width between '%' and 'd' */
                nd = 0;
                /* fix: cast to unsigned char before isdigit(); passing a
                   plain (possibly negative) char is undefined behavior */
                while (isdigit((unsigned char)*p)) {
                    nd = nd * 10 + *p++ - '0';
                }
                c = *p++;
            } while (isdigit((unsigned char)c));

            switch(c) {
            case '%':
                goto addchar;
            case 'd':
                /* only one frame-number substitution is allowed */
                if (percentd_found)
                    goto fail;
                percentd_found = 1;
                snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
                len = strlen(buf1);
                if ((q - buf + len) > buf_size - 1)
                    goto fail;
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;
            }
        } else {
        addchar:
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;
 fail:
    *q = '\0';
    return -1;
}
/**
 * Dump size bytes of buf in the classic hex-dump layout (offset, 16 hex
 * bytes, printable-ASCII column).  Output goes to stream f when non-NULL,
 * otherwise to av_log() at the given level.
 */
static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
{
    int len, i, j, c;
    /* fprintf is normally forbidden in FFmpeg; undo the poisoning so the
       FILE* path can use it */
#undef fprintf
#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)

    for(i=0;i<size;i+=16) {
        len = size - i;
        if (len > 16)
            len = 16;
        PRINT("%08x ", i);
        for(j=0;j<16;j++) {
            if (j < len)
                PRINT(" %02x", buf[i+j]);
            else
                PRINT("   ");
        }
        PRINT(" ");
        for(j=0;j<len;j++) {
            c = buf[i+j];
            /* non-printable bytes become '.' in the ASCII column */
            if (c < ' ' || c > '~')
                c = '.';
            PRINT("%c", c);
        }
        PRINT("\n");
    }
#undef PRINT
}
/** Hex-dump buf to the given stdio stream. */
void av_hex_dump(FILE *f, uint8_t *buf, int size)
{
    hex_dump_internal(NULL, f, 0, buf, size);
}
/** Hex-dump buf through av_log() at the given level. */
void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
{
    hex_dump_internal(avcl, NULL, level, buf, size);
}
//FIXME needs to know the time_base
/**
 * Print a packet's stream index, key flag, duration, dts, pts and size —
 * to stream f when non-NULL, otherwise through av_log() at level.
 * Timestamps are shown in seconds assuming AV_TIME_BASE units (see FIXME).
 */
static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
{
#undef fprintf
#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)

    PRINT("stream #%d:\n", pkt->stream_index);
    PRINT("  keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
    PRINT("  duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
    /* DTS is _always_ valid after av_read_frame() */
    PRINT("  dts=");
    if (pkt->dts == AV_NOPTS_VALUE)
        PRINT("N/A");
    else
        PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
    /* PTS may not be known if B-frames are present. */
    PRINT("  pts=");
    if (pkt->pts == AV_NOPTS_VALUE)
        PRINT("N/A");
    else
        PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
    PRINT("\n");
    PRINT("  size=%d\n", pkt->size);
#undef PRINT
    if (dump_payload)
        av_hex_dump(f, pkt->data, pkt->size);
}
/** Dump a packet description (optionally with payload hex) to a stdio stream. */
void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
{
    pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
}
/** Dump a packet description (optionally with payload hex) through av_log(). */
void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
{
    pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
}
/**
 * Split a URL of the form proto://[user[:pass]@]host[:port][/path][?query]
 * into its components.  Any output pointer may describe a zero-sized
 * buffer; missing components are returned as empty strings and a missing
 * port as -1.  A URL without "proto:" is treated as a plain path.
 */
void ff_url_split(char *proto, int proto_size,
                  char *authorization, int authorization_size,
                  char *hostname, int hostname_size,
                  int *port_ptr,
                  char *path, int path_size,
                  const char *url)
{
    const char *p, *ls, *at, *col, *brk;

    if (port_ptr)               *port_ptr = -1;
    if (proto_size > 0)         proto[0] = 0;
    if (authorization_size > 0) authorization[0] = 0;
    if (hostname_size > 0)      hostname[0] = 0;
    if (path_size > 0)          path[0] = 0;

    /* parse protocol */
    if ((p = strchr(url, ':'))) {
        av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
        p++; /* skip ':' */
        if (*p == '/') p++;
        if (*p == '/') p++;
    } else {
        /* no protocol means plain filename */
        av_strlcpy(path, url, path_size);
        return;
    }

    /* separate path from hostname */
    ls = strchr(p, '/');
    if(!ls)
        ls = strchr(p, '?');
    if(ls)
        av_strlcpy(path, ls, path_size);
    else
        ls = &p[strlen(p)]; // XXX

    /* the rest is hostname, use that to parse auth/port */
    if (ls != p) {
        /* authorization (user[:pass]@hostname) */
        if ((at = strchr(p, '@')) && at < ls) {
            av_strlcpy(authorization, p,
                       FFMIN(authorization_size, at + 1 - p));
            p = at + 1; /* skip '@' */
        }

        if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
            /* [host]:port — bracketed form used for numeric IPv6 */
            av_strlcpy(hostname, p + 1,
                       FFMIN(hostname_size, brk - p));
            if (brk[1] == ':' && port_ptr)
                *port_ptr = atoi(brk + 2);
        } else if ((col = strchr(p, ':')) && col < ls) {
            av_strlcpy(hostname, p,
                       FFMIN(col + 1 - p, hostname_size));
            if (port_ptr) *port_ptr = atoi(col + 1);
        } else
            av_strlcpy(hostname, p,
                       FFMIN(ls + 1 - p, hostname_size));
    }
}
/**
 * Write the hexadecimal representation of s bytes from src into buff
 * (2*s characters, NOT NUL-terminated).
 * @param lowercase nonzero to use a-f instead of A-F
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    static const char digits_uc[] = "0123456789ABCDEF";
    static const char digits_lc[] = "0123456789abcdef";
    const char *digits = lowercase ? digits_lc : digits_uc;
    int i;

    for (i = 0; i < s; i++) {
        buff[2 * i]     = digits[src[i] >> 4];
        buff[2 * i + 1] = digits[src[i] & 0x0F];
    }

    return buff;
}
/**
 * Set the stream's timestamp wrap width and time base pts_num/pts_den,
 * reducing the fraction to lowest terms (clamped to INT_MAX).
 * A degenerate result leaves the time base as 0/0.
 */
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
                     unsigned int pts_num, unsigned int pts_den)
{
    s->pts_wrap_bits = pts_wrap_bits;

    if (!av_reduce(&s->time_base.num, &s->time_base.den, pts_num, pts_den, INT_MAX)) {
        /* av_reduce() could not represent the fraction exactly */
        av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
    } else if (s->time_base.num != pts_num) {
        av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n",
               s->index, pts_num / s->time_base.num);
    }

    if (!s->time_base.num || !s->time_base.den)
        s->time_base.num = s->time_base.den = 0;
}
/**
 * Assemble a URL string "proto://auth@host:port<fmt...>" into str.
 * Numeric IPv6 hostnames are wrapped in brackets when network support is
 * compiled in.  Components passed as NULL (or port < 0) are omitted.
 * @return the length of the assembled string
 */
int ff_url_join(char *str, int size, const char *proto,
                const char *authorization, const char *hostname,
                int port, const char *fmt, ...)
{
#if CONFIG_NETWORK
    struct addrinfo hints, *ai;
#endif

    str[0] = '\0';
    if (proto)
        av_strlcatf(str, size, "%s://", proto);
    if (authorization)
        av_strlcatf(str, size, "%s@", authorization);
#if CONFIG_NETWORK && defined(AF_INET6)
    /* Determine if hostname is a numerical IPv6 address,
     * properly escape it within [] in that case. */
    memset(&hints, 0, sizeof(hints));
    hints.ai_flags = AI_NUMERICHOST;
    if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
        if (ai->ai_family == AF_INET6) {
            av_strlcat(str, "[", size);
            av_strlcat(str, hostname, size);
            av_strlcat(str, "]", size);
        } else {
            av_strlcat(str, hostname, size);
        }
        freeaddrinfo(ai);
    } else
#endif
        /* Not an IPv6 address, just output the plain string. */
        av_strlcat(str, hostname, size);

    if (port >= 0)
        av_strlcatf(str, size, ":%d", port);
    if (fmt) {
        /* append the caller-formatted tail (typically the path) */
        va_list vl;
        int len = strlen(str);

        va_start(vl, fmt);
        vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
        va_end(vl);
    }
    return strlen(str);
}
| 123linslouis-android-video-cutter | jni/libavformat/utils.c | C | asf20 | 115,521 |
/*
* Register all the formats and protocols
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "rtp.h"
#include "rdt.h"
#define REGISTER_MUXER(X,x) { \
extern AVOutputFormat x##_muxer; \
if(CONFIG_##X##_MUXER) av_register_output_format(&x##_muxer); }
#define REGISTER_DEMUXER(X,x) { \
extern AVInputFormat x##_demuxer; \
if(CONFIG_##X##_DEMUXER) av_register_input_format(&x##_demuxer); }
#define REGISTER_MUXDEMUX(X,x) REGISTER_MUXER(X,x); REGISTER_DEMUXER(X,x)
#define REGISTER_PROTOCOL(X,x) { \
extern URLProtocol x##_protocol; \
if(CONFIG_##X##_PROTOCOL) av_register_protocol(&x##_protocol); }
/**
 * Register all compiled-in (de)muxers, dynamic RTP/RDT payload handlers
 * and protocols with libavformat.  Safe to call multiple times; only the
 * first call does any work.  Not thread-safe on the first call (the
 * 'initialized' guard is a plain static int, not atomic).
 */
void av_register_all(void)
{
    static int initialized;

    if (initialized)
        return;
    initialized = 1;

    /* codecs must be registered before the formats that reference them */
    avcodec_register_all();

    /* (de)muxers */
    REGISTER_DEMUXER  (AAC, aac);
    REGISTER_MUXDEMUX (AC3, ac3);
    REGISTER_MUXER    (ADTS, adts);
    REGISTER_DEMUXER  (AEA, aea);
    REGISTER_MUXDEMUX (AIFF, aiff);
    REGISTER_MUXDEMUX (AMR, amr);
    REGISTER_DEMUXER  (ANM, anm);
    REGISTER_DEMUXER  (APC, apc);
    REGISTER_DEMUXER  (APE, ape);
    REGISTER_MUXDEMUX (ASF, asf);
    REGISTER_MUXDEMUX (ASS, ass);
    REGISTER_MUXER    (ASF_STREAM, asf_stream);
    REGISTER_MUXDEMUX (AU, au);
    REGISTER_MUXDEMUX (AVI, avi);
    REGISTER_DEMUXER  (AVISYNTH, avisynth);
    REGISTER_MUXER    (AVM2, avm2);
    REGISTER_DEMUXER  (AVS, avs);
    REGISTER_DEMUXER  (BETHSOFTVID, bethsoftvid);
    REGISTER_DEMUXER  (BFI, bfi);
    REGISTER_DEMUXER  (BINK, bink);
    REGISTER_DEMUXER  (C93, c93);
    REGISTER_DEMUXER  (CAF, caf);
    REGISTER_DEMUXER  (CAVSVIDEO, cavsvideo);
    REGISTER_DEMUXER  (CDG, cdg);
    REGISTER_MUXER    (CRC, crc);
    REGISTER_MUXDEMUX (DAUD, daud);
    REGISTER_MUXDEMUX (DIRAC, dirac);
    REGISTER_MUXDEMUX (DNXHD, dnxhd);
    REGISTER_DEMUXER  (DSICIN, dsicin);
    REGISTER_MUXDEMUX (DTS, dts);
    REGISTER_MUXDEMUX (DV, dv);
    REGISTER_DEMUXER  (DXA, dxa);
    REGISTER_DEMUXER  (EA, ea);
    REGISTER_DEMUXER  (EA_CDATA, ea_cdata);
    REGISTER_MUXDEMUX (EAC3, eac3);
    REGISTER_MUXDEMUX (FFM, ffm);
    REGISTER_MUXDEMUX (FILMSTRIP, filmstrip);
    REGISTER_MUXDEMUX (FLAC, flac);
    REGISTER_DEMUXER  (FLIC, flic);
    REGISTER_MUXDEMUX (FLV, flv);
    REGISTER_DEMUXER  (FOURXM, fourxm);
    REGISTER_MUXER    (FRAMECRC, framecrc);
    REGISTER_MUXER    (GIF, gif);
    REGISTER_DEMUXER  (GSM, gsm);
    REGISTER_MUXDEMUX (GXF, gxf);
    REGISTER_MUXDEMUX (H261, h261);
    REGISTER_MUXDEMUX (H263, h263);
    REGISTER_MUXDEMUX (H264, h264);
    REGISTER_DEMUXER  (IDCIN, idcin);
    REGISTER_DEMUXER  (IFF, iff);
    REGISTER_MUXDEMUX (IMAGE2, image2);
    REGISTER_MUXDEMUX (IMAGE2PIPE, image2pipe);
    REGISTER_DEMUXER  (INGENIENT, ingenient);
    REGISTER_DEMUXER  (IPMOVIE, ipmovie);
    REGISTER_MUXER    (IPOD, ipod);
    REGISTER_DEMUXER  (ISS, iss);
    REGISTER_DEMUXER  (IV8, iv8);
    REGISTER_DEMUXER  (LMLM4, lmlm4);
    REGISTER_MUXDEMUX (M4V, m4v);
    REGISTER_MUXDEMUX (MATROSKA, matroska);
    REGISTER_MUXER    (MATROSKA_AUDIO, matroska_audio);
    REGISTER_MUXDEMUX (MJPEG, mjpeg);
    REGISTER_MUXDEMUX (MLP, mlp);
    REGISTER_DEMUXER  (MM, mm);
    REGISTER_MUXDEMUX (MMF, mmf);
    REGISTER_MUXDEMUX (MOV, mov);
    REGISTER_MUXER    (MP2, mp2);
    REGISTER_MUXDEMUX (MP3, mp3);
    REGISTER_MUXER    (MP4, mp4);
    REGISTER_DEMUXER  (MPC, mpc);
    REGISTER_DEMUXER  (MPC8, mpc8);
    REGISTER_MUXER    (MPEG1SYSTEM, mpeg1system);
    REGISTER_MUXER    (MPEG1VCD, mpeg1vcd);
    REGISTER_MUXER    (MPEG1VIDEO, mpeg1video);
    REGISTER_MUXER    (MPEG2DVD, mpeg2dvd);
    REGISTER_MUXER    (MPEG2SVCD, mpeg2svcd);
    REGISTER_MUXER    (MPEG2VIDEO, mpeg2video);
    REGISTER_MUXER    (MPEG2VOB, mpeg2vob);
    REGISTER_DEMUXER  (MPEGPS, mpegps);
    REGISTER_MUXDEMUX (MPEGTS, mpegts);
    REGISTER_DEMUXER  (MPEGTSRAW, mpegtsraw);
    REGISTER_DEMUXER  (MPEGVIDEO, mpegvideo);
    REGISTER_MUXER    (MPJPEG, mpjpeg);
    REGISTER_DEMUXER  (MSNWC_TCP, msnwc_tcp);
    REGISTER_DEMUXER  (MTV, mtv);
    REGISTER_DEMUXER  (MVI, mvi);
    REGISTER_MUXDEMUX (MXF, mxf);
    REGISTER_MUXER    (MXF_D10, mxf_d10);
    REGISTER_DEMUXER  (NC, nc);
    REGISTER_DEMUXER  (NSV, nsv);
    REGISTER_MUXER    (NULL, null);
    REGISTER_MUXDEMUX (NUT, nut);
    REGISTER_DEMUXER  (NUV, nuv);
    REGISTER_MUXDEMUX (OGG, ogg);
    REGISTER_DEMUXER  (OMA, oma);
    REGISTER_MUXDEMUX (PCM_ALAW,  pcm_alaw);
    REGISTER_MUXDEMUX (PCM_MULAW, pcm_mulaw);
    REGISTER_MUXDEMUX (PCM_F64BE, pcm_f64be);
    REGISTER_MUXDEMUX (PCM_F64LE, pcm_f64le);
    REGISTER_MUXDEMUX (PCM_F32BE, pcm_f32be);
    REGISTER_MUXDEMUX (PCM_F32LE, pcm_f32le);
    REGISTER_MUXDEMUX (PCM_S32BE, pcm_s32be);
    REGISTER_MUXDEMUX (PCM_S32LE, pcm_s32le);
    REGISTER_MUXDEMUX (PCM_S24BE, pcm_s24be);
    REGISTER_MUXDEMUX (PCM_S24LE, pcm_s24le);
    REGISTER_MUXDEMUX (PCM_S16BE, pcm_s16be);
    REGISTER_MUXDEMUX (PCM_S16LE, pcm_s16le);
    REGISTER_MUXDEMUX (PCM_S8,    pcm_s8);
    REGISTER_MUXDEMUX (PCM_U32BE, pcm_u32be);
    REGISTER_MUXDEMUX (PCM_U32LE, pcm_u32le);
    REGISTER_MUXDEMUX (PCM_U24BE, pcm_u24be);
    REGISTER_MUXDEMUX (PCM_U24LE, pcm_u24le);
    REGISTER_MUXDEMUX (PCM_U16BE, pcm_u16be);
    REGISTER_MUXDEMUX (PCM_U16LE, pcm_u16le);
    REGISTER_MUXDEMUX (PCM_U8,    pcm_u8);
    REGISTER_MUXER    (PSP, psp);
    REGISTER_DEMUXER  (PVA, pva);
    REGISTER_DEMUXER  (QCP, qcp);
    REGISTER_DEMUXER  (R3D, r3d);
    REGISTER_MUXDEMUX (RAWVIDEO, rawvideo);
    REGISTER_DEMUXER  (RL2, rl2);
    REGISTER_MUXDEMUX (RM, rm);
    REGISTER_MUXDEMUX (ROQ, roq);
    REGISTER_DEMUXER  (RPL, rpl);
    REGISTER_MUXER    (RTP, rtp);
    REGISTER_MUXDEMUX (RTSP, rtsp);
    REGISTER_DEMUXER  (SDP, sdp);
#if CONFIG_SDP_DEMUXER
    /* dynamic payload handlers are only needed when SDP parsing is in */
    av_register_rtp_dynamic_payload_handlers();
    av_register_rdt_dynamic_payload_handlers();
#endif
    REGISTER_DEMUXER  (SEGAFILM, segafilm);
    REGISTER_DEMUXER  (SHORTEN, shorten);
    REGISTER_DEMUXER  (SIFF, siff);
    REGISTER_DEMUXER  (SMACKER, smacker);
    REGISTER_DEMUXER  (SOL, sol);
    REGISTER_MUXDEMUX (SOX, sox);
    REGISTER_MUXER    (SPDIF, spdif);
    REGISTER_DEMUXER  (STR, str);
    REGISTER_MUXDEMUX (SWF, swf);
    REGISTER_MUXER    (TG2, tg2);
    REGISTER_MUXER    (TGP, tgp);
    REGISTER_DEMUXER  (THP, thp);
    REGISTER_DEMUXER  (TIERTEXSEQ, tiertexseq);
    REGISTER_DEMUXER  (TMV, tmv);
    REGISTER_MUXDEMUX (TRUEHD, truehd);
    REGISTER_DEMUXER  (TTA, tta);
    REGISTER_DEMUXER  (TXD, txd);
    REGISTER_DEMUXER  (VC1, vc1);
    REGISTER_MUXDEMUX (VC1T, vc1t);
    REGISTER_DEMUXER  (VMD, vmd);
    REGISTER_MUXDEMUX (VOC, voc);
    REGISTER_DEMUXER  (VQF, vqf);
    REGISTER_DEMUXER  (W64, w64);
    REGISTER_MUXDEMUX (WAV, wav);
    REGISTER_DEMUXER  (WC3, wc3);
    REGISTER_MUXER    (WEBM, webm);
    REGISTER_DEMUXER  (WSAUD, wsaud);
    REGISTER_DEMUXER  (WSVQA, wsvqa);
    REGISTER_DEMUXER  (WV, wv);
    REGISTER_DEMUXER  (XA, xa);
    REGISTER_DEMUXER  (YOP, yop);
    REGISTER_MUXDEMUX (YUV4MPEGPIPE, yuv4mpegpipe);

    /* external libraries */
    REGISTER_MUXDEMUX (LIBNUT, libnut);

    /* protocols */
    REGISTER_PROTOCOL (FILE, file);
    REGISTER_PROTOCOL (GOPHER, gopher);
    REGISTER_PROTOCOL (HTTP, http);
    REGISTER_PROTOCOL (PIPE, pipe);
    REGISTER_PROTOCOL (RTMP, rtmp);
#if CONFIG_LIBRTMP
    /* librtmp supplies the rtmpt/rtmpe/rtmpte/rtmps variants; they are
     * all gated on the same CONFIG_RTMP_PROTOCOL switch on purpose */
    REGISTER_PROTOCOL (RTMP, rtmpt);
    REGISTER_PROTOCOL (RTMP, rtmpe);
    REGISTER_PROTOCOL (RTMP, rtmpte);
    REGISTER_PROTOCOL (RTMP, rtmps);
#endif
    REGISTER_PROTOCOL (RTP, rtp);
    REGISTER_PROTOCOL (TCP, tcp);
    REGISTER_PROTOCOL (UDP, udp);
    REGISTER_PROTOCOL (CONCAT, concat);
}
| 123linslouis-android-video-cutter | jni/libavformat/allformats.c | C | asf20 | 8,389 |
/*
* Interplay MVE File Demuxer
* Copyright (c) 2003 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Interplay MVE file demuxer
* by Mike Melanson (melanson@pcisys.net)
* For more information regarding the Interplay MVE file format, visit:
* http://www.pcisys.net/~melanson/codecs/
* The aforementioned site also contains a command line utility for parsing
* IP MVE files so that you can get a good idea of the typical structure of
* such files. This demuxer is not the best example to use if you are trying
* to write your own as it uses a rather roundabout approach for splitting
* up and sending out the chunks.
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
/* debugging support: #define DEBUG_IPMOVIE as non-zero to see extremely
* verbose information about the demux process */
#define DEBUG_IPMOVIE 0
#if DEBUG_IPMOVIE
#undef printf
#define debug_ipmovie printf
#else
static inline void debug_ipmovie(const char *format, ...) { }
#endif
#define CHUNK_PREAMBLE_SIZE 4
#define OPCODE_PREAMBLE_SIZE 4
#define CHUNK_INIT_AUDIO 0x0000
#define CHUNK_AUDIO_ONLY 0x0001
#define CHUNK_INIT_VIDEO 0x0002
#define CHUNK_VIDEO 0x0003
#define CHUNK_SHUTDOWN 0x0004
#define CHUNK_END 0x0005
/* these last types are used internally */
#define CHUNK_DONE 0xFFFC
#define CHUNK_NOMEM 0xFFFD
#define CHUNK_EOF 0xFFFE
#define CHUNK_BAD 0xFFFF
#define OPCODE_END_OF_STREAM 0x00
#define OPCODE_END_OF_CHUNK 0x01
#define OPCODE_CREATE_TIMER 0x02
#define OPCODE_INIT_AUDIO_BUFFERS 0x03
#define OPCODE_START_STOP_AUDIO 0x04
#define OPCODE_INIT_VIDEO_BUFFERS 0x05
#define OPCODE_UNKNOWN_06 0x06
#define OPCODE_SEND_BUFFER 0x07
#define OPCODE_AUDIO_FRAME 0x08
#define OPCODE_SILENCE_FRAME 0x09
#define OPCODE_INIT_VIDEO_MODE 0x0A
#define OPCODE_CREATE_GRADIENT 0x0B
#define OPCODE_SET_PALETTE 0x0C
#define OPCODE_SET_PALETTE_COMPRESSED 0x0D
#define OPCODE_UNKNOWN_0E 0x0E
#define OPCODE_SET_DECODING_MAP 0x0F
#define OPCODE_UNKNOWN_10 0x10
#define OPCODE_VIDEO_DATA 0x11
#define OPCODE_UNKNOWN_12 0x12
#define OPCODE_UNKNOWN_13 0x13
#define OPCODE_UNKNOWN_14 0x14
#define OPCODE_UNKNOWN_15 0x15
#define PALETTE_COUNT 256
/* Demuxer state for one Interplay MVE file.  The demuxer works in two
 * phases: process_ipmovie_chunk() scans opcodes and only records the
 * file offsets/sizes of pending audio/video/decode-map payloads, then
 * load_ipmovie_packet() seeks back and reads them into AVPackets. */
typedef struct IPMVEContext {
    unsigned char *buf;
    int buf_size;
    uint64_t frame_pts_inc;        /* pts step per video frame, in 1/1000000 s */
    unsigned int video_bpp;
    unsigned int video_width;
    unsigned int video_height;
    int64_t video_pts;
    unsigned int audio_bits;
    unsigned int audio_channels;
    unsigned int audio_sample_rate;
    enum CodecID audio_type;       /* CODEC_ID_NONE when the file is silent */
    unsigned int audio_frame_count; /* running sample count, used as audio pts */
    int video_stream_index;
    int audio_stream_index;
    /* offsets are 0 when no payload of that kind is pending */
    int64_t audio_chunk_offset;
    int audio_chunk_size;
    int64_t video_chunk_offset;
    int video_chunk_size;
    int64_t decode_map_chunk_offset;
    int decode_map_chunk_size;
    int64_t next_chunk_offset;     /* where the next chunk preamble starts */
    AVPaletteControl palette_control;
} IPMVEContext;
/* Emit one pending payload (recorded earlier by process_ipmovie_chunk)
 * as an AVPacket.  Audio takes priority over video.  Returns CHUNK_VIDEO
 * when a packet was produced, CHUNK_DONE when nothing was pending (the
 * stream is repositioned to the next chunk), or CHUNK_EOF/CHUNK_NOMEM
 * on error. */
static int load_ipmovie_packet(IPMVEContext *s, ByteIOContext *pb,
    AVPacket *pkt) {

    int chunk_type;

    if (s->audio_chunk_offset) {

        /* adjust for PCM audio by skipping chunk header */
        if (s->audio_type != CODEC_ID_INTERPLAY_DPCM) {
            s->audio_chunk_offset += 6;
            s->audio_chunk_size -= 6;
        }

        url_fseek(pb, s->audio_chunk_offset, SEEK_SET);
        s->audio_chunk_offset = 0;  /* mark consumed */

        if (s->audio_chunk_size != av_get_packet(pb, pkt, s->audio_chunk_size))
            return CHUNK_EOF;

        pkt->stream_index = s->audio_stream_index;
        pkt->pts = s->audio_frame_count;

        /* audio frame maintenance: advance the sample counter so the next
         * audio packet's pts is correct (DPCM stores 6 header bytes and
         * one nibble per sample, hence the different formulas) */
        if (s->audio_type != CODEC_ID_INTERPLAY_DPCM)
            s->audio_frame_count +=
            (s->audio_chunk_size / s->audio_channels / (s->audio_bits / 8));
        else
            s->audio_frame_count +=
                (s->audio_chunk_size - 6) / s->audio_channels;

        debug_ipmovie("sending audio frame with pts %"PRId64" (%d audio frames)\n",
            pkt->pts, s->audio_frame_count);

        chunk_type = CHUNK_VIDEO;

    } else if (s->decode_map_chunk_offset) {

        /* send both the decode map and the video data together: the
         * Interplay video decoder expects them in one packet */
        if (av_new_packet(pkt, s->decode_map_chunk_size + s->video_chunk_size))
            return CHUNK_NOMEM;

        pkt->pos= s->decode_map_chunk_offset;
        url_fseek(pb, s->decode_map_chunk_offset, SEEK_SET);
        s->decode_map_chunk_offset = 0;

        if (get_buffer(pb, pkt->data, s->decode_map_chunk_size) !=
            s->decode_map_chunk_size) {
            av_free_packet(pkt);
            return CHUNK_EOF;
        }

        url_fseek(pb, s->video_chunk_offset, SEEK_SET);
        s->video_chunk_offset = 0;

        if (get_buffer(pb, pkt->data + s->decode_map_chunk_size,
            s->video_chunk_size) != s->video_chunk_size) {
            av_free_packet(pkt);
            return CHUNK_EOF;
        }

        pkt->stream_index = s->video_stream_index;
        pkt->pts = s->video_pts;

        debug_ipmovie("sending video frame with pts %"PRId64"\n",
            pkt->pts);

        s->video_pts += s->frame_pts_inc;

        chunk_type = CHUNK_VIDEO;

    } else {

        /* nothing pending: position the stream at the next chunk */
        url_fseek(pb, s->next_chunk_offset, SEEK_SET);
        chunk_type = CHUNK_DONE;

    }

    return chunk_type;
}
/* This function loads and processes a single chunk in an IP movie file.
* It returns the type of chunk that was processed. */
/* This function loads and processes a single chunk in an IP movie file.
 * It returns the type of chunk that was processed.
 *
 * A chunk is a 4-byte preamble (LE16 size, LE16 type) followed by a
 * sequence of opcodes, each with its own 4-byte preamble (LE16 size,
 * type byte, version byte).  Payload opcodes (audio frame, decode map,
 * video data) are not read here; only their file offsets are recorded
 * in *s, and load_ipmovie_packet() materializes them afterwards. */
static int process_ipmovie_chunk(IPMVEContext *s, ByteIOContext *pb,
    AVPacket *pkt)
{
    unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
    int chunk_type;
    int chunk_size;
    unsigned char opcode_preamble[OPCODE_PREAMBLE_SIZE];
    unsigned char opcode_type;
    unsigned char opcode_version;
    int opcode_size;
    unsigned char scratch[1024];
    int i, j;
    int first_color, last_color;
    int audio_flags;
    unsigned char r, g, b;

    /* see if there are any pending packets */
    chunk_type = load_ipmovie_packet(s, pb, pkt);
    if (chunk_type != CHUNK_DONE)
        return chunk_type;

    /* read the next chunk, wherever the file happens to be pointing */
    if (url_feof(pb))
        return CHUNK_EOF;
    if (get_buffer(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
        CHUNK_PREAMBLE_SIZE)
        return CHUNK_BAD;
    chunk_size = AV_RL16(&chunk_preamble[0]);
    chunk_type = AV_RL16(&chunk_preamble[2]);

    debug_ipmovie("chunk type 0x%04X, 0x%04X bytes: ", chunk_type, chunk_size);

    switch (chunk_type) {

    case CHUNK_INIT_AUDIO:
        debug_ipmovie("initialize audio\n");
        break;

    case CHUNK_AUDIO_ONLY:
        debug_ipmovie("audio only\n");
        break;

    case CHUNK_INIT_VIDEO:
        debug_ipmovie("initialize video\n");
        break;

    case CHUNK_VIDEO:
        debug_ipmovie("video (and audio)\n");
        break;

    case CHUNK_SHUTDOWN:
        debug_ipmovie("shutdown\n");
        break;

    case CHUNK_END:
        debug_ipmovie("end\n");
        break;

    default:
        debug_ipmovie("invalid chunk\n");
        chunk_type = CHUNK_BAD;
        break;

    }

    /* iterate through the opcodes until the chunk payload is exhausted */
    while ((chunk_size > 0) && (chunk_type != CHUNK_BAD)) {

        /* read the next chunk, wherever the file happens to be pointing */
        if (url_feof(pb)) {
            chunk_type = CHUNK_EOF;
            break;
        }
        if (get_buffer(pb, opcode_preamble, CHUNK_PREAMBLE_SIZE) !=
            CHUNK_PREAMBLE_SIZE) {
            chunk_type = CHUNK_BAD;
            break;
        }

        opcode_size = AV_RL16(&opcode_preamble[0]);
        opcode_type = opcode_preamble[2];
        opcode_version = opcode_preamble[3];

        chunk_size -= OPCODE_PREAMBLE_SIZE;
        chunk_size -= opcode_size;
        if (chunk_size < 0) {
            /* the opcode claims more payload than the chunk has left */
            debug_ipmovie("chunk_size countdown just went negative\n");
            chunk_type = CHUNK_BAD;
            break;
        }

        debug_ipmovie("  opcode type %02X, version %d, 0x%04X bytes: ",
            opcode_type, opcode_version, opcode_size);

        switch (opcode_type) {

        case OPCODE_END_OF_STREAM:
            debug_ipmovie("end of stream\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_END_OF_CHUNK:
            debug_ipmovie("end of chunk\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_CREATE_TIMER:
            debug_ipmovie("create timer\n");
            if ((opcode_version > 0) || (opcode_size > 6)) {
                debug_ipmovie("bad create_timer opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (get_buffer(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            /* pts increment = timer rate (us) * subdivision */
            s->frame_pts_inc = ((uint64_t)AV_RL32(&scratch[0])) * AV_RL16(&scratch[4]);
            debug_ipmovie("  %.2f frames/second (timer div = %d, subdiv = %d)\n",
                1000000.0/s->frame_pts_inc, AV_RL32(&scratch[0]), AV_RL16(&scratch[4]));
            break;

        case OPCODE_INIT_AUDIO_BUFFERS:
            debug_ipmovie("initialize audio buffers\n");
            if ((opcode_version > 1) || (opcode_size > 10)) {
                debug_ipmovie("bad init_audio_buffers opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (get_buffer(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            s->audio_sample_rate = AV_RL16(&scratch[4]);
            audio_flags = AV_RL16(&scratch[2]);
            /* bit 0 of the flags: 0 = mono, 1 = stereo */
            s->audio_channels = (audio_flags & 1) + 1;
            /* bit 1 of the flags: 0 = 8 bit, 1 = 16 bit */
            s->audio_bits = (((audio_flags >> 1) & 1) + 1) * 8;
            /* bit 2 indicates compressed audio in version 1 opcode */
            if ((opcode_version == 1) && (audio_flags & 0x4))
                s->audio_type = CODEC_ID_INTERPLAY_DPCM;
            else if (s->audio_bits == 16)
                s->audio_type = CODEC_ID_PCM_S16LE;
            else
                s->audio_type = CODEC_ID_PCM_U8;
            debug_ipmovie("audio: %d bits, %d Hz, %s, %s format\n",
                s->audio_bits,
                s->audio_sample_rate,
                (s->audio_channels == 2) ? "stereo" : "mono",
                (s->audio_type == CODEC_ID_INTERPLAY_DPCM) ?
                "Interplay audio" : "PCM");
            break;

        case OPCODE_START_STOP_AUDIO:
            debug_ipmovie("start/stop audio\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_INIT_VIDEO_BUFFERS:
            debug_ipmovie("initialize video buffers\n");
            if ((opcode_version > 2) || (opcode_size > 8)) {
                debug_ipmovie("bad init_video_buffers opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (get_buffer(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            /* dimensions are stored in 8-pixel units */
            s->video_width = AV_RL16(&scratch[0]) * 8;
            s->video_height = AV_RL16(&scratch[2]) * 8;
            if (opcode_version < 2 || !AV_RL16(&scratch[6])) {
                s->video_bpp = 8;
            } else {
                s->video_bpp = 16;
            }
            debug_ipmovie("video resolution: %d x %d\n",
                s->video_width, s->video_height);
            break;

        case OPCODE_UNKNOWN_06:
        case OPCODE_UNKNOWN_0E:
        case OPCODE_UNKNOWN_10:
        case OPCODE_UNKNOWN_12:
        case OPCODE_UNKNOWN_13:
        case OPCODE_UNKNOWN_14:
        case OPCODE_UNKNOWN_15:
            debug_ipmovie("unknown (but documented) opcode %02X\n", opcode_type);
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_SEND_BUFFER:
            debug_ipmovie("send buffer\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_AUDIO_FRAME:
            debug_ipmovie("audio frame\n");

            /* log position and move on for now */
            s->audio_chunk_offset = url_ftell(pb);
            s->audio_chunk_size = opcode_size;
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_SILENCE_FRAME:
            debug_ipmovie("silence frame\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_INIT_VIDEO_MODE:
            debug_ipmovie("initialize video mode\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_CREATE_GRADIENT:
            debug_ipmovie("create gradient\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_SET_PALETTE:
            debug_ipmovie("set palette\n");
            /* check for the logical maximum palette size
             * (3 * 256 + 4 bytes) */
            if (opcode_size > 0x304) {
                debug_ipmovie("demux_ipmovie: set_palette opcode too large\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (get_buffer(pb, scratch, opcode_size) != opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }

            /* load the palette into internal data structure */
            first_color = AV_RL16(&scratch[0]);
            last_color = first_color + AV_RL16(&scratch[2]) - 1;
            /* sanity check (since they are 16 bit values) */
            if ((first_color > 0xFF) || (last_color > 0xFF)) {
                debug_ipmovie("demux_ipmovie: set_palette indexes out of range (%d -> %d)\n",
                    first_color, last_color);
                chunk_type = CHUNK_BAD;
                break;
            }
            j = 4;  /* offset of first palette data */
            for (i = first_color; i <= last_color; i++) {
                /* the palette is stored as a 6-bit VGA palette, thus each
                 * component is shifted up to a 8-bit range */
                r = scratch[j++] * 4;
                g = scratch[j++] * 4;
                b = scratch[j++] * 4;
                s->palette_control.palette[i] = (r << 16) | (g << 8) | (b);
            }
            /* indicate a palette change */
            s->palette_control.palette_changed = 1;
            break;

        case OPCODE_SET_PALETTE_COMPRESSED:
            debug_ipmovie("set palette compressed\n");
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_SET_DECODING_MAP:
            debug_ipmovie("set decoding map\n");

            /* log position and move on for now */
            s->decode_map_chunk_offset = url_ftell(pb);
            s->decode_map_chunk_size = opcode_size;
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        case OPCODE_VIDEO_DATA:
            debug_ipmovie("set video data\n");

            /* log position and move on for now */
            s->video_chunk_offset = url_ftell(pb);
            s->video_chunk_size = opcode_size;
            url_fseek(pb, opcode_size, SEEK_CUR);
            break;

        default:
            debug_ipmovie("*** unknown opcode type\n");
            chunk_type = CHUNK_BAD;
            break;

        }
    }

    /* make a note of where the stream is sitting */
    s->next_chunk_offset = url_ftell(pb);

    /* dispatch the first of any pending packets */
    if ((chunk_type == CHUNK_VIDEO) || (chunk_type == CHUNK_AUDIO_ONLY))
        chunk_type = load_ipmovie_packet(s, pb, pkt);

    return chunk_type;
}
static const char signature[] = "Interplay MVE File\x1A\0\x1A";
static int ipmovie_probe(AVProbeData *p)
{
uint8_t *b = p->buf;
uint8_t *b_end = p->buf + p->buf_size - sizeof(signature);
do {
if (memcmp(b++, signature, sizeof(signature)) == 0)
return AVPROBE_SCORE_MAX;
} while (b < b_end);
return 0;
}
/* Read the MVE header: locate the signature, process the mandatory
 * CHUNK_INIT_VIDEO, optionally process CHUNK_INIT_AUDIO, then create
 * the video stream and (if present) the audio stream. */
static int ipmovie_read_header(AVFormatContext *s,
                               AVFormatParameters *ap)
{
    IPMVEContext *ipmovie = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVPacket pkt;
    AVStream *st;
    unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
    int chunk_type;
    uint8_t signature_buffer[sizeof(signature)];

    /* scan byte-by-byte until the signature lines up in the sliding
     * window (the signature need not be at offset 0) */
    get_buffer(pb, signature_buffer, sizeof(signature_buffer));
    while (memcmp(signature_buffer, signature, sizeof(signature))) {
        memmove(signature_buffer, signature_buffer + 1, sizeof(signature_buffer) - 1);
        signature_buffer[sizeof(signature_buffer) - 1] = get_byte(pb);
        if (url_feof(pb))
            return AVERROR_EOF;
    }
    /* initialize private context members */
    ipmovie->video_pts = ipmovie->audio_frame_count = 0;
    ipmovie->audio_chunk_offset = ipmovie->video_chunk_offset =
    ipmovie->decode_map_chunk_offset = 0;

    /* on the first read, this will position the stream at the first chunk
     * (skips 4 bytes following the signature -- presumably a header field;
     * TODO confirm against the MVE format description) */
    ipmovie->next_chunk_offset = url_ftell(pb) + 4;

    /* process the first chunk which should be CHUNK_INIT_VIDEO */
    if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_VIDEO)
        return AVERROR_INVALIDDATA;

    /* peek ahead to the next chunk-- if it is an init audio chunk, process
     * it; if it is the first video chunk, this is a silent file */
    if (get_buffer(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
        CHUNK_PREAMBLE_SIZE)
        return AVERROR(EIO);
    chunk_type = AV_RL16(&chunk_preamble[2]);
    url_fseek(pb, -CHUNK_PREAMBLE_SIZE, SEEK_CUR);

    if (chunk_type == CHUNK_VIDEO)
        ipmovie->audio_type = CODEC_ID_NONE;  /* no audio */
    else if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_AUDIO)
        return AVERROR_INVALIDDATA;

    /* initialize the stream decoders */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    /* video pts are in microseconds (see OPCODE_CREATE_TIMER handling) */
    av_set_pts_info(st, 63, 1, 1000000);
    ipmovie->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_INTERPLAY_VIDEO;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = ipmovie->video_width;
    st->codec->height = ipmovie->video_height;
    st->codec->bits_per_coded_sample = ipmovie->video_bpp;

    /* palette considerations */
    st->codec->palctrl = &ipmovie->palette_control;

    if (ipmovie->audio_type) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        av_set_pts_info(st, 32, 1, ipmovie->audio_sample_rate);
        ipmovie->audio_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = ipmovie->audio_type;
        st->codec->codec_tag = 0;  /* no tag */
        st->codec->channels = ipmovie->audio_channels;
        st->codec->sample_rate = ipmovie->audio_sample_rate;
        st->codec->bits_per_coded_sample = ipmovie->audio_bits;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_coded_sample;
        /* DPCM stores 4 bits per sample, hence half the PCM bit rate */
        if (st->codec->codec_id == CODEC_ID_INTERPLAY_DPCM)
            st->codec->bit_rate /= 2;
        st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
    }

    return 0;
}
/* Demux one packet: drive the chunk processor and translate its
 * internal chunk status codes into libavformat return values. */
static int ipmovie_read_packet(AVFormatContext *s,
                               AVPacket *pkt)
{
    IPMVEContext *ipmovie = s->priv_data;
    ByteIOContext *pb = s->pb;

    switch (process_ipmovie_chunk(ipmovie, pb, pkt)) {
    case CHUNK_VIDEO:
        return 0;              /* a packet was produced */
    case CHUNK_BAD:
        return AVERROR_INVALIDDATA;
    case CHUNK_EOF:
        return AVERROR(EIO);
    case CHUNK_NOMEM:
        return AVERROR(ENOMEM);
    default:
        return -1;             /* non-payload chunk: no packet this time */
    }
}
/* Demuxer registration record (positional initializers, old lavf ABI):
 * name, long name, priv_data size, probe, read_header, read_packet. */
AVInputFormat ipmovie_demuxer = {
    "ipmovie",
    NULL_IF_CONFIG_SMALL("Interplay MVE format"),
    sizeof(IPMVEContext),
    ipmovie_probe,
    ipmovie_read_header,
    ipmovie_read_packet,
};
| 123linslouis-android-video-cutter | jni/libavformat/ipmovie.c | C | asf20 | 20,951 |
/*
* Xiph RTP Protocols
* Copyright (c) 2009 Colin McQuillian
* Copyright (c) 2010 Josh Allmann
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* @brief Xiph / RTP Code
* @author Colin McQuillan <m.niloc@gmail.com>
* @author Josh Allmann <joshua.allmann@gmail.com>
*/
#include "libavutil/avstring.h"
#include "libavutil/base64.h"
#include "libavcodec/bytestream.h"
#include <assert.h>
#include "rtpdec.h"
#include "rtpdec_xiph.h"
/**
* RTP/Xiph specific private data.
*/
struct PayloadContext {
    unsigned ident;             ///< 24-bit stream configuration identifier
    uint32_t timestamp;         ///< RTP timestamp of the fragment being assembled
    ByteIOContext* fragment;    ///< buffer for split payloads; NULL when none pending
};
/* Allocate a zero-initialized per-stream context: ident 0, no pending
 * fragment buffer.  Returns NULL on allocation failure. */
static PayloadContext *xiph_new_context(void)
{
    PayloadContext *ctx = av_mallocz(sizeof(*ctx));
    return ctx;
}
/* Discard any half-assembled fragment.  Closing the dynamic buffer
 * hands its backing storage to us, so it must be freed here. */
static inline void free_fragment_if_needed(PayloadContext * data)
{
    uint8_t *dyn_buf;

    if (!data->fragment)
        return;

    url_close_dyn_buf(data->fragment, &dyn_buf);
    av_free(dyn_buf);
    data->fragment = NULL;
}
/* Release a per-stream context: drop any pending fragment first,
 * then the context struct itself. */
static void xiph_free_context(PayloadContext * data)
{
    free_fragment_if_needed(data);
    av_free(data);
}
/* Depacketize one RTP/Xiph packet (RFC 5215 framing).
 *
 * Header layout: 24-bit ident, then one byte with fragment type (2 bits),
 * data type (2 bits) and packet count (4 bits, masked to 3 here), then a
 * 16-bit length of the first sub-packet.
 *
 * Returns 0 when pkt was filled, AVERROR(EAGAIN) while a fragmented
 * payload is still being collected, or a negative error. */
static int xiph_handle_packet(AVFormatContext * ctx,
                              PayloadContext * data,
                              AVStream * st,
                              AVPacket * pkt,
                              uint32_t * timestamp,
                              const uint8_t * buf, int len, int flags)
{

    int ident, fragmented, tdt, num_pkts, pkt_len;

    if (len < 6) {
        av_log(ctx, AV_LOG_ERROR, "Invalid %d byte packet\n", len);
        return AVERROR_INVALIDDATA;
    }

    // read xiph rtp headers
    ident       = AV_RB24(buf);
    fragmented  = buf[3] >> 6;
    tdt         = (buf[3] >> 4) & 3;
    num_pkts    = buf[3] & 7;
    pkt_len     = AV_RB16(buf + 4);

    if (pkt_len > len - 6) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid packet length %d in %d byte packet\n", pkt_len,
               len);
        return AVERROR_INVALIDDATA;
    }

    if (ident != data->ident) {
        /* ident must match the one announced in the SDP config */
        av_log(ctx, AV_LOG_ERROR,
               "Unimplemented Xiph SDP configuration change detected\n");
        return AVERROR_PATCHWELCOME;
    }

    if (tdt) {
        /* only raw payload data (tdt == 0) is supported */
        av_log(ctx, AV_LOG_ERROR,
               "Unimplemented RTP Xiph packet settings (%d,%d,%d)\n",
               fragmented, tdt, num_pkts);
        return AVERROR_PATCHWELCOME;
    }

    buf += 6; // move past header bits
    len -= 6;

    if (fragmented == 0) {
        // whole frame(s)
        int i, data_len, write_len;
        /* step back so each sub-packet's 2-byte length prefix is visible */
        buf -= 2;
        len += 2;

        // fast first pass to calculate total length
        for (i = 0, data_len = 0;  (i < num_pkts) && (len >= 2);  i++) {
            int off   = data_len + (i << 1);
            pkt_len   = AV_RB16(buf + off);
            data_len += pkt_len;
            len      -= pkt_len + 2;
        }

        if (len < 0 || i < num_pkts) {
            av_log(ctx, AV_LOG_ERROR,
                   "Bad packet: %d bytes left at frame %d of %d\n",
                   len, i, num_pkts);
            return AVERROR_INVALIDDATA;
        }

        if (av_new_packet(pkt, data_len)) {
            av_log(ctx, AV_LOG_ERROR, "Out of memory.\n");
            return AVERROR(ENOMEM);
        }
        pkt->stream_index = st->index;

        // concatenate frames
        for (i = 0, write_len = 0; write_len < data_len; i++) {
            pkt_len = AV_RB16(buf);
            buf += 2;
            memcpy(pkt->data + write_len, buf, pkt_len);
            write_len += pkt_len;
            buf += pkt_len;
        }
        assert(write_len == data_len);

        return 0;

    } else if (fragmented == 1) {
        // start of xiph data fragment
        int res;

        // end packet has been lost somewhere, so drop buffered data
        free_fragment_if_needed(data);

        if((res = url_open_dyn_buf(&data->fragment)) < 0)
            return res;

        put_buffer(data->fragment, buf, pkt_len);
        /* remember the timestamp so continuation fragments can be matched */
        data->timestamp = *timestamp;

    } else {
        assert(fragmented < 4);

        if (data->timestamp != *timestamp) {
            // skip if fragmented timestamp is incorrect;
            // a start packet has been lost somewhere
            free_fragment_if_needed(data);
            av_log(ctx, AV_LOG_ERROR, "RTP timestamps don't match!\n");
            return AVERROR_INVALIDDATA;
        }

        // copy data to fragment buffer
        put_buffer(data->fragment, buf, pkt_len);

        if (fragmented == 3) {
            // end of xiph data packet
            uint8_t* xiph_data;
            int frame_size = url_close_dyn_buf(data->fragment, &xiph_data);

            if (frame_size < 0) {
                av_log(ctx, AV_LOG_ERROR,
                       "Error occurred when getting fragment buffer.");
                return frame_size;
            }

            if (av_new_packet(pkt, frame_size)) {
                av_log(ctx, AV_LOG_ERROR, "Out of memory.\n");
                return AVERROR(ENOMEM);
            }

            memcpy(pkt->data, xiph_data, frame_size);
            pkt->stream_index = st->index;

            av_free(xiph_data);
            data->fragment = NULL;

            return 0;
        }
    }

   return AVERROR(EAGAIN);
}
/**
* Length encoding described in RFC5215 section 3.1.1.
*/
/**
 * Length encoding described in RFC5215 section 3.1.1.
 *
 * Decode a variable-length base-128 integer: 7 value bits per byte,
 * high bit set on every byte except the last.  *buf is advanced past
 * the consumed bytes.  Returns 0 if the buffer ends mid-value.
 */
static int get_base128(const uint8_t ** buf, const uint8_t * buf_end)
{
    int value = 0;

    while (*buf < buf_end) {
        uint8_t byte = **buf;
        ++*buf;
        value = (value << 7) | (byte & 0x7f);
        if (!(byte & 0x80))
            return value;
    }
    return 0;
}
/**
* Based off parse_packed_headers in Vorbis RTP
*/
/**
 * Based off parse_packed_headers in Vorbis RTP
 *
 * Unpack the base64-decoded SDP "configuration" blob (one packed header
 * set) into codec->extradata in the lavf Xiph format: a '2' count marker,
 * two xiphlacing-encoded lengths, then the concatenated headers.
 * Also records the 24-bit ident in xiph_data for later packet matching.
 *
 * NOTE(review): declared as returning unsigned int but returns negative
 * AVERROR codes on failure; callers treat the result as signed int.
 */
static unsigned int
parse_packed_headers(const uint8_t * packed_headers,
                     const uint8_t * packed_headers_end,
                     AVCodecContext * codec, PayloadContext * xiph_data)
{

    unsigned num_packed, num_headers, length, length1, length2, extradata_alloc;
    uint8_t *ptr;

    /* minimum: 4-byte count + 3-byte ident + 2-byte length */
    if (packed_headers_end - packed_headers < 9) {
        av_log(codec, AV_LOG_ERROR,
               "Invalid %d byte packed header.",
               packed_headers_end - packed_headers);
        return AVERROR_INVALIDDATA;
    }

    num_packed         = bytestream_get_be32(&packed_headers);
    xiph_data->ident   = bytestream_get_be24(&packed_headers);
    length             = bytestream_get_be16(&packed_headers);
    num_headers        = get_base128(&packed_headers, packed_headers_end);
    length1            = get_base128(&packed_headers, packed_headers_end);
    length2            = get_base128(&packed_headers, packed_headers_end);

    if (num_packed != 1 || num_headers > 3) {
        av_log(codec, AV_LOG_ERROR,
               "Unimplemented number of headers: %d packed headers, %d headers\n",
               num_packed, num_headers);
        return AVERROR_PATCHWELCOME;
    }

    if (packed_headers_end - packed_headers != length ||
        length1 > length || length2 > length - length1) {
        av_log(codec, AV_LOG_ERROR,
               "Bad packed header lengths (%d,%d,%d,%d)\n", length1,
               length2, packed_headers_end - packed_headers, length);
        return AVERROR_INVALIDDATA;
    }

    /* allocate extra space:
     * -- length/255 +2 for xiphlacing
     * -- one for the '2' marker
     * -- FF_INPUT_BUFFER_PADDING_SIZE required */
    extradata_alloc = length + length/255 + 3 + FF_INPUT_BUFFER_PADDING_SIZE;

    ptr = codec->extradata = av_malloc(extradata_alloc);
    if (!ptr) {
        av_log(codec, AV_LOG_ERROR, "Out of memory\n");
        return AVERROR(ENOMEM);
    }
    *ptr++ = 2;
    ptr += av_xiphlacing(ptr, length1);
    ptr += av_xiphlacing(ptr, length2);
    memcpy(ptr, packed_headers, length);
    ptr += length;
    codec->extradata_size = ptr - codec->extradata;
    // clear out remaining parts of the buffer
    memset(ptr, 0, extradata_alloc - codec->extradata_size);

    return 0;
}
/* Apply one SDP fmtp attribute/value pair to the codec context.
 * Known-but-unsupported attributes yield AVERROR_PATCHWELCOME;
 * unrecognized attributes are silently ignored (return 0). */
static int xiph_parse_fmtp_pair(AVCodecContext * codec,
                                PayloadContext *xiph_data,
                                char *attr, char *value)
{
    if (!strcmp(attr, "sampling"))
        return AVERROR_PATCHWELCOME;

    if (!strcmp(attr, "width")) {
        /* This is an integer between 1 and 1048561
         * and MUST be in multiples of 16. */
        codec->width = atoi(value);
        return 0;
    }

    if (!strcmp(attr, "height")) {
        /* This is an integer between 1 and 1048561
         * and MUST be in multiples of 16. */
        codec->height = atoi(value);
        return 0;
    }

    if (!strcmp(attr, "delivery-method")) {
        /* Possible values are: inline, in_band, out_band/specific_name. */
        return AVERROR_PATCHWELCOME;
    }

    if (!strcmp(attr, "configuration-uri")) {
        /* NOTE: configuration-uri is supported only under 2 conditions:
         *--after the delivery-method tag
         * --with a delivery-method value of out_band */
        return AVERROR_PATCHWELCOME;
    }

    if (!strcmp(attr, "configuration")) {
        /* NOTE: configuration is supported only AFTER the delivery-method tag
         * The configuration value is a base64 encoded packed header */
        size_t decoded_alloc = strlen(value) / 4 * 3 + 4;
        uint8_t *decoded_packet;
        int packet_size, result;

        if (decoded_alloc > INT_MAX) {
            av_log(codec, AV_LOG_ERROR, "Packet too large\n");
            return AVERROR_INVALIDDATA;
        }

        decoded_packet = av_malloc(decoded_alloc);
        if (!decoded_packet) {
            av_log(codec, AV_LOG_ERROR,
                   "Out of memory while decoding SDP configuration.\n");
            return AVERROR(ENOMEM);
        }

        packet_size = av_base64_decode(decoded_packet, value, decoded_alloc);
        result = parse_packed_headers
            (decoded_packet, decoded_packet + packet_size, codec, xiph_data);
        av_free(decoded_packet);
        return result;
    }

    return 0;  /* unknown attribute: ignore */
}
/**
 * Parse one SDP "a=" line for a Xiph stream, handling the fmtp attribute
 * list (width/height/configuration/...).
 *
 * @return 0 on success or when the line is not an fmtp line; a negative
 *         AVERROR code if an fmtp pair fails to parse
 *
 * BUG FIX: the original returned from inside the attribute loop without
 * freeing 'value', leaking the scratch buffer on every malformed fmtp
 * pair.  The buffer is now released on every exit path.
 */
static int xiph_parse_sdp_line(AVFormatContext *s, int st_index,
                               PayloadContext *data, const char *line)
{
    const char *p;
    char *value;
    char attr[25];
    int value_size = strlen(line), attr_size = sizeof(attr), res = 0;
    AVCodecContext* codec = s->streams[st_index]->codec;

    assert(data);

    /* scratch buffer for attribute values; an attribute value can never
     * be longer than the whole line */
    if (!(value = av_malloc(value_size))) {
        av_log(codec, AV_LOG_ERROR, "Out of memory\n");
        return AVERROR(ENOMEM);
    }

    if (av_strstart(line, "fmtp:", &p)) {
        // remove protocol identifier
        while (*p && *p == ' ') p++; // strip spaces
        while (*p && *p != ' ') p++; // eat protocol identifier
        while (*p && *p == ' ') p++; // strip trailing spaces

        while (ff_rtsp_next_attr_and_value(&p,
                                           attr, attr_size,
                                           value, value_size)) {
            res = xiph_parse_fmtp_pair(codec, data, attr, value);
            if (res < 0 && res != AVERROR_PATCHWELCOME) {
                av_free(value);  /* was leaked here before */
                return res;
            }
        }
    }

    av_free(value);
    return 0;
}
/* RTP dynamic payload handler for Theora video carried per the Xiph
 * RTP payload format; shares open/close/packet code with Vorbis. */
RTPDynamicProtocolHandler ff_theora_dynamic_handler = {
    .enc_name         = "theora",
    .codec_type       = AVMEDIA_TYPE_VIDEO,
    .codec_id         = CODEC_ID_THEORA,
    .parse_sdp_a_line = xiph_parse_sdp_line,
    .open             = xiph_new_context,
    .close            = xiph_free_context,
    .parse_packet     = xiph_handle_packet
};
/* RTP dynamic payload handler for Vorbis audio (RFC 5215 payload format);
 * uses the same Xiph packet/context helpers as the Theora handler. */
RTPDynamicProtocolHandler ff_vorbis_dynamic_handler = {
    .enc_name         = "vorbis",
    .codec_type       = AVMEDIA_TYPE_AUDIO,
    .codec_id         = CODEC_ID_VORBIS,
    .parse_sdp_a_line = xiph_parse_sdp_line,
    .open             = xiph_new_context,
    .close            = xiph_free_context,
    .parse_packet     = xiph_handle_packet
};
| 123linslouis-android-video-cutter | jni/libavformat/rtpdec_xiph.c | C | asf20 | 12,642 |
/*
* RTP input format
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* needed for gethostname() */
#define _XOPEN_SOURCE 600
#include "libavcodec/get_bits.h"
#include "avformat.h"
#include "mpegts.h"
#include <unistd.h>
#include "network.h"
#include "rtpdec.h"
#include "rtpdec_amr.h"
#include "rtpdec_asf.h"
#include "rtpdec_h263.h"
#include "rtpdec_h264.h"
#include "rtpdec_xiph.h"
//#define DEBUG
/* TODO: - add RTCP statistics reporting (should be optional).
- add support for h263/mpeg4 packetized output : IDEA: send a
buffer to 'rtp_write_packet' contains all the packets for ONE
frame. Each packet should have a four byte header containing
the length in big endian format (same trick as
'url_open_dyn_packet_buf')
*/
/* statistics functions */
/* Head of the singly-linked list of registered dynamic payload handlers. */
RTPDynamicProtocolHandler *RTPFirstDynamicPayloadHandler= NULL;

/* Minimal handlers that only map an SDP encoding name to a codec id;
 * they have no custom open/close/parse callbacks. */
static RTPDynamicProtocolHandler mp4v_es_handler= {"MP4V-ES", AVMEDIA_TYPE_VIDEO, CODEC_ID_MPEG4};
static RTPDynamicProtocolHandler mpeg4_generic_handler= {"mpeg4-generic", AVMEDIA_TYPE_AUDIO, CODEC_ID_AAC};
/**
 * Prepend a dynamic payload handler to the global handler list.
 * Not thread safe; intended to be called once during registration.
 */
void ff_register_dynamic_payload_handler(RTPDynamicProtocolHandler *handler)
{
    handler->next= RTPFirstDynamicPayloadHandler;
    RTPFirstDynamicPayloadHandler= handler;
}
/**
 * Register all built-in RTP dynamic payload handlers.
 * Each call prepends to the list, so later registrations are found first.
 */
void av_register_rtp_dynamic_payload_handlers(void)
{
    ff_register_dynamic_payload_handler(&mp4v_es_handler);
    ff_register_dynamic_payload_handler(&mpeg4_generic_handler);
    ff_register_dynamic_payload_handler(&ff_amr_nb_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_amr_wb_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_h263_1998_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_h263_2000_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_h264_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_vorbis_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_theora_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_ms_rtp_asf_pfv_handler);
    ff_register_dynamic_payload_handler(&ff_ms_rtp_asf_pfa_handler);
}
/**
 * Parse an incoming RTCP packet and record Sender Report timing.
 * Only SR packets (payload type 200) are handled; the NTP and RTP
 * timestamps they carry are stored so finalize_packet() can derive PTS.
 *
 * @param s   RTP demux context updated with the SR timestamps
 * @param buf raw RTCP packet
 * @param len size of buf in bytes
 * @return 0 on success, -1 if not an SR or the packet is too short
 */
static int rtcp_parse_packet(RTPDemuxContext *s, const unsigned char *buf, int len)
{
    /* fix: we read up to offset 19 below, so reject truncated packets
     * instead of reading past the end of the buffer */
    if (len < 20)
        return -1;
    if (buf[1] != 200)                           /* 200 == RTCP SR */
        return -1;
    s->last_rtcp_ntp_time = AV_RB64(buf + 8);    /* 64-bit NTP timestamp */
    if (s->first_rtcp_ntp_time == AV_NOPTS_VALUE)
        s->first_rtcp_ntp_time = s->last_rtcp_ntp_time;
    s->last_rtcp_timestamp = AV_RB32(buf + 16);  /* RTP timestamp of the SR */
    return 0;
}
#define RTP_SEQ_MOD (1<<16)

/**
 * Initialize the RTP reception statistics for a new source.
 * The source starts in "probation" until an in-sequence follow-up packet
 * confirms it; see rtp_valid_packet_in_sequence().
 */
static void rtp_init_statistics(RTPStatistics *s, uint16_t base_sequence) // called on parse open packet.
{
    memset(s, 0, sizeof(RTPStatistics));
    s->max_seq= base_sequence;
    s->probation= 1;
}
/**
 * Re-base the sequence-number tracking state around seq.
 * Called whenever there is a large jump in sequence numbers, or when a
 * source leaves probation (cf. RFC 3550 Appendix A.1 init_seq()).
 */
static void rtp_init_sequence(RTPStatistics *s, uint16_t seq)
{
    s->max_seq= seq;
    s->cycles= 0;
    s->base_seq= seq -1;
    s->bad_seq= RTP_SEQ_MOD + 1; /* out of 16-bit range, so no seq matches it yet */
    s->received= 0;
    s->expected_prior= 0;
    s->received_prior= 0;
    s->jitter= 0;
    s->transit= 0;
}
/**
 * Validate an incoming sequence number against the reception state.
 * Follows the update_seq() algorithm of RFC 3550 Appendix A.1: new sources
 * must deliver MIN_SEQUENTIAL consecutive packets before being accepted,
 * moderate gaps are tolerated, and large jumps require two consecutive
 * packets before resynchronizing.
 *
 * @return 1 if we should handle this packet, 0 to drop it.
 */
static int rtp_valid_packet_in_sequence(RTPStatistics *s, uint16_t seq)
{
    uint16_t udelta= seq - s->max_seq;  /* 16-bit wraparound delta */
    const int MAX_DROPOUT= 3000;
    const int MAX_MISORDER = 100;
    const int MIN_SEQUENTIAL = 2;

    /* source not valid until MIN_SEQUENTIAL packets with sequential seq. numbers have been received */
    if(s->probation)
    {
        if(seq==s->max_seq + 1) {
            s->probation--;
            s->max_seq= seq;
            if(s->probation==0) {
                rtp_init_sequence(s, seq);
                s->received++;
                return 1;
            }
        } else {
            /* out of order during probation: restart the count */
            s->probation= MIN_SEQUENTIAL - 1;
            s->max_seq = seq;
        }
    } else if (udelta < MAX_DROPOUT) {
        // in order, with permissible gap
        if(seq < s->max_seq) {
            // sequence number wrapped; count another 64k cycles
            s->cycles += RTP_SEQ_MOD;
        }
        s->max_seq= seq;
    } else if (udelta <= RTP_SEQ_MOD - MAX_MISORDER) {
        // sequence made a large jump...
        if(seq==s->bad_seq) {
            // two sequential packets-- assume that the other side restarted without telling us; just resync.
            rtp_init_sequence(s, seq);
        } else {
            s->bad_seq= (seq + 1) & (RTP_SEQ_MOD-1);
            return 0;
        }
    } else {
        // duplicate or reordered packet...
    }
    s->received++;
    return 1;
}
#if 0
/**
 * This function is currently unused; without a valid local ntp time, I don't see how we could calculate the
 * difference between the arrival and sent timestamp. As a result, the jitter and transit statistics values
 * never change. I left this in in case someone else can see a way. (rdm)
 *
 * NOTE(review): if re-enabled, the order below is wrong — s->transit is
 * overwritten before the delta is taken, so d is always 0. The RFC 3550
 * formula needs the OLD s->transit when computing d.
 */
static void rtcp_update_jitter(RTPStatistics *s, uint32_t sent_timestamp, uint32_t arrival_timestamp)
{
    uint32_t transit= arrival_timestamp - sent_timestamp;
    int d;
    s->transit= transit;
    d= FFABS(transit - s->transit);
    s->jitter += d - ((s->jitter + 8)>>4);
}
#endif
/**
 * Possibly send an RTCP Receiver Report + SDES CNAME back to the sender.
 * 'count' is the number of payload bytes just received; an RR is emitted
 * only once enough traffic has accumulated (crude rate limiting).
 *
 * @return 0 if a report was built (whether or not the write succeeded),
 *         -1 if no report was due or the dynamic buffer could not be opened.
 */
int rtp_check_and_send_back_rr(RTPDemuxContext *s, int count)
{
    ByteIOContext *pb;
    uint8_t *buf;
    int len;
    int rtcp_bytes;
    RTPStatistics *stats= &s->statistics;
    uint32_t lost;
    uint32_t extended_max;
    uint32_t expected_interval;
    uint32_t received_interval;
    uint32_t lost_interval;
    uint32_t expected;
    uint32_t fraction;
    uint64_t ntp_time= s->last_rtcp_ntp_time; // TODO: Get local ntp time?

    if (!s->rtp_ctx || (count < 1))
        return -1;

    /* TODO: I think this is way too often; RFC 1889 has algorithm for this */
    /* XXX: mpeg pts hardcoded. RTCP send every 0.5 seconds */
    s->octet_count += count;
    rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) /
        RTCP_TX_RATIO_DEN;
    rtcp_bytes /= 50; // mmu_man: that's enough for me... VLC sends much less btw !?
    if (rtcp_bytes < 28)
        return -1;
    s->last_octet_count = s->octet_count;

    if (url_open_dyn_buf(&pb) < 0)
        return -1;

    // Receiver Report
    put_byte(pb, (RTP_VERSION << 6) + 1); /* 1 report block */
    put_byte(pb, 201);                    /* RTCP packet type: RR */
    put_be16(pb, 7); /* length in words - 1 */
    put_be32(pb, s->ssrc); // our own SSRC
    put_be32(pb, s->ssrc); // XXX: should be the server's here!
    // some placeholders we should really fill...
    // RFC 1889/p64
    extended_max= stats->cycles + stats->max_seq;
    expected= extended_max - stats->base_seq + 1;
    lost= expected - stats->received;
    lost= FFMIN(lost, 0xffffff); // clamp it since it's only 24 bits...
    expected_interval= expected - stats->expected_prior;
    stats->expected_prior= expected;
    received_interval= stats->received - stats->received_prior;
    stats->received_prior= stats->received;
    lost_interval= expected_interval - received_interval;
    if (expected_interval==0 || lost_interval<=0) fraction= 0;
    else fraction = (lost_interval<<8)/expected_interval;

    fraction= (fraction<<24) | lost;

    put_be32(pb, fraction); /* 8 bits of fraction, 24 bits of total packets lost */
    put_be32(pb, extended_max); /* max sequence received */
    put_be32(pb, stats->jitter>>4); /* jitter */

    if(s->last_rtcp_ntp_time==AV_NOPTS_VALUE)
    {
        put_be32(pb, 0); /* last SR timestamp */
        put_be32(pb, 0); /* delay since last SR */
    } else {
        uint32_t middle_32_bits= s->last_rtcp_ntp_time>>16; // this is valid, right? do we need to handle 64 bit values special?
        uint32_t delay_since_last= ntp_time - s->last_rtcp_ntp_time;

        put_be32(pb, middle_32_bits); /* last SR timestamp */
        put_be32(pb, delay_since_last); /* delay since last SR */
    }

    // CNAME
    put_byte(pb, (RTP_VERSION << 6) + 1); /* 1 report block */
    put_byte(pb, 202);                    /* RTCP packet type: SDES */
    len = strlen(s->hostname);
    put_be16(pb, (6 + len + 3) / 4); /* length in words - 1 */
    put_be32(pb, s->ssrc);
    put_byte(pb, 0x01); /* SDES item type: CNAME */
    put_byte(pb, len);
    put_buffer(pb, s->hostname, len);
    // padding to a 32-bit boundary
    for (len = (6 + len) % 4; len % 4; len++) {
        put_byte(pb, 0);
    }

    put_flush_packet(pb);
    len = url_close_dyn_buf(pb, &buf);
    if ((len > 0) && buf) {
        int result;
        dprintf(s->ic, "sending %d bytes of RR\n", len);
        result= url_write(s->rtp_ctx, buf, len);
        dprintf(s->ic, "result from url_write: %d\n", result);
        av_free(buf);
    }
    return 0;
}
/**
 * Send dummy RTP and RTCP packets through the handle to "punch" a hole
 * in NATs/firewalls so the server's packets can reach us. The packet
 * contents are minimal, all-zero headers; errors are deliberately ignored
 * (best effort).
 */
void rtp_send_punch_packets(URLContext* rtp_handle)
{
    ByteIOContext *pb;
    uint8_t *buf;
    int len;

    /* Send a small RTP packet */
    if (url_open_dyn_buf(&pb) < 0)
        return;

    put_byte(pb, (RTP_VERSION << 6));
    put_byte(pb, 0); /* Payload type */
    put_be16(pb, 0); /* Seq */
    put_be32(pb, 0); /* Timestamp */
    put_be32(pb, 0); /* SSRC */

    put_flush_packet(pb);
    len = url_close_dyn_buf(pb, &buf);
    if ((len > 0) && buf)
        url_write(rtp_handle, buf, len);
    av_free(buf);

    /* Send a minimal RTCP RR */
    if (url_open_dyn_buf(&pb) < 0)
        return;

    put_byte(pb, (RTP_VERSION << 6));
    put_byte(pb, 201); /* receiver report */
    put_be16(pb, 1); /* length in words - 1 */
    put_be32(pb, 0); /* our own SSRC */

    put_flush_packet(pb);
    len = url_close_dyn_buf(pb, &buf);
    if ((len > 0) && buf)
        url_write(rtp_handle, buf, len);
    av_free(buf);
}
/**
 * open a new RTP parse context for stream 'st'. 'st' can be NULL for
 * MPEG2TS streams to indicate that they should be demuxed inside the
 * rtp demux (otherwise CODEC_ID_MPEG2TS packets are returned)
 * TODO: change this to not take rtp_payload data, and use the new dynamic payload system.
 *
 * @param s1               owning format context (used for logging/MPEG-TS)
 * @param st               stream to feed, or NULL for in-demuxer MPEG-TS
 * @param rtpc             URL context used to send back RTCP RRs (may be NULL)
 * @param payload_type     the single RTP payload type this context accepts
 * @param rtp_payload_data fmtp-derived payload parameters; may be NULL
 * @return newly allocated context, or NULL on allocation/MPEG-TS failure
 */
RTPDemuxContext *rtp_parse_open(AVFormatContext *s1, AVStream *st, URLContext *rtpc, int payload_type, RTPPayloadData *rtp_payload_data)
{
    RTPDemuxContext *s;

    s = av_mallocz(sizeof(RTPDemuxContext));
    if (!s)
        return NULL;
    s->payload_type = payload_type;
    s->last_rtcp_ntp_time = AV_NOPTS_VALUE;
    s->first_rtcp_ntp_time = AV_NOPTS_VALUE;
    s->ic = s1;
    s->st = st;
    s->rtp_payload_data = rtp_payload_data;
    rtp_init_statistics(&s->statistics, 0); // do we know the initial sequence from sdp?
    if (!strcmp(ff_rtp_enc_name(payload_type), "MP2T")) {
        /* MPEG-TS in RTP: demux the transport stream ourselves */
        s->ts = ff_mpegts_parse_open(s->ic);
        if (s->ts == NULL) {
            av_free(s);
            return NULL;
        }
    } else {
        av_set_pts_info(st, 32, 1, 90000); /* default RTP clock: 90 kHz */
        switch(st->codec->codec_id) {
        case CODEC_ID_MPEG1VIDEO:
        case CODEC_ID_MPEG2VIDEO:
        case CODEC_ID_MP2:
        case CODEC_ID_MP3:
        case CODEC_ID_MPEG4:
        case CODEC_ID_H263:
        case CODEC_ID_H264:
            st->need_parsing = AVSTREAM_PARSE_FULL;
            break;
        default:
            if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
                /* audio streams use the sample rate as RTP clock */
                av_set_pts_info(st, 32, 1, st->codec->sample_rate);
            }
            break;
        }
    }
    // needed to send back RTCP RR in RTSP sessions
    s->rtp_ctx = rtpc;
    gethostname(s->hostname, sizeof(s->hostname));
    return s;
}
/**
 * Attach a dynamic payload handler (and its private context) to an RTP
 * parse context, so rtp_parse_packet() delegates to handler->parse_packet.
 */
void
rtp_parse_set_dynamic_protocol(RTPDemuxContext *s, PayloadContext *ctx,
                               RTPDynamicProtocolHandler *handler)
{
    s->dynamic_protocol_context = ctx;
    s->parse_packet = handler->parse_packet;
}
/**
 * Parse the AU (Access Unit) header section of an mpeg4-generic RTP packet
 * (RFC 3640). All AU headers are collapsed into a single aggregate header
 * in infos->au_headers[0] (see XXX note below).
 *
 * @param s   RTP demux context; s->rtp_payload_data must be set
 * @param buf packet payload, starting at the 16-bit AU-headers-length field
 * @return 0 on success, negative on malformed headers or allocation failure
 */
static int rtp_parse_mp4_au(RTPDemuxContext *s, const uint8_t *buf)
{
    int au_headers_length, au_header_size, i;
    GetBitContext getbitcontext;
    RTPPayloadData *infos;

    infos = s->rtp_payload_data;
    if (infos == NULL)
        return -1;

    /* decode the first 2 bytes where the AUHeader sections are stored
       length in bits */
    au_headers_length = AV_RB16(buf);

    if (au_headers_length > RTP_MAX_PACKET_LENGTH)
        return -1;

    infos->au_headers_length_bytes = (au_headers_length + 7) / 8;

    /* skip AU headers length section (2 bytes) */
    buf += 2;

    init_get_bits(&getbitcontext, buf, infos->au_headers_length_bytes * 8);

    /* XXX: Wrong if optional additional sections are present (cts, dts etc...) */
    au_header_size = infos->sizelength + infos->indexlength;
    if (au_header_size <= 0 || (au_headers_length % au_header_size != 0))
        return -1;

    infos->nb_au_headers = au_headers_length / au_header_size;
    if (!infos->au_headers || infos->au_headers_allocated < infos->nb_au_headers) {
        av_free(infos->au_headers);
        infos->au_headers = av_malloc(sizeof(struct AUHeaders) * infos->nb_au_headers);
        if (!infos->au_headers) {
            /* fix: don't leave a stale capacity behind a NULL pointer,
             * and don't dereference the failed allocation below */
            infos->au_headers_allocated = 0;
            return AVERROR(ENOMEM);
        }
        infos->au_headers_allocated = infos->nb_au_headers;
    }

    /* XXX: We handle multiple AU Section as only one (need to fix this for interleaving)
       In my test, the FAAD decoder does not behave correctly when sending each AU one by one
       but does when sending the whole as one big packet... */
    infos->au_headers[0].size = 0;
    infos->au_headers[0].index = 0;
    for (i = 0; i < infos->nb_au_headers; ++i) {
        infos->au_headers[0].size += get_bits_long(&getbitcontext, infos->sizelength);
        infos->au_headers[0].index = get_bits_long(&getbitcontext, infos->indexlength);
    }

    infos->nb_au_headers = 1;

    return 0;
}
/**
 * This was the second switch in rtp_parse packet. Normalizes time, if required, sets stream_index, etc.
 * If an RTCP SR has been seen, rebuilds an absolute PTS from the NTP time
 * carried by that SR plus the RTP timestamp delta; otherwise pkt->pts is
 * left untouched.
 */
static void finalize_packet(RTPDemuxContext *s, AVPacket *pkt, uint32_t timestamp)
{
    if (s->last_rtcp_ntp_time != AV_NOPTS_VALUE) {
        int64_t addend;
        int delta_timestamp;

        /* compute pts from timestamp with received ntp_time */
        delta_timestamp = timestamp - s->last_rtcp_timestamp;
        /* convert to the PTS timebase */
        addend = av_rescale(s->last_rtcp_ntp_time - s->first_rtcp_ntp_time, s->st->time_base.den, (uint64_t)s->st->time_base.num << 32);
        pkt->pts = s->range_start_offset + addend + delta_timestamp;
    }
}
/**
 * Parse an RTP or RTCP packet directly sent as a buffer.
 * @param s RTP parse context.
 * @param pkt returned packet
 * @param buf input buffer or NULL to read the next packets
 * @param len buffer len
 * @return 0 if a packet is returned, 1 if a packet is returned and more can follow
 * (use buf as NULL to read the next). -1 if no packet (error or no more packet).
 */
int rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
                     const uint8_t *buf, int len)
{
    unsigned int ssrc, h;
    int payload_type, seq, ret, flags = 0;
    AVStream *st;
    uint32_t timestamp;
    int rv= 0;

    if (!buf) {
        /* return the next packets, if any */
        if(s->st && s->parse_packet) {
            timestamp= 0; ///< Should not be used if buf is NULL, but should be set to the timestamp of the packet returned....
            rv= s->parse_packet(s->ic, s->dynamic_protocol_context,
                                s->st, pkt, &timestamp, NULL, 0, flags);
            finalize_packet(s, pkt, timestamp);
            return rv;
        } else {
            // TODO: Move to a dynamic packet handler (like above)
            if (s->read_buf_index >= s->read_buf_size)
                return -1;
            ret = ff_mpegts_parse_packet(s->ts, pkt, s->buf + s->read_buf_index,
                                         s->read_buf_size - s->read_buf_index);
            if (ret < 0)
                return -1;
            s->read_buf_index += ret;
            if (s->read_buf_index < s->read_buf_size)
                return 1;
            else
                return 0;
        }
    }

    /* a valid RTP header is 12 bytes */
    if (len < 12)
        return -1;

    if ((buf[0] & 0xc0) != (RTP_VERSION << 6))
        return -1;
    if (buf[1] >= 200 && buf[1] <= 204) {
        /* RTCP packet types 200-204; record SR timing, no media packet */
        rtcp_parse_packet(s, buf, len);
        return -1;
    }
    payload_type = buf[1] & 0x7f;
    if (buf[1] & 0x80)
        flags |= RTP_FLAG_MARKER;
    seq  = AV_RB16(buf + 2);
    timestamp = AV_RB32(buf + 4);
    ssrc = AV_RB32(buf + 8);
    /* store the ssrc in the RTPDemuxContext */
    s->ssrc = ssrc;

    /* NOTE: we can handle only one payload type */
    if (s->payload_type != payload_type)
        return -1;

    st = s->st;
    // only do something with this if all the rtp checks pass...
    if(!rtp_valid_packet_in_sequence(&s->statistics, seq))
    {
        av_log(st?st->codec:NULL, AV_LOG_ERROR, "RTP: PT=%02x: bad cseq %04x expected=%04x\n",
               payload_type, seq, ((s->seq + 1) & 0xffff));
        return -1;
    }

    s->seq = seq;
    len -= 12;
    buf += 12;

    if (!st) {
        /* specific MPEG2TS demux support */
        ret = ff_mpegts_parse_packet(s->ts, pkt, buf, len);
        if (ret < 0)
            return -1;
        if (ret < len) {
            /* remaining data starts with a 188 byte aligned TS packet;
             * keep it for the next buf==NULL call */
            s->read_buf_size = len - ret;
            memcpy(s->buf, buf + ret, s->read_buf_size);
            s->read_buf_index = 0;
            return 1;
        }
        return 0;
    } else if (s->parse_packet) {
        /* dynamic payload handler installed via rtp_parse_set_dynamic_protocol() */
        rv = s->parse_packet(s->ic, s->dynamic_protocol_context,
                             s->st, pkt, &timestamp, buf, len, flags);
    } else {
        // at this point, the RTP header has been stripped; This is ASSUMING that there is only 1 CSRC, which in't wise.
        switch(st->codec->codec_id) {
        case CODEC_ID_MP2:
        case CODEC_ID_MP3:
            /* better than nothing: skip mpeg audio RTP header */
            if (len <= 4)
                return -1;
            h = AV_RB32(buf);
            len -= 4;
            buf += 4;
            av_new_packet(pkt, len);
            memcpy(pkt->data, buf, len);
            break;
        case CODEC_ID_MPEG1VIDEO:
        case CODEC_ID_MPEG2VIDEO:
            /* better than nothing: skip mpeg video RTP header */
            if (len <= 4)
                return -1;
            h = AV_RB32(buf);
            buf += 4;
            len -= 4;
            if (h & (1 << 26)) {
                /* mpeg2: 4 more header bytes follow */
                if (len <= 4)
                    return -1;
                buf += 4;
                len -= 4;
            }
            av_new_packet(pkt, len);
            memcpy(pkt->data, buf, len);
            break;
        // moved from below, verbatim. this is because this section handles packets, and the lower switch handles
        // timestamps.
        // TODO: Put this into a dynamic packet handler...
        case CODEC_ID_AAC:
            if (rtp_parse_mp4_au(s, buf))
                return -1;
            {
                RTPPayloadData *infos = s->rtp_payload_data;
                if (infos == NULL)
                    return -1;
                buf += infos->au_headers_length_bytes + 2;
                len -= infos->au_headers_length_bytes + 2;

                /* XXX: Fixme we only handle the case where rtp_parse_mp4_au define
                   one au_header */
                av_new_packet(pkt, infos->au_headers[0].size);
                memcpy(pkt->data, buf, infos->au_headers[0].size);
                buf += infos->au_headers[0].size;
                len -= infos->au_headers[0].size;
            }
            s->read_buf_size = len;
            rv= 0;
            break;
        default:
            av_new_packet(pkt, len);
            memcpy(pkt->data, buf, len);
            break;
        }

        pkt->stream_index = st->index;
    }

    // now perform timestamp things....
    finalize_packet(s, pkt, timestamp);

    return rv;
}
/**
 * Free an RTP parse context created by rtp_parse_open().
 * Also releases the fmtp-derived payload data and, for MPEG-TS-in-RTP,
 * the embedded transport stream parser.
 */
void rtp_parse_close(RTPDemuxContext *s)
{
    // TODO: fold this into the protocol specific data fields.
    /* fix: rtp_payload_data may be NULL (rtp_parse_open() accepts NULL),
     * so guard before dereferencing it */
    if (s->rtp_payload_data) {
        av_free(s->rtp_payload_data->mode);
        av_free(s->rtp_payload_data->au_headers);
    }
    if (!strcmp(ff_rtp_enc_name(s->payload_type), "MP2T")) {
        ff_mpegts_parse_close(s->ts);
    }
    av_free(s);
}
| 123linslouis-android-video-cutter | jni/libavformat/rtpdec.c | C | asf20 | 20,439 |
/*
* ASF compatible demuxer
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
//#define DEBUG
#include "libavutil/common.h"
#include "libavutil/avstring.h"
#include "libavcodec/mpegaudio.h"
#include "avformat.h"
#include "riff.h"
#include "asf.h"
#include "asfcrypt.h"
#include "avlanguage.h"
void ff_mms_set_stream_selection(URLContext *h, AVFormatContext *format);
#undef NDEBUG
#include <assert.h>
#define FRAME_HEADER_SIZE 17
// Fix Me! FRAME_HEADER_SIZE may be different.

/* GUID of the ASF Simple Index Object (used when seeking). */
static const ff_asf_guid index_guid = {
    0x90, 0x08, 0x00, 0x33, 0xb1, 0xe5, 0xcf, 0x11, 0x89, 0xf4, 0x00, 0xa0, 0xc9, 0x03, 0x49, 0xcb
};

/* GUID of the ASF Stream Bitrate Properties Object. */
static const ff_asf_guid stream_bitrate_guid = { /* (http://get.to/sdp) */
    0xce, 0x75, 0xf8, 0x7b, 0x8d, 0x46, 0xd1, 0x11, 0x8d, 0x82, 0x00, 0x60, 0x97, 0xc9, 0xa2, 0xb2
};
/**********************************/
/* decoding */

/* Compare two 16-byte ASF GUIDs; memcmp semantics (0 means equal). */
static int guidcmp(const void *g1, const void *g2)
{
    return memcmp(g1, g2, sizeof(ff_asf_guid));
}
#ifdef DEBUG
#define PRINT_IF_GUID(g,cmp) \
if (!guidcmp(g, &cmp)) \
    dprintf(NULL, "(GUID: %s) ", #cmp)

/* Debug helper: print a symbolic name for a known GUID (or "unknown")
 * followed by its raw bytes. Compiled out unless DEBUG is defined. */
static void print_guid(const ff_asf_guid *g)
{
    int i;
    PRINT_IF_GUID(g, ff_asf_header);
    else PRINT_IF_GUID(g, ff_asf_file_header);
    else PRINT_IF_GUID(g, ff_asf_stream_header);
    else PRINT_IF_GUID(g, ff_asf_audio_stream);
    else PRINT_IF_GUID(g, ff_asf_audio_conceal_none);
    else PRINT_IF_GUID(g, ff_asf_video_stream);
    else PRINT_IF_GUID(g, ff_asf_video_conceal_none);
    else PRINT_IF_GUID(g, ff_asf_command_stream);
    else PRINT_IF_GUID(g, ff_asf_comment_header);
    else PRINT_IF_GUID(g, ff_asf_codec_comment_header);
    else PRINT_IF_GUID(g, ff_asf_codec_comment1_header);
    else PRINT_IF_GUID(g, ff_asf_data_header);
    else PRINT_IF_GUID(g, index_guid);
    else PRINT_IF_GUID(g, ff_asf_head1_guid);
    else PRINT_IF_GUID(g, ff_asf_head2_guid);
    else PRINT_IF_GUID(g, ff_asf_my_guid);
    else PRINT_IF_GUID(g, ff_asf_ext_stream_header);
    else PRINT_IF_GUID(g, ff_asf_extended_content_header);
    else PRINT_IF_GUID(g, ff_asf_ext_stream_embed_stream_header);
    else PRINT_IF_GUID(g, ff_asf_ext_stream_audio_stream);
    else PRINT_IF_GUID(g, ff_asf_metadata_header);
    else PRINT_IF_GUID(g, ff_asf_marker_header);
    else PRINT_IF_GUID(g, stream_bitrate_guid);
    else PRINT_IF_GUID(g, ff_asf_language_guid);
    else
        dprintf(NULL, "(GUID: unknown) ");
    for(i=0;i<16;i++)
        dprintf(NULL, " 0x%02x,", (*g)[i]);
    dprintf(NULL, "}\n");
}
#undef PRINT_IF_GUID
#else
#define print_guid(g)
#endif
/* Read a 16-byte GUID from the stream into *g. */
static void get_guid(ByteIOContext *s, ff_asf_guid *g)
{
    assert(sizeof(*g) == 16);
    get_buffer(s, *g, sizeof(*g));
}
#if 0
/* Disabled legacy reader: length-prefixed UTF-16LE string, converted by
 * naive truncation of each code unit to one byte (no real UTF-8 output).
 * Superseded by get_str16_nolen() below. */
static void get_str16(ByteIOContext *pb, char *buf, int buf_size)
{
    int len, c;
    char *q;

    len = get_le16(pb);
    q = buf;
    while (len > 0) {
        c = get_le16(pb);
        if ((q - buf) < buf_size - 1)
            *q++ = c;
        len--;
    }
    *q = '\0';
}
#endif
/**
 * Read 'len' bytes of UTF-16LE from pb and store them as UTF-8 in buf.
 * Output is always 0-terminated and truncated to buf_size-1 bytes; any
 * unread input bytes are skipped so the stream advances by exactly len.
 */
static void get_str16_nolen(ByteIOContext *pb, int len, char *buf, int buf_size)
{
    char* q = buf;
    while (len > 1) {
        uint8_t tmp;
        uint32_t ch;

        GET_UTF16(ch, (len -= 2) >= 0 ? get_le16(pb) : 0, break;)
        PUT_UTF8(ch, tmp, if (q - buf < buf_size - 1) *q++ = tmp;)
    }
    /* skip a trailing odd byte, if any */
    if (len > 0)
        url_fskip(pb, len);
    *q = '\0';
}
/* Probe: an ASF file starts with the 16-byte ASF Header Object GUID. */
static int asf_probe(AVProbeData *pd)
{
    /* check file header */
    if (!guidcmp(pd->buf, &ff_asf_header))
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}
/**
 * Read an ASF attribute value of the given type.
 * Type 2 (BOOL) is stored on disk as a DWORD, hence it is read exactly
 * like type 3. Unknown types yield INT_MIN.
 * NOTE(review): the int return truncates 64-bit values (type 4).
 */
static int get_value(ByteIOContext *pb, int type){
    switch(type){
        case 2: return get_le32(pb);
        case 3: return get_le32(pb);
        case 4: return get_le64(pb);
        case 5: return get_le16(pb);
        default:return INT_MIN;
    }
}
/**
 * Read one metadata value of 'len' bytes from the stream and store it under
 * 'key' in s->metadata. Type 0 is a UTF-16LE string (converted to UTF-8);
 * types 2-5 are integers formatted as decimal; other types are skipped.
 */
static void get_tag(AVFormatContext *s, const char *key, int type, int len)
{
    char *value;

    /* guard the 2*len+1 allocation below against overflow */
    if ((unsigned)len >= (UINT_MAX - 1)/2)
        return;

    value = av_malloc(2*len+1);
    if (!value)
        return;

    if (type == 0) {         // UTF16-LE
        get_str16_nolen(s->pb, len, value, 2*len + 1);
    } else if (type > 1 && type <= 5) {  // boolean or DWORD or QWORD or WORD
        uint64_t num = get_value(s->pb, type);
        /* fix: bound snprintf by the actual buffer size (2*len+1), not by
         * 'len' — len is the on-disk size of the binary value (e.g. 4 for a
         * DWORD), which truncated the printed decimal string */
        snprintf(value, 2*len + 1, "%"PRIu64, num);
    } else {
        url_fskip(s->pb, len);
        av_freep(&value);
        av_log(s, AV_LOG_DEBUG, "Unsupported value type %d in tag %s.\n", type, key);
        return;
    }

    av_metadata_set2(&s->metadata, key, value, 0);
    av_freep(&value);
}
static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
ASFContext *asf = s->priv_data;
ff_asf_guid g;
ByteIOContext *pb = s->pb;
AVStream *st;
ASFStream *asf_st;
int size, i;
int64_t gsize;
AVRational dar[128];
uint32_t bitrate[128];
memset(dar, 0, sizeof(dar));
memset(bitrate, 0, sizeof(bitrate));
get_guid(pb, &g);
if (guidcmp(&g, &ff_asf_header))
return -1;
get_le64(pb);
get_le32(pb);
get_byte(pb);
get_byte(pb);
memset(&asf->asfid2avid, -1, sizeof(asf->asfid2avid));
for(;;) {
uint64_t gpos= url_ftell(pb);
get_guid(pb, &g);
gsize = get_le64(pb);
dprintf(s, "%08"PRIx64": ", gpos);
print_guid(&g);
dprintf(s, " size=0x%"PRIx64"\n", gsize);
if (!guidcmp(&g, &ff_asf_data_header)) {
asf->data_object_offset = url_ftell(pb);
// if not streaming, gsize is not unlimited (how?), and there is enough space in the file..
if (!(asf->hdr.flags & 0x01) && gsize >= 100) {
asf->data_object_size = gsize - 24;
} else {
asf->data_object_size = (uint64_t)-1;
}
break;
}
if (gsize < 24)
return -1;
if (!guidcmp(&g, &ff_asf_file_header)) {
get_guid(pb, &asf->hdr.guid);
asf->hdr.file_size = get_le64(pb);
asf->hdr.create_time = get_le64(pb);
asf->nb_packets = get_le64(pb);
asf->hdr.play_time = get_le64(pb);
asf->hdr.send_time = get_le64(pb);
asf->hdr.preroll = get_le32(pb);
asf->hdr.ignore = get_le32(pb);
asf->hdr.flags = get_le32(pb);
asf->hdr.min_pktsize = get_le32(pb);
asf->hdr.max_pktsize = get_le32(pb);
asf->hdr.max_bitrate = get_le32(pb);
s->packet_size = asf->hdr.max_pktsize;
} else if (!guidcmp(&g, &ff_asf_stream_header)) {
enum AVMediaType type;
int type_specific_size, sizeX;
uint64_t total_size;
unsigned int tag1;
int64_t pos1, pos2, start_time;
int test_for_ext_stream_audio, is_dvr_ms_audio=0;
pos1 = url_ftell(pb);
st = av_new_stream(s, 0);
if (!st)
return AVERROR(ENOMEM);
av_set_pts_info(st, 32, 1, 1000); /* 32 bit pts in ms */
asf_st = av_mallocz(sizeof(ASFStream));
if (!asf_st)
return AVERROR(ENOMEM);
st->priv_data = asf_st;
start_time = asf->hdr.preroll;
asf_st->stream_language_index = 128; // invalid stream index means no language info
if(!(asf->hdr.flags & 0x01)) { // if we aren't streaming...
st->duration = asf->hdr.play_time /
(10000000 / 1000) - start_time;
}
get_guid(pb, &g);
test_for_ext_stream_audio = 0;
if (!guidcmp(&g, &ff_asf_audio_stream)) {
type = AVMEDIA_TYPE_AUDIO;
} else if (!guidcmp(&g, &ff_asf_video_stream)) {
type = AVMEDIA_TYPE_VIDEO;
} else if (!guidcmp(&g, &ff_asf_command_stream)) {
type = AVMEDIA_TYPE_DATA;
} else if (!guidcmp(&g, &ff_asf_ext_stream_embed_stream_header)) {
test_for_ext_stream_audio = 1;
type = AVMEDIA_TYPE_UNKNOWN;
} else {
return -1;
}
get_guid(pb, &g);
total_size = get_le64(pb);
type_specific_size = get_le32(pb);
get_le32(pb);
st->id = get_le16(pb) & 0x7f; /* stream id */
// mapping of asf ID to AV stream ID;
asf->asfid2avid[st->id] = s->nb_streams - 1;
get_le32(pb);
if (test_for_ext_stream_audio) {
get_guid(pb, &g);
if (!guidcmp(&g, &ff_asf_ext_stream_audio_stream)) {
type = AVMEDIA_TYPE_AUDIO;
is_dvr_ms_audio=1;
get_guid(pb, &g);
get_le32(pb);
get_le32(pb);
get_le32(pb);
get_guid(pb, &g);
get_le32(pb);
}
}
st->codec->codec_type = type;
if (type == AVMEDIA_TYPE_AUDIO) {
ff_get_wav_header(pb, st->codec, type_specific_size);
if (is_dvr_ms_audio) {
// codec_id and codec_tag are unreliable in dvr_ms
// files. Set them later by probing stream.
st->codec->codec_id = CODEC_ID_PROBE;
st->codec->codec_tag = 0;
}
if (st->codec->codec_id == CODEC_ID_AAC) {
st->need_parsing = AVSTREAM_PARSE_NONE;
} else {
st->need_parsing = AVSTREAM_PARSE_FULL;
}
/* We have to init the frame size at some point .... */
pos2 = url_ftell(pb);
if (gsize >= (pos2 + 8 - pos1 + 24)) {
asf_st->ds_span = get_byte(pb);
asf_st->ds_packet_size = get_le16(pb);
asf_st->ds_chunk_size = get_le16(pb);
get_le16(pb); //ds_data_size
get_byte(pb); //ds_silence_data
}
//printf("Descrambling: ps:%d cs:%d ds:%d s:%d sd:%d\n",
// asf_st->ds_packet_size, asf_st->ds_chunk_size,
// asf_st->ds_data_size, asf_st->ds_span, asf_st->ds_silence_data);
if (asf_st->ds_span > 1) {
if (!asf_st->ds_chunk_size
|| (asf_st->ds_packet_size/asf_st->ds_chunk_size <= 1)
|| asf_st->ds_packet_size % asf_st->ds_chunk_size)
asf_st->ds_span = 0; // disable descrambling
}
switch (st->codec->codec_id) {
case CODEC_ID_MP3:
st->codec->frame_size = MPA_FRAME_SIZE;
break;
case CODEC_ID_PCM_S16LE:
case CODEC_ID_PCM_S16BE:
case CODEC_ID_PCM_U16LE:
case CODEC_ID_PCM_U16BE:
case CODEC_ID_PCM_S8:
case CODEC_ID_PCM_U8:
case CODEC_ID_PCM_ALAW:
case CODEC_ID_PCM_MULAW:
st->codec->frame_size = 1;
break;
default:
/* This is probably wrong, but it prevents a crash later */
st->codec->frame_size = 1;
break;
}
} else if (type == AVMEDIA_TYPE_VIDEO) {
get_le32(pb);
get_le32(pb);
get_byte(pb);
size = get_le16(pb); /* size */
sizeX= get_le32(pb); /* size */
st->codec->width = get_le32(pb);
st->codec->height = get_le32(pb);
/* not available for asf */
get_le16(pb); /* panes */
st->codec->bits_per_coded_sample = get_le16(pb); /* depth */
tag1 = get_le32(pb);
url_fskip(pb, 20);
// av_log(s, AV_LOG_DEBUG, "size:%d tsize:%d sizeX:%d\n", size, total_size, sizeX);
size= sizeX;
if (size > 40) {
st->codec->extradata_size = size - 40;
st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
}
/* Extract palette from extradata if bpp <= 8 */
/* This code assumes that extradata contains only palette */
/* This is true for all paletted codecs implemented in ffmpeg */
if (st->codec->extradata_size && (st->codec->bits_per_coded_sample <= 8)) {
st->codec->palctrl = av_mallocz(sizeof(AVPaletteControl));
#if HAVE_BIGENDIAN
for (i = 0; i < FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)/4; i++)
st->codec->palctrl->palette[i] = bswap_32(((uint32_t*)st->codec->extradata)[i]);
#else
memcpy(st->codec->palctrl->palette, st->codec->extradata,
FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
#endif
st->codec->palctrl->palette_changed = 1;
}
st->codec->codec_tag = tag1;
st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, tag1);
if(tag1 == MKTAG('D', 'V', 'R', ' '))
st->need_parsing = AVSTREAM_PARSE_FULL;
}
pos2 = url_ftell(pb);
url_fskip(pb, gsize - (pos2 - pos1 + 24));
} else if (!guidcmp(&g, &ff_asf_comment_header)) {
int len1, len2, len3, len4, len5;
len1 = get_le16(pb);
len2 = get_le16(pb);
len3 = get_le16(pb);
len4 = get_le16(pb);
len5 = get_le16(pb);
get_tag(s, "title" , 0, len1);
get_tag(s, "author" , 0, len2);
get_tag(s, "copyright", 0, len3);
get_tag(s, "comment" , 0, len4);
url_fskip(pb, len5);
} else if (!guidcmp(&g, &stream_bitrate_guid)) {
int stream_count = get_le16(pb);
int j;
// av_log(s, AV_LOG_ERROR, "stream bitrate properties\n");
// av_log(s, AV_LOG_ERROR, "streams %d\n", streams);
for(j = 0; j < stream_count; j++) {
int flags, bitrate, stream_id;
flags= get_le16(pb);
bitrate= get_le32(pb);
stream_id= (flags & 0x7f);
// av_log(s, AV_LOG_ERROR, "flags: 0x%x stream id %d, bitrate %d\n", flags, stream_id, bitrate);
asf->stream_bitrates[stream_id]= bitrate;
}
} else if (!guidcmp(&g, &ff_asf_language_guid)) {
int j;
int stream_count = get_le16(pb);
for(j = 0; j < stream_count; j++) {
char lang[6];
unsigned int lang_len = get_byte(pb);
get_str16_nolen(pb, lang_len, lang, sizeof(lang));
if (j < 128)
av_strlcpy(asf->stream_languages[j], lang, sizeof(*asf->stream_languages));
}
} else if (!guidcmp(&g, &ff_asf_extended_content_header)) {
int desc_count, i;
desc_count = get_le16(pb);
for(i=0;i<desc_count;i++) {
int name_len,value_type,value_len;
char name[1024];
name_len = get_le16(pb);
if (name_len%2) // must be even, broken lavf versions wrote len-1
name_len += 1;
get_str16_nolen(pb, name_len, name, sizeof(name));
value_type = get_le16(pb);
value_len = get_le16(pb);
if (!value_type && value_len%2)
value_len += 1;
get_tag(s, name, value_type, value_len);
}
} else if (!guidcmp(&g, &ff_asf_metadata_header)) {
int n, stream_num, name_len, value_len, value_type, value_num;
n = get_le16(pb);
for(i=0;i<n;i++) {
char name[1024];
get_le16(pb); //lang_list_index
stream_num= get_le16(pb);
name_len= get_le16(pb);
value_type= get_le16(pb);
value_len= get_le32(pb);
get_str16_nolen(pb, name_len, name, sizeof(name));
//av_log(s, AV_LOG_ERROR, "%d %d %d %d %d <%s>\n", i, stream_num, name_len, value_type, value_len, name);
value_num= get_le16(pb);//we should use get_value() here but it does not work 2 is le16 here but le32 elsewhere
url_fskip(pb, value_len - 2);
if(stream_num<128){
if (!strcmp(name, "AspectRatioX")) dar[stream_num].num= value_num;
else if(!strcmp(name, "AspectRatioY")) dar[stream_num].den= value_num;
}
}
} else if (!guidcmp(&g, &ff_asf_ext_stream_header)) {
int ext_len, payload_ext_ct, stream_ct;
uint32_t ext_d, leak_rate, stream_num;
unsigned int stream_languageid_index;
get_le64(pb); // starttime
get_le64(pb); // endtime
leak_rate = get_le32(pb); // leak-datarate
get_le32(pb); // bucket-datasize
get_le32(pb); // init-bucket-fullness
get_le32(pb); // alt-leak-datarate
get_le32(pb); // alt-bucket-datasize
get_le32(pb); // alt-init-bucket-fullness
get_le32(pb); // max-object-size
get_le32(pb); // flags (reliable,seekable,no_cleanpoints?,resend-live-cleanpoints, rest of bits reserved)
stream_num = get_le16(pb); // stream-num
stream_languageid_index = get_le16(pb); // stream-language-id-index
if (stream_num < 128)
asf->streams[stream_num].stream_language_index = stream_languageid_index;
get_le64(pb); // avg frametime in 100ns units
stream_ct = get_le16(pb); //stream-name-count
payload_ext_ct = get_le16(pb); //payload-extension-system-count
if (stream_num < 128)
bitrate[stream_num] = leak_rate;
for (i=0; i<stream_ct; i++){
get_le16(pb);
ext_len = get_le16(pb);
url_fseek(pb, ext_len, SEEK_CUR);
}
for (i=0; i<payload_ext_ct; i++){
get_guid(pb, &g);
ext_d=get_le16(pb);
ext_len=get_le32(pb);
url_fseek(pb, ext_len, SEEK_CUR);
}
// there could be a optional stream properties object to follow
// if so the next iteration will pick it up
} else if (!guidcmp(&g, &ff_asf_head1_guid)) {
int v1, v2;
get_guid(pb, &g);
v1 = get_le32(pb);
v2 = get_le16(pb);
} else if (!guidcmp(&g, &ff_asf_marker_header)) {
int i, count, name_len;
char name[1024];
get_le64(pb); // reserved 16 bytes
get_le64(pb); // ...
count = get_le32(pb); // markers count
get_le16(pb); // reserved 2 bytes
name_len = get_le16(pb); // name length
for(i=0;i<name_len;i++){
get_byte(pb); // skip the name
}
for(i=0;i<count;i++){
int64_t pres_time;
int name_len;
get_le64(pb); // offset, 8 bytes
pres_time = get_le64(pb); // presentation time
get_le16(pb); // entry length
get_le32(pb); // send time
get_le32(pb); // flags
name_len = get_le32(pb); // name length
get_str16_nolen(pb, name_len * 2, name, sizeof(name));
ff_new_chapter(s, i, (AVRational){1, 10000000}, pres_time, AV_NOPTS_VALUE, name );
}
#if 0
} else if (!guidcmp(&g, &ff_asf_codec_comment_header)) {
int len, v1, n, num;
char str[256], *q;
char tag[16];
get_guid(pb, &g);
print_guid(&g);
n = get_le32(pb);
for(i=0;i<n;i++) {
num = get_le16(pb); /* stream number */
get_str16(pb, str, sizeof(str));
get_str16(pb, str, sizeof(str));
len = get_le16(pb);
q = tag;
while (len > 0) {
v1 = get_byte(pb);
if ((q - tag) < sizeof(tag) - 1)
*q++ = v1;
len--;
}
*q = '\0';
}
#endif
} else if (url_feof(pb)) {
return -1;
} else {
if (!s->keylen) {
if (!guidcmp(&g, &ff_asf_content_encryption)) {
av_log(s, AV_LOG_WARNING, "DRM protected stream detected, decoding will likely fail!\n");
} else if (!guidcmp(&g, &ff_asf_ext_content_encryption)) {
av_log(s, AV_LOG_WARNING, "Ext DRM protected stream detected, decoding will likely fail!\n");
} else if (!guidcmp(&g, &ff_asf_digital_signature)) {
av_log(s, AV_LOG_WARNING, "Digital signature detected, decoding will likely fail!\n");
}
}
}
if(url_ftell(pb) != gpos + gsize)
av_log(s, AV_LOG_DEBUG, "gpos mismatch our pos=%"PRIu64", end=%"PRIu64"\n", url_ftell(pb)-gpos, gsize);
url_fseek(pb, gpos + gsize, SEEK_SET);
}
get_guid(pb, &g);
get_le64(pb);
get_byte(pb);
get_byte(pb);
if (url_feof(pb))
return -1;
asf->data_offset = url_ftell(pb);
asf->packet_size_left = 0;
for(i=0; i<128; i++){
int stream_num= asf->asfid2avid[i];
if(stream_num>=0){
AVStream *st = s->streams[stream_num];
if (!st->codec->bit_rate)
st->codec->bit_rate = bitrate[i];
if (dar[i].num > 0 && dar[i].den > 0)
av_reduce(&st->sample_aspect_ratio.num,
&st->sample_aspect_ratio.den,
dar[i].num, dar[i].den, INT_MAX);
//av_log(s, AV_LOG_ERROR, "dar %d:%d sar=%d:%d\n", dar[i].num, dar[i].den, st->sample_aspect_ratio.num, st->sample_aspect_ratio.den);
// copy and convert language codes to the frontend
if (asf->streams[i].stream_language_index < 128) {
const char *rfc1766 = asf->stream_languages[asf->streams[i].stream_language_index];
if (rfc1766 && strlen(rfc1766) > 1) {
const char primary_tag[3] = { rfc1766[0], rfc1766[1], '\0' }; // ignore country code if any
const char *iso6392 = av_convert_lang_to(primary_tag, AV_LANG_ISO639_2_BIBL);
if (iso6392)
av_metadata_set2(&st->metadata, "language", iso6392, 0);
}
}
}
}
return 0;
}
/**
 * Read a field whose width is selected by a 2-bit code taken from a flags
 * byte: 3 -> 32-bit LE, 2 -> 16-bit LE, 1 -> 8-bit, 0 -> field absent
 * (var is set to defval).  Side effects: reads from `pb` and adds the
 * number of bytes consumed to `rsize`; both identifiers must be in scope
 * at the expansion site.
 */
#define DO_2BITS(bits, var, defval) \
    switch (bits & 3) \
    { \
    case 3: var = get_le32(pb); rsize += 4; break; \
    case 2: var = get_le16(pb); rsize += 2; break; \
    case 1: var = get_byte(pb); rsize++; break; \
    default: var = defval; break; \
    }
/**
 * Load a single ASF packet into the demuxer.
 *
 * Resynchronizes on the 0x82 0x00 0x00 error-correction pattern, parses
 * the payload parsing information (packet length, padding, timestamp,
 * segment count) into the ASFContext and leaves pb positioned at the
 * first payload fragment.
 *
 * @param s demux context
 * @param pb context to read data from
 * @return 0 on success, <0 on error
 */
static int ff_asf_get_packet(AVFormatContext *s, ByteIOContext *pb)
{
    ASFContext *asf = s->priv_data;
    uint32_t packet_length, padsize;
    int rsize = 8;            /* bytes of the packet header consumed so far */
    int c, d, e, off;

    // if we do not know packet size, allow skipping up to 32 kB
    off= 32768;
    if (s->packet_size > 0)
        off= (url_ftell(pb) - s->data_offset) % s->packet_size + 3;

    /* Scan for the 0x82 0x00 0x00 sync pattern within the allowed window;
       c/d/e hold the last three bytes read. */
    c=d=e=-1;
    while(off-- > 0){
        c=d; d=e;
        e= get_byte(pb);
        if(c == 0x82 && !d && !e)
            break;
    }

    if (c != 0x82) {
        /**
         * This code allows handling of -EAGAIN at packet boundaries (i.e.
         * if the packet sync code above triggers -EAGAIN). This does not
         * imply complete -EAGAIN handling support at random positions in
         * the stream.
         */
        if (url_ferror(pb) == AVERROR(EAGAIN))
            return AVERROR(EAGAIN);
        if (!url_feof(pb))
            av_log(s, AV_LOG_ERROR, "ff asf bad header %x at:%"PRId64"\n", c, url_ftell(pb));
    }
    if ((c & 0x8f) == 0x82) {
        /* Error correction data present: the two zero bytes were its body;
           the real flag/property bytes follow. */
        if (d || e) {
            if (!url_feof(pb))
                av_log(s, AV_LOG_ERROR, "ff asf bad non zero\n");
            return -1;
        }
        c= get_byte(pb);
        d= get_byte(pb);
        rsize+=3;
    }else{
        url_fseek(pb, -1, SEEK_CUR); //FIXME
    }

    asf->packet_flags = c;
    asf->packet_property = d;

    /* Variable-width fields, selector bit pairs live in the flags byte. */
    DO_2BITS(asf->packet_flags >> 5, packet_length, s->packet_size);
    DO_2BITS(asf->packet_flags >> 1, padsize, 0); // sequence ignored
    DO_2BITS(asf->packet_flags >> 3, padsize, 0); // padding length

    //the following checks prevent overflows and infinite loops
    if(!packet_length || packet_length >= (1U<<29)){
        av_log(s, AV_LOG_ERROR, "invalid packet_length %d at:%"PRId64"\n", packet_length, url_ftell(pb));
        return -1;
    }
    if(padsize >= packet_length){
        av_log(s, AV_LOG_ERROR, "invalid padsize %d at:%"PRId64"\n", padsize, url_ftell(pb));
        return -1;
    }

    asf->packet_timestamp = get_le32(pb);
    get_le16(pb); /* duration */
    // rsize has at least 11 bytes which have to be present

    if (asf->packet_flags & 0x01) {
        /* Multiple payloads: low 6 bits of the extra byte are the count. */
        asf->packet_segsizetype = get_byte(pb); rsize++;
        asf->packet_segments = asf->packet_segsizetype & 0x3f;
    } else {
        asf->packet_segments = 1;
        asf->packet_segsizetype = 0x80;
    }
    asf->packet_size_left = packet_length - padsize - rsize;
    if (packet_length < asf->hdr.min_pktsize)
        padsize += asf->hdr.min_pktsize - packet_length;
    asf->packet_padsize = padsize;
    dprintf(s, "packet: size=%d padsize=%d left=%d\n", s->packet_size, asf->packet_padsize, asf->packet_size_left);
    return 0;
}
/**
 * Parse the header of one payload fragment inside the current ASF packet
 * and fill the fragment-related fields of the ASFContext.
 *
 * @return <0 if error
 */
static int asf_read_frame_header(AVFormatContext *s, ByteIOContext *pb){
    ASFContext *asf = s->priv_data;
    int rsize = 1;                  /* fragment header bytes consumed */
    int num = get_byte(pb);         /* bit 7: key frame; bits 0-6: stream number */
    int64_t ts0, ts1;

    asf->packet_segments--;
    asf->packet_key_frame = num >> 7;
    asf->stream_index = asf->asfid2avid[num & 0x7f];
    // sequence should be ignored!
    DO_2BITS(asf->packet_property >> 4, asf->packet_seq, 0);
    DO_2BITS(asf->packet_property >> 2, asf->packet_frag_offset, 0);
    DO_2BITS(asf->packet_property, asf->packet_replic_size, 0);
    //printf("key:%d stream:%d seq:%d offset:%d replic_size:%d\n", asf->packet_key_frame, asf->stream_index, asf->packet_seq, //asf->packet_frag_offset, asf->packet_replic_size);
    if (asf->packet_replic_size >= 8) {
        /* Normal replicated data: media object size + 32-bit timestamp. */
        asf->packet_obj_size = get_le32(pb);
        if(asf->packet_obj_size >= (1<<24) || asf->packet_obj_size <= 0){
            av_log(s, AV_LOG_ERROR, "packet_obj_size invalid\n");
            return -1;
        }
        asf->packet_frag_timestamp = get_le32(pb); // timestamp
        if(asf->packet_replic_size >= 8+38+4){
            /* Extended replicated data: prefer the first 64-bit timestamp
               found inside it (100ns units), -1 meaning "no timestamp". */
            // for(i=0; i<asf->packet_replic_size-8; i++)
            // av_log(s, AV_LOG_DEBUG, "%02X ",get_byte(pb));
            // av_log(s, AV_LOG_DEBUG, "\n");
            url_fskip(pb, 10);
            ts0= get_le64(pb);
            ts1= get_le64(pb);
            url_fskip(pb, 12);
            get_le32(pb);
            url_fskip(pb, asf->packet_replic_size - 8 - 38 - 4);
            if(ts0!= -1) asf->packet_frag_timestamp= ts0/10000;
            else asf->packet_frag_timestamp= AV_NOPTS_VALUE;
        }else
            url_fskip(pb, asf->packet_replic_size - 8);
        rsize += asf->packet_replic_size; // FIXME - check validity
    } else if (asf->packet_replic_size==1){
        // multipacket - frag_offset is beginning timestamp
        asf->packet_time_start = asf->packet_frag_offset;
        asf->packet_frag_offset = 0;
        asf->packet_frag_timestamp = asf->packet_timestamp;

        asf->packet_time_delta = get_byte(pb);
        rsize++;
    }else if(asf->packet_replic_size!=0){
        av_log(s, AV_LOG_ERROR, "unexpected packet_replic_size of %d\n", asf->packet_replic_size);
        return -1;
    }
    if (asf->packet_flags & 0x01) {
        DO_2BITS(asf->packet_segsizetype >> 6, asf->packet_frag_size, 0); // 0 is illegal
        if(asf->packet_frag_size > asf->packet_size_left - rsize){
            av_log(s, AV_LOG_ERROR, "packet_frag_size is invalid\n");
            return -1;
        }
        //printf("Fragsize %d\n", asf->packet_frag_size);
    } else {
        /* Single payload: the fragment takes the rest of the packet. */
        asf->packet_frag_size = asf->packet_size_left - rsize;
        //printf("Using rest %d %d %d\n", asf->packet_frag_size, asf->packet_size_left, rsize);
    }
    if (asf->packet_replic_size == 1) {
        asf->packet_multi_size = asf->packet_frag_size;
        if (asf->packet_multi_size > asf->packet_size_left)
            return -1;
    }
    asf->packet_size_left -= rsize;
    //printf("___objsize____ %d %d rs:%d\n", asf->packet_obj_size, asf->packet_frag_offset, rsize);

    return 0;
}
/**
 * Parse data from individual ASF packets (which were previously loaded
 * with asf_get_packet()).
 * @param s demux context
 * @param pb context to read data from
 * @param pkt pointer to store packet data into
 * @return 0 if data was stored in pkt, <0 on error or 1 if more ASF
 * packets need to be loaded (through asf_get_packet())
 */
static int ff_asf_parse_packet(AVFormatContext *s, ByteIOContext *pb, AVPacket *pkt)
{
    ASFContext *asf = s->priv_data;
    ASFStream *asf_st = 0;
    for (;;) {
        if(url_feof(pb))
            return AVERROR_EOF;
        if (asf->packet_size_left < FRAME_HEADER_SIZE
            || asf->packet_segments < 1) {
            /* Current ASF packet exhausted: skip what is left (payload
               remainder + padding) and ask the caller for the next one. */
            //asf->packet_size_left <= asf->packet_padsize) {
            int ret = asf->packet_size_left + asf->packet_padsize;
            //printf("PacketLeftSize:%d Pad:%d Pos:%"PRId64"\n", asf->packet_size_left, asf->packet_padsize, url_ftell(pb));
            assert(ret>=0);
            /* fail safe */
            url_fskip(pb, ret);

            asf->packet_pos= url_ftell(pb);
            if (asf->data_object_size != (uint64_t)-1 &&
                (asf->packet_pos - asf->data_object_offset >= asf->data_object_size))
                return AVERROR_EOF; /* Do not exceed the size of the data object */
            return 1;
        }
        if (asf->packet_time_start == 0) {
            /* Not inside a compressed multi-payload run: read the next
               fragment header. */
            if(asf_read_frame_header(s, pb) < 0){
                asf->packet_segments= 0;
                continue;
            }
            /* Drop fragments for unknown or discarded streams. */
            if (asf->stream_index < 0
                || s->streams[asf->stream_index]->discard >= AVDISCARD_ALL
                || (!asf->packet_key_frame && s->streams[asf->stream_index]->discard >= AVDISCARD_NONKEY)
                ) {
                asf->packet_time_start = 0;
                /* unhandled packet (should not happen) */
                url_fskip(pb, asf->packet_frag_size);
                asf->packet_size_left -= asf->packet_frag_size;
                if(asf->stream_index < 0)
                    av_log(s, AV_LOG_ERROR, "ff asf skip %d (unknown stream)\n", asf->packet_frag_size);
                continue;
            }
            asf->asf_st = s->streams[asf->stream_index]->priv_data;
        }
        asf_st = asf->asf_st;

        if (asf->packet_replic_size == 1) {
            // frag_offset is here used as the beginning timestamp
            asf->packet_frag_timestamp = asf->packet_time_start;
            asf->packet_time_start += asf->packet_time_delta;

            asf->packet_obj_size = asf->packet_frag_size = get_byte(pb);
            asf->packet_size_left--;
            asf->packet_multi_size--;
            if (asf->packet_multi_size < asf->packet_obj_size)
            {
                /* Sub-packet would overrun the multi-payload area: drop it. */
                asf->packet_time_start = 0;
                url_fskip(pb, asf->packet_multi_size);
                asf->packet_size_left -= asf->packet_multi_size;
                continue;
            }
            asf->packet_multi_size -= asf->packet_obj_size;
            //printf("COMPRESS size %d %d %d ms:%d\n", asf->packet_obj_size, asf->packet_frag_timestamp, asf->packet_size_left, asf->packet_multi_size);
        }
        /* Sanity-check the advertised media object size against what has
           actually been accumulated so far. */
        if( /*asf->packet_frag_size == asf->packet_obj_size*/
            asf_st->frag_offset + asf->packet_frag_size <= asf_st->pkt.size
            && asf_st->frag_offset + asf->packet_frag_size > asf->packet_obj_size){
            av_log(s, AV_LOG_INFO, "ignoring invalid packet_obj_size (%d %d %d %d)\n",
                asf_st->frag_offset, asf->packet_frag_size,
                asf->packet_obj_size, asf_st->pkt.size);
            asf->packet_obj_size= asf_st->pkt.size;
        }

        if ( asf_st->pkt.size != asf->packet_obj_size
            || asf_st->frag_offset + asf->packet_frag_size > asf_st->pkt.size) { //FIXME is this condition sufficient?
            if(asf_st->pkt.data){
                /* A different media object starts: discard the half-built one. */
                av_log(s, AV_LOG_INFO, "freeing incomplete packet size %d, new %d\n", asf_st->pkt.size, asf->packet_obj_size);
                asf_st->frag_offset = 0;
                av_free_packet(&asf_st->pkt);
            }
            /* new packet */
            av_new_packet(&asf_st->pkt, asf->packet_obj_size);
            asf_st->seq = asf->packet_seq;
            asf_st->pkt.dts = asf->packet_frag_timestamp;
            asf_st->pkt.stream_index = asf->stream_index;
            asf_st->pkt.pos =
            asf_st->packet_pos= asf->packet_pos;
            //printf("new packet: stream:%d key:%d packet_key:%d audio:%d size:%d\n",
            //asf->stream_index, asf->packet_key_frame, asf_st->pkt.flags & AV_PKT_FLAG_KEY,
            //s->streams[asf->stream_index]->codec->codec_type == AVMEDIA_TYPE_AUDIO, asf->packet_obj_size);
            if (s->streams[asf->stream_index]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
                asf->packet_key_frame = 1;
            if (asf->packet_key_frame)
                asf_st->pkt.flags |= AV_PKT_FLAG_KEY;
        }

        /* read data */
        //printf("READ PACKET s:%d os:%d o:%d,%d l:%d DATA:%p\n",
        // s->packet_size, asf_st->pkt.size, asf->packet_frag_offset,
        // asf_st->frag_offset, asf->packet_frag_size, asf_st->pkt.data);
        asf->packet_size_left -= asf->packet_frag_size;
        if (asf->packet_size_left < 0)
            continue;

        if( asf->packet_frag_offset >= asf_st->pkt.size
            || asf->packet_frag_size > asf_st->pkt.size - asf->packet_frag_offset){
            av_log(s, AV_LOG_ERROR, "packet fragment position invalid %u,%u not in %u\n",
                asf->packet_frag_offset, asf->packet_frag_size, asf_st->pkt.size);
            continue;
        }

        get_buffer(pb, asf_st->pkt.data + asf->packet_frag_offset,
                   asf->packet_frag_size);
        if (s->key && s->keylen == 20)
            /* DRM-style payload scrambling: decrypt in place. */
            ff_asfcrypt_dec(s->key, asf_st->pkt.data + asf->packet_frag_offset,
                            asf->packet_frag_size);
        asf_st->frag_offset += asf->packet_frag_size;
        /* test if whole packet is read */
        if (asf_st->frag_offset == asf_st->pkt.size) {
            //workaround for macroshit radio DVR-MS files
            if( s->streams[asf->stream_index]->codec->codec_id == CODEC_ID_MPEG2VIDEO
                && asf_st->pkt.size > 100){
                int i;
                /* All-zero MPEG-2 packets are dummies: drop them. */
                for(i=0; i<asf_st->pkt.size && !asf_st->pkt.data[i]; i++);
                if(i == asf_st->pkt.size){
                    av_log(s, AV_LOG_DEBUG, "discarding ms fart\n");
                    asf_st->frag_offset = 0;
                    av_free_packet(&asf_st->pkt);
                    continue;
                }
            }

            /* return packet */
            if (asf_st->ds_span > 1) {
                if(asf_st->pkt.size != asf_st->ds_packet_size * asf_st->ds_span){
                    av_log(s, AV_LOG_ERROR, "pkt.size != ds_packet_size * ds_span (%d %d %d)\n", asf_st->pkt.size, asf_st->ds_packet_size, asf_st->ds_span);
                }else{
                    /* packet descrambling */
                    uint8_t *newdata = av_malloc(asf_st->pkt.size);
                    if (newdata) {
                        int offset = 0;
                        /* Undo the row/column interleave chunk by chunk. */
                        while (offset < asf_st->pkt.size) {
                            int off = offset / asf_st->ds_chunk_size;
                            int row = off / asf_st->ds_span;
                            int col = off % asf_st->ds_span;
                            int idx = row + col * asf_st->ds_packet_size / asf_st->ds_chunk_size;
                            //printf("off:%d row:%d col:%d idx:%d\n", off, row, col, idx);

                            assert(offset + asf_st->ds_chunk_size <= asf_st->pkt.size);
                            assert(idx+1 <= asf_st->pkt.size / asf_st->ds_chunk_size);
                            memcpy(newdata + offset,
                                   asf_st->pkt.data + idx * asf_st->ds_chunk_size,
                                   asf_st->ds_chunk_size);
                            offset += asf_st->ds_chunk_size;
                        }
                        av_free(asf_st->pkt.data);
                        asf_st->pkt.data = newdata;
                    }
                }
            }
            asf_st->frag_offset = 0;
            /* Hand ownership of the assembled packet to the caller. */
            *pkt= asf_st->pkt;
            //printf("packet %d %d\n", asf_st->pkt.size, asf->packet_frag_size);
            asf_st->pkt.size = 0;
            asf_st->pkt.data = 0;
            break; // packet completed
        }
    }
    return 0;
}
/**
 * AVInputFormat.read_packet callback: alternate between draining frames
 * buffered from the current ASF packet and loading fresh ASF packets.
 */
static int asf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    ASFContext *asf = s->priv_data;

    while (1) {
        /* Deliver any frame still buffered from the current ASF packet. */
        int res = ff_asf_parse_packet(s, s->pb, pkt);
        if (res <= 0)
            return res;

        /* Need another ASF packet from the byte stream. */
        res = ff_asf_get_packet(s, s->pb);
        if (res < 0)
            assert(asf->packet_size_left < FRAME_HEADER_SIZE || asf->packet_segments < 1);
        asf->packet_time_start = 0;
    }
    return 0;
}
/**
 * Reset all per-packet demuxer state and drop partially assembled
 * per-stream packets.
 *
 * Added to support seeking after packets have been read; if this
 * information is not reset, read_packet fails due to leftover state
 * from previous reads.
 *
 * Note: the original code assigned packet_segments twice; the redundant
 * second assignment has been removed.
 */
static void asf_reset_header(AVFormatContext *s)
{
    ASFContext *asf = s->priv_data;
    ASFStream *asf_st;
    int i;

    asf->packet_nb_frames = 0;
    asf->packet_size_left = 0;
    asf->packet_segments = 0;
    asf->packet_flags = 0;
    asf->packet_property = 0;
    asf->packet_timestamp = 0;
    asf->packet_segsizetype = 0;
    asf->packet_seq = 0;
    asf->packet_replic_size = 0;
    asf->packet_key_frame = 0;
    asf->packet_padsize = 0;
    asf->packet_frag_offset = 0;
    asf->packet_frag_size = 0;
    asf->packet_frag_timestamp = 0;
    asf->packet_multi_size = 0;
    asf->packet_obj_size = 0;
    asf->packet_time_delta = 0;
    asf->packet_time_start = 0;

    /* Free half-built packets so the next read starts clean. */
    for(i=0; i<s->nb_streams; i++){
        asf_st= s->streams[i]->priv_data;
        av_free_packet(&asf_st->pkt);
        asf_st->frag_offset=0;
        asf_st->seq=0;
    }
    asf->asf_st= NULL;
}
/**
 * AVInputFormat.read_close callback: flush demuxer state and release the
 * palette control structures attached to each stream's codec context.
 */
static int asf_read_close(AVFormatContext *s)
{
    int n;

    asf_reset_header(s);

    for (n = 0; n < s->nb_streams; n++)
        av_free(s->streams[n]->codec->palctrl);

    return 0;
}
/**
 * Read frames until a key frame of the requested stream is found, adding
 * index entries for every key frame seen on the way.
 *
 * Fix: the packet was freed with av_free_packet() *before* its flags,
 * stream_index and size fields were read (av_free_packet also clears
 * data/size), so the index entry was built from a released packet.  The
 * free now happens only after the last use of pkt.
 *
 * @param s demux context
 * @param stream_index stream whose key frame terminates the scan
 * @param ppos in: search start position; out: position of the key frame
 * @param pos_limit unused (kept for the read_timestamp callback signature)
 * @return pts of the key frame, or AV_NOPTS_VALUE on failure
 */
static int64_t asf_read_pts(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit)
{
    AVPacket pkt1, *pkt = &pkt1;
    ASFStream *asf_st;
    int64_t pts;
    int64_t pos= *ppos;
    int i;
    int64_t start_pos[s->nb_streams];

    for(i=0; i<s->nb_streams; i++){
        start_pos[i]= pos;
    }

    /* Round up to the next packet boundary so parsing starts in sync. */
    if (s->packet_size > 0)
        pos= (pos+s->packet_size-1-s->data_offset)/s->packet_size*s->packet_size+ s->data_offset;
    *ppos= pos;
    url_fseek(s->pb, pos, SEEK_SET);

    //printf("asf_read_pts\n");
    asf_reset_header(s);
    for(;;){
        if (av_read_frame(s, pkt) < 0){
            av_log(s, AV_LOG_INFO, "asf_read_pts failed\n");
            return AV_NOPTS_VALUE;
        }

        pts = pkt->pts;

        if(pkt->flags&AV_PKT_FLAG_KEY){
            i= pkt->stream_index;

            asf_st= s->streams[i]->priv_data;

            // assert((asf_st->packet_pos - s->data_offset) % s->packet_size == 0);
            pos= asf_st->packet_pos;

            av_add_index_entry(s->streams[i], pos, pts, pkt->size, pos - start_pos[i] + 1, AVINDEX_KEYFRAME);
            start_pos[i]= asf_st->packet_pos + 1;

            if(pkt->stream_index == stream_index) {
                av_free_packet(pkt);
                break;
            }
        }
        av_free_packet(pkt);
    }

    *ppos= pos;
    //printf("found keyframe at %"PRId64" stream %d stamp:%"PRId64"\n", *ppos, stream_index, pts);

    return pts;
}
/**
 * Read the simple index object that may follow the data object and turn
 * its entries into index entries for the given stream.  Restores the
 * original file position before returning; sets asf->index_read on
 * success so the index is only parsed once.
 */
static void asf_build_simple_index(AVFormatContext *s, int stream_index)
{
    ff_asf_guid g;
    ASFContext *asf = s->priv_data;
    int64_t current_pos= url_ftell(s->pb);
    int i;

    /* The index object, when present, starts right after the data object. */
    url_fseek(s->pb, asf->data_object_offset + asf->data_object_size, SEEK_SET);
    get_guid(s->pb, &g);
    if (!guidcmp(&g, &index_guid)) {
        int64_t itime, last_pos=-1;
        int pct, ict;
        int64_t av_unused gsize= get_le64(s->pb);
        get_guid(s->pb, &g);
        itime=get_le64(s->pb);   /* time interval per entry (rescaled below) */
        pct=get_le32(s->pb);     /* packet count per entry (only logged) */
        ict=get_le32(s->pb);     /* number of index entries */
        av_log(s, AV_LOG_DEBUG, "itime:0x%"PRIx64", pct:%d, ict:%d\n",itime,pct,ict);

        for (i=0;i<ict;i++){
            int pktnum=get_le32(s->pb);
            int pktct =get_le16(s->pb);
            int64_t pos = s->data_offset + s->packet_size*(int64_t)pktnum;
            int64_t index_pts= av_rescale(itime, i, 10000);

            if(pos != last_pos){
                /* Skip consecutive entries that point at the same packet. */
                av_log(s, AV_LOG_DEBUG, "pktnum:%d, pktct:%d\n", pktnum, pktct);
                av_add_index_entry(s->streams[stream_index], pos, index_pts, s->packet_size, 0, AVINDEX_KEYFRAME);
                last_pos=pos;
            }
        }
        asf->index_read= 1;
    }
    url_fseek(s->pb, current_pos, SEEK_SET);
}
/**
 * AVInputFormat.read_seek callback.  Seek strategies, in order of
 * preference: the protocol's own read_seek, the on-disk simple index,
 * and finally a generic binary search via av_seek_frame_binary().
 */
static int asf_read_seek(AVFormatContext *s, int stream_index, int64_t pts, int flags)
{
    ASFContext *asf = s->priv_data;
    AVStream *st = s->streams[stream_index];
    int64_t pos;
    int index;

    if (s->packet_size <= 0)
        return -1;

    /* Try using the protocol's read_seek if available */
    if(s->pb) {
        int ret = av_url_read_fseek(s->pb, stream_index, pts, flags);
        if(ret >= 0)
            asf_reset_header(s);
        if (ret != AVERROR(ENOSYS))
            return ret;
    }

    /* Parse the on-disk simple index lazily, the first time we seek. */
    if (!asf->index_read)
        asf_build_simple_index(s, stream_index);

    if(!(asf->index_read && st->index_entries)){
        /* No usable index: binary-search using asf_read_pts(). */
        if(av_seek_frame_binary(s, stream_index, pts, flags)<0)
            return -1;
    }else{
        index= av_index_search_timestamp(st, pts, flags);
        if(index<0)
            return -1;

        /* find the position */
        pos = st->index_entries[index].pos;

        // various attempts to find key frame have failed so far
        //    asf_reset_header(s);
        //    url_fseek(s->pb, pos, SEEK_SET);
        //    key_pos = pos;
        //     for(i=0;i<16;i++){
        //         pos = url_ftell(s->pb);
        //         if (av_read_frame(s, &pkt) < 0){
        //             av_log(s, AV_LOG_INFO, "seek failed\n");
        //             return -1;
        //         }
        //         asf_st = s->streams[stream_index]->priv_data;
        //         pos += st->parser->frame_offset;
        //
        //         if (pkt.size > b) {
        //             b = pkt.size;
        //             key_pos = pos;
        //         }
        //
        //         av_free_packet(&pkt);
        //     }

        /* do the seek */
        av_log(s, AV_LOG_DEBUG, "SEEKTO: %"PRId64"\n", pos);
        url_fseek(s->pb, pos, SEEK_SET);
    }
    asf_reset_header(s);
    return 0;
}
/* ASF demuxer definition.  Fields are positional: name, long_name,
   priv_data_size, then the probe/header/packet/close/seek/timestamp
   callbacks, followed by the designated metadata_conv initializer. */
AVInputFormat asf_demuxer = {
    "asf",
    NULL_IF_CONFIG_SMALL("ASF format"),
    sizeof(ASFContext),
    asf_probe,
    asf_read_header,
    asf_read_packet,
    asf_read_close,
    asf_read_seek,
    asf_read_pts,
    .metadata_conv = ff_asf_metadata_conv,
};
| 123linslouis-android-video-cutter | jni/libavformat/asfdec.c | C | asf20 | 45,082 |
/*
* RTP packetization for H.263 video
* Copyright (c) 2009 Luca Abeni
* Copyright (c) 2009 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "rtpenc.h"
/**
 * Scan backwards through (start, end) for a resync marker: a pair of zero
 * bytes followed by a non-zero byte.  The probe moves two bytes at a time
 * and checks both alignments of the zero pair at each stop.  Returns a
 * pointer to the first zero byte of the marker, or end when none is
 * found.  Never returns the original start, so the caller always makes
 * forward progress.
 */
static const uint8_t *find_resync_marker_reverse(const uint8_t *restrict start,
                                                 const uint8_t *restrict end)
{
    const uint8_t *probe;
    const uint8_t *lower_bound = start + 1; /* exclude start itself */

    for (probe = end - 1; probe > lower_bound; probe -= 2) {
        if (*probe)
            continue;
        if (!probe[1] && probe[2])
            return probe;        /* zero pair begins at probe */
        if (!probe[-1] && probe[1])
            return probe - 1;    /* zero pair begins one byte earlier */
    }
    return end;
}
/**
 * Packetize H.263 frames into RTP packets according to RFC 4629
 *
 * @param s1 format context whose priv_data is the RTP muxer context
 * @param buf1 H.263 bitstream data for one frame
 * @param size number of bytes in buf1
 */
void ff_rtp_send_h263(AVFormatContext *s1, const uint8_t *buf1, int size)
{
    RTPMuxContext *s = s1->priv_data;
    int len, max_packet_size;
    uint8_t *q;

    max_packet_size = s->max_payload_size;

    while (size > 0) {
        q = s->buf;
        /* Build the 2-byte RFC 4629 payload header. */
        if (size >= 2 && (buf1[0] == 0) && (buf1[1] == 0)) {
            /* Payload starts with a start code: drop its two zero bytes
               and signal that via the P bit (0x04) in the header. */
            *q++ = 0x04;
            buf1 += 2;
            size -= 2;
        } else {
            *q++ = 0;
        }
        *q++ = 0;

        len = FFMIN(max_packet_size - 2, size);

        /* Look for a better place to split the frame into packets. */
        if (len < size) {
            const uint8_t *end = find_resync_marker_reverse(buf1, buf1 + len);
            len = end - buf1;
        }

        memcpy(q, buf1, len);
        q += len;

        /* 90 KHz time stamp */
        s->timestamp = s->cur_timestamp;
        /* Last argument marks the RTP marker bit on the final fragment. */
        ff_rtp_send_data(s1, s->buf, q - s->buf, (len == size));

        buf1 += len;
        size -= len;
    }
}
| 123linslouis-android-video-cutter | jni/libavformat/rtpenc_h263.c | C | asf20 | 2,386 |
/*
* Motion Pixels MVI Demuxer
* Copyright (c) 2008 Gregory Montoir (cyx@users.sourceforge.net)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#define MVI_FRAC_BITS 10
#define MVI_AUDIO_STREAM_INDEX 0
#define MVI_VIDEO_STREAM_INDEX 1
typedef struct MviDemuxContext {
    unsigned int (*get_int)(ByteIOContext *); // reads the video frame size: get_le16 or get_le24 depending on frame area
    uint32_t audio_data_size;                 // total audio bytes in the file, from the header
    uint64_t audio_size_counter;              // audio pacing accumulator, fixed point (MVI_FRAC_BITS)
    uint64_t audio_frame_size;                // audio bytes per video frame, fixed point (MVI_FRAC_BITS)
    int audio_size_left;                      // audio bytes not yet delivered
    int video_frame_size;                     // size of the pending video frame; 0 means "deliver audio next"
} MviDemuxContext;
/**
 * Parse the MVI file header and create one audio (PCM U8) and one video
 * (Motion Pixels) stream.
 *
 * Fixes over the previous version:
 *  - the extradata allocation result is checked before it is written
 *    (it was dereferenced unconditionally);
 *  - audio_frame_size is checked against zero before being used as a
 *    divisor (a crafted file could trigger a division by zero).
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    MviDemuxContext *mvi = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *ast, *vst;
    unsigned int version, frames_count, msecs_per_frame, player_version;

    ast = av_new_stream(s, 0);
    if (!ast)
        return AVERROR(ENOMEM);

    vst = av_new_stream(s, 0);
    if (!vst)
        return AVERROR(ENOMEM);

    vst->codec->extradata_size = 2;
    vst->codec->extradata = av_mallocz(2 + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!vst->codec->extradata)
        return AVERROR(ENOMEM);

    version = get_byte(pb);
    vst->codec->extradata[0] = get_byte(pb);
    vst->codec->extradata[1] = get_byte(pb);
    frames_count = get_le32(pb);
    msecs_per_frame = get_le32(pb);
    vst->codec->width = get_le16(pb);
    vst->codec->height = get_le16(pb);
    get_byte(pb);
    ast->codec->sample_rate = get_le16(pb);
    mvi->audio_data_size = get_le32(pb);
    get_byte(pb);
    player_version = get_le32(pb);
    get_le16(pb);
    get_byte(pb);

    if (frames_count == 0 || mvi->audio_data_size == 0)
        return AVERROR_INVALIDDATA;

    if (version != 7 || player_version > 213) {
        av_log(s, AV_LOG_ERROR, "unhandled version (%d,%d)\n", version, player_version);
        return AVERROR_INVALIDDATA;
    }

    av_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
    ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    ast->codec->codec_id = CODEC_ID_PCM_U8;
    ast->codec->channels = 1;
    ast->codec->bits_per_coded_sample = 8;
    ast->codec->bit_rate = ast->codec->sample_rate * 8;

    av_set_pts_info(vst, 64, msecs_per_frame, 1000000);
    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id = CODEC_ID_MOTIONPIXELS;

    /* Small frames use 16-bit video size fields, larger ones 24-bit. */
    mvi->get_int = (vst->codec->width * vst->codec->height < (1 << 16)) ? get_le16 : get_le24;

    mvi->audio_frame_size = ((uint64_t)mvi->audio_data_size << MVI_FRAC_BITS) / frames_count;
    if (!mvi->audio_frame_size) {
        /* Used as a divisor right below; reject degenerate files. */
        av_log(s, AV_LOG_ERROR, "audio_frame_size is 0\n");
        return AVERROR_INVALIDDATA;
    }
    mvi->audio_size_counter = (ast->codec->sample_rate * 830 / mvi->audio_frame_size - 1) * mvi->audio_frame_size;
    mvi->audio_size_left = mvi->audio_data_size;

    return 0;
}
/**
 * Demux interleaved audio and video.  When no video frame is pending,
 * the next video frame size is read first and an audio slice (paced by
 * the fixed-point audio_size_counter) is returned; the following call
 * then returns the video frame itself.
 */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, count;
    MviDemuxContext *mvi = s->priv_data;
    ByteIOContext *pb = s->pb;

    if (mvi->video_frame_size == 0) {
        /* Remember the upcoming video frame size, deliver audio first. */
        mvi->video_frame_size = (mvi->get_int)(pb);
        if (mvi->audio_size_left == 0)
            return AVERROR(EIO);
        /* Convert the fixed-point pacing counter into whole bytes. */
        count = (mvi->audio_size_counter + mvi->audio_frame_size + 512) >> MVI_FRAC_BITS;
        if (count > mvi->audio_size_left)
            count = mvi->audio_size_left;
        if ((ret = av_get_packet(pb, pkt, count)) < 0)
            return ret;
        pkt->stream_index = MVI_AUDIO_STREAM_INDEX;
        mvi->audio_size_left -= count;
        /* Carry the fractional remainder into the next slice. */
        mvi->audio_size_counter += mvi->audio_frame_size - (count << MVI_FRAC_BITS);
    } else {
        if ((ret = av_get_packet(pb, pkt, mvi->video_frame_size)) < 0)
            return ret;
        pkt->stream_index = MVI_VIDEO_STREAM_INDEX;
        mvi->video_frame_size = 0;  /* next call reads audio again */
    }
    return 0;
}
/* MVI demuxer definition.  The read_probe slot is NULL, so files are
   matched by the "mvi" extension only. */
AVInputFormat mvi_demuxer = {
    "mvi",
    NULL_IF_CONFIG_SMALL("Motion Pixels MVI format"),
    sizeof(MviDemuxContext),
    NULL,
    read_header,
    read_packet,
    .extensions = "mvi"
};
| 123linslouis-android-video-cutter | jni/libavformat/mvi.c | C | asf20 | 4,524 |
/*
* ISS (.iss) file demuxer
* Copyright (c) 2008 Jaikrishnan Menon <realityman@gmx.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Funcom ISS file demuxer
* @author Jaikrishnan Menon
* for more information on the .iss file format, visit:
* http://wiki.multimedia.cx/index.php?title=FunCom_ISS
*/
#include "avformat.h"
#include "libavutil/avstring.h"
#define ISS_SIG "IMA_ADPCM_Sound"
#define ISS_SIG_LEN 15
#define MAX_TOKEN_SIZE 20
typedef struct {
    int packet_size;      // fixed ADPCM block size, parsed from the text header
    int sample_start_pos; // file offset where sample data begins (basis for PTS)
} IssDemuxContext;
/**
 * Read one space-terminated token from the header into buf.
 * The token is truncated to maxlen-1 characters but always
 * NUL-terminated.  A NUL byte also ends the token, in which case one
 * extra byte is consumed.
 */
static void get_token(ByteIOContext *s, char *buf, int maxlen)
{
    int i = 0;
    char c;

    while ((c = get_byte(s))) {
        if(c == ' ')
            break;
        if (i < maxlen-1)
            buf[i++] = c;
    }

    if(!c)
        get_byte(s);  /* token ended on NUL: skip the following byte too */

    buf[i] = 0; /* Ensure null terminated, but may be truncated */
}
/**
 * Probe callback: an ISS file starts with the literal
 * "IMA_ADPCM_Sound" signature.
 */
static int iss_probe(AVProbeData *p)
{
    int matches = memcmp(p->buf, ISS_SIG, ISS_SIG_LEN) == 0;
    return matches ? AVPROBE_SCORE_MAX : 0;
}
/**
 * Parse the space-separated ISS text header and set up the audio stream.
 *
 * Fixes over the previous version:
 *  - stereo and rate_divisor are initialized, so a failing sscanf no
 *    longer leaves them uninitialized (undefined behavior when read);
 *  - packet_size is validated, since a non-positive value would make
 *    iss_read_packet request invalid reads.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static av_cold int iss_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    IssDemuxContext *iss = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st;
    char token[MAX_TOKEN_SIZE];
    int stereo = 0, rate_divisor = 1; /* sane defaults if parsing fails */

    get_token(pb, token, sizeof(token)); //"IMA_ADPCM_Sound"
    get_token(pb, token, sizeof(token)); //packet size
    sscanf(token, "%d", &iss->packet_size);
    get_token(pb, token, sizeof(token)); //File ID
    get_token(pb, token, sizeof(token)); //out size
    get_token(pb, token, sizeof(token)); //stereo
    sscanf(token, "%d", &stereo);
    get_token(pb, token, sizeof(token)); //Unknown1
    get_token(pb, token, sizeof(token)); //RateDivisor
    sscanf(token, "%d", &rate_divisor);
    get_token(pb, token, sizeof(token)); //Unknown2
    get_token(pb, token, sizeof(token)); //Version ID
    get_token(pb, token, sizeof(token)); //Size

    if (iss->packet_size <= 0) {
        av_log(s, AV_LOG_ERROR, "invalid packet_size %d\n", iss->packet_size);
        return AVERROR_INVALIDDATA;
    }

    iss->sample_start_pos = url_ftell(pb);

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = CODEC_ID_ADPCM_IMA_ISS;
    st->codec->channels = stereo ? 2 : 1;
    st->codec->sample_rate = 44100;
    if(rate_divisor > 0)
        st->codec->sample_rate /= rate_divisor;
    st->codec->bits_per_coded_sample = 4;
    st->codec->bit_rate = st->codec->channels * st->codec->sample_rate
                          * st->codec->bits_per_coded_sample;
    st->codec->block_align = iss->packet_size;
    av_set_pts_info(st, 32, 1, st->codec->sample_rate);

    return 0;
}
/**
 * Read one fixed-size ADPCM block and derive its pts from the byte
 * position past the header (two input bytes per channel per tick).
 */
static int iss_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    IssDemuxContext *iss = s->priv_data;
    AVCodecContext *codec = s->streams[0]->codec;
    int bytes_read;

    bytes_read = av_get_packet(s->pb, pkt, iss->packet_size);
    if (bytes_read != iss->packet_size)
        return AVERROR(EIO);

    pkt->stream_index = 0;
    pkt->pts = url_ftell(s->pb) - iss->sample_start_pos;
    if (codec->channels > 0)
        pkt->pts /= codec->channels * 2;
    return 0;
}
/* ISS demuxer definition.  Fields are positional: name, long_name,
   priv_data_size, read_probe, read_header, read_packet. */
AVInputFormat iss_demuxer = {
    "ISS",
    NULL_IF_CONFIG_SMALL("Funcom ISS format"),
    sizeof(IssDemuxContext),
    iss_probe,
    iss_read_header,
    iss_read_packet,
};
| 123linslouis-android-video-cutter | jni/libavformat/iss.c | C | asf20 | 3,892 |
/*
* seek utility functions for use within format handlers
*
* Copyright (c) 2009 Ivan Schreter
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_SEEK_H
#define AVFORMAT_SEEK_H
#include "avformat.h"
/**
 * structure to store parser state of one AVStream, so it can be saved
 * before a speculative seek and restored afterwards
 */
typedef struct AVParserStreamState {
    // saved members of AVStream; each mirrors the AVStream field of the
    // same name (see AVStream in avformat.h for their semantics)
    AVCodecParserContext *parser;
    AVPacket cur_pkt;            ///< packet being split into frames
    int64_t last_IP_pts;
    int64_t cur_dts;
    int64_t reference_dts;
    const uint8_t *cur_ptr;      ///< read position inside cur_pkt's data
    int cur_len;                 ///< bytes remaining at cur_ptr
    int probe_packets;
} AVParserStreamState;
/**
* structure to store parser state of AVFormat
*/
typedef struct AVParserState {
int64_t fpos; ///< file position at the time of call
// saved members of AVFormatContext
AVStream *cur_st; ///< current stream.
AVPacketList *packet_buffer; ///< packet buffer of original state
AVPacketList *raw_packet_buffer; ///< raw packet buffer of original state
int raw_packet_buffer_remaining_size; ///< remaining space in raw_packet_buffer
// saved info for streams
int nb_streams; ///< number of streams with stored state
AVParserStreamState *stream_states; ///< states of individual streams (array)
} AVParserState;
/**
* Search for the sync point of all active streams.
*
* This routine is not supposed to be called directly by a user application,
* but by demuxers.
*
* A sync point is defined as a point in stream, such that, when decoding start
* from this point, the decoded output of all streams synchronizes closest
* to the given timestamp ts. This routine also takes timestamp limits into account.
* Thus, the output will synchronize no sooner than ts_min and no later than ts_max.
*
* @param stream_index stream index for time base reference of timestamps
* @param pos approximate position where to start searching for key frames
* @param min_ts minimum allowed timestamp (position, if AVSEEK_FLAG_BYTE set)
* @param ts target timestamp (or position, if AVSEEK_FLAG_BYTE set in flags)
* @param max_ts maximum allowed timestamp (position, if AVSEEK_FLAG_BYTE set)
* @param flags if AVSEEK_FLAG_ANY is set, seek to any frame, otherwise only
* to a keyframe. If AVSEEK_FLAG_BYTE is set, search by
* position, not by timestamp.
* @return -1 if no such sync point could be found, otherwise stream position
* (stream is repositioned to this position)
*/
int64_t ff_gen_syncpoint_search(AVFormatContext *s,
int stream_index,
int64_t pos,
int64_t min_ts,
int64_t ts,
int64_t max_ts,
int flags);
/**
* Store current parser state and file position.
*
* This function can be used by demuxers before a destructive seeking algorithm
* to store the parser state. Depending on the outcome of the seek, either the original
* state can be restored or the new state kept and the original state freed.
*
* @note As a side effect, the original parser state is reset, since structures
* are relinked to the stored state instead of being deeply-copied (for
* performance reasons and to keep the code simple).
*
* @param s context from which to save state
* @return parser state object or NULL if memory could not be allocated
*/
AVParserState *ff_store_parser_state(AVFormatContext *s);
/**
* Restore previously saved parser state and file position.
*
* Saved state will be invalidated and freed by this call, since internal
* structures will be relinked back to the stored state instead of being
* deeply-copied.
*
* @param s context to which to restore state (same as used for storing state)
* @param state state to restore
*/
void ff_restore_parser_state(AVFormatContext *s, AVParserState *state);
/**
* Free previously saved parser state.
*
* @param s context to which the state belongs (same as used for storing state)
* @param state state to free
*/
void ff_free_parser_state(AVFormatContext *s, AVParserState *state);
#endif /* AVFORMAT_SEEK_H */
| 123linslouis-android-video-cutter | jni/libavformat/seek.h | C | asf20 | 5,134 |
/*
* Concat URL protocol
* Copyright (c) 2006 Steve Lhomme
* Copyright (c) 2007 Wolfram Gloger
* Copyright (c) 2010 Michele Orrù
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "libavutil/avstring.h"
#include "libavutil/mem.h"
#define AV_CAT_SEPARATOR "|"
/* One member file of the concatenation. */
struct concat_nodes {
    URLContext *uc; ///< node's URLContext
    int64_t size;   ///< url filesize
};
/* Protocol-private state: the ordered list of nodes and the read cursor. */
struct concat_data {
    struct concat_nodes *nodes; ///< list of nodes to concat
    size_t length;              ///< number of cat'ed nodes
    size_t current;             ///< index of currently read node
};
/* Close every node URL and release the protocol-private state.
 * Returns 0 on success, -1 if any node failed to close. */
static av_cold int concat_close(URLContext *h)
{
    struct concat_data *ctx = h->priv_data;
    int failed = 0;
    size_t idx;

    /* close all nodes, accumulating any error indication */
    for (idx = 0; idx != ctx->length; idx++)
        failed |= url_close(ctx->nodes[idx].uc);

    av_freep(&ctx->nodes);
    av_freep(&h->priv_data);

    return failed < 0 ? -1 : 0;
}
/* Open a "concat:file1|file2|..." URL: split the URI on '|', open each
 * sub-URL, record its size, and shrink the node array to the final count.
 * On any failure all already-opened nodes are closed via concat_close(). */
static av_cold int concat_open(URLContext *h, const char *uri, int flags)
{
    char *node_uri = NULL, *tmp_uri;
    int err = 0;
    int64_t size;
    size_t len, i;
    URLContext *uc;
    struct concat_data *data;
    struct concat_nodes *nodes;
    av_strstart(uri, "concat:", &uri);
    /* creating data */
    if (!(data = av_mallocz(sizeof(*data))))
        return AVERROR(ENOMEM);
    h->priv_data = data;
    /* count separators to size the node array (len = node count upper bound) */
    for (i = 0, len = 1; uri[i]; i++)
        if (uri[i] == *AV_CAT_SEPARATOR)
            /* integer overflow */
            /* NOTE(review): guard uses UINT_MAX although len is size_t;
             * overly strict on LP64 but safe — confirm intent */
            if (++len == UINT_MAX / sizeof(*nodes)) {
                av_freep(&h->priv_data);
                return AVERROR(ENAMETOOLONG);
            }
    if (!(nodes = av_malloc(sizeof(*nodes) * len))) {
        av_freep(&h->priv_data);
        return AVERROR(ENOMEM);
    } else
        data->nodes = nodes;
    /* handle input */
    if (!*uri)
        err = AVERROR(ENOENT);
    for (i = 0; *uri; i++) {
        /* parsing uri: copy the span up to the next separator */
        len = strcspn(uri, AV_CAT_SEPARATOR);
        if (!(tmp_uri = av_realloc(node_uri, len+1))) {
            err = AVERROR(ENOMEM);
            break;
        } else
            node_uri = tmp_uri;
        av_strlcpy(node_uri, uri, len+1);
        /* skip past the separator run to the next node URI */
        uri += len + strspn(uri+len, AV_CAT_SEPARATOR);
        /* creating URLContext */
        if ((err = url_open(&uc, node_uri, flags)) < 0)
            break;
        /* creating size: a node with unknown size cannot be concatenated */
        if ((size = url_filesize(uc)) < 0) {
            url_close(uc);
            err = AVERROR(ENOSYS);
            break;
        }
        /* assembling */
        nodes[i].uc = uc;
        nodes[i].size = size;
    }
    av_free(node_uri);
    data->length = i;
    if (err < 0)
        concat_close(h);
    else if (!(nodes = av_realloc(nodes, data->length * sizeof(*nodes)))) {
        /* shrink-to-fit failed: treat as fatal and tear everything down */
        concat_close(h);
        err = AVERROR(ENOMEM);
    } else
        data->nodes = nodes;
    return err;
}
/* Read up to size bytes, transparently crossing node boundaries: when the
 * current node hits EOF, rewind the next node and continue from it.
 * Returns the number of bytes read, or the error from the first failed read
 * if nothing was read yet. */
static int concat_read(URLContext *h, unsigned char *buf, int size)
{
    int result, total = 0;
    struct concat_data *data = h->priv_data;
    struct concat_nodes *nodes = data->nodes;
    size_t i = data->current;
    while (size > 0) {
        result = url_read(nodes[i].uc, buf, size);
        if (result < 0)
            return total ? total : result;
        if (!result)
            /* EOF on this node: stop at the last node, or advance and
             * rewind the next one (note ++i side effect) */
            if (i + 1 == data->length ||
                url_seek(nodes[++i].uc, 0, SEEK_SET) < 0)
                break;
        total += result;
        buf += result;
        size -= result;
    }
    data->current = i;
    return total;
}
/* Seek within the virtual concatenated stream: map the requested position to
 * a (node, offset) pair, seek that node, and return the absolute position. */
static int64_t concat_seek(URLContext *h, int64_t pos, int whence)
{
    int64_t result;
    struct concat_data *data = h->priv_data;
    struct concat_nodes *nodes = data->nodes;
    size_t i;
    switch (whence) {
    case SEEK_END:
        /* walk backwards from the last node until pos fits inside node i */
        for (i = data->length - 1;
             i && pos < -nodes[i].size;
             i--)
            pos += nodes[i].size;
        break;
    case SEEK_CUR:
        /* get the absolute position */
        for (i = 0; i != data->current; i++)
            pos += nodes[i].size;
        pos += url_seek(nodes[i].uc, 0, SEEK_CUR);
        whence = SEEK_SET;
        /* fall through with the absolute position */
    case SEEK_SET:
        /* consume whole nodes until pos lands inside node i */
        for (i = 0; i != data->length - 1 && pos >= nodes[i].size; i++)
            pos -= nodes[i].size;
        break;
    default:
        return AVERROR(EINVAL);
    }
    result = url_seek(nodes[i].uc, pos, whence);
    if (result >= 0) {
        data->current = i;
        /* convert node-local position back to a global offset */
        while (i)
            result += nodes[--i].size;
    }
    return result;
}
/* Protocol registration: positional URLProtocol initializer. */
URLProtocol concat_protocol = {
    "concat",       /* name */
    concat_open,    /* url_open */
    concat_read,    /* url_read */
    NULL,           /* url_write: concat is read-only */
    concat_seek,    /* url_seek */
    concat_close,   /* url_close */
};
| 123linslouis-android-video-cutter | jni/libavformat/concat.c | C | asf20 | 5,414 |
/*
* GXF demuxer.
* Copyright (c) 2006 Reimar Doeffinger
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/common.h"
#include "avformat.h"
#include "gxf.h"
/* Per-track information collected from MAP packet material/track tags. */
struct gxf_stream_info {
    int64_t first_field;            ///< first field number, AV_NOPTS_VALUE if unknown
    int64_t last_field;             ///< last field number, AV_NOPTS_VALUE if unknown
    AVRational frames_per_second;   ///< frame rate from TRACK_FPS tag, {0,0} if unknown
    int32_t fields_per_frame;       ///< 1 or 2 from TRACK_FPF tag, 0 if unknown
};
/**
* \brief parses a packet header, extracting type and length
* \param pb ByteIOContext to read header from
* \param type detected packet type is stored here
* \param length detected packet length, excluding header is stored here
* \return 0 if header not found or contains invalid data, 1 otherwise
*/
/* Parse one GXF packet header: 32-bit zero leader, 0x01, type byte, 32-bit
 * length (includes the 16-byte header), 32-bit zero, 0xe1 0xe2 trailer.
 * Stores type and payload length (header excluded); returns 1 on success,
 * 0 if the header is absent or malformed. */
static int parse_packet_header(ByteIOContext *pb, GXFPktType *type, int *length) {
    /* leader: four zero bytes then 0x01 (short-circuit keeps read order) */
    if (get_be32(pb) || get_byte(pb) != 1)
        return 0;
    *type   = get_byte(pb);
    *length = get_be32(pb);
    /* length must fit in 24 bits and cover at least the header itself */
    if ((*length >> 24) || *length < 16)
        return 0;
    *length -= 16;
    /* trailer: 32-bit zero followed by the 0xe1 0xe2 magic */
    if (get_be32(pb) || get_byte(pb) != 0xe1 || get_byte(pb) != 0xe2)
        return 0;
    return 1;
}
/**
* \brief check if file starts with a PKT_MAP header
*/
/* Probe: a GXF file starts with a MAP packet header whose 16 bytes begin
 * with the start code and end with the 0xe1 0xe2 trailer. */
static int gxf_probe(AVProbeData *p) {
    static const uint8_t startcode[] = {0, 0, 0, 0, 1, 0xbc}; // start with map packet
    static const uint8_t endcode[] = {0, 0, 0, 0, 0xe1, 0xe2};
    /* reject on the first mismatch; otherwise this is certainly GXF */
    if (memcmp(p->buf, startcode, sizeof(startcode)) ||
        memcmp(&p->buf[16 - sizeof(endcode)], endcode, sizeof(endcode)))
        return 0;
    return AVPROBE_SCORE_MAX;
}
/**
* \brief gets the stream index for the track with the specified id, creates new
* stream if not found
* \param stream id of stream to find / add
* \param format stream format identifier
*/
/**
 * \brief gets the stream index for the track with the specified id, creates new
 *        stream if not found
 * \param id     id of stream to find / add
 * \param format stream format identifier
 * \return stream index, or AVERROR(ENOMEM) if a new stream could not be created
 */
static int get_sindex(AVFormatContext *s, int id, int format) {
    int idx;
    AVStream *st;

    /* reuse an existing stream carrying this track id */
    for (idx = 0; idx < s->nb_streams; idx++)
        if (s->streams[idx]->id == id)
            return idx;

    st = av_new_stream(s, id);
    if (!st)
        return AVERROR(ENOMEM);

    switch (format) {
    case 3:
    case 4:
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = CODEC_ID_MJPEG;
        break;
    /* the four DV variants all map to the same decoder */
    case 13:
    case 14:
    case 15:
    case 16:
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = CODEC_ID_DVVIDEO;
        break;
    case 11:
    case 12:
    case 20:
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = CODEC_ID_MPEG2VIDEO;
        st->need_parsing = AVSTREAM_PARSE_HEADERS; //get keyframe flag etc.
        break;
    case 22:
    case 23:
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = CODEC_ID_MPEG1VIDEO;
        st->need_parsing = AVSTREAM_PARSE_HEADERS; //get keyframe flag etc.
        break;
    case 9:  /* mono 24-bit PCM at 48 kHz */
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id   = CODEC_ID_PCM_S24LE;
        st->codec->channels   = 1;
        st->codec->sample_rate = 48000;
        st->codec->bit_rate    = 3 * 1 * 48000 * 8;
        st->codec->block_align = 3 * 1;
        st->codec->bits_per_coded_sample = 24;
        break;
    case 10: /* mono 16-bit PCM at 48 kHz */
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id   = CODEC_ID_PCM_S16LE;
        st->codec->channels   = 1;
        st->codec->sample_rate = 48000;
        st->codec->bit_rate    = 2 * 1 * 48000 * 8;
        st->codec->block_align = 2 * 1;
        st->codec->bits_per_coded_sample = 16;
        break;
    case 17:
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id   = CODEC_ID_AC3;
        st->codec->channels   = 2;
        st->codec->sample_rate = 48000;
        break;
    // timecode tracks:
    case 7:
    case 8:
    case 24:
        st->codec->codec_type = AVMEDIA_TYPE_DATA;
        st->codec->codec_id   = CODEC_ID_NONE;
        break;
    default:
        st->codec->codec_type = AVMEDIA_TYPE_UNKNOWN;
        st->codec->codec_id   = CODEC_ID_NONE;
        break;
    }
    return s->nb_streams - 1;
}
/**
* \brief filters out interesting tags from material information.
* \param len length of tag section, will be adjusted to contain remaining bytes
* \param si struct to store collected information into
*/
/**
 * \brief filters out interesting tags from material information.
 * \param len length of tag section, will be adjusted to contain remaining bytes
 * \param si  struct to store collected information into
 */
static void gxf_material_tags(ByteIOContext *pb, int *len, struct gxf_stream_info *si) {
    si->first_field = AV_NOPTS_VALUE;
    si->last_field  = AV_NOPTS_VALUE;

    for (;;) {
        GXFMatTag tag;
        int tag_len;

        if (*len < 2)
            break;
        tag     = get_byte(pb);
        tag_len = get_byte(pb);
        *len   -= 2;
        /* a tag claiming more bytes than remain is corrupt: stop here */
        if (tag_len > *len)
            return;
        *len -= tag_len;

        if (tag_len != 4) {
            url_fskip(pb, tag_len); /* only 32-bit values are of interest */
            continue;
        }
        {
            uint32_t v = get_be32(pb);
            if (tag == MAT_FIRST_FIELD)
                si->first_field = v;
            else if (tag == MAT_LAST_FIELD)
                si->last_field = v;
        }
    }
}
/**
* \brief convert fps tag value to AVRational fps
* \param fps fps value from tag
* \return fps as AVRational, or 0 / 0 if unknown
*/
static AVRational fps_tag2avr(int32_t fps) {
    extern const AVRational ff_frame_rate_tab[];
    /* clamp out-of-range tag values to the "unknown" slot */
    if (fps < 1 || fps > 9) fps = 9;
    return ff_frame_rate_tab[9 - fps]; // values have opposite order
}
/**
* \brief convert UMF attributes flags to AVRational fps
* \param fps fps value from flags
* \return fps as AVRational, or 0 / 0 if unknown
*/
static AVRational fps_umf2avr(uint32_t flags) {
    static const AVRational map[] = {{50, 1}, {60000, 1001}, {24, 1},
                                     {25, 1}, {30000, 1001}};
    /* bits 6..10 are a one-hot frame-rate selector; log2 gives the index */
    int idx = av_log2((flags & 0x7c0) >> 6);
    return map[idx];
}
/**
* \brief filters out interesting tags from track information.
* \param len length of tag section, will be adjusted to contain remaining bytes
* \param si struct to store collected information into
*/
/**
 * \brief filters out interesting tags from track information.
 * \param len length of tag section, will be adjusted to contain remaining bytes
 * \param si  struct to store collected information into
 */
static void gxf_track_tags(ByteIOContext *pb, int *len, struct gxf_stream_info *si) {
    si->frames_per_second = (AVRational){0, 0};
    si->fields_per_frame  = 0;

    while (*len >= 2) {
        GXFTrackTag tag = get_byte(pb);
        int tag_len     = get_byte(pb);

        *len -= 2;
        /* a tag claiming more bytes than remain is corrupt: stop here */
        if (tag_len > *len)
            return;
        *len -= tag_len;

        if (tag_len != 4) {
            url_fskip(pb, tag_len); /* only 32-bit values are of interest */
            continue;
        }
        {
            uint32_t v = get_be32(pb);
            if (tag == TRACK_FPS)
                si->frames_per_second = fps_tag2avr(v);
            else if (tag == TRACK_FPF && (v == 1 || v == 2))
                si->fields_per_frame = v;
        }
    }
}
/**
* \brief read index from FLT packet into stream 0 av_index
*/
/**
 * \brief read index from FLT packet into stream 0 av_index
 */
static void gxf_read_index(AVFormatContext *s, int pkt_len) {
    ByteIOContext *pb = s->pb;
    AVStream *st = s->streams[0];
    uint32_t fields_per_map = get_le32(pb);
    uint32_t map_cnt = get_le32(pb);
    int i;
    pkt_len -= 8;
    /* cap the entry count to keep memory use bounded on corrupt files */
    if (map_cnt > 1000) {
        av_log(s, AV_LOG_ERROR, "too many index entries %u (%x)\n", map_cnt, map_cnt);
        map_cnt = 1000;
    }
    if (pkt_len < 4 * map_cnt) {
        av_log(s, AV_LOG_ERROR, "invalid index length\n");
        url_fskip(pb, pkt_len);
        return;
    }
    pkt_len -= 4 * map_cnt;
    /* implicit first entry at position 0, timestamp 0 */
    av_add_index_entry(st, 0, 0, 0, 0, 0);
    /* entries store the position in 1024-byte units; timestamps are field numbers */
    for (i = 0; i < map_cnt; i++)
        av_add_index_entry(st, (uint64_t)get_le32(pb) * 1024,
                           i * (uint64_t)fields_per_map + 1, 0, 0, 0);
    url_fskip(pb, pkt_len);
}
/* Parse the GXF header: the MAP packet (material tags, then per-track tags
 * that create AVStreams), an optional FLT index packet, and the UMF packet
 * from which a fallback frame rate may be taken. Sets one common timebase
 * (field rate) on all streams. */
static int gxf_header(AVFormatContext *s, AVFormatParameters *ap) {
    ByteIOContext *pb = s->pb;
    GXFPktType pkt_type;
    int map_len;
    int len;
    AVRational main_timebase = {0, 0};
    struct gxf_stream_info si;
    int i;
    if (!parse_packet_header(pb, &pkt_type, &map_len) || pkt_type != PKT_MAP) {
        av_log(s, AV_LOG_ERROR, "map packet not found\n");
        return 0;
    }
    map_len -= 2;
    /* MAP preamble: version byte 0xe0 followed by 0xff */
    if (get_byte(pb) != 0x0e0 || get_byte(pb) != 0xff) {
        av_log(s, AV_LOG_ERROR, "unknown version or invalid map preamble\n");
        return 0;
    }
    map_len -= 2;
    len = get_be16(pb); // length of material data section
    if (len > map_len) {
        av_log(s, AV_LOG_ERROR, "material data longer than map data\n");
        return 0;
    }
    map_len -= len;
    gxf_material_tags(pb, &len, &si);
    url_fskip(pb, len);
    map_len -= 2;
    len = get_be16(pb); // length of track description
    if (len > map_len) {
        av_log(s, AV_LOG_ERROR, "track description longer than map data\n");
        return 0;
    }
    map_len -= len;
    /* one iteration per track description entry */
    while (len > 0) {
        int track_type, track_id, track_len;
        AVStream *st;
        int idx;
        len -= 4;
        track_type = get_byte(pb);
        track_id = get_byte(pb);
        track_len = get_be16(pb);
        len -= track_len;
        gxf_track_tags(pb, &track_len, &si);
        url_fskip(pb, track_len);
        /* valid track types have the top bit set, ids the top two bits */
        if (!(track_type & 0x80)) {
            av_log(s, AV_LOG_ERROR, "invalid track type %x\n", track_type);
            continue;
        }
        track_type &= 0x7f;
        if ((track_id & 0xc0) != 0xc0) {
            av_log(s, AV_LOG_ERROR, "invalid track id %x\n", track_id);
            continue;
        }
        track_id &= 0x3f;
        idx = get_sindex(s, track_id, track_type);
        if (idx < 0) continue;
        st = s->streams[idx];
        /* first track with a known frame rate fixes the common timebase;
         * timebase is the field period, hence the factor of 2 */
        if (!main_timebase.num || !main_timebase.den) {
            main_timebase.num = si.frames_per_second.den;
            main_timebase.den = si.frames_per_second.num * 2;
        }
        st->start_time = si.first_field;
        if (si.first_field != AV_NOPTS_VALUE && si.last_field != AV_NOPTS_VALUE)
            st->duration = si.last_field - si.first_field;
    }
    if (len < 0)
        av_log(s, AV_LOG_ERROR, "invalid track description length specified\n");
    if (map_len)
        url_fskip(pb, map_len);
    if (!parse_packet_header(pb, &pkt_type, &len)) {
        av_log(s, AV_LOG_ERROR, "sync lost in header\n");
        return -1;
    }
    /* optional FLT index packet directly after the MAP */
    if (pkt_type == PKT_FLT) {
        gxf_read_index(s, len);
        if (!parse_packet_header(pb, &pkt_type, &len)) {
            av_log(s, AV_LOG_ERROR, "sync lost in header\n");
            return -1;
        }
    }
    if (pkt_type == PKT_UMF) {
        if (len >= 0x39) {
            AVRational fps;
            len -= 0x39;
            url_fskip(pb, 5); // preamble
            url_fskip(pb, 0x30); // payload description
            fps = fps_umf2avr(get_le32(pb));
            if (!main_timebase.num || !main_timebase.den) {
                // this may not always be correct, but simply the best we can get
                main_timebase.num = fps.den;
                main_timebase.den = fps.num * 2;
            }
        } else
            av_log(s, AV_LOG_INFO, "UMF packet too short\n");
    } else
        av_log(s, AV_LOG_INFO, "UMF packet missing\n");
    url_fskip(pb, len);
    // set a fallback value, 60000/1001 is specified for audio-only files
    // so use that regardless of why we do not know the video frame rate.
    if (!main_timebase.num || !main_timebase.den)
        main_timebase = (AVRational){1001, 60000};
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        av_set_pts_info(st, 32, main_timebase.num, main_timebase.den);
    }
    return 0;
}
/* Shift one more byte from the stream into tmp; abort the enclosing scan
 * (jump to "out") when the byte budget is exhausted or EOF is reached. */
#define READ_ONE() \
    { \
        if (!max_interval-- || url_feof(pb)) \
            goto out; \
        tmp = tmp << 8 | get_byte(pb); \
    }
/**
 * \brief resync the stream on the next media packet with specified properties
 * \param max_interval how many bytes to search for matching packet at most
 * \param track track id the media packet must belong to, -1 for any
 * \param timestamp minimum timestamp (== field number) the packet must have, -1 for any
 * \return timestamp of packet found
 */
static int64_t gxf_resync_media(AVFormatContext *s, uint64_t max_interval, int track, int timestamp) {
    uint32_t tmp;
    uint64_t last_pos;
    uint64_t last_found_pos = 0;
    int cur_track;
    int64_t cur_timestamp = AV_NOPTS_VALUE;
    int len;
    ByteIOContext *pb = s->pb;
    GXFPktType type;
    tmp = get_be32(pb);
start:
    /* scan byte-by-byte for the 00 00 00 00 01 packet leader */
    while (tmp)
        READ_ONE();
    READ_ONE();
    if (tmp != 1)
        goto start;
    last_pos = url_ftell(pb);
    /* rewind over the 5 leader bytes and try a full header parse */
    url_fseek(pb, -5, SEEK_CUR);
    if (!parse_packet_header(pb, &type, &len) || type != PKT_MEDIA) {
        url_fseek(pb, last_pos, SEEK_SET);
        goto start;
    }
    get_byte(pb);             /* media type */
    cur_track = get_byte(pb);
    cur_timestamp = get_be32(pb);
    /* position of the packet start: 16-byte header + 6 media bytes read */
    last_found_pos = url_ftell(pb) - 16 - 6;
    if ((track >= 0 && track != cur_track) || (timestamp >= 0 && timestamp > cur_timestamp)) {
        url_fseek(pb, last_pos, SEEK_SET);
        goto start;
    }
out:
    /* leave the stream positioned at the start of the matched packet */
    if (last_found_pos)
        url_fseek(pb, last_found_pos, SEEK_SET);
    return cur_timestamp;
}
/* Read the next media packet, skipping FLT (index) and other non-media
 * packets; for PCM audio, trims the payload to the valid sample range
 * signalled in the media header. dts is the field number. */
static int gxf_packet(AVFormatContext *s, AVPacket *pkt) {
    ByteIOContext *pb = s->pb;
    GXFPktType pkt_type;
    int pkt_len;
    while (!url_feof(pb)) {
        AVStream *st;
        int track_type, track_id, ret;
        int field_nr, field_info, skip = 0;
        int stream_index;
        if (!parse_packet_header(pb, &pkt_type, &pkt_len)) {
            if (!url_feof(pb))
                av_log(s, AV_LOG_ERROR, "sync lost\n");
            return -1;
        }
        if (pkt_type == PKT_FLT) {
            gxf_read_index(s, pkt_len);
            continue;
        }
        if (pkt_type != PKT_MEDIA) {
            url_fskip(pb, pkt_len);
            continue;
        }
        /* media packets carry a 16-byte sub-header parsed below */
        if (pkt_len < 16) {
            av_log(s, AV_LOG_ERROR, "invalid media packet length\n");
            continue;
        }
        pkt_len -= 16;
        track_type = get_byte(pb);
        track_id = get_byte(pb);
        stream_index = get_sindex(s, track_id, track_type);
        if (stream_index < 0)
            return stream_index;
        st = s->streams[stream_index];
        field_nr = get_be32(pb);
        field_info = get_be32(pb);
        get_be32(pb); // "timeline" field number
        get_byte(pb); // flags
        get_byte(pb); // reserved
        if (st->codec->codec_id == CODEC_ID_PCM_S24LE ||
            st->codec->codec_id == CODEC_ID_PCM_S16LE) {
            /* field_info packs the valid sample range: first in the high
             * 16 bits, one-past-last in the low 16 bits */
            int first = field_info >> 16;
            int last = field_info & 0xffff; // last is exclusive
            int bps = av_get_bits_per_sample(st->codec->codec_id)>>3;
            if (first <= last && last*bps <= pkt_len) {
                url_fskip(pb, first*bps);
                skip = pkt_len - last*bps;
                pkt_len = (last-first)*bps;
            } else
                av_log(s, AV_LOG_ERROR, "invalid first and last sample values\n");
        }
        ret = av_get_packet(pb, pkt, pkt_len);
        if (skip)
            url_fskip(pb, skip);
        pkt->stream_index = stream_index;
        pkt->dts = field_nr;
        return ret;
    }
    return AVERROR(EIO);
}
/* Seek using the stream-0 index: jump to the nearest preceding index entry,
 * then resync forward to a media packet at or after the target field number.
 * Fails if the packet found is more than 4 fields away from the target. */
static int gxf_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) {
    uint64_t pos;
    uint64_t maxlen = 100 * 1024 * 1024;
    AVStream *st = s->streams[0]; /* the index always lives on stream 0 */
    int64_t start_time = s->streams[stream_index]->start_time;
    int64_t found;
    int idx;
    if (timestamp < start_time) timestamp = start_time;
    idx = av_index_search_timestamp(st, timestamp - start_time,
                                    AVSEEK_FLAG_ANY | AVSEEK_FLAG_BACKWARD);
    if (idx < 0)
        return -1;
    pos = st->index_entries[idx].pos;
    /* bound the forward scan by the distance to the entry after next */
    if (idx < st->nb_index_entries - 2)
        maxlen = st->index_entries[idx + 2].pos - pos;
    maxlen = FFMAX(maxlen, 200 * 1024);
    url_fseek(s->pb, pos, SEEK_SET);
    found = gxf_resync_media(s, maxlen, -1, timestamp);
    if (FFABS(found - timestamp) > 4)
        return -1;
    return 0;
}
/* Generic-seek callback: report the timestamp of the first media packet at
 * or after *pos, and update *pos to the position after the scan. */
static int64_t gxf_read_timestamp(AVFormatContext *s, int stream_index,
                                  int64_t *pos, int64_t pos_limit) {
    ByteIOContext *pb = s->pb;
    int64_t found_ts;

    url_fseek(pb, *pos, SEEK_SET);
    /* scan forward for any media packet within the allowed byte range */
    found_ts = gxf_resync_media(s, pos_limit - *pos, -1, -1);
    *pos = url_ftell(pb);
    return found_ts;
}
/* Demuxer registration: positional AVInputFormat initializer. */
AVInputFormat gxf_demuxer = {
    "gxf",                              /* name */
    NULL_IF_CONFIG_SMALL("GXF format"), /* long_name */
    0,                                  /* priv_data_size: no private context */
    gxf_probe,                          /* read_probe */
    gxf_header,                         /* read_header */
    gxf_packet,                         /* read_packet */
    NULL,                               /* read_close */
    gxf_seek,                           /* read_seek */
    gxf_read_timestamp,                 /* read_timestamp */
};
| 123linslouis-android-video-cutter | jni/libavformat/gxf.c | C | asf20 | 17,175 |
/*
* AIFF/AIFF-C muxer/demuxer common header
* Copyright (c) 2006 Patrick Guimond
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* common header for AIFF muxer and demuxer
*/
#ifndef AVFORMAT_AIFF_H
#define AVFORMAT_AIFF_H
#include "avformat.h"
#include "riff.h"
/* codec <-> AIFF/AIFF-C compression tag mapping; the uncompressed PCM
 * variants all share the 'NONE' tag (distinguished by sample size) */
static const AVCodecTag ff_codec_aiff_tags[] = {
    { CODEC_ID_PCM_S16BE,    MKTAG('N','O','N','E') },
    { CODEC_ID_PCM_S8,       MKTAG('N','O','N','E') },
    { CODEC_ID_PCM_S24BE,    MKTAG('N','O','N','E') },
    { CODEC_ID_PCM_S32BE,    MKTAG('N','O','N','E') },
    { CODEC_ID_PCM_F32BE,    MKTAG('f','l','3','2') },
    { CODEC_ID_PCM_F64BE,    MKTAG('f','l','6','4') },
    { CODEC_ID_PCM_ALAW,     MKTAG('a','l','a','w') },
    { CODEC_ID_PCM_MULAW,    MKTAG('u','l','a','w') },
    { CODEC_ID_MACE3,        MKTAG('M','A','C','3') },
    { CODEC_ID_MACE6,        MKTAG('M','A','C','6') },
    { CODEC_ID_GSM,          MKTAG('G','S','M',' ') },
    { CODEC_ID_ADPCM_G726,   MKTAG('G','7','2','6') },
    { CODEC_ID_PCM_S16LE,    MKTAG('s','o','w','t') },
    { CODEC_ID_ADPCM_IMA_QT, MKTAG('i','m','a','4') },
    { CODEC_ID_QDM2,         MKTAG('Q','D','M','2') },
    { CODEC_ID_QCELP,        MKTAG('Q','c','l','p') },
    { CODEC_ID_NONE,         0 }, /* terminator */
};
#endif /* AVFORMAT_AIFF_H */
| 123linslouis-android-video-cutter | jni/libavformat/aiff.h | C | asf20 | 1,997 |
/*
* TechnoTrend PVA (.pva) demuxer
* Copyright (c) 2007, 2008 Ivo van Poorten
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "mpeg.h"
#define PVA_MAX_PAYLOAD_LENGTH 0x17f8
#define PVA_VIDEO_PAYLOAD 0x01
#define PVA_AUDIO_PAYLOAD 0x02
#define PVA_MAGIC (('A' << 8) + 'V')
typedef struct {
    int continue_pes; /* payload bytes of the current audio PES packet still expected */
} PVAContext;
/* Probe: "AV" magic, payload type 1 (video) or 2 (audio), reserved byte 0x55. */
static int pva_probe(AVProbeData * pd) {
    const unsigned char *b = pd->buf;
    int looks_like_pva = AV_RB16(b) == PVA_MAGIC &&
                         b[2] && b[2] < 3 && b[4] == 0x55;
    return looks_like_pva ? AVPROBE_SCORE_MAX / 2 : 0;
}
/* Create the two fixed PVA streams: stream 0 MPEG-2 video, stream 1 MP2
 * audio, both on a 90 kHz timebase with a keyframe index entry at 0. */
static int pva_read_header(AVFormatContext *s, AVFormatParameters *ap) {
    AVStream *st;

    /* video stream: 32-bit pts */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = CODEC_ID_MPEG2VIDEO;
    st->need_parsing      = AVSTREAM_PARSE_FULL;
    av_set_pts_info(st, 32, 1, 90000);
    av_add_index_entry(st, 0, 0, 0, 0, AVINDEX_KEYFRAME);

    /* audio stream: 33-bit pts (PES timestamps) */
    st = av_new_stream(s, 1);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id   = CODEC_ID_MP2;
    st->need_parsing      = AVSTREAM_PARSE_FULL;
    av_set_pts_info(st, 33, 1, 90000);
    av_add_index_entry(st, 0, 0, 0, 0, AVINDEX_KEYFRAME);

    /* the parameters will be extracted from the compressed bitstream */
    return 0;
}
/* Only log while actually demuxing; the binary-search timestamp reader probes
 * arbitrary offsets and would otherwise spam the log. */
#define pva_log if (read_packet) av_log
/* Parse one PVA packet header (and, for audio, the embedded PES header),
 * returning its pts, payload length and stream id. With read_packet=0 the
 * function never loops to recover and stays silent (used for seeking). */
static int read_part_of_packet(AVFormatContext *s, int64_t *pts,
                               int *len, int *strid, int read_packet) {
    ByteIOContext *pb = s->pb;
    PVAContext *pvactx = s->priv_data;
    int syncword, streamid, reserved, flags, length, pts_flag;
    int64_t pva_pts = AV_NOPTS_VALUE, startpos;
recover:
    startpos = url_ftell(pb);
    syncword = get_be16(pb);
    streamid = get_byte(pb);
    get_byte(pb);               /* counter not used */
    reserved = get_byte(pb);
    flags    = get_byte(pb);
    length   = get_be16(pb);
    pts_flag = flags & 0x10;
    if (syncword != PVA_MAGIC) {
        pva_log(s, AV_LOG_ERROR, "invalid syncword\n");
        return AVERROR(EIO);
    }
    if (streamid != PVA_VIDEO_PAYLOAD && streamid != PVA_AUDIO_PAYLOAD) {
        pva_log(s, AV_LOG_ERROR, "invalid streamid\n");
        return AVERROR(EIO);
    }
    if (reserved != 0x55) {
        pva_log(s, AV_LOG_WARNING, "expected reserved byte to be 0x55\n");
    }
    if (length > PVA_MAX_PAYLOAD_LENGTH) {
        pva_log(s, AV_LOG_ERROR, "invalid payload length %u\n", length);
        return AVERROR(EIO);
    }
    if (streamid == PVA_VIDEO_PAYLOAD && pts_flag) {
        /* video pts is stored directly in the PVA header */
        pva_pts = get_be32(pb);
        length -= 4;
    } else if (streamid == PVA_AUDIO_PAYLOAD) {
        /* PVA Audio Packets either start with a signaled PES packet or
         * are a continuation of the previous PES packet. New PES packets
         * always start at the beginning of a PVA Packet, never somewhere in
         * the middle. */
        if (!pvactx->continue_pes) {
            int pes_signal, pes_header_data_length, pes_packet_length,
                pes_flags;
            unsigned char pes_header_data[256];
            pes_signal = get_be24(pb);
            get_byte(pb);       /* PES stream id */
            pes_packet_length = get_be16(pb);
            pes_flags = get_be16(pb);
            pes_header_data_length = get_byte(pb);
            if (pes_signal != 1) {
                pva_log(s, AV_LOG_WARNING, "expected signaled PES packet, "
                        "trying to recover\n");
                url_fskip(pb, length - 9);
                if (!read_packet)
                    return AVERROR(EIO);
                goto recover;
            }
            get_buffer(pb, pes_header_data, pes_header_data_length);
            /* account for the 9 fixed PES bytes plus the optional header */
            length -= 9 + pes_header_data_length;
            pes_packet_length -= 3 + pes_header_data_length;
            pvactx->continue_pes = pes_packet_length;
            /* pts present if the PTS flag is set and the marker bits match */
            if (pes_flags & 0x80 && (pes_header_data[0] & 0xf0) == 0x20)
                pva_pts = ff_parse_pes_pts(pes_header_data);
        }
        pvactx->continue_pes -= length;
        if (pvactx->continue_pes < 0) {
            pva_log(s, AV_LOG_WARNING, "audio data corruption\n");
            pvactx->continue_pes = 0;
        }
    }
    if (pva_pts != AV_NOPTS_VALUE)
        av_add_index_entry(s->streams[streamid-1], startpos, pva_pts, 0, 0, AVINDEX_KEYFRAME);
    *pts = pva_pts;
    *len = length;
    *strid = streamid;
    return 0;
}
/* Read one PVA payload as a packet; stream_index is streamid-1 (0 video,
 * 1 audio), pts comes from the packet/PES header or AV_NOPTS_VALUE. */
static int pva_read_packet(AVFormatContext *s, AVPacket *pkt) {
    int64_t pts;
    int length, streamid, ret;

    if (read_part_of_packet(s, &pts, &length, &streamid, 1) < 0)
        return AVERROR(EIO);
    ret = av_get_packet(s->pb, pkt, length);
    if (ret <= 0)
        return AVERROR(EIO);
    pkt->stream_index = streamid - 1;
    pkt->pts = pts;
    return ret;
}
/* Generic-seek callback: scan forward from *pos for a packet of the wanted
 * stream that carries a pts, advancing byte-by-byte on parse failures. */
static int64_t pva_read_timestamp(struct AVFormatContext *s, int stream_index,
                                  int64_t *pos, int64_t pos_limit) {
    ByteIOContext *pb = s->pb;
    PVAContext *pvactx = s->priv_data;
    int length, streamid;
    int64_t res = AV_NOPTS_VALUE;
    /* limit the scan to at most 8 maximum-size packets past *pos */
    pos_limit = FFMIN(*pos+PVA_MAX_PAYLOAD_LENGTH*8, (uint64_t)*pos+pos_limit);
    while (*pos < pos_limit) {
        res = AV_NOPTS_VALUE;
        url_fseek(pb, *pos, SEEK_SET);
        /* headers parsed out of sequence: reset the PES continuation state */
        pvactx->continue_pes = 0;
        if (read_part_of_packet(s, &res, &length, &streamid, 0)) {
            (*pos)++;           /* not a valid header here; slide one byte */
            continue;
        }
        if (streamid - 1 != stream_index || res == AV_NOPTS_VALUE) {
            *pos = url_ftell(pb) + length; /* skip this packet's payload */
            continue;
        }
        break;
    }
    /* do not leave stale continuation state for the regular read path */
    pvactx->continue_pes = 0;
    return res;
}
/* Demuxer registration: positional AVInputFormat initializer. */
AVInputFormat pva_demuxer = {
    "pva",                                                      /* name */
    NULL_IF_CONFIG_SMALL("TechnoTrend PVA file and stream format"), /* long_name */
    sizeof(PVAContext),                                         /* priv_data_size */
    pva_probe,                                                  /* read_probe */
    pva_read_header,                                            /* read_header */
    pva_read_packet,                                            /* read_packet */
    .read_timestamp = pva_read_timestamp
};
| 123linslouis-android-video-cutter | jni/libavformat/pva.c | C | asf20 | 6,709 |
/*
* raw FLAC muxer
* Copyright (C) 2009 Justin Ruggles
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/flac.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "flacenc.h"
/* Write the FLAC file header: the "fLaC" marker plus a STREAMINFO metadata
 * block header (emitted only when extradata is the bare STREAMINFO), then
 * the extradata itself. last_block sets the last-metadata-block flag.
 * Returns 0 on success, -1 if the extradata is not valid FLAC extradata. */
int ff_flac_write_header(ByteIOContext *pb, AVCodecContext *codec,
                         int last_block)
{
    /* "fLaC" + metadata block header: type 0 (STREAMINFO), size 0x22 */
    uint8_t marker[8] = {
        'f', 'L', 'a', 'C', 0x00, 0x00, 0x00, 0x22
    };
    uint8_t *streaminfo;
    enum FLACExtradataFormat format;

    /* top bit of the block-header byte marks the last metadata block */
    marker[4] = last_block ? 0x80 : 0x00;

    if (!ff_flac_is_extradata_valid(codec, &format, &streaminfo))
        return -1;

    /* write "fLaC" stream marker and first metadata block header if needed */
    if (format == FLAC_EXTRADATA_FORMAT_STREAMINFO)
        put_buffer(pb, marker, 8);

    /* write STREAMINFO or full header */
    put_buffer(pb, codec->extradata, codec->extradata_size);
    return 0;
}
| 123linslouis-android-video-cutter | jni/libavformat/flacenc_header.c | C | asf20 | 1,614 |
/*
* AVI demuxer
* Copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
//#define DEBUG
//#define DEBUG_SEEK
#include "libavutil/intreadwrite.h"
#include "libavutil/bswap.h"
#include "avformat.h"
#include "avi.h"
#include "dv.h"
#include "riff.h"
#undef NDEBUG
#include <assert.h>
/* Per-stream demuxer state, stored in AVStream.priv_data. */
typedef struct AVIStream {
    int64_t frame_offset; /* current frame (video) or byte (audio) counter
                         (used to compute the pts) */
    /* bytes still to be delivered from the chunk currently being read */
    int remaining;
    int packet_size;
    /* strh scale/rate pair; also used as the stream time base */
    int scale;
    int rate;
    int sample_size; /* size of one sample (or packet) (in the rate/scale sense) in bytes */
    int64_t cum_len; /* temporary storage (used during seek) */
    int prefix; ///< normally 'd'<<8 + 'c' or 'w'<<8 + 'b'
    int prefix_count;
    /* palette appended to the next packet when has_pal is set */
    uint32_t pal[256];
    int has_pal;
} AVIStream;
/* Demuxer-global state, stored in AVFormatContext.priv_data. */
typedef struct {
    int64_t riff_end;
    int64_t movi_end;
    int64_t fsize;
    int64_t movi_list;
    int64_t last_pkt_pos;
    int index_loaded;
    int is_odml;
    /* set when chunks of different streams are not interleaved in file order */
    int non_interleaved;
    /* stream whose chunk is currently being consumed, or -1 */
    int stream_index;
    DVDemuxContext* dv_demux;
} AVIContext;
/* Known 8-byte file signatures: bytes 0-3 at file offset 0, bytes 4-7 at
 * file offset 8 (offsets 4-7 in the file hold the RIFF size). */
static const char avi_headers[][8] = {
    { 'R', 'I', 'F', 'F', 'A', 'V', 'I', ' ' },
    { 'R', 'I', 'F', 'F', 'A', 'V', 'I', 'X' },
    { 'R', 'I', 'F', 'F', 'A', 'V', 'I', 0x19},
    { 'O', 'N', '2', ' ', 'O', 'N', '2', 'f' },
    { 'R', 'I', 'F', 'F', 'A', 'M', 'V', ' ' },
    { 0 }
};
static int avi_load_index(AVFormatContext *s);
static int guess_ni_flag(AVFormatContext *s);
#ifdef DEBUG
/* Debug helper: print a fourcc tag and chunk size. */
static void print_tag(const char *str, unsigned int tag, int size)
{
    char c[4];
    int i;
    /* unpack the little-endian fourcc into its four printable bytes */
    for (i = 0; i < 4; i++)
        c[i] = (tag >> (8 * i)) & 0xff;
    dprintf(NULL, "%s: tag=%c%c%c%c size=0x%x\n",
            str, c[0], c[1], c[2], c[3], size);
}
#endif
/* Read and validate the RIFF/ON2 file header; on success avi->riff_end
 * holds the absolute end offset of the outer chunk. Returns 0 on match,
 * -1 when the 8-byte signature is unknown. */
static int get_riff(AVFormatContext *s, ByteIOContext *pb)
{
    AVIContext *avi = s->priv_data;
    char header[8];
    int i;

    /* container fourcc, then the chunk size which gives the end offset */
    get_buffer(pb, header, 4);
    avi->riff_end  = get_le32(pb);
    avi->riff_end += url_ftell(pb);

    /* form type; match all 8 signature bytes against the known list */
    get_buffer(pb, header + 4, 4);

    for (i = 0; avi_headers[i][0]; i++) {
        if (!memcmp(header, avi_headers[i], 8))
            break;
    }
    if (!avi_headers[i][0])
        return -1;

    /* 'AVI\x19' identifies output of a known-broken muxer */
    if (header[7] == 0x19)
        av_log(s, AV_LOG_INFO,
               "This file has been generated by a totally broken muxer.\n");

    return 0;
}
/* Parse an OpenDML 'indx' chunk (standard or super index) and feed its
 * entries into the stream's index. For a super index (index_type == 0)
 * this recurses into each referenced standard index chunk.
 * Returns 0 on success, -1 on malformed data. */
static int read_braindead_odml_indx(AVFormatContext *s, int frame_num){
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = s->pb;
    int longs_pre_entry= get_le16(pb);
    int index_sub_type = get_byte(pb);
    int index_type = get_byte(pb);
    int entries_in_use = get_le32(pb);
    int chunk_id = get_le32(pb);
    int64_t base = get_le64(pb);
    /* the chunk id encodes the stream number as two ASCII digits */
    int stream_id= 10*((chunk_id&0xFF) - '0') + (((chunk_id>>8)&0xFF) - '0');
    AVStream *st;
    AVIStream *ast;
    int i;
    int64_t last_pos= -1;
    int64_t filesize= url_fsize(s->pb);
#ifdef DEBUG_SEEK
    av_log(s, AV_LOG_ERROR, "longs_pre_entry:%d index_type:%d entries_in_use:%d chunk_id:%X base:%16"PRIX64"\n",
        longs_pre_entry,index_type, entries_in_use, chunk_id, base);
#endif
    if(stream_id >= s->nb_streams || stream_id < 0)
        return -1;
    st= s->streams[stream_id];
    ast = st->priv_data;
    if(index_sub_type)
        return -1;
    get_le32(pb);
    /* a standard index must have exactly 2 longs per entry */
    if(index_type && longs_pre_entry != 2)
        return -1;
    if(index_type>1)
        return -1;
    /* some muxers store the base offset with duplicated high/low halves;
     * accept that form when the low half alone fits in the file */
    if(filesize > 0 && base >= filesize){
        av_log(s, AV_LOG_ERROR, "ODML index invalid\n");
        if(base>>32 == (base & 0xFFFFFFFF) && (base & 0xFFFFFFFF) < filesize && filesize <= 0xFFFFFFFF)
            base &= 0xFFFFFFFF;
        else
            return -1;
    }
    for(i=0; i<entries_in_use; i++){
        if(index_type){
            /* standard index entry: offset (relative to base, points past
             * the 8-byte chunk header) and length; the sign bit of the
             * length marks non-keyframes */
            int64_t pos= get_le32(pb) + base - 8;
            int len = get_le32(pb);
            int key= len >= 0;
            len &= 0x7FFFFFFF;
#ifdef DEBUG_SEEK
            av_log(s, AV_LOG_ERROR, "pos:%"PRId64", len:%X\n", pos, len);
#endif
            if(url_feof(pb))
                return -1;
            /* repeated or degenerate positions indicate a non-interleaved file */
            if(last_pos == pos || pos == base - 8)
                avi->non_interleaved= 1;
            if(last_pos != pos && (len || !ast->sample_size))
                av_add_index_entry(st, pos, ast->cum_len, len, 0, key ? AVINDEX_KEYFRAME : 0);
            /* cum_len counts bytes for fixed-sample-size streams, frames otherwise */
            if(ast->sample_size)
                ast->cum_len += len;
            else
                ast->cum_len ++;
            last_pos= pos;
        }else{
            /* super index entry: recurse into the referenced index chunk,
             * then restore the read position */
            int64_t offset, pos;
            int duration;
            offset = get_le64(pb);
            get_le32(pb); /* size */
            duration = get_le32(pb);
            if(url_feof(pb))
                return -1;
            pos = url_ftell(pb);
            url_fseek(pb, offset+8, SEEK_SET);
            read_braindead_odml_indx(s, frame_num);
            frame_num += duration;
            url_fseek(pb, pos, SEEK_SET);
        }
    }
    avi->index_loaded=1;
    return 0;
}
/* Expand a degenerate one-entry index of a fixed-sample-size stream into
 * synthetic entries at roughly 1KB steps, so that seeking can land inside
 * the single large chunk. */
static void clean_index(AVFormatContext *s){
    int i;
    int64_t j;
    for(i=0; i<s->nb_streams; i++){
        AVStream *st = s->streams[i];
        AVIStream *ast = st->priv_data;
        int n= st->nb_index_entries;
        int max= ast->sample_size;
        int64_t pos, size, ts;
        /* only rewrite streams with exactly one entry and a known sample size */
        if(n != 1 || ast->sample_size==0)
            continue;
        /* double the step until it reaches at least 1024 bytes
           (sample_size >= 1 here, so this terminates) */
        while(max < 1024) max+=max;
        pos= st->index_entries[0].pos;
        size= st->index_entries[0].size;
        ts= st->index_entries[0].timestamp;
        for(j=0; j<size; j+=max){
            av_add_index_entry(st, pos+j, ts+j, FFMIN(max, size-j), 0, AVINDEX_KEYFRAME);
        }
    }
}
/* Read one INFO/strn metadata chunk of the given size and store its payload
 * under the fourcc tag as a metadata key, either on the stream (if st is
 * non-NULL) or on the format context. Returns 0 on success, negative on
 * error. */
static int avi_read_tag(AVFormatContext *s, AVStream *st, uint32_t tag, uint32_t size)
{
    ByteIOContext *pb = s->pb;
    char key[5] = {0}, *value;

    /* Chunks are padded to even length. Reject sizes where the padding or
     * the +1 for the NUL terminator would overflow; the original code
     * tested size against UINT_MAX only AFTER padding, when it was always
     * even and thus could never equal the (odd) UINT_MAX. */
    if (size >= UINT_MAX - 1)
        return -1;
    size += size & 1;

    value = av_malloc(size+1);
    if (!value)
        return -1;
    get_buffer(pb, value, size);
    value[size]=0;

    /* the fourcc chunk id doubles as the metadata key */
    AV_WL32(key, tag);

    /* ownership of value passes to the metadata table */
    if(st)
        return av_metadata_set2(&st->metadata, key, value,
                                AV_METADATA_DONT_STRDUP_VAL);
    else
        return av_metadata_set2(&s->metadata, key, value,
                                AV_METADATA_DONT_STRDUP_VAL);
}
/* Walk the sub-chunks of an INFO list (which ends at absolute offset 'end')
 * and turn each one into a metadata entry. */
static void avi_read_info(AVFormatContext *s, uint64_t end)
{
    /* Stop at EOF as well: on a truncated file get_le32 keeps returning 0
     * without advancing the position, so testing url_ftell alone would
     * loop forever. */
    while (url_ftell(s->pb) < end && !url_feof(s->pb)) {
        uint32_t tag  = get_le32(s->pb);
        uint32_t size = get_le32(s->pb);
        avi_read_tag(s, NULL, tag, size);
    }
}
/* Parse everything up to (and including the start of) the 'movi' list:
 * the avih main header, one strh/strf pair per stream, optional OpenDML
 * 'indx' chunks, INFO metadata, and vprp aspect-ratio data. Creates the
 * AVStreams and then loads/sanity-checks the index. Returns 0 on success,
 * -1 on failure. */
static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = s->pb;
    unsigned int tag, tag1, handler;
    int codec_type, stream_index, frame_period, bit_rate;
    unsigned int size;
    int i;
    AVStream *st;
    AVIStream *ast = NULL;
    int avih_width=0, avih_height=0;
    int amv_file_format=0;
    uint64_t list_end = 0;
    avi->stream_index= -1;
    if (get_riff(s, pb) < 0)
        return -1;
    avi->fsize = url_fsize(pb);
    if(avi->fsize<=0)
        avi->fsize= avi->riff_end == 8 ? INT64_MAX : avi->riff_end;
    /* first list tag */
    stream_index = -1;
    codec_type = -1;
    frame_period = 0;
    /* iterate header-level chunks until the movi list begins */
    for(;;) {
        if (url_feof(pb))
            goto fail;
        tag = get_le32(pb);
        size = get_le32(pb);
#ifdef DEBUG
        print_tag("tag", tag, size);
#endif
        switch(tag) {
        case MKTAG('L', 'I', 'S', 'T'):
            list_end = url_ftell(pb) + size;
            /* Ignored, except at start of video packets. */
            tag1 = get_le32(pb);
#ifdef DEBUG
            print_tag("list", tag1, 0);
#endif
            if (tag1 == MKTAG('m', 'o', 'v', 'i')) {
                /* data section reached: remember its bounds and stop header parsing */
                avi->movi_list = url_ftell(pb) - 4;
                if(size) avi->movi_end = avi->movi_list + size + (size & 1);
                else avi->movi_end = url_fsize(pb);
                dprintf(NULL, "movi end=%"PRIx64"\n", avi->movi_end);
                goto end_of_header;
            }
            else if (tag1 == MKTAG('I', 'N', 'F', 'O'))
                avi_read_info(s, list_end);
            break;
        case MKTAG('d', 'm', 'l', 'h'):
            /* OpenDML extension header */
            avi->is_odml = 1;
            url_fskip(pb, size + (size & 1));
            break;
        case MKTAG('a', 'm', 'v', 'h'):
            amv_file_format=1;
            /* fallthrough: AMV main header has the same layout as avih */
        case MKTAG('a', 'v', 'i', 'h'):
            /* AVI header */
            /* using frame_period is bad idea */
            frame_period = get_le32(pb);
            bit_rate = get_le32(pb) * 8;
            get_le32(pb);
            avi->non_interleaved |= get_le32(pb) & AVIF_MUSTUSEINDEX;
            url_fskip(pb, 2 * 4);
            get_le32(pb);
            get_le32(pb);
            avih_width=get_le32(pb);
            avih_height=get_le32(pb);
            url_fskip(pb, size - 10 * 4);
            break;
        case MKTAG('s', 't', 'r', 'h'):
            /* stream header */
            tag1 = get_le32(pb);
            handler = get_le32(pb); /* codec tag */
            if(tag1 == MKTAG('p', 'a', 'd', 's')){
                url_fskip(pb, size - 8);
                break;
            }else{
                stream_index++;
                st = av_new_stream(s, stream_index);
                if (!st)
                    goto fail;
                ast = av_mallocz(sizeof(AVIStream));
                if (!ast)
                    goto fail;
                st->priv_data = ast;
            }
            /* AMV files carry no stream type: stream 0 is video, 1 is audio */
            if(amv_file_format)
                tag1 = stream_index ? MKTAG('a','u','d','s') : MKTAG('v','i','d','s');
#ifdef DEBUG
            print_tag("strh", tag1, -1);
#endif
            if(tag1 == MKTAG('i', 'a', 'v', 's') || tag1 == MKTAG('i', 'v', 'a', 's')){
                int64_t dv_dur;
                /*
                 * After some consideration -- I don't think we
                 * have to support anything but DV in type1 AVIs.
                 */
                if (s->nb_streams != 1)
                    goto fail;
                if (handler != MKTAG('d', 'v', 's', 'd') &&
                    handler != MKTAG('d', 'v', 'h', 'd') &&
                    handler != MKTAG('d', 'v', 's', 'l'))
                   goto fail;
                /* replace the placeholder stream with the DV demuxer's own streams */
                ast = s->streams[0]->priv_data;
                av_freep(&s->streams[0]->codec->extradata);
                av_freep(&s->streams[0]);
                s->nb_streams = 0;
                if (CONFIG_DV_DEMUXER) {
                    avi->dv_demux = dv_init_demux(s);
                    if (!avi->dv_demux)
                        goto fail;
                }
                s->streams[0]->priv_data = ast;
                url_fskip(pb, 3 * 4);
                ast->scale = get_le32(pb);
                ast->rate = get_le32(pb);
                url_fskip(pb, 4);  /* start time */
                dv_dur = get_le32(pb);
                if (ast->scale > 0 && ast->rate > 0 && dv_dur > 0) {
                    dv_dur *= AV_TIME_BASE;
                    s->duration = av_rescale(dv_dur, ast->scale, ast->rate);
                }
                /*
                 * else, leave duration alone; timing estimation in utils.c
                 * will make a guess based on bitrate.
                 */
                stream_index = s->nb_streams - 1;
                url_fskip(pb, size - 9*4);
                break;
            }
            assert(stream_index < s->nb_streams);
            st->codec->stream_codec_tag= handler;
            get_le32(pb); /* flags */
            get_le16(pb); /* priority */
            get_le16(pb); /* language */
            get_le32(pb); /* initial frame */
            ast->scale = get_le32(pb);
            ast->rate = get_le32(pb);
            /* repair broken scale/rate pairs from the avih frame period,
               falling back to 25 fps */
            if(!(ast->scale && ast->rate)){
                av_log(s, AV_LOG_WARNING, "scale/rate is %u/%u which is invalid. (This file has been generated by broken software.)\n", ast->scale, ast->rate);
                if(frame_period){
                    ast->rate = 1000000;
                    ast->scale = frame_period;
                }else{
                    ast->rate = 25;
                    ast->scale = 1;
                }
            }
            av_set_pts_info(st, 64, ast->scale, ast->rate);
            ast->cum_len=get_le32(pb); /* start */
            st->nb_frames = get_le32(pb);
            st->start_time = 0;
            get_le32(pb); /* buffer size */
            get_le32(pb); /* quality */
            ast->sample_size = get_le32(pb); /* sample ssize */
            ast->cum_len *= FFMAX(1, ast->sample_size);
//            av_log(s, AV_LOG_DEBUG, "%d %d %d %d\n", ast->rate, ast->scale, ast->start, ast->sample_size);
            switch(tag1) {
            case MKTAG('v', 'i', 'd', 's'):
                codec_type = AVMEDIA_TYPE_VIDEO;
                ast->sample_size = 0;
                break;
            case MKTAG('a', 'u', 'd', 's'):
                codec_type = AVMEDIA_TYPE_AUDIO;
                break;
            case MKTAG('t', 'x', 't', 's'):
                //FIXME
                codec_type = AVMEDIA_TYPE_DATA; //AVMEDIA_TYPE_SUB ? FIXME
                break;
            case MKTAG('d', 'a', 't', 's'):
                codec_type = AVMEDIA_TYPE_DATA;
                break;
            default:
                av_log(s, AV_LOG_ERROR, "unknown stream type %X\n", tag1);
                goto fail;
            }
            if(ast->sample_size == 0)
                st->duration = st->nb_frames;
            ast->frame_offset= ast->cum_len;
            url_fskip(pb, size - 12 * 4);
            break;
        case MKTAG('s', 't', 'r', 'f'):
            /* stream header */
            if (stream_index >= (unsigned)s->nb_streams || avi->dv_demux) {
                url_fskip(pb, size);
            } else {
                uint64_t cur_pos = url_ftell(pb);
                /* clamp the declared size to what is left in the LIST */
                if (cur_pos < list_end)
                    size = FFMIN(size, list_end - cur_pos);
                st = s->streams[stream_index];
                switch(codec_type) {
                case AVMEDIA_TYPE_VIDEO:
                    if(amv_file_format){
                        /* AMV has no BITMAPINFOHEADER; use avih dimensions */
                        st->codec->width=avih_width;
                        st->codec->height=avih_height;
                        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                        st->codec->codec_id = CODEC_ID_AMV;
                        url_fskip(pb, size);
                        break;
                    }
                    get_le32(pb); /* size */
                    st->codec->width = get_le32(pb);
                    st->codec->height = (int32_t)get_le32(pb);
                    get_le16(pb); /* panes */
                    st->codec->bits_per_coded_sample= get_le16(pb); /* depth */
                    tag1 = get_le32(pb);
                    get_le32(pb); /* ImageSize */
                    get_le32(pb); /* XPelsPerMeter */
                    get_le32(pb); /* YPelsPerMeter */
                    get_le32(pb); /* ClrUsed */
                    get_le32(pb); /* ClrImportant */
                    if (tag1 == MKTAG('D', 'X', 'S', 'B') || tag1 == MKTAG('D','X','S','A')) {
                        st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
                        st->codec->codec_tag = tag1;
                        st->codec->codec_id = CODEC_ID_XSUB;
                        break;
                    }
                    /* bytes beyond the 40-byte BITMAPINFOHEADER become extradata */
                    if(size > 10*4 && size<(1<<30)){
                        st->codec->extradata_size= size - 10*4;
                        st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
                        if (!st->codec->extradata) {
                            st->codec->extradata_size= 0;
                            return AVERROR(ENOMEM);
                        }
                        get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
                    }
                    if(st->codec->extradata_size & 1) //FIXME check if the encoder really did this correctly
                        get_byte(pb);
                    /* Extract palette from extradata if bpp <= 8. */
                    /* This code assumes that extradata contains only palette. */
                    /* This is true for all paletted codecs implemented in FFmpeg. */
                    if (st->codec->extradata_size && (st->codec->bits_per_coded_sample <= 8)) {
                        st->codec->palctrl = av_mallocz(sizeof(AVPaletteControl));
#if HAVE_BIGENDIAN
                        for (i = 0; i < FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)/4; i++)
                            st->codec->palctrl->palette[i] = bswap_32(((uint32_t*)st->codec->extradata)[i]);
#else
                        memcpy(st->codec->palctrl->palette, st->codec->extradata,
                               FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
#endif
                        st->codec->palctrl->palette_changed = 1;
                    }
#ifdef DEBUG
                    print_tag("video", tag1, 0);
#endif
                    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                    st->codec->codec_tag = tag1;
                    st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, tag1);
                    st->need_parsing = AVSTREAM_PARSE_HEADERS; // This is needed to get the pict type which is necessary for generating correct pts.
                    // Support "Resolution 1:1" for Avid AVI Codec
                    if(tag1 == MKTAG('A', 'V', 'R', 'n') &&
                       st->codec->extradata_size >= 31 &&
                       !memcmp(&st->codec->extradata[28], "1:1", 3))
                        st->codec->codec_id = CODEC_ID_RAWVIDEO;
                    /* raw video with negative height is stored bottom-up; tell
                       the decoder via a marker appended to the extradata */
                    if(st->codec->codec_tag==0 && st->codec->height > 0 && st->codec->extradata_size < 1U<<30){
                        st->codec->extradata_size+= 9;
                        st->codec->extradata= av_realloc(st->codec->extradata, st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
                        if(st->codec->extradata)
                            memcpy(st->codec->extradata + st->codec->extradata_size - 9, "BottomUp", 9);
                    }
                    st->codec->height= FFABS(st->codec->height);
//                    url_fskip(pb, size - 5 * 4);
                    break;
                case AVMEDIA_TYPE_AUDIO:
                    ff_get_wav_header(pb, st->codec, size);
                    if(ast->sample_size && st->codec->block_align && ast->sample_size != st->codec->block_align){
                        av_log(s, AV_LOG_WARNING, "sample size (%d) != block align (%d)\n", ast->sample_size, st->codec->block_align);
                        ast->sample_size= st->codec->block_align;
                    }
                    if (size&1) /* 2-aligned (fix for Stargate SG-1 - 3x18 - Shades of Grey.avi) */
                        url_fskip(pb, 1);
                    /* Force parsing as several audio frames can be in
                     * one packet and timestamps refer to packet start. */
                    st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
                    /* ADTS header is in extradata, AAC without header must be
                     * stored as exact frames. Parser not needed and it will
                     * fail. */
                    if (st->codec->codec_id == CODEC_ID_AAC && st->codec->extradata_size)
                        st->need_parsing = AVSTREAM_PARSE_NONE;
                    /* AVI files with Xan DPCM audio (wrongly) declare PCM
                     * audio in the header but have Axan as stream_code_tag. */
                    if (st->codec->stream_codec_tag == AV_RL32("Axan")){
                        st->codec->codec_id = CODEC_ID_XAN_DPCM;
                        st->codec->codec_tag = 0;
                    }
                    if (amv_file_format)
                        st->codec->codec_id = CODEC_ID_ADPCM_IMA_AMV;
                    break;
                default:
                    st->codec->codec_type = AVMEDIA_TYPE_DATA;
                    st->codec->codec_id= CODEC_ID_NONE;
                    st->codec->codec_tag= 0;
                    url_fskip(pb, size);
                    break;
                }
            }
            break;
        case MKTAG('i', 'n', 'd', 'x'):
            /* OpenDML index; only usable on seekable input */
            i= url_ftell(pb);
            if(!url_is_streamed(pb) && !(s->flags & AVFMT_FLAG_IGNIDX)){
                read_braindead_odml_indx(s, 0);
            }
            url_fseek(pb, i+size, SEEK_SET);
            break;
        case MKTAG('v', 'p', 'r', 'p'):
            /* video properties: derive the sample aspect ratio */
            if(stream_index < (unsigned)s->nb_streams && size > 9*4){
                AVRational active, active_aspect;
                st = s->streams[stream_index];
                get_le32(pb);
                get_le32(pb);
                get_le32(pb);
                get_le32(pb);
                get_le32(pb);
                active_aspect.den= get_le16(pb);
                active_aspect.num= get_le16(pb);
                active.num = get_le32(pb);
                active.den = get_le32(pb);
                get_le32(pb); //nbFieldsPerFrame
                if(active_aspect.num && active_aspect.den && active.num && active.den){
                    st->sample_aspect_ratio= av_div_q(active_aspect, active);
                    //av_log(s, AV_LOG_ERROR, "vprp %d/%d %d/%d\n", active_aspect.num, active_aspect.den, active.num, active.den);
                }
                size -= 9*4;
            }
            url_fseek(pb, size, SEEK_CUR);
            break;
        case MKTAG('s', 't', 'r', 'n'):
            /* stream name -> metadata on the most recent stream */
            if(s->nb_streams){
                avi_read_tag(s, s->streams[s->nb_streams-1], tag, size);
                break;
            }
            /* fallthrough when no stream exists yet */
        default:
            if(size > 1000000){
                av_log(s, AV_LOG_ERROR, "Something went wrong during header parsing, "
                                        "I will ignore it and try to continue anyway.\n");
                avi->movi_list = url_ftell(pb) - 4;
                avi->movi_end = url_fsize(pb);
                goto end_of_header;
            }
            /* skip tag */
            size += (size & 1);
            url_fskip(pb, size);
            break;
        }
    }
 end_of_header:
    /* check stream number */
    if (stream_index != s->nb_streams - 1) {
 fail:
        return -1;
    }
    if(!avi->index_loaded && !url_is_streamed(pb))
        avi_load_index(s);
    avi->index_loaded = 1;
    avi->non_interleaved |= guess_ni_flag(s);
    /* a non-interleaved file without any index cannot be demuxed that way */
    for(i=0; i<s->nb_streams; i++){
        AVStream *st = s->streams[i];
        if(st->nb_index_entries)
            break;
    }
    if(i==s->nb_streams && avi->non_interleaved) {
        av_log(s, AV_LOG_WARNING, "non-interleaved AVI without index, switching to interleaved\n");
        avi->non_interleaved=0;
    }
    if(avi->non_interleaved) {
        av_log(s, AV_LOG_INFO, "non-interleaved AVI\n");
        clean_index(s);
    }
    return 0;
}
/* Decode a two-ASCII-digit stream id ("00".."99") into an integer.
 * Anything that is not two decimal digits maps to 100, which can never
 * match a real stream index. */
static int get_stream_idx(int *d){
    if (d[0] < '0' || d[0] > '9' || d[1] < '0' || d[1] > '9')
        return 100; //invalid stream ID
    return (d[0] - '0') * 10 + (d[1] - '0');
}
/* Deliver the next packet. For DV type-1 files the DV demuxer is queried
 * first. For non-interleaved files the stream with the lowest pending
 * timestamp is located via the index and the file position is moved there.
 * Otherwise chunks are consumed sequentially, resynchronizing on the
 * ##dc/##wb fourcc pattern when the stream position is unknown. */
static int avi_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = s->pb;
    int n, d[8];
    unsigned int size;
    int64_t i, sync;
    void* dstr;
    if (CONFIG_DV_DEMUXER && avi->dv_demux) {
        /* flush any packet the DV demuxer already has buffered */
        int size = dv_get_packet(avi->dv_demux, pkt);
        if (size >= 0)
            return size;
    }
    if(avi->non_interleaved){
        /* pick the stream whose next chunk has the smallest timestamp
           (compared in a common AV_TIME_BASE-derived time base) */
        int best_stream_index = 0;
        AVStream *best_st= NULL;
        AVIStream *best_ast;
        int64_t best_ts= INT64_MAX;
        int i;
        for(i=0; i<s->nb_streams; i++){
            AVStream *st = s->streams[i];
            AVIStream *ast = st->priv_data;
            int64_t ts= ast->frame_offset;
            int64_t last_ts;
            if(!st->nb_index_entries)
                continue;
            last_ts = st->index_entries[st->nb_index_entries - 1].timestamp;
            if(!ast->remaining && ts > last_ts)
                continue;
            ts = av_rescale_q(ts, st->time_base, (AVRational){FFMAX(1, ast->sample_size), AV_TIME_BASE});
//            av_log(s, AV_LOG_DEBUG, "%"PRId64" %d/%d %"PRId64"\n", ts, st->time_base.num, st->time_base.den, ast->frame_offset);
            if(ts < best_ts){
                best_ts= ts;
                best_st= st;
                best_stream_index= i;
            }
        }
        if(!best_st)
            return -1;
        best_ast = best_st->priv_data;
        best_ts = av_rescale_q(best_ts, (AVRational){FFMAX(1, best_ast->sample_size), AV_TIME_BASE}, best_st->time_base);
        if(best_ast->remaining)
            i= av_index_search_timestamp(best_st, best_ts, AVSEEK_FLAG_ANY | AVSEEK_FLAG_BACKWARD);
        else{
            i= av_index_search_timestamp(best_st, best_ts, AVSEEK_FLAG_ANY);
            if(i>=0)
                best_ast->frame_offset= best_st->index_entries[i].timestamp;
        }
//        av_log(s, AV_LOG_DEBUG, "%d\n", i);
        if(i>=0){
            /* seek into the chunk, skipping the part already delivered */
            int64_t pos= best_st->index_entries[i].pos;
            pos += best_ast->packet_size - best_ast->remaining;
            url_fseek(s->pb, pos + 8, SEEK_SET);
//        av_log(s, AV_LOG_DEBUG, "pos=%"PRId64"\n", pos);
            assert(best_ast->remaining <= best_ast->packet_size);
            avi->stream_index= best_stream_index;
            if(!best_ast->remaining)
                best_ast->packet_size=
                best_ast->remaining= best_st->index_entries[i].size;
        }
    }
resync:
    /* a chunk is currently open: hand out (a slice of) its payload */
    if(avi->stream_index >= 0){
        AVStream *st= s->streams[ avi->stream_index ];
        AVIStream *ast= st->priv_data;
        int size, err;
        if(ast->sample_size <= 1) // minorityreport.AVI block_align=1024 sample_size=1 IMA-ADPCM
            size= INT_MAX;
        else if(ast->sample_size < 32)
            // arbitrary multiplier to avoid tiny packets for raw PCM data
            size= 1024*ast->sample_size;
        else
            size= ast->sample_size;
        if(size > ast->remaining)
            size= ast->remaining;
        avi->last_pkt_pos= url_ftell(pb);
        err= av_get_packet(pb, pkt, size);
        if(err<0)
            return err;
        /* append a pending palette change to the packet data */
        if(ast->has_pal && pkt->data && pkt->size<(unsigned)INT_MAX/2){
            void *ptr= av_realloc(pkt->data, pkt->size + 4*256 + FF_INPUT_BUFFER_PADDING_SIZE);
            if(ptr){
                ast->has_pal=0;
                pkt->size += 4*256;
                pkt->data= ptr;
                memcpy(pkt->data + pkt->size - 4*256, ast->pal, 4*256);
            }else
                av_log(s, AV_LOG_ERROR, "Failed to append palette\n");
        }
        if (CONFIG_DV_DEMUXER && avi->dv_demux) {
            /* feed raw DV data to the DV demuxer, preserving the destructor */
            dstr = pkt->destruct;
            size = dv_produce_packet(avi->dv_demux, pkt,
                                    pkt->data, pkt->size);
            pkt->destruct = dstr;
            pkt->flags |= AV_PKT_FLAG_KEY;
        } else {
            /* XXX: How to handle B-frames in AVI? */
            pkt->dts = ast->frame_offset;
//                pkt->dts += ast->start;
            if(ast->sample_size)
                pkt->dts /= ast->sample_size;
//av_log(s, AV_LOG_DEBUG, "dts:%"PRId64" offset:%"PRId64" %d/%d smpl_siz:%d base:%d st:%d size:%d\n", pkt->dts, ast->frame_offset, ast->scale, ast->rate, ast->sample_size, AV_TIME_BASE, avi->stream_index, size);
            pkt->stream_index = avi->stream_index;
            if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                /* keyframe flag comes from the index entry for this frame */
                AVIndexEntry *e;
                int index;
                assert(st->index_entries);
                index= av_index_search_timestamp(st, ast->frame_offset, 0);
                e= &st->index_entries[index];
                if(index >= 0 && e->timestamp == ast->frame_offset){
                    if (e->flags & AVINDEX_KEYFRAME)
                        pkt->flags |= AV_PKT_FLAG_KEY;
                }
            } else {
                pkt->flags |= AV_PKT_FLAG_KEY;
            }
            /* advance in bytes (fixed sample size) or frames */
            if(ast->sample_size)
                ast->frame_offset += pkt->size;
            else
                ast->frame_offset++;
        }
        ast->remaining -= size;
        if(!ast->remaining){
            avi->stream_index= -1;
            ast->packet_size= 0;
        }
        return size;
    }
    /* no open chunk: scan byte-by-byte for the next plausible chunk header
       using a sliding 8-byte window (fourcc + little-endian size) */
    memset(d, -1, sizeof(int)*8);
    for(i=sync=url_ftell(pb); !url_feof(pb); i++) {
        int j;
        for(j=0; j<7; j++)
            d[j]= d[j+1];
        d[7]= get_byte(pb);
        size= d[4] + (d[5]<<8) + (d[6]<<16) + (d[7]<<24);
        n= get_stream_idx(d+2);
//av_log(s, AV_LOG_DEBUG, "%X %X %X %X %X %X %X %X %"PRId64" %d %d\n", d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], i, size, n);
        if(i + (uint64_t)size > avi->fsize || d[0]<0)
            continue;
        //parse ix##
        if( (d[0] == 'i' && d[1] == 'x' && n < s->nb_streams)
        //parse JUNK
           ||(d[0] == 'J' && d[1] == 'U' && d[2] == 'N' && d[3] == 'K')
           ||(d[0] == 'i' && d[1] == 'd' && d[2] == 'x' && d[3] == '1')){
            url_fskip(pb, size);
//av_log(s, AV_LOG_DEBUG, "SKIP\n");
            goto resync;
        }
        //parse stray LIST
        if(d[0] == 'L' && d[1] == 'I' && d[2] == 'S' && d[3] == 'T'){
            url_fskip(pb, 4);
            goto resync;
        }
        n= get_stream_idx(d);
        if(!((i-avi->last_pkt_pos)&1) && get_stream_idx(d+1) < s->nb_streams)
            continue;
        //detect ##ix chunk and skip
        if(d[2] == 'i' && d[3] == 'x' && n < s->nb_streams){
            url_fskip(pb, size);
            goto resync;
        }
        //parse ##dc/##wb
        if(n < s->nb_streams){
            AVStream *st;
            AVIStream *ast;
            st = s->streams[n];
            ast = st->priv_data;
            if(s->nb_streams>=2){
                AVStream *st1 = s->streams[1];
                AVIStream *ast1= st1->priv_data;
                //workaround for broken small-file-bug402.avi
                if( d[2] == 'w' && d[3] == 'b'
                   && n==0
                   && st ->codec->codec_type == AVMEDIA_TYPE_VIDEO
                   && st1->codec->codec_type == AVMEDIA_TYPE_AUDIO
                   && ast->prefix == 'd'*256+'c'
                   && (d[2]*256+d[3] == ast1->prefix || !ast1->prefix_count)
                  ){
                    n=1;
                    st = st1;
                    ast = ast1;
                    av_log(s, AV_LOG_WARNING, "Invalid stream + prefix combination, assuming audio.\n");
                }
            }
            /* honor discard settings, but keep counters consistent */
            if( (st->discard >= AVDISCARD_DEFAULT && size==0)
               /*|| (st->discard >= AVDISCARD_NONKEY && !(pkt->flags & AV_PKT_FLAG_KEY))*/ //FIXME needs a little reordering
               || st->discard >= AVDISCARD_ALL){
                if(ast->sample_size) ast->frame_offset += size;
                else ast->frame_offset++;
                url_fskip(pb, size);
                goto resync;
            }
            /* ##pc: in-band palette change chunk */
            if (d[2] == 'p' && d[3] == 'c' && size<=4*256+4) {
                int k = get_byte(pb);
                int last = (k + get_byte(pb) - 1) & 0xFF;
                get_le16(pb); //flags
                for (; k <= last; k++)
                    ast->pal[k] = get_be32(pb)>>8;// b + (g << 8) + (r << 16);
                ast->has_pal= 1;
                goto resync;
            } else if( ((ast->prefix_count<5 || sync+9 > i) && d[2]<128 && d[3]<128) ||
                       d[2]*256+d[3] == ast->prefix /*||
                       (d[2] == 'd' && d[3] == 'c') ||
                       (d[2] == 'w' && d[3] == 'b')*/) {
//av_log(s, AV_LOG_DEBUG, "OK\n");
                /* track the chunk-type prefix seen for this stream */
                if(d[2]*256+d[3] == ast->prefix)
                    ast->prefix_count++;
                else{
                    ast->prefix= d[2]*256+d[3];
                    ast->prefix_count= 0;
                }
                avi->stream_index= n;
                ast->packet_size= size + 8;
                ast->remaining= size;
                /* grow the index on the fly while reading sequentially */
                if(size || !ast->sample_size){
                    uint64_t pos= url_ftell(pb) - 8;
                    if(!st->index_entries || !st->nb_index_entries || st->index_entries[st->nb_index_entries - 1].pos < pos){
                        av_add_index_entry(st, pos, ast->frame_offset, size, 0, AVINDEX_KEYFRAME);
                    }
                }
                goto resync;
            }
        }
    }
    return AVERROR_EOF;
}
/* XXX: We make the implicit supposition that the positions are sorted
for each stream. */
/* Parse a legacy 'idx1' chunk of the given byte size (16 bytes per entry)
 * and distribute the entries to the per-stream indexes. Also detects
 * non-interleaved files via repeated positions. Returns 0 on success,
 * -1 on an empty/truncated index. */
static int avi_read_idx1(AVFormatContext *s, int size)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = s->pb;
    int nb_index_entries, i;
    AVStream *st;
    AVIStream *ast;
    unsigned int index, tag, flags, pos, len;
    unsigned last_pos= -1;
    nb_index_entries = size / 16;
    if (nb_index_entries <= 0)
        return -1;
    /* Read the entries and sort them in each stream component. */
    for(i = 0; i < nb_index_entries; i++) {
        tag = get_le32(pb);
        flags = get_le32(pb);
        pos = get_le32(pb);
        len = get_le32(pb);
#if defined(DEBUG_SEEK)
        av_log(s, AV_LOG_DEBUG, "%d: tag=0x%x flags=0x%x pos=0x%x len=%d/",
               i, tag, flags, pos, len);
#endif
        /* positions may be absolute or relative to the movi list; if the
           first one already points past movi, treat them as absolute */
        if(i==0 && pos > avi->movi_list)
            avi->movi_list= 0; //FIXME better check
        pos += avi->movi_list;
        /* the fourcc's first two bytes are the ASCII stream number */
        index = ((tag & 0xff) - '0') * 10;
        index += ((tag >> 8) & 0xff) - '0';
        if (index >= s->nb_streams)
            continue;
        st = s->streams[index];
        ast = st->priv_data;
#if defined(DEBUG_SEEK)
        av_log(s, AV_LOG_DEBUG, "%d cum_len=%"PRId64"\n", len, ast->cum_len);
#endif
        if(url_feof(pb))
            return -1;
        if(last_pos == pos)
            avi->non_interleaved= 1;
        else if(len || !ast->sample_size)
            av_add_index_entry(st, pos, ast->cum_len, len, 0, (flags&AVIIF_INDEX) ? AVINDEX_KEYFRAME : 0);
        /* cum_len counts bytes for fixed-sample-size streams, frames otherwise */
        if(ast->sample_size)
            ast->cum_len += len;
        else
            ast->cum_len ++;
        last_pos= pos;
    }
    return 0;
}
/* Heuristically decide whether the file is non-interleaved: returns 1 when
 * the latest first-chunk position of any stream lies beyond the earliest
 * last-chunk position of any other stream, i.e. the streams' data ranges
 * do not overlap in file order. Restores the read position before
 * returning. */
static int guess_ni_flag(AVFormatContext *s){
    int i;
    int64_t last_start=0;
    int64_t first_end= INT64_MAX;
    int64_t oldpos= url_ftell(s->pb);
    for(i=0; i<s->nb_streams; i++){
        AVStream *st = s->streams[i];
        int n= st->nb_index_entries;
        unsigned int size;
        if(n <= 0)
            continue;
        if(n >= 2){
            /* if the first chunk's stored size overlaps the second entry,
               the index is suspect: force the non-interleaved verdict */
            int64_t pos= st->index_entries[0].pos;
            url_fseek(s->pb, pos + 4, SEEK_SET);
            size= get_le32(s->pb);
            if(pos + size > st->index_entries[1].pos)
                last_start= INT64_MAX;
        }
        if(st->index_entries[0].pos > last_start)
            last_start= st->index_entries[0].pos;
        if(st->index_entries[n-1].pos < first_end)
            first_end= st->index_entries[n-1].pos;
    }
    url_fseek(s->pb, oldpos, SEEK_SET);
    return last_start > first_end;
}
/* Look for the legacy 'idx1' index chunk after the movi list and parse it.
 * The read position is restored afterwards. Returns 0 when an index was
 * loaded, -1 otherwise (e.g. truncated file or no idx1 chunk). */
static int avi_load_index(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = s->pb;
    uint32_t tag, size;
    int64_t pos= url_ftell(pb);
    int ret = -1;
    if (url_fseek(pb, avi->movi_end, SEEK_SET) < 0)
        goto the_end; // maybe truncated file
#ifdef DEBUG_SEEK
    printf("movi_end=0x%"PRIx64"\n", avi->movi_end);
#endif
    /* scan chunks following the movi list until idx1 is found or EOF */
    for(;;) {
        if (url_feof(pb))
            break;
        tag = get_le32(pb);
        size = get_le32(pb);
#ifdef DEBUG_SEEK
        printf("tag=%c%c%c%c size=0x%x\n",
               tag & 0xff,
               (tag >> 8) & 0xff,
               (tag >> 16) & 0xff,
               (tag >> 24) & 0xff,
               size);
#endif
        switch(tag) {
        case MKTAG('i', 'd', 'x', '1'):
            if (avi_read_idx1(s, size) < 0)
                goto skip;
            ret = 0;
            goto the_end;
            break;
        default:
        skip:
            /* chunks are padded to even size */
            size += (size & 1);
            if (url_fseek(pb, size, SEEK_CUR) < 0)
                goto the_end; // something is wrong here
            break;
        }
    }
 the_end:
    url_fseek(pb, pos, SEEK_SET);
    return ret;
}
/* Seek to the given timestamp on stream_index: locate the index entry on
 * the reference stream, then align every other stream's position/counters
 * to the resulting file offset. Returns 0 on success, -1 when no suitable
 * index entry exists. */
static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    AVIContext *avi = s->priv_data;
    AVStream *st;
    int i, index;
    int64_t pos;
    AVIStream *ast;
    if (!avi->index_loaded) {
        /* we only load the index on demand */
        avi_load_index(s);
        avi->index_loaded = 1;
    }
    assert(stream_index>= 0);
    st = s->streams[stream_index];
    ast= st->priv_data;
    /* index timestamps are in bytes for fixed-sample-size streams */
    index= av_index_search_timestamp(st, timestamp * FFMAX(ast->sample_size, 1), flags);
    if(index<0)
        return -1;
    /* find the position */
    pos = st->index_entries[index].pos;
    timestamp = st->index_entries[index].timestamp / FFMAX(ast->sample_size, 1);
//    av_log(s, AV_LOG_DEBUG, "XX %"PRId64" %d %"PRId64"\n", timestamp, index, st->index_entries[index].timestamp);
    if (CONFIG_DV_DEMUXER && avi->dv_demux) {
        /* One and only one real stream for DV in AVI, and it has video  */
        /* offsets. Calling with other stream indexes should have failed */
        /* the av_index_search_timestamp call above. */
        assert(stream_index == 0);
        /* Feed the DV video stream version of the timestamp to the */
        /* DV demux so it can synthesize correct timestamps. */
        dv_offset_reset(avi->dv_demux, timestamp);
        url_fseek(s->pb, pos, SEEK_SET);
        avi->stream_index= -1;
        return 0;
    }
    for(i = 0; i < s->nb_streams; i++) {
        AVStream *st2 = s->streams[i];
        AVIStream *ast2 = st2->priv_data;
        /* drop any partially-consumed chunk state */
        ast2->packet_size=
        ast2->remaining= 0;
        if (st2->nb_index_entries <= 0)
            continue;
//        assert(st2->codec->block_align);
        assert((int64_t)st2->time_base.num*ast2->rate == (int64_t)st2->time_base.den*ast2->scale);
        index = av_index_search_timestamp(
                st2,
                av_rescale_q(timestamp, st->time_base, st2->time_base) * FFMAX(ast2->sample_size, 1),
                flags | AVSEEK_FLAG_BACKWARD);
        if(index<0)
            index=0;
        /* for interleaved files, snap this stream's entry toward the
           reference stream's file position */
        if(!avi->non_interleaved){
            while(index>0 && st2->index_entries[index].pos > pos)
                index--;
            while(index+1 < st2->nb_index_entries && st2->index_entries[index].pos < pos)
                index++;
        }
//        av_log(s, AV_LOG_DEBUG, "%"PRId64" %d %"PRId64"\n", timestamp, index, st2->index_entries[index].timestamp);
        /* extract the current frame number */
        ast2->frame_offset = st2->index_entries[index].timestamp;
    }
    /* do the seek */
    url_fseek(s->pb, pos, SEEK_SET);
    avi->stream_index= -1;
    return 0;
}
/* Release demuxer-owned allocations: the per-stream palette control
 * blocks (allocated in avi_read_header) and the DV demuxer context.
 * Stream and codec contexts themselves are freed by the generic lavf
 * close path. */
static int avi_read_close(AVFormatContext *s)
{
    int i;
    AVIContext *avi = s->priv_data;
    for(i=0;i<s->nb_streams;i++) {
        AVStream *st = s->streams[i];
        av_freep(&st->codec->palctrl);
    }
    /* av_freep handles NULL itself (the old "if (avi->dv_demux)" guard was
     * redundant) and clears the pointer, preventing a dangling reference */
    av_freep(&avi->dv_demux);
    return 0;
}
/* Probe callback: report maximum score when the buffer starts with one of
 * the known AVI/AMV/ON2 signatures. */
static int avi_probe(AVProbeData *p)
{
    int i;

    /* A signature matches when bytes 0-3 (RIFF/ON2 fourcc) and bytes 8-11
     * (form type) both agree; bytes 4-7 hold the file size and are not
     * compared. */
    for (i = 0; avi_headers[i][0]; i++) {
        if (memcmp(p->buf, avi_headers[i], 4) == 0 &&
            memcmp(p->buf + 8, avi_headers[i] + 4, 4) == 0)
            return AVPROBE_SCORE_MAX;
    }
    return 0;
}
/* Demuxer registration: probe/header/packet/close/seek callbacks plus the
 * RIFF INFO -> generic metadata key conversion table. */
AVInputFormat avi_demuxer = {
    "avi",
    NULL_IF_CONFIG_SMALL("AVI format"),
    sizeof(AVIContext),
    avi_probe,
    avi_read_header,
    avi_read_packet,
    avi_read_close,
    avi_read_seek,
    .metadata_conv = ff_avi_metadata_conv,
};
| 123linslouis-android-video-cutter | jni/libavformat/avidec.c | C | asf20 | 40,541 |
/*
* nut
* Copyright (c) 2004-2007 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/tree.h"
#include "nut.h"
/* fourcc <-> codec id mapping for subtitle streams in NUT files */
const AVCodecTag ff_nut_subtitle_tags[] = {
    { CODEC_ID_TEXT , MKTAG('U', 'T', 'F', '8') },
    { CODEC_ID_SSA , MKTAG('S', 'S', 'A', 0 ) },
    { CODEC_ID_DVD_SUBTITLE, MKTAG('D', 'V', 'D', 'S') },
    { CODEC_ID_DVB_SUBTITLE, MKTAG('D', 'V', 'B', 'S') },
    { CODEC_ID_NONE , 0 }
};
/* Set every stream's last_pts to val (given in time_base units),
 * converted into the stream's own time base with downward rounding. */
void ff_nut_reset_ts(NUTContext *nut, AVRational time_base, int64_t val){
    int i;
    for (i = 0; i < nut->avf->nb_streams; i++) {
        StreamContext *stream = &nut->stream[i];
        stream->last_pts = av_rescale_rnd(
            val,
            time_base.num * (int64_t)stream->time_base->den,
            time_base.den * (int64_t)stream->time_base->num,
            AV_ROUND_DOWN);
    }
}
/* Reconstruct a full pts from its low msb_pts_shift bits: choose the value
 * congruent to lsb (mod 2^msb_pts_shift) closest above last_pts - mask/2.
 * The shift is done in int64_t: the original "1<<shift" was int arithmetic
 * and would overflow (undefined behavior) for shifts >= 31. */
int64_t ff_lsb2full(StreamContext *stream, int64_t lsb){
    int64_t mask = ((int64_t)1 << stream->msb_pts_shift) - 1;
    int64_t delta= stream->last_pts - mask/2;
    return ((lsb - delta)&mask) + delta;
}
/* Order syncpoints by file position. The ((x-y)>>32) - ((y-x)>>32) form
 * yields -1/0/+1 without truncating the 64-bit difference to int (a plain
 * "return a->pos - b->pos" would lose the high bits). */
int ff_nut_sp_pos_cmp(const Syncpoint *a, const Syncpoint *b){
    return ((a->pos - b->pos) >> 32) - ((b->pos - a->pos) >> 32);
}
/* Order syncpoints by timestamp, using the same overflow-safe 64-bit
 * comparison trick as ff_nut_sp_pos_cmp. */
int ff_nut_sp_pts_cmp(const Syncpoint *a, const Syncpoint *b){
    return ((a->ts - b->ts) >> 32) - ((b->ts - a->ts) >> 32);
}
/* Insert a syncpoint (file position, back pointer, timestamp) into the
 * position-ordered syncpoint tree. Duplicates and allocation failures are
 * silently dropped. */
void ff_nut_add_sp(NUTContext *nut, int64_t pos, int64_t back_ptr, int64_t ts){
    Syncpoint *sp= av_mallocz(sizeof(Syncpoint));
    struct AVTreeNode *node= av_mallocz(av_tree_node_size);
    /* the original dereferenced sp without checking the allocations */
    if (!sp || !node) {
        av_free(sp);
        av_free(node);
        return;
    }
    sp->pos= pos;
    sp->back_ptr= back_ptr;
    sp->ts= ts;
    av_tree_insert(&nut->syncpoints, sp, (void *) ff_nut_sp_pos_cmp, &node);
    /* if an equal syncpoint already existed, the node was not consumed
       and the duplicate element must be released */
    if(node){
        av_free(sp);
        av_free(node);
    }
}
/* av_tree_enumerate callback: frees one stored element (opaque unused). */
static int enu_free(void *opaque, void *elem)
{
    av_free(elem);
    return 0;
}
/* Free all syncpoints stored in the tree, then the tree itself.
 * NOTE(review): nut->syncpoints is not reset to NULL here — callers must
 * not reuse it without reinitializing. */
void ff_nut_free_sp(NUTContext *nut)
{
    av_tree_enumerate(nut->syncpoints, NULL, NULL, enu_free);
    av_tree_destroy(nut->syncpoints);
}
/* textual disposition names <-> AV_DISPOSITION_* flags; the empty-string
 * entry terminates the table */
const Dispositions ff_nut_dispositions[] = {
    {"default" , AV_DISPOSITION_DEFAULT},
    {"dub" , AV_DISPOSITION_DUB},
    {"original" , AV_DISPOSITION_ORIGINAL},
    {"comment" , AV_DISPOSITION_COMMENT},
    {"lyrics" , AV_DISPOSITION_LYRICS},
    {"karaoke" , AV_DISPOSITION_KARAOKE},
    {"" , 0}
};
/* NUT info-frame key -> generic FFmpeg metadata key translation table */
const AVMetadataConv ff_nut_metadata_conv[] = {
    { "Author", "artist" },
    { "X-CreationTime", "date" },
    { "CreationTime", "date" },
    { "SourceFilename", "filename" },
    { "X-Language", "language" },
    { "X-Disposition", "disposition" },
    { "X-Replaces", "replaces" },
    { "X-Depends", "depends" },
    { "X-Uses", "uses" },
    { "X-UsesFont", "usesfont" },
    { 0 },
};
| 123linslouis-android-video-cutter | jni/libavformat/nut.c | C | asf20 | 3,468 |
/*
* APE tag handling
* Copyright (c) 2007 Benjamin Zores <ben@geexbox.org>
* based upon libdemac from Dave Chapman.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_APETAG_H
#define AVFORMAT_APETAG_H
#include "avformat.h"
/**
 * Read and parse an APE tag
 */
void ff_ape_parse_tag(AVFormatContext *s);
#endif /* AVFORMAT_APETAG_H */
| 123linslouis-android-video-cutter | jni/libavformat/apetag.h | C | asf20 | 1,072 |
/*
* RTMP packet utilities
* Copyright (c) 2009 Kostya Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_RTMPPKT_H
#define AVFORMAT_RTMPPKT_H
#include "avformat.h"
/** maximum possible number of different RTMP channels */
#define RTMP_CHANNELS 65599
/**
 * channels used for RTMP packets with different purposes (i.e. data, network
 * control, remote procedure calls, etc.)
 */
enum RTMPChannel {
    RTMP_NETWORK_CHANNEL = 2, ///< channel for network-related messages (bandwidth report, ping, etc)
    RTMP_SYSTEM_CHANNEL, ///< channel for sending server control messages (= 3)
    RTMP_SOURCE_CHANNEL, ///< channel for sending a/v to server (= 4)
    RTMP_VIDEO_CHANNEL = 8, ///< channel for video data
    RTMP_AUDIO_CHANNEL, ///< channel for audio data (= 9)
};
/**
* known RTMP packet types
*/
typedef enum RTMPPacketType {
    RTMP_PT_CHUNK_SIZE = 1, ///< chunk size change
    RTMP_PT_BYTES_READ = 3, ///< number of bytes read
    RTMP_PT_PING, ///< ping (= 4)
    RTMP_PT_SERVER_BW, ///< server bandwidth (= 5)
    RTMP_PT_CLIENT_BW, ///< client bandwidth (= 6)
    RTMP_PT_AUDIO = 8, ///< audio packet
    RTMP_PT_VIDEO, ///< video packet (= 9)
    RTMP_PT_FLEX_STREAM = 15, ///< Flex shared stream
    RTMP_PT_FLEX_OBJECT, ///< Flex shared object (= 16)
    RTMP_PT_FLEX_MESSAGE, ///< Flex shared message (= 17)
    RTMP_PT_NOTIFY, ///< some notification (= 18)
    RTMP_PT_SHARED_OBJ, ///< shared object (= 19)
    RTMP_PT_INVOKE, ///< invoke some stream action (= 20)
    RTMP_PT_METADATA = 22, ///< FLV metadata
} RTMPPacketType;
/**
* possible RTMP packet header sizes
*/
/* the value is the 2-bit header-type field of the chunk basic header */
enum RTMPPacketSize {
    RTMP_PS_TWELVEBYTES = 0, ///< packet has 12-byte header
    RTMP_PS_EIGHTBYTES, ///< packet has 8-byte header
    RTMP_PS_FOURBYTES, ///< packet has 4-byte header
    RTMP_PS_ONEBYTE ///< packet is really a next chunk of a packet
};
/**
* structure for holding RTMP packets
*/
typedef struct RTMPPacket {
    int channel_id; ///< RTMP channel ID (nothing to do with audio/video channels though)
    RTMPPacketType type; ///< packet payload type
    uint32_t timestamp; ///< packet full timestamp
    uint32_t ts_delta; ///< timestamp increment to the previous one in milliseconds (latter only for media packets)
    uint32_t extra; ///< probably an additional channel ID used during streaming data
    uint8_t *data; ///< packet payload (owned; see ff_rtmp_packet_create/destroy)
    int data_size; ///< packet payload size
} RTMPPacket;
/**
* Creates new RTMP packet with given attributes.
*
* @param pkt packet
* @param channel_id packet channel ID
* @param type packet type
* @param timestamp packet timestamp
* @param size packet size
* @return zero on success, negative value otherwise
*/
int ff_rtmp_packet_create(RTMPPacket *pkt, int channel_id, RTMPPacketType type,
int timestamp, int size);
/**
* Frees RTMP packet.
*
* @param pkt packet
*/
void ff_rtmp_packet_destroy(RTMPPacket *pkt);
/**
* Reads RTMP packet sent by the server.
*
* @param h reader context
* @param p packet
* @param chunk_size current chunk size
* @param prev_pkt previously read packet headers for all channels
* (may be needed for restoring incomplete packet header)
* @return number of bytes read on success, negative value otherwise
*/
int ff_rtmp_packet_read(URLContext *h, RTMPPacket *p,
int chunk_size, RTMPPacket *prev_pkt);
/**
* Sends RTMP packet to the server.
*
* @param h reader context
* @param p packet to send
* @param chunk_size current chunk size
* @param prev_pkt previously sent packet headers for all channels
* (may be used for packet header compressing)
* @return number of bytes written on success, negative value otherwise
*/
int ff_rtmp_packet_write(URLContext *h, RTMPPacket *p,
int chunk_size, RTMPPacket *prev_pkt);
/**
* Prints information and contents of RTMP packet.
*
* @param h output context
* @param p packet to dump
*/
void ff_rtmp_packet_dump(void *ctx, RTMPPacket *p);
/**
* @defgroup amffuncs functions used to work with AMF format (which is also used in .flv)
* @see amf_* funcs in libavformat/flvdec.c
* @{
*/
/**
* Calculates number of bytes taken by first AMF entry in data.
*
* @param data input data
* @param data_end input buffer end
* @return number of bytes used by first AMF entry
*/
int ff_amf_tag_size(const uint8_t *data, const uint8_t *data_end);
/**
* Retrieves value of given AMF object field in string form.
*
* @param data AMF object data
* @param data_end input buffer end
* @param name name of field to retrieve
* @param dst buffer for storing result
* @param dst_size output buffer size
* @return 0 if search and retrieval succeeded, negative value otherwise
*/
int ff_amf_get_field_value(const uint8_t *data, const uint8_t *data_end,
const uint8_t *name, uint8_t *dst, int dst_size);
/**
* Writes boolean value in AMF format to buffer.
*
* @param dst pointer to the input buffer (will be modified)
* @param val value to write
*/
void ff_amf_write_bool(uint8_t **dst, int val);
/**
* Writes number in AMF format to buffer.
*
* @param dst pointer to the input buffer (will be modified)
* @param num value to write
*/
void ff_amf_write_number(uint8_t **dst, double num);
/**
* Writes string in AMF format to buffer.
*
* @param dst pointer to the input buffer (will be modified)
* @param str string to write
*/
void ff_amf_write_string(uint8_t **dst, const char *str);
/**
* Writes AMF NULL value to buffer.
*
* @param dst pointer to the input buffer (will be modified)
*/
void ff_amf_write_null(uint8_t **dst);
/**
* Writes marker for AMF object to buffer.
*
* @param dst pointer to the input buffer (will be modified)
*/
void ff_amf_write_object_start(uint8_t **dst);
/**
* Writes string used as field name in AMF object to buffer.
*
* @param dst pointer to the input buffer (will be modified)
* @param str string to write
*/
void ff_amf_write_field_name(uint8_t **dst, const char *str);
/**
* Writes marker for end of AMF object to buffer.
*
* @param dst pointer to the input buffer (will be modified)
*/
void ff_amf_write_object_end(uint8_t **dst);
/** @} */ // AMF funcs
#endif /* AVFORMAT_RTMPPKT_H */
| 123linslouis-android-video-cutter | jni/libavformat/rtmppkt.h | C | asf20 | 7,256 |
/*
* "Real" compatible demuxer.
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "riff.h"
#include "rm.h"
/** Per-stream private state of the RealMedia demuxer. */
struct RMStream {
    AVPacket pkt; ///< place to store merged video frame / reordered audio data
    int videobufsize; ///< current assembled frame size
    int videobufpos; ///< position for the next slice in the video buffer
    int curpic_num; ///< picture number of current frame
    int cur_slice, slices; ///< current slice index / total slice count of the frame being assembled
    int64_t pktpos; ///< first slice position in file
    /// Audio descrambling matrix parameters
    int64_t audiotimestamp; ///< Audio packet timestamp
    int sub_packet_cnt; // Subpacket counter, used while reading
    int sub_packet_size, sub_packet_h, coded_framesize; ///< Descrambling parameters from container
    int audio_framesize; ///< Audio frame size from container
    int sub_packet_lengths[16]; ///< Length of each subpacket
};
/** Demuxer-wide state of the RealMedia demuxer. */
typedef struct {
    int nb_packets; ///< number of packets announced in the DATA chunk header
    int old_format; ///< set for the ancient ".ra\xfd" audio-only files
    int current_stream; ///< id of the stream whose packet is being continued (see sync())
    int remaining_len; ///< bytes left unread of the current packet
    int audio_stream_num; ///< Stream number for audio packets
    int audio_pkt_cnt; ///< Output packet counter
} RMDemuxContext;
/* fourcc -> codec id mapping for RealMedia audio and video streams */
static const AVCodecTag rm_codec_tags[] = {
    { CODEC_ID_RV10, MKTAG('R','V','1','0') },
    { CODEC_ID_RV20, MKTAG('R','V','2','0') },
    { CODEC_ID_RV20, MKTAG('R','V','T','R') },
    { CODEC_ID_RV30, MKTAG('R','V','3','0') },
    { CODEC_ID_RV40, MKTAG('R','V','4','0') },
    { CODEC_ID_AC3, MKTAG('d','n','e','t') },
    { CODEC_ID_RA_144, MKTAG('l','p','c','J') },
    { CODEC_ID_RA_288, MKTAG('2','8','_','8') },
    { CODEC_ID_COOK, MKTAG('c','o','o','k') },
    { CODEC_ID_ATRAC3, MKTAG('a','t','r','c') },
    { CODEC_ID_SIPR, MKTAG('s','i','p','r') },
    { CODEC_ID_AAC, MKTAG('r','a','a','c') },
    { CODEC_ID_AAC, MKTAG('r','a','c','p') },
    { CODEC_ID_NONE },
};
/* Pairs of block indices to exchange when descrambling SIPR audio;
 * consumed by ff_rm_reorder_sipr_data(). */
static const unsigned char sipr_swaps[38][2] = {
    { 0, 63 }, { 1, 22 }, { 2, 44 }, { 3, 90 },
    { 5, 81 }, { 7, 31 }, { 8, 86 }, { 9, 58 },
    { 10, 36 }, { 12, 68 }, { 13, 39 }, { 14, 73 },
    { 15, 53 }, { 16, 69 }, { 17, 57 }, { 19, 88 },
    { 20, 34 }, { 21, 71 }, { 24, 46 }, { 25, 94 },
    { 26, 54 }, { 28, 75 }, { 29, 50 }, { 32, 70 },
    { 33, 92 }, { 35, 74 }, { 38, 85 }, { 40, 56 },
    { 42, 87 }, { 43, 65 }, { 45, 59 }, { 48, 79 },
    { 49, 93 }, { 51, 89 }, { 55, 95 }, { 61, 76 },
    { 67, 83 }, { 77, 80 }
};
/* SIPR subpacket size in bytes, indexed by file flavor (0..3) */
const unsigned char ff_sipr_subpk_size[4] = { 29, 19, 37, 20 };
/**
 * Read len bytes from the input, keeping at most buf_size-1 of them in buf,
 * and NUL-terminate the result (when buf_size > 0).  Bytes beyond the buffer
 * capacity are still consumed from the stream.
 */
static inline void get_strl(ByteIOContext *pb, char *buf, int buf_size, int len)
{
    int n;
    char *dst = buf;
    for (n = 0; n < len; n++) {
        char c = get_byte(pb);
        if (n < buf_size - 1)
            *dst++ = c;
    }
    if (buf_size > 0)
        *dst = '\0';
}
/** Read a Pascal-style string: one length byte followed by the payload. */
static void get_str8(ByteIOContext *pb, char *buf, int buf_size)
{
    int len = get_byte(pb);
    get_strl(pb, buf, buf_size, len);
}
/**
 * Read codec extradata of the given size from the input and append the
 * zeroed padding that decoders require.
 *
 * @param pb    input byte context
 * @param avctx codec context whose extradata/extradata_size are filled
 * @param size  number of extradata bytes to read (must be < 2^24)
 * @return 0 on success, a negative error code on failure
 */
static int rm_read_extradata(ByteIOContext *pb, AVCodecContext *avctx, unsigned size)
{
    int ret;
    if (size >= 1<<24)
        return -1;
    avctx->extradata = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata)
        return AVERROR(ENOMEM);
    ret = get_buffer(pb, avctx->extradata, size);
    /* Check the read BEFORE the padding memset: get_buffer() may return a
     * short count or a negative error code, and using that value as an
     * offset into extradata would write out of bounds. */
    if (ret != size)
        return AVERROR(EIO);
    avctx->extradata_size = size;
    memset(avctx->extradata + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    return 0;
}
/**
 * Read the container metadata strings (title/author/copyright/comment) and
 * store them under the keys listed in ff_rm_metadata.
 *
 * @param wide nonzero if each string is prefixed by a 16-bit big-endian
 *             length, zero for a single length byte
 */
static void rm_read_metadata(AVFormatContext *s, int wide)
{
    char value[1024];
    int n;
    for (n = 0; n < FF_ARRAY_ELEMS(ff_rm_metadata); n++) {
        int len;
        len = wide ? get_be16(s->pb) : get_byte(s->pb);
        get_strl(s->pb, value, sizeof(value), len);
        av_metadata_set2(&s->metadata, ff_rm_metadata[n], value, 0);
    }
}
RMStream *ff_rm_alloc_rmstream (void)
{
RMStream *rms = av_mallocz(sizeof(RMStream));
rms->curpic_num = -1;
return rms;
}
/**
 * Free the buffered packet held by a per-stream RealMedia context.
 *
 * @param rms stream context to clean up; NULL is tolerated so callers may
 *            pass streams whose priv_data was never allocated
 */
void ff_rm_free_rmstream (RMStream *rms)
{
    if (!rms)
        return;
    av_free_packet(&rms->pkt);
}
/**
 * Parse a RealAudio (".ra") stream header, version 3, 4 or 5, filling in
 * codec parameters and the audio descrambling/interleaving parameters.
 *
 * @param s        demuxer context (used for logging and metadata)
 * @param pb       input positioned right after the ".ra\xfd" signature
 * @param st       stream whose codec context is filled in
 * @param ast      per-stream RM state receiving interleaving parameters
 * @param read_all if set, also consume the trailing metadata (old format)
 * @return 0 on success, -1 or a negative AVERROR code on failure
 */
static int rm_read_audio_stream_info(AVFormatContext *s, ByteIOContext *pb,
                                     AVStream *st, RMStream *ast, int read_all)
{
    char buf[256];
    uint32_t version;
    int ret;
    /* ra type header */
    version = get_be16(pb); /* version */
    if (version == 3) {
        /* version 3 is always 8kHz mono RealAudio 1.0 (14.4) */
        int header_size = get_be16(pb);
        int64_t startpos = url_ftell(pb);
        url_fskip(pb, 14);
        rm_read_metadata(s, 0);
        if ((startpos + header_size) >= url_ftell(pb) + 2) {
            // fourcc (should always be "lpcJ")
            get_byte(pb);
            get_str8(pb, buf, sizeof(buf));
        }
        // Skip extra header crap (this should never happen)
        if ((startpos + header_size) > url_ftell(pb))
            url_fskip(pb, header_size + startpos - url_ftell(pb));
        st->codec->sample_rate = 8000;
        st->codec->channels = 1;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = CODEC_ID_RA_144;
    } else {
        int flavor, sub_packet_h, coded_framesize, sub_packet_size;
        int codecdata_length;
        /* old version (4) */
        url_fskip(pb, 2); /* unused */
        get_be32(pb); /* .ra4 */
        get_be32(pb); /* data size */
        get_be16(pb); /* version2 */
        get_be32(pb); /* header size */
        flavor= get_be16(pb); /* add codec info / flavor */
        ast->coded_framesize = coded_framesize = get_be32(pb); /* coded frame size */
        get_be32(pb); /* ??? */
        get_be32(pb); /* ??? */
        get_be32(pb); /* ??? */
        ast->sub_packet_h = sub_packet_h = get_be16(pb); /* 1 */
        st->codec->block_align= get_be16(pb); /* frame size */
        ast->sub_packet_size = sub_packet_size = get_be16(pb); /* sub packet size */
        get_be16(pb); /* ??? */
        if (version == 5) {
            get_be16(pb); get_be16(pb); get_be16(pb);
        }
        st->codec->sample_rate = get_be16(pb);
        get_be32(pb);
        st->codec->channels = get_be16(pb);
        if (version == 5) {
            /* v5 stores the codec fourcc directly */
            get_be32(pb);
            get_buffer(pb, buf, 4);
            buf[4] = 0;
        } else {
            /* v4 stores two Pascal strings; the second one is the fourcc */
            get_str8(pb, buf, sizeof(buf)); /* desc */
            get_str8(pb, buf, sizeof(buf)); /* desc */
        }
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_tag = AV_RL32(buf);
        st->codec->codec_id = ff_codec_get_id(rm_codec_tags, st->codec->codec_tag);
        switch (st->codec->codec_id) {
        case CODEC_ID_AC3:
            st->need_parsing = AVSTREAM_PARSE_FULL;
            break;
        case CODEC_ID_RA_288:
            st->codec->extradata_size= 0;
            ast->audio_framesize = st->codec->block_align;
            st->codec->block_align = coded_framesize;
            /* overflow check before sizing the reordering buffer */
            if(ast->audio_framesize >= UINT_MAX / sub_packet_h){
                av_log(s, AV_LOG_ERROR, "ast->audio_framesize * sub_packet_h too large\n");
                return -1;
            }
            av_new_packet(&ast->pkt, ast->audio_framesize * sub_packet_h);
            break;
        case CODEC_ID_COOK:
        case CODEC_ID_ATRAC3:
        case CODEC_ID_SIPR:
            get_be16(pb); get_byte(pb);
            if (version == 5)
                get_byte(pb);
            codecdata_length = get_be32(pb);
            if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
                av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
                return -1;
            }
            /* the fourcc-derived id may be wrong; trust the string in buf */
            if (!strcmp(buf, "cook")) st->codec->codec_id = CODEC_ID_COOK;
            else if (!strcmp(buf, "sipr")) st->codec->codec_id = CODEC_ID_SIPR;
            else st->codec->codec_id = CODEC_ID_ATRAC3;
            ast->audio_framesize = st->codec->block_align;
            if (st->codec->codec_id == CODEC_ID_SIPR) {
                if (flavor > 3) {
                    av_log(s, AV_LOG_ERROR, "bad SIPR file flavor %d\n",
                           flavor);
                    return -1;
                }
                st->codec->block_align = ff_sipr_subpk_size[flavor];
            } else {
                if(sub_packet_size <= 0){
                    av_log(s, AV_LOG_ERROR, "sub_packet_size is invalid\n");
                    return -1;
                }
                st->codec->block_align = ast->sub_packet_size;
            }
            if ((ret = rm_read_extradata(pb, st->codec, codecdata_length)) < 0)
                return ret;
            if(ast->audio_framesize >= UINT_MAX / sub_packet_h){
                av_log(s, AV_LOG_ERROR, "rm->audio_framesize * sub_packet_h too large\n");
                return -1;
            }
            av_new_packet(&ast->pkt, ast->audio_framesize * sub_packet_h);
            break;
        case CODEC_ID_AAC:
            get_be16(pb); get_byte(pb);
            if (version == 5)
                get_byte(pb);
            st->codec->codec_id = CODEC_ID_AAC;
            codecdata_length = get_be32(pb);
            if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
                av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
                return -1;
            }
            if (codecdata_length >= 1) {
                get_byte(pb); /* skip the type byte before the AudioSpecificConfig */
                if ((ret = rm_read_extradata(pb, st->codec, codecdata_length - 1)) < 0)
                    return ret;
            }
            break;
        default:
            av_strlcpy(st->codec->codec_name, buf, sizeof(st->codec->codec_name));
        }
        if (read_all) {
            get_byte(pb);
            get_byte(pb);
            get_byte(pb);
            rm_read_metadata(s, 0);
        }
    }
    return 0;
}
/**
 * Parse the type-specific data of an MDPR chunk: either an embedded RA
 * audio header or a VIDO video header.  Unknown codecs are skipped, not
 * treated as fatal.
 *
 * @param codec_data_size total size of the type-specific data, used to
 *                        skip whatever was not consumed
 * @return 0 on success (including skipped unknown codecs), negative on error
 */
int
ff_rm_read_mdpr_codecdata (AVFormatContext *s, ByteIOContext *pb,
                           AVStream *st, RMStream *rst, int codec_data_size)
{
    unsigned int v;
    int size;
    int64_t codec_pos;
    int ret;
    av_set_pts_info(st, 64, 1, 1000); /* RM timestamps are in milliseconds */
    codec_pos = url_ftell(pb);
    v = get_be32(pb);
    if (v == MKTAG(0xfd, 'a', 'r', '.')) {
        /* ra type header */
        if (rm_read_audio_stream_info(s, pb, st, rst, 0))
            return -1;
    } else {
        int fps, fps2;
        if (get_le32(pb) != MKTAG('V', 'I', 'D', 'O')) {
        fail1:
            av_log(st->codec, AV_LOG_ERROR, "Unsupported video codec\n");
            goto skip;
        }
        st->codec->codec_tag = get_le32(pb);
        st->codec->codec_id = ff_codec_get_id(rm_codec_tags, st->codec->codec_tag);
//        av_log(s, AV_LOG_DEBUG, "%X %X\n", st->codec->codec_tag, MKTAG('R', 'V', '2', '0'));
        if (st->codec->codec_id == CODEC_ID_NONE)
            goto fail1;
        st->codec->width = get_be16(pb);
        st->codec->height = get_be16(pb);
        st->codec->time_base.num= 1;
        fps= get_be16(pb);
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        get_be32(pb);
        fps2= get_be16(pb);
        get_be16(pb);
        /* remaining bytes of the chunk are codec extradata */
        if ((ret = rm_read_extradata(pb, st->codec, codec_data_size - (url_ftell(pb) - codec_pos))) < 0)
            return ret;
//        av_log(s, AV_LOG_DEBUG, "fps= %d fps2= %d\n", fps, fps2);
        st->codec->time_base.den = fps * st->codec->time_base.num;
        //XXX: do we really need that?
        /* refine the RV sub-codec from the SPO extradata version nibble */
        switch(st->codec->extradata[4]>>4){
        case 1: st->codec->codec_id = CODEC_ID_RV10; break;
        case 2: st->codec->codec_id = CODEC_ID_RV20; break;
        case 3: st->codec->codec_id = CODEC_ID_RV30; break;
        case 4: st->codec->codec_id = CODEC_ID_RV40; break;
        default:
            av_log(st->codec, AV_LOG_ERROR, "extra:%02X %02X %02X %02X %02X\n", st->codec->extradata[0], st->codec->extradata[1], st->codec->extradata[2], st->codec->extradata[3], st->codec->extradata[4]);
            goto fail1;
        }
    }
skip:
    /* skip codec info */
    size = url_ftell(pb) - codec_pos;
    url_fskip(pb, codec_data_size - size);
    return 0;
}
/** this function assumes that the demuxer has already seeked to the start
 * of the INDX chunk, and will bail out if not.
 * Each INDX chunk indexes one stream (str_id); chunks are chained through
 * next_off.  Entries for unknown stream ids are skipped, not fatal. */
static int rm_read_index(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    unsigned int size, n_pkts, str_id, next_off, n, pos, pts;
    AVStream *st;
    do {
        if (get_le32(pb) != MKTAG('I','N','D','X'))
            return -1;
        size = get_be32(pb);
        if (size < 20)
            return -1;
        url_fskip(pb, 2);
        n_pkts = get_be32(pb);
        str_id = get_be16(pb);
        next_off = get_be32(pb);
        for (n = 0; n < s->nb_streams; n++)
            if (s->streams[n]->id == str_id) {
                st = s->streams[n];
                break;
            }
        if (n == s->nb_streams)
            goto skip;
        /* 14 bytes per index record: 2 reserved, 4 pts, 4 pos, 4 packet no. */
        for (n = 0; n < n_pkts; n++) {
            url_fskip(pb, 2);
            pts = get_be32(pb);
            pos = get_be32(pb);
            url_fskip(pb, 4); /* packet no. */
            av_add_index_entry(st, pos, pts, 0, 0, AVINDEX_KEYFRAME);
        }
 skip:
        if (next_off && url_ftell(pb) != next_off &&
            url_fseek(pb, next_off, SEEK_SET) < 0)
            return -1;
    } while (next_off);
    return 0;
}
/**
 * Set up the single audio stream of a very old ".ra\xfd" file and parse
 * its header (including trailing metadata).
 *
 * @return 0 on success, negative error code on failure
 */
static int rm_read_header_old(AVFormatContext *s, AVFormatParameters *ap)
{
    RMDemuxContext *rm = s->priv_data;
    AVStream *st;
    rm->old_format = 1;
    st = av_new_stream(s, 0);
    if (!st)
        return -1;
    st->priv_data = ff_rm_alloc_rmstream();
    if (!st->priv_data) /* allocation can fail; original passed NULL onward */
        return AVERROR(ENOMEM);
    return rm_read_audio_stream_info(s, s->pb, st, st->priv_data, 1);
}
/**
 * Parse the ".RMF" top-level header: iterate PROP/CONT/MDPR chunks until
 * the DATA chunk, creating one AVStream per MDPR.  If an index offset was
 * announced in PROP, the INDX chunks are parsed and the file position is
 * restored to the start of the packet data afterwards.
 */
static int rm_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    RMDemuxContext *rm = s->priv_data;
    AVStream *st;
    ByteIOContext *pb = s->pb;
    unsigned int tag;
    int tag_size;
    unsigned int start_time, duration;
    unsigned int data_off = 0, indx_off = 0;
    char buf[128];
    int flags = 0;
    tag = get_le32(pb);
    if (tag == MKTAG('.', 'r', 'a', 0xfd)) {
        /* very old .ra format */
        return rm_read_header_old(s, ap);
    } else if (tag != MKTAG('.', 'R', 'M', 'F')) {
        return AVERROR(EIO);
    }
    get_be32(pb); /* header size */
    get_be16(pb);
    get_be32(pb);
    get_be32(pb); /* number of headers */
    for(;;) {
        if (url_feof(pb))
            return -1;
        /* each chunk: 32-bit tag, 32-bit size, 16-bit version */
        tag = get_le32(pb);
        tag_size = get_be32(pb);
        get_be16(pb);
#if 0
        printf("tag=%c%c%c%c (%08x) size=%d\n",
               (tag) & 0xff,
               (tag >> 8) & 0xff,
               (tag >> 16) & 0xff,
               (tag >> 24) & 0xff,
               tag,
               tag_size);
#endif
        if (tag_size < 10 && tag != MKTAG('D', 'A', 'T', 'A'))
            return -1;
        switch(tag) {
        case MKTAG('P', 'R', 'O', 'P'):
            /* file header */
            get_be32(pb); /* max bit rate */
            get_be32(pb); /* avg bit rate */
            get_be32(pb); /* max packet size */
            get_be32(pb); /* avg packet size */
            get_be32(pb); /* nb packets */
            get_be32(pb); /* duration */
            get_be32(pb); /* preroll */
            indx_off = get_be32(pb); /* index offset */
            data_off = get_be32(pb); /* data offset */
            get_be16(pb); /* nb streams */
            flags = get_be16(pb); /* flags */
            break;
        case MKTAG('C', 'O', 'N', 'T'):
            rm_read_metadata(s, 1);
            break;
        case MKTAG('M', 'D', 'P', 'R'):
            st = av_new_stream(s, 0);
            if (!st)
                return AVERROR(ENOMEM);
            st->id = get_be16(pb);
            get_be32(pb); /* max bit rate */
            st->codec->bit_rate = get_be32(pb); /* bit rate */
            get_be32(pb); /* max packet size */
            get_be32(pb); /* avg packet size */
            start_time = get_be32(pb); /* start time */
            get_be32(pb); /* preroll */
            duration = get_be32(pb); /* duration */
            st->start_time = start_time;
            st->duration = duration;
            get_str8(pb, buf, sizeof(buf)); /* desc */
            get_str8(pb, buf, sizeof(buf)); /* mimetype */
            st->codec->codec_type = AVMEDIA_TYPE_DATA;
            st->priv_data = ff_rm_alloc_rmstream();
            if (ff_rm_read_mdpr_codecdata(s, s->pb, st, st->priv_data,
                                          get_be32(pb)) < 0)
                return -1;
            break;
        case MKTAG('D', 'A', 'T', 'A'):
            goto header_end;
        default:
            /* unknown tag: skip it */
            url_fskip(pb, tag_size - 10);
            break;
        }
    }
 header_end:
    rm->nb_packets = get_be32(pb); /* number of packets */
    if (!rm->nb_packets && (flags & 4))
        rm->nb_packets = 3600 * 25; /* "live" flag: fake a large packet count */
    get_be32(pb); /* next data header */
    if (!data_off)
        data_off = url_ftell(pb) - 18; /* 18 = DATA chunk header size */
    if (indx_off && url_fseek(pb, indx_off, SEEK_SET) >= 0) {
        rm_read_index(s);
        url_fseek(pb, data_off + 18, SEEK_SET);
    }
    return 0;
}
/**
 * Read a variable-length number: a 16-bit word with bit 14 set encodes a
 * 14-bit value directly, otherwise a second word follows and the pair forms
 * a 31-bit value.  *len is decremented by the number of bytes consumed.
 */
static int get_num(ByteIOContext *pb, int *len)
{
    int hi, lo;
    hi = get_be16(pb) & 0x7FFF;
    *len -= 2;
    if (hi >= 0x4000)
        return hi - 0x4000;
    lo = get_be16(pb);
    *len -= 2;
    return (hi << 16) | lo;
}
/* multiple of 20 bytes for ra144 (ugly) */
#define RAW_PACKET_SIZE 1000
/**
 * Resynchronize to the next media packet belonging to a known stream.
 * Either continues the partially-read packet recorded in rm->remaining_len,
 * or scans byte-by-byte for a plausible packet header (also recognizing and
 * skipping stray INDX chunks and warning about mid-stream DATA tags).
 *
 * @param timestamp    receives the packet timestamp (AV_NOPTS_VALUE when
 *                     continuing a partial packet)
 * @param flags        receives the packet flags (2 = keyframe)
 * @param stream_index receives the index of the matching AVStream
 * @param pos          receives the file position of the packet header
 * @return remaining payload length, or -1 on EOF
 */
static int sync(AVFormatContext *s, int64_t *timestamp, int *flags, int *stream_index, int64_t *pos){
    RMDemuxContext *rm = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st;
    uint32_t state=0xFFFFFFFF;
    while(!url_feof(pb)){
        int len, num, i;
        *pos= url_ftell(pb) - 3; /* header length field started 3 bytes back */
        if(rm->remaining_len > 0){
            num= rm->current_stream;
            len= rm->remaining_len;
            *timestamp = AV_NOPTS_VALUE;
            *flags= 0;
        }else{
            state= (state<<8) + get_byte(pb); /* sliding 32-bit window */
            if(state == MKBETAG('I', 'N', 'D', 'X')){
                int n_pkts, expected_len;
                len = get_be32(pb);
                url_fskip(pb, 2);
                n_pkts = get_be32(pb);
                expected_len = 20 + n_pkts * 14;
                if (len == 20)
                    /* some files don't add index entries to chunk size... */
                    len = expected_len;
                else if (len != expected_len)
                    av_log(s, AV_LOG_WARNING,
                           "Index size %d (%d pkts) is wrong, should be %d.\n",
                           len, n_pkts, expected_len);
                len -= 14; // we already read part of the index header
                if(len<0)
                    continue;
                goto skip;
            } else if (state == MKBETAG('D','A','T','A')) {
                av_log(s, AV_LOG_WARNING,
                       "DATA tag in middle of chunk, file may be broken.\n");
            }
            /* plausible packet length: 16-bit value larger than the header */
            if(state > (unsigned)0xFFFF || state <= 12)
                continue;
            len=state - 12;
            state= 0xFFFFFFFF;
            num = get_be16(pb);
            *timestamp = get_be32(pb);
            get_byte(pb); /* reserved */
            *flags = get_byte(pb); /* flags */
        }
        for(i=0;i<s->nb_streams;i++) {
            st = s->streams[i];
            if (num == st->id)
                break;
        }
        if (i == s->nb_streams) {
skip:
            /* skip packet if unknown number */
            url_fskip(pb, len);
            rm->remaining_len = 0;
            continue;
        }
        *stream_index= i;
        return len;
    }
    return -1;
}
/**
 * Assemble a video frame from one or more slices.
 * Slice header type (top 2 bits of the first byte): 0 = partial slice,
 * 1 = whole frame, 2 = last partial slice, 3 = frame carried as one packet.
 * Output packet layout: 1 byte slice count, then 8 bytes (flag, offset) per
 * slice, then the frame data.
 *
 * @return 0 when a complete packet was produced, 1 when more slices are
 *         needed, negative on error
 */
static int rm_assemble_video_frame(AVFormatContext *s, ByteIOContext *pb,
                                   RMDemuxContext *rm, RMStream *vst,
                                   AVPacket *pkt, int len, int *pseq)
{
    int hdr, seq, pic_num, len2, pos;
    int type;
    hdr = get_byte(pb); len--;
    type = hdr >> 6;
    if(type != 3){ // not frame as a part of packet
        seq = get_byte(pb); len--;
    }
    if(type != 1){ // not whole frame
        len2 = get_num(pb, &len);
        pos = get_num(pb, &len);
        pic_num = get_byte(pb); len--;
    }
    if(len<0)
        return -1;
    rm->remaining_len = len;
    if(type&1){ // frame, not slice
        if(type == 3) // frame as a part of packet
            len= len2;
        if(rm->remaining_len < len)
            return -1;
        rm->remaining_len -= len;
        if(av_new_packet(pkt, len + 9) < 0)
            return AVERROR(EIO);
        /* single-slice frame: count 0, one (flag=1, offset=0) entry */
        pkt->data[0] = 0;
        AV_WL32(pkt->data + 1, 1);
        AV_WL32(pkt->data + 5, 0);
        get_buffer(pb, pkt->data + 9, len);
        return 0;
    }
    //now we have to deal with single slice
    *pseq = seq;
    if((seq & 0x7F) == 1 || vst->curpic_num != pic_num){
        /* first slice of a new picture: start a fresh assembly buffer */
        vst->slices = ((hdr & 0x3F) << 1) + 1;
        vst->videobufsize = len2 + 8*vst->slices + 1;
        av_free_packet(&vst->pkt); //FIXME this should be output.
        if(av_new_packet(&vst->pkt, vst->videobufsize) < 0)
            return AVERROR(ENOMEM);
        vst->videobufpos = 8*vst->slices + 1;
        vst->cur_slice = 0;
        vst->curpic_num = pic_num;
        vst->pktpos = url_ftell(pb);
    }
    if(type == 2)
        len = FFMIN(len, pos); /* last slice: pos holds remaining frame bytes */
    if(++vst->cur_slice > vst->slices)
        return 1;
    AV_WL32(vst->pkt.data - 7 + 8*vst->cur_slice, 1);
    AV_WL32(vst->pkt.data - 3 + 8*vst->cur_slice, vst->videobufpos - 8*vst->slices - 1);
    if(vst->videobufpos + len > vst->videobufsize)
        return 1;
    if (get_buffer(pb, vst->pkt.data + vst->videobufpos, len) != len)
        return AVERROR(EIO);
    vst->videobufpos += len;
    rm->remaining_len-= len;
    if(type == 2 || (vst->videobufpos) == vst->videobufsize){
        /* frame complete: hand the assembly buffer over to the caller */
        vst->pkt.data[0] = vst->cur_slice-1;
        *pkt= vst->pkt;
        vst->pkt.data= NULL;
        vst->pkt.size= 0;
        if(vst->slices != vst->cur_slice) //FIXME find out how to set slices correct from the begin
            memmove(pkt->data + 1 + 8*vst->cur_slice, pkt->data + 1 + 8*vst->slices,
                    vst->videobufpos - 1 - 8*vst->slices);
        pkt->size = vst->videobufpos + 8*(vst->cur_slice - vst->slices);
        pkt->pts = AV_NOPTS_VALUE;
        pkt->pos = vst->pktpos;
        vst->slices = 0;
        return 0;
    }
    return 1;
}
/**
 * RealMedia stores AC-3 ("dnet") with the two bytes of each 16-bit word
 * reversed; swap them back in place.  No-op for other codecs.
 */
static inline void
rm_ac3_swap_bytes (AVStream *st, AVPacket *pkt)
{
    if (st->codec->codec_id == CODEC_ID_AC3) {
        uint8_t *p = pkt->data;
        int i;
        for (i = 0; i < pkt->size; i += 2) {
            uint8_t tmp = p[0];
            p[0] = p[1];
            p[1] = tmp;
            p += 2;
        }
    }
}
/**
 * Perform 4-bit block reordering for SIPR data.
 * The payload is treated as an array of 4-bit nibbles grouped into 96
 * blocks; the fixed pairs in sipr_swaps are exchanged nibble by nibble.
 * @todo This can be optimized, e.g. use memcpy() if data blocks are aligned
 */
void ff_rm_reorder_sipr_data(uint8_t *buf, int sub_packet_h, int framesize)
{
    int n, bs = sub_packet_h * framesize * 2 / 96; // nibbles per subpacket
    for (n = 0; n < 38; n++) {
        int j;
        int i = bs * sipr_swaps[n][0];
        int o = bs * sipr_swaps[n][1];
        /* swap 4bit-nibbles of block 'i' with 'o' */
        for (j = 0; j < bs; j++, i++, o++) {
            /* i and o count nibbles; i>>1 is the byte, i&1 selects the nibble */
            int x = (buf[i >> 1] >> (4 * (i & 1))) & 0xF,
                y = (buf[o >> 1] >> (4 * (o & 1))) & 0xF;
            buf[o >> 1] = (x << (4 * (o & 1))) |
                (buf[o >> 1] & (0xF << (4 * !(o & 1))));
            buf[i >> 1] = (y << (4 * (i & 1))) |
                (buf[i >> 1] & (0xF << (4 * !(i & 1))));
        }
    }
}
/**
 * Parse one RealMedia packet payload into an AVPacket.
 * Video slices are assembled across packets; interleaved audio codecs
 * (28_8/cook/atrac3/sipr) are deinterleaved into ast->pkt and later
 * drained via ff_rm_retrieve_cache(); everything else is passed through.
 *
 * @param flags     packet flags from sync() (2 = keyframe)
 * @param timestamp packet timestamp in ms
 * @return for audio: number of packets still queued in the cache;
 *         0 for a complete non-audio packet; -1 when more data is needed
 */
int
ff_rm_parse_packet (AVFormatContext *s, ByteIOContext *pb,
                    AVStream *st, RMStream *ast, int len, AVPacket *pkt,
                    int *seq, int flags, int64_t timestamp)
{
    RMDemuxContext *rm = s->priv_data;
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        rm->current_stream= st->id;
        if(rm_assemble_video_frame(s, pb, rm, ast, pkt, len, seq))
            return -1; //got partial frame
    } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        if ((st->codec->codec_id == CODEC_ID_RA_288) ||
            (st->codec->codec_id == CODEC_ID_COOK) ||
            (st->codec->codec_id == CODEC_ID_ATRAC3) ||
            (st->codec->codec_id == CODEC_ID_SIPR)) {
            int x;
            int sps = ast->sub_packet_size;
            int cfs = ast->coded_framesize;
            int h = ast->sub_packet_h;
            int y = ast->sub_packet_cnt;
            int w = ast->audio_framesize;
            if (flags & 2)
                y = ast->sub_packet_cnt = 0; /* keyframe restarts the matrix */
            if (!y)
                ast->audiotimestamp = timestamp; /* pts of the first subpacket */
            /* scatter this subpacket into the deinterleaving matrix */
            switch(st->codec->codec_id) {
                case CODEC_ID_RA_288:
                    for (x = 0; x < h/2; x++)
                        get_buffer(pb, ast->pkt.data+x*2*w+y*cfs, cfs);
                    break;
                case CODEC_ID_ATRAC3:
                case CODEC_ID_COOK:
                    for (x = 0; x < w/sps; x++)
                        get_buffer(pb, ast->pkt.data+sps*(h*x+((h+1)/2)*(y&1)+(y>>1)), sps);
                    break;
                case CODEC_ID_SIPR:
                    get_buffer(pb, ast->pkt.data + y * w, w);
                    break;
            }
            if (++(ast->sub_packet_cnt) < h)
                return -1; /* matrix not full yet */
            if (st->codec->codec_id == CODEC_ID_SIPR)
                ff_rm_reorder_sipr_data(ast->pkt.data, h, w);
             ast->sub_packet_cnt = 0;
             rm->audio_stream_num = st->index;
            rm->audio_pkt_cnt = h * w / st->codec->block_align;
        } else if (st->codec->codec_id == CODEC_ID_AAC) {
            int x;
            rm->audio_stream_num = st->index;
            ast->sub_packet_cnt = (get_be16(pb) & 0xf0) >> 4;
            if (ast->sub_packet_cnt) {
                for (x = 0; x < ast->sub_packet_cnt; x++)
                    ast->sub_packet_lengths[x] = get_be16(pb);
                rm->audio_pkt_cnt = ast->sub_packet_cnt;
                ast->audiotimestamp = timestamp;
            } else
                return -1;
        } else {
            av_get_packet(pb, pkt, len);
            rm_ac3_swap_bytes(st, pkt);
        }
    } else
        av_get_packet(pb, pkt, len);
    pkt->stream_index = st->index;
    /* NOTE(review): dead debug code below dereferences *timestamp although
     * timestamp is not a pointer in this function -- it would not compile
     * if enabled */
#if 0
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        if(st->codec->codec_id == CODEC_ID_RV20){
            int seq= 128*(pkt->data[2]&0x7F) + (pkt->data[3]>>1);
            av_log(s, AV_LOG_DEBUG, "%d %"PRId64" %d\n", *timestamp, *timestamp*512LL/25, seq);
            seq |= (timestamp&~0x3FFF);
            if(seq - timestamp >  0x2000) seq -= 0x4000;
            if(seq - timestamp < -0x2000) seq += 0x4000;
        }
    }
#endif
    pkt->pts= timestamp;
    if (flags & 2)
        pkt->flags |= AV_PKT_FLAG_KEY;
    return st->codec->codec_type == AVMEDIA_TYPE_AUDIO ? rm->audio_pkt_cnt : 0;
}
/**
 * Emit one queued audio packet from the deinterleaved cache filled by
 * ff_rm_parse_packet().  Must only be called while rm->audio_pkt_cnt > 0.
 * The first emitted packet carries the cached timestamp and the key flag.
 *
 * @return number of audio packets still queued after this one
 */
int
ff_rm_retrieve_cache (AVFormatContext *s, ByteIOContext *pb,
                      AVStream *st, RMStream *ast, AVPacket *pkt)
{
    RMDemuxContext *rm = s->priv_data;
    assert (rm->audio_pkt_cnt > 0);
    if (st->codec->codec_id == CODEC_ID_AAC)
        av_get_packet(pb, pkt, ast->sub_packet_lengths[ast->sub_packet_cnt - rm->audio_pkt_cnt]);
    else {
        av_new_packet(pkt, st->codec->block_align);
        memcpy(pkt->data, ast->pkt.data + st->codec->block_align * //FIXME avoid this
               (ast->sub_packet_h * ast->audio_framesize / st->codec->block_align - rm->audio_pkt_cnt),
               st->codec->block_align);
    }
    rm->audio_pkt_cnt--;
    if ((pkt->pts = ast->audiotimestamp) != AV_NOPTS_VALUE) {
        ast->audiotimestamp = AV_NOPTS_VALUE;
        pkt->flags = AV_PKT_FLAG_KEY;
    } else
        pkt->flags = 0;
    pkt->stream_index = st->index;
    return rm->audio_pkt_cnt;
}
/**
 * Demuxer read_packet callback: drain queued audio subpackets first, then
 * read/assemble the next packet from the file, honoring stream discard
 * settings and registering keyframe index entries on the fly.
 *
 * @return 0 on success, AVERROR(EIO) on EOF/error
 */
static int rm_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    RMDemuxContext *rm = s->priv_data;
    AVStream *st;
    int i, len, res, seq = 1;
    int64_t timestamp, pos;
    int flags;
    for (;;) {
        if (rm->audio_pkt_cnt) {
            // If there are queued audio packet return them first
            st = s->streams[rm->audio_stream_num];
            ff_rm_retrieve_cache(s, s->pb, st, st->priv_data, pkt);
            flags = 0;
        } else {
            if (rm->old_format) {
                RMStream *ast;
                st = s->streams[0];
                ast = st->priv_data;
                timestamp = AV_NOPTS_VALUE;
                /* raw .ra: fixed-size reads (interleaved codecs read half a
                 * descrambling matrix at a time) */
                len = !ast->audio_framesize ? RAW_PACKET_SIZE :
                    ast->coded_framesize * ast->sub_packet_h / 2;
                flags = (seq++ == 1) ? 2 : 0;
                pos = url_ftell(s->pb);
            } else {
                /* fix: the address-of operator here had been mangled into a
                 * multiplication sign by an HTML-entity round-trip */
                len = sync(s, &timestamp, &flags, &i, &pos);
                if (len > 0)
                    st = s->streams[i];
            }
            if (len < 0 || url_feof(s->pb))
                return AVERROR(EIO);
            res = ff_rm_parse_packet (s, s->pb, st, st->priv_data, len, pkt,
                                      &seq, flags, timestamp);
            if ((flags & 2) && (seq & 0x7F) == 1)
                av_add_index_entry(st, pos, timestamp, 0, 0, AVINDEX_KEYFRAME);
            if (res)
                continue;  /* partial frame or queued audio; keep reading */
        }
        if (  (st->discard >= AVDISCARD_NONKEY && !(flags & 2))
            || st->discard >= AVDISCARD_ALL) {
            av_free_packet(pkt);
        } else
            break;
    }
    return 0;
}
/** Demuxer read_close callback: free each stream's RMStream state. */
static int rm_read_close(AVFormatContext *s)
{
    int i;
    for (i=0;i<s->nb_streams;i++)
        ff_rm_free_rmstream(s->streams[i]->priv_data);
    return 0;
}
/**
 * Probe for RealMedia content: either the ".RMF" container signature or
 * the very old raw ".ra\xfd" audio signature.
 */
static int rm_probe(AVProbeData *p)
{
    const unsigned char *b = p->buf;
    /* ".RMF" followed by two zero bytes: RealMedia container */
    if (b[0] == '.' && b[1] == 'R' && b[2] == 'M' && b[3] == 'F' &&
        b[4] == 0 && b[5] == 0)
        return AVPROBE_SCORE_MAX;
    /* ".ra\xfd": ancient raw RealAudio */
    if (b[0] == '.' && b[1] == 'r' && b[2] == 'a' && b[3] == 0xfd)
        return AVPROBE_SCORE_MAX;
    return 0;
}
/**
 * read_timestamp callback used for seeking: scan forward from *ppos for the
 * next keyframe of the requested stream, registering index entries for every
 * keyframe found along the way.
 *
 * @return the keyframe's dts, or AV_NOPTS_VALUE (old format / no sync)
 */
static int64_t rm_read_dts(AVFormatContext *s, int stream_index,
                           int64_t *ppos, int64_t pos_limit)
{
    RMDemuxContext *rm = s->priv_data;
    int64_t pos, dts;
    int stream_index2, flags, len, h;
    pos = *ppos;
    if(rm->old_format)
        return AV_NOPTS_VALUE;
    url_fseek(s->pb, pos, SEEK_SET);
    rm->remaining_len=0;
    for(;;){
        int seq=1;
        AVStream *st;
        len=sync(s, &dts, &flags, &stream_index2, &pos);
        if(len<0)
            return AV_NOPTS_VALUE;
        st = s->streams[stream_index2];
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            /* peek at the slice header to get the real sequence number */
            h= get_byte(s->pb); len--;
            if(!(h & 0x40)){
                seq = get_byte(s->pb); len--;
            }
        }
        if((flags&2) && (seq&0x7F) == 1){
//            av_log(s, AV_LOG_DEBUG, "%d %d-%d %"PRId64" %d\n", flags, stream_index2, stream_index, dts, seq);
            av_add_index_entry(st, pos, dts, 0, 0, AVINDEX_KEYFRAME);
            if(stream_index2 == stream_index)
                break;
        }
        url_fskip(s->pb, len);
    }
    *ppos = pos;
    return dts;
}
/* RealMedia demuxer definition (positional AVInputFormat initializer) */
AVInputFormat rm_demuxer = {
    "rm", /* name */
    NULL_IF_CONFIG_SMALL("RealMedia format"), /* long_name */
    sizeof(RMDemuxContext), /* priv_data_size */
    rm_probe, /* read_probe */
    rm_read_header, /* read_header */
    rm_read_packet, /* read_packet */
    rm_read_close, /* read_close */
    NULL, /* read_seek: seeking is done via read_timestamp below */
    rm_read_dts, /* read_timestamp */
};
/* Skeleton demuxer reused by the RDT (RealDataTransport) RTSP code; only the
 * close callback is needed, the rest is driven externally. */
AVInputFormat rdt_demuxer = {
    "rdt", /* name */
    NULL_IF_CONFIG_SMALL("RDT demuxer"), /* long_name */
    sizeof(RMDemuxContext), /* priv_data_size */
    NULL, /* read_probe */
    NULL, /* read_header */
    NULL, /* read_packet */
    rm_read_close, /* read_close */
};
| 123linslouis-android-video-cutter | jni/libavformat/rmdec.c | C | asf20 | 32,092 |
/*
* MP3 muxer and demuxer
* Copyright (c) 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <strings.h>
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "id3v2.h"
#include "id3v1.h"
#if CONFIG_MP3_DEMUXER
#include "libavcodec/mpegaudio.h"
#include "libavcodec/mpegaudiodecheader.h"
/* mp3 read */
/**
 * Probe for an MPEG audio (layer 2/3) stream.
 * Skips a leading ID3v2 tag and zero padding, then for each start offset
 * counts how many consecutive valid frame headers can be chained by
 * following frame sizes; the longest chain determines the score.
 */
static int mp3_read_probe(AVProbeData *p)
{
    int max_frames, first_frames = 0;
    int fsize, frames, sample_rate;
    uint32_t header;
    uint8_t *buf, *buf0, *buf2, *end;
    AVCodecContext avctx;
    buf0 = p->buf;
    /* skip a leading ID3v2 tag, if present */
    if(ff_id3v2_match(buf0)) {
        buf0 += ff_id3v2_tag_len(buf0);
    }
    end = p->buf + p->buf_size - sizeof(uint32_t);
    /* skip zero padding between the tag and the first frame */
    while(buf0 < end && !*buf0)
        buf0++;
    max_frames = 0;
    buf = buf0;
    /* try every start offset; chain frames by advancing fsize bytes */
    for(; buf < end; buf= buf2+1) {
        buf2 = buf;
        for(frames = 0; buf2 < end; frames++) {
            header = AV_RB32(buf2);
            fsize = ff_mpa_decode_header(&avctx, header, &sample_rate, &sample_rate, &sample_rate, &sample_rate);
            if(fsize < 0)
                break;
            buf2 += fsize;
        }
        max_frames = FFMAX(max_frames, frames);
        if(buf == buf0)
            first_frames= frames;
    }
    // keep this in sync with ac3 probe, both need to avoid
    // issues with MPEG-files!
    if (first_frames>=4) return AVPROBE_SCORE_MAX/2+1;
    else if(max_frames>500)return AVPROBE_SCORE_MAX/2;
    else if(max_frames>=4) return AVPROBE_SCORE_MAX/4;
    else if(buf0!=p->buf) return AVPROBE_SCORE_MAX/4-1;
    else if(max_frames>=1) return 1;
    else return 0;
//mpegps_mp3_unrecognized_format.mpg has max_frames=3
}
/**
 * Try to find Xing/Info/VBRI tags and compute the stream duration from
 * the frame count stored therein.
 * @param base file offset of the first audio frame
 * @return 0 if a usable frame count was found, -1 otherwise
 */
static int mp3_parse_vbr_tags(AVFormatContext *s, AVStream *st, int64_t base)
{
    static const int64_t xing_offset[2][2] = {{32, 17}, {17, 9}};
    uint32_t v;
    uint32_t samples_per_frame;
    int total_frames = -1;          /* total number of frames in file */
    int first_frame_size = 0;
    MPADecodeHeader hdr;

    v = get_be32(s->pb);
    if (ff_mpa_check_header(v) < 0)
        return -1;
    if (ff_mpegaudio_decode_header(&hdr, v) == 0)
        first_frame_size = hdr.frame_size;
    if (hdr.layer != 3)
        return -1;

    /* Xing/Info tag: its offset depends on MPEG version and channel count */
    url_fseek(s->pb, xing_offset[hdr.lsf == 1][hdr.nb_channels == 1], SEEK_CUR);
    v = get_be32(s->pb);
    if (v == MKBETAG('X', 'i', 'n', 'g') || v == MKBETAG('I', 'n', 'f', 'o')) {
        v = get_be32(s->pb);            /* flags word */
        if (v & 0x1)                    /* bit 0: frame count present */
            total_frames = get_be32(s->pb);
    }

    /* VBRI tag: always 32 bytes after the end of the mpegaudio header */
    url_fseek(s->pb, base + 4 + 32, SEEK_SET);
    v = get_be32(s->pb);
    if (v == MKBETAG('V', 'B', 'R', 'I')) {
        if (get_be16(s->pb) == 1) {     /* only tag version 1 is known */
            /* skip delay, quality and total bytes */
            url_fseek(s->pb, 8, SEEK_CUR);
            total_frames = get_be32(s->pb);
        }
    }

    if (total_frames < 0)
        return -1;

    /* position past the VBR tag frame so it is not decoded as audio */
    url_fseek(s->pb, base + first_frame_size, SEEK_SET);

    samples_per_frame = hdr.lsf ? 576 : 1152; /* layer 3 */
    st->duration = av_rescale_q(total_frames,
                                (AVRational){samples_per_frame, hdr.sample_rate},
                                st->time_base);
    return 0;
}
/**
 * Set up the single MP3 audio stream, consume leading ID3v2 / trailing
 * ID3v1 metadata and try to read a VBR duration tag.
 */
static int mp3_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    AVStream *stream = av_new_stream(s, 0);
    int64_t data_start;

    if (!stream)
        return AVERROR(ENOMEM);

    stream->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    stream->codec->codec_id = CODEC_ID_MP3;
    stream->need_parsing = AVSTREAM_PARSE_FULL;
    stream->start_time = 0;

    /* lcm of all mp3 sample rates */
    av_set_pts_info(stream, 64, 1, 14112000);

    /* consume a leading ID3v2 tag, then remember where audio begins */
    ff_id3v2_read(s);
    data_start = url_ftell(s->pb);

    /* fall back to the trailing ID3v1 tag only if ID3v2 gave no metadata */
    if (!av_metadata_get(s->metadata, "", NULL, AV_METADATA_IGNORE_SUFFIX))
        ff_id3v1_read(s);

    /* no VBR tag: rewind to the first audio byte; the parameters will be
       extracted from the compressed bitstream */
    if (mp3_parse_vbr_tags(s, stream, data_start) < 0)
        url_fseek(s->pb, data_start, SEEK_SET);

    return 0;
}
#define MP3_PACKET_SIZE 1024

/**
 * Read one fixed-size chunk of the raw MP3 stream; the attached parser
 * (AVSTREAM_PARSE_FULL) reassembles proper frames downstream.
 * @return number of bytes read, or a negative AVERROR code
 */
static int mp3_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret;

    ret = av_get_packet(s->pb, pkt, MP3_PACKET_SIZE);
    pkt->stream_index = 0;
    if (ret <= 0)
        /* propagate the real error (e.g. EOF) instead of masking it as EIO */
        return ret < 0 ? ret : AVERROR(EIO);
    /* note: we need to modify the packet size here to handle the last
       packet */
    pkt->size = ret;
    return ret;
}
/* Positional AVInputFormat fields: name, long_name, priv_data_size,
 * read_probe, read_header, read_packet; remaining fields designated. */
AVInputFormat mp3_demuxer = {
    "mp3",
    NULL_IF_CONFIG_SMALL("MPEG audio layer 2/3"),
    0,
    mp3_read_probe,
    mp3_read_header,
    mp3_read_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .extensions = "mp2,mp3,m2a", /* XXX: use probe */
    .metadata_conv = ff_id3v2_metadata_conv,
};
#endif
#if CONFIG_MP2_MUXER || CONFIG_MP3_MUXER
/**
 * Copy the value of metadata key 'key' into a fixed-size ID3v1 field.
 * NOTE: strncpy deliberately omits the NUL terminator when the value
 * fills the field — ID3v1 fields are fixed-width and need no terminator
 * (the whole tag buffer is pre-zeroed by the caller).
 * @return 1 if the tag existed and was copied, 0 otherwise
 */
static int id3v1_set_string(AVFormatContext *s, const char *key,
                            uint8_t *buf, int buf_size)
{
    AVMetadataTag *tag;
    if ((tag = av_metadata_get(s->metadata, key, NULL, 0)))
        strncpy(buf, tag->value, buf_size);
    return !!tag;
}
/**
 * Build a 128-byte ID3v1.1 tag ("TAG" magic + fixed-offset fields) from
 * the muxer's metadata.
 * @param buf receives exactly ID3v1_TAG_SIZE bytes
 * @return number of metadata fields stored (0 == nothing worth writing)
 */
static int id3v1_create_tag(AVFormatContext *s, uint8_t *buf)
{
    AVMetadataTag *tag;
    int i, count = 0;
    memset(buf, 0, ID3v1_TAG_SIZE); /* fail safe */
    buf[0] = 'T';
    buf[1] = 'A';
    buf[2] = 'G';
    count += id3v1_set_string(s, "title", buf + 3, 30);
    count += id3v1_set_string(s, "author", buf + 33, 30);
    count += id3v1_set_string(s, "album", buf + 63, 30);
    count += id3v1_set_string(s, "date", buf + 93, 4);
    count += id3v1_set_string(s, "comment", buf + 97, 30);
    /* ID3v1.1: zero at byte 125 marks byte 126 as the track number */
    if ((tag = av_metadata_get(s->metadata, "track", NULL, 0))) {
        buf[125] = 0;
        buf[126] = atoi(tag->value);
        count++;
    }
    buf[127] = 0xFF; /* default to unknown genre */
    /* map the genre name to its ID3v1 genre index, if known */
    if ((tag = av_metadata_get(s->metadata, "genre", NULL, 0))) {
        for(i = 0; i <= ID3v1_GENRE_MAX; i++) {
            if (!strcasecmp(tag->value, ff_id3v1_genre_str[i])) {
                buf[127] = i;
                count++;
                break;
            }
        }
    }
    return count;
}
/* simple formats */
/* Write 'size' as an ID3v2 syncsafe integer: four bytes carrying 7 bits
 * each, most significant group first. */
static void id3v2_put_size(AVFormatContext *s, int size)
{
    int shift;
    for (shift = 21; shift >= 0; shift -= 7)
        put_byte(s->pb, (size >> shift) & 0x7f);
}
/**
 * Write one ID3v2 text frame: a 10-byte frame header (tag id, syncsafe
 * size, zero flags) followed by an encoding byte and 'len' bytes of text.
 * The size field is len + 1 to account for the encoding byte.
 */
static void id3v2_put_ttag(AVFormatContext *s, const char *buf, int len,
                           uint32_t tag)
{
    put_be32(s->pb, tag);
    id3v2_put_size(s, len + 1);
    put_be16(s->pb, 0);
    put_byte(s->pb, 3); /* UTF-8 */
    put_buffer(s->pb, buf, len);
}
/* Raw passthrough: MP3/MP2 frames need no container framing, so the
 * packet data is written to the output verbatim and flushed. */
static int mp3_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    put_buffer(s->pb, pkt->data, pkt->size);
    put_flush_packet(s->pb);
    return 0;
}
/* Append a trailing ID3v1 tag, but only when there is at least one
 * metadata field to store. */
static int mp3_write_trailer(struct AVFormatContext *s)
{
    uint8_t tag_buf[ID3v1_TAG_SIZE];

    if (id3v1_create_tag(s, tag_buf) > 0) {
        put_buffer(s->pb, tag_buf, ID3v1_TAG_SIZE);
        put_flush_packet(s->pb);
    }
    return 0;
}
#endif /* CONFIG_MP2_MUXER || CONFIG_MP3_MUXER */
#if CONFIG_MP2_MUXER
/* Positional AVOutputFormat fields: name, long_name, mime_type,
 * extensions, priv_data_size, audio_codec, video_codec, write_header
 * (none for raw MP2), write_packet, write_trailer. */
AVOutputFormat mp2_muxer = {
    "mp2",
    NULL_IF_CONFIG_SMALL("MPEG audio layer 2"),
    "audio/x-mpeg",
    "mp2,m2a",
    0,
    CODEC_ID_MP2,
    CODEC_ID_NONE,
    NULL,
    mp3_write_packet,
    mp3_write_trailer,
};
#endif
#if CONFIG_MP3_MUXER
/**
 * Write an ID3v2.4 header at the beginning of the stream: one text frame
 * per metadata entry (known T??? keys directly, everything else as a
 * TXXX user-text frame), then backpatch the total tag size.
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int mp3_write_header(struct AVFormatContext *s)
{
    AVMetadataTag *t = NULL;
    int totlen = 0;
    int64_t size_pos, cur_pos;
    put_be32(s->pb, MKBETAG('I', 'D', '3', 0x04)); /* ID3v2.4 */
    put_byte(s->pb, 0);
    put_byte(s->pb, 0); /* flags */
    /* reserve space for size */
    size_pos = url_ftell(s->pb);
    put_be32(s->pb, 0);
    while ((t = av_metadata_get(s->metadata, "", t, AV_METADATA_IGNORE_SUFFIX))) {
        uint32_t tag = 0;
        /* four-letter T??? keys that match a known ID3v2 text frame id */
        if (t->key[0] == 'T' && strlen(t->key) == 4) {
            int i;
            for (i = 0; *ff_id3v2_tags[i]; i++)
                if (AV_RB32(t->key) == AV_RB32(ff_id3v2_tags[i])) {
                    int len = strlen(t->value);
                    tag = AV_RB32(t->key);
                    totlen += len + ID3v2_HEADER_SIZE + 2;
                    id3v2_put_ttag(s, t->value, len + 1, tag);
                    break;
                }
        }
        if (!tag) { /* unknown tag, write as TXXX frame */
            /* TXXX payload is "key\0value" (description + text) */
            int len = strlen(t->key), len1 = strlen(t->value);
            char *buf = av_malloc(len + len1 + 2);
            if (!buf)
                return AVERROR(ENOMEM);
            tag = MKBETAG('T', 'X', 'X', 'X');
            strcpy(buf, t->key);
            strcpy(buf + len + 1, t->value);
            id3v2_put_ttag(s, buf, len + len1 + 2, tag);
            totlen += len + len1 + ID3v2_HEADER_SIZE + 3;
            av_free(buf);
        }
    }
    /* backpatch the total tag size now that all frames are written */
    cur_pos = url_ftell(s->pb);
    url_fseek(s->pb, size_pos, SEEK_SET);
    id3v2_put_size(s, totlen);
    url_fseek(s->pb, cur_pos, SEEK_SET);
    return 0;
}
/* Positional AVOutputFormat fields up to 'flags'; metadata conversion
 * table added as a designated initializer. */
AVOutputFormat mp3_muxer = {
    "mp3",
    NULL_IF_CONFIG_SMALL("MPEG audio layer 3"),
    "audio/x-mpeg",
    "mp3",
    0,
    CODEC_ID_MP3,
    CODEC_ID_NONE,
    mp3_write_header,
    mp3_write_packet,
    mp3_write_trailer,
    AVFMT_NOTIMESTAMPS,
    .metadata_conv = ff_id3v2_metadata_conv,
};
#endif
| 123linslouis-android-video-cutter | jni/libavformat/mp3.c | C | asf20 | 10,138 |
/*
* MPEG2 transport stream (aka DVB) demuxer
* Copyright (c) 2002-2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
//#define DEBUG
//#define DEBUG_SEEK
//#define USE_SYNCPOINT_SEARCH
#include "libavutil/crc.h"
#include "libavutil/intreadwrite.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "mpegts.h"
#include "internal.h"
#include "seek.h"
/* 1.0 second at 24Mbit/s */
#define MAX_SCAN_PACKETS 32000
/* maximum size in which we look for synchronisation if
synchronisation is lost */
#define MAX_RESYNC_SIZE 65536
#define MAX_PES_PAYLOAD 200*1024
/* A PID is demuxed either as PES payload or as PSI sections. */
enum MpegTSFilterType {
    MPEGTS_PES,
    MPEGTS_SECTION,
};
typedef struct MpegTSFilter MpegTSFilter;
/* Called with each PES payload chunk; is_start flags payload_unit_start. */
typedef int PESCallback(MpegTSFilter *f, const uint8_t *buf, int len, int is_start, int64_t pos);
typedef struct MpegTSPESFilter {
    PESCallback *pes_cb;
    void *opaque;
} MpegTSPESFilter;
/* Called once a complete (optionally CRC-checked) section is assembled. */
typedef void SectionCallback(MpegTSFilter *f, const uint8_t *buf, int len);
typedef void SetServiceCallback(void *opaque, int ret);
typedef struct MpegTSSectionFilter {
    int section_index; /* bytes of the section gathered so far */
    int section_h_size; /* total section size, -1 while still unknown */
    uint8_t *section_buf;
    unsigned int check_crc:1;
    unsigned int end_of_section_reached:1;
    SectionCallback *section_cb;
    void *opaque;
} MpegTSSectionFilter;
/* Per-PID demux state: either a PES or a section filter. */
struct MpegTSFilter {
    int pid;
    int last_cc; /* last cc code (-1 if first packet) */
    enum MpegTSFilterType type;
    union {
        MpegTSPESFilter pes_filter;
        MpegTSSectionFilter section_filter;
    } u;
};
#define MAX_PIDS_PER_PROGRAM 64
/* One PAT entry: a service id plus the set of PIDs that belong to it. */
struct Program {
    unsigned int id; //program id/service id
    unsigned int nb_pids;
    unsigned int pids[MAX_PIDS_PER_PROGRAM];
};
struct MpegTSContext {
    /* user data */
    AVFormatContext *stream;
    /** raw packet size, including FEC if present */
    int raw_packet_size;
    int pos47;
    /** if true, all pids are analyzed to find streams */
    int auto_guess;
    /** compute exact PCR for each transport stream packet */
    int mpeg2ts_compute_pcr;
    int64_t cur_pcr; /**< used to estimate the exact PCR */
    int pcr_incr; /**< used to estimate the exact PCR */
    /* data needed to handle file based ts */
    /** stop parsing loop */
    int stop_parse;
    /** packet containing Audio/Video data */
    AVPacket *pkt;
    /** to detect seek */
    int64_t last_pos;
    /******************************************/
    /* private mpegts data */
    /* scan context */
    /** structure to keep track of Program->pids mapping */
    unsigned int nb_prg;
    struct Program *prg;
    /** filters for various streams specified by PMT + for the PAT and PMT */
    MpegTSFilter *pids[NB_PID_MAX];
};
/* TS stream handling */
/* PES reassembly state machine, one instance per PID. */
enum MpegTSState {
    MPEGTS_HEADER = 0, /* gathering the 6-byte start code + length */
    MPEGTS_PESHEADER, /* gathering the fixed 9-byte PES header */
    MPEGTS_PESHEADER_FILL, /* gathering the variable header remainder */
    MPEGTS_PAYLOAD, /* accumulating payload into the PES buffer */
    MPEGTS_SKIP, /* discarding until the next payload unit start */
};
/* enough for PES header + length */
#define PES_START_SIZE 6
#define PES_HEADER_SIZE 9
#define MAX_PES_HEADER_SIZE (9 + 255)
typedef struct PESContext {
    int pid;
    int pcr_pid; /**< if -1 then all packets containing PCR are considered */
    int stream_type;
    MpegTSContext *ts;
    AVFormatContext *stream;
    AVStream *st;
    AVStream *sub_st; /**< stream for the embedded AC3 stream in HDMV TrueHD */
    enum MpegTSState state;
    /* used to get the format */
    int data_index;
    int total_size;
    int pes_header_size;
    int extended_stream_id;
    int64_t pts, dts;
    int64_t ts_packet_pos; /**< position of first TS packet of this PES packet */
    uint8_t header[MAX_PES_HEADER_SIZE];
    uint8_t *buffer;
} PESContext;
extern AVInputFormat mpegts_demuxer;
/* Drop all pid mappings of the program with the given id; the program
 * table entry itself is kept. */
static void clear_program(MpegTSContext *ts, unsigned int programid)
{
    unsigned int idx;
    for (idx = 0; idx < ts->nb_prg; idx++) {
        if (ts->prg[idx].id == programid)
            ts->prg[idx].nb_pids = 0;
    }
}
/* Release the whole program table. */
static void clear_programs(MpegTSContext *ts)
{
    ts->nb_prg = 0;
    av_freep(&ts->prg);
}
/* Append a new (empty) program entry to the table; silently keeps the
 * old table on allocation failure. */
static void add_pat_entry(MpegTSContext *ts, unsigned int programid)
{
    struct Program *entry;
    void *grown = av_realloc(ts->prg, (ts->nb_prg + 1) * sizeof(struct Program));
    if (!grown)
        return;
    ts->prg = grown;
    entry = &ts->prg[ts->nb_prg++];
    entry->id = programid;
    entry->nb_pids = 0;
}
/* Record that 'pid' belongs to program 'programid'; no-op if the program
 * is unknown or its pid list is already full. */
static void add_pid_to_pmt(MpegTSContext *ts, unsigned int programid, unsigned int pid)
{
    unsigned int idx;
    for (idx = 0; idx < ts->nb_prg; idx++) {
        struct Program *prog = &ts->prg[idx];
        if (prog->id != programid)
            continue;
        if (prog->nb_pids < MAX_PIDS_PER_PROGRAM)
            prog->pids[prog->nb_pids++] = pid;
        return;
    }
}
/**
 * \brief discard_pid() decides if the pid is to be discarded according
 *        to caller's programs selection
 * \param ts : - TS context
 * \param pid : - pid
 * \return 1 if the pid is only comprised in programs that have .discard=AVDISCARD_ALL
 *         0 otherwise
 */
static int discard_pid(MpegTSContext *ts, unsigned int pid)
{
    int used = 0, discarded = 0;
    int i, j, k;
    for (i = 0; i < ts->nb_prg; i++) {
        const struct Program *prg = &ts->prg[i];
        for (j = 0; j < prg->nb_pids; j++) {
            if (prg->pids[j] != pid)
                continue;
            /* tally how the caller flagged each program carrying this pid */
            for (k = 0; k < ts->stream->nb_programs; k++) {
                if (ts->stream->programs[k]->id != prg->id)
                    continue;
                if (ts->stream->programs[k]->discard == AVDISCARD_ALL)
                    discarded++;
                else
                    used++;
            }
        }
    }
    return !used && discarded;
}
/**
 * Assemble PSI sections out of TS packet payloads and call the filter's
 * "section_cb" once a complete (and, if requested, CRC-valid) section
 * has been gathered.  is_start marks a payload beginning a new section.
 */
static void write_section_data(AVFormatContext *s, MpegTSFilter *tss1,
                               const uint8_t *buf, int buf_size, int is_start)
{
    MpegTSSectionFilter *tss = &tss1->u.section_filter;
    int len;
    if (is_start) {
        memcpy(tss->section_buf, buf, buf_size);
        tss->section_index = buf_size;
        tss->section_h_size = -1;
        tss->end_of_section_reached = 0;
    } else {
        if (tss->end_of_section_reached)
            return;
        /* append, but never beyond the 4096-byte maximum section size */
        len = 4096 - tss->section_index;
        if (buf_size < len)
            len = buf_size;
        memcpy(tss->section_buf + tss->section_index, buf, len);
        tss->section_index += len;
    }
    /* compute section length if possible */
    if (tss->section_h_size == -1 && tss->section_index >= 3) {
        /* 12-bit section_length plus the 3 header bytes preceding it */
        len = (AV_RB16(tss->section_buf + 1) & 0xfff) + 3;
        if (len > 4096)
            return;
        tss->section_h_size = len;
    }
    if (tss->section_h_size != -1 && tss->section_index >= tss->section_h_size) {
        tss->end_of_section_reached = 1;
        /* a section including its trailing CRC32 yields a residue of 0 */
        if (!tss->check_crc ||
            av_crc(av_crc_get_table(AV_CRC_32_IEEE), -1,
                   tss->section_buf, tss->section_h_size) == 0)
            tss->section_cb(tss1, tss->section_buf, tss->section_h_size);
    }
}
/**
 * Create and register a section filter on 'pid'.
 * @return the new filter, or NULL if the pid is invalid/busy or on OOM
 */
static MpegTSFilter *mpegts_open_section_filter(MpegTSContext *ts, unsigned int pid,
                                                SectionCallback *section_cb, void *opaque,
                                                int check_crc)
{
    MpegTSFilter *filter;
    MpegTSSectionFilter *sec;
    dprintf(ts->stream, "Filter: pid=0x%x\n", pid);
    if (pid >= NB_PID_MAX || ts->pids[pid])
        return NULL;
    filter = av_mallocz(sizeof(MpegTSFilter));
    if (!filter)
        return NULL;
    filter->type = MPEGTS_SECTION;
    filter->pid = pid;
    filter->last_cc = -1;
    sec = &filter->u.section_filter;
    sec->section_cb = section_cb;
    sec->opaque = opaque;
    sec->section_buf = av_malloc(MAX_SECTION_SIZE);
    sec->check_crc = check_crc;
    if (!sec->section_buf) {
        /* bug fix: previously ts->pids[pid] was already set here, leaving
         * a dangling pointer to the freed filter on this error path */
        av_free(filter);
        return NULL;
    }
    /* publish only a fully constructed filter */
    ts->pids[pid] = filter;
    return filter;
}
/* Create and register a PES filter on 'pid'; NULL if the pid is invalid,
 * already filtered, or on allocation failure. */
static MpegTSFilter *mpegts_open_pes_filter(MpegTSContext *ts, unsigned int pid,
                                            PESCallback *pes_cb,
                                            void *opaque)
{
    MpegTSFilter *f;
    if (pid >= NB_PID_MAX || ts->pids[pid])
        return NULL;
    f = av_mallocz(sizeof(MpegTSFilter));
    if (!f)
        return NULL;
    ts->pids[pid] = f;
    f->type = MPEGTS_PES;
    f->pid = pid;
    f->last_cc = -1; /* no continuity counter seen yet */
    f->u.pes_filter.pes_cb = pes_cb;
    f->u.pes_filter.opaque = opaque;
    return f;
}
/* Tear down a filter and free its PID slot.  A PESContext is only freed
 * here when no AVStream references it; otherwise it is owned by the
 * stream and released later in av_close_input_stream. */
static void mpegts_close_filter(MpegTSContext *ts, MpegTSFilter *filter)
{
    int pid;
    pid = filter->pid;
    if (filter->type == MPEGTS_SECTION)
        av_freep(&filter->u.section_filter.section_buf);
    else if (filter->type == MPEGTS_PES) {
        PESContext *pes = filter->u.pes_filter.opaque;
        av_freep(&pes->buffer);
        /* referenced private data will be freed later in
         * av_close_input_stream */
        if (!((PESContext *)filter->u.pes_filter.opaque)->st) {
            av_freep(&filter->u.pes_filter.opaque);
        }
    }
    av_free(filter);
    ts->pids[pid] = NULL;
}
/* Score 'packet_size' as a candidate TS packet size: count plausible sync
 * bytes (0x47 with sane transport header bits) at each offset modulo
 * packet_size and return the best per-offset count.  If 'index' is
 * non-NULL it receives the winning offset. */
static int analyze(const uint8_t *buf, int size, int packet_size, int *index)
{
    int stat[TS_MAX_PACKET_SIZE];
    int pos, phase, best = 0;
    memset(stat, 0, packet_size * sizeof(int));
    phase = 0;
    for (pos = 0; pos < size - 3; pos++) {
        if (buf[pos] == 0x47 && !(buf[pos + 1] & 0x80) && (buf[pos + 3] & 0x30)) {
            if (++stat[phase] > best) {
                best = stat[phase];
                if (index)
                    *index = phase;
            }
        }
        if (++phase == packet_size)
            phase = 0;
    }
    return best;
}
/* Autodetect the raw packet size (188 plain, 192 DVHS, 204 FEC) by
 * scoring each candidate; requires at least TS_FEC_PACKET_SIZE*5+1 bytes.
 * Returns -1 when the buffer is too small or the scores are ambiguous. */
static int get_packet_size(const uint8_t *buf, int size)
{
    int score, fec_score, dvhs_score;
    if (size < (TS_FEC_PACKET_SIZE * 5 + 1))
        return -1;
    score      = analyze(buf, size, TS_PACKET_SIZE, NULL);
    dvhs_score = analyze(buf, size, TS_DVHS_PACKET_SIZE, NULL);
    fec_score  = analyze(buf, size, TS_FEC_PACKET_SIZE, NULL);
//    av_log(NULL, AV_LOG_DEBUG, "score: %d, dvhs_score: %d, fec_score: %d \n", score, dvhs_score, fec_score);
    if (score > fec_score && score > dvhs_score)
        return TS_PACKET_SIZE;
    if (dvhs_score > score && dvhs_score > fec_score)
        return TS_DVHS_PACKET_SIZE;
    if (score < fec_score && dvhs_score < fec_score)
        return TS_FEC_PACKET_SIZE;
    return -1;
}
/* Common prefix of PSI sections (PAT/PMT/SDT) after the pointer field. */
typedef struct SectionHeader {
    uint8_t tid; /* table id */
    uint16_t id; /* transport_stream / program / service id */
    uint8_t version;
    uint8_t sec_num;
    uint8_t last_sec_num;
} SectionHeader;
/* Read one byte from *pp (advancing it); -1 once p_end is reached. */
static inline int get8(const uint8_t **pp, const uint8_t *p_end)
{
    const uint8_t *cur = *pp;
    if (cur >= p_end)
        return -1;
    *pp = cur + 1;
    return *cur;
}
/* Read a big-endian 16-bit value from *pp (advancing it by two); -1 when
 * fewer than two bytes remain. */
static inline int get16(const uint8_t **pp, const uint8_t *p_end)
{
    const uint8_t *cur = *pp;
    if (cur + 1 >= p_end)
        return -1;
    *pp = cur + 2;
    return AV_RB16(cur);
}
/* Read and allocate a DVB string preceded by its one-byte length.
 * Returns a NUL-terminated av_malloc'ed copy, or NULL on truncation or
 * allocation failure; *pp is only advanced on success. */
static char *getstr8(const uint8_t **pp, const uint8_t *p_end)
{
    const uint8_t *cur = *pp;
    char *str;
    int len = get8(&cur, p_end);
    if (len < 0 || cur + len > p_end)
        return NULL;
    str = av_malloc(len + 1);
    if (!str)
        return NULL;
    memcpy(str, cur, len);
    str[len] = '\0';
    *pp = cur + len;
    return str;
}
/* Parse the fixed section header fields into *h, advancing *pp past
 * them.  Returns 0 on success, -1 on truncation. */
static int parse_section_header(SectionHeader *h,
                        const uint8_t **pp, const uint8_t *p_end)
{
    int val;

    if ((val = get8(pp, p_end)) < 0)
        return -1;
    h->tid = val;
    *pp += 2; /* skip section_length: already known from reassembly */
    if ((val = get16(pp, p_end)) < 0)
        return -1;
    h->id = val;
    if ((val = get8(pp, p_end)) < 0)
        return -1;
    h->version = (val >> 1) & 0x1f;
    if ((val = get8(pp, p_end)) < 0)
        return -1;
    h->sec_num = val;
    if ((val = get8(pp, p_end)) < 0)
        return -1;
    h->last_sec_num = val;
    return 0;
}
/* Mapping from an MPEG-TS stream_type (or descriptor tag / registration
 * fourcc) to the libavcodec media type and codec id.  Each table ends
 * with a zeroed sentinel entry. */
typedef struct {
    uint32_t stream_type;
    enum AVMediaType codec_type;
    enum CodecID codec_id;
} StreamType;
static const StreamType ISO_types[] = {
    { 0x01, AVMEDIA_TYPE_VIDEO, CODEC_ID_MPEG2VIDEO },
    { 0x02, AVMEDIA_TYPE_VIDEO, CODEC_ID_MPEG2VIDEO },
    { 0x03, AVMEDIA_TYPE_AUDIO, CODEC_ID_MP3 },
    { 0x04, AVMEDIA_TYPE_AUDIO, CODEC_ID_MP3 },
    { 0x0f, AVMEDIA_TYPE_AUDIO, CODEC_ID_AAC },
    { 0x10, AVMEDIA_TYPE_VIDEO, CODEC_ID_MPEG4 },
    //{ 0x11, AVMEDIA_TYPE_AUDIO, CODEC_ID_AAC }, /* LATM syntax */
    { 0x1b, AVMEDIA_TYPE_VIDEO, CODEC_ID_H264 },
    { 0xd1, AVMEDIA_TYPE_VIDEO, CODEC_ID_DIRAC },
    { 0xea, AVMEDIA_TYPE_VIDEO, CODEC_ID_VC1 },
    { 0 },
};
/* Blu-ray/HDMV private stream types */
static const StreamType HDMV_types[] = {
    { 0x80, AVMEDIA_TYPE_AUDIO, CODEC_ID_PCM_BLURAY },
    { 0x81, AVMEDIA_TYPE_AUDIO, CODEC_ID_AC3 },
    { 0x82, AVMEDIA_TYPE_AUDIO, CODEC_ID_DTS },
    { 0x83, AVMEDIA_TYPE_AUDIO, CODEC_ID_TRUEHD },
    { 0x84, AVMEDIA_TYPE_AUDIO, CODEC_ID_EAC3 },
    { 0x90, AVMEDIA_TYPE_SUBTITLE, CODEC_ID_HDMV_PGS_SUBTITLE },
    { 0 },
};
/* ATSC ? */
static const StreamType MISC_types[] = {
    { 0x81, AVMEDIA_TYPE_AUDIO, CODEC_ID_AC3 },
    { 0x8a, AVMEDIA_TYPE_AUDIO, CODEC_ID_DTS },
    { 0 },
};
/* keyed by registration descriptor fourcc rather than stream_type */
static const StreamType REGD_types[] = {
    { MKTAG('d','r','a','c'), AVMEDIA_TYPE_VIDEO, CODEC_ID_DIRAC },
    { MKTAG('A','C','-','3'), AVMEDIA_TYPE_AUDIO, CODEC_ID_AC3 },
    { 0 },
};
/* descriptor present */
static const StreamType DESC_types[] = {
    { 0x6a, AVMEDIA_TYPE_AUDIO, CODEC_ID_AC3 }, /* AC-3 descriptor */
    { 0x7a, AVMEDIA_TYPE_AUDIO, CODEC_ID_EAC3 }, /* E-AC-3 descriptor */
    { 0x7b, AVMEDIA_TYPE_AUDIO, CODEC_ID_DTS },
    { 0x56, AVMEDIA_TYPE_SUBTITLE, CODEC_ID_DVB_TELETEXT },
    { 0x59, AVMEDIA_TYPE_SUBTITLE, CODEC_ID_DVB_SUBTITLE }, /* subtitling descriptor */
    { 0 },
};
/* Look 'stream_type' up in a sentinel-terminated StreamType table and,
 * on a match, set the stream's codec type and id. */
static void mpegts_find_stream_type(AVStream *st,
                                    uint32_t stream_type, const StreamType *types)
{
    const StreamType *entry;
    for (entry = types; entry->stream_type; entry++) {
        if (entry->stream_type == stream_type) {
            st->codec->codec_type = entry->codec_type;
            st->codec->codec_id = entry->codec_id;
            break;
        }
    }
}
/**
 * Initialize an AVStream from PMT information: 90 kHz timebase, codec
 * lookup by stream_type, and — for HDMV TrueHD (0x83) — a second
 * AVStream for the embedded AC3 track.
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int mpegts_set_stream_info(AVStream *st, PESContext *pes,
                                  uint32_t stream_type, uint32_t prog_reg_desc)
{
    av_set_pts_info(st, 33, 1, 90000);
    st->priv_data = pes;
    st->codec->codec_type = AVMEDIA_TYPE_DATA;
    st->codec->codec_id = CODEC_ID_NONE;
    st->need_parsing = AVSTREAM_PARSE_FULL;
    pes->st = st;
    pes->stream_type = stream_type;
    av_log(pes->stream, AV_LOG_DEBUG,
           "stream=%d stream_type=%x pid=%x prog_reg_desc=%.4s\n",
           st->index, pes->stream_type, pes->pid, (char*)&prog_reg_desc);
    st->codec->codec_tag = pes->stream_type;
    mpegts_find_stream_type(st, pes->stream_type, ISO_types);
    if (prog_reg_desc == AV_RL32("HDMV") &&
        st->codec->codec_id == CODEC_ID_NONE) {
        mpegts_find_stream_type(st, pes->stream_type, HDMV_types);
        if (pes->stream_type == 0x83) {
            // HDMV TrueHD streams also contain an AC3 coded version of the
            // audio track - add a second stream for this
            AVStream *sub_st;
            // priv_data cannot be shared between streams
            PESContext *sub_pes = av_malloc(sizeof(*sub_pes));
            if (!sub_pes)
                return AVERROR(ENOMEM);
            memcpy(sub_pes, pes, sizeof(*sub_pes));
            sub_st = av_new_stream(pes->stream, pes->pid);
            if (!sub_st) {
                av_free(sub_pes);
                return AVERROR(ENOMEM);
            }
            av_set_pts_info(sub_st, 33, 1, 90000);
            sub_st->priv_data = sub_pes;
            sub_st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            sub_st->codec->codec_id = CODEC_ID_AC3;
            sub_st->need_parsing = AVSTREAM_PARSE_FULL;
            sub_pes->sub_st = pes->sub_st = sub_st;
        }
    }
    if (pes->stream_type == 0x11)
        av_log(pes->stream, AV_LOG_WARNING,
               "AAC LATM not currently supported, patch welcome\n");
    if (st->codec->codec_id == CODEC_ID_NONE)
        mpegts_find_stream_type(st, pes->stream_type, MISC_types);
    return 0;
}
/* Decode a 33-bit PTS/DTS from the 5-byte MPEG timestamp encoding
 * (3 + 15 + 15 bits, each group followed by a marker bit). */
static int64_t get_pts(const uint8_t *p)
{
    int64_t value;
    value  = (int64_t)((p[0] >> 1) & 0x07) << 30;
    value |= (AV_RB16(p + 1) >> 1) << 15;
    value |= AV_RB16(p + 3) >> 1;
    return value;
}
/* Hand the accumulated PES payload over to 'pkt' (buffer ownership moves
 * to the packet) and reset the PES context for the next packet. */
static void new_pes_packet(PESContext *pes, AVPacket *pkt)
{
    av_init_packet(pkt);
    pkt->destruct = av_destruct_packet;
    pkt->data = pes->buffer;
    pkt->size = pes->data_index;
    memset(pkt->data+pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    // Separate out the AC3 substream from an HDMV combined TrueHD/AC3 PID
    if (pes->sub_st && pes->stream_type == 0x83 && pes->extended_stream_id == 0x76)
        pkt->stream_index = pes->sub_st->index;
    else
        pkt->stream_index = pes->st->index;
    pkt->pts = pes->pts;
    pkt->dts = pes->dts;
    /* store position of first TS packet of this PES packet */
    pkt->pos = pes->ts_packet_pos;
    /* reset pts values */
    pes->pts = AV_NOPTS_VALUE;
    pes->dts = AV_NOPTS_VALUE;
    pes->buffer = NULL;
    pes->data_index = 0;
}
/**
 * Feed one TS packet's payload into the PES reassembly state machine.
 * Completed PES packets are emitted through ts->pkt (with ts->stop_parse
 * set so the caller returns them).  Returns 0 except on OOM.
 * (Despite the historical comment, the return value does not indicate
 * whether a packet was constructed.)
 */
static int mpegts_push_data(MpegTSFilter *filter,
                            const uint8_t *buf, int buf_size, int is_start,
                            int64_t pos)
{
    PESContext *pes = filter->u.pes_filter.opaque;
    MpegTSContext *ts = pes->ts;
    const uint8_t *p;
    int len, code;
    if(!ts->pkt)
        return 0;
    if (is_start) {
        /* a new payload unit flushes any partially accumulated payload */
        if (pes->state == MPEGTS_PAYLOAD && pes->data_index > 0) {
            new_pes_packet(pes, ts->pkt);
            ts->stop_parse = 1;
        }
        pes->state = MPEGTS_HEADER;
        pes->data_index = 0;
        pes->ts_packet_pos = pos;
    }
    p = buf;
    while (buf_size > 0) {
        switch(pes->state) {
        case MPEGTS_HEADER:
            len = PES_START_SIZE - pes->data_index;
            if (len > buf_size)
                len = buf_size;
            memcpy(pes->header + pes->data_index, p, len);
            pes->data_index += len;
            p += len;
            buf_size -= len;
            if (pes->data_index == PES_START_SIZE) {
                /* we got all the PES or section header. We can now
                   decide */
#if 0
                av_hex_dump_log(pes->stream, AV_LOG_DEBUG, pes->header, pes->data_index);
#endif
                if (pes->header[0] == 0x00 && pes->header[1] == 0x00 &&
                    pes->header[2] == 0x01) {
                    /* it must be an mpeg2 PES stream */
                    code = pes->header[3] | 0x100;
                    dprintf(pes->stream, "pid=%x pes_code=%#x\n", pes->pid, code);
                    if ((!pes->st && pes->stream->nb_streams == MAX_STREAMS) ||
                        (pes->st && pes->st->discard == AVDISCARD_ALL) ||
                        code == 0x1be) /* padding_stream */
                        goto skip;
                    /* stream not present in PMT */
                    if (!pes->st) {
                        pes->st = av_new_stream(ts->stream, pes->pid);
                        if (!pes->st)
                            return AVERROR(ENOMEM);
                        mpegts_set_stream_info(pes->st, pes, 0, 0);
                    }
                    pes->total_size = AV_RB16(pes->header + 4);
                    /* NOTE: a zero total size means the PES size is
                       unbounded */
                    if (!pes->total_size)
                        pes->total_size = MAX_PES_PAYLOAD;
                    /* allocate pes buffer */
                    pes->buffer = av_malloc(pes->total_size+FF_INPUT_BUFFER_PADDING_SIZE);
                    if (!pes->buffer)
                        return AVERROR(ENOMEM);
                    if (code != 0x1bc && code != 0x1bf && /* program_stream_map, private_stream_2 */
                        code != 0x1f0 && code != 0x1f1 && /* ECM, EMM */
                        code != 0x1ff && code != 0x1f2 && /* program_stream_directory, DSMCC_stream */
                        code != 0x1f8) { /* ITU-T Rec. H.222.1 type E stream */
                        pes->state = MPEGTS_PESHEADER;
                        if (pes->st->codec->codec_id == CODEC_ID_NONE) {
                            dprintf(pes->stream, "pid=%x stream_type=%x probing\n",
                                    pes->pid, pes->stream_type);
                            pes->st->codec->codec_id = CODEC_ID_PROBE;
                        }
                    } else {
                        pes->state = MPEGTS_PAYLOAD;
                        pes->data_index = 0;
                    }
                } else {
                    /* otherwise, it should be a table */
                    /* skip packet */
                skip:
                    pes->state = MPEGTS_SKIP;
                    continue;
                }
            }
            break;
            /**********************************************/
            /* PES packing parsing */
        case MPEGTS_PESHEADER:
            len = PES_HEADER_SIZE - pes->data_index;
            if (len < 0)
                return -1;
            if (len > buf_size)
                len = buf_size;
            memcpy(pes->header + pes->data_index, p, len);
            pes->data_index += len;
            p += len;
            buf_size -= len;
            if (pes->data_index == PES_HEADER_SIZE) {
                /* header[8] holds the remaining variable header length */
                pes->pes_header_size = pes->header[8] + 9;
                pes->state = MPEGTS_PESHEADER_FILL;
            }
            break;
        case MPEGTS_PESHEADER_FILL:
            len = pes->pes_header_size - pes->data_index;
            if (len < 0)
                return -1;
            if (len > buf_size)
                len = buf_size;
            memcpy(pes->header + pes->data_index, p, len);
            pes->data_index += len;
            p += len;
            buf_size -= len;
            if (pes->data_index == pes->pes_header_size) {
                const uint8_t *r;
                unsigned int flags, pes_ext, skip;
                flags = pes->header[7];
                r = pes->header + 9;
                pes->pts = AV_NOPTS_VALUE;
                pes->dts = AV_NOPTS_VALUE;
                if ((flags & 0xc0) == 0x80) {
                    pes->dts = pes->pts = get_pts(r);
                    r += 5;
                } else if ((flags & 0xc0) == 0xc0) {
                    pes->pts = get_pts(r);
                    r += 5;
                    pes->dts = get_pts(r);
                    r += 5;
                }
                pes->extended_stream_id = -1;
                if (flags & 0x01) { /* PES extension */
                    pes_ext = *r++;
                    /* Skip PES private data, program packet sequence counter and P-STD buffer */
                    skip = (pes_ext >> 4) & 0xb;
                    skip += skip & 0x9;
                    r += skip;
                    if ((pes_ext & 0x41) == 0x01 &&
                        (r + 2) <= (pes->header + pes->pes_header_size)) {
                        /* PES extension 2 */
                        if ((r[0] & 0x7f) > 0 && (r[1] & 0x80) == 0)
                            pes->extended_stream_id = r[1];
                    }
                }
                /* we got the full header. We parse it and get the payload */
                pes->state = MPEGTS_PAYLOAD;
                pes->data_index = 0;
            }
            break;
        case MPEGTS_PAYLOAD:
            if (buf_size > 0 && pes->buffer) {
                /* overflow of the announced size: emit what we have and
                   continue into a fresh, unbounded buffer */
                if (pes->data_index+buf_size > pes->total_size) {
                    new_pes_packet(pes, ts->pkt);
                    pes->total_size = MAX_PES_PAYLOAD;
                    pes->buffer = av_malloc(pes->total_size+FF_INPUT_BUFFER_PADDING_SIZE);
                    if (!pes->buffer)
                        return AVERROR(ENOMEM);
                    ts->stop_parse = 1;
                }
                memcpy(pes->buffer+pes->data_index, p, buf_size);
                pes->data_index += buf_size;
            }
            buf_size = 0;
            /* emit complete packets with known packet size
             * decreases demuxer delay for infrequent packets like subtitles from
             * a couple of seconds to milliseconds for properly muxed files.
             * total_size is the number of bytes following pes_packet_length
             * in the pes header, i.e. not counting the first 6 bytes */
            if (pes->total_size < MAX_PES_PAYLOAD &&
                pes->pes_header_size + pes->data_index == pes->total_size + 6) {
                ts->stop_parse = 1;
                new_pes_packet(pes, ts->pkt);
            }
            break;
        case MPEGTS_SKIP:
            buf_size = 0;
            break;
        }
    }
    return 0;
}
/* Allocate a PESContext for 'pid' and hook it into the demuxer via a PES
 * filter; returns NULL on allocation failure or if the pid is taken. */
static PESContext *add_pes_stream(MpegTSContext *ts, int pid, int pcr_pid)
{
    PESContext *pes = av_mallocz(sizeof(PESContext));
    if (!pes)
        return 0;
    pes->ts = ts;
    pes->stream = ts->stream;
    pes->pid = pid;
    pes->pcr_pid = pcr_pid;
    pes->state = MPEGTS_SKIP; /* discard until the first payload start */
    pes->pts = AV_NOPTS_VALUE;
    pes->dts = AV_NOPTS_VALUE;
    if (!mpegts_open_pes_filter(ts, pid, mpegts_push_data, pes)) {
        av_free(pes);
        return 0;
    }
    return pes;
}
/**
 * PMT section handler: creates/updates one AVStream per elementary
 * stream, records pid->program membership, and parses the per-stream
 * descriptors (language, subtitling, teletext, registration).  The PMT
 * filter closes itself once the section has been processed.
 */
static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len)
{
    MpegTSContext *ts = filter->u.section_filter.opaque;
    SectionHeader h1, *h = &h1;
    PESContext *pes;
    AVStream *st;
    const uint8_t *p, *p_end, *desc_list_end, *desc_end;
    int program_info_length, pcr_pid, pid, stream_type;
    int desc_list_len, desc_len, desc_tag;
    int comp_page, anc_page;
    char language[4];
    uint32_t prog_reg_desc = 0; /* registration descriptor */
#ifdef DEBUG
    dprintf(ts->stream, "PMT: len %i\n", section_len);
    av_hex_dump_log(ts->stream, AV_LOG_DEBUG, (uint8_t *)section, section_len);
#endif
    p_end = section + section_len - 4;
    p = section;
    if (parse_section_header(h, &p, p_end) < 0)
        return;
    dprintf(ts->stream, "sid=0x%x sec_num=%d/%d\n",
            h->id, h->sec_num, h->last_sec_num);
    if (h->tid != PMT_TID)
        return;
    clear_program(ts, h->id);
    pcr_pid = get16(&p, p_end) & 0x1fff;
    if (pcr_pid < 0)
        return;
    add_pid_to_pmt(ts, h->id, pcr_pid);
    dprintf(ts->stream, "pcr_pid=0x%x\n", pcr_pid);
    program_info_length = get16(&p, p_end) & 0xfff;
    if (program_info_length < 0)
        return;
    /* program-level descriptor loop: we only care about registration */
    while(program_info_length >= 2) {
        uint8_t tag, len;
        tag = get8(&p, p_end);
        len = get8(&p, p_end);
        if(len > program_info_length - 2)
            //something else is broken, exit the program_descriptors_loop
            break;
        program_info_length -= len + 2;
        if(tag == 0x05 && len >= 4) { // registration descriptor
            prog_reg_desc = bytestream_get_le32(&p);
            len -= 4;
        }
        p += len;
    }
    p += program_info_length;
    if (p >= p_end)
        return;
    // stop parsing after pmt, we found header
    if (!ts->stream->nb_streams)
        ts->stop_parse = 1;
    /* elementary stream loop */
    for(;;) {
        st = 0;
        stream_type = get8(&p, p_end);
        if (stream_type < 0)
            break;
        pid = get16(&p, p_end) & 0x1fff;
        if (pid < 0)
            break;
        /* now create ffmpeg stream */
        if (ts->pids[pid] && ts->pids[pid]->type == MPEGTS_PES) {
            pes = ts->pids[pid]->u.pes_filter.opaque;
            st = pes->st;
        } else {
            if (ts->pids[pid]) mpegts_close_filter(ts, ts->pids[pid]); //wrongly added sdt filter probably
            pes = add_pes_stream(ts, pid, pcr_pid);
            if (pes)
                st = av_new_stream(pes->stream, pes->pid);
        }
        if (!st)
            return;
        if (!pes->stream_type)
            mpegts_set_stream_info(st, pes, stream_type, prog_reg_desc);
        add_pid_to_pmt(ts, h->id, pid);
        ff_program_add_stream_index(ts->stream, h->id, st->index);
        desc_list_len = get16(&p, p_end) & 0xfff;
        if (desc_list_len < 0)
            break;
        desc_list_end = p + desc_list_len;
        if (desc_list_end > p_end)
            break;
        /* per-stream descriptor loop */
        for(;;) {
            desc_tag = get8(&p, desc_list_end);
            if (desc_tag < 0)
                break;
            desc_len = get8(&p, desc_list_end);
            if (desc_len < 0)
                break;
            desc_end = p + desc_len;
            if (desc_end > desc_list_end)
                break;
            dprintf(ts->stream, "tag: 0x%02x len=%d\n",
                    desc_tag, desc_len);
            if (st->codec->codec_id == CODEC_ID_NONE &&
                stream_type == STREAM_TYPE_PRIVATE_DATA)
                mpegts_find_stream_type(st, desc_tag, DESC_types);
            switch(desc_tag) {
            case 0x56: /* DVB teletext descriptor */
                language[0] = get8(&p, desc_end);
                language[1] = get8(&p, desc_end);
                language[2] = get8(&p, desc_end);
                language[3] = 0;
                av_metadata_set2(&st->metadata, "language", language, 0);
                break;
            case 0x59: /* subtitling descriptor */
                language[0] = get8(&p, desc_end);
                language[1] = get8(&p, desc_end);
                language[2] = get8(&p, desc_end);
                language[3] = 0;
                get8(&p, desc_end);
                comp_page = get16(&p, desc_end);
                anc_page = get16(&p, desc_end);
                st->codec->sub_id = (anc_page << 16) | comp_page;
                av_metadata_set2(&st->metadata, "language", language, 0);
                break;
            case 0x0a: /* ISO 639 language descriptor */
                language[0] = get8(&p, desc_end);
                language[1] = get8(&p, desc_end);
                language[2] = get8(&p, desc_end);
                language[3] = 0;
                av_metadata_set2(&st->metadata, "language", language, 0);
                break;
            case 0x05: /* registration descriptor */
                st->codec->codec_tag = bytestream_get_le32(&p);
                dprintf(ts->stream, "reg_desc=%.4s\n", (char*)&st->codec->codec_tag);
                if (st->codec->codec_id == CODEC_ID_NONE &&
                    stream_type == STREAM_TYPE_PRIVATE_DATA)
                    mpegts_find_stream_type(st, st->codec->codec_tag, REGD_types);
                break;
            default:
                break;
            }
            p = desc_end;
            /* mirror descriptor info onto the embedded AC3 sub-stream */
            if (prog_reg_desc == AV_RL32("HDMV") && stream_type == 0x83 && pes->sub_st) {
                ff_program_add_stream_index(ts->stream, h->id, pes->sub_st->index);
                pes->sub_st->codec->codec_tag = st->codec->codec_tag;
            }
        }
        p = desc_list_end;
    }
    /* all parameters are there */
    mpegts_close_filter(ts, filter);
}
/* PAT (Program Association Table) section callback: registers one program
 * entry and one PMT section filter per service listed in the table. */
static void pat_cb(MpegTSFilter *filter, const uint8_t *section, int section_len)
{
    MpegTSContext *ts = filter->u.section_filter.opaque;
    SectionHeader h1, *h = &h1;
    const uint8_t *p, *p_end;
    int sid, pmt_pid;
#ifdef DEBUG
    dprintf(ts->stream, "PAT:\n");
    av_hex_dump_log(ts->stream, AV_LOG_DEBUG, (uint8_t *)section, section_len);
#endif
    p_end = section + section_len - 4; /* last 4 bytes are the CRC32 */
    p = section;
    if (parse_section_header(h, &p, p_end) < 0)
        return;
    if (h->tid != PAT_TID)
        return;
    clear_programs(ts); /* a new PAT replaces the previous program list */
    for(;;) {
        sid = get16(&p, p_end);
        if (sid < 0)
            break;
        /* Test for end of input BEFORE masking: get16() signals EOF with a
         * negative value, which "& 0x1fff" would turn into a bogus but
         * valid-looking PID (0x1fff), making the check below dead code. */
        pmt_pid = get16(&p, p_end);
        if (pmt_pid < 0)
            break;
        pmt_pid &= 0x1fff; /* PID is the low 13 bits */
        dprintf(ts->stream, "sid=0x%x pid=0x%x\n", sid, pmt_pid);
        if (sid == 0x0000) {
            /* NIT info */
        } else {
            av_new_program(ts->stream, sid);
            mpegts_open_section_filter(ts, pmt_pid, pmt_cb, ts, 1);
            add_pat_entry(ts, sid);
            add_pid_to_pmt(ts, sid, 0); //add pat pid to program
            add_pid_to_pmt(ts, sid, pmt_pid);
        }
    }
}
/* SDT (Service Description Table) section callback: extracts service name,
 * provider name and language metadata for each service. */
static void sdt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len)
{
    MpegTSContext *ts = filter->u.section_filter.opaque;
    SectionHeader h1, *h = &h1;
    const uint8_t *p, *p_end, *desc_list_end, *desc_end;
    int onid, val, sid, desc_list_len, desc_tag, desc_len, service_type;
    char *name, *provider_name;
#ifdef DEBUG
    dprintf(ts->stream, "SDT:\n");
    av_hex_dump_log(ts->stream, AV_LOG_DEBUG, (uint8_t *)section, section_len);
#endif
    p_end = section + section_len - 4; /* last 4 bytes are the CRC32 */
    p = section;
    if (parse_section_header(h, &p, p_end) < 0)
        return;
    if (h->tid != SDT_TID)
        return;
    onid = get16(&p, p_end); /* original_network_id */
    if (onid < 0)
        return;
    val = get8(&p, p_end); /* reserved byte */
    if (val < 0)
        return;
    for(;;) {
        sid = get16(&p, p_end); /* service_id */
        if (sid < 0)
            break;
        val = get8(&p, p_end); /* EIT schedule/present flags */
        if (val < 0)
            break;
        /* Test for end of input BEFORE masking: get16() signals EOF with a
         * negative value, which "& 0xfff" would otherwise hide, making the
         * check below dead code. */
        desc_list_len = get16(&p, p_end);
        if (desc_list_len < 0)
            break;
        desc_list_len &= 0xfff;
        desc_list_end = p + desc_list_len;
        if (desc_list_end > p_end)
            break;
        for(;;) {
            desc_tag = get8(&p, desc_list_end);
            if (desc_tag < 0)
                break;
            desc_len = get8(&p, desc_list_end);
            /* Previously missing (present in the parallel PMT descriptor
             * loop): without it a failed read (-1) made desc_end point
             * before p. */
            if (desc_len < 0)
                break;
            desc_end = p + desc_len;
            if (desc_end > desc_list_end)
                break;
            dprintf(ts->stream, "tag: 0x%02x len=%d\n",
                    desc_tag, desc_len);
            switch(desc_tag) {
            case 0x48: /* service descriptor */
                service_type = get8(&p, p_end);
                if (service_type < 0)
                    break;
                provider_name = getstr8(&p, p_end);
                if (!provider_name)
                    break;
                name = getstr8(&p, p_end);
                if (name) {
                    AVProgram *program = av_new_program(ts->stream, sid);
                    if(program) {
                        av_metadata_set2(&program->metadata, "name", name, 0);
                        av_metadata_set2(&program->metadata, "provider_name", provider_name, 0);
                    }
                }
                av_free(name);
                av_free(provider_name);
                break;
            default:
                break;
            }
            p = desc_end; /* always resync to the descriptor boundary */
        }
        p = desc_list_end;
    }
}
/* Handle one raw TS packet (TS_PACKET_SIZE bytes starting at the 0x47 sync
 * byte): look up the per-PID filter and feed it the payload, either as PSI
 * section data or as PES data. Returns 0, or a negative error propagated
 * from the PES callback. */
static int handle_packet(MpegTSContext *ts, const uint8_t *packet)
{
    AVFormatContext *s = ts->stream;
    MpegTSFilter *tss;
    int len, pid, cc, cc_ok, afc, is_start;
    const uint8_t *p, *p_end;
    int64_t pos;
    pid = AV_RB16(packet + 1) & 0x1fff; /* 13-bit PID */
    if(pid && discard_pid(ts, pid))
        return 0;
    is_start = packet[1] & 0x40; /* payload_unit_start_indicator */
    tss = ts->pids[pid];
    /* unknown PID: in auto-guess mode, probe it as a new PES stream as soon
     * as a payload start is seen */
    if (ts->auto_guess && tss == NULL && is_start) {
        add_pes_stream(ts, pid, -1);
        tss = ts->pids[pid];
    }
    if (!tss)
        return 0;
    /* continuity check (currently not used) */
    cc = (packet[3] & 0xf);
    cc_ok = (tss->last_cc < 0) || ((((tss->last_cc + 1) & 0x0f) == cc));
    tss->last_cc = cc;
    /* skip adaptation field */
    afc = (packet[3] >> 4) & 3;
    p = packet + 4;
    if (afc == 0) /* reserved value */
        return 0;
    if (afc == 2) /* adaptation field only */
        return 0;
    if (afc == 3) {
        /* skip adapation field */
        p += p[0] + 1;
    }
    /* if past the end of packet, ignore */
    p_end = packet + TS_PACKET_SIZE;
    if (p >= p_end)
        return 0;
    pos = url_ftell(ts->stream->pb);
    /* remember the phase of packet starts within the file; mpegts_get_pcr()
     * uses this to realign on packet boundaries when seeking */
    ts->pos47= pos % ts->raw_packet_size;
    if (tss->type == MPEGTS_SECTION) {
        if (is_start) {
            /* pointer field present */
            len = *p++;
            if (p + len > p_end)
                return 0;
            if (len && cc_ok) {
                /* write remaining section bytes */
                write_section_data(s, tss,
                                   p, len, 0);
                /* check whether filter has been closed */
                if (!ts->pids[pid])
                    return 0;
            }
            p += len;
            if (p < p_end) {
                /* start of a new section after the pointer field */
                write_section_data(s, tss,
                                   p, p_end - p, 1);
            }
        } else {
            if (cc_ok) {
                write_section_data(s, tss,
                                   p, p_end - p, 0);
            }
        }
    } else {
        int ret;
        // Note: The position here points actually behind the current packet.
        if ((ret = tss->u.pes_filter.pes_cb(tss, p, p_end - p, is_start,
                                            pos - ts->raw_packet_size)) < 0)
            return ret;
    }
    return 0;
}
/* XXX: try to find a better synchro over several packets (use
   get_packet_size() ?) */
/* Scan forward until the next 0x47 sync byte and leave the stream
 * positioned on it. Returns 0 on success, -1 on EOF or after
 * MAX_RESYNC_SIZE bytes without a sync byte. */
static int mpegts_resync(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    int remaining = MAX_RESYNC_SIZE;
    while (remaining-- > 0) {
        int byte = url_fgetc(pb);
        if (byte < 0)
            return -1;
        if (byte == 0x47) {
            /* step back so the caller re-reads the sync byte */
            url_fseek(pb, -1, SEEK_CUR);
            return 0;
        }
    }
    av_log(s, AV_LOG_ERROR, "max resync size reached, could not find sync byte\n");
    /* no sync found */
    return -1;
}
/* Read one raw TS packet of TS_PACKET_SIZE bytes into buf, resyncing on a
 * lost 0x47 sync byte and skipping any per-packet trailer (FEC/timestamp)
 * implied by raw_packet_size. Returns 0 on success, negative AVERROR on
 * error or EOF. */
static int read_packet(AVFormatContext *s, uint8_t *buf, int raw_packet_size)
{
    ByteIOContext *pb = s->pb;
    for (;;) {
        int got = get_buffer(pb, buf, TS_PACKET_SIZE);
        if (got != TS_PACKET_SIZE)
            return AVERROR(EIO);
        /* check paquet sync byte */
        if (buf[0] == 0x47) {
            int trailer = raw_packet_size - TS_PACKET_SIZE;
            if (trailer > 0)
                url_fskip(pb, trailer);
            return 0;
        }
        /* lost sync: rewind this read and search for the next packet start */
        url_fseek(pb, -TS_PACKET_SIZE, SEEK_CUR);
        if (mpegts_resync(s) < 0)
            return AVERROR(EAGAIN);
    }
}
/* Read and dispatch TS packets until stop_parse is raised by a callback,
 * nb_packets packets have been processed (0 = unbounded), or an I/O error
 * occurs. Returns 0 or a negative error. */
static int handle_packets(MpegTSContext *ts, int nb_packets)
{
    AVFormatContext *s = ts->stream;
    uint8_t packet[TS_PACKET_SIZE];
    int seen = 0;
    int err;
    ts->stop_parse = 0;
    while (ts->stop_parse <= 0) {
        seen++;
        if (nb_packets != 0 && seen >= nb_packets)
            break;
        err = read_packet(s, packet, ts->raw_packet_size);
        if (err != 0)
            return err;
        err = handle_packet(ts, packet);
        if (err != 0)
            return err;
    }
    return 0;
}
/* Probe for an MPEG-TS stream: score three candidate raw packet sizes
 * (188 plain TS, 192 DVHS/M2TS, 204 TS+FEC) with analyze() and report the
 * best match; returns a negative value when no size scores convincingly. */
static int mpegts_probe(AVProbeData *p)
{
#if 1
    const int size= p->buf_size;
    int score, fec_score, dvhs_score;
    /* number of whole packets available at the largest candidate size */
    int check_count= size / TS_FEC_PACKET_SIZE;
#define CHECK_COUNT 10
    if (check_count < CHECK_COUNT)
        return -1;
    /* scores are normalized to CHECK_COUNT packets so different buffer
     * sizes remain comparable */
    score = analyze(p->buf, TS_PACKET_SIZE *check_count, TS_PACKET_SIZE , NULL)*CHECK_COUNT/check_count;
    dvhs_score= analyze(p->buf, TS_DVHS_PACKET_SIZE*check_count, TS_DVHS_PACKET_SIZE, NULL)*CHECK_COUNT/check_count;
    fec_score = analyze(p->buf, TS_FEC_PACKET_SIZE *check_count, TS_FEC_PACKET_SIZE , NULL)*CHECK_COUNT/check_count;
//    av_log(NULL, AV_LOG_DEBUG, "score: %d, dvhs_score: %d, fec_score: %d \n", score, dvhs_score, fec_score);
// we need a clear definition for the returned score otherwise things will become messy sooner or later
    if (score > fec_score && score > dvhs_score && score > 6) return AVPROBE_SCORE_MAX + score - CHECK_COUNT;
    else if(dvhs_score > score && dvhs_score > fec_score && dvhs_score > 6) return AVPROBE_SCORE_MAX + dvhs_score - CHECK_COUNT;
    else if( fec_score > 6) return AVPROBE_SCORE_MAX + fec_score - CHECK_COUNT;
    else return -1;
#else
    /* only use the extension for safer guess */
    if (av_match_ext(p->filename, "ts"))
        return AVPROBE_SCORE_MAX;
    else
        return 0;
#endif
}
/* Extract the PCR from a TS packet's adaptation field: the 33-bit,
 * 90 kHz base goes to *ppcr_high and the 9-bit, 27 MHz extension to
 * *ppcr_low. Returns 0 on success, -1 if the packet carries no PCR. */
static int parse_pcr(int64_t *ppcr_high, int *ppcr_low,
                     const uint8_t *packet)
{
    const uint8_t *field = packet + 4;
    unsigned int base32;
    int adaptation, field_len, field_flags;
    adaptation = (packet[3] >> 4) & 3;
    if (adaptation <= 1)
        return -1; /* no adaptation field present */
    field_len = *field++;
    if (field_len == 0)
        return -1;
    field_flags = *field++;
    field_len--;
    if (!(field_flags & 0x10))
        return -1; /* PCR_flag not set */
    if (field_len < 6)
        return -1; /* field too short to hold the 6-byte PCR */
    /* big-endian read: top 32 bits of the 33-bit base */
    base32 = ((unsigned int)field[0] << 24) | (field[1] << 16) |
             (field[2] << 8) | field[3];
    *ppcr_high = ((int64_t)base32 << 1) | (field[4] >> 7);
    *ppcr_low  = ((field[4] & 1) << 8) | field[5];
    return 0;
}
/* Read header for both TS demuxers. For "mpegts", probe the first
 * s->probesize bytes to collect PAT/PMT/SDT info; for "mpegtsraw", create a
 * single data stream and estimate the mux rate from the first two PCRs. */
static int mpegts_read_header(AVFormatContext *s,
                              AVFormatParameters *ap)
{
    MpegTSContext *ts = s->priv_data;
    ByteIOContext *pb = s->pb;
    uint8_t buf[5*1024];
    int len;
    int64_t pos;
    if (ap) {
        ts->mpeg2ts_compute_pcr = ap->mpeg2ts_compute_pcr;
        if(ap->mpeg2ts_raw){
            av_log(s, AV_LOG_ERROR, "use mpegtsraw_demuxer!\n");
            return -1;
        }
    }
    /* read the first 5 kB to determine the raw packet size (188/192/204) */
    pos = url_ftell(pb);
    len = get_buffer(pb, buf, sizeof(buf));
    if (len != sizeof(buf))
        goto fail;
    ts->raw_packet_size = get_packet_size(buf, sizeof(buf));
    if (ts->raw_packet_size <= 0)
        goto fail;
    ts->stream = s;
    ts->auto_guess = 0;
    if (s->iformat == &mpegts_demuxer) {
        /* normal demux */
        /* first do a scaning to get all the services */
        url_fseek(pb, pos, SEEK_SET);
        mpegts_open_section_filter(ts, SDT_PID, sdt_cb, ts, 1);
        mpegts_open_section_filter(ts, PAT_PID, pat_cb, ts, 1);
        handle_packets(ts, s->probesize / ts->raw_packet_size);
        /* if could not find service, enable auto_guess */
        ts->auto_guess = 1;
        dprintf(ts->stream, "tuning done\n");
        /* streams keep being discovered on the fly while demuxing */
        s->ctx_flags |= AVFMTCTX_NOHEADER;
    } else {
        AVStream *st;
        int pcr_pid, pid, nb_packets, nb_pcrs, ret, pcr_l;
        int64_t pcrs[2], pcr_h;
        int packet_count[2];
        uint8_t packet[TS_PACKET_SIZE];
        /* only read packets */
        st = av_new_stream(s, 0);
        if (!st)
            goto fail;
        av_set_pts_info(st, 60, 1, 27000000); /* timestamps in 27 MHz PCR units */
        st->codec->codec_type = AVMEDIA_TYPE_DATA;
        st->codec->codec_id = CODEC_ID_MPEG2TS;
        /* we iterate until we find two PCRs to estimate the bitrate */
        pcr_pid = -1;
        nb_pcrs = 0;
        nb_packets = 0;
        for(;;) {
            ret = read_packet(s, packet, ts->raw_packet_size);
            if (ret < 0)
                return -1;
            pid = AV_RB16(packet + 1) & 0x1fff;
            /* lock onto the first PID seen carrying a PCR */
            if ((pcr_pid == -1 || pcr_pid == pid) &&
                parse_pcr(&pcr_h, &pcr_l, packet) == 0) {
                pcr_pid = pid;
                packet_count[nb_pcrs] = nb_packets;
                pcrs[nb_pcrs] = pcr_h * 300 + pcr_l; /* convert to 27 MHz units */
                nb_pcrs++;
                if (nb_pcrs >= 2)
                    break;
            }
            nb_packets++;
        }
        /* NOTE1: the bitrate is computed without the FEC */
        /* NOTE2: it is only the bitrate of the start of the stream */
        ts->pcr_incr = (pcrs[1] - pcrs[0]) / (packet_count[1] - packet_count[0]);
        ts->cur_pcr = pcrs[0] - ts->pcr_incr * packet_count[0];
        s->bit_rate = (TS_PACKET_SIZE * 8) * 27e6 / ts->pcr_incr;
        st->codec->bit_rate = s->bit_rate;
        st->start_time = ts->cur_pcr;
#if 0
        av_log(ts->stream, AV_LOG_DEBUG, "start=%0.3f pcr=%0.3f incr=%d\n",
               st->start_time / 1000000.0, pcrs[0] / 27e6, ts->pcr_incr);
#endif
    }
    /* rewind so demuxing starts at the beginning of the probed data */
    url_fseek(pb, pos, SEEK_SET);
    return 0;
 fail:
    return -1;
}
/* upper bound (in packets) on the lookahead used to find the next PCR when
 * computing per-packet timing */
#define MAX_PACKET_READAHEAD ((128 * 1024) / 188)
/* Read one raw TS packet as an AVPacket; when mpeg2ts_compute_pcr is set,
 * interpolate a pts/duration for every packet from the PCR stream. */
static int mpegts_raw_read_packet(AVFormatContext *s,
                                  AVPacket *pkt)
{
    MpegTSContext *ts = s->priv_data;
    int ret, i;
    int64_t pcr_h, next_pcr_h, pos;
    int pcr_l, next_pcr_l;
    uint8_t pcr_buf[12];
    if (av_new_packet(pkt, TS_PACKET_SIZE) < 0)
        return AVERROR(ENOMEM);
    pkt->pos= url_ftell(s->pb);
    ret = read_packet(s, pkt->data, ts->raw_packet_size);
    if (ret < 0) {
        av_free_packet(pkt);
        return ret;
    }
    if (ts->mpeg2ts_compute_pcr) {
        /* compute exact PCR for each packet */
        if (parse_pcr(&pcr_h, &pcr_l, pkt->data) == 0) {
            /* we read the next PCR (XXX: optimize it by using a bigger buffer */
            pos = url_ftell(s->pb);
            for(i = 0; i < MAX_PACKET_READAHEAD; i++) {
                url_fseek(s->pb, pos + i * ts->raw_packet_size, SEEK_SET);
                /* 12 bytes are enough for parse_pcr(): header + AF length,
                 * flags and the 6-byte PCR */
                get_buffer(s->pb, pcr_buf, 12);
                if (parse_pcr(&next_pcr_h, &next_pcr_l, pcr_buf) == 0) {
                    /* XXX: not precise enough */
                    ts->pcr_incr = ((next_pcr_h - pcr_h) * 300 + (next_pcr_l - pcr_l)) /
                        (i + 1);
                    break;
                }
            }
            url_fseek(s->pb, pos, SEEK_SET);
            /* no next PCR found: we use previous increment */
            ts->cur_pcr = pcr_h * 300 + pcr_l;
        }
        /* timestamps advance by the estimated per-packet PCR increment */
        pkt->pts = ts->cur_pcr;
        pkt->duration = ts->pcr_incr;
        ts->cur_pcr += ts->pcr_incr;
    }
    pkt->stream_index = 0;
    return 0;
}
/* Demux one packet. If the byte position moved since the last call (a seek
 * happened), all partially assembled PES buffers are flushed first; at
 * end of stream, any buffered PES payload is emitted as a final packet. */
static int mpegts_read_packet(AVFormatContext *s,
                              AVPacket *pkt)
{
    MpegTSContext *ts = s->priv_data;
    int ret, i;
    if (url_ftell(s->pb) != ts->last_pos) {
        /* seek detected, flush pes buffer */
        for (i = 0; i < NB_PID_MAX; i++) {
            if (ts->pids[i] && ts->pids[i]->type == MPEGTS_PES) {
                PESContext *pes = ts->pids[i]->u.pes_filter.opaque;
                av_freep(&pes->buffer);
                pes->data_index = 0;
                pes->state = MPEGTS_SKIP; /* skip until pes header */
            }
        }
    }
    /* handle_packet() fills *pkt through ts->pkt inside the PES callbacks */
    ts->pkt = pkt;
    ret = handle_packets(ts, 0);
    if (ret < 0) {
        /* flush pes data left */
        for (i = 0; i < NB_PID_MAX; i++) {
            if (ts->pids[i] && ts->pids[i]->type == MPEGTS_PES) {
                PESContext *pes = ts->pids[i]->u.pes_filter.opaque;
                if (pes->state == MPEGTS_PAYLOAD && pes->data_index > 0) {
                    new_pes_packet(pes, pkt);
                    pes->state = MPEGTS_SKIP;
                    ret = 0;
                    break;
                }
            }
        }
    }
    ts->last_pos = url_ftell(s->pb);
    return ret;
}
/* Free the demuxer state: drop all program entries and close every
 * remaining PID filter (which releases its section/PES resources). */
static int mpegts_read_close(AVFormatContext *s)
{
    MpegTSContext *ts = s->priv_data;
    int pid;
    clear_programs(ts);
    for (pid = 0; pid < NB_PID_MAX; pid++) {
        if (ts->pids[pid])
            mpegts_close_filter(ts, ts->pids[pid]);
    }
    return 0;
}
/* AVInputFormat.read_timestamp callback: return the 90 kHz PCR carried by
 * the first packet of the stream's PCR PID at or after *ppos, and write
 * back the byte position of that packet. Returns AV_NOPTS_VALUE on EOF. */
static int64_t mpegts_get_pcr(AVFormatContext *s, int stream_index,
                              int64_t *ppos, int64_t pos_limit)
{
    MpegTSContext *ts = s->priv_data;
    int64_t pos, timestamp;
    uint8_t buf[TS_PACKET_SIZE];
    int pcr_l, pcr_pid = ((PESContext*)s->streams[stream_index]->priv_data)->pcr_pid;
    const int find_next= 1; /* backward scan branch below is currently unused */
    /* round *ppos up to a raw packet boundary; pos47 is the known phase of
     * packet starts within the file (set by handle_packet) */
    pos = ((*ppos  + ts->raw_packet_size - 1 - ts->pos47) / ts->raw_packet_size) * ts->raw_packet_size + ts->pos47;
    if (find_next) {
        for(;;) {
            url_fseek(s->pb, pos, SEEK_SET);
            if (get_buffer(s->pb, buf, TS_PACKET_SIZE) != TS_PACKET_SIZE)
                return AV_NOPTS_VALUE;
            /* accept any PID when pcr_pid is unknown (-1) */
            if ((pcr_pid < 0 || (AV_RB16(buf + 1) & 0x1fff) == pcr_pid) &&
                parse_pcr(&timestamp, &pcr_l, buf) == 0) {
                break;
            }
            pos += ts->raw_packet_size;
        }
    } else {
        for(;;) {
            pos -= ts->raw_packet_size;
            if (pos < 0)
                return AV_NOPTS_VALUE;
            url_fseek(s->pb, pos, SEEK_SET);
            if (get_buffer(s->pb, buf, TS_PACKET_SIZE) != TS_PACKET_SIZE)
                return AV_NOPTS_VALUE;
            if ((pcr_pid < 0 || (AV_RB16(buf + 1) & 0x1fff) == pcr_pid) &&
                parse_pcr(&timestamp, &pcr_l, buf) == 0) {
                break;
            }
        }
    }
    *ppos = pos;
    return timestamp;
}
#ifdef USE_SYNCPOINT_SEARCH
/* Range seek (avformat_seek_file semantics): find a position whose
 * timestamp is within [min_ts, max_ts], as close to target_ts as possible,
 * then locate matching syncpoints for all streams. Parser state is saved
 * up front and restored on any failure so a failed seek is side-effect
 * free. */
static int read_seek2(AVFormatContext *s,
                      int stream_index,
                      int64_t min_ts,
                      int64_t target_ts,
                      int64_t max_ts,
                      int flags)
{
    int64_t pos;
    int64_t ts_ret, ts_adj;
    int stream_index_gen_search;
    AVStream *st;
    AVParserState *backup;
    backup = ff_store_parser_state(s);
    // detect direction of seeking for search purposes
    flags |= (target_ts - min_ts > (uint64_t)(max_ts - target_ts)) ?
             AVSEEK_FLAG_BACKWARD : 0;
    if (flags & AVSEEK_FLAG_BYTE) {
        // use position directly, we will search starting from it
        pos = target_ts;
    } else {
        // search for some position with good timestamp match
        if (stream_index < 0) {
            stream_index_gen_search = av_find_default_stream_index(s);
            if (stream_index_gen_search < 0) {
                ff_restore_parser_state(s, backup);
                return -1;
            }
            st = s->streams[stream_index_gen_search];
            // timestamp for default must be expressed in AV_TIME_BASE units
            ts_adj = av_rescale(target_ts,
                                st->time_base.den,
                                AV_TIME_BASE * (int64_t)st->time_base.num);
        } else {
            ts_adj = target_ts;
            stream_index_gen_search = stream_index;
        }
        /* binary search over the file driven by mpegts_get_pcr() */
        pos = av_gen_search(s, stream_index_gen_search, ts_adj,
                            0, INT64_MAX, -1,
                            AV_NOPTS_VALUE,
                            AV_NOPTS_VALUE,
                            flags, &ts_ret, mpegts_get_pcr);
        if (pos < 0) {
            ff_restore_parser_state(s, backup);
            return -1;
        }
    }
    // search for actual matching keyframe/starting position for all streams
    if (ff_gen_syncpoint_search(s, stream_index, pos,
                                min_ts, target_ts, max_ts,
                                flags) < 0) {
        ff_restore_parser_state(s, backup);
        return -1;
    }
    ff_free_parser_state(s, backup);
    return 0;
}
/* Point seek implemented on top of read_seek2(): translate the single
 * target timestamp into a range according to AVSEEK_FLAG_BACKWARD, and
 * retry with an unbounded range if the strict seek fails. */
static int read_seek(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
{
    int64_t lo, hi;
    int ret;
    if (flags & AVSEEK_FLAG_BACKWARD) {
        flags &= ~AVSEEK_FLAG_BACKWARD;
        lo = INT64_MIN;
        hi = target_ts;
    } else {
        lo = target_ts;
        hi = INT64_MAX;
    }
    ret = read_seek2(s, stream_index, lo, target_ts, hi, flags);
    if (ret < 0)
        // for compatibility reasons, seek to the best-fitting timestamp
        ret = read_seek2(s, stream_index, INT64_MIN, target_ts, INT64_MAX, flags);
    return ret;
}
#else
/* Seek with av_seek_frame_binary() (driven by mpegts_get_pcr), then step
 * forward to the next packet with payload_unit_start_indicator set so
 * demuxing resumes at a PES packet boundary. */
static int read_seek(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
    MpegTSContext *ts = s->priv_data;
    uint8_t buf[TS_PACKET_SIZE];
    int64_t pos;
    if(av_seek_frame_binary(s, stream_index, target_ts, flags) < 0)
        return -1;
    pos= url_ftell(s->pb);
    for(;;) {
        url_fseek(s->pb, pos, SEEK_SET);
        if (get_buffer(s->pb, buf, TS_PACKET_SIZE) != TS_PACKET_SIZE)
            return -1;
//        pid = AV_RB16(buf + 1) & 0x1fff;
        /* stop on the first packet that starts a payload unit */
        if(buf[1] & 0x40) break;
        pos += ts->raw_packet_size;
    }
    url_fseek(s->pb, pos, SEEK_SET);
    return 0;
}
#endif
/**************************************************************/
/* parsing functions - called from other demuxers such as RTP */
/* Create a stand-alone TS parser context (no ByteIOContext attached),
 * used by other demuxers such as RTP. Returns NULL on allocation failure;
 * free with ff_mpegts_parse_close(). */
MpegTSContext *ff_mpegts_parse_open(AVFormatContext *s)
{
    MpegTSContext *ctx = av_mallocz(sizeof(MpegTSContext));
    if (!ctx)
        return NULL;
    /* no stream case, currently used by RTP: plain 188-byte packets,
     * with PID auto-detection enabled */
    ctx->raw_packet_size = TS_PACKET_SIZE;
    ctx->stream = s;
    ctx->auto_guess = 1;
    return ctx;
}
/* Feed a buffer of TS data to the parser, byte-scanning for the 0x47 sync
 * when misaligned. Returns the number of bytes consumed once a packet has
 * been output (stop_parse raised), or -1 if the remaining data is shorter
 * than one TS packet and nothing was output. */
int ff_mpegts_parse_packet(MpegTSContext *ts, AVPacket *pkt,
                           const uint8_t *buf, int len)
{
    const int total = len;
    ts->pkt = pkt;
    ts->stop_parse = 0;
    while (ts->stop_parse <= 0) {
        if (len < TS_PACKET_SIZE)
            return -1;
        if (buf[0] == 0x47) {
            handle_packet(ts, buf);
            buf += TS_PACKET_SIZE;
            len -= TS_PACKET_SIZE;
        } else {
            /* not on a sync byte: advance one byte and rescan */
            buf++;
            len--;
        }
    }
    return total - len;
}
/* Free a context created by ff_mpegts_parse_open(). */
void ff_mpegts_parse_close(MpegTSContext *ts)
{
    int i;
    /* Close each filter with mpegts_close_filter() (as mpegts_read_close()
     * does) instead of a bare av_free(): a filter owns extra allocations
     * (section buffer or PES context/buffer) that freeing only the filter
     * struct would leak. */
    for(i=0;i<NB_PID_MAX;i++)
        if (ts->pids[i])
            mpegts_close_filter(ts, ts->pids[i]);
    av_free(ts);
}
/* Normal MPEG-TS demuxer: parses PAT/PMT/SDT and exposes the elementary
 * streams. Positional AVInputFormat initializers below are, in order:
 * name, long_name, priv_data_size, read_probe, read_header, read_packet,
 * read_close, read_seek, read_timestamp. */
AVInputFormat mpegts_demuxer = {
    "mpegts",
    NULL_IF_CONFIG_SMALL("MPEG-2 transport stream format"),
    sizeof(MpegTSContext),
    mpegts_probe,
    mpegts_read_header,
    mpegts_read_packet,
    mpegts_read_close,
    read_seek,
    mpegts_get_pcr,
    .flags = AVFMT_SHOW_IDS|AVFMT_TS_DISCONT,
#ifdef USE_SYNCPOINT_SEARCH
    .read_seek2 = read_seek2,
#endif
};
/* Raw MPEG-TS demuxer: emits whole TS packets as a single data stream
 * without parsing the program tables (no read_probe; selected explicitly).
 * Positional fields match mpegts_demuxer above. */
AVInputFormat mpegtsraw_demuxer = {
    "mpegtsraw",
    NULL_IF_CONFIG_SMALL("MPEG-2 raw transport stream format"),
    sizeof(MpegTSContext),
    NULL,
    mpegts_read_header,
    mpegts_raw_read_packet,
    mpegts_read_close,
    read_seek,
    mpegts_get_pcr,
    .flags = AVFMT_SHOW_IDS|AVFMT_TS_DISCONT,
#ifdef USE_SYNCPOINT_SEARCH
    .read_seek2 = read_seek2,
#endif
};
| 123linslouis-android-video-cutter | jni/libavformat/mpegts.c | C | asf20 | 55,293 |
/*
* RTP H264 Protocol (RFC3984)
* Copyright (c) 2006 Ryan Martell
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* @brief H.264 / RTP Code (RFC3984)
* @author Ryan Martell <rdm4@martellventures.com>
*
* @note Notes:
* Notes:
* This currently supports packetization mode:
* Single Nal Unit Mode (0), or
* Non-Interleaved Mode (1). It currently does not support
* Interleaved Mode (2). (This requires implementing STAP-B, MTAP16, MTAP24, FU-B packet types)
*
* @note TODO:
* 1) RTCP sender reports for udp streams are required..
*
*/
#include "libavutil/base64.h"
#include "libavutil/avstring.h"
#include "libavcodec/get_bits.h"
#include "avformat.h"
#include "mpegts.h"
#include <unistd.h>
#include "network.h"
#include <assert.h>
#include "rtpdec.h"
#include "rtpdec_h264.h"
/**
 * RTP/H264 specific private data (one instance per RTP stream).
 */
struct PayloadContext {
    unsigned long cookie;       ///< sanity check, to make sure we get the pointer we're expecting.
    //sdp setup parameters
    uint8_t profile_idc;        ///< from the sdp setup parameters.
    uint8_t profile_iop;        ///< from the sdp setup parameters.
    uint8_t level_idc;          ///< from the sdp setup parameters.
    int packetization_mode;     ///< from the sdp setup parameters.
#ifdef DEBUG
    int packet_types_received[32]; ///< per-NAL-type packet counters, debug statistics only
#endif
};
#define MAGIC_COOKIE (0xdeadbeef) ///< Cookie for the extradata; to verify we are what we think we are, and that we haven't been freed.
#define DEAD_COOKIE (0xdeaddead) ///< Cookie for the extradata; once it is freed.
/* ---------------- private code */
/* Parse one attribute/value pair from an SDP "a=fmtp:" line for an H.264
 * stream: records the packetization mode and profile/level, and converts
 * base64 "sprop-parameter-sets" (SPS/PPS) into Annex B extradata on the
 * codec context. */
static void sdp_parse_fmtp_config_h264(AVStream * stream,
                                       PayloadContext * h264_data,
                                       char *attr, char *value)
{
    AVCodecContext *codec = stream->codec;
    assert(codec->codec_id == CODEC_ID_H264);
    assert(h264_data != NULL);
    if (!strcmp(attr, "packetization-mode")) {
        av_log(codec, AV_LOG_DEBUG, "RTP Packetization Mode: %d\n", atoi(value));
        h264_data->packetization_mode = atoi(value);
        /*
           Packetization Mode:
           0 or not present: Single NAL mode (Only nals from 1-23 are allowed)
           1: Non-interleaved Mode: 1-23, 24 (STAP-A), 28 (FU-A) are allowed.
           2: Interleaved Mode: 25 (STAP-B), 26 (MTAP16), 27 (MTAP24), 28 (FU-A), and 29 (FU-B) are allowed.
         */
        if (h264_data->packetization_mode > 1)
            av_log(codec, AV_LOG_ERROR,
                   "Interleaved RTP mode is not supported yet.");
    } else if (!strcmp(attr, "profile-level-id")) {
        if (strlen(value) == 6) {
            char buffer[3];
            // 6 characters=3 bytes, in hex.
            uint8_t profile_idc;
            uint8_t profile_iop;
            uint8_t level_idc;
            /* parse the three bytes as two-digit hex pairs */
            buffer[0] = value[0]; buffer[1] = value[1]; buffer[2] = '\0';
            profile_idc = strtol(buffer, NULL, 16);
            buffer[0] = value[2]; buffer[1] = value[3];
            profile_iop = strtol(buffer, NULL, 16);
            buffer[0] = value[4]; buffer[1] = value[5];
            level_idc = strtol(buffer, NULL, 16);
            // set the parameters...
            av_log(codec, AV_LOG_DEBUG,
                   "RTP Profile IDC: %x Profile IOP: %x Level: %x\n",
                   profile_idc, profile_iop, level_idc);
            h264_data->profile_idc = profile_idc;
            h264_data->profile_iop = profile_iop;
            h264_data->level_idc = level_idc;
        }
    } else if (!strcmp(attr, "sprop-parameter-sets")) {
        uint8_t start_sequence[]= { 0, 0, 1 };
        codec->extradata_size= 0;
        codec->extradata= NULL;
        /* value is a comma-separated list of base64-encoded NAL units */
        while (*value) {
            char base64packet[1024];
            uint8_t decoded_packet[1024];
            uint32_t packet_size;
            char *dst = base64packet;
            while (*value && *value != ','
                   && (dst - base64packet) < sizeof(base64packet) - 1) {
                *dst++ = *value++;
            }
            *dst++ = '\0';
            if (*value == ',')
                value++;
            packet_size= av_base64_decode(decoded_packet, base64packet, sizeof(decoded_packet));
            if (packet_size) {
                /* grow extradata by copy: old extradata + start code + new NAL */
                uint8_t *dest = av_malloc(packet_size + sizeof(start_sequence) +
                                          codec->extradata_size +
                                          FF_INPUT_BUFFER_PADDING_SIZE);
                if(dest)
                {
                    if(codec->extradata_size)
                    {
                        // av_realloc?
                        memcpy(dest, codec->extradata, codec->extradata_size);
                        av_free(codec->extradata);
                    }
                    memcpy(dest+codec->extradata_size, start_sequence, sizeof(start_sequence));
                    memcpy(dest+codec->extradata_size+sizeof(start_sequence), decoded_packet, packet_size);
                    memset(dest+codec->extradata_size+sizeof(start_sequence)+
                           packet_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
                    codec->extradata= dest;
                    codec->extradata_size+= sizeof(start_sequence)+packet_size;
                } else {
                    av_log(codec, AV_LOG_ERROR, "Unable to allocate memory for extradata!");
                }
            }
        }
        av_log(codec, AV_LOG_DEBUG, "Extradata set to %p (size: %d)!", codec->extradata, codec->extradata_size);
    }
}
// return 0 on packet, no more left, 1 on packet, 1 on partial packet...
/* Depacketize one RTC3984 H.264 RTP payload into an Annex B AVPacket.
 * Handles single NAL units (types 1-23), STAP-A (24) and FU-A (28);
 * interleaved-mode types (25-27, 29) are rejected. Returns 0 on success,
 * -1 on unsupported type or malformed data. */
static int h264_handle_packet(AVFormatContext *ctx,
                              PayloadContext *data,
                              AVStream *st,
                              AVPacket * pkt,
                              uint32_t * timestamp,
                              const uint8_t * buf,
                              int len, int flags)
{
    uint8_t nal;
    uint8_t type;
    int result= 0;
    uint8_t start_sequence[]= {0, 0, 1}; /* Annex B start code */
#ifdef DEBUG
    assert(data);
    assert(data->cookie == MAGIC_COOKIE);
#endif
    assert(buf);
    /* previously missing: an empty payload has no NAL octet, and buf[0]
     * below would read out of bounds */
    if (len < 1) {
        av_log(ctx, AV_LOG_ERROR, "Empty H264 RTP packet\n");
        return -1;
    }
    nal  = buf[0];
    type = nal & 0x1f;
    if (type >= 1 && type <= 23)
        type = 1; // simplify the case. (these are all the nal types used internally by the h264 codec)
    switch (type) {
    case 0:                    // undefined;
        result= -1;
        break;
    case 1:
        /* single NAL unit packet: emit it with a start code prepended */
        av_new_packet(pkt, len+sizeof(start_sequence));
        memcpy(pkt->data, start_sequence, sizeof(start_sequence));
        memcpy(pkt->data+sizeof(start_sequence), buf, len);
#ifdef DEBUG
        data->packet_types_received[nal & 0x1f]++;
#endif
        break;
    case 24:                   // STAP-A (one packet, multiple nals)
        // consume the STAP-A NAL
        buf++;
        len--;
        // first we are going to figure out the total size....
        {
            int pass= 0;
            int total_length= 0;
            uint8_t *dst= NULL;
            /* pass 0 sums the output size, pass 1 copies into the packet */
            for(pass= 0; pass<2; pass++) {
                const uint8_t *src= buf;
                int src_len= len;
                do {
                    uint16_t nal_size = AV_RB16(src); // this going to be a problem if unaligned (can it be?)
                    // consume the length of the aggregate...
                    src += 2;
                    src_len -= 2;
                    if (nal_size <= src_len) {
                        if(pass==0) {
                            // counting...
                            total_length+= sizeof(start_sequence)+nal_size;
                        } else {
                            // copying
                            assert(dst);
                            memcpy(dst, start_sequence, sizeof(start_sequence));
                            dst+= sizeof(start_sequence);
                            memcpy(dst, src, nal_size);
#ifdef DEBUG
                            data->packet_types_received[*src & 0x1f]++;
#endif
                            dst+= nal_size;
                        }
                    } else {
                        av_log(ctx, AV_LOG_ERROR,
                               "nal size exceeds length: %d %d\n", nal_size, src_len);
                    }
                    // eat what we handled...
                    src += nal_size;
                    src_len -= nal_size;
                    if (src_len < 0)
                        av_log(ctx, AV_LOG_ERROR,
                               "Consumed more bytes than we got! (%d)\n", src_len);
                } while (src_len > 2);  // because there could be rtp padding..
                if(pass==0) {
                    // now we know the total size of the packet (with the start sequences added)
                    av_new_packet(pkt, total_length);
                    dst= pkt->data;
                } else {
                    assert(dst-pkt->data==total_length);
                }
            }
        }
        break;
    case 25:                   // STAP-B
    case 26:                   // MTAP-16
    case 27:                   // MTAP-24
    case 29:                   // FU-B
        av_log(ctx, AV_LOG_ERROR,
               "Unhandled type (%d) (See RFC for implementation details\n",
               type);
        result= -1;
        break;
    case 28:                   // FU-A (fragmented nal)
        buf++;
        len--;                 // skip the fu_indicator
        /* previously missing: we need at least the FU header plus one
         * payload byte; a truncated packet made the *buf read below go
         * past the end of the buffer */
        if (len > 1) {
            // these are the same as above, we just redo them here for clarity...
            uint8_t fu_indicator = nal;
            uint8_t fu_header = *buf;   // read the fu_header.
            uint8_t start_bit = fu_header >> 7;
//            uint8_t end_bit = (fu_header & 0x40) >> 6;
            uint8_t nal_type = (fu_header & 0x1f);
            uint8_t reconstructed_nal;
            // reconstruct this packet's true nal; only the data follows..
            reconstructed_nal = fu_indicator & (0xe0);  // the original nal forbidden bit and NRI are stored in this packet's nal;
            reconstructed_nal |= nal_type;
            // skip the fu_header...
            buf++;
            len--;
#ifdef DEBUG
            if (start_bit)
                data->packet_types_received[nal_type]++;
#endif
            if(start_bit) {
                // copy in the start sequence, and the reconstructed nal....
                av_new_packet(pkt, sizeof(start_sequence)+sizeof(nal)+len);
                memcpy(pkt->data, start_sequence, sizeof(start_sequence));
                pkt->data[sizeof(start_sequence)]= reconstructed_nal;
                memcpy(pkt->data+sizeof(start_sequence)+sizeof(nal), buf, len);
            } else {
                av_new_packet(pkt, len);
                memcpy(pkt->data, buf, len);
            }
        } else {
            av_log(ctx, AV_LOG_ERROR, "Too short data for FU-A H264 RTP packet\n");
            result = -1;
        }
        break;
    case 30:                   // undefined
    case 31:                   // undefined
    default:
        av_log(ctx, AV_LOG_ERROR, "Undefined type (%d)", type);
        result= -1;
        break;
    }
    pkt->stream_index = st->index;
    return result;
}
/* ---------------- public code */
/* Allocate a zeroed PayloadContext (with input padding) and stamp it with
 * the magic cookie used by the sanity asserts. Returns NULL on OOM. */
static PayloadContext *h264_new_context(void)
{
    PayloadContext *ctx = av_mallocz(sizeof(PayloadContext) +
                                     FF_INPUT_BUFFER_PADDING_SIZE);
    if (ctx)
        ctx->cookie = MAGIC_COOKIE;
    return ctx;
}
/* Free a PayloadContext, dumping per-NAL-type statistics in debug builds
 * and poisoning the cookie so stale pointers trip the sanity asserts. */
static void h264_free_context(PayloadContext *data)
{
#ifdef DEBUG
    int type;
    for (type = 0; type < 32; type++) {
        if (data->packet_types_received[type])
            av_log(NULL, AV_LOG_DEBUG, "Received %d packets of type %d\n",
                   data->packet_types_received[type], type);
    }
#endif
    assert(data);
    assert(data->cookie == MAGIC_COOKIE);
    /* avoid stale pointers: overwrite the cookie before freeing */
    data->cookie = DEAD_COOKIE;
    av_free(data);
}
/* Parse one H.264-related SDP "a=" line for stream st_index: handles
 * "framesize" (picture dimensions), "fmtp" (delegated per-attribute to
 * sdp_parse_fmtp_config_h264) and ignores "cliprect". Always returns 0 so
 * generic SDP processing continues. */
static int parse_h264_sdp_line(AVFormatContext *s, int st_index,
                               PayloadContext *h264_data, const char *line)
{
    AVStream *stream = s->streams[st_index];
    AVCodecContext *codec = stream->codec;
    const char *p = line;
    assert(h264_data->cookie == MAGIC_COOKIE);
    if (av_strstart(p, "framesize:", &p)) {
        char buf1[50];
        char *dst = buf1;
        // remove the protocol identifier..
        while (*p && *p == ' ') p++; // strip spaces.
        while (*p && *p != ' ') p++; // eat protocol identifier
        while (*p && *p == ' ') p++; // strip trailing spaces.
        /* copy the width (everything up to the '-') into buf1 */
        while (*p && *p != '-' && (dst - buf1) < sizeof(buf1) - 1) {
            *dst++ = *p++;
        }
        *dst = '\0';
        // a='framesize:96 320-240'
        // set our parameters..
        codec->width = atoi(buf1);
        codec->height = atoi(p + 1); // skip the -
        codec->pix_fmt = PIX_FMT_YUV420P;
    } else if (av_strstart(p, "fmtp:", &p)) {
        char attr[256];
        char value[4096];
        // remove the protocol identifier..
        while (*p && *p == ' ') p++; // strip spaces.
        while (*p && *p != ' ') p++; // eat protocol identifier
        while (*p && *p == ' ') p++; // strip trailing spaces.
        /* loop on each attribute */
        while (ff_rtsp_next_attr_and_value
               (&p, attr, sizeof(attr), value, sizeof(value))) {
            /* grab the codec extra_data from the config parameter of the fmtp line */
            sdp_parse_fmtp_config_h264(stream, h264_data, attr, value);
        }
    } else if (av_strstart(p, "cliprect:", &p)) {
        // could use this if we wanted.
    }
    av_set_pts_info(stream, 33, 1, 90000); // 33 should be right, because the pts is 64 bit? (done elsewhere; this is a one time thing)
    return 0;                   // keep processing it the normal way...
}
/**
 * RTP dynamic-payload handler registration for H.264 (RFC 3984): binds the
 * SDP line parser, context allocation/teardown and the depacketizer above
 * to the "H264" encoding name.
 */
RTPDynamicProtocolHandler ff_h264_dynamic_handler = {
    .enc_name         = "H264",
    .codec_type       = AVMEDIA_TYPE_VIDEO,
    .codec_id         = CODEC_ID_H264,
    .parse_sdp_a_line = parse_h264_sdp_line,
    .open             = h264_new_context,
    .close            = h264_free_context,
    .parse_packet     = h264_handle_packet
};
| 123linslouis-android-video-cutter | jni/libavformat/rtpdec_h264.c | C | asf20 | 14,924 |
/*
* HTTP protocol for ffmpeg client
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/avstring.h"
#include "avformat.h"
#include <unistd.h>
#include <strings.h>
#include "internal.h"
#include "network.h"
#include "os_support.h"
#include "httpauth.h"
/* XXX: POST protocol is not completely implemented because ffmpeg uses
   only a subset of it. */
/* used for protocol handling */
#define BUFFER_SIZE 1024
#define URL_SIZE 4096
#define MAX_REDIRECTS 8
/* Per-connection state for the HTTP protocol handler. */
typedef struct {
    URLContext *hd;                 /* underlying TCP connection */
    unsigned char buffer[BUFFER_SIZE], *buf_ptr, *buf_end; /* read buffer and cursors */
    int line_count;                 /* header lines parsed so far in the response */
    int http_code;                  /* status code of the last response */
    int64_t chunksize;              /**< Used if "Transfer-Encoding: chunked" otherwise -1. */
    int64_t off, filesize;          /* current offset; Content-Length or -1 if unknown */
    char location[URL_SIZE];        /* current URL, rewritten on redirects */
    HTTPAuthState auth_state;       /* accumulated authentication state across retries */
} HTTPContext;
static int http_connect(URLContext *h, const char *path, const char *hoststr,
                        const char *auth, int *new_location);
static int http_write(URLContext *h, uint8_t *buf, int size);
/* return non zero if error */
/* Open (or re-open) the TCP connection for s->location and perform the
 * HTTP request, transparently retrying once on a 401 when credentials
 * become available and following up to MAX_REDIRECTS 302/303 redirects. */
static int http_open_cnx(URLContext *h)
{
    const char *path, *proxy_path;
    char hostname[1024], hoststr[1024];
    char auth[1024];
    char path1[1024];
    char buf[1024];
    int port, use_proxy, err, location_changed = 0, redirects = 0;
    HTTPAuthType cur_auth_type;
    HTTPContext *s = h->priv_data;
    URLContext *hd = NULL;
    proxy_path = getenv("http_proxy");
    use_proxy = (proxy_path != NULL) && !getenv("no_proxy") &&
        av_strstart(proxy_path, "http://", NULL);
    /* fill the dest addr */
 redo:
    /* needed in any case to build the host string */
    ff_url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
                 path1, sizeof(path1), s->location);
    ff_url_join(hoststr, sizeof(hoststr), NULL, NULL, hostname, port, NULL);
    if (use_proxy) {
        /* connect to the proxy host instead, and request the absolute URL */
        ff_url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
                     NULL, 0, proxy_path);
        path = s->location;
    } else {
        if (path1[0] == '\0')
            path = "/";
        else
            path = path1;
    }
    if (port < 0)
        port = 80;
    ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);
    err = url_open(&hd, buf, URL_RDWR);
    if (err < 0)
        goto fail;
    s->hd = hd;
    cur_auth_type = s->auth_state.auth_type;
    if (http_connect(h, path, hoststr, auth, &location_changed) < 0)
        goto fail;
    if (s->http_code == 401) {
        /* retry exactly once: only if the 401 response upgraded us from
         * "no auth" to a concrete auth scheme */
        if (cur_auth_type == HTTP_AUTH_NONE && s->auth_state.auth_type != HTTP_AUTH_NONE) {
            url_close(hd);
            goto redo;
        } else
            goto fail;
    }
    if ((s->http_code == 302 || s->http_code == 303) && location_changed == 1) {
        /* url moved, get next */
        url_close(hd);
        if (redirects++ >= MAX_REDIRECTS)
            return AVERROR(EIO);
        location_changed = 0;
        goto redo;
    }
    return 0;
 fail:
    if (hd)
        url_close(hd);
    return AVERROR(EIO);
}
/*
 * URLProtocol open callback: allocate the HTTPContext, store the URL
 * and establish the connection.
 *
 * @param uri   URL to open (copied, bounded, into s->location)
 * @param flags URL_RDONLY/URL_WRONLY/URL_RDWR
 * @return 0 on success, a negative AVERROR code on failure
 */
static int http_open(URLContext *h, const char *uri, int flags)
{
    HTTPContext *s;
    int ret;

    /* assume not seekable until a Content-Range response proves otherwise */
    h->is_streamed = 1;

    s = av_malloc(sizeof(HTTPContext));
    if (!s) {
        return AVERROR(ENOMEM);
    }
    h->priv_data = s;
    s->filesize = -1;
    s->chunksize = -1;
    s->off = 0;
    memset(&s->auth_state, 0, sizeof(s->auth_state));
    av_strlcpy(s->location, uri, URL_SIZE);

    ret = http_open_cnx(h);
    if (ret != 0) {
        /* don't leave a dangling pointer behind in the URLContext
         * (the old code freed s but kept h->priv_data pointing at it) */
        av_freep(&s);
        h->priv_data = NULL;
    }
    return ret;
}
/*
 * Fetch one byte of the response, refilling the internal buffer from
 * the transport when it runs dry.  Returns the byte value, -1 on clean
 * EOF, or AVERROR(EIO) on a read error.
 */
static int http_getc(HTTPContext *s)
{
    if (s->buf_ptr >= s->buf_end) {
        int n = url_read(s->hd, s->buffer, BUFFER_SIZE);
        if (n < 0)
            return AVERROR(EIO);
        if (n == 0)
            return -1;
        s->buf_ptr = s->buffer;
        s->buf_end = s->buffer + n;
    }
    return *s->buf_ptr++;
}
/*
 * Read one header line into line (at most line_size - 1 chars plus the
 * terminating NUL).  The trailing CR/LF is stripped; overlong lines are
 * silently truncated.  Returns 0 on success, AVERROR(EIO) on EOF/error.
 */
static int http_get_line(HTTPContext *s, char *line, int line_size)
{
    char *dst = line;

    for (;;) {
        int ch = http_getc(s);
        if (ch < 0)
            return AVERROR(EIO);
        if (ch == '\n') {
            /* drop a CR immediately preceding the LF */
            if (dst > line && dst[-1] == '\r')
                dst--;
            *dst = '\0';
            return 0;
        }
        if (dst - line < line_size - 1)
            *dst++ = ch;
    }
}
/*
 * Parse one line of the HTTP response.
 *
 * @param line         NUL-terminated line, CR/LF already stripped
 * @param line_count   0 for the status line, > 0 for header lines
 * @param new_location set to 1 when a Location header is seen
 * @return 0 on the empty end-of-header line, 1 to keep parsing,
 *         -1 on an error status code
 */
static int process_line(URLContext *h, char *line, int line_count,
                        int *new_location)
{
    HTTPContext *s = h->priv_data;
    char *tag, *p;

    /* end of header */
    if (line[0] == '\0')
        return 0;

    p = line;
    if (line_count == 0) {
        /* status line: "HTTP/1.x CODE reason" -- skip the version token */
        while (!isspace(*p) && *p != '\0')
            p++;
        while (isspace(*p))
            p++;
        s->http_code = strtol(p, NULL, 10);

        dprintf(NULL, "http_code=%d\n", s->http_code);

        /* error codes are 4xx and 5xx, but regard 401 as a success, so we
         * don't abort until all headers have been parsed. */
        if (s->http_code >= 400 && s->http_code < 600 && s->http_code != 401)
            return -1;
    } else {
        /* split "Tag: value" */
        while (*p != '\0' && *p != ':')
            p++;
        if (*p != ':')
            return 1;

        *p = '\0';
        tag = line;
        p++;
        while (isspace(*p))
            p++;
        if (!strcmp(tag, "Location")) {
            /* bounded copy: an over-long Location header must not overflow
             * the fixed-size location buffer (the old strcpy could) */
            av_strlcpy(s->location, p, sizeof(s->location));
            *new_location = 1;
        } else if (!strcmp (tag, "Content-Length") && s->filesize == -1) {
            s->filesize = atoll(p);
        } else if (!strcmp (tag, "Content-Range")) {
            /* "bytes $from-$to/$document_size" */
            const char *slash;
            if (!strncmp (p, "bytes ", 6)) {
                p += 6;
                s->off = atoll(p);
                if ((slash = strchr(p, '/')) && strlen(slash) > 0)
                    s->filesize = atoll(slash+1);
            }
            h->is_streamed = 0; /* we _can_ in fact seek */
        } else if (!strcmp (tag, "Transfer-Encoding") && !strncasecmp(p, "chunked", 7)) {
            /* body length comes from chunk headers, not Content-Length */
            s->filesize = -1;
            s->chunksize = 0;
        } else if (!strcmp (tag, "WWW-Authenticate")) {
            ff_http_auth_handle_header(&s->auth_state, tag, p);
        } else if (!strcmp (tag, "Authentication-Info")) {
            ff_http_auth_handle_header(&s->auth_state, tag, p);
        }
    }
    return 1;
}
/*
 * Send the HTTP request for path and, for GET, parse the response
 * headers.  auth carries "user:password" credentials if present;
 * *new_location is set when the response contained a Location header.
 * Returns 0 on success, a negative value on I/O or header errors, and
 * -1 if a nonzero byte offset was requested but not honored.
 */
static int http_connect(URLContext *h, const char *path, const char *hoststr,
                        const char *auth, int *new_location)
{
    HTTPContext *s = h->priv_data;
    int post, err;
    char line[1024];
    char *authstr = NULL;
    int64_t off = s->off; /* remember the requested offset */

    /* send http header */
    post = h->flags & URL_WRONLY;
    authstr = ff_http_auth_create_response(&s->auth_state, auth, path,
                                           post ? "POST" : "GET");
    snprintf(s->buffer, sizeof(s->buffer),
             "%s %s HTTP/1.1\r\n"
             "User-Agent: %s\r\n"
             "Accept: */*\r\n"
             "Range: bytes=%"PRId64"-\r\n"
             "Host: %s\r\n"
             "%s"
             "Connection: close\r\n"
             "%s"
             "\r\n",
             post ? "POST" : "GET",
             path,
             LIBAVFORMAT_IDENT,
             s->off,
             hoststr,
             authstr ? authstr : "",
             post ? "Transfer-Encoding: chunked\r\n" : "");

    av_freep(&authstr);
    if (http_write(h, s->buffer, strlen(s->buffer)) < 0)
        return AVERROR(EIO);

    /* init input buffer */
    s->buf_ptr = s->buffer;
    s->buf_end = s->buffer;
    s->line_count = 0;
    s->off = 0;       /* process_line updates this from Content-Range */
    s->filesize = -1;
    if (post) {
        /* always use chunked encoding for upload data */
        s->chunksize = 0;
        return 0;     /* the response is not read for POST here */
    }

    /* wait for header */
    for(;;) {
        if (http_get_line(s, line, sizeof(line)) < 0)
            return AVERROR(EIO);

        dprintf(NULL, "header='%s'\n", line);

        err = process_line(h, line, s->line_count, new_location);
        if (err < 0)
            return err;
        if (err == 0)
            break;    /* blank line: end of headers */
        s->line_count++;
    }

    /* mismatch means the server ignored our Range request */
    return (off == s->off) ? 0 : -1;
}
/*
 * Read up to size bytes of body data into buf, transparently decoding
 * chunked transfer encoding.  Returns the number of bytes read, 0 at
 * end of body (terminating zero-size chunk), or a negative AVERROR.
 */
static int http_read(URLContext *h, uint8_t *buf, int size)
{
    HTTPContext *s = h->priv_data;
    int len;

    if (s->chunksize >= 0) { /* chunked transfer encoding in effect */
        if (!s->chunksize) {
            /* current chunk exhausted: read the next chunk-size line */
            char line[32];

            for(;;) {
                do {
                    if (http_get_line(s, line, sizeof(line)) < 0)
                        return AVERROR(EIO);
                } while (!*line);    /* skip CR LF from last chunk */

                s->chunksize = strtoll(line, NULL, 16); /* size is in hex */

                dprintf(NULL, "Chunked encoding data size: %"PRId64"'\n", s->chunksize);

                if (!s->chunksize)
                    return 0;  /* zero-size chunk == end of body */
                break;
            }
        }
        /* never read past the end of the current chunk */
        size = FFMIN(size, s->chunksize);
    }

    /* read bytes from input buffer first */
    len = s->buf_end - s->buf_ptr;
    if (len > 0) {
        if (len > size)
            len = size;
        memcpy(buf, s->buf_ptr, len);
        s->buf_ptr += len;
    } else {
        len = url_read(s->hd, buf, size);
    }
    if (len > 0) {
        s->off += len;
        if (s->chunksize > 0)
            s->chunksize -= len;
    }
    return len;
}
/*
 * Write callback, used only when posting data.  Request headers (sent
 * before chunked mode is enabled, i.e. while chunksize == -1) go out
 * verbatim; everything after is wrapped in chunked transfer encoding.
 * Returns size on success or a negative AVERROR from the transport.
 */
static int http_write(URLContext *h, uint8_t *buf, int size)
{
    char temp[11];  /* 32-bit hex + CRLF + nul */
    int ret;
    char crlf[] = "\r\n";
    HTTPContext *s = h->priv_data;

    if (s->chunksize == -1) {
        /* headers are sent without any special encoding */
        return url_write(s->hd, buf, size);
    }

    /* silently ignore zero-size data since chunk encoding that would
     * signal EOF */
    if (size > 0) {
        /* upload data using chunked encoding: "<hex size>\r\n<data>\r\n" */
        snprintf(temp, sizeof(temp), "%x\r\n", size);

        if ((ret = url_write(s->hd, temp, strlen(temp))) < 0 ||
            (ret = url_write(s->hd, buf, size)) < 0 ||
            (ret = url_write(s->hd, crlf, sizeof(crlf) - 1)) < 0)
            return ret;
    }
    return size;
}
/*
 * Close the connection and free the context.  When uploading with
 * chunked encoding, first send the terminating zero-size chunk so the
 * server sees a clean end of body.
 */
static int http_close(URLContext *h)
{
    int ret = 0;
    char footer[] = "0\r\n\r\n";
    HTTPContext *s = h->priv_data;

    /* signal end of chunked encoding if used */
    if ((h->flags & URL_WRONLY) && s->chunksize != -1) {
        ret = url_write(s->hd, footer, sizeof(footer) - 1);
        ret = ret > 0 ? 0 : ret;
    }

    url_close(s->hd);
    av_free(s);
    return ret;
}
/*
 * Seek by tearing down the connection and reissuing the request with a
 * Range header at the target offset.  The old connection and any
 * buffered bytes are kept until the new connection succeeds, so a
 * failed seek leaves the stream fully usable at its old position.
 */
static int64_t http_seek(URLContext *h, int64_t off, int whence)
{
    HTTPContext *s = h->priv_data;
    URLContext *old_hd = s->hd;
    int64_t old_off = s->off;
    uint8_t old_buf[BUFFER_SIZE];
    int old_buf_size;

    if (whence == AVSEEK_SIZE)
        return s->filesize;
    else if ((s->filesize == -1 && whence == SEEK_END) || h->is_streamed)
        return -1;

    /* we save the old context in case the seek fails */
    old_buf_size = s->buf_end - s->buf_ptr;
    memcpy(old_buf, s->buf_ptr, old_buf_size);
    s->hd = NULL;

    /* convert relative offsets to an absolute position */
    if (whence == SEEK_CUR)
        off += s->off;
    else if (whence == SEEK_END)
        off += s->filesize;
    s->off = off; /* http_connect puts this in the Range header */

    /* if it fails, continue on old connection */
    if (http_open_cnx(h) < 0) {
        memcpy(s->buffer, old_buf, old_buf_size);
        s->buf_ptr = s->buffer;
        s->buf_end = s->buffer + old_buf_size;
        s->hd = old_hd;
        s->off = old_off;
        return -1;
    }
    url_close(old_hd);
    return off;
}
/* Return the file descriptor of the underlying TCP transport. */
static int http_get_file_handle(URLContext *h)
{
    HTTPContext *s = h->priv_data;

    return url_get_file_handle(s->hd);
}
/* HTTP protocol registration; unset callbacks default to NULL.
 * Designated initializers throughout, so the table no longer depends on
 * URLProtocol's field order (the original mixed positional and
 * designated initialization). */
URLProtocol http_protocol = {
    .name                = "http",
    .url_open            = http_open,
    .url_read            = http_read,
    .url_write           = http_write,
    .url_seek            = http_seek,
    .url_close           = http_close,
    .url_get_file_handle = http_get_file_handle,
};
| 123linslouis-android-video-cutter | jni/libavformat/http.c | C | asf20 | 12,560 |
/*
* Interplay C93 demuxer
* Copyright (c) 2007 Anssi Hannula <anssi.hannula@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "voc.h"
#include "libavutil/intreadwrite.h"
/* One entry of the 512-record block table at the start of a C93 file. */
typedef struct {
    uint16_t index;   /* block start, in 2048-byte units */
    uint8_t length;   /* block length -- TODO confirm units, only used as a
                         non-zero/zero sentinel and probe increment here */
    uint8_t frames;   /* number of video frames in the block (<= 32) */
} C93BlockRecord;

typedef struct {
    VocDecContext voc;                 /* state for VOC audio parsing */

    C93BlockRecord block_records[512]; /* block table read in read_header */
    int current_block;                 /* index into block_records */
    uint32_t frame_offsets[32];        /* per-frame offsets within the block */
    int current_frame;                 /* frame index within current block */
    int next_pkt_is_audio;             /* audio packet follows each video frame */

    AVStream *audio;                   /* created lazily on first audio data */
} C93DemuxContext;
/*
 * Probe for the C93 block table: the first four 4-byte records must
 * have consecutive indices (starting at 1) and non-zero length and
 * frame-count bytes.
 */
static int probe(AVProbeData *p)
{
    int expected = 1;
    int i;

    if (p->buf_size < 16)
        return 0;

    for (i = 0; i < 16; i += 4) {
        if (AV_RL16(p->buf + i) != expected || !p->buf[i + 2] || !p->buf[i + 3])
            return 0;
        expected += p->buf[i + 2];
    }

    return AVPROBE_SCORE_MAX;
}
/*
 * Parse the 512-entry block table and create the video stream.  Audio
 * streams are created on demand in read_packet, hence AVFMTCTX_NOHEADER.
 */
static int read_header(AVFormatContext *s,
                       AVFormatParameters *ap)
{
    AVStream *video;
    ByteIOContext *pb = s->pb;
    C93DemuxContext *c93 = s->priv_data;
    int i;
    int framecount = 0;

    /* each record: 16-bit block index, 8-bit length, 8-bit frame count */
    for (i = 0; i < 512; i++) {
        c93->block_records[i].index = get_le16(pb);
        c93->block_records[i].length = get_byte(pb);
        c93->block_records[i].frames = get_byte(pb);
        if (c93->block_records[i].frames > 32) {
            /* frame_offsets[] only has room for 32 entries per block */
            av_log(s, AV_LOG_ERROR, "too many frames in block\n");
            return AVERROR_INVALIDDATA;
        }
        framecount += c93->block_records[i].frames;
    }

    /* Audio streams are added if audio packets are found */
    s->ctx_flags |= AVFMTCTX_NOHEADER;

    video = av_new_stream(s, 0);
    if (!video)
        return AVERROR(ENOMEM);

    video->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    video->codec->codec_id = CODEC_ID_C93;
    video->codec->width = 320;
    video->codec->height = 192;
    /* 4:3 320x200 with 8 empty lines */
    video->sample_aspect_ratio = (AVRational) { 5, 6 };
    video->time_base = (AVRational) { 2, 25 };
    video->nb_frames = framecount;
    video->duration = framecount;
    video->start_time = 0;

    c93->current_block = 0;
    c93->current_frame = 0;
    c93->next_pkt_is_audio = 0;
    return 0;
}
/* flag bits stored in the first byte of each video packet */
#define C93_HAS_PALETTE 0x01
#define C93_FIRST_FRAME 0x02

/*
 * Emit the next packet.  Audio and video alternate: after every video
 * frame, one audio packet is tried before the next frame.  Video
 * packets consist of one flag byte, the frame data, and optionally a
 * 768-byte palette appended at the end.
 */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *pb = s->pb;
    C93DemuxContext *c93 = s->priv_data;
    C93BlockRecord *br = &c93->block_records[c93->current_block];
    int datasize;
    int ret, i;

    if (c93->next_pkt_is_audio) {
        c93->current_frame++;
        c93->next_pkt_is_audio = 0;
        datasize = get_le16(pb);
        /* > 42 bytes: enough for a 26-byte VOC header plus payload;
         * smaller audio records are skipped and the next video frame
         * is read instead */
        if (datasize > 42) {
            if (!c93->audio) {
                /* first audio data seen: create the stream now */
                c93->audio = av_new_stream(s, 1);
                if (!c93->audio)
                    return AVERROR(ENOMEM);
                c93->audio->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            }
            url_fskip(pb, 26); /* VOC header */
            ret = voc_get_packet(s, pkt, c93->audio, datasize - 26);
            if (ret > 0) {
                pkt->stream_index = 1;
                pkt->flags |= AV_PKT_FLAG_KEY;
                return ret;
            }
        }
    }
    if (c93->current_frame >= br->frames) {
        /* advance to the next block; a zero-length record ends the file */
        if (c93->current_block >= 511 || !br[1].length)
            return AVERROR(EIO);
        br++;
        c93->current_block++;
        c93->current_frame = 0;
    }

    if (c93->current_frame == 0) {
        /* start of a block: load its 32-entry frame offset table */
        url_fseek(pb, br->index * 2048, SEEK_SET);
        for (i = 0; i < 32; i++) {
            c93->frame_offsets[i] = get_le32(pb);
        }
    }

    url_fseek(pb,br->index * 2048 +
            c93->frame_offsets[c93->current_frame], SEEK_SET);

    datasize = get_le16(pb); /* video frame size */
    /* reserve room for flag byte, frame data and a possible palette */
    ret = av_new_packet(pkt, datasize + 768 + 1);
    if (ret < 0)
        return ret;
    pkt->data[0] = 0;  /* flag byte, filled in below */
    pkt->size = datasize + 1;

    ret = get_buffer(pb, pkt->data + 1, datasize);
    if (ret < datasize) {
        ret = AVERROR(EIO);
        goto fail;
    }

    datasize = get_le16(pb); /* palette size */
    if (datasize) {
        if (datasize != 768) {
            av_log(s, AV_LOG_ERROR, "invalid palette size %u\n", datasize);
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }
        pkt->data[0] |= C93_HAS_PALETTE;
        ret = get_buffer(pb, pkt->data + pkt->size, datasize);
        if (ret < datasize) {
            ret = AVERROR(EIO);
            goto fail;
        }
        pkt->size += 768;
    }
    pkt->stream_index = 0;
    c93->next_pkt_is_audio = 1;

    /* only the first frame is guaranteed to not reference previous frames */
    if (c93->current_block == 0 && c93->current_frame == 0) {
        pkt->flags |= AV_PKT_FLAG_KEY;
        pkt->data[0] |= C93_FIRST_FRAME;
    }
    return 0;

    fail:
    av_free_packet(pkt);
    return ret;
}
AVInputFormat c93_demuxer = {
"c93",
NULL_IF_CONFIG_SMALL("Interplay C93"),
sizeof(C93DemuxContext),
probe,
read_header,
read_packet,
};
| 123linslouis-android-video-cutter | jni/libavformat/c93.c | C | asf20 | 5,697 |
/*
* GXF demuxer
* copyright (c) 2006 Reimar Doeffinger
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_GXF_H
#define AVFORMAT_GXF_H
/* GXF packet type identifiers, shared by the GXF muxer and demuxer. */
typedef enum {
    PKT_MAP = 0xbc,    /* map (index) packet */
    PKT_MEDIA = 0xbf,  /* media (essence) packet */
    PKT_EOS = 0xfb,    /* end-of-stream packet */
    PKT_FLT = 0xfc,    /* field locator table packet */
    PKT_UMF = 0xfd,    /* unified material format packet */
} GXFPktType;

/* Tags describing material-level metadata in a map packet. */
typedef enum {
    MAT_NAME = 0x40,
    MAT_FIRST_FIELD = 0x41,
    MAT_LAST_FIELD = 0x42,
    MAT_MARK_IN = 0x43,
    MAT_MARK_OUT = 0x44,
    MAT_SIZE = 0x45,
} GXFMatTag;

/* Tags describing per-track metadata in a map packet. */
typedef enum {
    TRACK_NAME = 0x4c,
    TRACK_AUX = 0x4d,
    TRACK_VER = 0x4e,
    TRACK_MPG_AUX = 0x4f,
    TRACK_FPS = 0x50,
    TRACK_LINES = 0x51,
    TRACK_FPF = 0x52,
} GXFTrackTag;
#endif /* AVFORMAT_GXF_H */
| 123linslouis-android-video-cutter | jni/libavformat/gxf.h | C | asf20 | 1,491 |
/*
* WavPack demuxer
* Copyright (c) 2006 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "apetag.h"
#include "id3v1.h"
// specs say that maximum block size is 1Mb
// NOTE(review): 1047576 looks like a typo for 1048576 (1 MiB); kept
// as-is since changing it would alter which blocks are rejected
#define WV_BLOCK_LIMIT 1047576

/* bytes of the block header kept and prepended to each output packet */
#define WV_EXTRA_SIZE 12

/* flag bits of the 32-bit flags word in the WavPack block header */
enum WV_FLAGS{
    WV_MONO   = 0x0004,
    WV_HYBRID = 0x0008,
    WV_JOINT  = 0x0010,
    WV_CROSSD = 0x0020,
    WV_HSHAPE = 0x0040,
    WV_FLOAT  = 0x0080,
    WV_INT32  = 0x0100,
    WV_HBR    = 0x0200,
    WV_HBAL   = 0x0400,
    WV_MCINIT = 0x0800,
    WV_MCEND  = 0x1000,
};

/* sampling rates indexed by bits 23-26 of the flags; -1 = custom rate */
static const int wv_rates[16] = {
     6000,  8000,  9600, 11025, 12000, 16000,  22050, 24000,
    32000, 44100, 48000, 64000, 88200, 96000, 192000,    -1
};

typedef struct{
    uint32_t blksize, flags;        /* payload size and header flags of the
                                       current block */
    int rate, chan, bpp;            /* format taken from the first block */
    uint32_t samples, soff;         /* total samples; current block's offset
                                       in samples */
    int block_parsed;               /* header of current block consumed? */
    uint8_t extra[WV_EXTRA_SIZE];   /* saved header bytes, prepended to pkts */
    int64_t pos;                    /* file position of the current block */
}WVContext;
/* Probe for the "wvpk" signature at the start of a WavPack file. */
static int wv_probe(AVProbeData *p)
{
    const uint8_t *b = p->buf;

    if (p->buf_size <= 32)
        return 0;

    return (b[0] == 'w' && b[1] == 'v' && b[2] == 'p' && b[3] == 'k')
           ? AVPROBE_SCORE_MAX : 0;
}
/*
 * Read and validate one WavPack block header at the current position,
 * filling wc->blksize/flags/samples/soff and deriving rate, channel
 * count and bits per sample from the flags.  A custom sampling rate
 * (table value -1) is recovered by scanning the block's metadata
 * sub-blocks for id 0x27.  The format of every block after the first
 * must match the first block.  Returns 0 on success, -1 on error.
 */
static int wv_read_block_header(AVFormatContext *ctx, ByteIOContext *pb)
{
    WVContext *wc = ctx->priv_data;
    uint32_t tag, ver;
    int size;
    int rate, bpp, chan;

    wc->pos = url_ftell(pb);
    tag = get_le32(pb);
    if (tag != MKTAG('w', 'v', 'p', 'k'))
        return -1;
    size = get_le32(pb);
    if(size < 24 || size > WV_BLOCK_LIMIT){
        av_log(ctx, AV_LOG_ERROR, "Incorrect block size %i\n", size);
        return -1;
    }
    wc->blksize = size;
    ver = get_le16(pb);
    if(ver < 0x402 || ver > 0x410){
        av_log(ctx, AV_LOG_ERROR, "Unsupported version %03X\n", ver);
        return -1;
    }
    get_byte(pb); // track no
    get_byte(pb); // track sub index
    wc->samples = get_le32(pb); // total samples in file
    wc->soff = get_le32(pb); // offset in samples of current block
    get_buffer(pb, wc->extra, WV_EXTRA_SIZE);
    wc->flags = AV_RL32(wc->extra + 4);
    //parse flags
    bpp = ((wc->flags & 3) + 1) << 3;       /* 8/16/24/32 bits per sample */
    chan = 1 + !(wc->flags & WV_MONO);
    rate = wv_rates[(wc->flags >> 23) & 0xF];
    if(rate == -1 && !wc->block_parsed){
        /* custom sampling rate: scan the sub-blocks for metadata id 0x27;
         * needs seeking, so it cannot be done on a streamed input */
        int64_t block_end = url_ftell(pb) + wc->blksize - 24;
        if(url_is_streamed(pb)){
            av_log(ctx, AV_LOG_ERROR, "Cannot determine custom sampling rate\n");
            return -1;
        }
        while(url_ftell(pb) < block_end){
            int id, size; /* NOTE: this size shadows the outer block size */
            id = get_byte(pb);
            size = (id & 0x80) ? get_le24(pb) : get_byte(pb);
            size <<= 1;     /* sub-block sizes are stored in words */
            if(id&0x40)
                size--;     /* odd-length sub-block */
            if((id&0x3F) == 0x27){
                rate = get_le24(pb);
                break;
            }else{
                url_fskip(pb, size);
            }
        }
        if(rate == -1){
            av_log(ctx, AV_LOG_ERROR, "Cannot determine custom sampling rate\n");
            return -1;
        }
        /* rewind to just after the fixed header */
        url_fseek(pb, block_end - wc->blksize + 24, SEEK_SET);
    }
    /* remember the format of the first block ... */
    if(!wc->bpp) wc->bpp = bpp;
    if(!wc->chan) wc->chan = chan;
    if(!wc->rate) wc->rate = rate;

    /* ... and insist that later blocks match it */
    if(wc->flags && bpp != wc->bpp){
        av_log(ctx, AV_LOG_ERROR, "Bits per sample differ, this block: %i, header block: %i\n", bpp, wc->bpp);
        return -1;
    }
    if(wc->flags && chan != wc->chan){
        av_log(ctx, AV_LOG_ERROR, "Channels differ, this block: %i, header block: %i\n", chan, wc->chan);
        return -1;
    }
    if(wc->flags && rate != -1 && rate != wc->rate){
        av_log(ctx, AV_LOG_ERROR, "Sampling rate differ, this block: %i, header block: %i\n", rate, wc->rate);
        return -1;
    }
    wc->blksize = size - 24; /* payload left after the 24-byte header */
    return 0;
}
/*
 * Parse the first block header, create the single audio stream and,
 * for seekable inputs, pick up trailing APE/ID3v1 tags.
 */
static int wv_read_header(AVFormatContext *s,
                          AVFormatParameters *ap)
{
    ByteIOContext *pb = s->pb;
    WVContext *wc = s->priv_data;
    AVStream *st;

    wc->block_parsed = 0;
    if(wv_read_block_header(s, pb) < 0)
        return -1;

    /* now we are ready: build format streams */
    st = av_new_stream(s, 0);
    if (!st)
        return -1;
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = CODEC_ID_WAVPACK;
    st->codec->channels = wc->chan;
    st->codec->sample_rate = wc->rate;
    st->codec->bits_per_coded_sample = wc->bpp;
    av_set_pts_info(st, 64, 1, wc->rate); /* timestamps count samples */
    st->start_time = 0;
    st->duration = wc->samples;

    if(!url_is_streamed(s->pb)) {
        /* tags live at the end of the file; restore position afterwards */
        int64_t cur = url_ftell(s->pb);
        ff_ape_parse_tag(s);
        if(!av_metadata_get(s->metadata, "", NULL, AV_METADATA_IGNORE_SUFFIX))
            ff_id3v1_read(s);
        url_fseek(s->pb, cur, SEEK_SET);
    }

    return 0;
}
/*
 * Emit one block as a packet: WV_EXTRA_SIZE saved header bytes followed
 * by the block payload.  Each block position is also recorded in the
 * index so wv_read_seek can jump straight to it later.
 */
static int wv_read_packet(AVFormatContext *s,
                          AVPacket *pkt)
{
    WVContext *wc = s->priv_data;
    int ret;

    if (url_feof(s->pb))
        return AVERROR(EIO);
    if(wc->block_parsed){
        /* previous block consumed: read the next block header */
        if(wv_read_block_header(s, s->pb) < 0)
            return -1;
    }

    if(av_new_packet(pkt, wc->blksize + WV_EXTRA_SIZE) < 0)
        return AVERROR(ENOMEM);
    memcpy(pkt->data, wc->extra, WV_EXTRA_SIZE);
    ret = get_buffer(s->pb, pkt->data + WV_EXTRA_SIZE, wc->blksize);
    if(ret != wc->blksize){
        av_free_packet(pkt);
        return AVERROR(EIO);
    }
    pkt->stream_index = 0;
    wc->block_parsed = 1;
    pkt->size = ret + WV_EXTRA_SIZE;
    pkt->pts = wc->soff; /* block's starting sample number */
    av_add_index_entry(s->streams[0], wc->pos, pkt->pts, 0, 0, AVINDEX_KEYFRAME);
    return 0;
}
/*
 * Seek to timestamp (in samples).  If the target is already in the
 * index, jump directly; otherwise read forward packet by packet until
 * the timestamp is reached (indexing each block on the way).
 */
static int wv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    AVStream *st = s->streams[stream_index];
    WVContext *wc = s->priv_data;
    AVPacket pkt1, *pkt = &pkt1;
    int ret;
    int index = av_index_search_timestamp(st, timestamp, flags);
    int64_t pos, pts;

    /* if found, seek there */
    if (index >= 0){
        wc->block_parsed = 1;
        url_fseek(s->pb, st->index_entries[index].pos, SEEK_SET);
        return 0;
    }
    /* if timestamp is out of bounds, return error */
    if(timestamp < 0 || timestamp >= s->duration)
        return -1;

    /* scan forward; restore the original position if reading fails */
    pos = url_ftell(s->pb);
    do{
        ret = av_read_frame(s, pkt);
        if (ret < 0){
            url_fseek(s->pb, pos, SEEK_SET);
            return -1;
        }
        pts = pkt->pts;
        av_free_packet(pkt);
    }while(pts < timestamp);
    return 0;
}
AVInputFormat wv_demuxer = {
"wv",
NULL_IF_CONFIG_SMALL("WavPack"),
sizeof(WVContext),
wv_probe,
wv_read_header,
wv_read_packet,
NULL,
wv_read_seek,
};
| 123linslouis-android-video-cutter | jni/libavformat/wv.c | C | asf20 | 7,355 |
/*
* copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_AVIO_H
#define AVFORMAT_AVIO_H
/**
* @file
* unbuffered I/O operations
*
* @warning This file has to be considered an internal but installed
* header, so it should not be directly included in your projects.
*/
#include <stdint.h>
#include "libavutil/common.h"
/* unbuffered I/O */
/**
* URL Context.
* New fields can be added to the end with minor version bumps.
* Removal, reordering and changes to existing fields require a major
* version bump.
* sizeof(URLContext) must not be used outside libav*.
*/
typedef struct URLContext {
#if LIBAVFORMAT_VERSION_MAJOR >= 53
const AVClass *av_class; ///< information for av_log(). Set by url_open().
#endif
struct URLProtocol *prot;
int flags;
int is_streamed; /**< true if streamed (no seek possible), default = false */
int max_packet_size; /**< if non zero, the stream is packetized with this max packet size */
void *priv_data;
char *filename; /**< specified URL */
} URLContext;
typedef struct URLPollEntry {
URLContext *handle;
int events;
int revents;
} URLPollEntry;
#define URL_RDONLY 0
#define URL_WRONLY 1
#define URL_RDWR 2
typedef int URLInterruptCB(void);
/**
* Creates an URLContext for accessing to the resource indicated by
* url, and opens it using the URLProtocol up.
*
* @param puc pointer to the location where, in case of success, the
* function puts the pointer to the created URLContext
* @param flags flags which control how the resource indicated by url
* is to be opened
* @return 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure
*/
int url_open_protocol (URLContext **puc, struct URLProtocol *up,
const char *url, int flags);
/**
* Creates an URLContext for accessing to the resource indicated by
* url, and opens it.
*
* @param puc pointer to the location where, in case of success, the
* function puts the pointer to the created URLContext
* @param flags flags which control how the resource indicated by url
* is to be opened
* @return 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure
*/
int url_open(URLContext **h, const char *url, int flags);
/**
* Reads up to size bytes from the resource accessed by h, and stores
* the read bytes in buf.
*
* @return The number of bytes actually read, or a negative value
* corresponding to an AVERROR code in case of error. A value of zero
* indicates that it is not possible to read more from the accessed
* resource (except if the value of the size argument is also zero).
*/
int url_read(URLContext *h, unsigned char *buf, int size);
/**
* Read as many bytes as possible (up to size), calling the
* read function multiple times if necessary.
* Will also retry if the read function returns AVERROR(EAGAIN).
* This makes special short-read handling in applications
* unnecessary, if the return value is < size then it is
* certain there was either an error or the end of file was reached.
*/
int url_read_complete(URLContext *h, unsigned char *buf, int size);
int url_write(URLContext *h, unsigned char *buf, int size);
/**
* Changes the position that will be used by the next read/write
* operation on the resource accessed by h.
*
* @param pos specifies the new position to set
* @param whence specifies how pos should be interpreted, it must be
* one of SEEK_SET (seek from the beginning), SEEK_CUR (seek from the
* current position), SEEK_END (seek from the end), or AVSEEK_SIZE
* (return the filesize of the requested resource, pos is ignored).
* @return a negative value corresponding to an AVERROR code in case
* of failure, or the resulting file position, measured in bytes from
* the beginning of the file. You can use this feature together with
* SEEK_CUR to read the current file position.
*/
int64_t url_seek(URLContext *h, int64_t pos, int whence);
/**
* Closes the resource accessed by the URLContext h, and frees the
* memory used by it.
*
* @return a negative value if an error condition occurred, 0
* otherwise
*/
int url_close(URLContext *h);
/**
* Returns a non-zero value if the resource indicated by url
* exists, 0 otherwise.
*/
int url_exist(const char *url);
int64_t url_filesize(URLContext *h);
/**
* Return the file descriptor associated with this URL. For RTP, this
* will return only the RTP file descriptor, not the RTCP file descriptor.
* To get both, use rtp_get_file_handles().
*
* @return the file descriptor associated with this URL, or <0 on error.
*/
int url_get_file_handle(URLContext *h);
/**
* Return the maximum packet size associated to packetized file
* handle. If the file is not packetized (stream like HTTP or file on
* disk), then 0 is returned.
*
* @param h file handle
* @return maximum packet size in bytes
*/
int url_get_max_packet_size(URLContext *h);
void url_get_filename(URLContext *h, char *buf, int buf_size);
/**
 * The callback is called in blocking functions to test regularly if
* asynchronous interruption is needed. AVERROR(EINTR) is returned
* in this case by the interrupted function. 'NULL' means no interrupt
* callback is given.
*/
void url_set_interrupt_cb(URLInterruptCB *interrupt_cb);
/* not implemented */
int url_poll(URLPollEntry *poll_table, int n, int timeout);
/**
* Pause and resume playing - only meaningful if using a network streaming
* protocol (e.g. MMS).
* @param pause 1 for pause, 0 for resume
*/
int av_url_read_pause(URLContext *h, int pause);
/**
* Seek to a given timestamp relative to some component stream.
* Only meaningful if using a network streaming protocol (e.g. MMS.).
* @param stream_index The stream index that the timestamp is relative to.
* If stream_index is (-1) the timestamp should be in AV_TIME_BASE
* units from the beginning of the presentation.
* If a stream_index >= 0 is used and the protocol does not support
* seeking based on component streams, the call will fail with ENOTSUP.
* @param timestamp timestamp in AVStream.time_base units
* or if there is no stream specified then in AV_TIME_BASE units.
* @param flags Optional combination of AVSEEK_FLAG_BACKWARD, AVSEEK_FLAG_BYTE
* and AVSEEK_FLAG_ANY. The protocol may silently ignore
* AVSEEK_FLAG_BACKWARD and AVSEEK_FLAG_ANY, but AVSEEK_FLAG_BYTE will
* fail with ENOTSUP if used and not supported.
* @return >= 0 on success
* @see AVInputFormat::read_seek
*/
int64_t av_url_read_seek(URLContext *h, int stream_index,
int64_t timestamp, int flags);
/**
* Passing this as the "whence" parameter to a seek function causes it to
* return the filesize without seeking anywhere. Supporting this is optional.
* If it is not supported then the seek function will return <0.
*/
#define AVSEEK_SIZE 0x10000
/**
 * ORing this flag into the "whence" parameter of a seek function causes it to
 * seek by any means (like reopening and linear reading) or other normally
 * unreasonable means that can be extremely slow.
* This may be ignored by the seek code.
*/
#define AVSEEK_FORCE 0x20000
typedef struct URLProtocol {
const char *name;
int (*url_open)(URLContext *h, const char *url, int flags);
int (*url_read)(URLContext *h, unsigned char *buf, int size);
int (*url_write)(URLContext *h, unsigned char *buf, int size);
int64_t (*url_seek)(URLContext *h, int64_t pos, int whence);
int (*url_close)(URLContext *h);
struct URLProtocol *next;
int (*url_read_pause)(URLContext *h, int pause);
int64_t (*url_read_seek)(URLContext *h, int stream_index,
int64_t timestamp, int flags);
int (*url_get_file_handle)(URLContext *h);
} URLProtocol;
#if LIBAVFORMAT_VERSION_MAJOR < 53
extern URLProtocol *first_protocol;
#endif
extern URLInterruptCB *url_interrupt_cb;
/**
* If protocol is NULL, returns the first registered protocol,
* if protocol is non-NULL, returns the next registered protocol after protocol,
* or NULL if protocol is the last one.
*/
URLProtocol *av_protocol_next(URLProtocol *p);
#if LIBAVFORMAT_VERSION_MAJOR < 53
/**
* @deprecated Use av_register_protocol() instead.
*/
attribute_deprecated int register_protocol(URLProtocol *protocol);
#endif
/**
* Registers the URLProtocol protocol.
*/
int av_register_protocol(URLProtocol *protocol);
/**
* Bytestream IO Context.
* New fields can be added to the end with minor version bumps.
* Removal, reordering and changes to existing fields require a major
* version bump.
* sizeof(ByteIOContext) must not be used outside libav*.
*/
typedef struct {
unsigned char *buffer;
int buffer_size;
unsigned char *buf_ptr, *buf_end;
void *opaque;
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size);
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size);
int64_t (*seek)(void *opaque, int64_t offset, int whence);
int64_t pos; /**< position in the file of the current buffer */
int must_flush; /**< true if the next seek should flush */
int eof_reached; /**< true if eof reached */
int write_flag; /**< true if open for writing */
int is_streamed;
int max_packet_size;
unsigned long checksum;
unsigned char *checksum_ptr;
unsigned long (*update_checksum)(unsigned long checksum, const uint8_t *buf, unsigned int size);
int error; ///< contains the error code or 0 if no error happened
int (*read_pause)(void *opaque, int pause);
int64_t (*read_seek)(void *opaque, int stream_index,
int64_t timestamp, int flags);
} ByteIOContext;
int init_put_byte(ByteIOContext *s,
unsigned char *buffer,
int buffer_size,
int write_flag,
void *opaque,
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
int64_t (*seek)(void *opaque, int64_t offset, int whence));
ByteIOContext *av_alloc_put_byte(
unsigned char *buffer,
int buffer_size,
int write_flag,
void *opaque,
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
int64_t (*seek)(void *opaque, int64_t offset, int whence));
void put_byte(ByteIOContext *s, int b);
void put_buffer(ByteIOContext *s, const unsigned char *buf, int size);
void put_le64(ByteIOContext *s, uint64_t val);
void put_be64(ByteIOContext *s, uint64_t val);
void put_le32(ByteIOContext *s, unsigned int val);
void put_be32(ByteIOContext *s, unsigned int val);
void put_le24(ByteIOContext *s, unsigned int val);
void put_be24(ByteIOContext *s, unsigned int val);
void put_le16(ByteIOContext *s, unsigned int val);
void put_be16(ByteIOContext *s, unsigned int val);
void put_tag(ByteIOContext *s, const char *tag);
void put_strz(ByteIOContext *s, const char *buf);
/**
* fseek() equivalent for ByteIOContext.
* @return new position or AVERROR.
*/
int64_t url_fseek(ByteIOContext *s, int64_t offset, int whence);
/**
* Skip given number of bytes forward.
* @param offset number of bytes
*/
void url_fskip(ByteIOContext *s, int64_t offset);
/**
* ftell() equivalent for ByteIOContext.
* @return position or AVERROR.
*/
int64_t url_ftell(ByteIOContext *s);
/**
* Gets the filesize.
* @return filesize or AVERROR
*/
int64_t url_fsize(ByteIOContext *s);
/**
* feof() equivalent for ByteIOContext.
* @return non zero if and only if end of file
*/
int url_feof(ByteIOContext *s);
int url_ferror(ByteIOContext *s);
int av_url_read_fpause(ByteIOContext *h, int pause);
int64_t av_url_read_fseek(ByteIOContext *h, int stream_index,
int64_t timestamp, int flags);
#define URL_EOF (-1)
/** @note return URL_EOF (-1) if EOF */
int url_fgetc(ByteIOContext *s);
/** @warning currently size is limited */
#ifdef __GNUC__
int url_fprintf(ByteIOContext *s, const char *fmt, ...) __attribute__ ((__format__ (__printf__, 2, 3)));
#else
int url_fprintf(ByteIOContext *s, const char *fmt, ...);
#endif
/** @note unlike fgets, the EOL character is not returned and a whole
line is parsed. return NULL if first char read was EOF */
char *url_fgets(ByteIOContext *s, char *buf, int buf_size);
void put_flush_packet(ByteIOContext *s);
/**
* Reads size bytes from ByteIOContext into buf.
* @return number of bytes read or AVERROR
*/
int get_buffer(ByteIOContext *s, unsigned char *buf, int size);
/**
* Reads size bytes from ByteIOContext into buf.
* This reads at most 1 packet. If that is not enough fewer bytes will be
* returned.
* @return number of bytes read or AVERROR
*/
int get_partial_buffer(ByteIOContext *s, unsigned char *buf, int size);
/** @note return 0 if EOF, so you cannot use it if EOF handling is
necessary */
int get_byte(ByteIOContext *s);
unsigned int get_le24(ByteIOContext *s);
unsigned int get_le32(ByteIOContext *s);
uint64_t get_le64(ByteIOContext *s);
unsigned int get_le16(ByteIOContext *s);
char *get_strz(ByteIOContext *s, char *buf, int maxlen);
unsigned int get_be16(ByteIOContext *s);
unsigned int get_be24(ByteIOContext *s);
unsigned int get_be32(ByteIOContext *s);
uint64_t get_be64(ByteIOContext *s);
uint64_t ff_get_v(ByteIOContext *bc);
/**
 * Return the is_streamed flag of the context.
 * Non-zero indicates the resource is a stream (presumably not
 * seekable -- confirm against url_fseek() behavior for such contexts).
 */
static inline int url_is_streamed(ByteIOContext *s)
{
    return s->is_streamed;
}
/**
* Creates and initializes a ByteIOContext for accessing the
* resource referenced by the URLContext h.
* @note When the URLContext h has been opened in read+write mode, the
* ByteIOContext can be used only for writing.
*
* @param s Used to return the pointer to the created ByteIOContext.
* In case of failure the pointed to value is set to NULL.
* @return 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure
*/
int url_fdopen(ByteIOContext **s, URLContext *h);
/** @warning must be called before any I/O */
int url_setbufsize(ByteIOContext *s, int buf_size);
#if LIBAVFORMAT_VERSION_MAJOR < 53
/** Reset the buffer for reading or writing.
* @note Will drop any data currently in the buffer without transmitting it.
* @param flags URL_RDONLY to set up the buffer for reading, or URL_WRONLY
* to set up the buffer for writing. */
int url_resetbuf(ByteIOContext *s, int flags);
#endif
/**
* Rewinds the ByteIOContext using the specified buffer containing the first buf_size bytes of the file.
* Used after probing to avoid seeking.
* Joins buf and s->buffer, taking any overlap into consideration.
* @note s->buffer must overlap with buf or they can't be joined and the function fails
* @note This function is NOT part of the public API
*
* @param s The read-only ByteIOContext to rewind
* @param buf The probe buffer containing the first buf_size bytes of the file
* @param buf_size The size of buf
* @return 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure
*/
int ff_rewind_with_probe_data(ByteIOContext *s, unsigned char *buf, int buf_size);
/**
* Creates and initializes a ByteIOContext for accessing the
* resource indicated by url.
* @note When the resource indicated by url has been opened in
* read+write mode, the ByteIOContext can be used only for writing.
*
* @param s Used to return the pointer to the created ByteIOContext.
* In case of failure the pointed to value is set to NULL.
* @param flags flags which control how the resource indicated by url
* is to be opened
* @return 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure
*/
int url_fopen(ByteIOContext **s, const char *url, int flags);
int url_fclose(ByteIOContext *s);
URLContext *url_fileno(ByteIOContext *s);
/**
* Return the maximum packet size associated to packetized buffered file
* handle. If the file is not packetized (stream like http or file on
* disk), then 0 is returned.
*
* @param s buffered file handle
* @return maximum packet size in bytes
*/
int url_fget_max_packet_size(ByteIOContext *s);
int url_open_buf(ByteIOContext **s, uint8_t *buf, int buf_size, int flags);
/** return the written or read size */
int url_close_buf(ByteIOContext *s);
/**
* Open a write only memory stream.
*
* @param s new IO context
* @return zero if no error.
*/
int url_open_dyn_buf(ByteIOContext **s);
/**
* Open a write only packetized memory stream with a maximum packet
* size of 'max_packet_size'. The stream is stored in a memory buffer
* with a big endian 4 byte header giving the packet size in bytes.
*
* @param s new IO context
* @param max_packet_size maximum packet size (must be > 0)
* @return zero if no error.
*/
int url_open_dyn_packet_buf(ByteIOContext **s, int max_packet_size);
/**
* Return the written size and a pointer to the buffer. The buffer
* must be freed with av_free().
* @param s IO context
* @param pbuffer pointer to a byte buffer
* @return the length of the byte buffer
*/
int url_close_dyn_buf(ByteIOContext *s, uint8_t **pbuffer);
unsigned long ff_crc04C11DB7_update(unsigned long checksum, const uint8_t *buf,
unsigned int len);
unsigned long get_checksum(ByteIOContext *s);
void init_checksum(ByteIOContext *s,
unsigned long (*update_checksum)(unsigned long c, const uint8_t *p, unsigned int len),
unsigned long checksum);
/* udp.c */
int udp_set_remote_url(URLContext *h, const char *uri);
int udp_get_local_port(URLContext *h);
#if (LIBAVFORMAT_VERSION_MAJOR <= 52)
int udp_get_file_handle(URLContext *h);
#endif
#endif /* AVFORMAT_AVIO_H */
| 123linslouis-android-video-cutter | jni/libavformat/avio.h | C | asf20 | 18,589 |
/*
* Delay Locked Loop based time filter prototypes and declarations
* Copyright (c) 2009 Samalyse
* Copyright (c) 2009 Michael Niedermayer
* Author: Olivier Guilyardi <olivier samalyse com>
* Michael Niedermayer <michaelni gmx at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_TIMEFILTER_H
#define AVFORMAT_TIMEFILTER_H
/**
* Opaque type representing a time filter state
*
* The purpose of this filter is to provide a way to compute accurate time
* stamps that can be compared to wall clock time, especially when dealing
* with two clocks: the system clock and a hardware device clock, such as
* a soundcard.
*/
typedef struct TimeFilter TimeFilter;
/**
* Create a new Delay Locked Loop time filter
*
* feedback2_factor and feedback3_factor are the factors used for the
* multiplications that are respectively performed in the second and third
* feedback paths of the loop.
*
* Unless you know what you are doing, you should set these as follow:
*
* o = 2 * M_PI * bandwidth * period
* feedback2_factor = sqrt(2 * o)
* feedback3_factor = o * o
*
* Where bandwidth is up to you to choose. Smaller values will filter out more
* of the jitter, but also take a longer time for the loop to settle. A good
* starting point is something between 0.3 and 3 Hz.
*
* @param clock_period period of the hardware clock in seconds
* (for example 1.0/44100)
*
* For more details about these parameters and background concepts please see:
* http://www.kokkinizita.net/papers/usingdll.pdf
*/
TimeFilter * ff_timefilter_new(double clock_period, double feedback2_factor, double feedback3_factor);
/**
* Update the filter
*
* This function must be called in real time, at each process cycle.
*
* @param period the device cycle duration in clock_periods. For example, at
* 44.1kHz and a buffer size of 512 frames, period = 512 when clock_period
* was 1.0/44100, or 512/44100 if clock_period was 1.
*
* system_time, in seconds, should be the value of the system clock time,
* at (or as close as possible to) the moment the device hardware interrupt
* occured (or any other event the device clock raises at the beginning of a
* cycle).
*
* @return the filtered time, in seconds
*/
double ff_timefilter_update(TimeFilter *self, double system_time, double period);
/**
* Reset the filter
*
* This function should mainly be called in case of XRUN.
*
* Warning: after calling this, the filter is in an undetermined state until
* the next call to ff_timefilter_update()
*/
void ff_timefilter_reset(TimeFilter *);
/**
* Free all resources associated with the filter
*/
void ff_timefilter_destroy(TimeFilter *);
#endif /* AVFORMAT_TIMEFILTER_H */
| 123linslouis-android-video-cutter | jni/libavformat/timefilter.h | C | asf20 | 3,432 |
/*
* AIFF/AIFF-C muxer
* Copyright (c) 2006 Patrick Guimond
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "aiff.h"
typedef struct {
int64_t form;
int64_t frames;
int64_t ssnd;
} AIFFOutputContext;
/**
 * Write the FORM AIFF (or AIFF-C) file header and the FVER, COMM and
 * SSND chunks.  Several size/count fields are written as 0 placeholders
 * and patched in aiff_write_trailer() once the final sizes are known;
 * their offsets are remembered in the AIFFOutputContext.
 *
 * @return 0 on success, -1 if no codec tag is set or block_align is
 *         missing for compressed (AIFF-C) data
 */
static int aiff_write_header(AVFormatContext *s)
{
    AIFFOutputContext *aiff = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[0]->codec;
    AVExtFloat sample_rate;
    int aifc = 0;

    /* First verify if format is ok */
    if (!enc->codec_tag)
        return -1;
    /* any tag other than 'NONE' means compressed data -> write AIFF-C */
    if (enc->codec_tag != MKTAG('N','O','N','E'))
        aifc = 1;

    /* FORM AIFF header */
    put_tag(pb, "FORM");
    aiff->form = url_ftell(pb);   /* offset of the file-length field */
    put_be32(pb, 0);              /* file length, patched in the trailer */
    put_tag(pb, aifc ? "AIFC" : "AIFF");

    if (aifc) { // compressed audio
        enc->bits_per_coded_sample = 16;
        if (!enc->block_align) {
            av_log(s, AV_LOG_ERROR, "block align not set\n");
            return -1;
        }
        /* Version chunk */
        put_tag(pb, "FVER");
        put_be32(pb, 4);
        put_be32(pb, 0xA2805140); /* AIFC version 1 timestamp constant */
    }

    /* Common chunk */
    put_tag(pb, "COMM");
    put_be32(pb, aifc ? 24 : 18); /* size */
    put_be16(pb, enc->channels);  /* Number of channels */

    aiff->frames = url_ftell(pb); /* offset of the frame-count field */
    put_be32(pb, 0);              /* Number of frames, patched in the trailer */

    if (!enc->bits_per_coded_sample)
        enc->bits_per_coded_sample = av_get_bits_per_sample(enc->codec_id);
    if (!enc->bits_per_coded_sample) {
        av_log(s, AV_LOG_ERROR, "could not compute bits per sample\n");
        return -1;
    }
    if (!enc->block_align)
        enc->block_align = (enc->bits_per_coded_sample * enc->channels) >> 3;

    put_be16(pb, enc->bits_per_coded_sample); /* Sample size */

    /* the sample rate is stored as an 80-bit IEEE extended float */
    sample_rate = av_dbl2ext((double)enc->sample_rate);
    put_buffer(pb, (uint8_t*)&sample_rate, sizeof(sample_rate));

    if (aifc) {
        put_le32(pb, enc->codec_tag); /* compression type */
        put_be16(pb, 0);              /* empty compression name */
    }

    /* Sound data chunk */
    put_tag(pb, "SSND");
    aiff->ssnd = url_ftell(pb);   /* offset of the SSND size field */
    put_be32(pb, 0);              /* Sound samples data size, patched later */
    put_be32(pb, 0);              /* Data offset */
    put_be32(pb, 0);              /* Block-size (block align) */

    av_set_pts_info(s->streams[0], 64, 1, s->streams[0]->codec->sample_rate);

    /* Data is starting here */
    put_flush_packet(pb);

    return 0;
}
/* Copy one packet of encoded audio straight into the SSND data area. */
static int aiff_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    put_buffer(s->pb, pkt->data, pkt->size);
    return 0;
}
/**
 * Patch the placeholder fields written by aiff_write_header(): the
 * FORM length, the COMM frame count and the SSND chunk size.  The file
 * is first padded to an even length (chunk sizes must be even).
 * Patching only happens when the output is seekable.
 */
static int aiff_write_trailer(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    AIFFOutputContext *aiff = s->priv_data;
    AVCodecContext *enc = s->streams[0]->codec;

    /* Chunks sizes must be even */
    int64_t file_size, end_size;
    end_size = file_size = url_ftell(pb);
    if (file_size & 1) {
        put_byte(pb, 0); /* pad byte; not counted in the chunk sizes below */
        end_size++;
    }

    if (!url_is_streamed(s->pb)) {
        /* File length */
        url_fseek(pb, aiff->form, SEEK_SET);
        put_be32(pb, file_size - aiff->form - 4);

        /* Number of sample frames: audio bytes (excluding the 12 bytes
         * of SSND preamble after aiff->ssnd) divided by the frame size */
        url_fseek(pb, aiff->frames, SEEK_SET);
        put_be32(pb, (file_size-aiff->ssnd-12)/enc->block_align);

        /* Sound Data chunk size */
        url_fseek(pb, aiff->ssnd, SEEK_SET);
        put_be32(pb, file_size - aiff->ssnd - 4);

        /* return to the end */
        url_fseek(pb, end_size, SEEK_SET);

        put_flush_packet(pb);
    }
    return 0;
}
/* Muxer definition for Audio IFF; default codec is big-endian 16-bit PCM. */
AVOutputFormat aiff_muxer = {
    .name           = "aiff",
    .long_name      = NULL_IF_CONFIG_SMALL("Audio IFF"),
    .mime_type      = "audio/aiff",
    .extensions     = "aif,aiff,afc,aifc",
    .priv_data_size = sizeof(AIFFOutputContext),
    .audio_codec    = CODEC_ID_PCM_S16BE,
    .video_codec    = CODEC_ID_NONE,
    .write_header   = aiff_write_header,
    .write_packet   = aiff_write_packet,
    .write_trailer  = aiff_write_trailer,
    .codec_tag      = (const AVCodecTag* const []){ff_codec_aiff_tags, 0},
};
| 123linslouis-android-video-cutter | jni/libavformat/aiffenc.c | C | asf20 | 4,622 |
/*
* Libavformat API example: Output a media file in any supported
* libavformat format. The default codecs are used.
*
* Copyright (c) 2003 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#undef exit
/* 5 seconds stream duration */
#define STREAM_DURATION 5.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
static int sws_flags = SWS_BICUBIC;
/**************************************************************/
/* audio output */
float t, tincr, tincr2;
int16_t *samples;
uint8_t *audio_outbuf;
int audio_outbuf_size;
int audio_input_frame_size;
/**
 * Add an audio output stream with fixed demo parameters
 * (64 kb/s, 44100 Hz, 2 channels).  Exits the program if the
 * stream cannot be allocated.
 */
static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    st = av_new_stream(oc, 1); /* stream id 1 */
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;
    c->codec_id = codec_id;
    c->codec_type = AVMEDIA_TYPE_AUDIO;

    /* put sample parameters */
    c->bit_rate = 64000;
    c->sample_rate = 44100;
    c->channels = 2;

    // some formats want stream headers to be separate
    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}
/**
 * Find and open the audio encoder for the stream, initialize the
 * sine-sweep generator state (globals t, tincr, tincr2) and allocate
 * the global output and sample buffers.  Exits on any failure.
 */
static void open_audio(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVCodec *codec;

    c = st->codec;

    /* find the audio encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* init signal generator */
    t = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    audio_outbuf_size = 10000;
    audio_outbuf = av_malloc(audio_outbuf_size);

    /* ugly hack for PCM codecs (will be removed ASAP with new PCM
       support to compute the input frame size in samples */
    if (c->frame_size <= 1) {
        audio_input_frame_size = audio_outbuf_size / c->channels;
        switch(st->codec->codec_id) {
        case CODEC_ID_PCM_S16LE:
        case CODEC_ID_PCM_S16BE:
        case CODEC_ID_PCM_U16LE:
        case CODEC_ID_PCM_U16BE:
            /* 16-bit samples take two bytes each, so halve the count */
            audio_input_frame_size >>= 1;
            break;
        default:
            break;
        }
    } else {
        audio_input_frame_size = c->frame_size;
    }
    /* 2 bytes per sample, channels interleaved */
    samples = av_malloc(audio_input_frame_size * 2 * c->channels);
}
/* Fill 'samples' with 'frame_size' frames of a 16-bit sine sweep, writing
   the same value into all 'nb_channels' interleaved channels.  Advances
   the global phase (t) and per-sample phase increment (tincr). */
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
    int16_t *dst = samples;
    int frame, ch;

    for (frame = 0; frame < frame_size; frame++) {
        int v = (int)(sin(t) * 10000);
        for (ch = 0; ch < nb_channels; ch++)
            *dst++ = v;
        t += tincr;
        tincr += tincr2;
    }
}
/**
 * Generate one frame of dummy audio, encode it into the global output
 * buffer and hand the packet to the muxer.  Exits the program if
 * writing fails.
 */
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVPacket pkt;
    av_init_packet(&pkt);

    c = st->codec;

    get_audio_frame(samples, audio_input_frame_size, c->channels);

    pkt.size= avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);

    if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
        /* rescale the codec timestamp into the stream time base */
        pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
    pkt.flags |= AV_PKT_FLAG_KEY;
    pkt.stream_index= st->index;
    pkt.data= audio_outbuf;

    /* write the compressed frame in the media file */
    if (av_interleaved_write_frame(oc, &pkt) != 0) {
        fprintf(stderr, "Error while writing audio frame\n");
        exit(1);
    }
}
/* Free the audio buffers and close the audio codec. */
static void close_audio(AVFormatContext *oc, AVStream *st)
{
    av_free(audio_outbuf);
    av_free(samples);
    avcodec_close(st->codec);
}
/**************************************************************/
/* video output */
AVFrame *picture, *tmp_picture;
uint8_t *video_outbuf;
int frame_count, video_outbuf_size;
/**
 * Add a video output stream with fixed demo parameters
 * (400 kb/s, 352x288, STREAM_FRAME_RATE fps, STREAM_PIX_FMT).
 * Exits the program if the stream cannot be allocated.
 */
static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    st = av_new_stream(oc, 0); /* stream id 0 */
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;
    c->codec_id = codec_id;
    c->codec_type = AVMEDIA_TYPE_VIDEO;

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* time base: this is the fundamental unit of time (in seconds) in terms
       of which frame timestamps are represented. for fixed-fps content,
       timebase should be 1/framerate and timestamp increments should be
       identically 1. */
    c->time_base.den = STREAM_FRAME_RATE;
    c->time_base.num = 1;
    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
    c->pix_fmt = STREAM_PIX_FMT;
    if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B frames */
        c->max_b_frames = 2;
    }
    if (c->codec_id == CODEC_ID_MPEG1VIDEO){
        /* Needed to avoid using macroblocks in which some coeffs overflow.
           This does not happen with normal video, it just happens here as
           the motion of the chroma plane does not match the luma plane. */
        c->mb_decision=2;
    }
    // some formats want stream headers to be separate
    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}
static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
{
AVFrame *picture;
uint8_t *picture_buf;
int size;
picture = avcodec_alloc_frame();
if (!picture)
return NULL;
size = avpicture_get_size(pix_fmt, width, height);
picture_buf = av_malloc(size);
if (!picture_buf) {
av_free(picture);
return NULL;
}
avpicture_fill((AVPicture *)picture, picture_buf,
pix_fmt, width, height);
return picture;
}
/**
 * Find and open the video encoder, allocate the encode output buffer
 * (unless the format accepts raw pictures) and the frame buffers.
 * A temporary YUV420P frame is also allocated when the codec uses a
 * different pixel format.  Exits on any failure.
 */
static void open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodec *codec;
    AVCodecContext *c;

    c = st->codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open the codec */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* XXX: API change will be done */
        /* buffers passed into lav* can be allocated any way you prefer,
           as long as they're aligned enough for the architecture, and
           they're freed appropriately (such as using av_free for buffers
           allocated with av_malloc) */
        video_outbuf_size = 200000;
        video_outbuf = av_malloc(video_outbuf_size);
    }

    /* allocate the encoded raw picture */
    picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* if the output format is not YUV420P, then a temporary YUV420P
       picture is needed too. It is then converted to the required
       output format */
    tmp_picture = NULL;
    if (c->pix_fmt != PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}
/* Paint a synthetic YUV420P test pattern that animates with frame_index:
   a luma gradient plus quarter-resolution chroma gradients. */
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
    const int i = frame_index;
    int x, y;

    /* luma plane, full resolution */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr planes, quarter resolution (4:2:0 subsampling) */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}
/**
 * Produce the next video frame (re-submitting the last picture once
 * STREAM_NB_FRAMES is reached so delayed B frames can drain), encode
 * it unless the output format takes raw pictures, and hand the packet
 * to the muxer.  Exits the program on write failure.
 */
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    static struct SwsContext *img_convert_ctx; /* lazily created, reused */

    c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* no more frame to compress. The codec has a latency of a few
           frames if using B frames, so we get the last frames by
           passing the same picture again */
    } else {
        if (c->pix_fmt != PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
               to the codec pixel format if needed */
            if (img_convert_ctx == NULL) {
                img_convert_ctx = sws_getContext(c->width, c->height,
                                                 PIX_FMT_YUV420P,
                                                 c->width, c->height,
                                                 c->pix_fmt,
                                                 sws_flags, NULL, NULL, NULL);
                if (img_convert_ctx == NULL) {
                    fprintf(stderr, "Cannot initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
            sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
                      0, c->height, picture->data, picture->linesize);
        } else {
            fill_yuv_image(picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near
           futur for that */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index= st->index;
        /* the packet payload is the AVPicture struct itself */
        pkt.data= (uint8_t *)picture;
        pkt.size= sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
        /* if zero size, it means the image was buffered */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                /* rescale the codec timestamp into the stream time base */
                pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
            if(c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index= st->index;
            pkt.data= video_outbuf;
            pkt.size= out_size;

            /* write the compressed frame in the media file */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}
/* Free the frame buffers and the encode output buffer, then close the
   video codec. */
static void close_video(AVFormatContext *oc, AVStream *st)
{
    av_free(video_outbuf);

    if (tmp_picture) {
        av_free(tmp_picture->data[0]);
        av_free(tmp_picture);
    }
    av_free(picture->data[0]);
    av_free(picture);

    avcodec_close(st->codec);
}
/**************************************************************/
/* media file output */
/**
 * Entry point: guess the output format from the file name, create one
 * audio and/or one video stream (as the format supports), open the
 * codecs and the output file, then interleave generated audio/video
 * frames until STREAM_DURATION seconds have been produced.
 */
int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    double audio_pts, video_pts;
    int i;

    /* initialize libavcodec, and register all codecs and formats */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        exit(1);
    }

    filename = argv[1];

    /* auto detect the output format from the name. default is
       mpeg. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        exit(1);
    }

    /* allocate the output media context */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        exit(1);
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* add the audio and video streams using the default format codecs
       and initialize the codecs */
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec);
    }
    if (fmt->audio_codec != CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec);
    }

    /* set the output parameters (must be done even if no
       parameters). */
    if (av_set_parameters(oc, NULL) < 0) {
        fprintf(stderr, "Invalid output format parameters\n");
        exit(1);
    }

    dump_format(oc, 0, filename, 1);

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (video_st)
        open_video(oc, video_st);
    if (audio_st)
        open_audio(oc, audio_st);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            exit(1);
        }
    }

    /* write the stream header, if any */
    av_write_header(oc);

    for(;;) {
        /* compute current audio and video time */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
        else
            video_pts = 0.0;

        /* stop once both streams have reached the target duration */
        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames: feed whichever
           stream is currently behind */
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
        }
    }

    /* write the trailer, if any. the trailer must be written
     * before you close the CodecContexts open when you wrote the
     * header; otherwise write_trailer may try to use memory that
     * was freed on av_codec_close() */
    av_write_trailer(oc);

    /* close each codec */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* free the streams */
    for(i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE)) {
        /* close the output file */
        url_fclose(oc->pb);
    }

    /* free the stream */
    av_free(oc);

    return 0;
}
| 123linslouis-android-video-cutter | jni/libavformat/output-example.c | C | asf20 | 16,736 |
/*
* SoX native format muxer
* Copyright (c) 2009 Daniel Verkamp <daniel@drv.nu>
*
* Based on libSoX sox-fmt.c
* Copyright (c) 2008 robs@users.sourceforge.net
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* SoX native format muxer
* @file
* @author Daniel Verkamp
* @sa http://wiki.multimedia.cx/index.php?title=SoX_native_intermediate_format
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "sox.h"
typedef struct {
int64_t header_size;
} SoXContext;
/**
 * Write the SoX native header: magic tag, header size, a sample-count
 * placeholder (patched in sox_write_trailer()), the sample rate as a
 * double, the channel count and the comment size, followed by the
 * comment text zero-padded to an 8-byte boundary.  Byte order and the
 * magic tag direction are selected by the codec id.
 *
 * Fix: cast the comment string for put_buffer(), which takes
 * unsigned char * while AVMetadataTag.value is char * (pointer
 * signedness mismatch, a compiler diagnostic in C).
 *
 * @return 0 on success, -1 if the codec is not 32-bit LE/BE PCM
 */
static int sox_write_header(AVFormatContext *s)
{
    SoXContext *sox = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[0]->codec;
    AVMetadataTag *comment;
    size_t comment_len = 0, comment_size;

    comment = av_metadata_get(s->metadata, "comment", NULL, 0);
    if (comment)
        comment_len = strlen(comment->value);
    /* the comment field is zero-padded up to a multiple of 8 bytes */
    comment_size = (comment_len + 7) & ~7;

    sox->header_size = SOX_FIXED_HDR + comment_size;

    if (enc->codec_id == CODEC_ID_PCM_S32LE) {
        put_tag(pb, ".SoX");
        put_le32(pb, sox->header_size);
        put_le64(pb, 0); /* number of samples, patched in the trailer */
        put_le64(pb, av_dbl2int(enc->sample_rate));
        put_le32(pb, enc->channels);
        put_le32(pb, comment_size);
    } else if (enc->codec_id == CODEC_ID_PCM_S32BE) {
        put_tag(pb, "XoS."); /* reversed magic marks big-endian data */
        put_be32(pb, sox->header_size);
        put_be64(pb, 0); /* number of samples, patched in the trailer */
        put_be64(pb, av_dbl2int(enc->sample_rate));
        put_be32(pb, enc->channels);
        put_be32(pb, comment_size);
    } else {
        av_log(s, AV_LOG_ERROR, "invalid codec; use pcm_s32le or pcm_s32be\n");
        return -1;
    }

    if (comment_len)
        put_buffer(pb, (const uint8_t *)comment->value, comment_len);

    /* pad the comment field with zero bytes */
    for ( ; comment_size > comment_len; comment_len++)
        put_byte(pb, 0);

    put_flush_packet(pb);

    return 0;
}
/* Write one packet of raw sample data directly to the output. */
static int sox_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    put_buffer(s->pb, pkt->data, pkt->size);
    return 0;
}
/**
 * Patch the number-of-samples field (written as 0 in the header) now
 * that the final file size is known.  Only possible when the output
 * is seekable.
 */
static int sox_write_trailer(AVFormatContext *s)
{
    SoXContext *sox = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[0]->codec;

    if (!url_is_streamed(s->pb)) {
        /* update number of samples */
        int64_t file_size = url_ftell(pb);
        /* >>2: samples are 4 bytes each; the extra -4 presumably accounts
         * for the magic tag not included in header_size -- TODO confirm */
        int64_t num_samples = (file_size - sox->header_size - 4LL) >> 2LL;
        url_fseek(pb, 8, SEEK_SET); /* offset of the sample-count field */
        if (enc->codec_id == CODEC_ID_PCM_S32LE) {
            put_le64(pb, num_samples);
        } else
            put_be64(pb, num_samples);
        url_fseek(pb, file_size, SEEK_SET);
        put_flush_packet(pb);
    }
    return 0;
}
AVOutputFormat sox_muxer = {
"sox",
NULL_IF_CONFIG_SMALL("SoX native format"),
NULL,
"sox",
sizeof(SoXContext),
CODEC_ID_PCM_S32LE,
CODEC_ID_NONE,
sox_write_header,
sox_write_packet,
sox_write_trailer,
};
| 123linslouis-android-video-cutter | jni/libavformat/soxenc.c | C | asf20 | 3,617 |
/*
* raw FLAC muxer
* Copyright (c) 2006-2009 Justin Ruggles
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/flac.h"
#include "avformat.h"
#include "flacenc.h"
#include "metadata.h"
#include "vorbiscomment.h"
#include "libavcodec/bytestream.h"
/* Write a PADDING metadata block: 1-byte block header (type 0x01, high
   bit set when this is the last metadata block), a 24-bit length, then
   n_padding_bytes zero bytes. */
static int flac_write_block_padding(ByteIOContext *pb, unsigned int n_padding_bytes,
                                    int last_block)
{
    unsigned int i;

    put_byte(pb, last_block ? 0x81 : 0x01);
    put_be24(pb, n_padding_bytes);
    for (i = 0; i < n_padding_bytes; i++)
        put_byte(pb, 0);
    return 0;
}
/**
 * Write a VORBIS_COMMENT metadata block.
 *
 * Serializes the metadata into a temporary buffer prefixed with the
 * 1-byte block header (type 0x04, high bit set when last) and the
 * 24-bit payload length, then writes everything in one call.
 *
 * Fix: removed the dead store "p = NULL;" after av_freep(&p0) -- p is a
 * local cursor that immediately goes out of scope.
 *
 * @param bitexact use a fixed vendor string instead of LIBAVFORMAT_IDENT
 *                 so the output is reproducible across builds
 * @return 0 on success, AVERROR(ENOMEM) if the buffer allocation fails
 */
static int flac_write_block_comment(ByteIOContext *pb, AVMetadata *m,
                                    int last_block, int bitexact)
{
    const char *vendor = bitexact ? "ffmpeg" : LIBAVFORMAT_IDENT;
    unsigned int len, count;
    uint8_t *p, *p0;

    len = ff_vorbiscomment_length(m, vendor, &count);
    p0 = av_malloc(len+4);
    if (!p0)
        return AVERROR(ENOMEM);
    p = p0;

    bytestream_put_byte(&p, last_block ? 0x84 : 0x04);
    bytestream_put_be24(&p, len);
    ff_vorbiscomment_write(&p, m, vendor, count);

    put_buffer(pb, p0, len+4);
    av_freep(&p0);
    return 0;
}
/**
 * Write the raw FLAC stream header: the fLaC signature plus STREAMINFO
 * (via ff_flac_write_header; rewritten with real values in the trailer),
 * then a VORBIS_COMMENT block and a final PADDING block.
 */
static int flac_write_header(struct AVFormatContext *s)
{
    int ret;
    AVCodecContext *codec = s->streams[0]->codec;

    ret = ff_flac_write_header(s->pb, codec, 0);
    if (ret)
        return ret;

    ret = flac_write_block_comment(s->pb, s->metadata, 0,
                                   codec->flags & CODEC_FLAG_BITEXACT);
    if (ret)
        return ret;

    /* The command line flac encoder defaults to placing a seekpoint
     * every 10s. So one might add padding to allow that later
     * but there seems to be no simple way to get the duration here.
     * So let's try the flac default of 8192 bytes */
    flac_write_block_padding(s->pb, 8192, 1);

    return ret;
}
/**
 * Finalize the stream: rewrite the STREAMINFO block near the start of the
 * file with the values the encoder filled in during encoding (they are
 * only complete once encoding has finished).  On a non-seekable output
 * the stale header is left in place with a warning.
 * @return 0 on success, -1 if the codec extradata is not valid STREAMINFO.
 */
static int flac_write_trailer(struct AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    uint8_t *streaminfo;
    enum FLACExtradataFormat format;
    int64_t file_size;

    if (!ff_flac_is_extradata_valid(s->streams[0]->codec, &format, &streaminfo))
        return -1;

    if (!url_is_streamed(pb)) {
        /* rewrite the STREAMINFO header block data, which starts at
         * byte offset 8 (as written by ff_flac_write_header) */
        file_size = url_ftell(pb);
        url_fseek(pb, 8, SEEK_SET);
        put_buffer(pb, streaminfo, FLAC_STREAMINFO_SIZE);
        /* restore the position and flush so the file ends correctly */
        url_fseek(pb, file_size, SEEK_SET);
        put_flush_packet(pb);
    } else {
        av_log(s, AV_LOG_WARNING, "unable to rewrite FLAC header.\n");
    }
    return 0;
}
/** Pass an encoded FLAC frame straight through to the output. */
static int flac_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *out = s->pb;

    put_buffer(out, pkt->data, pkt->size);
    put_flush_packet(out);
    return 0;
}
/* Raw FLAC muxer.  Converted from mixed positional/designated initializers
 * to all-designated form for robustness against structure changes. */
AVOutputFormat flac_muxer = {
    .name           = "flac",
    .long_name      = NULL_IF_CONFIG_SMALL("raw FLAC"),
    .mime_type      = "audio/x-flac",
    .extensions     = "flac",
    .priv_data_size = 0,
    .audio_codec    = CODEC_ID_FLAC,
    .video_codec    = CODEC_ID_NONE,
    .write_header   = flac_write_header,
    .write_packet   = flac_write_packet,
    .write_trailer  = flac_write_trailer,
    .flags          = AVFMT_NOTIMESTAMPS,
    .metadata_conv  = ff_vorbiscomment_metadata_conv,
};
| 123linslouis-android-video-cutter | jni/libavformat/flacenc.c | C | asf20 | 3,772 |
/*
* VorbisComment writer
* Copyright (c) 2009 James Darnley
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_VORBISCOMMENT_H
#define AVFORMAT_VORBISCOMMENT_H
#include "avformat.h"
#include "metadata.h"
/**
* Calculates the length in bytes of a VorbisComment. This is the minimum
* size required by ff_vorbiscomment_write().
*
* @param m The metadata structure to be parsed. For no metadata, set to NULL.
* @param vendor_string The vendor string to be added into the VorbisComment.
* For no string, set to an empty string.
* @param count Pointer to store the number of tags in m because m->count is "not allowed"
* @return The length in bytes.
*/
int ff_vorbiscomment_length(AVMetadata *m, const char *vendor_string,
unsigned *count);
/**
* Writes a VorbisComment into a buffer. The buffer, p, must have enough
* space to hold the whole VorbisComment. The minimum size required can be
* obtained by passing the same AVMetadata and vendor_string to
* ff_vorbiscomment_length().
*
* @param p The buffer in which to write.
* @param m The metadata struct to write.
* @param vendor_string The vendor string to write.
* @param count The number of tags in m because m->count is "not allowed"
*/
int ff_vorbiscomment_write(uint8_t **p, AVMetadata *m,
const char *vendor_string, const unsigned count);
extern const AVMetadataConv ff_vorbiscomment_metadata_conv[];
#endif /* AVFORMAT_VORBISCOMMENT_H */
| 123linslouis-android-video-cutter | jni/libavformat/vorbiscomment.h | C | asf20 | 2,204 |
/**
* @file
* Psygnosis YOP demuxer
*
* Copyright (C) 2010 Mohamed Naufal Basheer <naufal11@gmail.com>
* derived from the code by
* Copyright (C) 2009 Thomas P. Higdon <thomas.p.higdon@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
/* Demuxer state.  Each YOP frame multiplexes palette + video + audio;
 * the audio is returned to the caller first, so the video payload is
 * buffered here until the following read call. */
typedef struct yop_dec_context {
    AVPacket video_packet;    // pending video packet, delivered after the audio
    int odd_frame;            // frame parity flag, stored in data[0] for the decoder
    int frame_size;           // total size of one multiplexed frame, in bytes
    int audio_block_length;   // bytes of audio data per frame
    int palette_size;         // bytes of palette data at the start of each frame
} YopDecContext;
/**
 * Probe for the YOP signature: "YO" magic, non-zero bytes at offsets 6
 * and 7, and even low bits at offsets 8 and 10.
 */
static int yop_probe(AVProbeData *probe_packet)
{
    const uint8_t *b = probe_packet->buf;

    if (AV_RB16(b) != AV_RB16("YO"))
        return 0;
    if (!b[6] || !b[7])
        return 0;
    if ((b[8] & 1) || (b[10] & 1))
        return 0;
    return AVPROBE_SCORE_MAX * 3 / 4;
}
/**
 * Parse the fixed YOP header: frame rate, frame size, video dimensions and
 * 8 bytes of decoder extradata, then validate the derived audio block
 * length and palette size.  Data proper starts at offset 2048.
 * @return 0 on success or a negative AVERROR code.
 */
static int yop_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    YopDecContext *yop = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVCodecContext *audio_dec, *video_dec;
    AVStream *audio_stream, *video_stream;
    int frame_rate, ret;

    audio_stream = av_new_stream(s, 0);
    video_stream = av_new_stream(s, 1);
    /* Fix: av_new_stream() can return NULL on allocation failure; the
     * original dereferenced the results unchecked. */
    if (!audio_stream || !video_stream)
        return AVERROR(ENOMEM);

    // Extra data that will be passed to the decoder
    video_stream->codec->extradata_size = 8;
    video_stream->codec->extradata = av_mallocz(video_stream->codec->extradata_size +
                                                FF_INPUT_BUFFER_PADDING_SIZE);
    if (!video_stream->codec->extradata)
        return AVERROR(ENOMEM);

    // Audio
    audio_dec               = audio_stream->codec;
    audio_dec->codec_type   = AVMEDIA_TYPE_AUDIO;
    audio_dec->codec_id     = CODEC_ID_ADPCM_IMA_WS;
    audio_dec->channels     = 1;
    audio_dec->sample_rate  = 22050;

    // Video
    video_dec             = video_stream->codec;
    video_dec->codec_type = AVMEDIA_TYPE_VIDEO;
    video_dec->codec_id   = CODEC_ID_YOP;

    url_fskip(pb, 6);

    frame_rate        = get_byte(pb);
    yop->frame_size   = get_byte(pb) * 2048;
    video_dec->width  = get_le16(pb);
    video_dec->height = get_le16(pb);

    video_stream->sample_aspect_ratio = (AVRational){1, 2};

    ret = get_buffer(pb, video_dec->extradata, 8);
    if (ret < 8)
        return ret < 0 ? ret : AVERROR_EOF;

    yop->palette_size       = video_dec->extradata[0] * 3 + 4;
    yop->audio_block_length = AV_RL16(video_dec->extradata + 6);

    // 1840 samples per frame, 1 nibble per sample; hence 1840/2 = 920
    if (yop->audio_block_length < 920 ||
        yop->audio_block_length + yop->palette_size >= yop->frame_size) {
        av_log(s, AV_LOG_ERROR, "YOP has invalid header\n");
        return AVERROR_INVALIDDATA;
    }

    /* stream data starts after the 2048-byte header block */
    url_fseek(pb, 2048, SEEK_SET);

    av_set_pts_info(video_stream, 32, 1, frame_rate);
    return 0;
}
/**
 * Read one multiplexed frame.  Each frame holds palette + audio + video;
 * the audio block is returned to the caller first and the palette+video
 * data is stashed in yop->video_packet, to be handed out on the next call.
 * @return the delivered packet size, or a negative error code.
 */
static int yop_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    YopDecContext *yop = s->priv_data;
    ByteIOContext *pb = s->pb;
    int ret;
    int actual_video_data_size = yop->frame_size -
                                 yop->audio_block_length - yop->palette_size;

    yop->video_packet.stream_index = 1;

    if (yop->video_packet.data) {
        /* a video packet is pending from the previous call: deliver it */
        *pkt                   = yop->video_packet;
        yop->video_packet.data = NULL;
        yop->video_packet.size = 0;
        /* first byte carries the frame parity flag for the decoder */
        pkt->data[0]           = yop->odd_frame;
        pkt->flags            |= AV_PKT_FLAG_KEY;

        yop->odd_frame ^= 1;
        return pkt->size;
    }

    ret = av_new_packet(&yop->video_packet,
                        yop->frame_size - yop->audio_block_length);
    if (ret < 0)
        return ret;

    yop->video_packet.pos = url_ftell(pb);

    /* the palette goes in front of the video data in the stored packet */
    ret = get_buffer(pb, yop->video_packet.data, yop->palette_size);
    if (ret < 0) {
        goto err_out;
    } else if (ret < yop->palette_size) {
        ret = AVERROR_EOF;
        goto err_out;
    }

    /* 920 bytes of audio = 1840 nibble samples (see header validation) */
    ret = av_get_packet(pb, pkt, 920);
    if (ret < 0)
        goto err_out;

    // Set position to the start of the frame
    pkt->pos = yop->video_packet.pos;

    /* skip whatever remains of the audio block beyond what was read */
    url_fskip(pb, yop->audio_block_length - ret);

    ret = get_buffer(pb, yop->video_packet.data + yop->palette_size,
                     actual_video_data_size);
    if (ret < 0)
        goto err_out;
    else if (ret < actual_video_data_size)
        /* short read at EOF: shrink the stored packet to what was read */
        av_shrink_packet(&yop->video_packet, yop->palette_size + ret);

    // Arbitrarily return the audio data first
    return yop->audio_block_length;

err_out:
    av_free_packet(&yop->video_packet);
    return ret;
}
/** Release the buffered video packet, if any, on demuxer close. */
static int yop_read_close(AVFormatContext *s)
{
    av_free_packet(&((YopDecContext *)s->priv_data)->video_packet);
    return 0;
}
/**
 * Seek by exploiting the constant frame size: frame n starts at
 * data_offset + n * frame_size.  Only the video stream (index 1) is
 * seekable; the timestamp is interpreted as a frame number.
 */
static int yop_read_seek(AVFormatContext *s, int stream_index,
                         int64_t timestamp, int flags)
{
    YopDecContext *yop = s->priv_data;
    int64_t frame_pos, pos_min, pos_max;
    int frame_count;

    /* any buffered video packet belongs to the old position: drop it */
    av_free_packet(&yop->video_packet);

    if (!stream_index)
        return -1;

    pos_min     = s->data_offset;
    pos_max     = url_fsize(s->pb) - yop->frame_size;
    frame_count = (pos_max - pos_min) / yop->frame_size;

    /* clamp the target frame number to the file's range */
    timestamp = FFMAX(0, FFMIN(frame_count, timestamp));

    frame_pos = timestamp * yop->frame_size + pos_min;
    /* keep the parity flag consistent with the new position */
    yop->odd_frame = timestamp & 1;

    url_fseek(s->pb, frame_pos, SEEK_SET);
    return 0;
}
/* Psygnosis YOP demuxer.  Converted from positional to designated
 * initializers for robustness against structure changes. */
AVInputFormat yop_demuxer = {
    .name           = "yop",
    .long_name      = NULL_IF_CONFIG_SMALL("Psygnosis YOP Format"),
    .priv_data_size = sizeof(YopDecContext),
    .read_probe     = yop_probe,
    .read_header    = yop_read_header,
    .read_packet    = yop_read_packet,
    .read_close     = yop_read_close,
    .read_seek      = yop_read_seek,
    .extensions     = "yop",
    .flags          = AVFMT_GENERIC_INDEX,
};
| 123linslouis-android-video-cutter | jni/libavformat/yop.c | C | asf20 | 6,340 |
/*
* Default Palettes for Quicktime Files
* Automatically generated from a utility derived from XAnim:
* http://xanim.va.pubnix.com/home.html
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_QTPALETTE_H
#define AVFORMAT_QTPALETTE_H
#include <inttypes.h>
/* Default Quicktime palette for 2-bit (4-color) video,
 * 3 bytes (R, G, B) per entry. */
static const uint8_t ff_qt_default_palette_4[4 * 3] = {
    0x93, 0x65, 0x5E,
    0xFF, 0xFF, 0xFF,
    0xDF, 0xD0, 0xAB,
    0x00, 0x00, 0x00
};
/* Default Quicktime palette for 4-bit (16-color) video,
 * 3 bytes (R, G, B) per entry. */
static const uint8_t ff_qt_default_palette_16[16 * 3] = {
    0xFF, 0xFB, 0xFF,
    0xEF, 0xD9, 0xBB,
    0xE8, 0xC9, 0xB1,
    0x93, 0x65, 0x5E,
    0xFC, 0xDE, 0xE8,
    0x9D, 0x88, 0x91,
    0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF,
    0x47, 0x48, 0x37,
    0x7A, 0x5E, 0x55,
    0xDF, 0xD0, 0xAB,
    0xFF, 0xFB, 0xF9,
    0xE8, 0xCA, 0xC5,
    0x8A, 0x7C, 0x77,
    0x00, 0x00, 0x00
};
/* Default Quicktime palette for 8-bit (256-color) video, 3 bytes
 * (R, G, B) per entry.  Entries 0-214 form a 6x6x6 color cube over the
 * values {FF, CC, 99, 66, 33, 00}; entries 215-254 are descending red,
 * green, blue and gray ramps; entry 255 is black. */
static const uint8_t ff_qt_default_palette_256[256 * 3] = {
    /* 0, 0x00 */ 0xFF, 0xFF, 0xFF,
    /* 1, 0x01 */ 0xFF, 0xFF, 0xCC,
    /* 2, 0x02 */ 0xFF, 0xFF, 0x99,
    /* 3, 0x03 */ 0xFF, 0xFF, 0x66,
    /* 4, 0x04 */ 0xFF, 0xFF, 0x33,
    /* 5, 0x05 */ 0xFF, 0xFF, 0x00,
    /* 6, 0x06 */ 0xFF, 0xCC, 0xFF,
    /* 7, 0x07 */ 0xFF, 0xCC, 0xCC,
    /* 8, 0x08 */ 0xFF, 0xCC, 0x99,
    /* 9, 0x09 */ 0xFF, 0xCC, 0x66,
    /* 10, 0x0A */ 0xFF, 0xCC, 0x33,
    /* 11, 0x0B */ 0xFF, 0xCC, 0x00,
    /* 12, 0x0C */ 0xFF, 0x99, 0xFF,
    /* 13, 0x0D */ 0xFF, 0x99, 0xCC,
    /* 14, 0x0E */ 0xFF, 0x99, 0x99,
    /* 15, 0x0F */ 0xFF, 0x99, 0x66,
    /* 16, 0x10 */ 0xFF, 0x99, 0x33,
    /* 17, 0x11 */ 0xFF, 0x99, 0x00,
    /* 18, 0x12 */ 0xFF, 0x66, 0xFF,
    /* 19, 0x13 */ 0xFF, 0x66, 0xCC,
    /* 20, 0x14 */ 0xFF, 0x66, 0x99,
    /* 21, 0x15 */ 0xFF, 0x66, 0x66,
    /* 22, 0x16 */ 0xFF, 0x66, 0x33,
    /* 23, 0x17 */ 0xFF, 0x66, 0x00,
    /* 24, 0x18 */ 0xFF, 0x33, 0xFF,
    /* 25, 0x19 */ 0xFF, 0x33, 0xCC,
    /* 26, 0x1A */ 0xFF, 0x33, 0x99,
    /* 27, 0x1B */ 0xFF, 0x33, 0x66,
    /* 28, 0x1C */ 0xFF, 0x33, 0x33,
    /* 29, 0x1D */ 0xFF, 0x33, 0x00,
    /* 30, 0x1E */ 0xFF, 0x00, 0xFF,
    /* 31, 0x1F */ 0xFF, 0x00, 0xCC,
    /* 32, 0x20 */ 0xFF, 0x00, 0x99,
    /* 33, 0x21 */ 0xFF, 0x00, 0x66,
    /* 34, 0x22 */ 0xFF, 0x00, 0x33,
    /* 35, 0x23 */ 0xFF, 0x00, 0x00,
    /* 36, 0x24 */ 0xCC, 0xFF, 0xFF,
    /* 37, 0x25 */ 0xCC, 0xFF, 0xCC,
    /* 38, 0x26 */ 0xCC, 0xFF, 0x99,
    /* 39, 0x27 */ 0xCC, 0xFF, 0x66,
    /* 40, 0x28 */ 0xCC, 0xFF, 0x33,
    /* 41, 0x29 */ 0xCC, 0xFF, 0x00,
    /* 42, 0x2A */ 0xCC, 0xCC, 0xFF,
    /* 43, 0x2B */ 0xCC, 0xCC, 0xCC,
    /* 44, 0x2C */ 0xCC, 0xCC, 0x99,
    /* 45, 0x2D */ 0xCC, 0xCC, 0x66,
    /* 46, 0x2E */ 0xCC, 0xCC, 0x33,
    /* 47, 0x2F */ 0xCC, 0xCC, 0x00,
    /* 48, 0x30 */ 0xCC, 0x99, 0xFF,
    /* 49, 0x31 */ 0xCC, 0x99, 0xCC,
    /* 50, 0x32 */ 0xCC, 0x99, 0x99,
    /* 51, 0x33 */ 0xCC, 0x99, 0x66,
    /* 52, 0x34 */ 0xCC, 0x99, 0x33,
    /* 53, 0x35 */ 0xCC, 0x99, 0x00,
    /* 54, 0x36 */ 0xCC, 0x66, 0xFF,
    /* 55, 0x37 */ 0xCC, 0x66, 0xCC,
    /* 56, 0x38 */ 0xCC, 0x66, 0x99,
    /* 57, 0x39 */ 0xCC, 0x66, 0x66,
    /* 58, 0x3A */ 0xCC, 0x66, 0x33,
    /* 59, 0x3B */ 0xCC, 0x66, 0x00,
    /* 60, 0x3C */ 0xCC, 0x33, 0xFF,
    /* 61, 0x3D */ 0xCC, 0x33, 0xCC,
    /* 62, 0x3E */ 0xCC, 0x33, 0x99,
    /* 63, 0x3F */ 0xCC, 0x33, 0x66,
    /* 64, 0x40 */ 0xCC, 0x33, 0x33,
    /* 65, 0x41 */ 0xCC, 0x33, 0x00,
    /* 66, 0x42 */ 0xCC, 0x00, 0xFF,
    /* 67, 0x43 */ 0xCC, 0x00, 0xCC,
    /* 68, 0x44 */ 0xCC, 0x00, 0x99,
    /* 69, 0x45 */ 0xCC, 0x00, 0x66,
    /* 70, 0x46 */ 0xCC, 0x00, 0x33,
    /* 71, 0x47 */ 0xCC, 0x00, 0x00,
    /* 72, 0x48 */ 0x99, 0xFF, 0xFF,
    /* 73, 0x49 */ 0x99, 0xFF, 0xCC,
    /* 74, 0x4A */ 0x99, 0xFF, 0x99,
    /* 75, 0x4B */ 0x99, 0xFF, 0x66,
    /* 76, 0x4C */ 0x99, 0xFF, 0x33,
    /* 77, 0x4D */ 0x99, 0xFF, 0x00,
    /* 78, 0x4E */ 0x99, 0xCC, 0xFF,
    /* 79, 0x4F */ 0x99, 0xCC, 0xCC,
    /* 80, 0x50 */ 0x99, 0xCC, 0x99,
    /* 81, 0x51 */ 0x99, 0xCC, 0x66,
    /* 82, 0x52 */ 0x99, 0xCC, 0x33,
    /* 83, 0x53 */ 0x99, 0xCC, 0x00,
    /* 84, 0x54 */ 0x99, 0x99, 0xFF,
    /* 85, 0x55 */ 0x99, 0x99, 0xCC,
    /* 86, 0x56 */ 0x99, 0x99, 0x99,
    /* 87, 0x57 */ 0x99, 0x99, 0x66,
    /* 88, 0x58 */ 0x99, 0x99, 0x33,
    /* 89, 0x59 */ 0x99, 0x99, 0x00,
    /* 90, 0x5A */ 0x99, 0x66, 0xFF,
    /* 91, 0x5B */ 0x99, 0x66, 0xCC,
    /* 92, 0x5C */ 0x99, 0x66, 0x99,
    /* 93, 0x5D */ 0x99, 0x66, 0x66,
    /* 94, 0x5E */ 0x99, 0x66, 0x33,
    /* 95, 0x5F */ 0x99, 0x66, 0x00,
    /* 96, 0x60 */ 0x99, 0x33, 0xFF,
    /* 97, 0x61 */ 0x99, 0x33, 0xCC,
    /* 98, 0x62 */ 0x99, 0x33, 0x99,
    /* 99, 0x63 */ 0x99, 0x33, 0x66,
    /* 100, 0x64 */ 0x99, 0x33, 0x33,
    /* 101, 0x65 */ 0x99, 0x33, 0x00,
    /* 102, 0x66 */ 0x99, 0x00, 0xFF,
    /* 103, 0x67 */ 0x99, 0x00, 0xCC,
    /* 104, 0x68 */ 0x99, 0x00, 0x99,
    /* 105, 0x69 */ 0x99, 0x00, 0x66,
    /* 106, 0x6A */ 0x99, 0x00, 0x33,
    /* 107, 0x6B */ 0x99, 0x00, 0x00,
    /* 108, 0x6C */ 0x66, 0xFF, 0xFF,
    /* 109, 0x6D */ 0x66, 0xFF, 0xCC,
    /* 110, 0x6E */ 0x66, 0xFF, 0x99,
    /* 111, 0x6F */ 0x66, 0xFF, 0x66,
    /* 112, 0x70 */ 0x66, 0xFF, 0x33,
    /* 113, 0x71 */ 0x66, 0xFF, 0x00,
    /* 114, 0x72 */ 0x66, 0xCC, 0xFF,
    /* 115, 0x73 */ 0x66, 0xCC, 0xCC,
    /* 116, 0x74 */ 0x66, 0xCC, 0x99,
    /* 117, 0x75 */ 0x66, 0xCC, 0x66,
    /* 118, 0x76 */ 0x66, 0xCC, 0x33,
    /* 119, 0x77 */ 0x66, 0xCC, 0x00,
    /* 120, 0x78 */ 0x66, 0x99, 0xFF,
    /* 121, 0x79 */ 0x66, 0x99, 0xCC,
    /* 122, 0x7A */ 0x66, 0x99, 0x99,
    /* 123, 0x7B */ 0x66, 0x99, 0x66,
    /* 124, 0x7C */ 0x66, 0x99, 0x33,
    /* 125, 0x7D */ 0x66, 0x99, 0x00,
    /* 126, 0x7E */ 0x66, 0x66, 0xFF,
    /* 127, 0x7F */ 0x66, 0x66, 0xCC,
    /* 128, 0x80 */ 0x66, 0x66, 0x99,
    /* 129, 0x81 */ 0x66, 0x66, 0x66,
    /* 130, 0x82 */ 0x66, 0x66, 0x33,
    /* 131, 0x83 */ 0x66, 0x66, 0x00,
    /* 132, 0x84 */ 0x66, 0x33, 0xFF,
    /* 133, 0x85 */ 0x66, 0x33, 0xCC,
    /* 134, 0x86 */ 0x66, 0x33, 0x99,
    /* 135, 0x87 */ 0x66, 0x33, 0x66,
    /* 136, 0x88 */ 0x66, 0x33, 0x33,
    /* 137, 0x89 */ 0x66, 0x33, 0x00,
    /* 138, 0x8A */ 0x66, 0x00, 0xFF,
    /* 139, 0x8B */ 0x66, 0x00, 0xCC,
    /* 140, 0x8C */ 0x66, 0x00, 0x99,
    /* 141, 0x8D */ 0x66, 0x00, 0x66,
    /* 142, 0x8E */ 0x66, 0x00, 0x33,
    /* 143, 0x8F */ 0x66, 0x00, 0x00,
    /* 144, 0x90 */ 0x33, 0xFF, 0xFF,
    /* 145, 0x91 */ 0x33, 0xFF, 0xCC,
    /* 146, 0x92 */ 0x33, 0xFF, 0x99,
    /* 147, 0x93 */ 0x33, 0xFF, 0x66,
    /* 148, 0x94 */ 0x33, 0xFF, 0x33,
    /* 149, 0x95 */ 0x33, 0xFF, 0x00,
    /* 150, 0x96 */ 0x33, 0xCC, 0xFF,
    /* 151, 0x97 */ 0x33, 0xCC, 0xCC,
    /* 152, 0x98 */ 0x33, 0xCC, 0x99,
    /* 153, 0x99 */ 0x33, 0xCC, 0x66,
    /* 154, 0x9A */ 0x33, 0xCC, 0x33,
    /* 155, 0x9B */ 0x33, 0xCC, 0x00,
    /* 156, 0x9C */ 0x33, 0x99, 0xFF,
    /* 157, 0x9D */ 0x33, 0x99, 0xCC,
    /* 158, 0x9E */ 0x33, 0x99, 0x99,
    /* 159, 0x9F */ 0x33, 0x99, 0x66,
    /* 160, 0xA0 */ 0x33, 0x99, 0x33,
    /* 161, 0xA1 */ 0x33, 0x99, 0x00,
    /* 162, 0xA2 */ 0x33, 0x66, 0xFF,
    /* 163, 0xA3 */ 0x33, 0x66, 0xCC,
    /* 164, 0xA4 */ 0x33, 0x66, 0x99,
    /* 165, 0xA5 */ 0x33, 0x66, 0x66,
    /* 166, 0xA6 */ 0x33, 0x66, 0x33,
    /* 167, 0xA7 */ 0x33, 0x66, 0x00,
    /* 168, 0xA8 */ 0x33, 0x33, 0xFF,
    /* 169, 0xA9 */ 0x33, 0x33, 0xCC,
    /* 170, 0xAA */ 0x33, 0x33, 0x99,
    /* 171, 0xAB */ 0x33, 0x33, 0x66,
    /* 172, 0xAC */ 0x33, 0x33, 0x33,
    /* 173, 0xAD */ 0x33, 0x33, 0x00,
    /* 174, 0xAE */ 0x33, 0x00, 0xFF,
    /* 175, 0xAF */ 0x33, 0x00, 0xCC,
    /* 176, 0xB0 */ 0x33, 0x00, 0x99,
    /* 177, 0xB1 */ 0x33, 0x00, 0x66,
    /* 178, 0xB2 */ 0x33, 0x00, 0x33,
    /* 179, 0xB3 */ 0x33, 0x00, 0x00,
    /* 180, 0xB4 */ 0x00, 0xFF, 0xFF,
    /* 181, 0xB5 */ 0x00, 0xFF, 0xCC,
    /* 182, 0xB6 */ 0x00, 0xFF, 0x99,
    /* 183, 0xB7 */ 0x00, 0xFF, 0x66,
    /* 184, 0xB8 */ 0x00, 0xFF, 0x33,
    /* 185, 0xB9 */ 0x00, 0xFF, 0x00,
    /* 186, 0xBA */ 0x00, 0xCC, 0xFF,
    /* 187, 0xBB */ 0x00, 0xCC, 0xCC,
    /* 188, 0xBC */ 0x00, 0xCC, 0x99,
    /* 189, 0xBD */ 0x00, 0xCC, 0x66,
    /* 190, 0xBE */ 0x00, 0xCC, 0x33,
    /* 191, 0xBF */ 0x00, 0xCC, 0x00,
    /* 192, 0xC0 */ 0x00, 0x99, 0xFF,
    /* 193, 0xC1 */ 0x00, 0x99, 0xCC,
    /* 194, 0xC2 */ 0x00, 0x99, 0x99,
    /* 195, 0xC3 */ 0x00, 0x99, 0x66,
    /* 196, 0xC4 */ 0x00, 0x99, 0x33,
    /* 197, 0xC5 */ 0x00, 0x99, 0x00,
    /* 198, 0xC6 */ 0x00, 0x66, 0xFF,
    /* 199, 0xC7 */ 0x00, 0x66, 0xCC,
    /* 200, 0xC8 */ 0x00, 0x66, 0x99,
    /* 201, 0xC9 */ 0x00, 0x66, 0x66,
    /* 202, 0xCA */ 0x00, 0x66, 0x33,
    /* 203, 0xCB */ 0x00, 0x66, 0x00,
    /* 204, 0xCC */ 0x00, 0x33, 0xFF,
    /* 205, 0xCD */ 0x00, 0x33, 0xCC,
    /* 206, 0xCE */ 0x00, 0x33, 0x99,
    /* 207, 0xCF */ 0x00, 0x33, 0x66,
    /* 208, 0xD0 */ 0x00, 0x33, 0x33,
    /* 209, 0xD1 */ 0x00, 0x33, 0x00,
    /* 210, 0xD2 */ 0x00, 0x00, 0xFF,
    /* 211, 0xD3 */ 0x00, 0x00, 0xCC,
    /* 212, 0xD4 */ 0x00, 0x00, 0x99,
    /* 213, 0xD5 */ 0x00, 0x00, 0x66,
    /* 214, 0xD6 */ 0x00, 0x00, 0x33,
    /* red ramp */
    /* 215, 0xD7 */ 0xEE, 0x00, 0x00,
    /* 216, 0xD8 */ 0xDD, 0x00, 0x00,
    /* 217, 0xD9 */ 0xBB, 0x00, 0x00,
    /* 218, 0xDA */ 0xAA, 0x00, 0x00,
    /* 219, 0xDB */ 0x88, 0x00, 0x00,
    /* 220, 0xDC */ 0x77, 0x00, 0x00,
    /* 221, 0xDD */ 0x55, 0x00, 0x00,
    /* 222, 0xDE */ 0x44, 0x00, 0x00,
    /* 223, 0xDF */ 0x22, 0x00, 0x00,
    /* 224, 0xE0 */ 0x11, 0x00, 0x00,
    /* green ramp */
    /* 225, 0xE1 */ 0x00, 0xEE, 0x00,
    /* 226, 0xE2 */ 0x00, 0xDD, 0x00,
    /* 227, 0xE3 */ 0x00, 0xBB, 0x00,
    /* 228, 0xE4 */ 0x00, 0xAA, 0x00,
    /* 229, 0xE5 */ 0x00, 0x88, 0x00,
    /* 230, 0xE6 */ 0x00, 0x77, 0x00,
    /* 231, 0xE7 */ 0x00, 0x55, 0x00,
    /* 232, 0xE8 */ 0x00, 0x44, 0x00,
    /* 233, 0xE9 */ 0x00, 0x22, 0x00,
    /* 234, 0xEA */ 0x00, 0x11, 0x00,
    /* blue ramp */
    /* 235, 0xEB */ 0x00, 0x00, 0xEE,
    /* 236, 0xEC */ 0x00, 0x00, 0xDD,
    /* 237, 0xED */ 0x00, 0x00, 0xBB,
    /* 238, 0xEE */ 0x00, 0x00, 0xAA,
    /* 239, 0xEF */ 0x00, 0x00, 0x88,
    /* 240, 0xF0 */ 0x00, 0x00, 0x77,
    /* 241, 0xF1 */ 0x00, 0x00, 0x55,
    /* 242, 0xF2 */ 0x00, 0x00, 0x44,
    /* 243, 0xF3 */ 0x00, 0x00, 0x22,
    /* 244, 0xF4 */ 0x00, 0x00, 0x11,
    /* gray ramp */
    /* 245, 0xF5 */ 0xEE, 0xEE, 0xEE,
    /* 246, 0xF6 */ 0xDD, 0xDD, 0xDD,
    /* 247, 0xF7 */ 0xBB, 0xBB, 0xBB,
    /* 248, 0xF8 */ 0xAA, 0xAA, 0xAA,
    /* 249, 0xF9 */ 0x88, 0x88, 0x88,
    /* 250, 0xFA */ 0x77, 0x77, 0x77,
    /* 251, 0xFB */ 0x55, 0x55, 0x55,
    /* 252, 0xFC */ 0x44, 0x44, 0x44,
    /* 253, 0xFD */ 0x22, 0x22, 0x22,
    /* 254, 0xFE */ 0x11, 0x11, 0x11,
    /* 255, 0xFF */ 0x00, 0x00, 0x00
};
#endif /* AVFORMAT_QTPALETTE_H */
| 123linslouis-android-video-cutter | jni/libavformat/qtpalette.h | C | asf20 | 11,082 |
/*
* AVS demuxer.
* Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "voc.h"
/* Demuxer state for the AVS container. */
typedef struct avs_format {
    VocDecContext voc;          // state for the shared VOC audio parser
    AVStream *st_video;         // created on first video block, else NULL
    AVStream *st_audio;         // created on first audio block, else NULL
    int width;                  // frame width from the file header
    int height;                 // frame height from the file header
    int bits_per_sample;        // video bit depth from the file header
    int fps;                    // frame rate from the file header
    int nb_frames;              // total frame count from the file header
    int remaining_frame_size;   // bytes left to read in the current frame
    int remaining_audio_size;   // bytes left in the current audio block
} AvsFormat;
/* Sub-block type tags as they appear in the stream; the video and audio
 * values are also used as the ids passed to av_new_stream(). */
typedef enum avs_block_type {
    AVS_NONE      = 0x00,
    AVS_VIDEO     = 0x01,
    AVS_AUDIO     = 0x02,
    AVS_PALETTE   = 0x03,
    AVS_GAME_DATA = 0x04,
} AvsBlockType;
/** Probe for the 4-byte AVS signature 'w' 'W' 0x10 0x00. */
static int avs_probe(AVProbeData * p)
{
    const uint8_t *d = p->buf;

    return (d[0] == 'w' && d[1] == 'W' && d[2] == 0x10 && d[3] == 0) ? 50 : 0;
}
/**
 * Parse the AVS file header (width, height, bit depth, frame rate and
 * frame count).  Streams are created lazily when the first video/audio
 * blocks are seen, hence AVFMTCTX_NOHEADER.
 */
static int avs_read_header(AVFormatContext * s, AVFormatParameters * ap)
{
    AvsFormat *avs = s->priv_data;

    s->ctx_flags |= AVFMTCTX_NOHEADER;

    url_fskip(s->pb, 4);
    avs->width           = get_le16(s->pb);
    avs->height          = get_le16(s->pb);
    avs->bits_per_sample = get_le16(s->pb);
    avs->fps             = get_le16(s->pb);
    avs->nb_frames       = get_le32(s->pb);
    avs->remaining_frame_size = 0;
    avs->remaining_audio_size = 0;

    avs->st_video = avs->st_audio = NULL;

    /* unexpected dimensions are only reported, demuxing continues;
     * fixed the ungrammatical user-facing message here */
    if (avs->width != 318 || avs->height != 198)
        av_log(s, AV_LOG_ERROR, "This AVS file claims to be %dx%d "
               "while the AVS format is supposed to be 318x198 only.\n",
               avs->width, avs->height);

    return 0;
}
/**
 * Deliver a video packet.  Any pending palette block is prepended and the
 * 4-byte chunk headers (already consumed from the stream by the caller)
 * are reconstructed in front of the data, so the decoder sees the chunks
 * exactly as stored in the file.
 */
static int
avs_read_video_packet(AVFormatContext * s, AVPacket * pkt,
                      AvsBlockType type, int sub_type, int size,
                      uint8_t * palette, int palette_size)
{
    AvsFormat *avs = s->priv_data;
    int ret;

    ret = av_new_packet(pkt, size + palette_size);
    if (ret < 0)
        return ret;

    if (palette_size) {
        /* synthesize the palette chunk header in front of the palette data */
        pkt->data[0] = 0x00;
        pkt->data[1] = 0x03;
        pkt->data[2] = palette_size & 0xFF;
        pkt->data[3] = (palette_size >> 8) & 0xFF;
        memcpy(pkt->data + 4, palette, palette_size - 4);
    }

    /* reconstruct the video chunk header */
    pkt->data[palette_size + 0] = sub_type;
    pkt->data[palette_size + 1] = type;
    pkt->data[palette_size + 2] = size & 0xFF;
    pkt->data[palette_size + 3] = (size >> 8) & 0xFF;
    /* size includes the 4 header bytes already read by the caller */
    ret = get_buffer(s->pb, pkt->data + palette_size + 4, size - 4) + 4;
    if (ret < size) {
        av_free_packet(pkt);
        return AVERROR(EIO);
    }

    pkt->size = ret + palette_size;
    pkt->stream_index = avs->st_video->index;
    if (sub_type == 0)
        /* sub_type 0 is flagged as a keyframe */
        pkt->flags |= AV_PKT_FLAG_KEY;

    return 0;
}
/**
 * Read audio via the shared VOC parser, bounded by the number of audio
 * bytes remaining in the current block.
 * @return bytes consumed, 0 at end of stream, or a negative error code.
 */
static int avs_read_audio_packet(AVFormatContext * s, AVPacket * pkt)
{
    AvsFormat *avs = s->priv_data;
    int ret, size;

    size = url_ftell(s->pb);
    ret = voc_get_packet(s, pkt, avs->st_audio, avs->remaining_audio_size);
    /* measure how many bytes voc_get_packet() actually consumed */
    size = url_ftell(s->pb) - size;
    avs->remaining_audio_size -= size;

    if (ret == AVERROR(EIO))
        return 0;    /* this indicate EOS */
    if (ret < 0)
        return ret;

    pkt->stream_index = avs->st_audio->index;
    pkt->flags |= AV_PKT_FLAG_KEY;

    return size;
}
/**
 * Demultiplex one packet.  A frame is a sequence of typed sub-blocks
 * (palette, video, audio, game data).  Leftover audio from a previous
 * call is drained first; a palette block is remembered and prepended to
 * the next video packet; streams are created on first sight of their
 * block type; unknown blocks are skipped.
 */
static int avs_read_packet(AVFormatContext * s, AVPacket * pkt)
{
    AvsFormat *avs = s->priv_data;
    int sub_type = 0, size = 0;
    AvsBlockType type = AVS_NONE;
    int palette_size = 0;
    uint8_t palette[4 + 3 * 256];
    int ret;

    /* finish any partially-consumed audio block first */
    if (avs->remaining_audio_size > 0)
        if (avs_read_audio_packet(s, pkt) > 0)
            return 0;

    while (1) {
        if (avs->remaining_frame_size <= 0) {
            if (!get_le16(s->pb)) /* found EOF */
                return AVERROR(EIO);
            avs->remaining_frame_size = get_le16(s->pb) - 4;
        }

        while (avs->remaining_frame_size > 0) {
            /* sub-block header: sub_type, type, 16-bit size (header included) */
            sub_type = get_byte(s->pb);
            type = get_byte(s->pb);
            size = get_le16(s->pb);
            avs->remaining_frame_size -= size;

            switch (type) {
            case AVS_PALETTE:
                /* stash the palette; it is prepended to the next video packet */
                ret = get_buffer(s->pb, palette, size - 4);
                if (ret < size - 4)
                    return AVERROR(EIO);
                palette_size = size;
                break;

            case AVS_VIDEO:
                if (!avs->st_video) {
                    avs->st_video = av_new_stream(s, AVS_VIDEO);
                    if (avs->st_video == NULL)
                        return AVERROR(ENOMEM);
                    avs->st_video->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                    avs->st_video->codec->codec_id = CODEC_ID_AVS;
                    avs->st_video->codec->width = avs->width;
                    avs->st_video->codec->height = avs->height;
                    avs->st_video->codec->bits_per_coded_sample=avs->bits_per_sample;
                    avs->st_video->nb_frames = avs->nb_frames;
                    avs->st_video->codec->time_base = (AVRational) {
                    1, avs->fps};
                }
                return avs_read_video_packet(s, pkt, type, sub_type, size,
                                             palette, palette_size);

            case AVS_AUDIO:
                if (!avs->st_audio) {
                    avs->st_audio = av_new_stream(s, AVS_AUDIO);
                    if (avs->st_audio == NULL)
                        return AVERROR(ENOMEM);
                    avs->st_audio->codec->codec_type = AVMEDIA_TYPE_AUDIO;
                }
                avs->remaining_audio_size = size - 4;
                size = avs_read_audio_packet(s, pkt);
                if (size != 0)
                    return size;
                break;

            default:
                /* unknown block (e.g. game data): skip the payload */
                url_fskip(s->pb, size - 4);
            }
        }
    }
}
/* Nothing beyond priv_data (freed by the caller) is allocated, so there
 * is nothing to release here. */
static int avs_read_close(AVFormatContext * s)
{
    return 0;
}
AVInputFormat avs_demuxer = {
"avs",
NULL_IF_CONFIG_SMALL("AVS format"),
sizeof(AvsFormat),
avs_probe,
avs_read_header,
avs_read_packet,
avs_read_close,
};
| 123linslouis-android-video-cutter | jni/libavformat/avs.c | C | asf20 | 6,633 |
/*
* RAW muxer and demuxer
* Copyright (c) 2001 Fabrice Bellard
* Copyright (c) 2005 Alex Beregszaszi
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/crc.h"
#include "libavcodec/ac3_parser.h"
#include "libavcodec/get_bits.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "raw.h"
#include "id3v2.h"
#include "id3v1.h"
/* simple formats */
#if CONFIG_ROQ_MUXER
/** Write the fixed 8-byte RoQ file signature chunk. */
static int roq_write_header(struct AVFormatContext *s)
{
    static const uint8_t header[] = {
        0x84, 0x10, 0xFF, 0xFF, 0xFF, 0xFF, 0x1E, 0x00
    };

    put_buffer(s->pb, header, sizeof(header));
    put_flush_packet(s->pb);

    return 0;
}
#endif
#if CONFIG_NULL_MUXER
/* "null" muxer: discard every packet without writing anything. */
static int null_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    return 0;
}
#endif
#if CONFIG_MUXERS
/** Write a packet's payload to the output verbatim (no framing). */
static int raw_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *out = s->pb;

    put_buffer(out, pkt->data, pkt->size);
    put_flush_packet(out);
    return 0;
}
#endif
#if CONFIG_DEMUXERS
/* raw input */
/**
 * Generic raw-stream "header" reader: creates one stream whose codec id
 * comes from the demuxer's ->value and whose parameters come entirely
 * from the caller-supplied AVFormatParameters, since raw files carry no
 * in-band header.
 */
static int raw_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AVStream *st;
    enum CodecID id;

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    id = s->iformat->value;
    if (id == CODEC_ID_RAWVIDEO) {
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    } else {
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    }
    st->codec->codec_id = id;

    switch(st->codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        st->codec->sample_rate = ap->sample_rate;
        if(ap->channels) st->codec->channels = ap->channels;
        else             st->codec->channels = 1;  /* default to mono */
        st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id);
        assert(st->codec->bits_per_coded_sample > 0);
        st->codec->block_align = st->codec->bits_per_coded_sample*st->codec->channels/8;
        /* timestamps are in samples */
        av_set_pts_info(st, 64, 1, st->codec->sample_rate);
        break;
    case AVMEDIA_TYPE_VIDEO:
        if(ap->time_base.num)
            av_set_pts_info(st, 64, ap->time_base.num, ap->time_base.den);
        else
            av_set_pts_info(st, 64, 1, 25);  /* default to 25 fps */
        st->codec->width  = ap->width;
        st->codec->height = ap->height;
        st->codec->pix_fmt = ap->pix_fmt;
        if(st->codec->pix_fmt == PIX_FMT_NONE)
            st->codec->pix_fmt= PIX_FMT_YUV420P;
        break;
    default:
        return -1;
    }
    return 0;
}
#define RAW_PACKET_SIZE 1024
#define RAW_SAMPLES 1024
/**
 * Read a block of raw PCM (RAW_SAMPLES sample frames) and derive pts/dts
 * from the byte position, since raw streams carry no timestamps.
 */
static int raw_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, size, bps;

    size = RAW_SAMPLES * s->streams[0]->codec->block_align;

    ret = av_get_packet(s->pb, pkt, size);

    pkt->stream_index = 0;
    if (ret < 0)
        return ret;

    bps = av_get_bits_per_sample(s->streams[0]->codec->codec_id);
    assert(bps); // if false there IS a bug elsewhere (NOT in this function)
    /* timestamp = sample index of this byte position */
    pkt->dts =
    pkt->pts = pkt->pos * 8 / (bps * s->streams[0]->codec->channels);
    return ret;
}
/**
 * Read up to RAW_PACKET_SIZE bytes into a new packet, accepting a partial
 * read.  The packet is shrunk to the number of bytes actually read.
 * @return the packet size, or a negative error code.
 */
int ff_raw_read_partial_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret;

    if (av_new_packet(pkt, RAW_PACKET_SIZE) < 0)
        return AVERROR(ENOMEM);

    pkt->pos          = url_ftell(s->pb);
    pkt->stream_index = 0;

    ret = get_partial_buffer(s->pb, pkt->data, RAW_PACKET_SIZE);
    if (ret < 0) {
        av_free_packet(pkt);
        return ret;
    }

    pkt->size = ret;
    return ret;
}
#endif
#if CONFIG_RAWVIDEO_DEMUXER
/**
 * Read exactly one raw video frame; the packet size is computed from the
 * configured dimensions and pixel format, and pts/dts are the frame index
 * (byte position divided by frame size).
 */
static int rawvideo_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int packet_size, ret, width, height;
    AVStream *st = s->streams[0];

    width  = st->codec->width;
    height = st->codec->height;

    packet_size = avpicture_get_size(st->codec->pix_fmt, width, height);
    if (packet_size < 0)
        return -1;

    ret = av_get_packet(s->pb, pkt, packet_size);
    pkt->pts =
    pkt->dts = pkt->pos / packet_size;

    pkt->stream_index = 0;
    if (ret < 0)
        return ret;
    return 0;
}
#endif
#if CONFIG_INGENIENT_DEMUXER
// http://www.artificis.hu/files/texts/ingenient.txt
/**
 * Read one Ingenient chunk: 'MJPG' tag, 32-bit size, width and height,
 * padding, two unknown 16-bit fields and an ASCII timestamp, followed by
 * the JPEG payload of `size` bytes.
 */
static int ingenient_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, size, w, h, unk1, unk2;

    if (get_le32(s->pb) != MKTAG('M', 'J', 'P', 'G'))
        return AVERROR(EIO); // FIXME

    size = get_le32(s->pb);

    w = get_le16(s->pb);
    h = get_le16(s->pb);

    url_fskip(s->pb, 8); // zero + size (padded?)
    url_fskip(s->pb, 2);
    unk1 = get_le16(s->pb);
    unk2 = get_le16(s->pb);
    url_fskip(s->pb, 22); // ASCII timestamp

    av_log(s, AV_LOG_DEBUG, "Ingenient packet: size=%d, width=%d, height=%d, unk1=%d unk2=%d\n",
           size, w, h, unk1, unk2);

    /* NOTE(review): size comes straight from the file and is not sanity
     * checked here beyond av_new_packet() rejecting it. */
    if (av_new_packet(pkt, size) < 0)
        return AVERROR(ENOMEM);

    pkt->pos = url_ftell(s->pb);
    pkt->stream_index = 0;
    ret = get_buffer(s->pb, pkt->data, size);
    if (ret < 0) {
        av_free_packet(pkt);
        return ret;
    }
    /* a short read at EOF is accepted; report what was read */
    pkt->size = ret;
    return ret;
}
#endif
#if CONFIG_DEMUXERS
/**
 * Seek in headerless PCM by converting the timestamp to a byte offset,
 * rounded to a whole block (sample frame).  The rounding direction
 * follows AVSEEK_FLAG_BACKWARD.  Returns 0 on success or a negative
 * error code.
 */
int pcm_read_seek(AVFormatContext *s,
                  int stream_index, int64_t timestamp, int flags)
{
    AVStream *st;
    int block_align, byte_rate;
    int64_t pos, ret;

    st = s->streams[0];

    /* derive block size / byte rate from the sample format when the
     * codec parameters do not provide them directly */
    block_align = st->codec->block_align ? st->codec->block_align :
        (av_get_bits_per_sample(st->codec->codec_id) * st->codec->channels) >> 3;
    byte_rate = st->codec->bit_rate ? st->codec->bit_rate >> 3 :
        block_align * st->codec->sample_rate;

    if (block_align <= 0 || byte_rate <= 0)
        return -1;
    if (timestamp < 0) timestamp = 0;

    /* compute the position by aligning it to block_align */
    pos = av_rescale_rnd(timestamp * byte_rate,
                         st->time_base.num,
                         st->time_base.den * (int64_t)block_align,
                         (flags & AVSEEK_FLAG_BACKWARD) ? AV_ROUND_DOWN : AV_ROUND_UP);
    pos *= block_align;

    /* recompute exact position */
    st->cur_dts = av_rescale(pos, st->time_base.den, byte_rate * (int64_t)st->time_base.num);

    if ((ret = url_fseek(s->pb, pos + s->data_offset, SEEK_SET)) < 0)
        return ret;
    return 0;
}
/**
 * Header "reader" for raw audio elementary streams: creates one audio
 * stream and defers all parameter extraction to the bitstream parser.
 */
static int audio_read_header(AVFormatContext *s,
                             AVFormatParameters *ap)
{
    AVStream *st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = s->iformat->value;
    st->need_parsing = AVSTREAM_PARSE_FULL;
    /* the parameters will be extracted from the compressed bitstream */
    return 0;
}
/* MPEG-1/H.263 input */
/**
 * Header "reader" for raw video elementary streams: creates one video
 * stream, defers parameter extraction to the bitstream parser, and sets
 * a default time base for codecs that carry no frame rate in-band.
 */
static int video_read_header(AVFormatContext *s,
                             AVFormatParameters *ap)
{
    AVStream *st;

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = s->iformat->value;
    st->need_parsing = AVSTREAM_PARSE_FULL;

    /* for MJPEG, specify frame rate */
    /* for MPEG-4 specify it, too (most MPEG-4 streams do not have the fixed_vop_rate set ...)*/
    if (ap->time_base.num) {
        st->codec->time_base = ap->time_base;
    } else if ( st->codec->codec_id == CODEC_ID_MJPEG ||
                st->codec->codec_id == CODEC_ID_MPEG4 ||
                st->codec->codec_id == CODEC_ID_DIRAC ||
                st->codec->codec_id == CODEC_ID_DNXHD ||
                st->codec->codec_id == CODEC_ID_H264) {
        st->codec->time_base = (AVRational){1,25};  /* assume 25 fps */
    }
    /* fine-grained timestamp base for parsed frame durations */
    av_set_pts_info(st, 64, 1, 1200000);

    return 0;
}
#endif
#if CONFIG_MPEGVIDEO_DEMUXER
#define SEQ_START_CODE 0x000001b3
#define GOP_START_CODE 0x000001b8
#define PICTURE_START_CODE 0x00000100
#define SLICE_START_CODE 0x00000101
#define PACK_START_CODE 0x000001ba
#define VIDEO_ID 0x000001e0
#define AUDIO_ID 0x000001c0
/**
 * Probe for raw MPEG-1/2 video: scan for start codes and count sequence,
 * picture, slice, PS-pack and PES occurrences.  Accept only when the
 * counts are in plausible proportion and no pack/PES codes appear (those
 * would indicate a program stream rather than an elementary stream).
 */
static int mpegvideo_probe(AVProbeData *p)
{
    uint32_t code = -1;
    int pic = 0, seq = 0, slice = 0, pspack = 0, pes = 0;
    int i;

    for (i = 0; i < p->buf_size; i++) {
        code = (code << 8) + p->buf[i];
        if ((code & 0xffffff00) == 0x100) {
            switch(code) {
            case     SEQ_START_CODE:    seq++; break;
            case PICTURE_START_CODE:    pic++; break;
            case   SLICE_START_CODE:  slice++; break;
            case    PACK_START_CODE: pspack++; break;
            }
            if      ((code & 0x1f0) == VIDEO_ID) pes++;
            else if ((code & 0x1e0) == AUDIO_ID) pes++;
        }
    }
    /* require roughly seq <= pic <= slice counts and no system-layer codes */
    if (seq && seq*9 <= pic*10 && pic*9 <= slice*10 && !pspack && !pes)
        return pic > 1 ? AVPROBE_SCORE_MAX/2+1 : AVPROBE_SCORE_MAX/4; // +1 for .mpg
    return 0;
}
#endif
#if CONFIG_CAVSVIDEO_DEMUXER
#define CAVS_SEQ_START_CODE 0x000001b0
#define CAVS_PIC_I_START_CODE 0x000001b3
#define CAVS_UNDEF_START_CODE 0x000001b4
#define CAVS_PIC_PB_START_CODE 0x000001b6
#define CAVS_VIDEO_EDIT_CODE 0x000001b7
#define CAVS_PROFILE_JIZHUN 0x20
/* Probe for raw Chinese AVS video: scan for start codes, demanding that
 * slice start codes appear in ascending order, that sequence headers carry
 * the only supported profile, and that no undefined codes occur. */
static int cavsvideo_probe(AVProbeData *p)
{
    uint32_t code= -1;                  /* rolling 32-bit start-code window */
    int pic=0, seq=0, slice_pos = 0;
    int i;

    for(i=0; i<p->buf_size; i++){
        code = (code<<8) + p->buf[i];
        if ((code & 0xffffff00) == 0x100) {  /* found a 0x000001xx code */
            if(code < CAVS_SEQ_START_CODE) {
                /* slices have to be consecutive */
                if(code < slice_pos)
                    return 0;
                slice_pos = code;
            } else {
                slice_pos = 0;          /* non-slice code resets the ordering */
            }
            if (code == CAVS_SEQ_START_CODE) {
                seq++;
                /* check for the only currently supported profile */
                if(p->buf[i+1] != CAVS_PROFILE_JIZHUN)
                    return 0;
            } else if ((code == CAVS_PIC_I_START_CODE) ||
                       (code == CAVS_PIC_PB_START_CODE)) {
                pic++;
            } else if ((code == CAVS_UNDEF_START_CODE) ||
                       (code > CAVS_VIDEO_EDIT_CODE)) {
                return 0;               /* undefined / out-of-range code */
            }
        }
    }
    /* require roughly >= 0.9 pictures per sequence header */
    if(seq && seq*9<=pic*10)
        return AVPROBE_SCORE_MAX/2;
    return 0;
}
#endif
#if CONFIG_M4V_DEMUXER
#define VISUAL_OBJECT_START_CODE 0x000001b5
#define VOP_START_CODE 0x000001b6
/* Probe for raw MPEG-4 part 2 video: count VOPs, visual-object headers,
 * VO (< 0x120) and VOL (0x120-0x12F) start codes, and treat anything
 * outside the known ranges as a reserved-code hit that disqualifies. */
static int mpeg4video_probe(AVProbeData *probe_packet)
{
    uint32_t temp_buffer= -1;           /* rolling start-code window */
    int VO=0, VOL=0, VOP = 0, VISO = 0, res=0;
    int i;

    for(i=0; i<probe_packet->buf_size; i++){
        temp_buffer = (temp_buffer<<8) + probe_packet->buf[i];
        if ((temp_buffer & 0xffffff00) != 0x100)
            continue;                   /* not a 0x000001xx start code */

        if (temp_buffer == VOP_START_CODE) VOP++;
        else if (temp_buffer == VISUAL_OBJECT_START_CODE) VISO++;
        else if (temp_buffer < 0x120) VO++;      /* video object codes */
        else if (temp_buffer < 0x130) VOL++;     /* video object layer codes */
        /* anything outside 0x1B0-0x1B6 and 0x1BA-0x1C3 is unexpected */
        else if (   !(0x1AF < temp_buffer && temp_buffer < 0x1B7)
                 && !(0x1B9 < temp_buffer && temp_buffer < 0x1C4)) res++;
    }

    if ( VOP >= VISO && VOP >= VOL && VO >= VOL && VOL > 0 && res==0)
        return AVPROBE_SCORE_MAX/2;
    return 0;
}
#endif
#if CONFIG_H264_DEMUXER
/* Probe for raw H.264 Annex B: scan for 0x000001 start codes and validate
 * each NAL header byte (forbidden bit, nal_ref_idc consistency per NAL
 * type), counting SPS/PPS/IDR/slice NALs. */
static int h264_probe(AVProbeData *p)
{
    uint32_t code= -1;                  /* rolling start-code window */
    int sps=0, pps=0, idr=0, res=0, sli=0;
    int i;

    for(i=0; i<p->buf_size; i++){
        code = (code<<8) + p->buf[i];
        if ((code & 0xffffff00) == 0x100) {  /* start code + NAL header byte */
            int ref_idc= (code>>5)&3;   /* nal_ref_idc */
            int type   = code & 0x1F;   /* nal_unit_type */
            /* per NAL type: 1 = ref_idc must be zero, -1 = must be nonzero,
             * 2 = reserved/unexpected type (counted in res), 0 = no rule */
            static const int8_t ref_zero[32]={
                2, 0, 0, 0, 0,-1, 1,-1,
               -1, 1, 1, 1, 1,-1, 2, 2,
                2, 2, 2, 0, 2, 2, 2, 2,
                2, 2, 2, 2, 2, 2, 2, 2
            };

            if(code & 0x80) //forbidden bit
                return 0;

            if(ref_zero[type] == 1 && ref_idc)
                return 0;
            if(ref_zero[type] ==-1 && !ref_idc)
                return 0;
            if(ref_zero[type] == 2)
                res++;

            switch(type){
            case 1: sli++; break;       /* non-IDR slice */
            case 5: idr++; break;       /* IDR slice */
            case 7:                     /* SPS */
                /* profile_idc constraint: low nibble of the byte after
                 * level must be zero.
                 * NOTE(review): i+2 may read up to 2 bytes past buf_size;
                 * presumably relies on probe-buffer padding -- verify. */
                if(p->buf[i+2]&0x0F)
                    return 0;
                sps++;
                break;
            case 8: pps++; break;       /* PPS */
            }
        }
    }
    /* need parameter sets plus either an IDR or several slices, and few
     * reserved-type hits relative to valid NALs */
    if(sps && pps && (idr||sli>3) && res<(sps+pps+idr))
        return AVPROBE_SCORE_MAX/2+1; // +1 for .mpg
    return 0;
}
#endif
#if CONFIG_H263_DEMUXER
/* Probe for raw H.263: scan a 40-bit window for picture start codes,
 * counting valid vs. invalid PSCs and mid-stream resolution changes. */
static int h263_probe(AVProbeData *p)
{
    uint64_t code= -1;                  /* rolling 40-bit window */
    int i;
    int valid_psc=0;
    int invalid_psc=0;
    int res_change=0;
    int src_fmt, last_src_fmt=-1;

    for(i=0; i<p->buf_size; i++){
        code = (code<<8) + p->buf[i];
        /* 22-bit picture start code (byte aligned here) */
        if ((code & 0xfffffc0000) == 0x800000) {
            src_fmt= (code>>2)&3;       /* part of the source-format field */
            /* count source-format switches between plausible values */
            if(   src_fmt != last_src_fmt
               && last_src_fmt>0 && last_src_fmt<6
               && src_fmt<6)
                res_change++;

            /* PTYPE sanity: required-1 / required-0 bits, nonzero format */
            if((code&0x300)==0x200 && src_fmt){
                valid_psc++;
            }else
                invalid_psc++;
            last_src_fmt= src_fmt;
        }
    }
//av_log(NULL, AV_LOG_ERROR, "h263_probe: psc:%d invalid:%d res_change:%d\n", valid_psc, invalid_psc, res_change);
//h263_probe: psc:3 invalid:0 res_change:0 (1588/recent_ffmpeg_parses_mpg_incorrectly.mpg)
    /* demand a clear majority of valid PSCs before claiming the stream */
    if(valid_psc > 2*invalid_psc + 2*res_change + 3){
        return 50;
    }else if(valid_psc > 2*invalid_psc)
        return 25;
    return 0;
}
#endif
#if CONFIG_H261_DEMUXER
/* Probe for raw H.261: walk the buffer bit by bit (byte-stepping where the
 * window cannot contain a start code) looking for GOB start codes whose
 * group numbers follow the expected CIF/QCIF sequence. */
static int h261_probe(AVProbeData *p)
{
    uint32_t code= -1;                  /* rolling bit window */
    int i;
    int valid_psc=0;
    int invalid_psc=0;
    int next_gn=0;                      /* expected next group number */
    int src_fmt=0;                      /* nonzero => CIF, zero => QCIF */
    GetBitContext gb;

    init_get_bits(&gb, p->buf, p->buf_size*8);

    for(i=0; i<p->buf_size*8; i++){
        /* fast path: if no start code can begin in the next 8 bits,
         * consume a whole byte at once */
        if ((code & 0x01ff0000) || !(code & 0xff00)) {
            code = (code<<8) + get_bits(&gb, 8);
            i += 7;
        } else
            code = (code<<1) + get_bits1(&gb);
        if ((code & 0xffff0000) == 0x10000) {   /* 16-bit start code */
            int gn= (code>>12)&0xf;             /* group number */
            if(!gn)
                src_fmt= code&8;    /* gn==0 is a PSC; bit 3 = source format */
            if(gn != next_gn) invalid_psc++;
            else              valid_psc++;

            if(src_fmt){ // CIF
                next_gn= (gn+1     )%13;        /* GOBs 1..12 */
            }else{       //QCIF
                next_gn= (gn+1+!!gn)% 7;        /* GOBs 1,3,5 */
            }
        }
    }
    if(valid_psc > 2*invalid_psc + 6){
        return 50;
    }else if(valid_psc > 2*invalid_psc + 2)
        return 25;
    return 0;
}
#endif
#if CONFIG_DTS_DEMUXER
#define DCA_MARKER_14B_BE 0x1FFFE800
#define DCA_MARKER_14B_LE 0xFF1F00E8
#define DCA_MARKER_RAW_BE 0x7FFE8001
#define DCA_MARKER_RAW_LE 0xFE7F0180
/* Probe for raw DTS: count sync markers for the three framings (raw
 * big/little-endian, 14-bit big/little-endian) on 16-bit boundaries and
 * require the dominant marker to be frequent and clearly dominant. */
static int dts_probe(AVProbeData *p)
{
    const uint8_t *buf, *bufp;
    uint32_t state = -1;                /* rolling 32-bit window */
    int markers[3] = {0};               /* [raw, 14-bit BE, 14-bit LE] */
    int sum, max;

    buf = p->buf;

    for(; buf < (p->buf+p->buf_size)-2; buf+=2) {
        bufp = buf;
        state = (state << 16) | bytestream_get_be16(&bufp);

        /* regular bitstream */
        if (state == DCA_MARKER_RAW_BE || state == DCA_MARKER_RAW_LE)
            markers[0]++;

        /* 14 bits big-endian bitstream */
        if (state == DCA_MARKER_14B_BE)
            if ((bytestream_get_be16(&bufp) & 0xFFF0) == 0x07F0)
                markers[1]++;

        /* 14 bits little-endian bitstream */
        if (state == DCA_MARKER_14B_LE)
            if ((bytestream_get_be16(&bufp) & 0xF0FF) == 0xF007)
                markers[2]++;
    }
    sum = markers[0] + markers[1] + markers[2];
    /* index of the most frequent marker kind */
    max = markers[1] > markers[0];
    max = markers[2] > markers[max] ? 2 : max;
    /* >3 hits, average frame < 32 KiB, and >= 75% of all hits */
    if (markers[max] > 3 && p->buf_size / markers[max] < 32*1024 &&
        markers[max] * 4 > sum * 3)
        return AVPROBE_SCORE_MAX/2+1;

    return 0;
}
#endif
#if CONFIG_DIRAC_DEMUXER
/* Probe for raw Dirac: every stream begins with the parse-info prefix
 * "BBCD", so a single 32-bit compare is conclusive. */
static int dirac_probe(AVProbeData *p)
{
    return AV_RL32(p->buf) == MKTAG('B', 'B', 'C', 'D')
           ? AVPROBE_SCORE_MAX : 0;
}
#endif
#if CONFIG_DNXHD_DEMUXER
/* Probe for raw DNxHD (VC-3): check the fixed 5-byte header prefix, then
 * sanity-check the dimensions and compression id read from fixed offsets. */
static int dnxhd_probe(AVProbeData *p)
{
    static const uint8_t header[] = {0x00,0x00,0x02,0x80,0x01};
    int w, h, compression_id;
    if (p->buf_size < 0x2c)             /* need bytes up to offset 0x2b */
        return 0;
    if (memcmp(p->buf, header, 5))
        return 0;
    h = AV_RB16(p->buf + 0x18);
    w = AV_RB16(p->buf + 0x1a);
    if (!w || !h)
        return 0;
    compression_id = AV_RB32(p->buf + 0x28);
    /* only ids in this range are accepted -- presumably the set of defined
     * DNxHD compression ids of this era; TODO(review): confirm */
    if (compression_id < 1237 || compression_id > 1253)
        return 0;
    return AVPROBE_SCORE_MAX;
}
#endif
#if CONFIG_AC3_DEMUXER || CONFIG_EAC3_DEMUXER
/* Shared AC-3/E-AC-3 probe.  From every byte offset, count how many
 * consecutive frames parse with a valid header and CRC; track the longest
 * run anywhere and the run starting at offset 0.  bitstream_id > 10 marks
 * the stream as E-AC-3.  Returns 0 if the detected codec does not match
 * expected_codec_id, otherwise a score scaled by the frame counts. */
static int ac3_eac3_probe(AVProbeData *p, enum CodecID expected_codec_id)
{
    int max_frames, first_frames = 0, frames;
    uint8_t *buf, *buf2, *end;
    AC3HeaderInfo hdr;
    GetBitContext gbc;
    enum CodecID codec_id = CODEC_ID_AC3;

    max_frames = 0;
    buf = p->buf;
    end = buf + p->buf_size;

    for(; buf < end; buf++) {
        buf2 = buf;

        for(frames = 0; buf2 < end; frames++) {
            init_get_bits(&gbc, buf2, 54);
            if(ff_ac3_parse_header(&gbc, &hdr) < 0)
                break;
            /* frame must fit in the buffer and pass its CRC16 */
            if(buf2 + hdr.frame_size > end ||
               av_crc(av_crc_get_table(AV_CRC_16_ANSI), 0, buf2 + 2, hdr.frame_size - 2))
                break;
            if (hdr.bitstream_id > 10)
                codec_id = CODEC_ID_EAC3;
            buf2 += hdr.frame_size;
        }
        max_frames = FFMAX(max_frames, frames);
        if(buf == p->buf)
            first_frames = frames;
    }
    if(codec_id != expected_codec_id) return 0;
    // keep this in sync with mp3 probe, both need to avoid
    // issues with MPEG-files!
    if   (first_frames>=4) return AVPROBE_SCORE_MAX/2+1;
    else if(max_frames>500)return AVPROBE_SCORE_MAX/2;
    else if(max_frames>=4) return AVPROBE_SCORE_MAX/4;
    else if(max_frames>=1) return 1;
    else                   return 0;
}
#endif
#if CONFIG_AC3_DEMUXER
/* Probe for plain AC-3 via the shared AC-3/E-AC-3 scanner. */
static int ac3_probe(AVProbeData *p)
{
    const enum CodecID want = CODEC_ID_AC3;
    return ac3_eac3_probe(p, want);
}
#endif
#if CONFIG_EAC3_DEMUXER
/* Probe for E-AC-3 via the shared AC-3/E-AC-3 scanner. */
static int eac3_probe(AVProbeData *p)
{
    const enum CodecID want = CODEC_ID_EAC3;
    return ac3_eac3_probe(p, want);
}
#endif
#if CONFIG_AAC_DEMUXER
/* Probe for raw ADTS AAC: after skipping any leading ID3v2 tag, look from
 * every offset for chains of consecutive ADTS frames whose syncwords and
 * frame-length fields link up; score by the longest chain and the chain
 * starting at the buffer head. */
static int adts_aac_probe(AVProbeData *p)
{
    int max_frames = 0, first_frames = 0;
    int fsize, frames;
    uint8_t *buf0 = p->buf;
    uint8_t *buf2;
    uint8_t *buf;
    uint8_t *end = buf0 + p->buf_size - 7;  /* 7 = minimum ADTS header size */

    if (ff_id3v2_match(buf0)) {
        buf0 += ff_id3v2_tag_len(buf0);     /* skip leading ID3v2 tag */
    }
    buf = buf0;

    for(; buf < end; buf= buf2+1) {
        buf2 = buf;

        for(frames = 0; buf2 < end; frames++) {
            uint32_t header = AV_RB16(buf2);
            /* 12-bit syncword 0xFFF with layer bits == 0 */
            if((header&0xFFF6) != 0xFFF0)
                break;
            /* aac_frame_length is a 13-bit field: mask with 0x1FFF.
             * (The previous 0x8FFF mask kept a stray high bit and produced
             * bogus frame sizes, breaking the frame chaining.) */
            fsize = (AV_RB32(buf2+3)>>13) & 0x1FFF;
            if(fsize < 7)                   /* shorter than the header itself */
                break;
            buf2 += fsize;
        }
        max_frames = FFMAX(max_frames, frames);
        if(buf == buf0)
            first_frames= frames;
    }
    /* thresholds kept in sync with the mp3/ac3 probes to avoid MPEG files */
    if   (first_frames>=3) return AVPROBE_SCORE_MAX/2+1;
    else if(max_frames>500)return AVPROBE_SCORE_MAX/2;
    else if(max_frames>=3) return AVPROBE_SCORE_MAX/4;
    else if(max_frames>=1) return 1;
    else                   return 0;
}
/* ADTS AAC header reader: one audio stream, parameters left to the parser,
 * plus ID3v1/ID3v2 metadata extraction. */
static int adts_aac_read_header(AVFormatContext *s,
                                AVFormatParameters *ap)
{
    AVStream *st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id   = s->iformat->value;
    st->need_parsing      = AVSTREAM_PARSE_FULL;

    /* pull in any ID3 metadata surrounding the raw stream */
    ff_id3v1_read(s);
    ff_id3v2_read(s);
    return 0;
}
#endif
/* Note: Do not forget to add new entries to the Makefile as well. */

/* Format registration tables.  The leading positional initializers are,
 * in declaration order: demuxers -- name, long_name, priv_data_size,
 * read_probe, read_header, read_packet; muxers -- name, long_name,
 * mime_type, extensions, priv_data_size, audio_codec, video_codec,
 * write_header, write_packet.  TODO(review): confirm against the
 * AVInputFormat/AVOutputFormat declarations in this tree's avformat.h. */

#if CONFIG_AAC_DEMUXER
AVInputFormat aac_demuxer = {
    "aac",
    NULL_IF_CONFIG_SMALL("raw ADTS AAC"),
    0,
    adts_aac_probe,
    adts_aac_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .extensions = "aac",
    .value = CODEC_ID_AAC,
};
#endif

#if CONFIG_AC3_DEMUXER
AVInputFormat ac3_demuxer = {
    "ac3",
    NULL_IF_CONFIG_SMALL("raw AC-3"),
    0,
    ac3_probe,
    audio_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .extensions = "ac3",
    .value = CODEC_ID_AC3,
};
#endif

#if CONFIG_AC3_MUXER
AVOutputFormat ac3_muxer = {
    "ac3",
    NULL_IF_CONFIG_SMALL("raw AC-3"),
    "audio/x-ac3",
    "ac3",
    0,
    CODEC_ID_AC3,
    CODEC_ID_NONE,
    NULL,
    raw_write_packet,
    .flags= AVFMT_NOTIMESTAMPS,
};
#endif

#if CONFIG_DIRAC_DEMUXER
AVInputFormat dirac_demuxer = {
    "dirac",
    NULL_IF_CONFIG_SMALL("raw Dirac"),
    0,
    dirac_probe,
    video_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .value = CODEC_ID_DIRAC,
};
#endif

#if CONFIG_DIRAC_MUXER
AVOutputFormat dirac_muxer = {
    "dirac",
    NULL_IF_CONFIG_SMALL("raw Dirac"),
    NULL,
    "drc",
    0,
    CODEC_ID_NONE,
    CODEC_ID_DIRAC,
    NULL,
    raw_write_packet,
    .flags= AVFMT_NOTIMESTAMPS,
};
#endif

#if CONFIG_DNXHD_DEMUXER
AVInputFormat dnxhd_demuxer = {
    "dnxhd",
    NULL_IF_CONFIG_SMALL("raw DNxHD (SMPTE VC-3)"),
    0,
    dnxhd_probe,
    video_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .value = CODEC_ID_DNXHD,
};
#endif

#if CONFIG_DNXHD_MUXER
AVOutputFormat dnxhd_muxer = {
    "dnxhd",
    NULL_IF_CONFIG_SMALL("raw DNxHD (SMPTE VC-3)"),
    NULL,
    "dnxhd",
    0,
    CODEC_ID_NONE,
    CODEC_ID_DNXHD,
    NULL,
    raw_write_packet,
    .flags= AVFMT_NOTIMESTAMPS,
};
#endif

#if CONFIG_DTS_DEMUXER
AVInputFormat dts_demuxer = {
    "dts",
    NULL_IF_CONFIG_SMALL("raw DTS"),
    0,
    dts_probe,
    audio_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .extensions = "dts",
    .value = CODEC_ID_DTS,
};
#endif

#if CONFIG_DTS_MUXER
AVOutputFormat dts_muxer = {
    "dts",
    NULL_IF_CONFIG_SMALL("raw DTS"),
    "audio/x-dca",
    "dts",
    0,
    CODEC_ID_DTS,
    CODEC_ID_NONE,
    NULL,
    raw_write_packet,
    .flags= AVFMT_NOTIMESTAMPS,
};
#endif

#if CONFIG_EAC3_DEMUXER
AVInputFormat eac3_demuxer = {
    "eac3",
    NULL_IF_CONFIG_SMALL("raw E-AC-3"),
    0,
    eac3_probe,
    audio_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .extensions = "eac3",
    .value = CODEC_ID_EAC3,
};
#endif

#if CONFIG_EAC3_MUXER
AVOutputFormat eac3_muxer = {
    "eac3",
    NULL_IF_CONFIG_SMALL("raw E-AC-3"),
    "audio/x-eac3",
    "eac3",
    0,
    CODEC_ID_EAC3,
    CODEC_ID_NONE,
    NULL,
    raw_write_packet,
    .flags= AVFMT_NOTIMESTAMPS,
};
#endif

/* GSM has no reliable probe; selected by extension only */
#if CONFIG_GSM_DEMUXER
AVInputFormat gsm_demuxer = {
    "gsm",
    NULL_IF_CONFIG_SMALL("raw GSM"),
    0,
    NULL,
    audio_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .extensions = "gsm",
    .value = CODEC_ID_GSM,
};
#endif
/* Registration tables for the H.26x family and Ingenient MJPEG (positional
 * fields follow the AVInputFormat/AVOutputFormat declaration order). */
#if CONFIG_H261_DEMUXER
AVInputFormat h261_demuxer = {
    "h261",
    NULL_IF_CONFIG_SMALL("raw H.261"),
    0,
    h261_probe,
    video_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .extensions = "h261",
    .value = CODEC_ID_H261,
};
#endif

#if CONFIG_H261_MUXER
AVOutputFormat h261_muxer = {
    "h261",
    NULL_IF_CONFIG_SMALL("raw H.261"),
    "video/x-h261",
    "h261",
    0,
    CODEC_ID_NONE,
    CODEC_ID_H261,
    NULL,
    raw_write_packet,
    .flags= AVFMT_NOTIMESTAMPS,
};
#endif

#if CONFIG_H263_DEMUXER
AVInputFormat h263_demuxer = {
    "h263",
    NULL_IF_CONFIG_SMALL("raw H.263"),
    0,
    h263_probe,
    video_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
//    .extensions = "h263", //FIXME remove after writing mpeg4_probe
    .value = CODEC_ID_H263,
};
#endif

#if CONFIG_H263_MUXER
AVOutputFormat h263_muxer = {
    "h263",
    NULL_IF_CONFIG_SMALL("raw H.263"),
    "video/x-h263",
    "h263",
    0,
    CODEC_ID_NONE,
    CODEC_ID_H263,
    NULL,
    raw_write_packet,
    .flags= AVFMT_NOTIMESTAMPS,
};
#endif

#if CONFIG_H264_DEMUXER
AVInputFormat h264_demuxer = {
    "h264",
    NULL_IF_CONFIG_SMALL("raw H.264 video format"),
    0,
    h264_probe,
    video_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .extensions = "h26l,h264,264", //FIXME remove after writing mpeg4_probe
    .value = CODEC_ID_H264,
};
#endif

#if CONFIG_H264_MUXER
AVOutputFormat h264_muxer = {
    "h264",
    NULL_IF_CONFIG_SMALL("raw H.264 video format"),
    NULL,
    "h264",
    0,
    CODEC_ID_NONE,
    CODEC_ID_H264,
    NULL,
    raw_write_packet,
    .flags= AVFMT_NOTIMESTAMPS,
};
#endif

/* Ingenient MJPEG needs a dedicated packetizer (ingenient_read_packet),
 * declared elsewhere in this file */
#if CONFIG_INGENIENT_DEMUXER
AVInputFormat ingenient_demuxer = {
    "ingenient",
    NULL_IF_CONFIG_SMALL("raw Ingenient MJPEG"),
    0,
    NULL,
    video_read_header,
    ingenient_read_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .extensions = "cgi", // FIXME
    .value = CODEC_ID_MJPEG,
};
#endif
/* Registration tables for MPEG-4, MJPEG, MLP and TrueHD (positional fields
 * follow the AVInputFormat/AVOutputFormat declaration order). */
#if CONFIG_M4V_DEMUXER
AVInputFormat m4v_demuxer = {
    "m4v",
    NULL_IF_CONFIG_SMALL("raw MPEG-4 video format"),
    0,
    mpeg4video_probe, /** probing for MPEG-4 data */
    video_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .extensions = "m4v",
    .value = CODEC_ID_MPEG4,
};
#endif

#if CONFIG_M4V_MUXER
AVOutputFormat m4v_muxer = {
    "m4v",
    NULL_IF_CONFIG_SMALL("raw MPEG-4 video format"),
    NULL,
    "m4v",
    0,
    CODEC_ID_NONE,
    CODEC_ID_MPEG4,
    NULL,
    raw_write_packet,
    .flags= AVFMT_NOTIMESTAMPS,
};
#endif

#if CONFIG_MJPEG_DEMUXER
AVInputFormat mjpeg_demuxer = {
    "mjpeg",
    NULL_IF_CONFIG_SMALL("raw MJPEG video"),
    0,
    NULL,
    video_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .extensions = "mjpg,mjpeg",
    .value = CODEC_ID_MJPEG,
};
#endif

#if CONFIG_MJPEG_MUXER
AVOutputFormat mjpeg_muxer = {
    "mjpeg",
    NULL_IF_CONFIG_SMALL("raw MJPEG video"),
    "video/x-mjpeg",
    "mjpg,mjpeg",
    0,
    CODEC_ID_NONE,
    CODEC_ID_MJPEG,
    NULL,
    raw_write_packet,
    .flags= AVFMT_NOTIMESTAMPS,
};
#endif

#if CONFIG_MLP_DEMUXER
AVInputFormat mlp_demuxer = {
    "mlp",
    NULL_IF_CONFIG_SMALL("raw MLP"),
    0,
    NULL,
    audio_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .extensions = "mlp",
    .value = CODEC_ID_MLP,
};
#endif

#if CONFIG_MLP_MUXER
AVOutputFormat mlp_muxer = {
    "mlp",
    NULL_IF_CONFIG_SMALL("raw MLP"),
    NULL,
    "mlp",
    0,
    CODEC_ID_MLP,
    CODEC_ID_NONE,
    NULL,
    raw_write_packet,
    .flags= AVFMT_NOTIMESTAMPS,
};
#endif

#if CONFIG_TRUEHD_DEMUXER
AVInputFormat truehd_demuxer = {
    "truehd",
    NULL_IF_CONFIG_SMALL("raw TrueHD"),
    0,
    NULL,
    audio_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .extensions = "thd",
    .value = CODEC_ID_TRUEHD,
};
#endif

#if CONFIG_TRUEHD_MUXER
AVOutputFormat truehd_muxer = {
    "truehd",
    NULL_IF_CONFIG_SMALL("raw TrueHD"),
    NULL,
    "thd",
    0,
    CODEC_ID_TRUEHD,
    CODEC_ID_NONE,
    NULL,
    raw_write_packet,
    .flags= AVFMT_NOTIMESTAMPS,
};
#endif
/* Registration tables for MPEG video, CAVS, null/raw video, RoQ, Shorten
 * and VC-1 (positional fields follow the AVInputFormat/AVOutputFormat
 * declaration order). */
#if CONFIG_MPEG1VIDEO_MUXER
AVOutputFormat mpeg1video_muxer = {
    "mpeg1video",
    NULL_IF_CONFIG_SMALL("raw MPEG-1 video"),
    "video/x-mpeg",
    "mpg,mpeg,m1v",
    0,
    CODEC_ID_NONE,
    CODEC_ID_MPEG1VIDEO,
    NULL,
    raw_write_packet,
    .flags= AVFMT_NOTIMESTAMPS,
};
#endif

#if CONFIG_MPEG2VIDEO_MUXER
AVOutputFormat mpeg2video_muxer = {
    "mpeg2video",
    NULL_IF_CONFIG_SMALL("raw MPEG-2 video"),
    NULL,
    "m2v",
    0,
    CODEC_ID_NONE,
    CODEC_ID_MPEG2VIDEO,
    NULL,
    raw_write_packet,
    .flags= AVFMT_NOTIMESTAMPS,
};
#endif

#if CONFIG_MPEGVIDEO_DEMUXER
AVInputFormat mpegvideo_demuxer = {
    "mpegvideo",
    NULL_IF_CONFIG_SMALL("raw MPEG video"),
    0,
    mpegvideo_probe,
    video_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .value = CODEC_ID_MPEG1VIDEO,
};
#endif

#if CONFIG_CAVSVIDEO_DEMUXER
AVInputFormat cavsvideo_demuxer = {
    "cavsvideo",
    NULL_IF_CONFIG_SMALL("raw Chinese AVS video"),
    0,
    cavsvideo_probe,
    video_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .value = CODEC_ID_CAVS,
};
#endif

/* discards all output; default codecs depend on host endianness */
#if CONFIG_NULL_MUXER
AVOutputFormat null_muxer = {
    "null",
    NULL_IF_CONFIG_SMALL("raw null video format"),
    NULL,
    NULL,
    0,
#if HAVE_BIGENDIAN
    CODEC_ID_PCM_S16BE,
#else
    CODEC_ID_PCM_S16LE,
#endif
    CODEC_ID_RAWVIDEO,
    NULL,
    null_write_packet,
    .flags = AVFMT_NOFILE | AVFMT_RAWPICTURE | AVFMT_NOTIMESTAMPS,
};
#endif

#if CONFIG_RAWVIDEO_DEMUXER
AVInputFormat rawvideo_demuxer = {
    "rawvideo",
    NULL_IF_CONFIG_SMALL("raw video format"),
    0,
    NULL,
    raw_read_header,
    rawvideo_read_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .extensions = "yuv,cif,qcif,rgb",
    .value = CODEC_ID_RAWVIDEO,
};
#endif

#if CONFIG_RAWVIDEO_MUXER
AVOutputFormat rawvideo_muxer = {
    "rawvideo",
    NULL_IF_CONFIG_SMALL("raw video format"),
    NULL,
    "yuv,rgb",
    0,
    CODEC_ID_NONE,
    CODEC_ID_RAWVIDEO,
    NULL,
    raw_write_packet,
    .flags= AVFMT_NOTIMESTAMPS,
};
#endif

#if CONFIG_ROQ_MUXER
AVOutputFormat roq_muxer =
{
    "RoQ",
    NULL_IF_CONFIG_SMALL("raw id RoQ format"),
    NULL,
    "roq",
    0,
    CODEC_ID_ROQ_DPCM,
    CODEC_ID_ROQ,
    roq_write_header,
    raw_write_packet,
};
#endif

#if CONFIG_SHORTEN_DEMUXER
AVInputFormat shorten_demuxer = {
    "shn",
    NULL_IF_CONFIG_SMALL("raw Shorten"),
    0,
    NULL,
    audio_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .extensions = "shn",
    .value = CODEC_ID_SHORTEN,
};
#endif

#if CONFIG_VC1_DEMUXER
AVInputFormat vc1_demuxer = {
    "vc1",
    NULL_IF_CONFIG_SMALL("raw VC-1"),
    0,
    NULL /* vc1_probe */,
    video_read_header,
    ff_raw_read_partial_packet,
    .extensions = "vc1",
    .value = CODEC_ID_VC1,
};
#endif
/* PCM formats */
#define PCMINPUTDEF(name, long_name, ext, codec) \
AVInputFormat pcm_ ## name ## _demuxer = {\
#name,\
NULL_IF_CONFIG_SMALL(long_name),\
0,\
NULL,\
raw_read_header,\
raw_read_packet,\
NULL,\
pcm_read_seek,\
.flags= AVFMT_GENERIC_INDEX,\
.extensions = ext,\
.value = codec,\
};
#define PCMOUTPUTDEF(name, long_name, ext, codec) \
AVOutputFormat pcm_ ## name ## _muxer = {\
#name,\
NULL_IF_CONFIG_SMALL(long_name),\
NULL,\
ext,\
0,\
codec,\
CODEC_ID_NONE,\
NULL,\
raw_write_packet,\
.flags= AVFMT_NOTIMESTAMPS,\
};
#if !CONFIG_MUXERS && CONFIG_DEMUXERS
#define PCMDEF(name, long_name, ext, codec) \
PCMINPUTDEF(name, long_name, ext, codec)
#elif CONFIG_MUXERS && !CONFIG_DEMUXERS
#define PCMDEF(name, long_name, ext, codec) \
PCMOUTPUTDEF(name, long_name, ext, codec)
#elif CONFIG_MUXERS && CONFIG_DEMUXERS
#define PCMDEF(name, long_name, ext, codec) \
PCMINPUTDEF(name, long_name, ext, codec)\
PCMOUTPUTDEF(name, long_name, ext, codec)
#else
#define PCMDEF(name, long_name, ext, codec)
#endif
#if HAVE_BIGENDIAN
#define BE_DEF(s) s
#define LE_DEF(s) NULL
#else
#define BE_DEF(s) NULL
#define LE_DEF(s) s
#endif
PCMDEF(f64be, "PCM 64 bit floating-point big-endian format",
NULL, CODEC_ID_PCM_F64BE)
PCMDEF(f64le, "PCM 64 bit floating-point little-endian format",
NULL, CODEC_ID_PCM_F64LE)
PCMDEF(f32be, "PCM 32 bit floating-point big-endian format",
NULL, CODEC_ID_PCM_F32BE)
PCMDEF(f32le, "PCM 32 bit floating-point little-endian format",
NULL, CODEC_ID_PCM_F32LE)
PCMDEF(s32be, "PCM signed 32 bit big-endian format",
NULL, CODEC_ID_PCM_S32BE)
PCMDEF(s32le, "PCM signed 32 bit little-endian format",
NULL, CODEC_ID_PCM_S32LE)
PCMDEF(s24be, "PCM signed 24 bit big-endian format",
NULL, CODEC_ID_PCM_S24BE)
PCMDEF(s24le, "PCM signed 24 bit little-endian format",
NULL, CODEC_ID_PCM_S24LE)
PCMDEF(s16be, "PCM signed 16 bit big-endian format",
BE_DEF("sw"), CODEC_ID_PCM_S16BE)
PCMDEF(s16le, "PCM signed 16 bit little-endian format",
LE_DEF("sw"), CODEC_ID_PCM_S16LE)
PCMDEF(s8, "PCM signed 8 bit format",
"sb", CODEC_ID_PCM_S8)
PCMDEF(u32be, "PCM unsigned 32 bit big-endian format",
NULL, CODEC_ID_PCM_U32BE)
PCMDEF(u32le, "PCM unsigned 32 bit little-endian format",
NULL, CODEC_ID_PCM_U32LE)
PCMDEF(u24be, "PCM unsigned 24 bit big-endian format",
NULL, CODEC_ID_PCM_U24BE)
PCMDEF(u24le, "PCM unsigned 24 bit little-endian format",
NULL, CODEC_ID_PCM_U24LE)
PCMDEF(u16be, "PCM unsigned 16 bit big-endian format",
BE_DEF("uw"), CODEC_ID_PCM_U16BE)
PCMDEF(u16le, "PCM unsigned 16 bit little-endian format",
LE_DEF("uw"), CODEC_ID_PCM_U16LE)
PCMDEF(u8, "PCM unsigned 8 bit format",
"ub", CODEC_ID_PCM_U8)
PCMDEF(alaw, "PCM A-law format",
"al", CODEC_ID_PCM_ALAW)
PCMDEF(mulaw, "PCM mu-law format",
"ul", CODEC_ID_PCM_MULAW)
| 123linslouis-android-video-cutter | jni/libavformat/raw.c | C | asf20 | 33,602 |
/*
* FFM (ffserver live feed) common header
* Copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_FFM_H
#define AVFORMAT_FFM_H

#include <stdint.h>
#include "avformat.h"
#include "avio.h"

/* The FFM file is made of blocks of fixed size */
#define FFM_HEADER_SIZE 14
#define PACKET_ID       0x666d

/* each packet contains frames (which can span several packets */
#define FRAME_HEADER_SIZE    16
#define FLAG_KEY_FRAME       0x01
#define FLAG_DTS             0x02

/* demuxer read-state machine values (see read_state below) */
enum {
    READ_HEADER,
    READ_DATA,
};

/* Shared mux/demux context for the FFM (ffserver live feed) format. */
typedef struct FFMContext {
    /* only reading mode */
    int64_t write_index, file_size;   /* writer position / total feed size */
    int read_state;                   /* READ_HEADER or READ_DATA */
    uint8_t header[FRAME_HEADER_SIZE+4];

    /* read and write */
    int first_packet; /* true if first packet, needed to set the discontinuity tag */
    int packet_size;                  /* fixed block size of the feed */
    int frame_offset;
    int64_t dts;
    uint8_t *packet_ptr, *packet_end; /* cursor / end within packet[] */
    /* NOTE(review): FFM_PACKET_SIZE is not defined in this header;
     * presumably provided by an included header -- verify. */
    uint8_t packet[FFM_PACKET_SIZE];
} FFMContext;

#endif /* AVFORMAT_FFM_H */
| 123linslouis-android-video-cutter | jni/libavformat/ffm.h | C | asf20 | 1,718 |
/*
* HTTP authentication
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_HTTPAUTH_H
#define AVFORMAT_HTTPAUTH_H

/**
 * Authentication types, ordered from weakest to strongest.
 */
typedef enum HTTPAuthType {
    HTTP_AUTH_NONE = 0,    /**< No authentication specified */
    HTTP_AUTH_BASIC,       /**< HTTP 1.0 Basic auth from RFC 1945
                             *  (also in RFC 2617) */
    HTTP_AUTH_DIGEST,      /**< HTTP 1.1 Digest auth from RFC 2617 */
} HTTPAuthType;

/** State needed to compute RFC 2617 Digest responses. */
typedef struct {
    char nonce[300];       /**< Server specified nonce */
    char algorithm[10];    /**< Server specified digest algorithm */
    char qop[30];          /**< Quality of protection, containing the one
                             *  that we've chosen to use, from the
                             *  alternatives that the server offered. */
    char opaque[300];      /**< A server-specified string that should be
                             *  included in authentication responses, not
                             *  included in the actual digest calculation. */
    int nc;                /**< Nonce count, the number of earlier replies
                             *  where this particular nonce has been used. */
} DigestParams;

/**
 * HTTP Authentication state structure. Must be zero-initialized
 * before used with the functions below.
 */
typedef struct {
    /**
     * The currently chosen auth type.
     */
    HTTPAuthType auth_type;
    /**
     * Authentication realm
     */
    char realm[200];
    /**
     * The parameters specific to digest authentication.
     */
    DigestParams digest_params;
} HTTPAuthState;

/** Parse one WWW-Authenticate (or related) header line into state. */
void ff_http_auth_handle_header(HTTPAuthState *state, const char *key,
                                const char *value);
/** Build the Authorization header value for the given request; the caller
 *  owns the returned string. */
char *ff_http_auth_create_response(HTTPAuthState *state, const char *auth,
                                   const char *path, const char *method);

#endif /* AVFORMAT_HTTPAUTH_H */
| 123linslouis-android-video-cutter | jni/libavformat/httpauth.h | C | asf20 | 2,712 |
/*
* Wing Commander III Movie (.mve) File Demuxer
* Copyright (c) 2003 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Wing Commander III Movie file demuxer
* by Mike Melanson (melanson@pcisys.net)
* for more information on the WC3 .mve file format, visit:
* http://www.pcisys.net/~melanson/codecs/
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
/* IFF-style chunk tags used by WC3 .mve files */
#define FORM_TAG MKTAG('F', 'O', 'R', 'M')
#define MOVE_TAG MKTAG('M', 'O', 'V', 'E')
#define PC__TAG  MKTAG('_', 'P', 'C', '_')
#define SOND_TAG MKTAG('S', 'O', 'N', 'D')
#define BNAM_TAG MKTAG('B', 'N', 'A', 'M')
#define SIZE_TAG MKTAG('S', 'I', 'Z', 'E')
#define PALT_TAG MKTAG('P', 'A', 'L', 'T')
#define INDX_TAG MKTAG('I', 'N', 'D', 'X')
#define BRCH_TAG MKTAG('B', 'R', 'C', 'H')
#define SHOT_TAG MKTAG('S', 'H', 'O', 'T')
#define VGA__TAG MKTAG('V', 'G', 'A', ' ')
#define TEXT_TAG MKTAG('T', 'E', 'X', 'T')
#define AUDI_TAG MKTAG('A', 'U', 'D', 'I')

/* video resolution unless otherwise specified */
#define WC3_DEFAULT_WIDTH 320
#define WC3_DEFAULT_HEIGHT 165

/* always use the same PCM audio parameters */
#define WC3_SAMPLE_RATE 22050
#define WC3_AUDIO_CHANNELS 1
#define WC3_AUDIO_BITS 16

/* nice, constant framerate */
#define WC3_FRAME_FPS 15

#define PALETTE_SIZE (256 * 3)
#define PALETTE_COUNT 256

/* Per-file demuxer state for WC3 movies. */
typedef struct Wc3DemuxContext {
    int width;                  /* frame width (SIZE chunk or default) */
    int height;                 /* frame height (SIZE chunk or default) */
    unsigned char *palettes;    /* palette_count * PALETTE_SIZE bytes */
    int palette_count;          /* number of palettes from the _PC_ chunk */
    int64_t pts;                /* current pts, advanced once per AUDI chunk */
    int video_stream_index;
    int audio_stream_index;

    AVPaletteControl palette_control;  /* handed to the video decoder */
} Wc3DemuxContext;

/**
 * palette lookup table that does gamma correction
 *
 * can be calculated by this formula:
 * for i between 0 and 251 inclusive:
 * wc3_pal_lookup[i] = round(pow(i / 256.0, 0.8) * 256);
 * values 252, 253, 254 and 255 are all 0xFD
 * calculating this at runtime should not cause any
 * rounding issues, the maximum difference between
 * the table values and the calculated doubles is
 * about 0.497527
 */
static const unsigned char wc3_pal_lookup[] = {
    0x00, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0E,
    0x10, 0x12, 0x13, 0x15, 0x16, 0x18, 0x19, 0x1A,
    0x1C, 0x1D, 0x1F, 0x20, 0x21, 0x23, 0x24, 0x25,
    0x27, 0x28, 0x29, 0x2A, 0x2C, 0x2D, 0x2E, 0x2F,
    0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x38, 0x39,
    0x3A, 0x3B, 0x3C, 0x3D, 0x3F, 0x40, 0x41, 0x42,
    0x43, 0x44, 0x45, 0x46, 0x48, 0x49, 0x4A, 0x4B,
    0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53,
    0x54, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C,
    0x5D, 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64,
    0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C,
    0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74,
    0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C,
    0x7D, 0x7D, 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83,
    0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B,
    0x8C, 0x8D, 0x8D, 0x8E, 0x8F, 0x90, 0x91, 0x92,
    0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x99,
    0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F, 0xA0, 0xA1,
    0xA2, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8,
    0xA9, 0xAA, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
    0xB0, 0xB1, 0xB2, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6,
    0xB7, 0xB8, 0xB9, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD,
    0xBE, 0xBF, 0xBF, 0xC0, 0xC1, 0xC2, 0xC3, 0xC4,
    0xC5, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB,
    0xCB, 0xCC, 0xCD, 0xCE, 0xCF, 0xD0, 0xD0, 0xD1,
    0xD2, 0xD3, 0xD4, 0xD5, 0xD5, 0xD6, 0xD7, 0xD8,
    0xD9, 0xDA, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
    0xDF, 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE4, 0xE5,
    0xE6, 0xE7, 0xE8, 0xE9, 0xE9, 0xEA, 0xEB, 0xEC,
    0xED, 0xED, 0xEE, 0xEF, 0xF0, 0xF1, 0xF1, 0xF2,
    0xF3, 0xF4, 0xF5, 0xF6, 0xF6, 0xF7, 0xF8, 0xF9,
    0xFA, 0xFA, 0xFB, 0xFC, 0xFD, 0xFD, 0xFD, 0xFD
};
/* A WC3 movie is an IFF-style file: "FORM" at offset 0, "MOVE" at 8. */
static int wc3_probe(AVProbeData *p)
{
    if (p->buf_size >= 12 &&
        AV_RL32(&p->buf[0]) == FORM_TAG &&
        AV_RL32(&p->buf[8]) == MOVE_TAG)
        return AVPROBE_SCORE_MAX;

    return 0;
}
/* Parse the chunk list preceding the first BRCH tag (_PC_ palette count,
 * BNAM title, SIZE resolution override, PALT palettes) and create the
 * video and audio streams. */
static int wc3_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    Wc3DemuxContext *wc3 = s->priv_data;
    ByteIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    AVStream *st;
    int ret = 0;
    int current_palette = 0;
    char *buffer;
    int i;
    unsigned char rotate;

    /* default context members */
    wc3->width = WC3_DEFAULT_WIDTH;
    wc3->height = WC3_DEFAULT_HEIGHT;
    wc3->palettes = NULL;
    wc3->palette_count = 0;
    wc3->pts = 0;
    wc3->video_stream_index = wc3->audio_stream_index = 0;

    /* skip the first 3 32-bit numbers */
    url_fseek(pb, 12, SEEK_CUR);

    /* traverse through the chunks and load the header information before
     * the first BRCH tag */
    fourcc_tag = get_le32(pb);
    /* chunk sizes are 16-bit aligned */
    size = (get_be32(pb) + 1) & (~1);

    do {
        switch (fourcc_tag) {

        case SOND_TAG:
        case INDX_TAG:
            /* SOND unknown, INDX unnecessary; ignore both */
            url_fseek(pb, size, SEEK_CUR);
            break;

        case PC__TAG:
            /* need the number of palettes */
            url_fseek(pb, 8, SEEK_CUR);
            wc3->palette_count = get_le32(pb);
            /* guard the multiplication below against overflow */
            if((unsigned)wc3->palette_count >= UINT_MAX / PALETTE_SIZE){
                wc3->palette_count= 0;
                return -1;
            }
            wc3->palettes = av_malloc(wc3->palette_count * PALETTE_SIZE);
            if (!wc3->palettes)     /* was dereferenced unchecked in PALT */
                return AVERROR(ENOMEM);
            break;

        case BNAM_TAG:
            /* load up the name */
            buffer = av_malloc(size+1);
            if (!buffer)
                return AVERROR(ENOMEM);
            if ((ret = get_buffer(pb, buffer, size)) != size) {
                av_free(buffer);    /* was leaked on short read */
                return AVERROR(EIO);
            }
            buffer[size] = 0;
            av_metadata_set2(&s->metadata, "title", buffer,
                             AV_METADATA_DONT_STRDUP_VAL);
            break;

        case SIZE_TAG:
            /* video resolution override */
            wc3->width  = get_le32(pb);
            wc3->height = get_le32(pb);
            break;

        case PALT_TAG:
            /* one of several palettes */
            if ((unsigned)current_palette >= wc3->palette_count)
                return AVERROR_INVALIDDATA;
            if ((ret = get_buffer(pb,
                &wc3->palettes[current_palette * PALETTE_SIZE],
                PALETTE_SIZE)) != PALETTE_SIZE)
                return AVERROR(EIO);

            /* transform the current palette in place */
            for (i = current_palette * PALETTE_SIZE;
                 i < (current_palette + 1) * PALETTE_SIZE; i++) {
                /* rotate each palette component left by 2 and use the result
                 * as an index into the color component table */
                rotate = ((wc3->palettes[i] << 2) & 0xFF) |
                         ((wc3->palettes[i] >> 6) & 0xFF);
                wc3->palettes[i] = wc3_pal_lookup[rotate];
            }
            current_palette++;
            break;

        default:
            av_log(s, AV_LOG_ERROR, "  unrecognized WC3 chunk: %c%c%c%c (0x%02X%02X%02X%02X)\n",
                (uint8_t)fourcc_tag, (uint8_t)(fourcc_tag >> 8), (uint8_t)(fourcc_tag >> 16), (uint8_t)(fourcc_tag >> 24),
                (uint8_t)fourcc_tag, (uint8_t)(fourcc_tag >> 8), (uint8_t)(fourcc_tag >> 16), (uint8_t)(fourcc_tag >> 24));
            return AVERROR_INVALIDDATA;
        }

        fourcc_tag = get_le32(pb);
        /* chunk sizes are 16-bit aligned */
        size = (get_be32(pb) + 1) & (~1);
        if (url_feof(pb))
            return AVERROR(EIO);

    } while (fourcc_tag != BRCH_TAG);

    /* initialize the decoder streams */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    av_set_pts_info(st, 33, 1, WC3_FRAME_FPS);
    wc3->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_XAN_WC3;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = wc3->width;
    st->codec->height = wc3->height;

    /* palette considerations */
    st->codec->palctrl = &wc3->palette_control;

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    av_set_pts_info(st, 33, 1, WC3_FRAME_FPS);
    wc3->audio_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = CODEC_ID_PCM_S16LE;
    st->codec->codec_tag = 1;
    st->codec->channels = WC3_AUDIO_CHANNELS;
    st->codec->bits_per_coded_sample = WC3_AUDIO_BITS;
    st->codec->sample_rate = WC3_SAMPLE_RATE;
    st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
        st->codec->bits_per_coded_sample;
    st->codec->block_align = WC3_AUDIO_BITS * WC3_AUDIO_CHANNELS;

    return 0;
}
/**
 * Read the next A/V packet from a WC3 movie.
 *
 * Walks the chunk stream until a deliverable packet is found: SHOT
 * chunks switch the active palette, VGA_ chunks yield a video packet,
 * AUDI chunks yield an audio packet (and advance the shared pts),
 * TEXT chunks are only dumped at debug level, BRCH chunks are frame
 * delimiters and are skipped.
 *
 * @return av_get_packet() result (>= 0) on success, negative AVERROR
 *         code on I/O failure or on an unrecognized chunk type
 */
static int wc3_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    Wc3DemuxContext *wc3 = s->priv_data;
    ByteIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int packet_read = 0;
    int ret = 0;
    unsigned char text[1024];
    unsigned int palette_number;
    int i;
    unsigned char r, g, b;
    int base_palette_index;

    while (!packet_read) {

        fourcc_tag = get_le32(pb);
        /* chunk sizes are 16-bit aligned */
        size = (get_be32(pb) + 1) & (~1);
        if (url_feof(pb))
            return AVERROR(EIO);

        switch (fourcc_tag) {

        case BRCH_TAG:
            /* no-op */
            break;

        case SHOT_TAG:
            /* load up new palette */
            palette_number = get_le32(pb);
            if (palette_number >= wc3->palette_count)
                return AVERROR_INVALIDDATA;
            base_palette_index = palette_number * PALETTE_COUNT * 3;
            /* repack the stored R,G,B byte triplets into 0x00RRGGBB words */
            for (i = 0; i < PALETTE_COUNT; i++) {
                r = wc3->palettes[base_palette_index + i * 3 + 0];
                g = wc3->palettes[base_palette_index + i * 3 + 1];
                b = wc3->palettes[base_palette_index + i * 3 + 2];
                wc3->palette_control.palette[i] = (r << 16) | (g << 8) | (b);
            }
            wc3->palette_control.palette_changed = 1;
            break;

        case VGA__TAG:
            /* send out video chunk */
            ret= av_get_packet(pb, pkt, size);
            pkt->stream_index = wc3->video_stream_index;
            pkt->pts = wc3->pts;
            packet_read = 1;
            break;

        case TEXT_TAG:
            /* subtitle chunk: not exported as a stream, only logged;
             * payload is three length-prefixed strings (Eng/Ger/Fre) */
#if 0
            url_fseek(pb, size, SEEK_CUR);
#else
            if ((unsigned)size > sizeof(text) || (ret = get_buffer(pb, text, size)) != size)
                ret = AVERROR(EIO);
            else {
                int i = 0;
                av_log (s, AV_LOG_DEBUG, "Subtitle time!\n");
                av_log (s, AV_LOG_DEBUG, " inglish: %s\n", &text[i + 1]);
                i += text[i] + 1;
                av_log (s, AV_LOG_DEBUG, " doytsch: %s\n", &text[i + 1]);
                i += text[i] + 1;
                av_log (s, AV_LOG_DEBUG, " fronsay: %s\n", &text[i + 1]);
            }
#endif
            break;

        case AUDI_TAG:
            /* send out audio chunk */
            ret= av_get_packet(pb, pkt, size);
            pkt->stream_index = wc3->audio_stream_index;
            pkt->pts = wc3->pts;

            /* time to advance pts; pts is shared and only stepped on audio */
            wc3->pts++;

            packet_read = 1;
            break;

        default:
            av_log (s, AV_LOG_ERROR, " unrecognized WC3 chunk: %c%c%c%c (0x%02X%02X%02X%02X)\n",
                (uint8_t)fourcc_tag, (uint8_t)(fourcc_tag >> 8), (uint8_t)(fourcc_tag >> 16), (uint8_t)(fourcc_tag >> 24),
                (uint8_t)fourcc_tag, (uint8_t)(fourcc_tag >> 8), (uint8_t)(fourcc_tag >> 16), (uint8_t)(fourcc_tag >> 24));
            ret = AVERROR_INVALIDDATA;
            packet_read = 1;
            break;
        }
    }

    return ret;
}
/** Free the palette table allocated during header parsing. */
static int wc3_read_close(AVFormatContext *s)
{
    Wc3DemuxContext *wc3 = s->priv_data;

    av_free(wc3->palettes);

    return 0;
}
AVInputFormat wc3_demuxer = {
"wc3movie",
NULL_IF_CONFIG_SMALL("Wing Commander III movie format"),
sizeof(Wc3DemuxContext),
wc3_probe,
wc3_read_header,
wc3_read_packet,
wc3_read_close,
};
| 123linslouis-android-video-cutter | jni/libavformat/wc3movie.c | C | asf20 | 12,796 |
/*
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "asf.h"
/* GUIDs identifying ASF top-level objects and stream types; bytes are
 * stored in the on-disk order, so they can be memcmp()ed directly
 * against data read from a file. */
const ff_asf_guid ff_asf_header = {
    0x30, 0x26, 0xB2, 0x75, 0x8E, 0x66, 0xCF, 0x11, 0xA6, 0xD9, 0x00, 0xAA, 0x00, 0x62, 0xCE, 0x6C
};
const ff_asf_guid ff_asf_file_header = {
    0xA1, 0xDC, 0xAB, 0x8C, 0x47, 0xA9, 0xCF, 0x11, 0x8E, 0xE4, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65
};
const ff_asf_guid ff_asf_stream_header = {
    0x91, 0x07, 0xDC, 0xB7, 0xB7, 0xA9, 0xCF, 0x11, 0x8E, 0xE6, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65
};
const ff_asf_guid ff_asf_ext_stream_header = {
    0xCB, 0xA5, 0xE6, 0x14, 0x72, 0xC6, 0x32, 0x43, 0x83, 0x99, 0xA9, 0x69, 0x52, 0x06, 0x5B, 0x5A
};
const ff_asf_guid ff_asf_audio_stream = {
    0x40, 0x9E, 0x69, 0xF8, 0x4D, 0x5B, 0xCF, 0x11, 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B
};
const ff_asf_guid ff_asf_audio_conceal_none = {
    // 0x40, 0xa4, 0xf1, 0x49, 0x4ece, 0x11d0, 0xa3, 0xac, 0x00, 0xa0, 0xc9, 0x03, 0x48, 0xf6
    // New value lifted from avifile
    0x00, 0x57, 0xfb, 0x20, 0x55, 0x5B, 0xCF, 0x11, 0xa8, 0xfd, 0x00, 0x80, 0x5f, 0x5c, 0x44, 0x2b
};
const ff_asf_guid ff_asf_audio_conceal_spread = {
    0x50, 0xCD, 0xC3, 0xBF, 0x8F, 0x61, 0xCF, 0x11, 0x8B, 0xB2, 0x00, 0xAA, 0x00, 0xB4, 0xE2, 0x20
};
const ff_asf_guid ff_asf_video_stream = {
    0xC0, 0xEF, 0x19, 0xBC, 0x4D, 0x5B, 0xCF, 0x11, 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B
};
const ff_asf_guid ff_asf_video_conceal_none = {
    0x00, 0x57, 0xFB, 0x20, 0x55, 0x5B, 0xCF, 0x11, 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B
};
const ff_asf_guid ff_asf_command_stream = {
    0xC0, 0xCF, 0xDA, 0x59, 0xE6, 0x59, 0xD0, 0x11, 0xA3, 0xAC, 0x00, 0xA0, 0xC9, 0x03, 0x48, 0xF6
};
const ff_asf_guid ff_asf_comment_header = {
    0x33, 0x26, 0xb2, 0x75, 0x8E, 0x66, 0xCF, 0x11, 0xa6, 0xd9, 0x00, 0xaa, 0x00, 0x62, 0xce, 0x6c
};
const ff_asf_guid ff_asf_codec_comment_header = {
    0x40, 0x52, 0xD1, 0x86, 0x1D, 0x31, 0xD0, 0x11, 0xA3, 0xA4, 0x00, 0xA0, 0xC9, 0x03, 0x48, 0xF6
};
const ff_asf_guid ff_asf_codec_comment1_header = {
    0x41, 0x52, 0xd1, 0x86, 0x1D, 0x31, 0xD0, 0x11, 0xa3, 0xa4, 0x00, 0xa0, 0xc9, 0x03, 0x48, 0xf6
};
const ff_asf_guid ff_asf_data_header = {
    0x36, 0x26, 0xb2, 0x75, 0x8E, 0x66, 0xCF, 0x11, 0xa6, 0xd9, 0x00, 0xaa, 0x00, 0x62, 0xce, 0x6c
};
const ff_asf_guid ff_asf_head1_guid = {
    0xb5, 0x03, 0xbf, 0x5f, 0x2E, 0xA9, 0xCF, 0x11, 0x8e, 0xe3, 0x00, 0xc0, 0x0c, 0x20, 0x53, 0x65
};
const ff_asf_guid ff_asf_head2_guid = {
    0x11, 0xd2, 0xd3, 0xab, 0xBA, 0xA9, 0xCF, 0x11, 0x8e, 0xe6, 0x00, 0xc0, 0x0c, 0x20, 0x53, 0x65
};
const ff_asf_guid ff_asf_extended_content_header = {
    0x40, 0xA4, 0xD0, 0xD2, 0x07, 0xE3, 0xD2, 0x11, 0x97, 0xF0, 0x00, 0xA0, 0xC9, 0x5E, 0xA8, 0x50
};
const ff_asf_guid ff_asf_simple_index_header = {
    0x90, 0x08, 0x00, 0x33, 0xB1, 0xE5, 0xCF, 0x11, 0x89, 0xF4, 0x00, 0xA0, 0xC9, 0x03, 0x49, 0xCB
};
const ff_asf_guid ff_asf_ext_stream_embed_stream_header = {
    0xe2, 0x65, 0xfb, 0x3a, 0xEF, 0x47, 0xF2, 0x40, 0xac, 0x2c, 0x70, 0xa9, 0x0d, 0x71, 0xd3, 0x43
};
const ff_asf_guid ff_asf_ext_stream_audio_stream = {
    0x9d, 0x8c, 0x17, 0x31, 0xE1, 0x03, 0x28, 0x45, 0xb5, 0x82, 0x3d, 0xf9, 0xdb, 0x22, 0xf5, 0x03
};
const ff_asf_guid ff_asf_metadata_header = {
    0xea, 0xcb, 0xf8, 0xc5, 0xaf, 0x5b, 0x77, 0x48, 0x84, 0x67, 0xaa, 0x8c, 0x44, 0xfa, 0x4c, 0xca
};
const ff_asf_guid ff_asf_marker_header = {
    0x01, 0xCD, 0x87, 0xF4, 0x51, 0xA9, 0xCF, 0x11, 0x8E, 0xE6, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65
};
/* I am not a number !!! This GUID is the one found on the PC used to
   generate the stream */
const ff_asf_guid ff_asf_my_guid = {
    /* only 11 initializers are given; C zero-fills the rest, so the
     * full 16 bytes are all zero */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
const ff_asf_guid ff_asf_language_guid = {
    0xa9, 0x46, 0x43, 0x7c, 0xe0, 0xef, 0xfc, 0x4b, 0xb2, 0x29, 0x39, 0x3e, 0xde, 0x41, 0x5c, 0x85
};
const ff_asf_guid ff_asf_content_encryption = {
    0xfb, 0xb3, 0x11, 0x22, 0x23, 0xbd, 0xd2, 0x11, 0xb4, 0xb7, 0x00, 0xa0, 0xc9, 0x55, 0xfc, 0x6e
};
const ff_asf_guid ff_asf_ext_content_encryption = {
    0x14, 0xe6, 0x8a, 0x29, 0x22, 0x26, 0x17, 0x4c, 0xb9, 0x35, 0xda, 0xe0, 0x7e, 0xe9, 0x28, 0x9c
};
const ff_asf_guid ff_asf_digital_signature = {
    0xfc, 0xb3, 0x11, 0x22, 0x23, 0xbd, 0xd2, 0x11, 0xb4, 0xb7, 0x00, 0xa0, 0xc9, 0x55, 0xfc, 0x6e
};
/* List of official tags at http://msdn.microsoft.com/en-us/library/dd743066(VS.85).aspx */
/* Maps ASF/Windows Media attribute names onto FFmpeg's generic
 * metadata keys; the all-zero entry terminates the table. */
const AVMetadataConv ff_asf_metadata_conv[] = {
    { "WM/AlbumArtist" , "album_artist"},
    { "WM/AlbumTitle" , "album" },
    { "Author" , "artist" },
    { "Description" , "comment" },
    { "WM/Composer" , "composer" },
    { "WM/EncodedBy" , "encoded_by" },
    { "WM/EncodingSettings", "encoder" },
    { "WM/Genre" , "genre" },
    { "WM/Language" , "language" },
    { "WM/OriginalFilename", "filename" },
    { "WM/PartOfSet" , "disc" },
    { "WM/Publisher" , "publisher" },
    { "WM/Tool" , "encoder" },
    { "WM/TrackNumber" , "track" },
    { "WM/Track" , "track" },
    // { "Year" , "date" }, TODO: conversion year<->date
    { 0 }
};
/**
 * Convert a UTF-8 string to UTF-16LE and write it to s, followed by a
 * 16-bit zero terminator.  No length field is written (hence "nolen").
 *
 * @param tag NUL-terminated UTF-8 input string
 * @return number of bytes written, including the terminator
 */
int ff_put_str16_nolen(ByteIOContext *s, const char *tag)
{
    const uint8_t *q = tag;
    int ret = 0;

    while (*q) {
        uint32_t ch;
        uint16_t tmp;

        /* on malformed UTF-8, GET_UTF8 breaks out of the loop */
        GET_UTF8(ch, *q++, break;)
        PUT_UTF16(ch, tmp, put_le16(s, tmp);ret += 2;)
    }
    put_le16(s, 0);
    ret += 2;
    return ret;
}
| 123linslouis-android-video-cutter | jni/libavformat/asf.c | C | asf20 | 6,270 |
/*
* Copyright (C) 2008 Ramiro Polla <ramiro@lisha.ufsc.br>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/bytestream.h"
#include "avformat.h"
#define HEADER_SIZE 24
/*
* Header structure:
* uint16_t ss; // struct size
* uint16_t width; // frame width
* uint16_t height; // frame height
* uint16_t ff; // keyframe + some other info(???)
* uint32_t size; // size of data
* uint32_t fourcc; // ML20
* uint32_t u3; // ?
* uint32_t ts; // time
*/
/**
 * Probe for an MSN webcam TCP stream by scanning the buffer for a
 * valid frame header: 24-byte struct size, a known frame geometry
 * (320x240 or 160x120) and the ML20 fourcc.
 *
 * @return an AVPROBE_SCORE_* value when a header is found, 0 otherwise.
 *         Fix: previously returned -1 on no match, which is outside
 *         the read_probe() contract of a non-negative score.
 */
static int msnwc_tcp_probe(AVProbeData *p)
{
    int i;

    for(i = 0 ; i + HEADER_SIZE <= p->buf_size ; i++) {
        uint16_t width, height;
        uint32_t fourcc;
        const uint8_t *bytestream = p->buf+i;

        if(bytestream_get_le16(&bytestream) != HEADER_SIZE)
            continue;
        width  = bytestream_get_le16(&bytestream);
        height = bytestream_get_le16(&bytestream);
        /* only these two frame sizes are known to occur */
        if(!(width==320 && height==240) && !(width==160 && height==120))
            continue;
        bytestream += 2; // keyframe
        bytestream += 4; // size
        fourcc = bytestream_get_le32(&bytestream);
        if(fourcc != MKTAG('M', 'L', '2', '0'))
            continue;

        if(i) {
            if(i < 14)  /* starts with SwitchBoard connection info */
                return AVPROBE_SCORE_MAX / 2;
            else        /* starts in the middle of stream */
                return AVPROBE_SCORE_MAX / 3;
        } else {
            return AVPROBE_SCORE_MAX;
        }
    }

    return 0;
}
/**
 * Create the single MIMIC video stream and position the stream just
 * past the first byte of the first frame header.
 */
static int msnwc_tcp_read_header(AVFormatContext *ctx, AVFormatParameters *ap)
{
    ByteIOContext *pb = ctx->pb;
    AVCodecContext *codec;
    AVStream *st;

    st = av_new_stream(ctx, 0);
    if(!st)
        return AVERROR(ENOMEM);

    codec = st->codec;
    codec->codec_type = AVMEDIA_TYPE_VIDEO;
    codec->codec_id = CODEC_ID_MIMIC;
    codec->codec_tag = MKTAG('M', 'L', '2', '0');

    /* timestamps are in milliseconds */
    av_set_pts_info(st, 32, 1, 1000);

    /* Some files start with "connected\r\n\r\n".
     * So skip until we find the first byte of struct size.
     * Note: the loop body is intentionally empty; the matching byte is
     * consumed here and read_packet compensates with url_fskip(pb, 1). */
    while(get_byte(pb) != HEADER_SIZE && !url_feof(pb));

    if(url_feof(pb)) {
        av_log(ctx, AV_LOG_ERROR, "Could not find valid start.");
        return -1;
    }

    return 0;
}
/**
 * Read one webcam frame.  On entry the previous call (or read_header)
 * has already consumed the first byte of the 24-byte frame header, so
 * this function skips the remainder of the ss field, parses the rest
 * of the header and returns the ML20 payload.
 */
static int msnwc_tcp_read_packet(AVFormatContext *ctx, AVPacket *pkt)
{
    ByteIOContext *pb = ctx->pb;
    uint16_t keyframe;
    uint32_t size, timestamp;

    url_fskip(pb, 1); /* one byte has been read ahead */
    url_fskip(pb, 2); /* width */
    url_fskip(pb, 2); /* height */
    keyframe = get_le16(pb);
    size = get_le32(pb);
    url_fskip(pb, 4); /* fourcc ("ML20") */
    url_fskip(pb, 4); /* u3, unknown field */
    timestamp = get_le32(pb);

    if(!size || av_get_packet(pb, pkt, size) != size)
        return -1;

    url_fskip(pb, 1); /* Read ahead one byte of struct size like read_header */

    pkt->pts = timestamp;
    pkt->dts = timestamp;
    pkt->stream_index = 0;

    /* Some aMsn generated videos (or was it Mercury Messenger?) don't set
     * this bit and rely on the codec to get keyframe information */
    if(keyframe&1)
        pkt->flags |= AV_PKT_FLAG_KEY;

    return HEADER_SIZE + size;
}
AVInputFormat msnwc_tcp_demuxer = {
"msnwctcp",
NULL_IF_CONFIG_SMALL("MSN TCP Webcam stream"),
0,
msnwc_tcp_probe,
msnwc_tcp_read_header,
msnwc_tcp_read_packet,
};
| 123linslouis-android-video-cutter | jni/libavformat/msnwc_tcp.c | C | asf20 | 4,055 |
/*
* RTSP muxer
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include <sys/time.h>
#if HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#include "network.h"
#include "rtsp.h"
#include <libavutil/intreadwrite.h>
/**
 * Send the RTSP RECORD request that starts the actual data transfer
 * and move the session into the streaming state.
 *
 * @return 0 on success, -1 if the server did not answer 200 OK
 */
static int rtsp_write_record(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    RTSPMessageHeader reply1, *reply = &reply1;
    char cmd[1024];

    /* record starting from the very beginning of the timeline */
    snprintf(cmd, sizeof(cmd),
             "Range: npt=%0.3f-\r\n",
             (double) 0);
    ff_rtsp_send_cmd(s, "RECORD", rt->control_uri, cmd, reply, NULL);
    if (reply->status_code != RTSP_STATUS_OK)
        return -1;
    rt->state = RTSP_STATE_STREAMING;
    return 0;
}
/**
 * Establish the RTSP session (connect + per-stream setup handled by
 * ff_rtsp_connect) and switch the server to recording mode.
 */
static int rtsp_write_header(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;
    int ret;

    ret = ff_rtsp_connect(s);
    if (ret)
        return ret;

    if (rtsp_write_record(s) < 0) {
        /* undo everything ff_rtsp_connect() set up */
        ff_rtsp_close_streams(s);
        url_close(rt->rtsp_hd);
        return AVERROR_INVALIDDATA;
    }
    return 0;
}
/**
 * Flush the chained RTP muxer's dynamic buffer over the RTSP TCP
 * control connection using interleaved framing.  The dynamic buffer
 * holds packets each prefixed by a 32-bit big-endian length; every
 * packet is sent as '$' <channel> <16-bit length> <payload>.
 */
static int tcp_write_packet(AVFormatContext *s, RTSPStream *rtsp_st)
{
    RTSPState *rt = s->priv_data;
    AVFormatContext *rtpctx = rtsp_st->transport_priv;
    uint8_t *buf, *ptr;
    int size;
    uint8_t interleave_header[4];

    /* take ownership of everything queued by the RTP packetizer */
    size = url_close_dyn_buf(rtpctx->pb, &buf);
    ptr = buf;
    while (size > 4) {
        uint32_t packet_len = AV_RB32(ptr);
        int id;
        ptr += 4;
        size -= 4;
        /* stop on a truncated or obviously bogus length prefix */
        if (packet_len > size || packet_len < 2)
            break;
        /* second byte is the packet type: 200-204 are RTCP */
        if (ptr[1] >= 200 && ptr[1] <= 204)
            id = rtsp_st->interleaved_max; /* RTCP */
        else
            id = rtsp_st->interleaved_min; /* RTP */
        interleave_header[0] = '$';
        interleave_header[1] = id;
        AV_WB16(interleave_header + 2, packet_len);
        url_write(rt->rtsp_hd, interleave_header, 4);
        url_write(rt->rtsp_hd, ptr, packet_len);
        ptr += packet_len;
        size -= packet_len;
    }
    av_free(buf);
    /* re-open the buffer so the muxer can queue the next batch */
    url_open_dyn_packet_buf(&rtpctx->pb, RTSP_TCP_MAX_PACKET_SIZE);
    return 0;
}
/**
 * Mux one packet.  First drains any pending RTSP requests the server
 * may have sent on the control connection (without blocking), then
 * hands the packet to the per-stream chained RTP muxer; for TCP lower
 * transport the packetized output is flushed via tcp_write_packet().
 */
static int rtsp_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    RTSPState *rt = s->priv_data;
    RTSPStream *rtsp_st;
    fd_set rfds;
    int n, tcp_fd;
    struct timeval tv;
    AVFormatContext *rtpctx;
    AVPacket local_pkt;
    int ret;

    tcp_fd = url_get_file_handle(rt->rtsp_hd);

    /* poll (zero timeout) for incoming server requests until none left */
    while (1) {
        FD_ZERO(&rfds);
        FD_SET(tcp_fd, &rfds);
        tv.tv_sec = 0;
        tv.tv_usec = 0;
        n = select(tcp_fd + 1, &rfds, NULL, NULL, &tv);
        if (n <= 0)
            break;
        if (FD_ISSET(tcp_fd, &rfds)) {
            RTSPMessageHeader reply;

            /* Don't let ff_rtsp_read_reply handle interleaved packets,
             * since it would block and wait for an RTSP reply on the socket
             * (which may not be coming any time soon) if it handles
             * interleaved packets internally. */
            ret = ff_rtsp_read_reply(s, &reply, NULL, 1);
            if (ret < 0)
                return AVERROR(EPIPE);
            if (ret == 1)
                ff_rtsp_skip_packet(s);
            /* XXX: parse message */
            if (rt->state != RTSP_STATE_STREAMING)
                return AVERROR(EPIPE);
        }
    }

    if (pkt->stream_index < 0 || pkt->stream_index >= rt->nb_rtsp_streams)
        return AVERROR_INVALIDDATA;
    rtsp_st = rt->rtsp_streams[pkt->stream_index];
    rtpctx = rtsp_st->transport_priv;

    /* Use a local packet for writing to the chained muxer, otherwise
     * the internal stream_index = 0 becomes visible to the muxer user. */
    local_pkt = *pkt;
    local_pkt.stream_index = 0;
    ret = av_write_frame(rtpctx, &local_pkt);
    /* av_write_frame does all the RTP packetization. If using TCP as
     * transport, rtpctx->pb is only a dyn_packet_buf that queues up the
     * packets, so we need to send them out on the TCP connection separately.
     */
    if (!ret && rt->lower_transport == RTSP_LOWER_TRANSPORT_TCP)
        ret = tcp_write_packet(s, rtsp_st);
    return ret;
}
/** Send TEARDOWN (fire-and-forget) and release all session resources. */
static int rtsp_write_close(AVFormatContext *s)
{
    RTSPState *rt = s->priv_data;

    ff_rtsp_send_cmd_async(s, "TEARDOWN", rt->control_uri, NULL);

    ff_rtsp_close_streams(s);
    url_close(rt->rtsp_hd);
    ff_network_close();
    return 0;
}
/** RTSP muxer definition.
 *
 * Fully designated initializers for consistency: the original mixed
 * positional fields with a designated .flags, which C99 allows but is
 * harder to audit than naming every field. */
AVOutputFormat rtsp_muxer = {
    .name           = "rtsp",
    .long_name      = NULL_IF_CONFIG_SMALL("RTSP output format"),
    .priv_data_size = sizeof(RTSPState),
    .audio_codec    = CODEC_ID_PCM_MULAW,
    .video_codec    = CODEC_ID_NONE,
    .write_header   = rtsp_write_header,
    .write_packet   = rtsp_write_packet,
    .write_trailer  = rtsp_write_close,
    .flags          = AVFMT_NOFILE | AVFMT_GLOBALHEADER,
};
| 123linslouis-android-video-cutter | jni/libavformat/rtspenc.c | C | asf20 | 5,332 |
/*
* Linux Media Labs MPEG-4 demuxer
* Copyright (c) 2008 Ivo van Poorten
*
* Due to a lack of sample files, only files with one channel are supported.
* u-law and ADPCM audio are unsupported for the same reason.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#define LMLM4_I_FRAME 0x00
#define LMLM4_P_FRAME 0x01
#define LMLM4_B_FRAME 0x02
#define LMLM4_INVALID 0x03
#define LMLM4_MPEG1L2 0x04
#define LMLM4_MAX_PACKET_SIZE 1024 * 1024
/**
 * Probe for an LMLM4 stream: the first packet header must have a zero
 * channel word, a recognized frame type and a sane non-zero size, and
 * the payload must start with either an MPEG audio sync pattern
 * (LMLM4_MPEG1L2) or a PES start code (video frames).
 */
static int lmlm4_probe(AVProbeData * pd) {
    unsigned char *buf = pd->buf;
    unsigned int frame_type, packet_size;

    frame_type  = AV_RB16(buf+2);
    packet_size = AV_RB32(buf+4);

    if (!AV_RB16(buf) && frame_type <= LMLM4_MPEG1L2 && packet_size &&
        frame_type != LMLM4_INVALID && packet_size <= LMLM4_MAX_PACKET_SIZE) {

        if (frame_type == LMLM4_MPEG1L2) {
            /* require an MPEG audio frame sync pattern */
            if ((AV_RB16(buf+8) & 0xfffe) != 0xfffc)
                return 0;
            /* I could calculate the audio framesize and compare with
             * packet_size-8, but that seems overkill */
            return AVPROBE_SCORE_MAX / 3;
        } else if (AV_RB24(buf+8) == 0x000001) { /* PES Signal */
            return AVPROBE_SCORE_MAX / 5;
        }
    }

    return 0;
}
/**
 * Create the two fixed LMLM4 streams: MPEG-4 video at 30000/1001 fps
 * and MP2 audio.  Codec parameters are left for the parsers to extract
 * from the compressed bitstream.
 */
static int lmlm4_read_header(AVFormatContext *s, AVFormatParameters *ap) {
    AVStream *st;

    if (!(st = av_new_stream(s, 0)))
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_MPEG4;
    st->need_parsing = AVSTREAM_PARSE_HEADERS;
    av_set_pts_info(st, 64, 1001, 30000);

    if (!(st = av_new_stream(s, 1)))
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = CODEC_ID_MP2;
    st->need_parsing = AVSTREAM_PARSE_HEADERS;

    /* the parameters will be extracted from the compressed bitstream */
    return 0;
}
/**
 * Read one LMLM4 packet: an 8-byte header (channel, frame type, total
 * packet size) followed by the payload, with the whole packet padded
 * to a 512-byte boundary in the file.
 *
 * @return payload size (> 0) on success, negative AVERROR on failure
 */
static int lmlm4_read_packet(AVFormatContext *s, AVPacket *pkt) {
    ByteIOContext *pb = s->pb;
    int ret;
    unsigned int frame_type, packet_size, padding, frame_size;

    get_be16(pb);       /* channel number */
    frame_type = get_be16(pb);
    packet_size = get_be32(pb);
    /* packets are stored padded up to a multiple of 512 bytes */
    padding = -packet_size & 511;
    frame_size = packet_size - 8;

    if (frame_type > LMLM4_MPEG1L2 || frame_type == LMLM4_INVALID) {
        av_log(s, AV_LOG_ERROR, "invalid or unsupported frame_type\n");
        return AVERROR(EIO);
    }
    if (packet_size > LMLM4_MAX_PACKET_SIZE) {
        av_log(s, AV_LOG_ERROR, "packet size exceeds maximum\n");
        return AVERROR(EIO);
    }

    if ((ret = av_get_packet(pb, pkt, frame_size)) <= 0)
        return AVERROR(EIO);

    url_fskip(pb, padding);

    switch (frame_type) {
        case LMLM4_I_FRAME:
            pkt->flags = AV_PKT_FLAG_KEY;
            /* fallthrough: all picture types belong to the video stream */
        case LMLM4_P_FRAME:
        case LMLM4_B_FRAME:
            pkt->stream_index = 0;
            break;
        case LMLM4_MPEG1L2:
            pkt->stream_index = 1;
            break;
    }

    return ret;
}
AVInputFormat lmlm4_demuxer = {
"lmlm4",
NULL_IF_CONFIG_SMALL("lmlm4 raw format"),
0,
lmlm4_probe,
lmlm4_read_header,
lmlm4_read_packet,
};
| 123linslouis-android-video-cutter | jni/libavformat/lmlm4.c | C | asf20 | 3,952 |
/*
* RTP demuxer definitions
* Copyright (c) 2002 Fabrice Bellard
* Copyright (c) 2006 Ryan Martell <rdm4@martellventures.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_RTPDEC_H
#define AVFORMAT_RTPDEC_H
#include "libavcodec/avcodec.h"
#include "avformat.h"
#include "rtp.h"
/** Structure listing useful vars to parse RTP packet payload */
typedef struct rtp_payload_data
{
    int sizelength;
    int indexlength;
    int indexdeltalength;
    int profile_level_id;
    int streamtype;
    int objecttype;
    char *mode;

    /** mpeg 4 AU headers */
    struct AUHeaders {
        int size;
        int index;
        int cts_flag;
        int cts;
        int dts_flag;
        int dts;
        int rap_flag;
        int streamstate;
    } *au_headers;
    int au_headers_allocated;    ///< presumably the allocated capacity of au_headers — confirm against the parser
    int nb_au_headers;           ///< number of valid entries in au_headers
    int au_headers_length_bytes;
    int cur_au_index;
} RTPPayloadData;
/* opaque per-payload-handler private state */
typedef struct PayloadContext PayloadContext;
typedef struct RTPDynamicProtocolHandler_s RTPDynamicProtocolHandler;

#define RTP_MIN_PACKET_LENGTH 12
#define RTP_MAX_PACKET_LENGTH 1500 /* XXX: suppress this define */

typedef struct RTPDemuxContext RTPDemuxContext;

/** Allocate a depacketization context for one stream. */
RTPDemuxContext *rtp_parse_open(AVFormatContext *s1, AVStream *st, URLContext *rtpc, int payload_type, RTPPayloadData *rtp_payload_data);
/** Attach a dynamic payload handler (and its private data) to s. */
void rtp_parse_set_dynamic_protocol(RTPDemuxContext *s, PayloadContext *ctx,
                                    RTPDynamicProtocolHandler *handler);
/** Feed one raw RTP packet to the depacketizer; may emit a packet into pkt. */
int rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
                     const uint8_t *buf, int len);
/** Free a context allocated by rtp_parse_open(). */
void rtp_parse_close(RTPDemuxContext *s);

#if (LIBAVFORMAT_VERSION_MAJOR <= 53)
/* deprecated: superseded by the rtp/rtcp-specific variants below */
int rtp_get_local_port(URLContext *h);
#endif
int rtp_get_local_rtp_port(URLContext *h);
int rtp_get_local_rtcp_port(URLContext *h);

int rtp_set_remote_url(URLContext *h, const char *uri);
#if (LIBAVFORMAT_VERSION_MAJOR <= 52)
void rtp_get_file_handles(URLContext *h, int *prtp_fd, int *prtcp_fd);
#endif

/**
 * Send a dummy packet on both port pairs to set up the connection
 * state in potential NAT routers, so that we're able to receive
 * packets.
 *
 * Note, this only works if the NAT router doesn't remap ports. This
 * isn't a standardized procedure, but it works in many cases in practice.
 *
 * The same routine is used with RDT too, even if RDT doesn't use normal
 * RTP packets otherwise.
 */
void rtp_send_punch_packets(URLContext* rtp_handle);

/**
 * some rtp servers assume client is dead if they don't hear from them...
 * so we send a Receiver Report to the provided ByteIO context
 * (we don't have access to the rtcp handle from here)
 */
int rtp_check_and_send_back_rr(RTPDemuxContext *s, int count);
// these statistics are used for rtcp receiver reports...
/** Per-source reception statistics, as needed for RTCP receiver reports. */
typedef struct {
    uint16_t max_seq;           ///< highest sequence number seen
    uint32_t cycles;            ///< shifted count of sequence number cycles
    uint32_t base_seq;          ///< base sequence number
    uint32_t bad_seq;           ///< last bad sequence number + 1
    int probation;              ///< sequence packets till source is valid
    int received;               ///< packets received
    int expected_prior;         ///< packets expected in last interval
    int received_prior;         ///< packets received in last interval
    uint32_t transit;           ///< relative transit time for previous packet
    uint32_t jitter;            ///< estimated jitter.
} RTPStatistics;
#define RTP_FLAG_KEY    0x1 ///< RTP packet contains a keyframe
#define RTP_FLAG_MARKER 0x2 ///< RTP marker bit was set for this packet

/**
 * Packet parsing for "private" payloads in the RTP specs.
 *
 * @param ctx RTSP demuxer context
 * @param s stream context
 * @param st stream that this packet belongs to
 * @param pkt packet in which to write the parsed data
 * @param timestamp pointer in which to write the timestamp of this RTP packet
 * @param buf pointer to raw RTP packet data
 * @param len length of buf
 * @param flags flags from the RTP packet header (RTP_FLAG_*)
 */
typedef int (*DynamicPayloadPacketHandlerProc) (AVFormatContext *ctx,
                                                PayloadContext *s,
                                                AVStream *st,
                                                AVPacket * pkt,
                                                uint32_t *timestamp,
                                                const uint8_t * buf,
                                                int len, int flags);

/** Handler for one dynamic RTP payload type; handlers are kept in a
 * singly linked list through the 'next' member. */
struct RTPDynamicProtocolHandler_s {
    // fields from AVRtpDynamicPayloadType_s
    const char enc_name[50]; /* XXX: still why 50 ? ;-) */
    enum AVMediaType codec_type;
    enum CodecID codec_id;

    // may be null
    int (*parse_sdp_a_line) (AVFormatContext *s,
                             int st_index,
                             PayloadContext *priv_data,
                             const char *line); ///< Parse the a= line from the sdp field
    PayloadContext *(*open) (void); ///< allocate any data needed by the rtp parsing for this dynamic data.
    void (*close)(PayloadContext *protocol_data); ///< free any data needed by the rtp parsing for this dynamic data.
    DynamicPayloadPacketHandlerProc parse_packet; ///< parse handler for this dynamic packet.

    struct RTPDynamicProtocolHandler_s *next;
};
// moved out of rtp.c, because the h264 decoder needs to know about this structure..
/** Per-stream state of the RTP depacketizer. */
struct RTPDemuxContext {
    AVFormatContext *ic;
    AVStream *st;
    int payload_type;
    uint32_t ssrc;              ///< synchronization source of the stream
    uint16_t seq;               ///< last RTP sequence number seen
    uint32_t timestamp;
    uint32_t base_timestamp;
    uint32_t cur_timestamp;
    int64_t range_start_offset;
    int max_payload_size;
    struct MpegTSContext *ts;   /* only used for MP2T payloads */
    int read_buf_index;
    int read_buf_size;
    /* used to send back RTCP RR */
    URLContext *rtp_ctx;
    char hostname[256];

    RTPStatistics statistics; ///< Statistics for this stream (used by RTCP receiver reports)

    /* rtcp sender statistics receive */
    int64_t last_rtcp_ntp_time;    // TODO: move into statistics
    int64_t first_rtcp_ntp_time;   // TODO: move into statistics
    uint32_t last_rtcp_timestamp;  // TODO: move into statistics

    /* rtcp sender statistics */
    unsigned int packet_count;     // TODO: move into statistics (outgoing)
    unsigned int octet_count;      // TODO: move into statistics (outgoing)
    unsigned int last_octet_count; // TODO: move into statistics (outgoing)
    int first_packet;

    /* buffer for output */
    uint8_t buf[RTP_MAX_PACKET_LENGTH];
    uint8_t *buf_ptr;

    /* special infos for au headers parsing */
    RTPPayloadData *rtp_payload_data; // TODO: Move into dynamic payload handlers

    /* dynamic payload stuff */
    DynamicPayloadPacketHandlerProc parse_packet;     ///< This is also copied from the dynamic protocol handler structure
    PayloadContext *dynamic_protocol_context;        ///< This is a copy from the values setup from the sdp parsing, in rtsp.c don't free me.
    int max_frames_per_packet;
};

/* head of the registered dynamic payload handler list */
extern RTPDynamicProtocolHandler *RTPFirstDynamicPayloadHandler;

void ff_register_dynamic_payload_handler(RTPDynamicProtocolHandler *handler);
int ff_rtsp_next_attr_and_value(const char **p, char *attr, int attr_size, char *value, int value_size); ///< from rtsp.c, but used by rtp dynamic protocol handlers.

/* register all built-in dynamic payload handlers */
void av_register_rtp_dynamic_payload_handlers(void);

#endif /* AVFORMAT_RTPDEC_H */
| 123linslouis-android-video-cutter | jni/libavformat/rtpdec.h | C | asf20 | 8,162 |
# Android NDK build script: compile libavformat as a static library.
LOCAL_PATH := $(call my-dir)

include $(CLEAR_VARS)

# av.mk supplies FFFILES, FFCFLAGS, FFLIBS and FFNAME for this module.
include $(LOCAL_PATH)/../av.mk

LOCAL_SRC_FILES := $(FFFILES)

LOCAL_C_INCLUDES := \
    $(LOCAL_PATH) \
    $(LOCAL_PATH)/..

LOCAL_CFLAGS += $(FFCFLAGS)
# Force-include string.h everywhere and map FFmpeg's spelling of the
# ipv6 multicast member onto Bionic's ipv6mr_ifindex.
LOCAL_CFLAGS += -include "string.h" -Dipv6mr_interface=ipv6mr_ifindex
# zlib is the only external link-time dependency.
LOCAL_LDLIBS := -lz

LOCAL_STATIC_LIBRARIES := $(FFLIBS)

LOCAL_MODULE := $(FFNAME)
include $(BUILD_STATIC_LIBRARY) | 123linslouis-android-video-cutter | jni/libavformat/Android.mk | Makefile | asf20 | 382 |
/*
* Monkey's Audio APE demuxer
* Copyright (c) 2007 Benjamin Zores <ben@geexbox.org>
* based upon libdemac from Dave Chapman.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdio.h>
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "apetag.h"
/* set to 1 to compile in the verbose header dump in ape_dumpinfo() */
#define ENABLE_DEBUG 0

/* The earliest and latest file formats supported by this library */
#define APE_MIN_VERSION 3950
#define APE_MAX_VERSION 3990

#define MAC_FORMAT_FLAG_8_BIT 1 // is 8-bit [OBSOLETE]
#define MAC_FORMAT_FLAG_CRC 2 // uses the new CRC32 error detection [OBSOLETE]
#define MAC_FORMAT_FLAG_HAS_PEAK_LEVEL 4 // uint32 nPeakLevel after the header [OBSOLETE]
#define MAC_FORMAT_FLAG_24_BIT 8 // is 24-bit [OBSOLETE]
#define MAC_FORMAT_FLAG_HAS_SEEK_ELEMENTS 16 // has the number of seek elements after the peak level
#define MAC_FORMAT_FLAG_CREATE_WAV_HEADER 32 // create the wave header on decompression (not stored)

#define MAC_SUBFRAME_SIZE 4608

#define APE_EXTRADATA_SIZE 6

/* one audio frame as located in the file */
typedef struct {
    int64_t pos;    /* absolute file position of the frame */
    int nblocks;    /* number of audio blocks in this frame */
    int size;       /* frame size in bytes */
    int skip;       /* NOTE(review): appears to be a small alignment offset into the frame — confirm against the (not shown) header parser */
    int64_t pts;    /* presentation timestamp of the frame */
} APEFrame;

typedef struct {
    /* Derived fields */
    uint32_t junklength;
    uint32_t firstframe;
    uint32_t totalsamples;
    int currentframe;
    APEFrame *frames;

    /* Info from Descriptor Block */
    char magic[4];
    int16_t fileversion;
    int16_t padding1;
    uint32_t descriptorlength;
    uint32_t headerlength;
    uint32_t seektablelength;
    uint32_t wavheaderlength;
    uint32_t audiodatalength;
    uint32_t audiodatalength_high;
    uint32_t wavtaillength;
    uint8_t md5[16];

    /* Info from Header Block */
    uint16_t compressiontype;
    uint16_t formatflags;
    uint32_t blocksperframe;
    uint32_t finalframeblocks;
    uint32_t totalframes;
    uint16_t bps;
    uint16_t channels;
    uint32_t samplerate;

    /* Seektable */
    uint32_t *seektable;
} APEContext;
/* Detect a Monkey's Audio stream by its 4-byte "MAC " signature. */
static int ape_probe(AVProbeData * p)
{
    const unsigned char *sig = p->buf;
    int is_mac = sig[0] == 'M' && sig[1] == 'A' &&
                 sig[2] == 'C' && sig[3] == ' ';

    return is_mac ? AVPROBE_SCORE_MAX : 0;
}
/**
 * Dump every parsed descriptor/header field, the seektable and the
 * frame table at AV_LOG_DEBUG level.  Compiled to an empty function
 * unless ENABLE_DEBUG is set.
 */
static void ape_dumpinfo(AVFormatContext * s, APEContext * ape_ctx)
{
#if ENABLE_DEBUG
    int i;

    av_log(s, AV_LOG_DEBUG, "Descriptor Block:\n\n");
    av_log(s, AV_LOG_DEBUG, "magic = \"%c%c%c%c\"\n", ape_ctx->magic[0], ape_ctx->magic[1], ape_ctx->magic[2], ape_ctx->magic[3]);
    av_log(s, AV_LOG_DEBUG, "fileversion = %d\n", ape_ctx->fileversion);
    av_log(s, AV_LOG_DEBUG, "descriptorlength = %d\n", ape_ctx->descriptorlength);
    av_log(s, AV_LOG_DEBUG, "headerlength = %d\n", ape_ctx->headerlength);
    av_log(s, AV_LOG_DEBUG, "seektablelength = %d\n", ape_ctx->seektablelength);
    av_log(s, AV_LOG_DEBUG, "wavheaderlength = %d\n", ape_ctx->wavheaderlength);
    av_log(s, AV_LOG_DEBUG, "audiodatalength = %d\n", ape_ctx->audiodatalength);
    av_log(s, AV_LOG_DEBUG, "audiodatalength_high = %d\n", ape_ctx->audiodatalength_high);
    av_log(s, AV_LOG_DEBUG, "wavtaillength = %d\n", ape_ctx->wavtaillength);
    av_log(s, AV_LOG_DEBUG, "md5 = ");
    for (i = 0; i < 16; i++)
        av_log(s, AV_LOG_DEBUG, "%02x", ape_ctx->md5[i]);
    av_log(s, AV_LOG_DEBUG, "\n");

    av_log(s, AV_LOG_DEBUG, "\nHeader Block:\n\n");

    av_log(s, AV_LOG_DEBUG, "compressiontype = %d\n", ape_ctx->compressiontype);
    av_log(s, AV_LOG_DEBUG, "formatflags = %d\n", ape_ctx->formatflags);
    av_log(s, AV_LOG_DEBUG, "blocksperframe = %d\n", ape_ctx->blocksperframe);
    av_log(s, AV_LOG_DEBUG, "finalframeblocks = %d\n", ape_ctx->finalframeblocks);
    av_log(s, AV_LOG_DEBUG, "totalframes = %d\n", ape_ctx->totalframes);
    av_log(s, AV_LOG_DEBUG, "bps = %d\n", ape_ctx->bps);
    av_log(s, AV_LOG_DEBUG, "channels = %d\n", ape_ctx->channels);
    av_log(s, AV_LOG_DEBUG, "samplerate = %d\n", ape_ctx->samplerate);

    av_log(s, AV_LOG_DEBUG, "\nSeektable\n\n");
    if ((ape_ctx->seektablelength / sizeof(uint32_t)) != ape_ctx->totalframes) {
        av_log(s, AV_LOG_DEBUG, "No seektable\n");
    } else {
        for (i = 0; i < ape_ctx->seektablelength / sizeof(uint32_t); i++) {
            if (i < ape_ctx->totalframes - 1) {
                av_log(s, AV_LOG_DEBUG, "%8d %d (%d bytes)\n", i, ape_ctx->seektable[i], ape_ctx->seektable[i + 1] - ape_ctx->seektable[i]);
            } else {
                av_log(s, AV_LOG_DEBUG, "%8d %d\n", i, ape_ctx->seektable[i]);
            }
        }
    }

    av_log(s, AV_LOG_DEBUG, "\nFrames\n\n");
    /* NOTE(review): "%8lld" assumes long long is 64-bit; PRId64 from
     * <inttypes.h> would be the portable spelling — confirm against
     * project conventions before changing. */
    for (i = 0; i < ape_ctx->totalframes; i++)
        av_log(s, AV_LOG_DEBUG, "%8d %8lld %8d (%d samples)\n", i, ape_ctx->frames[i].pos, ape_ctx->frames[i].size, ape_ctx->frames[i].nblocks);

    av_log(s, AV_LOG_DEBUG, "\nCalculated information:\n\n");
    av_log(s, AV_LOG_DEBUG, "junklength = %d\n", ape_ctx->junklength);
    av_log(s, AV_LOG_DEBUG, "firstframe = %d\n", ape_ctx->firstframe);
    av_log(s, AV_LOG_DEBUG, "totalsamples = %d\n", ape_ctx->totalsamples);
#endif
}
/**
 * Parse the Monkey's Audio file header, build the frame table and create
 * the single audio stream.
 *
 * Handles both the >= 3.98 layout (separate descriptor + header blocks)
 * and the older combined layout.  On success the stream carries the codec
 * parameters and one index entry per frame.
 *
 * @return 0 on success, -1 or a negative AVERROR on malformed/unsupported input
 */
static int ape_read_header(AVFormatContext * s, AVFormatParameters * ap)
{
    ByteIOContext *pb = s->pb;
    APEContext *ape = s->priv_data;
    AVStream *st;
    uint32_t tag;
    int i;
    int total_blocks;
    int64_t pts;

    /* TODO: Skip any leading junk such as id3v2 tags */
    ape->junklength = 0;

    tag = get_le32(pb);
    if (tag != MKTAG('M', 'A', 'C', ' '))
        return -1;

    ape->fileversion = get_le16(pb);
    if (ape->fileversion < APE_MIN_VERSION || ape->fileversion > APE_MAX_VERSION) {
        av_log(s, AV_LOG_ERROR, "Unsupported file version - %d.%02d\n", ape->fileversion / 1000, (ape->fileversion % 1000) / 10);
        return -1;
    }

    if (ape->fileversion >= 3980) {
        ape->padding1             = get_le16(pb);
        ape->descriptorlength     = get_le32(pb);
        ape->headerlength         = get_le32(pb);
        ape->seektablelength      = get_le32(pb);
        ape->wavheaderlength      = get_le32(pb);
        ape->audiodatalength      = get_le32(pb);
        ape->audiodatalength_high = get_le32(pb);
        ape->wavtaillength        = get_le32(pb);
        get_buffer(pb, ape->md5, 16);

        /* Skip any unknown bytes at the end of the descriptor.
           This is for future compatibility */
        if (ape->descriptorlength > 52)
            url_fseek(pb, ape->descriptorlength - 52, SEEK_CUR);

        /* Read header data */
        ape->compressiontype  = get_le16(pb);
        ape->formatflags      = get_le16(pb);
        ape->blocksperframe   = get_le32(pb);
        ape->finalframeblocks = get_le32(pb);
        ape->totalframes      = get_le32(pb);
        ape->bps              = get_le16(pb);
        ape->channels         = get_le16(pb);
        ape->samplerate       = get_le32(pb);
    } else {
        /* old layout: descriptor and header are one block of fixed size 32,
           grown below as optional fields are encountered */
        ape->descriptorlength = 0;
        ape->headerlength     = 32;

        ape->compressiontype  = get_le16(pb);
        ape->formatflags      = get_le16(pb);
        ape->channels         = get_le16(pb);
        ape->samplerate       = get_le32(pb);
        ape->wavheaderlength  = get_le32(pb);
        ape->wavtaillength    = get_le32(pb);
        ape->totalframes      = get_le32(pb);
        ape->finalframeblocks = get_le32(pb);

        if (ape->formatflags & MAC_FORMAT_FLAG_HAS_PEAK_LEVEL) {
            url_fseek(pb, 4, SEEK_CUR); /* Skip the peak level */
            ape->headerlength += 4;
        }

        if (ape->formatflags & MAC_FORMAT_FLAG_HAS_SEEK_ELEMENTS) {
            ape->seektablelength = get_le32(pb);
            ape->headerlength += 4;
            ape->seektablelength *= sizeof(int32_t);
        } else
            ape->seektablelength = ape->totalframes * sizeof(int32_t);

        if (ape->formatflags & MAC_FORMAT_FLAG_8_BIT)
            ape->bps = 8;
        else if (ape->formatflags & MAC_FORMAT_FLAG_24_BIT)
            ape->bps = 24;
        else
            ape->bps = 16;

        if (ape->fileversion >= 3950)
            ape->blocksperframe = 73728 * 4;
        else if (ape->fileversion >= 3900 || (ape->fileversion >= 3800 && ape->compressiontype >= 4000))
            ape->blocksperframe = 73728;
        else
            ape->blocksperframe = 9216;

        /* Skip any stored wav header */
        if (!(ape->formatflags & MAC_FORMAT_FLAG_CREATE_WAV_HEADER))
            url_fskip(pb, ape->wavheaderlength);
    }

    if (ape->totalframes > UINT_MAX / sizeof(APEFrame)) {
        av_log(s, AV_LOG_ERROR, "Too many frames: %d\n", ape->totalframes);
        return -1;
    }
    if (!ape->totalframes) {
        /* the unconditional frames[0] / frames[totalframes - 1] accesses
           below would be out of bounds for an empty frame table */
        av_log(s, AV_LOG_ERROR, "No frames in the file!\n");
        return AVERROR(EINVAL);
    }
    ape->frames = av_malloc(ape->totalframes * sizeof(APEFrame));
    if (!ape->frames)
        return AVERROR(ENOMEM);
    ape->firstframe   = ape->junklength + ape->descriptorlength + ape->headerlength + ape->seektablelength + ape->wavheaderlength;
    ape->currentframe = 0;

    ape->totalsamples = ape->finalframeblocks;
    if (ape->totalframes > 1)
        ape->totalsamples += ape->blocksperframe * (ape->totalframes - 1);

    if (ape->seektablelength > 0) {
        ape->seektable = av_malloc(ape->seektablelength);
        if (!ape->seektable) {   /* was unchecked; OOM crashed in the loops below */
            av_freep(&ape->frames);
            return AVERROR(ENOMEM);
        }
        for (i = 0; i < ape->seektablelength / sizeof(uint32_t); i++)
            ape->seektable[i] = get_le32(pb);
    } else if (ape->totalframes > 1) {
        /* frame positions beyond the first come from the seek table,
           so a multi-frame file without one cannot be demuxed */
        av_log(s, AV_LOG_ERROR, "Missing seektable\n");
        av_freep(&ape->frames);
        return AVERROR(EINVAL);
    }

    ape->frames[0].pos     = ape->firstframe;
    ape->frames[0].nblocks = ape->blocksperframe;
    ape->frames[0].skip    = 0;
    for (i = 1; i < ape->totalframes; i++) {
        ape->frames[i].pos      = ape->seektable[i]; //ape->frames[i-1].pos + ape->blocksperframe;
        ape->frames[i].nblocks  = ape->blocksperframe;
        ape->frames[i - 1].size = ape->frames[i].pos - ape->frames[i - 1].pos;
        /* frames must start 4-byte aligned; remember how far we over-read */
        ape->frames[i].skip     = (ape->frames[i].pos - ape->frames[0].pos) & 3;
    }
    ape->frames[ape->totalframes - 1].size    = ape->finalframeblocks * 4;
    ape->frames[ape->totalframes - 1].nblocks = ape->finalframeblocks;

    for (i = 0; i < ape->totalframes; i++) {
        if (ape->frames[i].skip) {
            ape->frames[i].pos  -= ape->frames[i].skip;
            ape->frames[i].size += ape->frames[i].skip;
        }
        ape->frames[i].size = (ape->frames[i].size + 3) & ~3;
    }

    ape_dumpinfo(s, ape);

    /* try to read APE tags */
    if (!url_is_streamed(pb)) {
        ff_ape_parse_tag(s);
        url_fseek(pb, 0, SEEK_SET);
    }

    av_log(s, AV_LOG_DEBUG, "Decoding file - v%d.%02d, compression level %d\n", ape->fileversion / 1000, (ape->fileversion % 1000) / 10, ape->compressiontype);

    /* now we are ready: build format streams */
    st = av_new_stream(s, 0);
    if (!st) {
        av_freep(&ape->frames);
        av_freep(&ape->seektable);
        return -1;
    }

    total_blocks = (ape->totalframes == 0) ? 0 : ((ape->totalframes - 1) * ape->blocksperframe) + ape->finalframeblocks;

    st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id    = CODEC_ID_APE;
    st->codec->codec_tag   = MKTAG('A', 'P', 'E', ' ');
    st->codec->channels    = ape->channels;
    st->codec->sample_rate = ape->samplerate;
    st->codec->bits_per_coded_sample = ape->bps;
    st->codec->frame_size  = MAC_SUBFRAME_SIZE;

    st->nb_frames  = ape->totalframes;
    st->start_time = 0;
    st->duration   = total_blocks / MAC_SUBFRAME_SIZE;
    av_set_pts_info(st, 64, MAC_SUBFRAME_SIZE, ape->samplerate);

    st->codec->extradata = av_malloc(APE_EXTRADATA_SIZE);
    if (!st->codec->extradata) {   /* was unchecked; AV_WL16 below wrote to NULL */
        av_freep(&ape->frames);
        av_freep(&ape->seektable);
        return AVERROR(ENOMEM);
    }
    st->codec->extradata_size = APE_EXTRADATA_SIZE;
    AV_WL16(st->codec->extradata + 0, ape->fileversion);
    AV_WL16(st->codec->extradata + 2, ape->compressiontype);
    AV_WL16(st->codec->extradata + 4, ape->formatflags);

    pts = 0;
    for (i = 0; i < ape->totalframes; i++) {
        ape->frames[i].pts = pts;
        av_add_index_entry(st, ape->frames[i].pos, ape->frames[i].pts, 0, 0, AVINDEX_KEYFRAME);
        pts += ape->blocksperframe / MAC_SUBFRAME_SIZE;
    }

    return 0;
}
/**
 * Read one APE frame as a packet.
 *
 * The payload is prefixed with 8 bytes of side data for the decoder:
 * the number of audio blocks in the frame and the alignment skip,
 * both little-endian 32 bit.
 */
static int ape_read_packet(AVFormatContext * s, AVPacket * pkt)
{
    int ret;
    int nblocks;
    APEContext *ape = s->priv_data;
    uint32_t extra_size = 8;

    if (url_feof(s->pb))
        return AVERROR(EIO);
    /* currentframe is a 0-based index into a totalframes-sized table;
       the old '>' test allowed one read past the end of ape->frames */
    if (ape->currentframe >= ape->totalframes)
        return AVERROR(EIO);

    url_fseek(s->pb, ape->frames[ape->currentframe].pos, SEEK_SET);

    /* Calculate how many blocks there are in this frame */
    if (ape->currentframe == (ape->totalframes - 1))
        nblocks = ape->finalframeblocks;
    else
        nblocks = ape->blocksperframe;

    if (av_new_packet(pkt, ape->frames[ape->currentframe].size + extra_size) < 0)
        return AVERROR(ENOMEM);

    AV_WL32(pkt->data    , nblocks);
    AV_WL32(pkt->data + 4, ape->frames[ape->currentframe].skip);
    ret = get_buffer(s->pb, pkt->data + extra_size, ape->frames[ape->currentframe].size);
    if (ret < 0) {
        /* was ignored: a failed read produced a packet with a negative
           size computed from the error code */
        av_free_packet(pkt);
        return ret;
    }

    pkt->pts = ape->frames[ape->currentframe].pts;
    pkt->stream_index = 0;

    /* note: we need to modify the packet size here to handle the last
       packet */
    pkt->size = ret + extra_size;

    ape->currentframe++;

    return 0;
}
/** Release the frame table and seek table built in ape_read_header(). */
static int ape_read_close(AVFormatContext * s)
{
    APEContext *ctx = s->priv_data;

    av_freep(&ctx->seektable);
    av_freep(&ctx->frames);
    return 0;
}
/**
 * Seek by mapping the requested timestamp onto the per-frame index
 * built in ape_read_header(); demuxing resumes from that frame.
 */
static int ape_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    APEContext *ctx = s->priv_data;
    AVStream *stream = s->streams[stream_index];
    int frame_idx = av_index_search_timestamp(stream, timestamp, flags);

    if (frame_idx < 0)
        return -1;

    ctx->currentframe = frame_idx;
    return 0;
}
AVInputFormat ape_demuxer = {
    .name           = "ape",
    .long_name      = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
    .priv_data_size = sizeof(APEContext),
    .read_probe     = ape_probe,
    .read_header    = ape_read_header,
    .read_packet    = ape_read_packet,
    .read_close     = ape_read_close,
    .read_seek      = ape_read_seek,
    .extensions     = "ape,apl,mac",
};
| 123linslouis-android-video-cutter | jni/libavformat/ape.c | C | asf20 | 14,502 |
/*
* CRC encoder (for codec/format testing)
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/adler32.h"
#include "avformat.h"
/** Muxer private state: running Adler-32 checksum over all packet payloads. */
typedef struct CRCState {
uint32_t crcval; ///< current Adler-32 value; seeded with 1 in crc_write_header()
} CRCState;
/** Initialize the running checksum; Adler-32 is defined to start from 1. */
static int crc_write_header(struct AVFormatContext *s)
{
    CRCState *state = s->priv_data;

    state->crcval = 1;
    return 0;
}
/** Fold this packet's payload into the running Adler-32 checksum. */
static int crc_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    CRCState *state = s->priv_data;

    state->crcval = av_adler32_update(state->crcval, pkt->data, pkt->size);
    return 0;
}
/** Emit the final checksum as a single "CRC=0x........" text line and flush. */
static int crc_write_trailer(struct AVFormatContext *s)
{
    CRCState *state = s->priv_data;
    char line[64];

    snprintf(line, sizeof(line), "CRC=0x%08x\n", state->crcval);
    put_buffer(s->pb, line, strlen(line));
    put_flush_packet(s->pb);
    return 0;
}
AVOutputFormat crc_muxer = {
"crc",
NULL_IF_CONFIG_SMALL("CRC testing format"),
NULL,
"",
sizeof(CRCState),
CODEC_ID_PCM_S16LE,
CODEC_ID_RAWVIDEO,
crc_write_header,
crc_write_packet,
crc_write_trailer,
};
| 123linslouis-android-video-cutter | jni/libavformat/crcenc.c | C | asf20 | 1,810 |
/*
* Various utilities for ffmpeg system
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
* copyright (c) 2002 Francois Revol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* needed by inet_aton() */
#define _SVID_SOURCE
#define _DARWIN_C_SOURCE
#include "config.h"
#include "avformat.h"
#include <unistd.h>
#include <fcntl.h>
#include <sys/time.h>
#include "os_support.h"
#if CONFIG_NETWORK
#if !HAVE_POLL_H
#if HAVE_WINSOCK2_H
#include <winsock2.h>
#elif HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#endif
#include "network.h"
#if !HAVE_INET_ATON
#include <stdlib.h>
#include <strings.h>
/**
 * Fallback inet_aton(): parse a dotted-quad IPv4 address into *add
 * (network byte order).  Unlike the system inet_aton() this accepts
 * only the full a.b.c.d form and rejects a first octet of 0.
 * @return 1 on success, 0 on malformed input
 */
int ff_inet_aton (const char * str, struct in_addr * add)
{
    unsigned int add1 = 0, add2 = 0, add3 = 0, add4 = 0;

    /* %u matches the unsigned operands; the old %d with unsigned int*
       arguments was a format/argument type mismatch (undefined behavior) */
    if (sscanf(str, "%u.%u.%u.%u", &add1, &add2, &add3, &add4) != 4)
        return 0;

    if (!add1 || (add1|add2|add3|add4) > 255) return 0;

    add->s_addr = htonl((add1 << 24) + (add2 << 16) + (add3 << 8) + add4);

    return 1;
}
#else
/** Thin wrapper: the platform provides inet_aton(), so just delegate. */
int ff_inet_aton (const char * str, struct in_addr * add)
{
    return inet_aton(str, add);
}
#endif /* !HAVE_INET_ATON */
#if !HAVE_GETADDRINFO
/**
 * Minimal IPv4-only fallback for getaddrinfo().
 *
 * On Windows it first tries to delegate to ws2_32.dll's native
 * implementation at runtime; otherwise it resolves via ff_inet_aton()
 * and gethostbyname().  At most one result entry is produced, and any
 * failure is reported as EAI_FAIL.
 *
 * @return 0 on success and *res set, EAI_FAIL otherwise
 */
int ff_getaddrinfo(const char *node, const char *service,
                   const struct addrinfo *hints, struct addrinfo **res)
{
    struct hostent *h = NULL;
    struct addrinfo *ai;
    struct sockaddr_in *sin;

#if HAVE_WINSOCK2_H
    /* resolve the real getaddrinfo at runtime so the binary still runs
       on Windows versions whose ws2_32.dll lacks it */
    int (WSAAPI *win_getaddrinfo)(const char *node, const char *service,
                                  const struct addrinfo *hints,
                                  struct addrinfo **res);
    HMODULE ws2mod = GetModuleHandle("ws2_32.dll");
    win_getaddrinfo = GetProcAddress(ws2mod, "getaddrinfo");
    if (win_getaddrinfo)
        return win_getaddrinfo(node, service, hints, res);
#endif

    *res = NULL;
    sin = av_mallocz(sizeof(struct sockaddr_in));
    if (!sin)
        return EAI_FAIL;
    sin->sin_family = AF_INET;

    if (node) {
        if (!ff_inet_aton(node, &sin->sin_addr)) {
            /* not a numeric address; fall back to a hostname lookup
               unless the caller forbade it */
            if (hints && (hints->ai_flags & AI_NUMERICHOST)) {
                av_free(sin);
                return EAI_FAIL;
            }
            h = gethostbyname(node);
            if (!h) {
                av_free(sin);
                return EAI_FAIL;
            }
            memcpy(&sin->sin_addr, h->h_addr_list[0], sizeof(struct in_addr));
        }
    } else {
        if (hints && (hints->ai_flags & AI_PASSIVE)) {
            sin->sin_addr.s_addr = INADDR_ANY;
        } else
            sin->sin_addr.s_addr = INADDR_LOOPBACK;
    }

    /* Note: getaddrinfo allows service to be a string, which
     * should be looked up using getservbyname. */
    if (service)
        sin->sin_port = htons(atoi(service));

    ai = av_mallocz(sizeof(struct addrinfo));
    if (!ai) {
        av_free(sin);
        return EAI_FAIL;
    }

    *res = ai;
    ai->ai_family = AF_INET;
    ai->ai_socktype = hints ? hints->ai_socktype : 0;
    switch (ai->ai_socktype) {
    case SOCK_STREAM: ai->ai_protocol = IPPROTO_TCP; break;
    case SOCK_DGRAM:  ai->ai_protocol = IPPROTO_UDP; break;
    default:          ai->ai_protocol = 0;           break;
    }

    /* ownership of sin transfers to the result; freed by ff_freeaddrinfo() */
    ai->ai_addr = (struct sockaddr *)sin;
    ai->ai_addrlen = sizeof(struct sockaddr_in);
    if (hints && (hints->ai_flags & AI_CANONNAME))
        ai->ai_canonname = h ? av_strdup(h->h_name) : NULL;

    ai->ai_next = NULL;
    return 0;
}
/**
 * Free a result list produced by ff_getaddrinfo().
 * On Windows, delegates to the native freeaddrinfo() when available so
 * memory allocated by the native getaddrinfo() is released by its owner.
 * Frees only a single entry (ff_getaddrinfo() never builds a chain).
 */
void ff_freeaddrinfo(struct addrinfo *res)
{
#if HAVE_WINSOCK2_H
    void (WSAAPI *win_freeaddrinfo)(struct addrinfo *res);
    HMODULE ws2mod = GetModuleHandle("ws2_32.dll");
    win_freeaddrinfo = (void (WSAAPI *)(struct addrinfo *res))
                       GetProcAddress(ws2mod, "freeaddrinfo");
    if (win_freeaddrinfo) {
        win_freeaddrinfo(res);
        return;
    }
#endif

    av_free(res->ai_canonname);
    av_free(res->ai_addr);
    av_free(res);
}
/**
 * Minimal IPv4-only fallback for getnameinfo(): turn a sockaddr back
 * into a host string and/or service string.
 * On Windows it delegates to ws2_32.dll's native implementation when
 * that is available at runtime.
 *
 * @return 0 on success, an EAI_* error code otherwise
 */
int ff_getnameinfo(const struct sockaddr *sa, int salen,
                   char *host, int hostlen,
                   char *serv, int servlen, int flags)
{
    const struct sockaddr_in *sin = (const struct sockaddr_in *)sa;

#if HAVE_WINSOCK2_H
    int (WSAAPI *win_getnameinfo)(const struct sockaddr *sa, socklen_t salen,
                                  char *host, DWORD hostlen,
                                  char *serv, DWORD servlen, int flags);
    HMODULE ws2mod = GetModuleHandle("ws2_32.dll");
    win_getnameinfo = GetProcAddress(ws2mod, "getnameinfo");
    if (win_getnameinfo)
        return win_getnameinfo(sa, salen, host, hostlen, serv, servlen, flags);
#endif

    /* only AF_INET addresses are handled by this fallback */
    if (sa->sa_family != AF_INET)
        return EAI_FAMILY;
    if (!host && !serv)
        return EAI_NONAME;

    if (host && hostlen > 0) {
        struct hostent *ent = NULL;
        uint32_t a;
        /* reverse-resolve unless the caller asked for numeric output */
        if (!(flags & NI_NUMERICHOST))
            ent = gethostbyaddr((const char *)&sin->sin_addr,
                                sizeof(sin->sin_addr), AF_INET);

        if (ent) {
            snprintf(host, hostlen, "%s", ent->h_name);
        } else if (flags & NI_NAMERQD) {
            /* a name was required but the lookup failed */
            return EAI_NONAME;
        } else {
            a = ntohl(sin->sin_addr.s_addr);
            snprintf(host, hostlen, "%d.%d.%d.%d",
                     ((a >> 24) & 0xff), ((a >> 16) & 0xff),
                     ((a >>  8) & 0xff), ( a        & 0xff));
        }
    }

    if (serv && servlen > 0) {
        struct servent *ent = NULL;
        if (!(flags & NI_NUMERICSERV))
            ent = getservbyport(sin->sin_port, flags & NI_DGRAM ? "udp" : "tcp");

        if (ent) {
            snprintf(serv, servlen, "%s", ent->s_name);
        } else
            snprintf(serv, servlen, "%d", ntohs(sin->sin_port));
    }

    return 0;
}
/**
 * Map the getaddrinfo() error codes produced by the fallback resolver
 * to human-readable strings.
 */
const char *ff_gai_strerror(int ecode)
{
    if (ecode == EAI_FAIL)
        return "A non-recoverable error occurred";
    if (ecode == EAI_FAMILY)
        return "The address family was not recognized or the address length was invalid for the specified family";
    if (ecode == EAI_NONAME)
        return "The name does not resolve for the supplied parameters";

    return "Unknown error";
}
#endif
/**
 * Enable or disable non-blocking mode on a socket/fd.
 * @return result of the underlying fcntl()/ioctlsocket() call
 */
int ff_socket_nonblock(int socket, int enable)
{
#if HAVE_WINSOCK2_H
    return ioctlsocket(socket, FIONBIO, &enable);
#else
    /* toggle O_NONBLOCK while preserving the other status flags */
    int flags = fcntl(socket, F_GETFL);

    if (enable)
        flags |= O_NONBLOCK;
    else
        flags &= ~O_NONBLOCK;
    return fcntl(socket, F_SETFL, flags);
#endif
}
#endif /* CONFIG_NETWORK */
#if CONFIG_FFSERVER
#if !HAVE_POLL_H
/**
 * poll() emulation on top of select() for platforms without <poll.h>.
 *
 * Maps POLLIN/POLLOUT/POLLERR onto the read/write/exception fd sets;
 * other event bits are ignored.  A negative timeout blocks indefinitely;
 * otherwise the timeout is taken in milliseconds.
 *
 * @return number of ready descriptors as reported by select(),
 *         0 if there was nothing to poll, or -1 with errno set
 */
int poll(struct pollfd *fds, nfds_t numfds, int timeout)
{
    fd_set read_set;
    fd_set write_set;
    fd_set exception_set;
    nfds_t i;
    int n;
    int rc;

#if HAVE_WINSOCK2_H
    /* on winsock FD_SETSIZE limits the number of sockets per set,
       not the highest descriptor value */
    if (numfds >= FD_SETSIZE) {
        errno = EINVAL;
        return -1;
    }
#endif

    FD_ZERO(&read_set);
    FD_ZERO(&write_set);
    FD_ZERO(&exception_set);

    n = -1;   /* highest fd seen; stays -1 if nothing to watch */
    for (i = 0; i < numfds; i++) {
        if (fds[i].fd < 0)
            continue;
#if !HAVE_WINSOCK2_H
        /* FD_SET on an fd >= FD_SETSIZE would overflow the set */
        if (fds[i].fd >= FD_SETSIZE) {
            errno = EINVAL;
            return -1;
        }
#endif

        if (fds[i].events & POLLIN)  FD_SET(fds[i].fd, &read_set);
        if (fds[i].events & POLLOUT) FD_SET(fds[i].fd, &write_set);
        if (fds[i].events & POLLERR) FD_SET(fds[i].fd, &exception_set);

        if (fds[i].fd > n)
            n = fds[i].fd;
    };

    if (n == -1)
        /* Hey!? Nothing to poll, in fact!!! */
        return 0;

    if (timeout < 0)
        rc = select(n+1, &read_set, &write_set, &exception_set, NULL);
    else {
        struct timeval tv;

        tv.tv_sec = timeout / 1000;
        tv.tv_usec = 1000 * (timeout % 1000);
        rc = select(n+1, &read_set, &write_set, &exception_set, &tv);
    };

    if (rc < 0)
        return rc;

    /* NOTE(review): revents is filled by iterating i up to n (the highest
       fd value), not numfds, mirroring the original code — entries past
       index n keep stale revents; confirm before changing */
    for (i = 0; i < (nfds_t) n; i++) {
        fds[i].revents = 0;

        if (FD_ISSET(fds[i].fd, &read_set))      fds[i].revents |= POLLIN;
        if (FD_ISSET(fds[i].fd, &write_set))     fds[i].revents |= POLLOUT;
        if (FD_ISSET(fds[i].fd, &exception_set)) fds[i].revents |= POLLERR;
    };

    return rc;
}
#endif /* HAVE_POLL_H */
#endif /* CONFIG_FFSERVER */
| 123linslouis-android-video-cutter | jni/libavformat/os_support.c | C | asf20 | 8,757 |
/*
* ID3v2 header parser
* Copyright (c) 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFORMAT_ID3V2_H
#define AVFORMAT_ID3V2_H
#include <stdint.h>
#include "avformat.h"
#include "metadata.h"
#define ID3v2_HEADER_SIZE 10
/**
* Detects ID3v2 Header.
* @buf must be ID3v2_HEADER_SIZE byte long
*/
int ff_id3v2_match(const uint8_t *buf);
/**
* Gets the length of an ID3v2 tag.
* @buf must be ID3v2_HEADER_SIZE bytes long and point to the start of an
* already detected ID3v2 tag
*/
int ff_id3v2_tag_len(const uint8_t *buf);
/**
* ID3v2 parser
* Handles ID3v2.2, 2.3 and 2.4.
*/
void ff_id3v2_parse(AVFormatContext *s, int len, uint8_t version, uint8_t flags);
/**
* Read an ID3v2 tag
*/
void ff_id3v2_read(AVFormatContext *s);
extern const AVMetadataConv ff_id3v2_metadata_conv[];
/**
* A list of ID3v2.4 text information frames.
* http://www.id3.org/id3v2.4.0-frames
*/
extern const char ff_id3v2_tags[][4];
#endif /* AVFORMAT_ID3V2_H */
| 123linslouis-android-video-cutter | jni/libavformat/id3v2.h | C | asf20 | 1,710 |
/*
* ASF decryption
* Copyright (c) 2007 Reimar Doeffinger
* This is a rewrite of code contained in freeme/freeme2
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/bswap.h"
#include "libavutil/des.h"
#include "libavutil/rc4.h"
#include "asfcrypt.h"
/**
* \brief find multiplicative inverse modulo 2 ^ 32
* \param v number to invert, must be odd!
* \return number so that result * v = 1 (mod 2^32)
*/
/**
 * \brief find multiplicative inverse modulo 2 ^ 32
 * \param v number to invert, must be odd!
 * \return number so that result * v = 1 (mod 2^32)
 */
static uint32_t inverse(uint32_t v) {
    // seed: for odd v, v^3 is already the inverse modulo 16
    // (only the lowest 4 bits of the guess matter)
    uint32_t x = v * v * v;
    int round;

    // Newton iteration x' = x * (2 - v*x) doubles the number of
    // correct low-order bits per round: 4 -> 8 -> 16 -> 32
    for (round = 0; round < 3; round++)
        x *= 2 - v * x;
    return x;
}
/**
* \brief read keys from keybuf into keys
* \param keybuf buffer containing the keys
* \param keys output key array containing the keys for encryption in
* native endianness
*/
/**
 * \brief read keys from keybuf into keys
 * \param keybuf buffer containing the keys
 * \param keys output key array containing the keys for encryption in
 *             native endianness
 *
 * Each key is assembled from four little-endian bytes and forced odd
 * (lowest bit set) so that it is invertible modulo 2^32.
 */
static void multiswap_init(const uint8_t keybuf[48], uint32_t keys[12]) {
    int i;
    for (i = 0; i < 12; i++)
        keys[i] = ((uint32_t)keybuf[4 * i]           |
                   (uint32_t)keybuf[4 * i + 1] <<  8 |
                   (uint32_t)keybuf[4 * i + 2] << 16 |
                   (uint32_t)keybuf[4 * i + 3] << 24) | 1;
}
/**
* \brief invert the keys so that encryption become decryption keys and
* the other way round.
* \param keys key array of ints to invert
*/
/**
 * \brief invert the keys so that encryption become decryption keys and
 *        the other way round.
 * \param keys key array of ints to invert
 *
 * Only the multiplicative keys (slots 0-4 and 6-10) are inverted;
 * slots 5 and 11 are left untouched.
 */
static void multiswap_invert_keys(uint32_t keys[12]) {
    int i;
    for (i = 0; i < 11; i++)
        if (i != 5)
            keys[i] = inverse(keys[i]);
}
/**
 * One MultiSwap mixing step: multiply by key 0, then four rounds of
 * 16-bit rotation followed by multiplication with the next key, and
 * finally add key 5.
 */
static uint32_t multiswap_step(const uint32_t keys[12], uint32_t v) {
    int round;
    v *= keys[0];
    for (round = 1; round < 5; round++)
        v = ((v >> 16) | (v << 16)) * keys[round];
    return v + keys[5];
}
/**
 * Inverse of multiswap_step(): subtract key 5, then undo the
 * multiply/rotate rounds in reverse order (keys must already be the
 * inverted multiplicative keys), finishing with key 0.
 */
static uint32_t multiswap_inv_step(const uint32_t keys[12], uint32_t v) {
    int round;
    v -= keys[5];
    for (round = 4; round > 0; round--) {
        v *= keys[round];
        v = (v >> 16) | (v << 16);
    }
    return v * keys[0];
}
/**
* \brief "MultiSwap" encryption
* \param keys 32 bit numbers in machine endianness,
* 0-4 and 6-10 must be inverted from decryption
* \param key another key, this one must be the same for the decryption
* \param data data to encrypt
* \return encrypted data
*/
/**
 * \brief "MultiSwap" encryption
 * \param keys 32 bit numbers in machine endianness,
 *             0-4 and 6-10 must be inverted from decryption
 * \param key  another key, this one must be the same for the decryption
 * \param data data to encrypt
 * \return encrypted data
 */
static uint64_t multiswap_enc(const uint32_t keys[12], uint64_t key, uint64_t data) {
    uint32_t lo = (uint32_t)data;
    uint32_t hi = (uint32_t)(data >> 32);
    uint32_t out_hi, mixed;

    lo += (uint32_t)key;
    mixed = multiswap_step(keys, lo);
    hi += mixed;
    out_hi = (uint32_t)(key >> 32) + mixed;
    mixed = multiswap_step(keys + 6, hi);
    out_hi += mixed;
    return ((uint64_t)out_hi << 32) | mixed;
}
/**
* \brief "MultiSwap" decryption
* \param keys 32 bit numbers in machine endianness,
* 0-4 and 6-10 must be inverted from encryption
* \param key another key, this one must be the same as for the encryption
* \param data data to decrypt
* \return decrypted data
*/
/**
 * \brief "MultiSwap" decryption
 * \param keys 32 bit numbers in machine endianness,
 *             0-4 and 6-10 must be inverted from encryption
 * \param key  another key, this one must be the same as for the encryption
 * \param data data to decrypt
 * \return decrypted data
 */
static uint64_t multiswap_dec(const uint32_t keys[12], uint64_t key, uint64_t data) {
    uint32_t in_hi = (uint32_t)(data >> 32);
    uint32_t in_lo = (uint32_t)data;
    uint32_t lo, hi, t;

    in_hi -= in_lo;
    hi = multiswap_inv_step(keys + 6, in_lo);
    t  = in_hi - (uint32_t)(key >> 32);
    hi -= t;
    lo = multiswap_inv_step(keys, t);
    lo -= (uint32_t)key;
    return ((uint64_t)hi << 32) | lo;
}
/**
 * Decrypt one buffer of ASF ("DRM") payload data in place.
 *
 * For buffers shorter than 16 bytes only a plain XOR with the key is
 * applied.  Otherwise: an RC4 keystream derived from the first 12 key
 * bytes seeds the MultiSwap keys and masks; the per-packet key is
 * recovered from the last qword via DES (key bytes 12..19) and used to
 * RC4-decrypt the whole buffer; finally the trailing qword is replaced
 * by the MultiSwap-decrypted checksum value.
 *
 * \param key  20-byte key for this stream
 * \param data buffer to decrypt, modified in place
 * \param len  length of buffer in bytes
 */
void ff_asfcrypt_dec(const uint8_t key[20], uint8_t *data, int len) {
    struct AVDES des;
    struct AVRC4 rc4;
    int num_qwords = len >> 3;
    uint64_t *qwords = (uint64_t *)data;  // NOTE(review): aliases the byte buffer; assumes suitable alignment
    uint64_t rc4buff[8];
    uint64_t packetkey;
    uint32_t ms_keys[12];
    uint64_t ms_state;
    int i;
    if (len < 16) {
        /* too short for the full scheme: simple XOR with the key */
        for (i = 0; i < len; i++)
            data[i] ^= key[i];
        return;
    }
    /* 64 bytes of RC4 keystream from the first 12 key bytes */
    memset(rc4buff, 0, sizeof(rc4buff));
    av_rc4_init(&rc4, key, 12 * 8, 1);
    av_rc4_crypt(&rc4, (uint8_t *)rc4buff, NULL, sizeof(rc4buff), NULL, 1);
    /* first 48 keystream bytes become the MultiSwap keys */
    multiswap_init((uint8_t *)rc4buff, ms_keys);
    /* recover the per-packet key from the last qword:
       XOR mask, DES decrypt with key bytes 12..19, second XOR mask */
    packetkey = qwords[num_qwords - 1];
    packetkey ^= rc4buff[7];
    av_des_init(&des, key + 12, 64, 1);
    av_des_crypt(&des, (uint8_t *)&packetkey, (uint8_t *)&packetkey, 1, NULL, 1);
    packetkey ^= rc4buff[6];
    /* RC4-decrypt the whole buffer with the per-packet key */
    av_rc4_init(&rc4, (uint8_t *)&packetkey, 64, 1);
    av_rc4_crypt(&rc4, data, data, len, NULL, 1);
    /* run the MultiSwap MAC over all but the last qword, then replace the
       last qword with the decrypted checksum */
    ms_state = 0;
    for (i = 0; i < num_qwords - 1; i++, qwords++)
        ms_state = multiswap_enc(ms_keys, ms_state, AV_RL64(qwords));
    multiswap_invert_keys(ms_keys);
    packetkey = (packetkey << 32) | (packetkey >> 32);
    packetkey = le2me_64(packetkey);
    packetkey = multiswap_dec(ms_keys, ms_state, packetkey);
    AV_WL64(qwords, packetkey);
}
| 123linslouis-android-video-cutter | jni/libavformat/asfcrypt.c | C | asf20 | 5,381 |
/*
* mtv demuxer
* Copyright (c) 2006 Reynaldo H. Verdejo Pinochet
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* MTV demuxer.
*/
#include "libavutil/bswap.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#define MTV_ASUBCHUNK_DATA_SIZE 500
#define MTV_HEADER_SIZE 512
#define MTV_AUDIO_PADDING_SIZE 12
#define AUDIO_SAMPLING_RATE 44100
#define VIDEO_SID 0
#define AUDIO_SID 1
/** Demuxer private state filled from the 512-byte MTV file header. */
typedef struct MTVDemuxContext {
unsigned int file_size; ///< filesize, not always right
unsigned int segments; ///< number of 512 byte segments
unsigned int audio_identifier; ///< 'MP3' on all files I have seen
unsigned int audio_br; ///< bitrate of audio channel (mp3)
unsigned int img_colorfmt; ///< frame colorfmt rgb 565/555
unsigned int img_bpp; ///< frame bits per pixel
unsigned int img_width; ///< frame width in pixels; may be 0 in the header and derived in mtv_read_header()
unsigned int img_height; ///< frame height in pixels; may be 0 in the header and derived in mtv_read_header()
unsigned int img_segment_size; ///< size of image segment
unsigned int video_fps; ///< derived video rate: (audio_br / 4) / audio subsegments
unsigned int full_segment_size; ///< bytes per A/V segment: audio subchunks + image segment
} MTVDemuxContext;
/**
 * Probe for the MTV container: check the 'AMV' magic and sanity-check
 * the bpp/width/height/imagesize header fields.
 */
static int mtv_probe(AVProbeData *p)
{
    /* Magic is 'AMV' */
    if(*(p->buf) != 'A' || *(p->buf+1) != 'M' || *(p->buf+2) != 'V')
        return 0;

    /* Check for nonzero in bpp and (width|height) header fields */
    /* precedence note: '|' binds tighter than '&&', so this rejects the
       file when bpp is 0 or when width and height are both 0 */
    if(!(p->buf[51] && AV_RL16(&p->buf[52]) | AV_RL16(&p->buf[54])))
        return 0;

    /* If width or height are 0 then imagesize header field should not */
    if(!AV_RL16(&p->buf[52]) || !AV_RL16(&p->buf[54]))
    {
        /* dimensions can be recovered from imagesize, but with less
           confidence */
        if(!!AV_RL16(&p->buf[56]))
            return AVPROBE_SCORE_MAX/2;
        else
            return 0;
    }

    if(p->buf[51] != 16)
        return AVPROBE_SCORE_MAX/4; // But we are going to assume 16bpp anyway ..

    return AVPROBE_SCORE_MAX;
}
/**
 * Parse the MTV file header and set up the raw-RGB565 video stream and
 * the MP3 audio stream.
 *
 * Missing width/height fields are derived from the image segment size;
 * zero divisors (bpp, dimensions, audio subsegment count) are rejected
 * instead of crashing with a division by zero.
 *
 * @return 0 on success, negative AVERROR on invalid input or OOM
 */
static int mtv_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    MTVDemuxContext *mtv = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st;
    unsigned int audio_subsegments;

    url_fskip(pb, 3);
    mtv->file_size        = get_le32(pb);
    mtv->segments         = get_le32(pb);
    url_fskip(pb, 32);
    mtv->audio_identifier = get_le24(pb);
    mtv->audio_br         = get_le16(pb);
    mtv->img_colorfmt     = get_le24(pb);
    mtv->img_bpp          = get_byte(pb);
    mtv->img_width        = get_le16(pb);
    mtv->img_height       = get_le16(pb);
    mtv->img_segment_size = get_le16(pb);

    /* Calculate width and height if missing from header.
       Guard every divisor: a zero bpp or zero known dimension previously
       caused a division by zero on crafted files. */
    if (mtv->img_bpp >> 3) {
        if (!mtv->img_width && mtv->img_height)
            mtv->img_width = mtv->img_segment_size / (mtv->img_bpp >> 3)
                             / mtv->img_height;
        if (!mtv->img_height && mtv->img_width)
            mtv->img_height = mtv->img_segment_size / (mtv->img_bpp >> 3)
                              / mtv->img_width;
    }
    if (!mtv->img_width || !mtv->img_height) {
        av_log(s, AV_LOG_ERROR, "width or height is invalid and I cannot calculate them from other information\n");
        return AVERROR(EINVAL);
    }

    url_fskip(pb, 4);
    audio_subsegments = get_le16(pb);
    if (!audio_subsegments) {
        /* divisor for video_fps below */
        av_log(s, AV_LOG_ERROR, "audio subsegments count is 0\n");
        return AVERROR_INVALIDDATA;
    }
    mtv->full_segment_size =
        audio_subsegments * (MTV_AUDIO_PADDING_SIZE + MTV_ASUBCHUNK_DATA_SIZE) +
        mtv->img_segment_size;
    mtv->video_fps = (mtv->audio_br / 4) / audio_subsegments;

    // FIXME Add sanity check here

    // all systems go! init decoders

    // video - raw rgb565
    st = av_new_stream(s, VIDEO_SID);
    if (!st)
        return AVERROR(ENOMEM);

    av_set_pts_info(st, 64, 1, mtv->video_fps);
    st->codec->codec_type  = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id    = CODEC_ID_RAWVIDEO;
    st->codec->pix_fmt     = PIX_FMT_RGB565;
    st->codec->width       = mtv->img_width;
    st->codec->height      = mtv->img_height;
    st->codec->sample_rate = mtv->video_fps;
    /* tell the raw video decoder the rows are stored bottom-up */
    st->codec->extradata   = av_strdup("BottomUp");
    if (!st->codec->extradata)   /* was unchecked */
        return AVERROR(ENOMEM);
    st->codec->extradata_size = 9;

    // audio - mp3
    st = av_new_stream(s, AUDIO_SID);
    if (!st)
        return AVERROR(ENOMEM);

    av_set_pts_info(st, 64, 1, AUDIO_SAMPLING_RATE);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id   = CODEC_ID_MP3;
    st->codec->bit_rate   = mtv->audio_br;
    st->need_parsing      = AVSTREAM_PARSE_FULL;

    // Jump over header
    if (url_fseek(pb, MTV_HEADER_SIZE, SEEK_SET) != MTV_HEADER_SIZE)
        return AVERROR(EIO);

    return 0;
}
/**
 * Read the next packet, alternating between the audio subchunks and the
 * image segment based on the current position within the full segment.
 * On little-endian hosts the RGB565 pixel bytes are swapped in place.
 *
 * @return packet payload size on success, negative on error
 */
static int mtv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MTVDemuxContext *mtv = s->priv_data;
    ByteIOContext *pb = s->pb;
    int ret;
#if !HAVE_BIGENDIAN
    int i;
#endif

    /* nonzero remainder => we are inside the audio part of the segment */
    if((url_ftell(pb) - s->data_offset + mtv->img_segment_size) % mtv->full_segment_size)
    {
        url_fskip(pb, MTV_AUDIO_PADDING_SIZE);

        ret = av_get_packet(pb, pkt, MTV_ASUBCHUNK_DATA_SIZE);
        if(ret < 0)
            return ret;

        /* report the position of the skipped padding as the packet start */
        pkt->pos -= MTV_AUDIO_PADDING_SIZE;
        pkt->stream_index = AUDIO_SID;

    }else
    {
        ret = av_get_packet(pb, pkt, mtv->img_segment_size);
        if(ret < 0)
            return ret;

#if !HAVE_BIGENDIAN
        /* pkt->data is GGGRRRR BBBBBGGG
         * and we need RRRRRGGG GGGBBBBB
         * for PIX_FMT_RGB565 so here we
         * just swap bytes as they come
         */

        for(i=0;i<mtv->img_segment_size/2;i++)
            *((uint16_t *)pkt->data+i) = bswap_16(*((uint16_t *)pkt->data+i));
#endif
        pkt->stream_index = VIDEO_SID;
    }

    return ret;
}
AVInputFormat mtv_demuxer = {
"MTV",
NULL_IF_CONFIG_SMALL("MTV format"),
sizeof(MTVDemuxContext),
mtv_probe,
mtv_read_header,
mtv_read_packet,
};
| 123linslouis-android-video-cutter | jni/libavformat/mtv.c | C | asf20 | 6,095 |
/*
* Core Audio Format demuxer
* Copyright (c) 2007 Justin Ruggles
* Copyright (c) 2009 Peter Ross
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Core Audio Format demuxer
*/
#include "avformat.h"
#include "riff.h"
#include "isom.h"
#include "libavutil/intreadwrite.h"
#include "caf.h"
/** Demuxer private state filled from the CAF 'desc' and 'pakt' chunks. */
typedef struct {
int bytes_per_packet; ///< bytes in a packet, or 0 if variable
int frames_per_packet; ///< frames in a packet, or 0 if variable
int64_t num_bytes; ///< total number of bytes in stream
int64_t packet_cnt; ///< packet counter
int64_t frame_cnt; ///< frame counter
int64_t data_start; ///< data start position, in bytes
int64_t data_size; ///< raw data size, in bytes
} CaffContext;
/** Probe: a CAF file begins with the tag 'caff' and big-endian version 1. */
static int probe(AVProbeData *p)
{
    if (AV_RB32(p->buf) != MKBETAG('c','a','f','f'))
        return 0;
    if (AV_RB16(&p->buf[4]) != 1)
        return 0;
    return AVPROBE_SCORE_MAX;
}
/** Read audio description chunk */
/** Read audio description chunk: create the stream and fill its codec
 *  parameters from the 32-byte 'desc' payload. */
static int read_desc_chunk(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    CaffContext *caf  = s->priv_data;
    AVStream *st;
    int flags;

    /* new audio stream */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    /* parse format description */
    st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codec->sample_rate = av_int2dbl(get_be64(pb));  /* stored as IEEE double */
    st->codec->codec_tag   = get_be32(pb);
    flags = get_be32(pb);
    caf->bytes_per_packet  = get_be32(pb);
    st->codec->block_align = caf->bytes_per_packet;
    caf->frames_per_packet = get_be32(pb);
    st->codec->channels    = get_be32(pb);
    st->codec->bits_per_coded_sample = get_be32(pb);

    /* calculate bit rate for constant size packets */
    if (caf->frames_per_packet > 0 && caf->bytes_per_packet > 0) {
        st->codec->bit_rate = (uint64_t)st->codec->sample_rate * (uint64_t)caf->bytes_per_packet * 8
                              / (uint64_t)caf->frames_per_packet;
    } else {
        st->codec->bit_rate = 0;
    }

    /* determine codec */
    if (st->codec->codec_tag == MKBETAG('l','p','c','m'))
        /* NOTE(review): the (flags ^ 0x2) | 0x4 transform adapts the CAF
           LPCM flag bits to what ff_mov_get_lpcm_codec_id expects — confirm
           against isom.h before touching */
        st->codec->codec_id = ff_mov_get_lpcm_codec_id(st->codec->bits_per_coded_sample, (flags ^ 0x2) | 0x4);
    else
        st->codec->codec_id = ff_codec_get_id(ff_codec_caf_tags, st->codec->codec_tag);
    return 0;
}
/** Read magic cookie chunk */
/** Read magic cookie chunk: codec-specific setup data, stored either as an
 *  mp4 esds atom (AAC) or passed through verbatim as extradata. */
static int read_kuki_chunk(AVFormatContext *s, int64_t size)
{
    ByteIOContext *pb = s->pb;
    AVStream *st      = s->streams[0];

    if (size < 0 || size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE)
        return -1;

    if (st->codec->codec_id == CODEC_ID_AAC) {
        /* The magic cookie format for AAC is an mp4 esds atom.
           The lavc AAC decoder requires the data from the codec specific
           description as extradata input. */
        int strt, skip;
        MOVAtom atom;

        strt = url_ftell(pb);
        ff_mov_read_esds(s, pb, atom);
        /* skip whatever of the declared chunk size the esds parse left over */
        skip = size - (url_ftell(pb) - strt);
        if (skip < 0 || !st->codec->extradata ||
            st->codec->codec_id != CODEC_ID_AAC) {
            av_log(s, AV_LOG_ERROR, "invalid AAC magic cookie\n");
            return AVERROR_INVALIDDATA;
        }
        url_fskip(pb, skip);
    } else {
        st->codec->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!st->codec->extradata)
            return AVERROR(ENOMEM);
        get_buffer(pb, st->codec->extradata, size);
        st->codec->extradata_size = size;
    }

    return 0;
}
/** Read packet table chunk ('pakt'): per-packet sizes/durations for streams
 *  with variable packet size or frame count. Builds the stream index and
 *  sets nb_frames/duration. Returns 0 on success, negative on error. */
static int read_pakt_chunk(AVFormatContext *s, int64_t size)
{
    ByteIOContext *pb = s->pb;
    AVStream *st = s->streams[0];
    CaffContext *caf = s->priv_data;
    int64_t pos = 0, ccount;
    int num_packets, i;

    /* remember chunk start so the consumed length can be validated below */
    ccount = url_ftell(pb);

    num_packets = get_be64(pb);
    /* reject counts that are negative or would overflow the index alloc */
    if (num_packets < 0 || INT32_MAX / sizeof(AVIndexEntry) < num_packets)
        return AVERROR_INVALIDDATA;

    st->nb_frames = get_be64(pb); /* valid frames */
    st->nb_frames += get_be32(pb); /* priming frames */
    st->nb_frames += get_be32(pb); /* remainder frames */

    st->duration = 0;
    for (i = 0; i < num_packets; i++) {
        av_add_index_entry(s->streams[0], pos, st->duration, 0, 0, AVINDEX_KEYFRAME);
        /* fixed-rate values come from the desc chunk; variable values are
           stored in the table as MPEG-4 style variable-length integers */
        pos += caf->bytes_per_packet ? caf->bytes_per_packet : ff_mp4_read_descr_len(pb);
        st->duration += caf->frames_per_packet ? caf->frames_per_packet : ff_mp4_read_descr_len(pb);
    }

    /* the table must consume exactly the declared chunk size */
    if (url_ftell(pb) - ccount != size) {
        av_log(s, AV_LOG_ERROR, "error reading packet table\n");
        return -1;
    }

    /* total payload bytes covered by the table (used by read_packet) */
    caf->num_bytes = pos;
    return 0;
}
/** Read information chunk ('info'): a 32-bit entry count followed by pairs
 *  of NUL-terminated key/value strings, each stored as file metadata.
 *  The declared chunk size is not needed to walk the entries. */
static void read_info_chunk(AVFormatContext *s, int64_t size)
{
    ByteIOContext *pb = s->pb;
    unsigned int remaining = get_be32(pb);

    while (remaining--) {
        char key[32];
        char value[1024];

        get_strz(pb, key,   sizeof(key));
        get_strz(pb, value, sizeof(value));
        av_metadata_set2(&s->metadata, key, value, 0);
    }
}
/** Parse the CAF file header and all chunks up to (or including) 'data'.
 *  Requires a leading 'desc' chunk; other chunks may appear in any order.
 *  Leaves the stream positioned at the start of the audio payload when the
 *  data size is known. Returns 0 on success, negative on error. */
static int read_header(AVFormatContext *s,
                       AVFormatParameters *ap)
{
    ByteIOContext *pb = s->pb;
    CaffContext *caf = s->priv_data;
    AVStream *st;
    uint32_t tag = 0;
    int found_data, ret;
    int64_t size;

    url_fskip(pb, 8); /* magic, version, file flags */

    /* audio description chunk: mandatory and always 32 bytes */
    if (get_be32(pb) != MKBETAG('d','e','s','c')) {
        av_log(s, AV_LOG_ERROR, "desc chunk not present\n");
        return AVERROR_INVALIDDATA;
    }
    size = get_be64(pb);
    if (size != 32)
        return AVERROR_INVALIDDATA;

    ret = read_desc_chunk(s);
    if (ret)
        return ret;
    st = s->streams[0];

    /* parse each chunk */
    found_data = 0;
    while (!url_feof(pb)) {

        /* stop at data chunk if seeking is not supported or
           data chunk size is unknown */
        if (found_data && (caf->data_size < 0 || url_is_streamed(pb)))
            break;

        tag  = get_be32(pb);
        size = get_be64(pb);
        if (url_feof(pb))
            break;

        switch (tag) {
        case MKBETAG('d','a','t','a'):
            url_fskip(pb, 4); /* edit count */
            caf->data_start = url_ftell(pb);
            /* a size of -1 means "until EOF"; otherwise subtract edit count */
            caf->data_size = size < 0 ? -1 : size - 4;
            /* skip over the payload so later chunks can still be parsed */
            if (caf->data_size > 0 && !url_is_streamed(pb))
                url_fskip(pb, caf->data_size);
            found_data = 1;
            break;

        /* magic cookie chunk */
        case MKBETAG('k','u','k','i'):
            if (read_kuki_chunk(s, size))
                return AVERROR_INVALIDDATA;
            break;

        /* packet table chunk */
        case MKBETAG('p','a','k','t'):
            if (read_pakt_chunk(s, size))
                return AVERROR_INVALIDDATA;
            break;

        case MKBETAG('i','n','f','o'):
            read_info_chunk(s, size);
            break;

        default:
#define _(x) ((x) >= ' ' ? (x) : ' ')
            av_log(s, AV_LOG_WARNING, "skipping CAF chunk: %08X (%c%c%c%c)\n",
                tag, _(tag>>24), _((tag>>16)&0xFF), _((tag>>8)&0xFF), _(tag&0xFF));
#undef _
        /* fallthrough: unknown chunks are skipped like 'free' padding */
        case MKBETAG('f','r','e','e'):
            if (size < 0)
                return AVERROR_INVALIDDATA;
            url_fskip(pb, size);
            break;
        }
    }

    if (!found_data)
        return AVERROR_INVALIDDATA;

    /* constant-rate streams: derive frame count; otherwise the packet
       table is required to describe variable packet sizes/durations */
    if (caf->bytes_per_packet > 0 && caf->frames_per_packet > 0) {
        if (caf->data_size > 0)
            st->nb_frames = (caf->data_size / caf->bytes_per_packet) * caf->frames_per_packet;
    } else if (st->nb_index_entries) {
        st->codec->bit_rate = st->codec->sample_rate * caf->data_size * 8 /
                              st->duration;
    } else {
        av_log(s, AV_LOG_ERROR, "Missing packet table. It is required when "
                                "block size or frame size are variable.\n");
        return AVERROR_INVALIDDATA;
    }

    s->file_size = url_fsize(pb);
    s->file_size = FFMAX(0, s->file_size);

    /* timestamps are in sample units */
    av_set_pts_info(st, 64, 1, st->codec->sample_rate);
    st->start_time = 0;

    /* position the stream at the start of data */
    if (caf->data_size >= 0)
        url_fseek(pb, caf->data_start, SEEK_SET);

    return 0;
}
/* upper bound on bytes bundled into one output packet for
   constant-size single-frame streams */
#define CAF_MAX_PKT_SIZE 4096

/** Read one packet of audio data.
 *  Constant-rate streams bundle several fixed-size packets (up to
 *  CAF_MAX_PKT_SIZE bytes); variable-rate streams use the 'pakt' index.
 *  Returns 0 on success, AVERROR(EIO) at end of data or on read failure. */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *pb = s->pb;
    AVStream *st = s->streams[0];
    CaffContext *caf = s->priv_data;
    int res, pkt_size = 0, pkt_frames = 0;
    int64_t left = CAF_MAX_PKT_SIZE;

    if (url_feof(pb))
        return AVERROR(EIO);

    /* don't read past end of data chunk */
    if (caf->data_size > 0) {
        left = (caf->data_start + caf->data_size) - url_ftell(pb);
        if (left <= 0)
            return AVERROR(EIO);
    }

    pkt_frames = caf->frames_per_packet;
    pkt_size = caf->bytes_per_packet;

    if (pkt_size > 0 && pkt_frames == 1) {
        /* one frame per fixed-size packet: bundle as many whole packets as
           fit under CAF_MAX_PKT_SIZE and the remaining data */
        pkt_size = (CAF_MAX_PKT_SIZE / pkt_size) * pkt_size;
        pkt_size = FFMIN(pkt_size, left);
        pkt_frames = pkt_size / caf->bytes_per_packet;
    } else if (st->nb_index_entries) {
        /* variable-rate: size/duration come from adjacent index entries */
        if (caf->packet_cnt < st->nb_index_entries - 1) {
            pkt_size = st->index_entries[caf->packet_cnt + 1].pos - st->index_entries[caf->packet_cnt].pos;
            pkt_frames = st->index_entries[caf->packet_cnt + 1].timestamp - st->index_entries[caf->packet_cnt].timestamp;
        } else if (caf->packet_cnt == st->nb_index_entries - 1) {
            /* last packet: extend to the end recorded by the packet table */
            pkt_size = caf->num_bytes - st->index_entries[caf->packet_cnt].pos;
            pkt_frames = st->duration - st->index_entries[caf->packet_cnt].timestamp;
        } else {
            return AVERROR(EIO);
        }
    }

    if (pkt_size == 0 || pkt_frames == 0 || pkt_size > left)
        return AVERROR(EIO);

    res = av_get_packet(pb, pkt, pkt_size);
    if (res < 0)
        return res;

    pkt->size = res;
    pkt->stream_index = 0;
    /* pts in sample units; advanced by the frames in this packet */
    pkt->dts = pkt->pts = caf->frame_cnt;

    caf->packet_cnt++;
    caf->frame_cnt += pkt_frames;

    return 0;
}
/** Seek to the given timestamp (in sample units).
 *  Constant-rate streams compute the byte position directly; variable-rate
 *  streams look it up in the packet-table index. Returns 0 on success,
 *  -1 when seeking is not possible. */
static int read_seek(AVFormatContext *s, int stream_index,
                     int64_t timestamp, int flags)
{
    AVStream *st = s->streams[0];
    CaffContext *caf = s->priv_data;
    int64_t pos;

    timestamp = FFMAX(timestamp, 0);

    if (caf->frames_per_packet > 0 && caf->bytes_per_packet > 0) {
        /* calculate new byte position based on target frame position */
        pos = caf->bytes_per_packet * timestamp / caf->frames_per_packet;
        if (caf->data_size > 0)
            pos = FFMIN(pos, caf->data_size);
        caf->packet_cnt = pos / caf->bytes_per_packet;
        caf->frame_cnt  = caf->frames_per_packet * caf->packet_cnt;
    } else if (st->nb_index_entries) {
        caf->packet_cnt = av_index_search_timestamp(st, timestamp, flags);
        /* Fix: av_index_search_timestamp() returns a negative value when no
           suitable entry exists; using it unchecked indexed index_entries[]
           out of bounds. */
        if (caf->packet_cnt < 0)
            return -1;
        caf->frame_cnt = st->index_entries[caf->packet_cnt].timestamp;
        pos            = st->index_entries[caf->packet_cnt].pos;
    } else {
        return -1;
    }

    url_fseek(s->pb, pos + caf->data_start, SEEK_SET);
    return 0;
}
/* CAF demuxer registration (positional AVInputFormat initializers) */
AVInputFormat caf_demuxer = {
    "caf",                                       /* name */
    NULL_IF_CONFIG_SMALL("Apple Core Audio Format"),
    sizeof(CaffContext),                         /* priv_data_size */
    probe,                                       /* read_probe */
    read_header,
    read_packet,
    NULL,                                        /* read_close */
    read_seek,
    .codec_tag = (const AVCodecTag*[]){ff_codec_caf_tags, 0},
};
| 123linslouis-android-video-cutter | jni/libavformat/cafdec.c | C | asf20 | 12,075 |
/*
* Sony Playstation (PSX) STR File Demuxer
* Copyright (c) 2003 The ffmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* PSX STR file demuxer
* by Mike Melanson (melanson@pcisys.net)
* This module handles streams that have been ripped from Sony Playstation
* CD games. This demuxer can handle either raw STR files (which are just
* concatenations of raw compact disc sectors) or STR files with 0x2C-byte
* RIFF headers, followed by CD sectors.
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#define RIFF_TAG MKTAG('R', 'I', 'F', 'F')
#define CDXA_TAG MKTAG('C', 'D', 'X', 'A')
#define RAW_CD_SECTOR_SIZE 2352
#define RAW_CD_SECTOR_DATA_SIZE 2304
#define VIDEO_DATA_CHUNK_SIZE 0x7E0
#define VIDEO_DATA_HEADER_SIZE 0x38
#define RIFF_HEADER_SIZE 0x2C
#define CDXA_TYPE_MASK 0x0E
#define CDXA_TYPE_DATA 0x08
#define CDXA_TYPE_AUDIO 0x04
#define CDXA_TYPE_VIDEO 0x02
#define STR_MAGIC (0x80010160)
/* Per-channel demuxing state; video frames are reassembled across sectors */
typedef struct StrChannel {
    /* video parameters */
    int video_stream_index;   /* -1 until the first video sector is seen */
    AVPacket tmp_pkt;         /* partially assembled video frame */

    /* audio parameters */
    int audio_stream_index;   /* -1 until the first audio sector is seen */
} StrChannel;

typedef struct StrDemuxContext {

    /* a STR file can contain up to 32 channels of data */
    StrChannel channels[32];
} StrDemuxContext;

/* CD sector sync pattern: 00, ten 0xFF bytes, 00 */
static const char sync_header[12] = {0x00,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00};
/** Probe for a PSX STR file: raw CD sectors, optionally behind a
 *  RIFF/CDXA wrapper. Returns 50 (half certainty, since MPEG rips can
 *  look similar) or 0. */
static int str_probe(AVProbeData *p)
{
    const uint8_t *sector = p->buf;
    int sector_type;

    if (p->buf_size < RAW_CD_SECTOR_SIZE)
        return 0;

    /* a RIFF/CDXA wrapper, if present, precedes the first sector */
    if (AV_RL32(&p->buf[0]) == RIFF_TAG &&
        AV_RL32(&p->buf[8]) == CDXA_TAG)
        sector += RIFF_HEADER_SIZE;

    /* look for CD sync header (00, 0xFF x 10, 00) */
    if (memcmp(sector, sync_header, sizeof(sync_header)) != 0)
        return 0;

    /* channel number must be within the 32 supported channels */
    if (sector[0x11] >= 32)
        return 0;

    sector_type = sector[0x12] & CDXA_TYPE_MASK;
    if (sector_type != CDXA_TYPE_VIDEO &&
        sector_type != CDXA_TYPE_AUDIO &&
        sector_type != CDXA_TYPE_DATA)
        return 0;

    /* MPEG files (like those ripped from VCDs) can also look like this;
     * only return half certainty */
    return 50;
}
/** Read the STR header: detect and skip an optional RIFF wrapper and reset
 *  all channel state. Streams are created lazily in str_read_packet().
 *  Returns 0 on success, AVERROR(EIO) on read failure. */
static int str_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    ByteIOContext *pb = s->pb;
    StrDemuxContext *str = s->priv_data;
    unsigned char sector[RAW_CD_SECTOR_SIZE];
    int start;
    int i;

    /* skip over any RIFF header */
    if (get_buffer(pb, sector, RIFF_HEADER_SIZE) != RIFF_HEADER_SIZE)
        return AVERROR(EIO);
    /* Fix: "&sector[0]" had been corrupted into "§or[0]" (HTML-entity
       mangling of "&sect;"), which does not compile; restored. */
    if (AV_RL32(&sector[0]) == RIFF_TAG)
        start = RIFF_HEADER_SIZE;
    else
        start = 0;

    url_fseek(pb, start, SEEK_SET);

    /* no streams exist yet for any channel */
    for (i = 0; i < 32; i++) {
        str->channels[i].video_stream_index =
        str->channels[i].audio_stream_index = -1;
    }

    s->ctx_flags |= AVFMTCTX_NOHEADER;

    return 0;
}
/** Read sectors until a complete packet is available.
 *  Video frames span multiple sectors and are reassembled in the channel's
 *  tmp_pkt; audio sectors are returned directly. Streams are created on
 *  first sight of each channel. Returns 0 with a packet in ret_pkt, or a
 *  negative error code. */
static int str_read_packet(AVFormatContext *s,
                           AVPacket *ret_pkt)
{
    ByteIOContext *pb = s->pb;
    StrDemuxContext *str = s->priv_data;
    unsigned char sector[RAW_CD_SECTOR_SIZE];
    int channel;
    AVPacket *pkt;
    AVStream *st;

    while (1) {

        if (get_buffer(pb, sector, RAW_CD_SECTOR_SIZE) != RAW_CD_SECTOR_SIZE)
            return AVERROR(EIO);

        channel = sector[0x11];
        if (channel >= 32)
            return AVERROR_INVALIDDATA;

        switch (sector[0x12] & CDXA_TYPE_MASK) {

        case CDXA_TYPE_DATA:
        case CDXA_TYPE_VIDEO:
            {
                /* Fix: every "&sector[...]" below had been corrupted into
                   "§or[...]" (HTML-entity mangling of "&sect;"); restored
                   so the sector header fields are actually read. */
                int current_sector = AV_RL16(&sector[0x1C]);
                int sector_count   = AV_RL16(&sector[0x1E]);
                int frame_size     = AV_RL32(&sector[0x24]);

                /* sanity-check the multi-sector frame bookkeeping */
                if(!(   frame_size>=0
                     && current_sector < sector_count
                     && sector_count*VIDEO_DATA_CHUNK_SIZE >=frame_size)){
                    av_log(s, AV_LOG_ERROR, "Invalid parameters %d %d %d\n", current_sector, sector_count, frame_size);
                    break;
                }

                if(str->channels[channel].video_stream_index < 0){
                    /* allocate a new AVStream */
                    st = av_new_stream(s, 0);
                    if (!st)
                        return AVERROR(ENOMEM);
                    av_set_pts_info(st, 64, 1, 15); /* 15 fps */

                    str->channels[channel].video_stream_index = st->index;

                    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                    st->codec->codec_id   = CODEC_ID_MDEC;
                    st->codec->codec_tag  = 0;  /* no fourcc */
                    st->codec->width      = AV_RL16(&sector[0x28]);
                    st->codec->height     = AV_RL16(&sector[0x2A]);
                }

                /* if this is the first sector of the frame, allocate a pkt */
                pkt = &str->channels[channel].tmp_pkt;

                if(pkt->size != sector_count*VIDEO_DATA_CHUNK_SIZE){
                    if(pkt->data)
                        av_log(s, AV_LOG_ERROR, "missmatching sector_count\n");
                    av_free_packet(pkt);
                    if (av_new_packet(pkt, sector_count*VIDEO_DATA_CHUNK_SIZE))
                        return AVERROR(EIO);

                    pkt->pos= url_ftell(pb) - RAW_CD_SECTOR_SIZE;
                    pkt->stream_index =
                        str->channels[channel].video_stream_index;
                }

                /* drop this sector's payload into its slot in the frame */
                memcpy(pkt->data + current_sector*VIDEO_DATA_CHUNK_SIZE,
                       sector + VIDEO_DATA_HEADER_SIZE,
                       VIDEO_DATA_CHUNK_SIZE);

                if (current_sector == sector_count-1) {
                    /* frame complete: hand it over and reset tmp_pkt */
                    pkt->size= frame_size;
                    *ret_pkt = *pkt;
                    pkt->data= NULL;
                    pkt->size= -1;
                    return 0;
                }
            }
            break;

        case CDXA_TYPE_AUDIO:
            if(str->channels[channel].audio_stream_index < 0){
                int fmt = sector[0x13];
                /* allocate a new AVStream */
                st = av_new_stream(s, 0);
                if (!st)
                    return AVERROR(ENOMEM);

                str->channels[channel].audio_stream_index = st->index;

                st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
                st->codec->codec_id    = CODEC_ID_ADPCM_XA;
                st->codec->codec_tag   = 0;  /* no fourcc */
                st->codec->channels    = (fmt&1)?2:1;       /* stereo flag */
                st->codec->sample_rate = (fmt&4)?18900:37800; /* rate flag */
            //  st->codec->bit_rate = 0; //FIXME;
                st->codec->block_align = 128;

                av_set_pts_info(st, 64, 128, st->codec->sample_rate);
            }
            /* audio sectors are returned as-is (2304 payload bytes) */
            pkt = ret_pkt;
            if (av_new_packet(pkt, 2304))
                return AVERROR(EIO);
            memcpy(pkt->data,sector+24,2304);

            pkt->stream_index =
                str->channels[channel].audio_stream_index;
            return 0;
            break;
        default:
            av_log(s, AV_LOG_WARNING, "Unknown sector type %02X\n", sector[0x12]);
            /* drop the sector and move on */
            break;
        }

        if (url_feof(pb))
            return AVERROR(EIO);
    }
}
/** Release any partially assembled video frames still held per channel. */
static int str_read_close(AVFormatContext *s)
{
    StrDemuxContext *str = s->priv_data;
    int channel;

    for (channel = 0; channel < 32; channel++) {
        AVPacket *pkt = &str->channels[channel].tmp_pkt;

        if (pkt->data)
            av_free_packet(pkt);
    }

    return 0;
}
/* PSX STR demuxer registration (positional AVInputFormat initializers) */
AVInputFormat str_demuxer = {
    "psxstr",                                     /* name */
    NULL_IF_CONFIG_SMALL("Sony Playstation STR format"),
    sizeof(StrDemuxContext),                      /* priv_data_size */
    str_probe,                                    /* read_probe */
    str_read_header,
    str_read_packet,
    str_read_close,
};
| 123linslouis-android-video-cutter | jni/libavformat/psxstr.c | C | asf20 | 8,451 |