code
stringlengths
1
2.01M
repo_name
stringlengths
3
62
path
stringlengths
1
267
language
stringclasses
231 values
license
stringclasses
13 values
size
int64
1
2.01M
/* * ADTS muxer. * Copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@smartjog.com> * Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVFORMAT_ADTS_H #define AVFORMAT_ADTS_H #include "avformat.h" #include "libavcodec/mpeg4audio.h" #define ADTS_HEADER_SIZE 7 typedef struct { int write_adts; int objecttype; int sample_rate_index; int channel_conf; int pce_size; uint8_t pce_data[MAX_PCE_SIZE]; } ADTSContext; int ff_adts_write_frame_header(ADTSContext *ctx, uint8_t *buf, int size, int pce_size); int ff_adts_decode_extradata(AVFormatContext *s, ADTSContext *adts, uint8_t *buf, int size); #endif /* AVFORMAT_ADTS_H */
123linslouis-android-video-cutter
jni/libavformat/adts.h
C
asf20
1,492
/* * DXA demuxer * Copyright (c) 2007 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/intreadwrite.h" #include "avformat.h" #include "riff.h" #define DXA_EXTRA_SIZE 9 typedef struct{ int frames; int has_sound; int bpc; uint32_t bytes_left; int64_t wavpos, vidpos; int readvid; }DXAContext; static int dxa_probe(AVProbeData *p) { int w, h; if (p->buf_size < 15) return 0; w = AV_RB16(p->buf + 11); h = AV_RB16(p->buf + 13); /* check file header */ if (p->buf[0] == 'D' && p->buf[1] == 'E' && p->buf[2] == 'X' && p->buf[3] == 'A' && w && w <= 2048 && h && h <= 2048) return AVPROBE_SCORE_MAX; else return 0; } static int dxa_read_header(AVFormatContext *s, AVFormatParameters *ap) { ByteIOContext *pb = s->pb; DXAContext *c = s->priv_data; AVStream *st, *ast; uint32_t tag; int32_t fps; int w, h; int num, den; int flags; tag = get_le32(pb); if (tag != MKTAG('D', 'E', 'X', 'A')) return -1; flags = get_byte(pb); c->frames = get_be16(pb); if(!c->frames){ av_log(s, AV_LOG_ERROR, "File contains no frames ???\n"); return -1; } fps = get_be32(pb); if(fps > 0){ den = 1000; num = fps; }else if (fps < 0){ den = 100000; num = -fps; }else{ den = 10; num = 1; } w = get_be16(pb); h = get_be16(pb); c->has_sound = 0; st = av_new_stream(s, 0); if (!st) return -1; // Parse 
WAV data header if(get_le32(pb) == MKTAG('W', 'A', 'V', 'E')){ uint32_t size, fsize; c->has_sound = 1; size = get_be32(pb); c->vidpos = url_ftell(pb) + size; url_fskip(pb, 16); fsize = get_le32(pb); ast = av_new_stream(s, 0); if (!ast) return -1; ff_get_wav_header(pb, ast->codec, fsize); // find 'data' chunk while(url_ftell(pb) < c->vidpos && !url_feof(pb)){ tag = get_le32(pb); fsize = get_le32(pb); if(tag == MKTAG('d', 'a', 't', 'a')) break; url_fskip(pb, fsize); } c->bpc = (fsize + c->frames - 1) / c->frames; if(ast->codec->block_align) c->bpc = ((c->bpc + ast->codec->block_align - 1) / ast->codec->block_align) * ast->codec->block_align; c->bytes_left = fsize; c->wavpos = url_ftell(pb); url_fseek(pb, c->vidpos, SEEK_SET); } /* now we are ready: build format streams */ st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_DXA; st->codec->width = w; st->codec->height = h; av_reduce(&den, &num, den, num, (1UL<<31)-1); av_set_pts_info(st, 33, num, den); /* flags & 0x80 means that image is interlaced, * flags & 0x40 means that image has double height * either way set true height */ if(flags & 0xC0){ st->codec->height >>= 1; } c->readvid = !c->has_sound; c->vidpos = url_ftell(pb); s->start_time = 0; s->duration = (int64_t)c->frames * AV_TIME_BASE * num / den; av_log(s, AV_LOG_DEBUG, "%d frame(s)\n",c->frames); return 0; } static int dxa_read_packet(AVFormatContext *s, AVPacket *pkt) { DXAContext *c = s->priv_data; int ret; uint32_t size; uint8_t buf[DXA_EXTRA_SIZE], pal[768+4]; int pal_size = 0; if(!c->readvid && c->has_sound && c->bytes_left){ c->readvid = 1; url_fseek(s->pb, c->wavpos, SEEK_SET); size = FFMIN(c->bytes_left, c->bpc); ret = av_get_packet(s->pb, pkt, size); pkt->stream_index = 1; if(ret != size) return AVERROR(EIO); c->bytes_left -= size; c->wavpos = url_ftell(s->pb); return 0; } url_fseek(s->pb, c->vidpos, SEEK_SET); while(!url_feof(s->pb) && c->frames){ get_buffer(s->pb, buf, 4); switch(AV_RL32(buf)){ case MKTAG('N', 'U', 'L', 
'L'): if(av_new_packet(pkt, 4 + pal_size) < 0) return AVERROR(ENOMEM); pkt->stream_index = 0; if(pal_size) memcpy(pkt->data, pal, pal_size); memcpy(pkt->data + pal_size, buf, 4); c->frames--; c->vidpos = url_ftell(s->pb); c->readvid = 0; return 0; case MKTAG('C', 'M', 'A', 'P'): pal_size = 768+4; memcpy(pal, buf, 4); get_buffer(s->pb, pal + 4, 768); break; case MKTAG('F', 'R', 'A', 'M'): get_buffer(s->pb, buf + 4, DXA_EXTRA_SIZE - 4); size = AV_RB32(buf + 5); if(size > 0xFFFFFF){ av_log(s, AV_LOG_ERROR, "Frame size is too big: %d\n", size); return -1; } if(av_new_packet(pkt, size + DXA_EXTRA_SIZE + pal_size) < 0) return AVERROR(ENOMEM); memcpy(pkt->data + pal_size, buf, DXA_EXTRA_SIZE); ret = get_buffer(s->pb, pkt->data + DXA_EXTRA_SIZE + pal_size, size); if(ret != size){ av_free_packet(pkt); return AVERROR(EIO); } if(pal_size) memcpy(pkt->data, pal, pal_size); pkt->stream_index = 0; c->frames--; c->vidpos = url_ftell(s->pb); c->readvid = 0; return 0; default: av_log(s, AV_LOG_ERROR, "Unknown tag %c%c%c%c\n", buf[0], buf[1], buf[2], buf[3]); return -1; } } return AVERROR(EIO); } AVInputFormat dxa_demuxer = { "dxa", NULL_IF_CONFIG_SMALL("DXA"), sizeof(DXAContext), dxa_probe, dxa_read_header, dxa_read_packet, };
123linslouis-android-video-cutter
jni/libavformat/dxa.c
C
asf20
6,501
/* * AIFF/AIFF-C demuxer * Copyright (c) 2006 Patrick Guimond * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/intfloat_readwrite.h" #include "avformat.h" #include "raw.h" #include "aiff.h" #define AIFF 0 #define AIFF_C_VERSION1 0xA2805140 typedef struct { int64_t data_end; } AIFFInputContext; static enum CodecID aiff_codec_get_id(int bps) { if (bps <= 8) return CODEC_ID_PCM_S8; if (bps <= 16) return CODEC_ID_PCM_S16BE; if (bps <= 24) return CODEC_ID_PCM_S24BE; if (bps <= 32) return CODEC_ID_PCM_S32BE; /* bigger than 32 isn't allowed */ return CODEC_ID_NONE; } /* returns the size of the found tag */ static int get_tag(ByteIOContext *pb, uint32_t * tag) { int size; if (url_feof(pb)) return AVERROR(EIO); *tag = get_le32(pb); size = get_be32(pb); if (size < 0) size = 0x7fffffff; return size; } /* Metadata string read */ static void get_meta(AVFormatContext *s, const char *key, int size) { uint8_t *str = av_malloc(size+1); int res; if (!str) { url_fskip(s->pb, size); return; } res = get_buffer(s->pb, str, size); if (res < 0) return; str[res] = 0; av_metadata_set2(&s->metadata, key, str, AV_METADATA_DONT_STRDUP_VAL); } /* Returns the number of sound data frames or negative on error */ static unsigned int get_aiff_header(ByteIOContext *pb, AVCodecContext *codec, int 
size, unsigned version) { AVExtFloat ext; double sample_rate; unsigned int num_frames; if (size & 1) size++; codec->codec_type = AVMEDIA_TYPE_AUDIO; codec->channels = get_be16(pb); num_frames = get_be32(pb); codec->bits_per_coded_sample = get_be16(pb); get_buffer(pb, (uint8_t*)&ext, sizeof(ext));/* Sample rate is in */ sample_rate = av_ext2dbl(ext); /* 80 bits BE IEEE extended float */ codec->sample_rate = sample_rate; size -= 18; /* Got an AIFF-C? */ if (version == AIFF_C_VERSION1) { codec->codec_tag = get_le32(pb); codec->codec_id = ff_codec_get_id(ff_codec_aiff_tags, codec->codec_tag); switch (codec->codec_id) { case CODEC_ID_PCM_S16BE: codec->codec_id = aiff_codec_get_id(codec->bits_per_coded_sample); codec->bits_per_coded_sample = av_get_bits_per_sample(codec->codec_id); break; case CODEC_ID_ADPCM_IMA_QT: codec->block_align = 34*codec->channels; codec->frame_size = 64; break; case CODEC_ID_MACE3: codec->block_align = 2*codec->channels; codec->frame_size = 6; break; case CODEC_ID_MACE6: codec->block_align = 1*codec->channels; codec->frame_size = 6; break; case CODEC_ID_GSM: codec->block_align = 33; codec->frame_size = 160; break; case CODEC_ID_QCELP: codec->block_align = 35; codec->frame_size= 160; break; default: break; } size -= 4; } else { /* Need the codec type */ codec->codec_id = aiff_codec_get_id(codec->bits_per_coded_sample); codec->bits_per_coded_sample = av_get_bits_per_sample(codec->codec_id); } /* Block align needs to be computed in all cases, as the definition * is specific to applications -> here we use the WAVE format definition */ if (!codec->block_align) codec->block_align = (codec->bits_per_coded_sample * codec->channels) >> 3; codec->bit_rate = (codec->frame_size ? 
codec->sample_rate/codec->frame_size : codec->sample_rate) * (codec->block_align << 3); /* Chunk is over */ if (size) url_fseek(pb, size, SEEK_CUR); return num_frames; } static int aiff_probe(AVProbeData *p) { /* check file header */ if (p->buf[0] == 'F' && p->buf[1] == 'O' && p->buf[2] == 'R' && p->buf[3] == 'M' && p->buf[8] == 'A' && p->buf[9] == 'I' && p->buf[10] == 'F' && (p->buf[11] == 'F' || p->buf[11] == 'C')) return AVPROBE_SCORE_MAX; else return 0; } /* aiff input */ static int aiff_read_header(AVFormatContext *s, AVFormatParameters *ap) { int size, filesize; int64_t offset = 0; uint32_t tag; unsigned version = AIFF_C_VERSION1; ByteIOContext *pb = s->pb; AVStream * st; AIFFInputContext *aiff = s->priv_data; /* check FORM header */ filesize = get_tag(pb, &tag); if (filesize < 0 || tag != MKTAG('F', 'O', 'R', 'M')) return AVERROR_INVALIDDATA; /* AIFF data type */ tag = get_le32(pb); if (tag == MKTAG('A', 'I', 'F', 'F')) /* Got an AIFF file */ version = AIFF; else if (tag != MKTAG('A', 'I', 'F', 'C')) /* An AIFF-C file then */ return AVERROR_INVALIDDATA; filesize -= 4; st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); while (filesize > 0) { /* parse different chunks */ size = get_tag(pb, &tag); if (size < 0) return size; filesize -= size + 8; switch (tag) { case MKTAG('C', 'O', 'M', 'M'): /* Common chunk */ /* Then for the complete header info */ st->nb_frames = get_aiff_header(pb, st->codec, size, version); if (st->nb_frames < 0) return st->nb_frames; if (offset > 0) // COMM is after SSND goto got_sound; break; case MKTAG('F', 'V', 'E', 'R'): /* Version chunk */ version = get_be32(pb); break; case MKTAG('N', 'A', 'M', 'E'): /* Sample name chunk */ get_meta(s, "title" , size); break; case MKTAG('A', 'U', 'T', 'H'): /* Author chunk */ get_meta(s, "author" , size); break; case MKTAG('(', 'c', ')', ' '): /* Copyright chunk */ get_meta(s, "copyright", size); break; case MKTAG('A', 'N', 'N', 'O'): /* Annotation chunk */ get_meta(s, "comment" , size); 
break; case MKTAG('S', 'S', 'N', 'D'): /* Sampled sound chunk */ aiff->data_end = url_ftell(pb) + size; offset = get_be32(pb); /* Offset of sound data */ get_be32(pb); /* BlockSize... don't care */ offset += url_ftell(pb); /* Compute absolute data offset */ if (st->codec->block_align) /* Assume COMM already parsed */ goto got_sound; if (url_is_streamed(pb)) { av_log(s, AV_LOG_ERROR, "file is not seekable\n"); return -1; } url_fskip(pb, size - 8); break; case MKTAG('w', 'a', 'v', 'e'): if ((uint64_t)size > (1<<30)) return -1; st->codec->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); st->codec->extradata_size = size; get_buffer(pb, st->codec->extradata, size); break; default: /* Jump */ if (size & 1) /* Always even aligned */ size++; url_fskip (pb, size); } } if (!st->codec->block_align) { av_log(s, AV_LOG_ERROR, "could not find COMM tag\n"); return -1; } got_sound: /* Now positioned, get the sound data start and end */ if (st->nb_frames) s->file_size = st->nb_frames * st->codec->block_align; av_set_pts_info(st, 64, 1, st->codec->sample_rate); st->start_time = 0; st->duration = st->codec->frame_size ? 
st->nb_frames * st->codec->frame_size : st->nb_frames; /* Position the stream at the first block */ url_fseek(pb, offset, SEEK_SET); return 0; } #define MAX_SIZE 4096 static int aiff_read_packet(AVFormatContext *s, AVPacket *pkt) { AVStream *st = s->streams[0]; AIFFInputContext *aiff = s->priv_data; int64_t max_size; int res, size; /* calculate size of remaining data */ max_size = aiff->data_end - url_ftell(s->pb); if (max_size <= 0) return AVERROR_EOF; /* Now for that packet */ if (st->codec->block_align >= 33) // GSM, QCLP, IMA4 size = st->codec->block_align; else size = (MAX_SIZE / st->codec->block_align) * st->codec->block_align; size = FFMIN(max_size, size); res = av_get_packet(s->pb, pkt, size); if (res < 0) return res; /* Only one stream in an AIFF file */ pkt->stream_index = 0; return 0; } AVInputFormat aiff_demuxer = { "aiff", NULL_IF_CONFIG_SMALL("Audio IFF"), sizeof(AIFFInputContext), aiff_probe, aiff_read_header, aiff_read_packet, NULL, pcm_read_seek, .codec_tag= (const AVCodecTag* const []){ff_codec_aiff_tags, 0}, };
123linslouis-android-video-cutter
jni/libavformat/aiffdec.c
C
asf20
9,844
/* * Copyright (c) 2007 The FFmpeg Project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVFORMAT_NETWORK_H #define AVFORMAT_NETWORK_H #include "config.h" #if HAVE_WINSOCK2_H #include <winsock2.h> #include <ws2tcpip.h> #define ff_neterrno() (-WSAGetLastError()) #define FF_NETERROR(err) (-WSA##err) #define WSAEAGAIN WSAEWOULDBLOCK #else #include <sys/types.h> #include <sys/socket.h> #include <netinet/in.h> #include <netdb.h> #define ff_neterrno() AVERROR(errno) #define FF_NETERROR(err) AVERROR(err) #endif #if HAVE_ARPA_INET_H #include <arpa/inet.h> #endif int ff_socket_nonblock(int socket, int enable); static inline int ff_network_init(void) { #if HAVE_WINSOCK2_H WSADATA wsaData; if (WSAStartup(MAKEWORD(1,1), &wsaData)) return 0; #endif return 1; } static inline void ff_network_close(void) { #if HAVE_WINSOCK2_H WSACleanup(); #endif } int ff_inet_aton (const char * str, struct in_addr * add); #if !HAVE_STRUCT_SOCKADDR_STORAGE struct sockaddr_storage { #if HAVE_STRUCT_SOCKADDR_SA_LEN uint8_t ss_len; uint8_t ss_family; #else uint16_t ss_family; #endif char ss_pad1[6]; int64_t ss_align; char ss_pad2[112]; }; #endif #if !HAVE_STRUCT_ADDRINFO struct addrinfo { int ai_flags; int ai_family; int ai_socktype; int ai_protocol; int ai_addrlen; struct sockaddr *ai_addr; char 
*ai_canonname; struct addrinfo *ai_next; }; #endif /* getaddrinfo constants */ #ifndef EAI_FAIL #define EAI_FAIL 4 #endif #ifndef EAI_FAMILY #define EAI_FAMILY 5 #endif #ifndef EAI_NONAME #define EAI_NONAME 8 #endif #ifndef AI_PASSIVE #define AI_PASSIVE 1 #endif #ifndef AI_CANONNAME #define AI_CANONNAME 2 #endif #ifndef AI_NUMERICHOST #define AI_NUMERICHOST 4 #endif #ifndef NI_NOFQDN #define NI_NOFQDN 1 #endif #ifndef NI_NUMERICHOST #define NI_NUMERICHOST 2 #endif #ifndef NI_NAMERQD #define NI_NAMERQD 4 #endif #ifndef NI_NUMERICSERV #define NI_NUMERICSERV 8 #endif #ifndef NI_DGRAM #define NI_DGRAM 16 #endif #if !HAVE_GETADDRINFO int ff_getaddrinfo(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res); void ff_freeaddrinfo(struct addrinfo *res); int ff_getnameinfo(const struct sockaddr *sa, int salen, char *host, int hostlen, char *serv, int servlen, int flags); const char *ff_gai_strerror(int ecode); #define getaddrinfo ff_getaddrinfo #define freeaddrinfo ff_freeaddrinfo #define getnameinfo ff_getnameinfo #define gai_strerror ff_gai_strerror #endif #endif /* AVFORMAT_NETWORK_H */
123linslouis-android-video-cutter
jni/libavformat/network.h
C
asf20
3,305
/* * "Real" compatible muxer and demuxer common code. * Copyright (c) 2009 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "rm.h" const char * const ff_rm_metadata[4] = { "title", "author", "copyright", "comment" };
123linslouis-android-video-cutter
jni/libavformat/rm.c
C
asf20
996
/* * Ogg muxer * Copyright (c) 2007 Baptiste Coudurier <baptiste dot coudurier at free dot fr> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/crc.h" #include "libavcodec/xiph.h" #include "libavcodec/bytestream.h" #include "libavcodec/flac.h" #include "avformat.h" #include "internal.h" #include "vorbiscomment.h" #define MAX_PAGE_SIZE 65025 typedef struct { int64_t granule; int stream_index; uint8_t flags; uint8_t segments_count; uint8_t segments[255]; uint8_t data[MAX_PAGE_SIZE]; uint16_t size; } OGGPage; typedef struct { int64_t duration; unsigned page_counter; uint8_t *header[3]; int header_len[3]; /** for theora granule */ int kfgshift; int64_t last_kf_pts; int vrev; int eos; unsigned page_count; ///< number of page buffered OGGPage page; ///< current page } OGGStreamContext; typedef struct OGGPageList { OGGPage page; struct OGGPageList *next; } OGGPageList; typedef struct { OGGPageList *page_list; } OGGContext; static void ogg_update_checksum(AVFormatContext *s, int64_t crc_offset) { int64_t pos = url_ftell(s->pb); uint32_t checksum = get_checksum(s->pb); url_fseek(s->pb, crc_offset, SEEK_SET); put_be32(s->pb, checksum); url_fseek(s->pb, pos, SEEK_SET); } static void ogg_write_page(AVFormatContext *s, OGGPage *page, int extra_flags) { OGGStreamContext 
*oggstream = s->streams[page->stream_index]->priv_data; int64_t crc_offset; init_checksum(s->pb, ff_crc04C11DB7_update, 0); put_tag(s->pb, "OggS"); put_byte(s->pb, 0); put_byte(s->pb, page->flags | extra_flags); put_le64(s->pb, page->granule); put_le32(s->pb, page->stream_index); put_le32(s->pb, oggstream->page_counter++); crc_offset = url_ftell(s->pb); put_le32(s->pb, 0); // crc put_byte(s->pb, page->segments_count); put_buffer(s->pb, page->segments, page->segments_count); put_buffer(s->pb, page->data, page->size); ogg_update_checksum(s, crc_offset); put_flush_packet(s->pb); oggstream->page_count--; } static int64_t ogg_granule_to_timestamp(OGGStreamContext *oggstream, OGGPage *page) { if (oggstream->kfgshift) return (page->granule>>oggstream->kfgshift) + (page->granule & ((1<<oggstream->kfgshift)-1)); else return page->granule; } static int ogg_compare_granule(AVFormatContext *s, OGGPage *next, OGGPage *page) { AVStream *st2 = s->streams[next->stream_index]; AVStream *st = s->streams[page->stream_index]; int64_t next_granule, cur_granule; if (next->granule == -1 || page->granule == -1) return 0; next_granule = av_rescale_q(ogg_granule_to_timestamp(st2->priv_data, next), st2->time_base, AV_TIME_BASE_Q); cur_granule = av_rescale_q(ogg_granule_to_timestamp(st->priv_data, page), st ->time_base, AV_TIME_BASE_Q); return next_granule > cur_granule; } static int ogg_reset_cur_page(OGGStreamContext *oggstream) { oggstream->page.granule = -1; oggstream->page.flags = 0; oggstream->page.segments_count = 0; oggstream->page.size = 0; return 0; } static int ogg_buffer_page(AVFormatContext *s, OGGStreamContext *oggstream) { OGGContext *ogg = s->priv_data; OGGPageList **p = &ogg->page_list; OGGPageList *l = av_mallocz(sizeof(*l)); if (!l) return AVERROR(ENOMEM); l->page = oggstream->page; oggstream->page_count++; ogg_reset_cur_page(oggstream); while (*p) { if (ogg_compare_granule(s, &(*p)->page, &l->page)) break; p = &(*p)->next; } l->next = *p; *p = l; return 0; } static int 
ogg_buffer_data(AVFormatContext *s, AVStream *st, uint8_t *data, unsigned size, int64_t granule) { OGGStreamContext *oggstream = st->priv_data; int total_segments = size / 255 + 1; uint8_t *p = data; int i, segments, len; for (i = 0; i < total_segments; ) { OGGPage *page = &oggstream->page; segments = FFMIN(total_segments - i, 255 - page->segments_count); if (i && !page->segments_count) page->flags |= 1; // continued packet memset(page->segments+page->segments_count, 255, segments - 1); page->segments_count += segments - 1; len = FFMIN(size, segments*255); page->segments[page->segments_count++] = len - (segments-1)*255; memcpy(page->data+page->size, p, len); p += len; size -= len; i += segments; page->size += len; if (i == total_segments) page->granule = granule; if (page->segments_count == 255) { ogg_buffer_page(s, oggstream); } } return 0; } static uint8_t *ogg_write_vorbiscomment(int offset, int bitexact, int *header_len, AVMetadata *m) { const char *vendor = bitexact ? "ffmpeg" : LIBAVFORMAT_IDENT; int size; uint8_t *p, *p0; unsigned int count; size = offset + ff_vorbiscomment_length(m, vendor, &count); p = av_mallocz(size); if (!p) return NULL; p0 = p; p += offset; ff_vorbiscomment_write(&p, m, vendor, count); *header_len = size; return p0; } static int ogg_build_flac_headers(AVCodecContext *avctx, OGGStreamContext *oggstream, int bitexact, AVMetadata *m) { enum FLACExtradataFormat format; uint8_t *streaminfo; uint8_t *p; if (!ff_flac_is_extradata_valid(avctx, &format, &streaminfo)) return -1; // first packet: STREAMINFO oggstream->header_len[0] = 51; oggstream->header[0] = av_mallocz(51); // per ogg flac specs p = oggstream->header[0]; if (!p) return AVERROR(ENOMEM); bytestream_put_byte(&p, 0x7F); bytestream_put_buffer(&p, "FLAC", 4); bytestream_put_byte(&p, 1); // major version bytestream_put_byte(&p, 0); // minor version bytestream_put_be16(&p, 1); // headers packets without this one bytestream_put_buffer(&p, "fLaC", 4); bytestream_put_byte(&p, 0x00); // 
streaminfo bytestream_put_be24(&p, 34); bytestream_put_buffer(&p, streaminfo, FLAC_STREAMINFO_SIZE); // second packet: VorbisComment p = ogg_write_vorbiscomment(4, bitexact, &oggstream->header_len[1], m); if (!p) return AVERROR(ENOMEM); oggstream->header[1] = p; bytestream_put_byte(&p, 0x84); // last metadata block and vorbis comment bytestream_put_be24(&p, oggstream->header_len[1] - 4); return 0; } #define SPEEX_HEADER_SIZE 80 static int ogg_build_speex_headers(AVCodecContext *avctx, OGGStreamContext *oggstream, int bitexact, AVMetadata *m) { uint8_t *p; if (avctx->extradata_size < SPEEX_HEADER_SIZE) return -1; // first packet: Speex header p = av_mallocz(SPEEX_HEADER_SIZE); if (!p) return AVERROR(ENOMEM); oggstream->header[0] = p; oggstream->header_len[0] = SPEEX_HEADER_SIZE; bytestream_put_buffer(&p, avctx->extradata, SPEEX_HEADER_SIZE); AV_WL32(&oggstream->header[0][68], 0); // set extra_headers to 0 // second packet: VorbisComment p = ogg_write_vorbiscomment(0, bitexact, &oggstream->header_len[1], m); if (!p) return AVERROR(ENOMEM); oggstream->header[1] = p; return 0; } static int ogg_write_header(AVFormatContext *s) { OGGStreamContext *oggstream; int i, j; for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) av_set_pts_info(st, 64, 1, st->codec->sample_rate); else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) av_set_pts_info(st, 64, st->codec->time_base.num, st->codec->time_base.den); if (st->codec->codec_id != CODEC_ID_VORBIS && st->codec->codec_id != CODEC_ID_THEORA && st->codec->codec_id != CODEC_ID_SPEEX && st->codec->codec_id != CODEC_ID_FLAC) { av_log(s, AV_LOG_ERROR, "Unsupported codec id in stream %d\n", i); return -1; } if (!st->codec->extradata || !st->codec->extradata_size) { av_log(s, AV_LOG_ERROR, "No extradata present\n"); return -1; } oggstream = av_mallocz(sizeof(*oggstream)); oggstream->page.stream_index = i; st->priv_data = oggstream; if (st->codec->codec_id == CODEC_ID_FLAC) 
{ int err = ogg_build_flac_headers(st->codec, oggstream, st->codec->flags & CODEC_FLAG_BITEXACT, s->metadata); if (err) { av_log(s, AV_LOG_ERROR, "Error writing FLAC headers\n"); av_freep(&st->priv_data); return err; } } else if (st->codec->codec_id == CODEC_ID_SPEEX) { int err = ogg_build_speex_headers(st->codec, oggstream, st->codec->flags & CODEC_FLAG_BITEXACT, s->metadata); if (err) { av_log(s, AV_LOG_ERROR, "Error writing Speex headers\n"); av_freep(&st->priv_data); return err; } } else { if (ff_split_xiph_headers(st->codec->extradata, st->codec->extradata_size, st->codec->codec_id == CODEC_ID_VORBIS ? 30 : 42, oggstream->header, oggstream->header_len) < 0) { av_log(s, AV_LOG_ERROR, "Extradata corrupted\n"); av_freep(&st->priv_data); return -1; } if (st->codec->codec_id == CODEC_ID_THEORA) { /** KFGSHIFT is the width of the less significant section of the granule position The less significant section is the frame count since the last keyframe */ oggstream->kfgshift = ((oggstream->header[0][40]&3)<<3)|(oggstream->header[0][41]>>5); oggstream->vrev = oggstream->header[0][9]; av_log(s, AV_LOG_DEBUG, "theora kfgshift %d, vrev %d\n", oggstream->kfgshift, oggstream->vrev); } } } for (j = 0; j < s->nb_streams; j++) { OGGStreamContext *oggstream = s->streams[j]->priv_data; ogg_buffer_data(s, s->streams[j], oggstream->header[0], oggstream->header_len[0], 0); oggstream->page.flags |= 2; // bos ogg_buffer_page(s, oggstream); } for (j = 0; j < s->nb_streams; j++) { AVStream *st = s->streams[j]; OGGStreamContext *oggstream = st->priv_data; for (i = 1; i < 3; i++) { if (oggstream && oggstream->header_len[i]) ogg_buffer_data(s, st, oggstream->header[i], oggstream->header_len[i], 0); } ogg_buffer_page(s, oggstream); } return 0; } static void ogg_write_pages(AVFormatContext *s, int flush) { OGGContext *ogg = s->priv_data; OGGPageList *next, *p; if (!ogg->page_list) return; for (p = ogg->page_list; p; ) { OGGStreamContext *oggstream = 
s->streams[p->page.stream_index]->priv_data; if (oggstream->page_count < 2 && !flush) break; ogg_write_page(s, &p->page, flush && oggstream->page_count == 1 ? 4 : 0); // eos next = p->next; av_freep(&p); p = next; } ogg->page_list = p; } static int ogg_write_packet(AVFormatContext *s, AVPacket *pkt) { AVStream *st = s->streams[pkt->stream_index]; OGGStreamContext *oggstream = st->priv_data; int ret; int64_t granule; if (st->codec->codec_id == CODEC_ID_THEORA) { int64_t pts = oggstream->vrev < 1 ? pkt->pts : pkt->pts + pkt->duration; int pframe_count; if (pkt->flags & AV_PKT_FLAG_KEY) oggstream->last_kf_pts = pts; pframe_count = pts - oggstream->last_kf_pts; // prevent frame count from overflow if key frame flag is not set if (pframe_count >= (1<<oggstream->kfgshift)) { oggstream->last_kf_pts += pframe_count; pframe_count = 0; } granule = (oggstream->last_kf_pts<<oggstream->kfgshift) | pframe_count; } else granule = pkt->pts + pkt->duration; oggstream->duration = granule; ret = ogg_buffer_data(s, st, pkt->data, pkt->size, granule); if (ret < 0) return ret; ogg_write_pages(s, 0); return 0; } static int ogg_write_trailer(AVFormatContext *s) { int i; /* flush current page */ for (i = 0; i < s->nb_streams; i++) ogg_buffer_page(s, s->streams[i]->priv_data); ogg_write_pages(s, 1); for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; OGGStreamContext *oggstream = st->priv_data; if (st->codec->codec_id == CODEC_ID_FLAC || st->codec->codec_id == CODEC_ID_SPEEX) { av_free(oggstream->header[0]); av_free(oggstream->header[1]); } av_freep(&st->priv_data); } return 0; } AVOutputFormat ogg_muxer = { "ogg", NULL_IF_CONFIG_SMALL("Ogg"), "application/ogg", "ogg,ogv,spx", sizeof(OGGContext), CODEC_ID_FLAC, CODEC_ID_THEORA, ogg_write_header, ogg_write_packet, ogg_write_trailer, .metadata_conv = ff_vorbiscomment_metadata_conv, };
123linslouis-android-video-cutter
jni/libavformat/oggenc.c
C
asf20
14,425
/*
 * AVC helper functions for muxers
 * Copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@smartjog.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "avio.h"
#include "avc.h"

/**
 * Find the next Annex B start code (00 00 01) in [p, end).
 * Scans word-at-a-time after aligning p to a 4-byte boundary; returns a
 * pointer to the first byte of the start code, or end (i.e. end+3 after the
 * internal end-=3) when none is found.
 */
static const uint8_t *ff_avc_find_startcode_internal(const uint8_t *p, const uint8_t *end)
{
    /* first 4-byte aligned address at or after p */
    const uint8_t *a = p + 4 - ((intptr_t)p & 3);

    /* byte-wise until aligned */
    for (end -= 3; p < a && p < end; p++) {
        if (p[0] == 0 && p[1] == 0 && p[2] == 1)
            return p;
    }

    /* word-wise: the bit trick below tests whether any byte of x is zero */
    for (end -= 3; p < end; p += 4) {
        uint32_t x = *(const uint32_t*)p;
//      if ((x - 0x01000100) & (~x) & 0x80008000) // little endian
//      if ((x - 0x00010001) & (~x) & 0x00800080) // big endian
        if ((x - 0x01010101) & (~x) & 0x80808080) { // generic
            if (p[1] == 0) {
                if (p[0] == 0 && p[2] == 1)
                    return p;
                if (p[2] == 0 && p[3] == 1)
                    return p+1;
            }
            if (p[3] == 0) {
                if (p[2] == 0 && p[4] == 1)
                    return p+2;
                if (p[4] == 0 && p[5] == 1)
                    return p+3;
            }
        }
    }

    /* tail bytes */
    for (end += 3; p < end; p++) {
        if (p[0] == 0 && p[1] == 0 && p[2] == 1)
            return p;
    }

    return end + 3;
}

/**
 * Find the next start code, widening a 3-byte 00 00 01 match to a 4-byte
 * 00 00 00 01 match when the preceding byte is also zero.
 */
const uint8_t *ff_avc_find_startcode(const uint8_t *p, const uint8_t *end){
    const uint8_t *out= ff_avc_find_startcode_internal(p, end);
    if(p<out && out<end && !out[-1]) out--;
    return out;
}

/**
 * Convert an Annex B (start-code delimited) H.264 bitstream to the
 * length-prefixed (AVC/MP4) form, writing each NAL unit as a 32-bit
 * big-endian length followed by its payload.
 *
 * @return total number of bytes written
 */
int ff_avc_parse_nal_units(ByteIOContext *pb, const uint8_t *buf_in, int size)
{
    const uint8_t *p = buf_in;
    const uint8_t *end = p + size;
    const uint8_t *nal_start, *nal_end;

    size = 0;
    nal_start = ff_avc_find_startcode(p, end);
    while (nal_start < end) {
        while(!*(nal_start++));    /* skip the 00.. prefix and the 01 byte */
        nal_end = ff_avc_find_startcode(nal_start, end);
        put_be32(pb, nal_end - nal_start);
        put_buffer(pb, nal_start, nal_end - nal_start);
        size += 4 + nal_end - nal_start;
        nal_start = nal_end;
    }
    return size;
}

/**
 * Like ff_avc_parse_nal_units() but returns the converted bitstream in a
 * freshly allocated buffer.  On success *buf is replaced (the old pointer
 * is freed) and *size updated; caller owns the returned buffer.
 */
int ff_avc_parse_nal_units_buf(const uint8_t *buf_in, uint8_t **buf, int *size)
{
    ByteIOContext *pb;
    int ret = url_open_dyn_buf(&pb);
    if(ret < 0)
        return ret;

    ff_avc_parse_nal_units(pb, buf_in, *size);

    av_freep(buf);
    *size = url_close_dyn_buf(pb, buf);
    return 0;
}

/**
 * Write an ISO 14496-15 AVCDecoderConfigurationRecord (avcC) built from
 * H.264 extradata.  If the extradata is already length-prefixed (no Annex B
 * start code) it is copied through unchanged.
 *
 * @return 0 on success, a negative AVERROR on malformed extradata
 */
int ff_isom_write_avcc(ByteIOContext *pb, const uint8_t *data, int len)
{
    if (len > 6) {
        /* check for h264 start code */
        if (AV_RB32(data) == 0x00000001 ||
            AV_RB24(data) == 0x000001) {
            uint8_t *buf=NULL, *end, *start;
            uint32_t sps_size=0, pps_size=0;
            uint8_t *sps=0, *pps=0;

            int ret = ff_avc_parse_nal_units_buf(data, &buf, &len);
            if (ret < 0)
                return ret;
            start = buf;
            end = buf + len;

            /* look for sps and pps */
            while (buf < end) {
                unsigned int size;
                uint8_t nal_type;
                size = AV_RB32(buf);
                nal_type = buf[4] & 0x1f;
                if (nal_type == 7) { /* SPS */
                    sps = buf + 4;
                    sps_size = size;
                } else if (nal_type == 8) { /* PPS */
                    pps = buf + 4;
                    pps_size = size;
                }
                buf += size + 4;
            }

            /* Previously assert(sps); assert(pps); — with NDEBUG those
             * compile out and the sps[1..3] reads below dereference NULL on
             * malformed extradata, and `start` leaked.  Fail cleanly. */
            if (!sps || !pps || sps_size < 4) {
                av_free(start);
                return AVERROR(EINVAL);
            }

            put_byte(pb, 1); /* version */
            put_byte(pb, sps[1]); /* profile */
            put_byte(pb, sps[2]); /* profile compat */
            put_byte(pb, sps[3]); /* level */
            put_byte(pb, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 1 (11) */
            put_byte(pb, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */

            put_be16(pb, sps_size);
            put_buffer(pb, sps, sps_size);
            put_byte(pb, 1); /* number of pps */
            put_be16(pb, pps_size);
            put_buffer(pb, pps, pps_size);
            av_free(start);
        } else {
            put_buffer(pb, data, len);
        }
    }
    return 0;
}
123linslouis-android-video-cutter
jni/libavformat/avc.c
C
asf20
4,925
/*
 * Adobe Filmstrip muxer
 * Copyright (c) 2010 Peter Ross
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Adobe Filmstrip muxer
 */

#include "libavutil/intreadwrite.h"
#include "avformat.h"

#define RAND_TAG MKBETAG('R','a','n','d')

typedef struct {
    int nb_frames;   // count of packets written; emitted in the trailer
} FilmstripMuxContext;

/** Reject anything that is not raw RGBA video; the format carries nothing else. */
static int write_header(AVFormatContext *s)
{
    if (s->streams[0]->codec->pix_fmt != PIX_FMT_RGBA) {
        av_log(s, AV_LOG_ERROR, "only PIX_FMT_RGBA is supported\n");
        return AVERROR_INVALIDDATA;
    }
    return 0;
}

/** Frames are stored back to back with no per-frame header; just count them. */
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    FilmstripMuxContext *ctx = s->priv_data;

    put_buffer(s->pb, pkt->data, pkt->size);
    ctx->nb_frames++;
    return 0;
}

/** Append the 36-byte filmstrip trailer describing the stream just written. */
static int write_trailer(AVFormatContext *s)
{
    FilmstripMuxContext *ctx  = s->priv_data;
    ByteIOContext *out        = s->pb;
    AVStream *video           = s->streams[0];
    int pad;

    put_be32(out, RAND_TAG);
    put_be32(out, ctx->nb_frames);
    put_be16(out, 0);                                    // packing method
    put_be16(out, 0);                                    // reserved
    put_be16(out, video->codec->width);
    put_be16(out, video->codec->height);
    put_be16(out, 0);                                    // leading
    put_be16(out, 1/av_q2d(video->codec->time_base));    // frame rate
    for (pad = 0; pad < 16; pad++)
        put_byte(out, 0x00);                             // reserved
    put_flush_packet(out);
    return 0;
}

AVOutputFormat filmstrip_muxer = {
    "filmstrip",
    NULL_IF_CONFIG_SMALL("Adobe Filmstrip"),
    NULL,
    "flm",
    sizeof(FilmstripMuxContext),
    CODEC_ID_NONE,
    CODEC_ID_RAWVIDEO,
    write_header,
    write_packet,
    write_trailer,
};
123linslouis-android-video-cutter
jni/libavformat/filmstripenc.c
C
asf20
2,280
/*
 * Copyright (C) 2008 David Conrad
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/get_bits.h"
#include "libavcodec/dirac.h"
#include "avformat.h"
#include "oggdec.h"

/**
 * Parse the Dirac sequence header from the stream's first Ogg packet and
 * fill in the codec parameters.
 * @return 1 when the header was parsed, 0 if already done, -1 on error
 */
static int dirac_header(AVFormatContext *s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    AVStream *st = s->streams[idx];
    dirac_source_params source;
    GetBitContext gb;

    // already parsed the header
    if (st->codec->codec_id == CODEC_ID_DIRAC)
        return 0;

    /* skip the 13-byte packet prefix preceding the sequence header */
    init_get_bits(&gb, os->buf + os->pstart + 13, (os->psize - 13) * 8);
    if (ff_dirac_parse_sequence_header(st->codec, &gb, &source) < 0)
        return -1;

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_DIRAC;
    // dirac in ogg always stores timestamps as though the video were interlaced
    st->time_base = (AVRational){st->codec->time_base.num, 2*st->codec->time_base.den};
    return 1;
}

// various undocument things: granule is signed (only for dirac!)
/**
 * Convert an Ogg/Dirac granule position to a pts.
 * Granule bit layout (per the Ogg Dirac mapping): bits 31+ hold the dts,
 * bits 9..21 the pts-dts delta, and two byte-sized fields (bits 0..7 and
 * 22..29) together form the distance to the previous sync point.
 * dist == 0 marks a packet starting at a sync point (keyframe).
 */
static uint64_t dirac_gptopts(AVFormatContext *s, int idx, uint64_t granule,
                              int64_t *dts_out)
{
    int64_t gp = granule;
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;

    unsigned dist  = ((gp >> 14) & 0xff00) | (gp & 0xff);
    int64_t  dts   = (gp >> 31);
    int64_t  pts   = dts + ((gp >> 9) & 0x1fff);

    if (!dist)
        os->pflags |= AV_PKT_FLAG_KEY;

    if (dts_out)
        *dts_out = dts;

    return pts;
}

/**
 * Parse the header of the obsolete pre-Ogg-mapping ("KW-DIRAC") stream
 * layout; the timebase is stored as two 32-bit big-endian values at
 * offsets 8 and 12 of the header packet.
 */
static int old_dirac_header(AVFormatContext *s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    AVStream *st = s->streams[idx];
    uint8_t *buf = os->buf + os->pstart;

    if (buf[0] != 'K')
        return 0;

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_DIRAC;
    st->time_base.den = AV_RB32(buf+8);
    st->time_base.num = AV_RB32(buf+12);
    return 1;
}

/**
 * Granule conversion for the old mapping: top 34 bits are the last
 * keyframe number, low 30 bits the frame offset from it; pframe == 0
 * therefore means the packet itself is a keyframe.
 */
static uint64_t old_dirac_gptopts(AVFormatContext *s, int idx, uint64_t gp,
                                  int64_t *dts)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    uint64_t iframe = gp >> 30;
    uint64_t pframe = gp & 0x3fffffff;

    if (!pframe)
        os->pflags |= AV_PKT_FLAG_KEY;

    return iframe + pframe;
}

const struct ogg_codec ff_dirac_codec = {
    .magic = "BBCD\0",
    .magicsize = 5,
    .header = dirac_header,
    .gptopts = dirac_gptopts,
    .granule_is_start = 1,
};

const struct ogg_codec ff_old_dirac_codec = {
    .magic = "KW-DIRAC",
    .magicsize = 8,
    .header = old_dirac_header,
    .gptopts = old_dirac_gptopts,
    .granule_is_start = 1,
};
123linslouis-android-video-cutter
jni/libavformat/oggparsedirac.c
C
asf20
3,419
/*
 * MD STUDIO audio demuxer
 *
 * Copyright (c) 2009 Benjamin Larsson
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avformat.h"
#include "raw.h"
#include "libavutil/intreadwrite.h"

#define AT1_SU_SIZE     212   /* ATRAC1 sound-unit size in bytes (per channel) */

/**
 * Probe for an AEA (MD STUDIO) file: magic at offset 0, a sane channel
 * count at offset 264, and matching redundant header bytes around offset
 * 2048 where the audio data begins.
 */
static int aea_read_probe(AVProbeData *p)
{
    if (p->buf_size <= 2048+212)
        return 0;

    /* Magic is '00 08 00 00' in Little Endian*/
    if (AV_RL32(p->buf)==0x800) {
        int bsm_s, bsm_e, inb_s, inb_e, ch;
        ch    = p->buf[264];
        bsm_s = p->buf[2048];
        inb_s = p->buf[2048+1];
        inb_e = p->buf[2048+210];
        bsm_e = p->buf[2048+211];

        if (ch != 1 && ch != 2)
            return 0;

        /* Check so that the redundant bsm bytes and info bytes are valid
         * the block size mode bytes have to be the same
         * the info bytes have to be the same
         */
        if (bsm_s == bsm_e && inb_s == inb_e)
            return AVPROBE_SCORE_MAX / 4 + 1;
    }
    return 0;
}

/**
 * Read the fixed 2048-byte AEA header: only the channel count (byte 264)
 * is taken from it; everything else (rate, bitrate, codec) is fixed by
 * the format.  Leaves the stream positioned at the first sound unit.
 */
static int aea_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    AVStream *st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    /* Parse the amount of channels and skip to pos 2048(0x800) */
    url_fskip(s->pb, 264);
    st->codec->channels = get_byte(s->pb);
    url_fskip(s->pb, 1783);   /* 264 + 1 + 1783 == 2048: start of audio data */

    st->codec->codec_type     = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id       = CODEC_ID_ATRAC1;
    st->codec->sample_rate    = 44100;
    st->codec->bit_rate       = 292000;

    if (st->codec->channels != 1 && st->codec->channels != 2) {
        av_log(s,AV_LOG_ERROR,"Channels %d not supported!\n",st->codec->channels);
        return -1;
    }

    st->codec->channel_layout = (st->codec->channels == 1) ? CH_LAYOUT_MONO : CH_LAYOUT_STEREO;

    /* one packet per (per-channel pair of) sound units */
    st->codec->block_align = AT1_SU_SIZE * st->codec->channels;
    return 0;
}

/** Read one block_align-sized chunk of raw ATRAC1 data. */
static int aea_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret = av_get_packet(s->pb, pkt, s->streams[0]->codec->block_align);

    pkt->stream_index = 0;
    if (ret <= 0)
        return AVERROR(EIO);

    return ret;
}

AVInputFormat aea_demuxer = {
    "aea",
    NULL_IF_CONFIG_SMALL("MD STUDIO audio"),
    0,
    aea_read_probe,
    aea_read_header,
    aea_read_packet,
    0,
    pcm_read_seek,
    .flags= AVFMT_GENERIC_INDEX,
    .extensions = "aea",
};
123linslouis-android-video-cutter
jni/libavformat/aea.c
C
asf20
3,046
/*
 * VorbisComment writer
 * Copyright (c) 2009 James Darnley
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avformat.h"
#include "metadata.h"
#include "vorbiscomment.h"
#include "libavcodec/bytestream.h"

/**
 * VorbisComment metadata conversion mapping.
 * from Ogg Vorbis I format specification: comment field and header specification
 * http://xiph.org/vorbis/doc/v-comment.html
 */
const AVMetadataConv ff_vorbiscomment_metadata_conv[] = {
    { "ALBUMARTIST", "album_artist"},
    { "TRACKNUMBER", "track"       },
    { 0 }
};

/**
 * Compute the serialized size of a VorbisComment block: 4-byte vendor
 * length + vendor string + 4-byte entry count, plus, per tag, a 4-byte
 * length and the "key=value" payload.  Also reports the tag count via
 * *count so the caller can pass it to ff_vorbiscomment_write().
 */
int ff_vorbiscomment_length(AVMetadata *m, const char *vendor_string,
                            unsigned *count)
{
    int len = 8 + strlen(vendor_string);

    *count = 0;
    if (m) {
        AVMetadataTag *entry = NULL;
        while ((entry = av_metadata_get(m, "", entry, AV_METADATA_IGNORE_SUFFIX))) {
            len += 4 + strlen(entry->key) + 1 + strlen(entry->value);
            (*count)++;
        }
    }
    return len;
}

/**
 * Serialize the metadata as a VorbisComment block into *p, advancing the
 * pointer.  The buffer must be at least ff_vorbiscomment_length() bytes
 * and count must be the value that function reported.
 */
int ff_vorbiscomment_write(uint8_t **p, AVMetadata *m,
                           const char *vendor_string, const unsigned count)
{
    unsigned vendor_len = strlen(vendor_string);

    bytestream_put_le32(p, vendor_len);
    bytestream_put_buffer(p, vendor_string, vendor_len);

    if (m) {
        AVMetadataTag *entry = NULL;
        bytestream_put_le32(p, count);
        while ((entry = av_metadata_get(m, "", entry, AV_METADATA_IGNORE_SUFFIX))) {
            unsigned key_len = strlen(entry->key);
            unsigned val_len = strlen(entry->value);
            bytestream_put_le32(p, key_len + 1 + val_len);
            bytestream_put_buffer(p, entry->key, key_len);
            bytestream_put_byte(p, '=');
            bytestream_put_buffer(p, entry->value, val_len);
        }
    } else
        bytestream_put_le32(p, 0);   /* no metadata: zero entries */
    return 0;
}
123linslouis-android-video-cutter
jni/libavformat/vorbiscomment.c
C
asf20
2,495
/* * MPEG1/2 demuxer * Copyright (c) 2000, 2001, 2002 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avformat.h" #include "mpeg.h" //#define DEBUG_SEEK #undef NDEBUG #include <assert.h> /*********************************************/ /* demux code */ #define MAX_SYNC_SIZE 100000 static int check_pes(uint8_t *p, uint8_t *end){ int pes1; int pes2= (p[3] & 0xC0) == 0x80 && (p[4] & 0xC0) != 0x40 &&((p[4] & 0xC0) == 0x00 || (p[4]&0xC0)>>2 == (p[6]&0xF0)); for(p+=3; p<end && *p == 0xFF; p++); if((*p&0xC0) == 0x40) p+=2; if((*p&0xF0) == 0x20){ pes1= p[0]&p[2]&p[4]&1; }else if((*p&0xF0) == 0x30){ pes1= p[0]&p[2]&p[4]&p[5]&p[7]&p[9]&1; }else pes1 = *p == 0x0F; return pes1||pes2; } static int mpegps_probe(AVProbeData *p) { uint32_t code= -1; int sys=0, pspack=0, priv1=0, vid=0, audio=0, invalid=0; int i; int score=0; for(i=0; i<p->buf_size; i++){ code = (code<<8) + p->buf[i]; if ((code & 0xffffff00) == 0x100) { int pes= check_pes(p->buf+i, p->buf+p->buf_size); if(code == SYSTEM_HEADER_START_CODE) sys++; else if(code == PRIVATE_STREAM_1) priv1++; else if(code == PACK_START_CODE) pspack++; else if((code & 0xf0) == VIDEO_ID && pes) vid++; else if((code & 0xe0) == AUDIO_ID && pes) audio++; else if((code & 0xf0) == VIDEO_ID && !pes) invalid++; else if((code & 0xe0) == 
AUDIO_ID && !pes) invalid++; } } if(vid+audio > invalid) /* invalid VDR files nd short PES streams */ score= AVPROBE_SCORE_MAX/4; //av_log(NULL, AV_LOG_ERROR, "%d %d %d %d %d %d len:%d\n", sys, priv1, pspack,vid, audio, invalid, p->buf_size); if(sys>invalid && sys*9 <= pspack*10) return pspack > 2 ? AVPROBE_SCORE_MAX/2+2 : AVPROBE_SCORE_MAX/4; // +1 for .mpg if(pspack > invalid && (priv1+vid+audio)*10 >= pspack*9) return pspack > 2 ? AVPROBE_SCORE_MAX/2+2 : AVPROBE_SCORE_MAX/4; // +1 for .mpg if((!!vid ^ !!audio) && (audio > 4 || vid > 1) && !sys && !pspack && p->buf_size>2048 && vid + audio > invalid) /* PES stream */ return (audio > 12 || vid > 3) ? AVPROBE_SCORE_MAX/2+2 : AVPROBE_SCORE_MAX/4; //02-Penguin.flac has sys:0 priv1:0 pspack:0 vid:0 audio:1 //mp3_misidentified_2.mp3 has sys:0 priv1:0 pspack:0 vid:0 audio:6 return score; } typedef struct MpegDemuxContext { int32_t header_state; unsigned char psm_es_type[256]; int sofdec; } MpegDemuxContext; static int mpegps_read_header(AVFormatContext *s, AVFormatParameters *ap) { MpegDemuxContext *m = s->priv_data; const char *sofdec = "Sofdec"; int v, i = 0; m->header_state = 0xff; s->ctx_flags |= AVFMTCTX_NOHEADER; m->sofdec = -1; do { v = get_byte(s->pb); m->header_state = m->header_state << 8 | v; m->sofdec++; } while (v == sofdec[i] && i++ < 6); m->sofdec = (m->sofdec == 6) ? 1 : 0; /* no need to do more */ return 0; } static int64_t get_pts(ByteIOContext *pb, int c) { uint8_t buf[5]; buf[0] = c<0 ? 
get_byte(pb) : c; get_buffer(pb, buf+1, 4); return ff_parse_pes_pts(buf); } static int find_next_start_code(ByteIOContext *pb, int *size_ptr, int32_t *header_state) { unsigned int state, v; int val, n; state = *header_state; n = *size_ptr; while (n > 0) { if (url_feof(pb)) break; v = get_byte(pb); n--; if (state == 0x000001) { state = ((state << 8) | v) & 0xffffff; val = state; goto found; } state = ((state << 8) | v) & 0xffffff; } val = -1; found: *header_state = state; *size_ptr = n; return val; } #if 0 /* unused, remove? */ /* XXX: optimize */ static int find_prev_start_code(ByteIOContext *pb, int *size_ptr) { int64_t pos, pos_start; int max_size, start_code; max_size = *size_ptr; pos_start = url_ftell(pb); /* in order to go faster, we fill the buffer */ pos = pos_start - 16386; if (pos < 0) pos = 0; url_fseek(pb, pos, SEEK_SET); get_byte(pb); pos = pos_start; for(;;) { pos--; if (pos < 0 || (pos_start - pos) >= max_size) { start_code = -1; goto the_end; } url_fseek(pb, pos, SEEK_SET); start_code = get_be32(pb); if ((start_code & 0xffffff00) == 0x100) break; } the_end: *size_ptr = pos_start - pos; return start_code; } #endif /** * Extracts stream types from a program stream map * According to ISO/IEC 13818-1 ('MPEG-2 Systems') table 2-35 * * @return number of bytes occupied by PSM in the bitstream */ static long mpegps_psm_parse(MpegDemuxContext *m, ByteIOContext *pb) { int psm_length, ps_info_length, es_map_length; psm_length = get_be16(pb); get_byte(pb); get_byte(pb); ps_info_length = get_be16(pb); /* skip program_stream_info */ url_fskip(pb, ps_info_length); es_map_length = get_be16(pb); /* at least one es available? 
*/ while (es_map_length >= 4){ unsigned char type = get_byte(pb); unsigned char es_id = get_byte(pb); uint16_t es_info_length = get_be16(pb); /* remember mapping from stream id to stream type */ m->psm_es_type[es_id] = type; /* skip program_stream_info */ url_fskip(pb, es_info_length); es_map_length -= 4 + es_info_length; } get_be32(pb); /* crc32 */ return 2 + psm_length; } /* read the next PES header. Return its position in ppos (if not NULL), and its start code, pts and dts. */ static int mpegps_read_pes_header(AVFormatContext *s, int64_t *ppos, int *pstart_code, int64_t *ppts, int64_t *pdts) { MpegDemuxContext *m = s->priv_data; int len, size, startcode, c, flags, header_len; int pes_ext, ext2_len, id_ext, skip; int64_t pts, dts; int64_t last_sync= url_ftell(s->pb); error_redo: url_fseek(s->pb, last_sync, SEEK_SET); redo: /* next start code (should be immediately after) */ m->header_state = 0xff; size = MAX_SYNC_SIZE; startcode = find_next_start_code(s->pb, &size, &m->header_state); last_sync = url_ftell(s->pb); //printf("startcode=%x pos=0x%"PRIx64"\n", startcode, url_ftell(s->pb)); if (startcode < 0){ if(url_feof(s->pb)) return AVERROR_EOF; //FIXME we should remember header_state return AVERROR(EAGAIN); } if (startcode == PACK_START_CODE) goto redo; if (startcode == SYSTEM_HEADER_START_CODE) goto redo; if (startcode == PADDING_STREAM) { url_fskip(s->pb, get_be16(s->pb)); goto redo; } if (startcode == PRIVATE_STREAM_2) { len = get_be16(s->pb); if (!m->sofdec) { while (len-- >= 6) { if (get_byte(s->pb) == 'S') { uint8_t buf[5]; get_buffer(s->pb, buf, sizeof(buf)); m->sofdec = !memcmp(buf, "ofdec", 5); len -= sizeof(buf); break; } } m->sofdec -= !m->sofdec; } url_fskip(s->pb, len); goto redo; } if (startcode == PROGRAM_STREAM_MAP) { mpegps_psm_parse(m, s->pb); goto redo; } /* find matching stream */ if (!((startcode >= 0x1c0 && startcode <= 0x1df) || (startcode >= 0x1e0 && startcode <= 0x1ef) || (startcode == 0x1bd) || (startcode == 0x1fd))) goto redo; if (ppos) 
{ *ppos = url_ftell(s->pb) - 4; } len = get_be16(s->pb); pts = dts = AV_NOPTS_VALUE; /* stuffing */ for(;;) { if (len < 1) goto error_redo; c = get_byte(s->pb); len--; /* XXX: for mpeg1, should test only bit 7 */ if (c != 0xff) break; } if ((c & 0xc0) == 0x40) { /* buffer scale & size */ get_byte(s->pb); c = get_byte(s->pb); len -= 2; } if ((c & 0xe0) == 0x20) { dts = pts = get_pts(s->pb, c); len -= 4; if (c & 0x10){ dts = get_pts(s->pb, -1); len -= 5; } } else if ((c & 0xc0) == 0x80) { /* mpeg 2 PES */ #if 0 /* some streams have this field set for no apparent reason */ if ((c & 0x30) != 0) { /* Encrypted multiplex not handled */ goto redo; } #endif flags = get_byte(s->pb); header_len = get_byte(s->pb); len -= 2; if (header_len > len) goto error_redo; len -= header_len; if (flags & 0x80) { dts = pts = get_pts(s->pb, -1); header_len -= 5; if (flags & 0x40) { dts = get_pts(s->pb, -1); header_len -= 5; } } if (flags & 0x3f && header_len == 0){ flags &= 0xC0; av_log(s, AV_LOG_WARNING, "Further flags set but no bytes left\n"); } if (flags & 0x01) { /* PES extension */ pes_ext = get_byte(s->pb); header_len--; /* Skip PES private data, program packet sequence counter and P-STD buffer */ skip = (pes_ext >> 4) & 0xb; skip += skip & 0x9; if (pes_ext & 0x40 || skip > header_len){ av_log(s, AV_LOG_WARNING, "pes_ext %X is invalid\n", pes_ext); pes_ext=skip=0; } url_fskip(s->pb, skip); header_len -= skip; if (pes_ext & 0x01) { /* PES extension 2 */ ext2_len = get_byte(s->pb); header_len--; if ((ext2_len & 0x7f) > 0) { id_ext = get_byte(s->pb); if ((id_ext & 0x80) == 0) startcode = ((startcode & 0xff) << 8) | id_ext; header_len--; } } } if(header_len < 0) goto error_redo; url_fskip(s->pb, header_len); } else if( c!= 0xf ) goto redo; if (startcode == PRIVATE_STREAM_1 && !m->psm_es_type[startcode & 0xff]) { startcode = get_byte(s->pb); len--; if (startcode >= 0x80 && startcode <= 0xcf) { /* audio: skip header */ get_byte(s->pb); get_byte(s->pb); get_byte(s->pb); len -= 3; if 
(startcode >= 0xb0 && startcode <= 0xbf) { /* MLP/TrueHD audio has a 4-byte header */ get_byte(s->pb); len--; } } } if(len<0) goto error_redo; if(dts != AV_NOPTS_VALUE && ppos){ int i; for(i=0; i<s->nb_streams; i++){ if(startcode == s->streams[i]->id && !url_is_streamed(s->pb) /* index useless on streams anyway */) { ff_reduce_index(s, i); av_add_index_entry(s->streams[i], *ppos, dts, 0, 0, AVINDEX_KEYFRAME /* FIXME keyframe? */); } } } *pstart_code = startcode; *ppts = pts; *pdts = dts; return len; } static int mpegps_read_packet(AVFormatContext *s, AVPacket *pkt) { MpegDemuxContext *m = s->priv_data; AVStream *st; int len, startcode, i, es_type; enum CodecID codec_id = CODEC_ID_NONE; enum AVMediaType type; int64_t pts, dts, dummy_pos; //dummy_pos is needed for the index building to work uint8_t av_uninit(dvdaudio_substream_type); redo: len = mpegps_read_pes_header(s, &dummy_pos, &startcode, &pts, &dts); if (len < 0) return len; if(startcode == 0x1bd) { dvdaudio_substream_type = get_byte(s->pb); url_fskip(s->pb, 3); len -= 4; } /* now find stream */ for(i=0;i<s->nb_streams;i++) { st = s->streams[i]; if (st->id == startcode) goto found; } es_type = m->psm_es_type[startcode & 0xff]; if(es_type > 0 && es_type != STREAM_TYPE_PRIVATE_DATA){ if(es_type == STREAM_TYPE_VIDEO_MPEG1){ codec_id = CODEC_ID_MPEG2VIDEO; type = AVMEDIA_TYPE_VIDEO; } else if(es_type == STREAM_TYPE_VIDEO_MPEG2){ codec_id = CODEC_ID_MPEG2VIDEO; type = AVMEDIA_TYPE_VIDEO; } else if(es_type == STREAM_TYPE_AUDIO_MPEG1 || es_type == STREAM_TYPE_AUDIO_MPEG2){ codec_id = CODEC_ID_MP3; type = AVMEDIA_TYPE_AUDIO; } else if(es_type == STREAM_TYPE_AUDIO_AAC){ codec_id = CODEC_ID_AAC; type = AVMEDIA_TYPE_AUDIO; } else if(es_type == STREAM_TYPE_VIDEO_MPEG4){ codec_id = CODEC_ID_MPEG4; type = AVMEDIA_TYPE_VIDEO; } else if(es_type == STREAM_TYPE_VIDEO_H264){ codec_id = CODEC_ID_H264; type = AVMEDIA_TYPE_VIDEO; } else if(es_type == STREAM_TYPE_AUDIO_AC3){ codec_id = CODEC_ID_AC3; type = AVMEDIA_TYPE_AUDIO; } else 
{ goto skip; } } else if (startcode >= 0x1e0 && startcode <= 0x1ef) { static const unsigned char avs_seqh[4] = { 0, 0, 1, 0xb0 }; unsigned char buf[8]; get_buffer(s->pb, buf, 8); url_fseek(s->pb, -8, SEEK_CUR); if(!memcmp(buf, avs_seqh, 4) && (buf[6] != 0 || buf[7] != 1)) codec_id = CODEC_ID_CAVS; else codec_id = CODEC_ID_PROBE; type = AVMEDIA_TYPE_VIDEO; } else if (startcode >= 0x1c0 && startcode <= 0x1df) { type = AVMEDIA_TYPE_AUDIO; codec_id = m->sofdec > 0 ? CODEC_ID_ADPCM_ADX : CODEC_ID_MP2; } else if (startcode >= 0x80 && startcode <= 0x87) { type = AVMEDIA_TYPE_AUDIO; codec_id = CODEC_ID_AC3; } else if ( ( startcode >= 0x88 && startcode <= 0x8f) ||( startcode >= 0x98 && startcode <= 0x9f)) { /* 0x90 - 0x97 is reserved for SDDS in DVD specs */ type = AVMEDIA_TYPE_AUDIO; codec_id = CODEC_ID_DTS; } else if (startcode >= 0xa0 && startcode <= 0xaf) { type = AVMEDIA_TYPE_AUDIO; /* 16 bit form will be handled as CODEC_ID_PCM_S16BE */ codec_id = CODEC_ID_PCM_DVD; } else if (startcode >= 0xb0 && startcode <= 0xbf) { type = AVMEDIA_TYPE_AUDIO; codec_id = CODEC_ID_TRUEHD; } else if (startcode >= 0xc0 && startcode <= 0xcf) { /* Used for both AC-3 and E-AC-3 in EVOB files */ type = AVMEDIA_TYPE_AUDIO; codec_id = CODEC_ID_AC3; } else if (startcode >= 0x20 && startcode <= 0x3f) { type = AVMEDIA_TYPE_SUBTITLE; codec_id = CODEC_ID_DVD_SUBTITLE; } else if (startcode >= 0xfd55 && startcode <= 0xfd5f) { type = AVMEDIA_TYPE_VIDEO; codec_id = CODEC_ID_VC1; } else if (startcode == 0x1bd) { // check dvd audio substream type type = AVMEDIA_TYPE_AUDIO; switch(dvdaudio_substream_type & 0xe0) { case 0xa0: codec_id = CODEC_ID_PCM_DVD; break; case 0x80: if((dvdaudio_substream_type & 0xf8) == 0x88) codec_id = CODEC_ID_DTS; else codec_id = CODEC_ID_AC3; break; default: av_log(s, AV_LOG_ERROR, "Unknown 0x1bd sub-stream\n"); goto skip; } } else { skip: /* skip packet */ url_fskip(s->pb, len); goto redo; } /* no stream found: add a new stream */ st = av_new_stream(s, startcode); if (!st) goto 
skip; st->codec->codec_type = type; st->codec->codec_id = codec_id; if (codec_id != CODEC_ID_PCM_S16BE) st->need_parsing = AVSTREAM_PARSE_FULL; found: if(st->discard >= AVDISCARD_ALL) goto skip; if ((startcode >= 0xa0 && startcode <= 0xaf) || (startcode == 0x1bd && ((dvdaudio_substream_type & 0xe0) == 0xa0))) { int b1, freq; /* for LPCM, we just skip the header and consider it is raw audio data */ if (len <= 3) goto skip; get_byte(s->pb); /* emphasis (1), muse(1), reserved(1), frame number(5) */ b1 = get_byte(s->pb); /* quant (2), freq(2), reserved(1), channels(3) */ get_byte(s->pb); /* dynamic range control (0x80 = off) */ len -= 3; freq = (b1 >> 4) & 3; st->codec->sample_rate = lpcm_freq_tab[freq]; st->codec->channels = 1 + (b1 & 7); st->codec->bits_per_coded_sample = 16 + ((b1 >> 6) & 3) * 4; st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample; if (st->codec->bits_per_coded_sample == 16) st->codec->codec_id = CODEC_ID_PCM_S16BE; else if (st->codec->bits_per_coded_sample == 28) return AVERROR(EINVAL); } av_new_packet(pkt, len); get_buffer(s->pb, pkt->data, pkt->size); pkt->pts = pts; pkt->dts = dts; pkt->pos = dummy_pos; pkt->stream_index = st->index; #if 0 av_log(s, AV_LOG_DEBUG, "%d: pts=%0.3f dts=%0.3f size=%d\n", pkt->stream_index, pkt->pts / 90000.0, pkt->dts / 90000.0, pkt->size); #endif return 0; } static int64_t mpegps_read_dts(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit) { int len, startcode; int64_t pos, pts, dts; pos = *ppos; #ifdef DEBUG_SEEK printf("read_dts: pos=0x%"PRIx64" next=%d -> ", pos, find_next); #endif if (url_fseek(s->pb, pos, SEEK_SET) < 0) return AV_NOPTS_VALUE; for(;;) { len = mpegps_read_pes_header(s, &pos, &startcode, &pts, &dts); if (len < 0) { #ifdef DEBUG_SEEK printf("none (ret=%d)\n", len); #endif return AV_NOPTS_VALUE; } if (startcode == s->streams[stream_index]->id && dts != AV_NOPTS_VALUE) { break; } url_fskip(s->pb, len); } #ifdef DEBUG_SEEK 
printf("pos=0x%"PRIx64" dts=0x%"PRIx64" %0.3f\n", pos, dts, dts / 90000.0); #endif *ppos = pos; return dts; } AVInputFormat mpegps_demuxer = { "mpeg", NULL_IF_CONFIG_SMALL("MPEG-PS format"), sizeof(MpegDemuxContext), mpegps_probe, mpegps_read_header, mpegps_read_packet, NULL, NULL, //mpegps_read_seek, mpegps_read_dts, .flags = AVFMT_SHOW_IDS|AVFMT_TS_DISCONT, };
123linslouis-android-video-cutter
jni/libavformat/mpeg.c
C
asf20
19,523
/*
 * Raw FLAC demuxer
 * Copyright (c) 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/flac.h"
#include "avformat.h"
#include "raw.h"
#include "id3v2.h"
#include "oggdec.h"
#include "vorbiscomment.h"

/**
 * Read the FLAC stream header: skip an optional ID3v2 tag, check for the
 * "fLaC" marker, then walk the metadata blocks.  STREAMINFO becomes the
 * codec extradata (ownership transferred to st->codec), VORBIS_COMMENT is
 * parsed into s->metadata, everything else is skipped.  A file without the
 * marker is accepted as headerless raw FLAC.
 */
static int flac_read_header(AVFormatContext *s,
                             AVFormatParameters *ap)
{
    uint8_t buf[ID3v2_HEADER_SIZE];
    int ret, metadata_last=0, metadata_type, metadata_size, found_streaminfo=0;
    uint8_t header[4];
    uint8_t *buffer=NULL;
    AVStream *st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = CODEC_ID_FLAC;
    st->need_parsing = AVSTREAM_PARSE_FULL;
    /* the parameters will be extracted from the compressed bitstream */

    /* skip ID3v2 header if found */
    ret = get_buffer(s->pb, buf, ID3v2_HEADER_SIZE);
    if (ret == ID3v2_HEADER_SIZE && ff_id3v2_match(buf)) {
        int len = ff_id3v2_tag_len(buf);
        url_fseek(s->pb, len - ID3v2_HEADER_SIZE, SEEK_CUR);
    } else {
        url_fseek(s->pb, 0, SEEK_SET);
    }

    /* if fLaC marker is not found, assume there is no header */
    if (get_le32(s->pb) != MKTAG('f','L','a','C')) {
        url_fseek(s->pb, -4, SEEK_CUR);
        return 0;
    }

    /* process metadata blocks */
    while (!url_feof(s->pb) && !metadata_last) {
        get_buffer(s->pb, header, 4);
        ff_flac_parse_block_header(header, &metadata_last, &metadata_type,
                                   &metadata_size);
        switch (metadata_type) {
        /* allocate and read metadata block for supported types */
        case FLAC_METADATA_TYPE_STREAMINFO:
        case FLAC_METADATA_TYPE_VORBIS_COMMENT:
            buffer = av_mallocz(metadata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!buffer) {
                return AVERROR(ENOMEM);
            }
            if (get_buffer(s->pb, buffer, metadata_size) != metadata_size) {
                av_freep(&buffer);
                return AVERROR(EIO);
            }
            break;
        /* skip metadata block for unsupported types */
        default:
            ret = url_fseek(s->pb, metadata_size, SEEK_CUR);
            if (ret < 0)
                return ret;
        }

        if (metadata_type == FLAC_METADATA_TYPE_STREAMINFO) {
            FLACStreaminfo si;
            /* STREAMINFO can only occur once */
            if (found_streaminfo) {
                av_freep(&buffer);
                return AVERROR_INVALIDDATA;
            }
            if (metadata_size != FLAC_STREAMINFO_SIZE) {
                av_freep(&buffer);
                return AVERROR_INVALIDDATA;
            }
            found_streaminfo = 1;
            /* hand the buffer over to the codec context as extradata */
            st->codec->extradata      = buffer;
            st->codec->extradata_size = metadata_size;
            buffer = NULL;

            /* get codec params from STREAMINFO header */
            ff_flac_parse_streaminfo(st->codec, &si, st->codec->extradata);

            /* set time base and duration */
            if (si.samplerate > 0) {
                av_set_pts_info(st, 64, 1, si.samplerate);
                if (si.samples > 0)
                    st->duration = si.samples;
            }
        } else {
            /* STREAMINFO must be the first block */
            if (!found_streaminfo) {
                av_freep(&buffer);
                return AVERROR_INVALIDDATA;
            }
            /* process supported blocks other than STREAMINFO */
            if (metadata_type == FLAC_METADATA_TYPE_VORBIS_COMMENT) {
                if (ff_vorbis_comment(s, &s->metadata, buffer, metadata_size)) {
                    av_log(s, AV_LOG_WARNING, "error parsing VorbisComment metadata\n");
                }
            }
            av_freep(&buffer);
        }
    }

    return 0;
}

/**
 * Probe: accept when the buffer (after any leading ID3v2 tag) begins with
 * the "fLaC" marker.
 */
static int flac_probe(AVProbeData *p)
{
    uint8_t *bufptr = p->buf;
    uint8_t *end    = p->buf + p->buf_size;

    if(ff_id3v2_match(bufptr))
        bufptr += ff_id3v2_tag_len(bufptr);

    if(bufptr > end-4 || memcmp(bufptr, "fLaC", 4)) return 0;
    else                                            return AVPROBE_SCORE_MAX/2;
}

AVInputFormat flac_demuxer = {
    "flac",
    NULL_IF_CONFIG_SMALL("raw FLAC"),
    0,
    flac_probe,
    flac_read_header,
    ff_raw_read_partial_packet,
    .flags= AVFMT_GENERIC_INDEX,
    .extensions = "flac",
    .value = CODEC_ID_FLAC,
    .metadata_conv = ff_vorbiscomment_metadata_conv,
};
123linslouis-android-video-cutter
jni/libavformat/flacdec.c
C
asf20
5,191
/* * MPEG1/2 muxer and demuxer common defines * Copyright (c) 2000, 2001, 2002 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVFORMAT_MPEG_H #define AVFORMAT_MPEG_H #include <stdint.h> #include "libavutil/intreadwrite.h" #define PACK_START_CODE ((unsigned int)0x000001ba) #define SYSTEM_HEADER_START_CODE ((unsigned int)0x000001bb) #define SEQUENCE_END_CODE ((unsigned int)0x000001b7) #define PACKET_START_CODE_MASK ((unsigned int)0xffffff00) #define PACKET_START_CODE_PREFIX ((unsigned int)0x00000100) #define ISO_11172_END_CODE ((unsigned int)0x000001b9) /* mpeg2 */ #define PROGRAM_STREAM_MAP 0x1bc #define PRIVATE_STREAM_1 0x1bd #define PADDING_STREAM 0x1be #define PRIVATE_STREAM_2 0x1bf #define AUDIO_ID 0xc0 #define VIDEO_ID 0xe0 #define AC3_ID 0x80 #define DTS_ID 0x8a #define LPCM_ID 0xa0 #define SUB_ID 0x20 #define STREAM_TYPE_VIDEO_MPEG1 0x01 #define STREAM_TYPE_VIDEO_MPEG2 0x02 #define STREAM_TYPE_AUDIO_MPEG1 0x03 #define STREAM_TYPE_AUDIO_MPEG2 0x04 #define STREAM_TYPE_PRIVATE_SECTION 0x05 #define STREAM_TYPE_PRIVATE_DATA 0x06 #define STREAM_TYPE_AUDIO_AAC 0x0f #define STREAM_TYPE_VIDEO_MPEG4 0x10 #define STREAM_TYPE_VIDEO_H264 0x1b #define STREAM_TYPE_AUDIO_AC3 0x81 #define STREAM_TYPE_AUDIO_DTS 0x8a static const int lpcm_freq_tab[4] = { 48000, 96000, 
44100, 32000 }; /** * Parse MPEG-PES five-byte timestamp */ static inline int64_t ff_parse_pes_pts(uint8_t *buf) { return (int64_t)(*buf & 0x0e) << 29 | (AV_RB16(buf+1) >> 1) << 15 | AV_RB16(buf+3) >> 1; } #endif /* AVFORMAT_MPEG_H */
123linslouis-android-video-cutter
jni/libavformat/mpeg.h
C
asf20
2,397
/* * D-Cinema audio demuxer * Copyright (c) 2005 Reimar Döffinger * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avformat.h" static int daud_header(AVFormatContext *s, AVFormatParameters *ap) { AVStream *st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = CODEC_ID_PCM_S24DAUD; st->codec->codec_tag = MKTAG('d', 'a', 'u', 'd'); st->codec->channels = 6; st->codec->sample_rate = 96000; st->codec->bit_rate = 3 * 6 * 96000 * 8; st->codec->block_align = 3 * 6; st->codec->bits_per_coded_sample = 24; return 0; } static int daud_packet(AVFormatContext *s, AVPacket *pkt) { ByteIOContext *pb = s->pb; int ret, size; if (url_feof(pb)) return AVERROR(EIO); size = get_be16(pb); get_be16(pb); // unknown ret = av_get_packet(pb, pkt, size); pkt->stream_index = 0; return ret; } static int daud_write_header(struct AVFormatContext *s) { AVCodecContext *codec = s->streams[0]->codec; if (codec->channels!=6 || codec->sample_rate!=96000) return -1; return 0; } static int daud_write_packet(struct AVFormatContext *s, AVPacket *pkt) { put_be16(s->pb, pkt->size); put_be16(s->pb, 0x8010); // unknown put_buffer(s->pb, pkt->data, pkt->size); put_flush_packet(s->pb); return 0; } #if CONFIG_DAUD_DEMUXER AVInputFormat daud_demuxer = { 
"daud", NULL_IF_CONFIG_SMALL("D-Cinema audio format"), 0, NULL, daud_header, daud_packet, NULL, NULL, .extensions = "302", }; #endif #if CONFIG_DAUD_MUXER AVOutputFormat daud_muxer = { "daud", NULL_IF_CONFIG_SMALL("D-Cinema audio format"), NULL, "302", 0, CODEC_ID_PCM_S24DAUD, CODEC_ID_NONE, daud_write_header, daud_write_packet, .flags= AVFMT_NOTIMESTAMPS, }; #endif
123linslouis-android-video-cutter
jni/libavformat/daud.c
C
asf20
2,604
/* * Audio Interleaving functions * * Copyright (c) 2009 Baptiste Coudurier <baptiste dot coudurier at gmail dot com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/fifo.h" #include "avformat.h" #include "audiointerleave.h" #include "internal.h" void ff_audio_interleave_close(AVFormatContext *s) { int i; for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; AudioInterleaveContext *aic = st->priv_data; if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) av_fifo_free(aic->fifo); } } int ff_audio_interleave_init(AVFormatContext *s, const int *samples_per_frame, AVRational time_base) { int i; if (!samples_per_frame) return -1; for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; AudioInterleaveContext *aic = st->priv_data; if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { aic->sample_size = (st->codec->channels * av_get_bits_per_sample(st->codec->codec_id)) / 8; if (!aic->sample_size) { av_log(s, AV_LOG_ERROR, "could not compute sample size\n"); return -1; } aic->samples_per_frame = samples_per_frame; aic->samples = aic->samples_per_frame; aic->time_base = time_base; aic->fifo_size = 100* *aic->samples; aic->fifo= av_fifo_alloc(100 * *aic->samples); } } return 0; } static int ff_interleave_new_audio_packet(AVFormatContext *s, AVPacket 
*pkt, int stream_index, int flush) { AVStream *st = s->streams[stream_index]; AudioInterleaveContext *aic = st->priv_data; int size = FFMIN(av_fifo_size(aic->fifo), *aic->samples * aic->sample_size); if (!size || (!flush && size == av_fifo_size(aic->fifo))) return 0; av_new_packet(pkt, size); av_fifo_generic_read(aic->fifo, pkt->data, size, NULL); pkt->dts = pkt->pts = aic->dts; pkt->duration = av_rescale_q(*aic->samples, st->time_base, aic->time_base); pkt->stream_index = stream_index; aic->dts += pkt->duration; aic->samples++; if (!*aic->samples) aic->samples = aic->samples_per_frame; return size; } int ff_audio_rechunk_interleave(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush, int (*get_packet)(AVFormatContext *, AVPacket *, AVPacket *, int), int (*compare_ts)(AVFormatContext *, AVPacket *, AVPacket *)) { int i; if (pkt) { AVStream *st = s->streams[pkt->stream_index]; AudioInterleaveContext *aic = st->priv_data; if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { unsigned new_size = av_fifo_size(aic->fifo) + pkt->size; if (new_size > aic->fifo_size) { if (av_fifo_realloc2(aic->fifo, new_size) < 0) return -1; aic->fifo_size = new_size; } av_fifo_generic_write(aic->fifo, pkt->data, pkt->size, NULL); } else { // rewrite pts and dts to be decoded time line position pkt->pts = pkt->dts = aic->dts; aic->dts += pkt->duration; ff_interleave_add_packet(s, pkt, compare_ts); } pkt = NULL; } for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { AVPacket new_pkt; while (ff_interleave_new_audio_packet(s, &new_pkt, i, flush)) ff_interleave_add_packet(s, &new_pkt, compare_ts); } } return get_packet(s, out, pkt, flush); }
123linslouis-android-video-cutter
jni/libavformat/audiointerleave.c
C
asf20
4,510
/* * MPEG2 transport stream (aka DVB) muxer * Copyright (c) 2003 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/bswap.h" #include "libavutil/crc.h" #include "libavcodec/mpegvideo.h" #include "avformat.h" #include "internal.h" #include "mpegts.h" #include "adts.h" /* write DVB SI sections */ /*********************************************/ /* mpegts section writer */ typedef struct MpegTSSection { int pid; int cc; void (*write_packet)(struct MpegTSSection *s, const uint8_t *packet); void *opaque; } MpegTSSection; typedef struct MpegTSService { MpegTSSection pmt; /* MPEG2 pmt table context */ int sid; /* service ID */ char *name; char *provider_name; int pcr_pid; int pcr_packet_count; int pcr_packet_period; } MpegTSService; typedef struct MpegTSWrite { MpegTSSection pat; /* MPEG2 pat table */ MpegTSSection sdt; /* MPEG2 sdt table context */ MpegTSService **services; int sdt_packet_count; int sdt_packet_period; int pat_packet_count; int pat_packet_period; int nb_services; int onid; int tsid; uint64_t cur_pcr; int mux_rate; ///< set to 1 when VBR } MpegTSWrite; /* NOTE: 4 bytes must be left at the end for the crc32 */ static void mpegts_write_section(MpegTSSection *s, uint8_t *buf, int len) { MpegTSWrite *ts = 
((AVFormatContext*)s->opaque)->priv_data; unsigned int crc; unsigned char packet[TS_PACKET_SIZE]; const unsigned char *buf_ptr; unsigned char *q; int first, b, len1, left; crc = bswap_32(av_crc(av_crc_get_table(AV_CRC_32_IEEE), -1, buf, len - 4)); buf[len - 4] = (crc >> 24) & 0xff; buf[len - 3] = (crc >> 16) & 0xff; buf[len - 2] = (crc >> 8) & 0xff; buf[len - 1] = (crc) & 0xff; /* send each packet */ buf_ptr = buf; while (len > 0) { first = (buf == buf_ptr); q = packet; *q++ = 0x47; b = (s->pid >> 8); if (first) b |= 0x40; *q++ = b; *q++ = s->pid; s->cc = (s->cc + 1) & 0xf; *q++ = 0x10 | s->cc; if (first) *q++ = 0; /* 0 offset */ len1 = TS_PACKET_SIZE - (q - packet); if (len1 > len) len1 = len; memcpy(q, buf_ptr, len1); q += len1; /* add known padding data */ left = TS_PACKET_SIZE - (q - packet); if (left > 0) memset(q, 0xff, left); s->write_packet(s, packet); buf_ptr += len1; len -= len1; ts->cur_pcr += TS_PACKET_SIZE*8*90000LL/ts->mux_rate; } } static inline void put16(uint8_t **q_ptr, int val) { uint8_t *q; q = *q_ptr; *q++ = val >> 8; *q++ = val; *q_ptr = q; } static int mpegts_write_section1(MpegTSSection *s, int tid, int id, int version, int sec_num, int last_sec_num, uint8_t *buf, int len) { uint8_t section[1024], *q; unsigned int tot_len; tot_len = 3 + 5 + len + 4; /* check if not too big */ if (tot_len > 1024) return -1; q = section; *q++ = tid; put16(&q, 0xb000 | (len + 5 + 4)); /* 5 byte header + 4 byte CRC */ put16(&q, id); *q++ = 0xc1 | (version << 1); /* current_next_indicator = 1 */ *q++ = sec_num; *q++ = last_sec_num; memcpy(q, buf, len); mpegts_write_section(s, section, tot_len); return 0; } /*********************************************/ /* mpegts writer */ #define DEFAULT_PMT_START_PID 0x1000 #define DEFAULT_START_PID 0x0100 #define DEFAULT_PROVIDER_NAME "FFmpeg" #define DEFAULT_SERVICE_NAME "Service01" /* default network id, transport stream and service identifiers */ #define DEFAULT_ONID 0x0001 #define DEFAULT_TSID 0x0001 #define DEFAULT_SID 
0x0001 /* a PES packet header is generated every DEFAULT_PES_HEADER_FREQ packets */ #define DEFAULT_PES_HEADER_FREQ 16 #define DEFAULT_PES_PAYLOAD_SIZE ((DEFAULT_PES_HEADER_FREQ - 1) * 184 + 170) /* we retransmit the SI info at this rate */ #define SDT_RETRANS_TIME 500 #define PAT_RETRANS_TIME 100 #define PCR_RETRANS_TIME 20 typedef struct MpegTSWriteStream { struct MpegTSService *service; int pid; /* stream associated pid */ int cc; int payload_index; int first_pts_check; ///< first pts check needed int64_t payload_pts; int64_t payload_dts; uint8_t payload[DEFAULT_PES_PAYLOAD_SIZE]; ADTSContext *adts; } MpegTSWriteStream; static void mpegts_write_pat(AVFormatContext *s) { MpegTSWrite *ts = s->priv_data; MpegTSService *service; uint8_t data[1012], *q; int i; q = data; for(i = 0; i < ts->nb_services; i++) { service = ts->services[i]; put16(&q, service->sid); put16(&q, 0xe000 | service->pmt.pid); } mpegts_write_section1(&ts->pat, PAT_TID, ts->tsid, 0, 0, 0, data, q - data); } static void mpegts_write_pmt(AVFormatContext *s, MpegTSService *service) { // MpegTSWrite *ts = s->priv_data; uint8_t data[1012], *q, *desc_length_ptr, *program_info_length_ptr; int val, stream_type, i; q = data; put16(&q, 0xe000 | service->pcr_pid); program_info_length_ptr = q; q += 2; /* patched after */ /* put program info here */ val = 0xf000 | (q - program_info_length_ptr - 2); program_info_length_ptr[0] = val >> 8; program_info_length_ptr[1] = val; for(i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; MpegTSWriteStream *ts_st = st->priv_data; AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL,0); switch(st->codec->codec_id) { case CODEC_ID_MPEG1VIDEO: case CODEC_ID_MPEG2VIDEO: stream_type = STREAM_TYPE_VIDEO_MPEG2; break; case CODEC_ID_MPEG4: stream_type = STREAM_TYPE_VIDEO_MPEG4; break; case CODEC_ID_H264: stream_type = STREAM_TYPE_VIDEO_H264; break; case CODEC_ID_DIRAC: stream_type = STREAM_TYPE_VIDEO_DIRAC; break; case CODEC_ID_MP2: case CODEC_ID_MP3: 
stream_type = STREAM_TYPE_AUDIO_MPEG1; break; case CODEC_ID_AAC: stream_type = STREAM_TYPE_AUDIO_AAC; break; case CODEC_ID_AC3: stream_type = STREAM_TYPE_AUDIO_AC3; break; default: stream_type = STREAM_TYPE_PRIVATE_DATA; break; } *q++ = stream_type; put16(&q, 0xe000 | ts_st->pid); desc_length_ptr = q; q += 2; /* patched after */ /* write optional descriptors here */ switch(st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: if (lang && strlen(lang->value) == 3) { *q++ = 0x0a; /* ISO 639 language descriptor */ *q++ = 4; *q++ = lang->value[0]; *q++ = lang->value[1]; *q++ = lang->value[2]; *q++ = 0; /* undefined type */ } break; case AVMEDIA_TYPE_SUBTITLE: { const char *language; language = lang && strlen(lang->value)==3 ? lang->value : "eng"; *q++ = 0x59; *q++ = 8; *q++ = language[0]; *q++ = language[1]; *q++ = language[2]; *q++ = 0x10; /* normal subtitles (0x20 = if hearing pb) */ put16(&q, 1); /* page id */ put16(&q, 1); /* ancillary page id */ } break; case AVMEDIA_TYPE_VIDEO: if (stream_type == STREAM_TYPE_VIDEO_DIRAC) { *q++ = 0x05; /*MPEG-2 registration descriptor*/ *q++ = 4; *q++ = 'd'; *q++ = 'r'; *q++ = 'a'; *q++ = 'c'; } break; } val = 0xf000 | (q - desc_length_ptr - 2); desc_length_ptr[0] = val >> 8; desc_length_ptr[1] = val; } mpegts_write_section1(&service->pmt, PMT_TID, service->sid, 0, 0, 0, data, q - data); } /* NOTE: str == NULL is accepted for an empty string */ static void putstr8(uint8_t **q_ptr, const char *str) { uint8_t *q; int len; q = *q_ptr; if (!str) len = 0; else len = strlen(str); *q++ = len; memcpy(q, str, len); q += len; *q_ptr = q; } static void mpegts_write_sdt(AVFormatContext *s) { MpegTSWrite *ts = s->priv_data; MpegTSService *service; uint8_t data[1012], *q, *desc_list_len_ptr, *desc_len_ptr; int i, running_status, free_ca_mode, val; q = data; put16(&q, ts->onid); *q++ = 0xff; for(i = 0; i < ts->nb_services; i++) { service = ts->services[i]; put16(&q, service->sid); *q++ = 0xfc | 0x00; /* currently no EIT info */ desc_list_len_ptr = 
q; q += 2; running_status = 4; /* running */ free_ca_mode = 0; /* write only one descriptor for the service name and provider */ *q++ = 0x48; desc_len_ptr = q; q++; *q++ = 0x01; /* digital television service */ putstr8(&q, service->provider_name); putstr8(&q, service->name); desc_len_ptr[0] = q - desc_len_ptr - 1; /* fill descriptor length */ val = (running_status << 13) | (free_ca_mode << 12) | (q - desc_list_len_ptr - 2); desc_list_len_ptr[0] = val >> 8; desc_list_len_ptr[1] = val; } mpegts_write_section1(&ts->sdt, SDT_TID, ts->tsid, 0, 0, 0, data, q - data); } static MpegTSService *mpegts_add_service(MpegTSWrite *ts, int sid, const char *provider_name, const char *name) { MpegTSService *service; service = av_mallocz(sizeof(MpegTSService)); if (!service) return NULL; service->pmt.pid = DEFAULT_PMT_START_PID + ts->nb_services - 1; service->sid = sid; service->provider_name = av_strdup(provider_name); service->name = av_strdup(name); service->pcr_pid = 0x1fff; dynarray_add(&ts->services, &ts->nb_services, service); return service; } static void section_write_packet(MpegTSSection *s, const uint8_t *packet) { AVFormatContext *ctx = s->opaque; put_buffer(ctx->pb, packet, TS_PACKET_SIZE); } static int mpegts_write_header(AVFormatContext *s) { MpegTSWrite *ts = s->priv_data; MpegTSWriteStream *ts_st; MpegTSService *service; AVStream *st, *pcr_st = NULL; AVMetadataTag *title; int i; const char *service_name; ts->tsid = DEFAULT_TSID; ts->onid = DEFAULT_ONID; /* allocate a single DVB service */ title = av_metadata_get(s->metadata, "title", NULL, 0); service_name = title ? 
title->value : DEFAULT_SERVICE_NAME; service = mpegts_add_service(ts, DEFAULT_SID, DEFAULT_PROVIDER_NAME, service_name); service->pmt.write_packet = section_write_packet; service->pmt.opaque = s; service->pmt.cc = 15; ts->pat.pid = PAT_PID; ts->pat.cc = 15; // Initialize at 15 so that it wraps and be equal to 0 for the first packet we write ts->pat.write_packet = section_write_packet; ts->pat.opaque = s; ts->sdt.pid = SDT_PID; ts->sdt.cc = 15; ts->sdt.write_packet = section_write_packet; ts->sdt.opaque = s; /* assign pids to each stream */ for(i = 0;i < s->nb_streams; i++) { st = s->streams[i]; ts_st = av_mallocz(sizeof(MpegTSWriteStream)); if (!ts_st) goto fail; st->priv_data = ts_st; ts_st->service = service; ts_st->pid = DEFAULT_START_PID + i; ts_st->payload_pts = AV_NOPTS_VALUE; ts_st->payload_dts = AV_NOPTS_VALUE; ts_st->first_pts_check = 1; ts_st->cc = 15; /* update PCR pid by using the first video stream */ if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && service->pcr_pid == 0x1fff) { service->pcr_pid = ts_st->pid; pcr_st = st; } if (st->codec->codec_id == CODEC_ID_AAC && st->codec->extradata_size > 0) { ts_st->adts = av_mallocz(sizeof(*ts_st->adts)); if (!ts_st->adts) return AVERROR(ENOMEM); if (ff_adts_decode_extradata(s, ts_st->adts, st->codec->extradata, st->codec->extradata_size) < 0) return -1; } } /* if no video stream, use the first stream as PCR */ if (service->pcr_pid == 0x1fff && s->nb_streams > 0) { pcr_st = s->streams[0]; ts_st = pcr_st->priv_data; service->pcr_pid = ts_st->pid; } ts->mux_rate = s->mux_rate ? 
s->mux_rate : 1; if (ts->mux_rate > 1) { service->pcr_packet_period = (ts->mux_rate * PCR_RETRANS_TIME) / (TS_PACKET_SIZE * 8 * 1000); ts->sdt_packet_period = (ts->mux_rate * SDT_RETRANS_TIME) / (TS_PACKET_SIZE * 8 * 1000); ts->pat_packet_period = (ts->mux_rate * PAT_RETRANS_TIME) / (TS_PACKET_SIZE * 8 * 1000); ts->cur_pcr = av_rescale(s->max_delay, 90000, AV_TIME_BASE); } else { /* Arbitrary values, PAT/PMT could be written on key frames */ ts->sdt_packet_period = 200; ts->pat_packet_period = 40; if (pcr_st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { if (!pcr_st->codec->frame_size) { av_log(s, AV_LOG_WARNING, "frame size not set\n"); service->pcr_packet_period = pcr_st->codec->sample_rate/(10*512); } else { service->pcr_packet_period = pcr_st->codec->sample_rate/(10*pcr_st->codec->frame_size); } } else { // max delta PCR 0.1s service->pcr_packet_period = pcr_st->codec->time_base.den/(10*pcr_st->codec->time_base.num); } } // output a PCR as soon as possible service->pcr_packet_count = service->pcr_packet_period; ts->pat_packet_count = ts->pat_packet_period-1; ts->sdt_packet_count = ts->sdt_packet_period-1; av_log(s, AV_LOG_INFO, "muxrate %d bps, pcr every %d pkts, " "sdt every %d, pat/pmt every %d pkts\n", ts->mux_rate, service->pcr_packet_period, ts->sdt_packet_period, ts->pat_packet_period); put_flush_packet(s->pb); return 0; fail: for(i = 0;i < s->nb_streams; i++) { st = s->streams[i]; av_free(st->priv_data); } return -1; } /* send SDT, PAT and PMT tables regulary */ static void retransmit_si_info(AVFormatContext *s) { MpegTSWrite *ts = s->priv_data; int i; if (++ts->sdt_packet_count == ts->sdt_packet_period) { ts->sdt_packet_count = 0; mpegts_write_sdt(s); } if (++ts->pat_packet_count == ts->pat_packet_period) { ts->pat_packet_count = 0; mpegts_write_pat(s); for(i = 0; i < ts->nb_services; i++) { mpegts_write_pmt(s, ts->services[i]); } } } /* Write a single null transport stream packet */ static void mpegts_insert_null_packet(AVFormatContext *s) { MpegTSWrite 
*ts = s->priv_data; uint8_t *q; uint8_t buf[TS_PACKET_SIZE]; q = buf; *q++ = 0x47; *q++ = 0x00 | 0x1f; *q++ = 0xff; *q++ = 0x10; memset(q, 0x0FF, TS_PACKET_SIZE - (q - buf)); put_buffer(s->pb, buf, TS_PACKET_SIZE); ts->cur_pcr += TS_PACKET_SIZE*8*90000LL/ts->mux_rate; } /* Write a single transport stream packet with a PCR and no payload */ static void mpegts_insert_pcr_only(AVFormatContext *s, AVStream *st) { MpegTSWrite *ts = s->priv_data; MpegTSWriteStream *ts_st = st->priv_data; uint8_t *q; uint64_t pcr = ts->cur_pcr; uint8_t buf[TS_PACKET_SIZE]; q = buf; *q++ = 0x47; *q++ = ts_st->pid >> 8; *q++ = ts_st->pid; *q++ = 0x20 | ts_st->cc; /* Adaptation only */ /* Continuity Count field does not increment (see 13818-1 section 2.4.3.3) */ *q++ = TS_PACKET_SIZE - 5; /* Adaptation Field Length */ *q++ = 0x10; /* Adaptation flags: PCR present */ /* PCR coded into 6 bytes */ *q++ = pcr >> 25; *q++ = pcr >> 17; *q++ = pcr >> 9; *q++ = pcr >> 1; *q++ = (pcr & 1) << 7; *q++ = 0; /* stuffing bytes */ memset(q, 0xFF, TS_PACKET_SIZE - (q - buf)); put_buffer(s->pb, buf, TS_PACKET_SIZE); ts->cur_pcr += TS_PACKET_SIZE*8*90000LL/ts->mux_rate; } static void write_pts(uint8_t *q, int fourbits, int64_t pts) { int val; val = fourbits << 4 | (((pts >> 30) & 0x07) << 1) | 1; *q++ = val; val = (((pts >> 15) & 0x7fff) << 1) | 1; *q++ = val >> 8; *q++ = val; val = (((pts) & 0x7fff) << 1) | 1; *q++ = val >> 8; *q++ = val; } /* Add a pes header to the front of payload, and segment into an integer number of * ts packets. The final ts packet is padded using an over-sized adaptation header * to exactly fill the last ts packet. * NOTE: 'payload' contains a complete PES payload. 
*/ static void mpegts_write_pes(AVFormatContext *s, AVStream *st, const uint8_t *payload, int payload_size, int64_t pts, int64_t dts) { MpegTSWriteStream *ts_st = st->priv_data; MpegTSWrite *ts = s->priv_data; uint8_t buf[TS_PACKET_SIZE]; uint8_t *q; int val, is_start, len, header_len, write_pcr, private_code, flags; int afc_len, stuffing_len; int64_t pcr = -1; /* avoid warning */ int64_t delay = av_rescale(s->max_delay, 90000, AV_TIME_BASE); is_start = 1; while (payload_size > 0) { retransmit_si_info(s); write_pcr = 0; if (ts_st->pid == ts_st->service->pcr_pid) { if (ts->mux_rate > 1 || is_start) // VBR pcr period is based on frames ts_st->service->pcr_packet_count++; if (ts_st->service->pcr_packet_count >= ts_st->service->pcr_packet_period) { ts_st->service->pcr_packet_count = 0; write_pcr = 1; } } if (ts->mux_rate > 1 && dts != AV_NOPTS_VALUE && (dts - (int64_t)ts->cur_pcr) > delay) { /* pcr insert gets priority over null packet insert */ if (write_pcr) mpegts_insert_pcr_only(s, st); else mpegts_insert_null_packet(s); continue; /* recalculate write_pcr and possibly retransmit si_info */ } /* prepare packet header */ q = buf; *q++ = 0x47; val = (ts_st->pid >> 8); if (is_start) val |= 0x40; *q++ = val; *q++ = ts_st->pid; ts_st->cc = (ts_st->cc + 1) & 0xf; *q++ = 0x10 | ts_st->cc | (write_pcr ? 
0x20 : 0); if (write_pcr) { // add 11, pcr references the last byte of program clock reference base if (ts->mux_rate > 1) pcr = ts->cur_pcr + (4+7)*8*90000LL / ts->mux_rate; else pcr = dts - delay; if (dts != AV_NOPTS_VALUE && dts < pcr) av_log(s, AV_LOG_WARNING, "dts < pcr, TS is invalid\n"); *q++ = 7; /* AFC length */ *q++ = 0x10; /* flags: PCR present */ *q++ = pcr >> 25; *q++ = pcr >> 17; *q++ = pcr >> 9; *q++ = pcr >> 1; *q++ = (pcr & 1) << 7; *q++ = 0; } if (is_start) { int pes_extension = 0; /* write PES header */ *q++ = 0x00; *q++ = 0x00; *q++ = 0x01; private_code = 0; if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { if (st->codec->codec_id == CODEC_ID_DIRAC) { *q++ = 0xfd; } else *q++ = 0xe0; } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && (st->codec->codec_id == CODEC_ID_MP2 || st->codec->codec_id == CODEC_ID_MP3)) { *q++ = 0xc0; } else { *q++ = 0xbd; if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) { private_code = 0x20; } } header_len = 0; flags = 0; if (pts != AV_NOPTS_VALUE) { header_len += 5; flags |= 0x80; } if (dts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE && dts != pts) { header_len += 5; flags |= 0x40; } if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && st->codec->codec_id == CODEC_ID_DIRAC) { /* set PES_extension_flag */ pes_extension = 1; flags |= 0x01; /* * One byte for PES2 extension flag + * one byte for extension length + * one byte for extension id */ header_len += 3; } len = payload_size + header_len + 3; if (private_code != 0) len++; if (len > 0xffff) len = 0; *q++ = len >> 8; *q++ = len; val = 0x80; /* data alignment indicator is required for subtitle data */ if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) val |= 0x04; *q++ = val; *q++ = flags; *q++ = header_len; if (pts != AV_NOPTS_VALUE) { write_pts(q, flags >> 6, pts); q += 5; } if (dts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE && dts != pts) { write_pts(q, 1, dts); q += 5; } if (pes_extension && st->codec->codec_id == CODEC_ID_DIRAC) { flags = 0x01; /* set 
PES_extension_flag_2 */ *q++ = flags; *q++ = 0x80 | 0x01; /* marker bit + extension length */ /* * Set the stream id extension flag bit to 0 and * write the extended stream id */ *q++ = 0x00 | 0x60; } if (private_code != 0) *q++ = private_code; is_start = 0; } /* header size */ header_len = q - buf; /* data len */ len = TS_PACKET_SIZE - header_len; if (len > payload_size) len = payload_size; stuffing_len = TS_PACKET_SIZE - header_len - len; if (stuffing_len > 0) { /* add stuffing with AFC */ if (buf[3] & 0x20) { /* stuffing already present: increase its size */ afc_len = buf[4] + 1; memmove(buf + 4 + afc_len + stuffing_len, buf + 4 + afc_len, header_len - (4 + afc_len)); buf[4] += stuffing_len; memset(buf + 4 + afc_len, 0xff, stuffing_len); } else { /* add stuffing */ memmove(buf + 4 + stuffing_len, buf + 4, header_len - 4); buf[3] |= 0x20; buf[4] = stuffing_len - 1; if (stuffing_len >= 2) { buf[5] = 0x00; memset(buf + 6, 0xff, stuffing_len - 2); } } } memcpy(buf + TS_PACKET_SIZE - len, payload, len); payload += len; payload_size -= len; put_buffer(s->pb, buf, TS_PACKET_SIZE); ts->cur_pcr += TS_PACKET_SIZE*8*90000LL/ts->mux_rate; } put_flush_packet(s->pb); } static int mpegts_write_packet(AVFormatContext *s, AVPacket *pkt) { AVStream *st = s->streams[pkt->stream_index]; int size = pkt->size; uint8_t *buf= pkt->data; uint8_t *data= NULL; MpegTSWriteStream *ts_st = st->priv_data; const uint64_t delay = av_rescale(s->max_delay, 90000, AV_TIME_BASE)*2; int64_t dts = AV_NOPTS_VALUE, pts = AV_NOPTS_VALUE; if (pkt->pts != AV_NOPTS_VALUE) pts = pkt->pts + delay; if (pkt->dts != AV_NOPTS_VALUE) dts = pkt->dts + delay; if (ts_st->first_pts_check && pts == AV_NOPTS_VALUE) { av_log(s, AV_LOG_ERROR, "first pts value must set\n"); return -1; } ts_st->first_pts_check = 0; if (st->codec->codec_id == CODEC_ID_H264) { const uint8_t *p = buf, *buf_end = p+size; uint32_t state = -1; if (pkt->size < 5 || AV_RB32(pkt->data) != 0x0000001) { av_log(s, AV_LOG_ERROR, "h264 bitstream 
malformated, " "no startcode found, use -vbsf h264_mp4toannexb\n"); return -1; } do { p = ff_find_start_code(p, buf_end, &state); //av_log(s, AV_LOG_INFO, "nal %d\n", state & 0x1f); } while (p < buf_end && (state & 0x1f) != 9 && (state & 0x1f) != 5 && (state & 0x1f) != 1); if ((state & 0x1f) != 9) { // AUD NAL data = av_malloc(pkt->size+6); if (!data) return -1; memcpy(data+6, pkt->data, pkt->size); AV_WB32(data, 0x00000001); data[4] = 0x09; data[5] = 0xe0; // any slice type buf = data; size = pkt->size+6; } } else if (st->codec->codec_id == CODEC_ID_AAC) { if (pkt->size < 2) return -1; if ((AV_RB16(pkt->data) & 0xfff0) != 0xfff0) { ADTSContext *adts = ts_st->adts; int new_size; if (!adts) { av_log(s, AV_LOG_ERROR, "aac bitstream not in adts format " "and extradata missing\n"); return -1; } new_size = ADTS_HEADER_SIZE+adts->pce_size+pkt->size; if ((unsigned)new_size >= INT_MAX) return -1; data = av_malloc(new_size); if (!data) return AVERROR(ENOMEM); ff_adts_write_frame_header(adts, data, pkt->size, adts->pce_size); if (adts->pce_size) { memcpy(data+ADTS_HEADER_SIZE, adts->pce_data, adts->pce_size); adts->pce_size = 0; } memcpy(data+ADTS_HEADER_SIZE+adts->pce_size, pkt->data, pkt->size); buf = data; size = new_size; } } if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO) { // for video and subtitle, write a single pes packet mpegts_write_pes(s, st, buf, size, pts, dts); av_free(data); return 0; } if (ts_st->payload_index + size > DEFAULT_PES_PAYLOAD_SIZE) { mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_index, ts_st->payload_pts, ts_st->payload_dts); ts_st->payload_index = 0; } if (!ts_st->payload_index) { ts_st->payload_pts = pts; ts_st->payload_dts = dts; } memcpy(ts_st->payload + ts_st->payload_index, buf, size); ts_st->payload_index += size; av_free(data); return 0; } static int mpegts_write_end(AVFormatContext *s) { MpegTSWrite *ts = s->priv_data; MpegTSWriteStream *ts_st; MpegTSService *service; AVStream *st; int i; /* flush current packets */ for(i = 0; 
i < s->nb_streams; i++) { st = s->streams[i]; ts_st = st->priv_data; if (ts_st->payload_index > 0) { mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_index, ts_st->payload_pts, ts_st->payload_dts); } av_freep(&ts_st->adts); } put_flush_packet(s->pb); for(i = 0; i < ts->nb_services; i++) { service = ts->services[i]; av_freep(&service->provider_name); av_freep(&service->name); av_free(service); } av_free(ts->services); return 0; } AVOutputFormat mpegts_muxer = { "mpegts", NULL_IF_CONFIG_SMALL("MPEG-2 transport stream format"), "video/x-mpegts", "ts,m2t", sizeof(MpegTSWrite), CODEC_ID_MP2, CODEC_ID_MPEG2VIDEO, mpegts_write_header, mpegts_write_packet, mpegts_write_end, };
123linslouis-android-video-cutter
jni/libavformat/mpegtsenc.c
C
asf20
29,348
/*
 * ISO Media common code
 * copyright (c) 2001 Fabrice Bellard
 * copyright (c) 2002 Francois Revol <revol@free.fr>
 * copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVFORMAT_ISOM_H
#define AVFORMAT_ISOM_H

#include "avio.h"
#include "riff.h"
#include "dv.h"

/* tag tables defined in isom.c */
extern const AVCodecTag ff_mp4_obj_type[];
extern const AVCodecTag codec_movvideo_tags[];
extern const AVCodecTag codec_movaudio_tags[];
extern const AVCodecTag ff_codec_movsubtitle_tags[];

/* Language-code conversion between ISO 639 strings and the packed
 * numeric form used by MOV/MP4 'mdhd' atoms. */
int ff_mov_iso639_to_lang(const char lang[4], int mp4);
int ff_mov_lang_to_iso639(unsigned code, char to[4]);

/* the QuickTime file format is quite convoluted...
 * it has lots of index tables, each indexing something in another one...
 * Here we just use what is needed to read the chunks
 */

/* One entry of an 'stts' (time-to-sample) or 'ctts' (composition offset)
 * table: a run of 'count' samples sharing the same duration/offset. */
typedef struct {
    int count;
    int duration;
} MOVStts;

/* One entry of an 'stsc' (sample-to-chunk) table. */
typedef struct {
    int first;
    int count;
    int id;
} MOVStsc;

/* Data reference ('dref') entry: describes where a track's media
 * data lives (possibly an external file). */
typedef struct {
    uint32_t type;
    char *path;
    char *dir;
    char volume[28];
    char filename[64];
    int16_t nlvl_to, nlvl_from;
} MOVDref;

/* Generic atom header as passed to atom parsers. */
typedef struct {
    uint32_t type;
    int64_t size; /* total size (excluding the size and type fields) */
} MOVAtom;

struct MOVParseTableEntry;

/* State of the movie fragment ('moof'/'tfhd'/'trun') currently being read. */
typedef struct {
    unsigned track_id;
    uint64_t base_data_offset;
    uint64_t moof_offset;
    unsigned stsd_id;
    unsigned duration;
    unsigned size;
    unsigned flags;
} MOVFragment;

/* Per-track defaults from a 'trex' atom, used by fragmented files. */
typedef struct {
    unsigned track_id;
    unsigned stsd_id;
    unsigned duration;
    unsigned size;
    unsigned flags;
} MOVTrackExt;

/* Per-stream demuxer state: the sample/chunk index tables parsed from
 * the track's 'stbl' atoms plus the current read position. */
typedef struct MOVStreamContext {
    ByteIOContext *pb;
    int ffindex;          ///< AVStream index
    int next_chunk;
    unsigned int chunk_count;
    int64_t *chunk_offsets;
    unsigned int stts_count;
    MOVStts *stts_data;
    unsigned int ctts_count;
    MOVStts *ctts_data;
    unsigned int stsc_count;
    MOVStsc *stsc_data;
    unsigned int stps_count;
    unsigned *stps_data;  ///< partial sync sample for mpeg-2 open gop
    int ctts_index;
    int ctts_sample;
    unsigned int sample_size;
    unsigned int sample_count;
    int *sample_sizes;
    unsigned int keyframe_count;
    int *keyframes;
    int time_scale;
    int time_offset;      ///< time offset of the first edit list entry
    int current_sample;
    unsigned int bytes_per_frame;
    unsigned int samples_per_frame;
    int dv_audio_container;
    int pseudo_stream_id; ///< -1 means demux all ids
    int16_t audio_cid;    ///< stsd audio compression id
    unsigned drefs_count;
    MOVDref *drefs;
    int dref_id;
    int wrong_dts;        ///< dts are wrong due to huge ctts offset (iMovie files)
    int width;            ///< tkhd width
    int height;           ///< tkhd height
    int dts_shift;        ///< dts shift when ctts is negative
} MOVStreamContext;

/* Top-level demuxer context for a MOV/MP4/3GP file. */
typedef struct MOVContext {
    AVFormatContext *fc;
    int time_scale;
    int64_t duration;     ///< duration of the longest track
    int found_moov;       ///< 'moov' atom has been found
    int found_mdat;       ///< 'mdat' atom has been found
    DVDemuxContext *dv_demux;
    AVFormatContext *dv_fctx;
    int isom;             ///< 1 if file is ISO Media (mp4/3gp)
    MOVFragment fragment; ///< current fragment in moof atom
    MOVTrackExt *trex_data;
    unsigned trex_count;
    int itunes_metadata;  ///< metadata are itunes style
    int chapter_track;
} MOVContext;

/* Read an MPEG-4 descriptor length field (expandable size encoding). */
int ff_mp4_read_descr_len(ByteIOContext *pb);
/* Parse an 'esds' atom and set up the codec accordingly. */
int ff_mov_read_esds(AVFormatContext *fc, ByteIOContext *pb, MOVAtom atom);
/* Map LPCM bit depth + format flags to a CODEC_ID_PCM_* id. */
enum CodecID ff_mov_get_lpcm_codec_id(int bps, int flags);

#endif /* AVFORMAT_ISOM_H */
123linslouis-android-video-cutter
jni/libavformat/isom.h
C
asf20
4,381
/*
 * CAF common code
 * Copyright (c) 2007 Justin Ruggles
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * CAF common code
 */

#include "avformat.h"
#include "riff.h"
#include "caf.h"

/**
 * Known codec tags for CAF.
 * Maps FFmpeg codec ids to the big-endian four-character codes used in
 * Core Audio Format files. The table is terminated by a CODEC_ID_NONE
 * entry; entry order matters for id->tag lookups (first match wins),
 * e.g. QDM2 maps to 'QDM2' rather than 'QDMC'.
 */
const AVCodecTag ff_codec_caf_tags[] = {
    { CODEC_ID_AAC,          MKBETAG('a','a','c',' ') },
    { CODEC_ID_AC3,          MKBETAG('a','c','-','3') },
    { CODEC_ID_ALAC,         MKBETAG('a','l','a','c') },
    /* FIXME: use DV demuxer, as done in MOV */
    /*{ CODEC_ID_DVAUDIO,      MKBETAG('v','d','v','a') },*/
    /*{ CODEC_ID_DVAUDIO,      MKBETAG('d','v','c','a') },*/
    { CODEC_ID_ADPCM_IMA_QT, MKBETAG('i','m','a','4') },
    { CODEC_ID_MACE3,        MKBETAG('M','A','C','3') },
    { CODEC_ID_MACE6,        MKBETAG('M','A','C','6') },
    { CODEC_ID_MP3,          MKBETAG('.','m','p','3') },
    { CODEC_ID_MP2,          MKBETAG('.','m','p','2') },
    { CODEC_ID_MP1,          MKBETAG('.','m','p','1') },
    { CODEC_ID_PCM_ALAW,     MKBETAG('a','l','a','w') },
    { CODEC_ID_PCM_MULAW,    MKBETAG('u','l','a','w') },
    { CODEC_ID_QCELP,        MKBETAG('Q','c','l','p') },
    { CODEC_ID_QDM2,         MKBETAG('Q','D','M','2') },
    { CODEC_ID_QDM2,         MKBETAG('Q','D','M','C') },
    /* currently unsupported codecs */
    /*{ AC-3 over S/PDIF      MKBETAG('c','a','c','3') },*/
    /*{ MPEG4CELP             MKBETAG('c','e','l','p') },*/
    /*{ MPEG4HVXC             MKBETAG('h','v','x','c') },*/
    /*{ MPEG4TwinVQ           MKBETAG('t','w','v','q') },*/
    { CODEC_ID_NONE,         0 },
};
123linslouis-android-video-cutter
jni/libavformat/caf.c
C
asf20
2,331
/*
 * QCP format (.qcp) demuxer
 * Copyright (c) 2009 Kenan Gillet
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * QCP format (.qcp) demuxer
 * @author Kenan Gillet
 * @sa RFC 3625: "The QCP File Format and Media Types for Speech Data"
 *     http://tools.ietf.org/html/rfc3625
 */

#include "libavutil/intreadwrite.h"
#include "avformat.h"

typedef struct {
    uint32_t data_size;                     ///< size of data chunk

#define QCP_MAX_MODE 4
    int16_t rates_per_mode[QCP_MAX_MODE+1]; ///< contains the packet size corresponding
                                            ///< to each mode, -1 if no size.
} QCPContext;

/**
 * Last 15 out of 16 bytes of QCELP-13K GUID, as stored in the file;
 * the first byte of the GUID can be either 0x41 or 0x42.
 */
static const uint8_t guid_qcelp_13k_part[15] = {
    0x6d, 0x7f, 0x5e, 0x15, 0xb1, 0xd0, 0x11, 0xba,
    0x91, 0x00, 0x80, 0x5f, 0xb4, 0xb9, 0x7e
};

/**
 * EVRC GUID as stored in the file
 */
static const uint8_t guid_evrc[16] = {
    0x8d, 0xd4, 0x89, 0xe6, 0x76, 0x90, 0xb5, 0x46,
    0x91, 0xef, 0x73, 0x6a, 0x51, 0x00, 0xce, 0xb4
};

/**
 * SMV GUID as stored in the file
 */
static const uint8_t guid_smv[16] = {
    0x75, 0x2b, 0x7c, 0x8d, 0x97, 0xa7, 0x49, 0xed,
    0x98, 0x5e, 0xd5, 0x3c, 0x8c, 0xc7, 0x5f, 0x84
};

/**
 * @param guid contains at least 16 bytes
 * @return 1 if the guid is a qcelp_13k guid, 0 otherwise
 */
static int is_qcelp_13k_guid(const uint8_t *guid)
{
    /* first byte may be 0x41 or 0x42; the remaining 15 bytes are fixed */
    return (guid[0] == 0x41 || guid[0] == 0x42) &&
           !memcmp(guid+1, guid_qcelp_13k_part, sizeof(guid_qcelp_13k_part));
}

/**
 * Probe for the "RIFF....QLCMfmt " signature at the start of the buffer.
 * @return AVPROBE_SCORE_MAX on a match, 0 otherwise
 */
static int qcp_probe(AVProbeData *pd)
{
    if (AV_RL32(pd->buf  ) == AV_RL32("RIFF") &&
        AV_RL64(pd->buf+8) == AV_RL64("QLCMfmt "))
        return AVPROBE_SCORE_MAX;
    return 0;
}

/**
 * Parse the QLCM fmt chunk: identify the codec by GUID, read bitrate,
 * fixed packet size, sample rate and the variable-rate mode->size map.
 * @return 0 on success, a negative AVERROR code on failure
 */
static int qcp_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    ByteIOContext *pb = s->pb;
    QCPContext    *c  = s->priv_data;
    AVStream      *st = av_new_stream(s, 0);
    uint8_t       buf[16];
    int           i, nb_rates;

    if (!st)
        return AVERROR(ENOMEM);

    get_be32(pb);                    // "RIFF"
    s->file_size = get_le32(pb) + 8;
    url_fskip(pb, 8 + 4 + 1 + 1);    // "QLCMfmt " + chunk-size + major-version + minor-version

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->channels   = 1;       /* QCP is mono speech data */
    get_buffer(pb, buf, 16);
    if (is_qcelp_13k_guid(buf)) {
        st->codec->codec_id = CODEC_ID_QCELP;
    } else if (!memcmp(buf, guid_evrc, 16)) {
        av_log(s, AV_LOG_ERROR, "EVRC codec is not supported.\n");
        return AVERROR_PATCHWELCOME;
    } else if (!memcmp(buf, guid_smv, 16)) {
        av_log(s, AV_LOG_ERROR, "SMV codec is not supported.\n");
        return AVERROR_PATCHWELCOME;
    } else {
        av_log(s, AV_LOG_ERROR, "Unknown codec GUID.\n");
        return AVERROR_INVALIDDATA;
    }
    url_fskip(pb, 2 + 80); // codec-version + codec-name
    st->codec->bit_rate = get_le16(pb);

    /* packet_size != 0 means fixed-rate; 0 means variable rate, in which
     * case rates_per_mode gives each mode's packet size */
    s->packet_size = get_le16(pb);
    url_fskip(pb, 2); // block-size
    st->codec->sample_rate = get_le16(pb);
    url_fskip(pb, 2); // sample-size

    memset(c->rates_per_mode, -1, sizeof(c->rates_per_mode));
    nb_rates = get_le32(pb);
    nb_rates = FFMIN(nb_rates, 8);   /* the table holds at most 8 entries */
    for (i=0; i<nb_rates; i++) {
        int size = get_byte(pb);
        int mode = get_byte(pb);
        if (mode > QCP_MAX_MODE) {
            av_log(s, AV_LOG_WARNING, "Unknown entry %d=>%d in rate-map-table.\n ", mode, size);
        } else
            c->rates_per_mode[mode] = size;
    }
    url_fskip(pb, 16 - 2*nb_rates + 20); // empty entries of rate-map-table + reserved

    return 0;
}

/**
 * Read one speech frame from the data chunk, or advance through vrat/data
 * chunk headers until frame data is reached. Each frame is preceded by a
 * one-byte mode indicator, which is consumed but not returned in the packet.
 * @return packet size on success, a negative AVERROR code on failure/EOF
 */
static int qcp_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *pb = s->pb;
    QCPContext    *c  = s->priv_data;
    unsigned int  chunk_size, tag;

    while(!url_feof(pb)) {
        if (c->data_size) {
            int pkt_size, ret, mode = get_byte(pb);

            if (s->packet_size) {
                /* fixed-rate stream: size is constant (minus the mode byte) */
                pkt_size = s->packet_size - 1;
            } else if (mode > QCP_MAX_MODE || (pkt_size = c->rates_per_mode[mode]) < 0) {
                /* unknown mode byte: skip it and resync */
                c->data_size--;
                continue;
            }

            if (c->data_size <= pkt_size) {
                av_log(s, AV_LOG_WARNING, "Data chunk is too small.\n");
                pkt_size = c->data_size - 1;
            }

            if ((ret = av_get_packet(pb, pkt, pkt_size)) >= 0) {
                if (pkt_size != ret)
                    av_log(s, AV_LOG_ERROR, "Packet size is too small.\n");

                c->data_size -= pkt_size + 1; /* +1 for the mode byte */
            }
            return ret;
        }

        /* chunks are word-aligned; a non-zero pad byte is tolerated */
        if (url_ftell(pb) & 1 && get_byte(pb))
            av_log(s, AV_LOG_WARNING, "Padding should be 0.\n");

        tag        = get_le32(pb);
        chunk_size = get_le32(pb);
        switch (tag) {
        case MKTAG('v', 'r', 'a', 't'):
            if (get_le32(pb)) // var-rate-flag
                s->packet_size = 0;
            url_fskip(pb, 4); // size-in-packets
            break;
        case MKTAG('d', 'a', 't', 'a'):
            c->data_size = chunk_size;
            break;
        default:
            url_fskip(pb, chunk_size);
        }
    }
    return AVERROR_EOF;
}

AVInputFormat qcp_demuxer = {
    .name           = "qcp",
    .long_name      = NULL_IF_CONFIG_SMALL("QCP format"),
    .priv_data_size = sizeof(QCPContext),
    .read_probe     = qcp_probe,
    .read_header    = qcp_read_header,
    .read_packet    = qcp_read_packet,
};
123linslouis-android-video-cutter
jni/libavformat/qcp.c
C
asf20
6,194
/*
 * AU muxer and demuxer
 * Copyright (c) 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * First version by Francois Revol revol@free.fr
 *
 * Reference documents:
 * http://www.opengroup.org/public/pubs/external/auformat.html
 * http://www.goice.co.jp/member/mo/formats/au.html
 */

#include "avformat.h"
#include "raw.h"
#include "riff.h"

/* if we don't know the size in advance */
#define AU_UNKNOWN_SIZE ((uint32_t)(~0))

/* The ffmpeg codecs we support, and the IDs they have in the file
 * (AU "encoding" field values). */
static const AVCodecTag codec_au_tags[] = {
    { CODEC_ID_PCM_MULAW,  1 },
    { CODEC_ID_PCM_S8,     2 },
    { CODEC_ID_PCM_S16BE,  3 },
    { CODEC_ID_PCM_S24BE,  4 },
    { CODEC_ID_PCM_S32BE,  5 },
    { CODEC_ID_PCM_F32BE,  6 },
    { CODEC_ID_PCM_F64BE,  7 },
    { CODEC_ID_PCM_ALAW,  27 },
    { CODEC_ID_NONE,       0 },
};

#if CONFIG_AU_MUXER
/* AUDIO_FILE header.
 * Writes the fixed 24-byte AU header; the data size is written as
 * AU_UNKNOWN_SIZE and patched later in au_write_trailer() when seekable.
 * Returns 0 on success, -1 if the codec has no AU tag. */
static int put_au_header(ByteIOContext *pb, AVCodecContext *enc)
{
    if(!enc->codec_tag)
        return -1;
    put_tag(pb, ".snd");                    /* magic number */
    put_be32(pb, 24);                       /* header size */
    put_be32(pb, AU_UNKNOWN_SIZE);          /* data size */
    put_be32(pb, (uint32_t)enc->codec_tag); /* codec ID */
    put_be32(pb, enc->sample_rate);
    put_be32(pb, (uint32_t)enc->channels);
    return 0;
}

/* Write the AU file header for the first stream. */
static int au_write_header(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;

    s->priv_data = NULL;

    /* format header */
    if (put_au_header(pb, s->streams[0]->codec) < 0) {
        return -1;
    }

    put_flush_packet(pb);

    return 0;
}

/* Raw passthrough: AU audio data is written as-is after the header. */
static int au_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *pb = s->pb;
    put_buffer(pb, pkt->data, pkt->size);
    return 0;
}

/* Patch the data-size field (offset 8) now that the total size is known.
 * Skipped for non-seekable (streamed) output. */
static int au_write_trailer(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    int64_t file_size;

    if (!url_is_streamed(s->pb)) {

        /* update file size */
        file_size = url_ftell(pb);
        url_fseek(pb, 8, SEEK_SET);
        put_be32(pb, (uint32_t)(file_size - 24));
        url_fseek(pb, file_size, SEEK_SET);

        put_flush_packet(pb);
    }

    return 0;
}
#endif /* CONFIG_AU_MUXER */

/* Probe for the ".snd" magic at the start of the buffer. */
static int au_probe(AVProbeData *p)
{
    /* check file header */
    if (p->buf[0] == '.' && p->buf[1] == 's' &&
        p->buf[2] == 'n' && p->buf[3] == 'd')
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}

/* au input */
/* Parse the AU header, skip any extra header bytes beyond the fixed 24,
 * and create a single audio stream with a 1/sample_rate timebase. */
static int au_read_header(AVFormatContext *s,
                          AVFormatParameters *ap)
{
    int size;
    unsigned int tag;
    ByteIOContext *pb = s->pb;
    unsigned int id, channels, rate;
    enum CodecID codec;
    AVStream *st;

    /* check ".snd" header */
    tag = get_le32(pb);
    if (tag != MKTAG('.', 's', 'n', 'd'))
        return -1;
    size = get_be32(pb); /* header size */
    get_be32(pb); /* data size */

    id = get_be32(pb);
    rate = get_be32(pb);
    channels = get_be32(pb);

    codec = ff_codec_get_id(codec_au_tags, id);

    if (size >= 24) {
        /* skip unused data */
        url_fseek(pb, size - 24, SEEK_CUR);
    }

    /* now we are ready: build format streams */
    st = av_new_stream(s, 0);
    if (!st)
        return -1;
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_tag = id;
    st->codec->codec_id = codec;
    st->codec->channels = channels;
    st->codec->sample_rate = rate;
    av_set_pts_info(st, 64, 1, rate);
    return 0;
}

#define BLOCK_SIZE 1024

/* Read up to BLOCK_SIZE samples worth of raw PCM per packet
 * (BLOCK_SIZE * channels * bytes-per-sample). */
static int au_read_packet(AVFormatContext *s,
                          AVPacket *pkt)
{
    int ret;

    ret= av_get_packet(s->pb, pkt, BLOCK_SIZE *
                       s->streams[0]->codec->channels *
                       av_get_bits_per_sample(s->streams[0]->codec->codec_id) >> 3);
    if (ret < 0)
        return ret;
    pkt->stream_index = 0;

    /* note: we need to modify the packet size here to handle the last
       packet */
    pkt->size = ret;
    return 0;
}

#if CONFIG_AU_DEMUXER
AVInputFormat au_demuxer = {
    "au",
    NULL_IF_CONFIG_SMALL("SUN AU format"),
    0,
    au_probe,
    au_read_header,
    au_read_packet,
    NULL,
    pcm_read_seek,
    .codec_tag= (const AVCodecTag* const []){codec_au_tags, 0},
};
#endif

#if CONFIG_AU_MUXER
AVOutputFormat au_muxer = {
    "au",
    NULL_IF_CONFIG_SMALL("SUN AU format"),
    "audio/basic",
    "au",
    0,
    CODEC_ID_PCM_S16BE,
    CODEC_ID_NONE,
    au_write_header,
    au_write_packet,
    au_write_trailer,
    .codec_tag= (const AVCodecTag* const []){codec_au_tags, 0},
};
#endif //CONFIG_AU_MUXER
123linslouis-android-video-cutter
jni/libavformat/au.c
C
asf20
5,225
/* * MPEG1/2 muxer * Copyright (c) 2000, 2001, 2002 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/fifo.h" #include "libavcodec/put_bits.h" #include "avformat.h" #include "mpeg.h" #define MAX_PAYLOAD_SIZE 4096 //#define DEBUG_SEEK #undef NDEBUG #include <assert.h> typedef struct PacketDesc { int64_t pts; int64_t dts; int size; int unwritten_size; int flags; struct PacketDesc *next; } PacketDesc; typedef struct { AVFifoBuffer *fifo; uint8_t id; int max_buffer_size; /* in bytes */ int buffer_index; PacketDesc *predecode_packet; PacketDesc *premux_packet; PacketDesc **next_packet; int packet_number; uint8_t lpcm_header[3]; int lpcm_align; int bytes_to_iframe; int align_iframe; int64_t vobu_start_pts; } StreamInfo; typedef struct { int packet_size; /* required packet size */ int packet_number; int pack_header_freq; /* frequency (in packets^-1) at which we send pack headers */ int system_header_freq; int system_header_size; int mux_rate; /* bitrate in units of 50 bytes/s */ /* stream info */ int audio_bound; int video_bound; int is_mpeg2; int is_vcd; int is_svcd; int is_dvd; int64_t last_scr; /* current system clock */ double vcd_padding_bitrate; //FIXME floats int64_t vcd_padding_bytes_written; } MpegMuxContext; extern AVOutputFormat mpeg1vcd_muxer; 
extern AVOutputFormat mpeg2dvd_muxer; extern AVOutputFormat mpeg2svcd_muxer; extern AVOutputFormat mpeg2vob_muxer; static int put_pack_header(AVFormatContext *ctx, uint8_t *buf, int64_t timestamp) { MpegMuxContext *s = ctx->priv_data; PutBitContext pb; init_put_bits(&pb, buf, 128); put_bits32(&pb, PACK_START_CODE); if (s->is_mpeg2) { put_bits(&pb, 2, 0x1); } else { put_bits(&pb, 4, 0x2); } put_bits(&pb, 3, (uint32_t)((timestamp >> 30) & 0x07)); put_bits(&pb, 1, 1); put_bits(&pb, 15, (uint32_t)((timestamp >> 15) & 0x7fff)); put_bits(&pb, 1, 1); put_bits(&pb, 15, (uint32_t)((timestamp ) & 0x7fff)); put_bits(&pb, 1, 1); if (s->is_mpeg2) { /* clock extension */ put_bits(&pb, 9, 0); } put_bits(&pb, 1, 1); put_bits(&pb, 22, s->mux_rate); put_bits(&pb, 1, 1); if (s->is_mpeg2) { put_bits(&pb, 1, 1); put_bits(&pb, 5, 0x1f); /* reserved */ put_bits(&pb, 3, 0); /* stuffing length */ } flush_put_bits(&pb); return put_bits_ptr(&pb) - pb.buf; } static int put_system_header(AVFormatContext *ctx, uint8_t *buf,int only_for_stream_id) { MpegMuxContext *s = ctx->priv_data; int size, i, private_stream_coded, id; PutBitContext pb; init_put_bits(&pb, buf, 128); put_bits32(&pb, SYSTEM_HEADER_START_CODE); put_bits(&pb, 16, 0); put_bits(&pb, 1, 1); put_bits(&pb, 22, s->mux_rate); /* maximum bit rate of the multiplexed stream */ put_bits(&pb, 1, 1); /* marker */ if (s->is_vcd && only_for_stream_id==VIDEO_ID) { /* This header applies only to the video stream (see VCD standard p. IV-7)*/ put_bits(&pb, 6, 0); } else put_bits(&pb, 6, s->audio_bound); if (s->is_vcd) { /* see VCD standard, p. 
IV-7*/ put_bits(&pb, 1, 0); put_bits(&pb, 1, 1); } else { put_bits(&pb, 1, 0); /* variable bitrate*/ put_bits(&pb, 1, 0); /* non constrainted bit stream */ } if (s->is_vcd || s->is_dvd) { /* see VCD standard p IV-7 */ put_bits(&pb, 1, 1); /* audio locked */ put_bits(&pb, 1, 1); /* video locked */ } else { put_bits(&pb, 1, 0); /* audio locked */ put_bits(&pb, 1, 0); /* video locked */ } put_bits(&pb, 1, 1); /* marker */ if (s->is_vcd && (only_for_stream_id & 0xe0) == AUDIO_ID) { /* This header applies only to the audio stream (see VCD standard p. IV-7)*/ put_bits(&pb, 5, 0); } else put_bits(&pb, 5, s->video_bound); if (s->is_dvd) { put_bits(&pb, 1, 0); /* packet_rate_restriction_flag */ put_bits(&pb, 7, 0x7f); /* reserved byte */ } else put_bits(&pb, 8, 0xff); /* reserved byte */ /* DVD-Video Stream_bound entries id (0xB9) video, maximum P-STD for stream 0xE0. (P-STD_buffer_bound_scale = 1) id (0xB8) audio, maximum P-STD for any MPEG audio (0xC0 to 0xC7) streams. If there are none set to 4096 (32x128). (P-STD_buffer_bound_scale = 0) id (0xBD) private stream 1 (audio other than MPEG and subpictures). (P-STD_buffer_bound_scale = 1) id (0xBF) private stream 2, NAV packs, set to 2x1024. 
*/ if (s->is_dvd) { int P_STD_max_video = 0; int P_STD_max_mpeg_audio = 0; int P_STD_max_mpeg_PS1 = 0; for(i=0;i<ctx->nb_streams;i++) { StreamInfo *stream = ctx->streams[i]->priv_data; id = stream->id; if (id == 0xbd && stream->max_buffer_size > P_STD_max_mpeg_PS1) { P_STD_max_mpeg_PS1 = stream->max_buffer_size; } else if (id >= 0xc0 && id <= 0xc7 && stream->max_buffer_size > P_STD_max_mpeg_audio) { P_STD_max_mpeg_audio = stream->max_buffer_size; } else if (id == 0xe0 && stream->max_buffer_size > P_STD_max_video) { P_STD_max_video = stream->max_buffer_size; } } /* video */ put_bits(&pb, 8, 0xb9); /* stream ID */ put_bits(&pb, 2, 3); put_bits(&pb, 1, 1); put_bits(&pb, 13, P_STD_max_video / 1024); /* audio */ if (P_STD_max_mpeg_audio == 0) P_STD_max_mpeg_audio = 4096; put_bits(&pb, 8, 0xb8); /* stream ID */ put_bits(&pb, 2, 3); put_bits(&pb, 1, 0); put_bits(&pb, 13, P_STD_max_mpeg_audio / 128); /* private stream 1 */ put_bits(&pb, 8, 0xbd); /* stream ID */ put_bits(&pb, 2, 3); put_bits(&pb, 1, 0); put_bits(&pb, 13, P_STD_max_mpeg_PS1 / 128); /* private stream 2 */ put_bits(&pb, 8, 0xbf); /* stream ID */ put_bits(&pb, 2, 3); put_bits(&pb, 1, 1); put_bits(&pb, 13, 2); } else { /* audio stream info */ private_stream_coded = 0; for(i=0;i<ctx->nb_streams;i++) { StreamInfo *stream = ctx->streams[i]->priv_data; /* For VCDs, only include the stream info for the stream that the pack which contains this system belongs to. (see VCD standard p. 
IV-7) */ if ( !s->is_vcd || stream->id==only_for_stream_id || only_for_stream_id==0) { id = stream->id; if (id < 0xc0) { /* special case for private streams (AC-3 uses that) */ if (private_stream_coded) continue; private_stream_coded = 1; id = 0xbd; } put_bits(&pb, 8, id); /* stream ID */ put_bits(&pb, 2, 3); if (id < 0xe0) { /* audio */ put_bits(&pb, 1, 0); put_bits(&pb, 13, stream->max_buffer_size / 128); } else { /* video */ put_bits(&pb, 1, 1); put_bits(&pb, 13, stream->max_buffer_size / 1024); } } } } flush_put_bits(&pb); size = put_bits_ptr(&pb) - pb.buf; /* patch packet size */ buf[4] = (size - 6) >> 8; buf[5] = (size - 6) & 0xff; return size; } static int get_system_header_size(AVFormatContext *ctx) { int buf_index, i, private_stream_coded; StreamInfo *stream; MpegMuxContext *s = ctx->priv_data; if (s->is_dvd) return 18; // DVD-Video system headers are 18 bytes fixed length. buf_index = 12; private_stream_coded = 0; for(i=0;i<ctx->nb_streams;i++) { stream = ctx->streams[i]->priv_data; if (stream->id < 0xc0) { if (private_stream_coded) continue; private_stream_coded = 1; } buf_index += 3; } return buf_index; } static int mpeg_mux_init(AVFormatContext *ctx) { MpegMuxContext *s = ctx->priv_data; int bitrate, i, mpa_id, mpv_id, mps_id, ac3_id, dts_id, lpcm_id, j; AVStream *st; StreamInfo *stream; int audio_bitrate; int video_bitrate; s->packet_number = 0; s->is_vcd = (CONFIG_MPEG1VCD_MUXER && ctx->oformat == &mpeg1vcd_muxer); s->is_svcd = (CONFIG_MPEG2SVCD_MUXER && ctx->oformat == &mpeg2svcd_muxer); s->is_mpeg2 = ((CONFIG_MPEG2VOB_MUXER && ctx->oformat == &mpeg2vob_muxer) || (CONFIG_MPEG2DVD_MUXER && ctx->oformat == &mpeg2dvd_muxer) || (CONFIG_MPEG2SVCD_MUXER && ctx->oformat == &mpeg2svcd_muxer)); s->is_dvd = (CONFIG_MPEG2DVD_MUXER && ctx->oformat == &mpeg2dvd_muxer); if(ctx->packet_size) { if (ctx->packet_size < 20 || ctx->packet_size > (1 << 23) + 10) { av_log(ctx, AV_LOG_ERROR, "Invalid packet size %d\n", ctx->packet_size); goto fail; } s->packet_size = 
ctx->packet_size; } else s->packet_size = 2048; s->vcd_padding_bytes_written = 0; s->vcd_padding_bitrate=0; s->audio_bound = 0; s->video_bound = 0; mpa_id = AUDIO_ID; ac3_id = AC3_ID; dts_id = DTS_ID; mpv_id = VIDEO_ID; mps_id = SUB_ID; lpcm_id = LPCM_ID; for(i=0;i<ctx->nb_streams;i++) { st = ctx->streams[i]; stream = av_mallocz(sizeof(StreamInfo)); if (!stream) goto fail; st->priv_data = stream; av_set_pts_info(st, 64, 1, 90000); switch(st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: if (st->codec->codec_id == CODEC_ID_AC3) { stream->id = ac3_id++; } else if (st->codec->codec_id == CODEC_ID_DTS) { stream->id = dts_id++; } else if (st->codec->codec_id == CODEC_ID_PCM_S16BE) { stream->id = lpcm_id++; for(j = 0; j < 4; j++) { if (lpcm_freq_tab[j] == st->codec->sample_rate) break; } if (j == 4) goto fail; if (st->codec->channels > 8) return -1; stream->lpcm_header[0] = 0x0c; stream->lpcm_header[1] = (st->codec->channels - 1) | (j << 4); stream->lpcm_header[2] = 0x80; stream->lpcm_align = st->codec->channels * 2; } else { stream->id = mpa_id++; } /* This value HAS to be used for VCD (see VCD standard, p. IV-7). Right now it is also used for everything else.*/ stream->max_buffer_size = 4 * 1024; s->audio_bound++; break; case AVMEDIA_TYPE_VIDEO: stream->id = mpv_id++; if (st->codec->rc_buffer_size) stream->max_buffer_size = 6*1024 + st->codec->rc_buffer_size/8; else stream->max_buffer_size = 230*1024; //FIXME this is probably too small as default #if 0 /* see VCD standard, p. IV-7*/ stream->max_buffer_size = 46 * 1024; else /* This value HAS to be used for SVCD (see SVCD standard, p. 26 V.2.3.2). 
Right now it is also used for everything else.*/ stream->max_buffer_size = 230 * 1024; #endif s->video_bound++; break; case AVMEDIA_TYPE_SUBTITLE: stream->id = mps_id++; stream->max_buffer_size = 16 * 1024; break; default: return -1; } stream->fifo= av_fifo_alloc(16); if (!stream->fifo) goto fail; } bitrate = 0; audio_bitrate = 0; video_bitrate = 0; for(i=0;i<ctx->nb_streams;i++) { int codec_rate; st = ctx->streams[i]; stream = (StreamInfo*) st->priv_data; if(st->codec->rc_max_rate || stream->id==VIDEO_ID) codec_rate= st->codec->rc_max_rate; else codec_rate= st->codec->bit_rate; if(!codec_rate) codec_rate= (1<<21)*8*50/ctx->nb_streams; bitrate += codec_rate; if ((stream->id & 0xe0) == AUDIO_ID) audio_bitrate += codec_rate; else if (stream->id==VIDEO_ID) video_bitrate += codec_rate; } if(ctx->mux_rate){ s->mux_rate= (ctx->mux_rate + (8 * 50) - 1) / (8 * 50); } else { /* we increase slightly the bitrate to take into account the headers. XXX: compute it exactly */ bitrate += bitrate*5/100; bitrate += 10000; s->mux_rate = (bitrate + (8 * 50) - 1) / (8 * 50); } if (s->is_vcd) { double overhead_rate; /* The VCD standard mandates that the mux_rate field is 3528 (see standard p. IV-6). The value is actually "wrong", i.e. if you calculate it using the normal formula and the 75 sectors per second transfer rate you get a different value because the real pack size is 2324, not 2352. But the standard explicitly specifies that the mux_rate field in the header must have this value.*/ // s->mux_rate=2352 * 75 / 50; /* = 3528*/ /* The VCD standard states that the muxed stream must be exactly 75 packs / second (the data rate of a single speed cdrom). Since the video bitrate (probably 1150000 bits/sec) will be below the theoretical maximum we have to add some padding packets to make up for the lower data rate. (cf. VCD standard p. IV-6 )*/ /* Add the header overhead to the data rate. 
2279 data bytes per audio pack, 2294 data bytes per video pack*/ overhead_rate = ((audio_bitrate / 8.0) / 2279) * (2324 - 2279); overhead_rate += ((video_bitrate / 8.0) / 2294) * (2324 - 2294); overhead_rate *= 8; /* Add padding so that the full bitrate is 2324*75 bytes/sec */ s->vcd_padding_bitrate = 2324 * 75 * 8 - (bitrate + overhead_rate); } if (s->is_vcd || s->is_mpeg2) /* every packet */ s->pack_header_freq = 1; else /* every 2 seconds */ s->pack_header_freq = 2 * bitrate / s->packet_size / 8; /* the above seems to make pack_header_freq zero sometimes */ if (s->pack_header_freq == 0) s->pack_header_freq = 1; if (s->is_mpeg2) /* every 200 packets. Need to look at the spec. */ s->system_header_freq = s->pack_header_freq * 40; else if (s->is_vcd) /* the standard mandates that there are only two system headers in the whole file: one in the first packet of each stream. (see standard p. IV-7 and IV-8) */ s->system_header_freq = 0x7fffffff; else s->system_header_freq = s->pack_header_freq * 5; for(i=0;i<ctx->nb_streams;i++) { stream = ctx->streams[i]->priv_data; stream->packet_number = 0; } s->system_header_size = get_system_header_size(ctx); s->last_scr = 0; return 0; fail: for(i=0;i<ctx->nb_streams;i++) { av_free(ctx->streams[i]->priv_data); } return AVERROR(ENOMEM); } static inline void put_timestamp(ByteIOContext *pb, int id, int64_t timestamp) { put_byte(pb, (id << 4) | (((timestamp >> 30) & 0x07) << 1) | 1); put_be16(pb, (uint16_t)((((timestamp >> 15) & 0x7fff) << 1) | 1)); put_be16(pb, (uint16_t)((((timestamp ) & 0x7fff) << 1) | 1)); } /* return the number of padding bytes that should be inserted into the multiplexed stream.*/ static int get_vcd_padding_size(AVFormatContext *ctx, int64_t pts) { MpegMuxContext *s = ctx->priv_data; int pad_bytes = 0; if (s->vcd_padding_bitrate > 0 && pts!=AV_NOPTS_VALUE) { int64_t full_pad_bytes; full_pad_bytes = (int64_t)((s->vcd_padding_bitrate * (pts / 90000.0)) / 8.0); //FIXME this is wrong pad_bytes = (int) (full_pad_bytes 
- s->vcd_padding_bytes_written); if (pad_bytes<0) /* might happen if we have already padded to a later timestamp. This can occur if another stream has already advanced further.*/ pad_bytes=0; } return pad_bytes; } #if 0 /* unused, remove? */ /* return the exact available payload size for the next packet for stream 'stream_index'. 'pts' and 'dts' are only used to know if timestamps are needed in the packet header. */ static int get_packet_payload_size(AVFormatContext *ctx, int stream_index, int64_t pts, int64_t dts) { MpegMuxContext *s = ctx->priv_data; int buf_index; StreamInfo *stream; stream = ctx->streams[stream_index]->priv_data; buf_index = 0; if (((s->packet_number % s->pack_header_freq) == 0)) { /* pack header size */ if (s->is_mpeg2) buf_index += 14; else buf_index += 12; if (s->is_vcd) { /* there is exactly one system header for each stream in a VCD MPEG, One in the very first video packet and one in the very first audio packet (see VCD standard p. IV-7 and IV-8).*/ if (stream->packet_number==0) /* The system headers refer only to the stream they occur in, so they have a constant size.*/ buf_index += 15; } else { if ((s->packet_number % s->system_header_freq) == 0) buf_index += s->system_header_size; } } if ((s->is_vcd && stream->packet_number==0) || (s->is_svcd && s->packet_number==0)) /* the first pack of each stream contains only the pack header, the system header and some padding (see VCD standard p. 
IV-6) Add the padding size, so that the actual payload becomes 0.*/ buf_index += s->packet_size - buf_index; else { /* packet header size */ buf_index += 6; if (s->is_mpeg2) { buf_index += 3; if (stream->packet_number==0) buf_index += 3; /* PES extension */ buf_index += 1; /* obligatory stuffing byte */ } if (pts != AV_NOPTS_VALUE) { if (dts != pts) buf_index += 5 + 5; else buf_index += 5; } else { if (!s->is_mpeg2) buf_index++; } if (stream->id < 0xc0) { /* AC-3/LPCM private data header */ buf_index += 4; if (stream->id >= 0xa0) { int n; buf_index += 3; /* NOTE: we round the payload size to an integer number of LPCM samples */ n = (s->packet_size - buf_index) % stream->lpcm_align; if (n) buf_index += (stream->lpcm_align - n); } } if (s->is_vcd && (stream->id & 0xe0) == AUDIO_ID) /* The VCD standard demands that 20 zero bytes follow each audio packet (see standard p. IV-8).*/ buf_index+=20; } return s->packet_size - buf_index; } #endif /* Write an MPEG padding packet header. */ static void put_padding_packet(AVFormatContext *ctx, ByteIOContext *pb,int packet_bytes) { MpegMuxContext *s = ctx->priv_data; int i; put_be32(pb, PADDING_STREAM); put_be16(pb, packet_bytes - 6); if (!s->is_mpeg2) { put_byte(pb, 0x0f); packet_bytes -= 7; } else packet_bytes -= 6; for(i=0;i<packet_bytes;i++) put_byte(pb, 0xff); } static int get_nb_frames(AVFormatContext *ctx, StreamInfo *stream, int len){ int nb_frames=0; PacketDesc *pkt_desc= stream->premux_packet; while(len>0){ if(pkt_desc->size == pkt_desc->unwritten_size) nb_frames++; len -= pkt_desc->unwritten_size; pkt_desc= pkt_desc->next; } return nb_frames; } /* flush the packet on stream stream_index */ static int flush_packet(AVFormatContext *ctx, int stream_index, int64_t pts, int64_t dts, int64_t scr, int trailer_size) { MpegMuxContext *s = ctx->priv_data; StreamInfo *stream = ctx->streams[stream_index]->priv_data; uint8_t *buf_ptr; int size, payload_size, startcode, id, stuffing_size, i, header_len; int packet_size; uint8_t 
buffer[128]; int zero_trail_bytes = 0; int pad_packet_bytes = 0; int pes_flags; int general_pack = 0; /*"general" pack without data specific to one stream?*/ int nb_frames; id = stream->id; #if 0 printf("packet ID=%2x PTS=%0.3f\n", id, pts / 90000.0); #endif buf_ptr = buffer; if ((s->packet_number % s->pack_header_freq) == 0 || s->last_scr != scr) { /* output pack and systems header if needed */ size = put_pack_header(ctx, buf_ptr, scr); buf_ptr += size; s->last_scr= scr; if (s->is_vcd) { /* there is exactly one system header for each stream in a VCD MPEG, One in the very first video packet and one in the very first audio packet (see VCD standard p. IV-7 and IV-8).*/ if (stream->packet_number==0) { size = put_system_header(ctx, buf_ptr, id); buf_ptr += size; } } else if (s->is_dvd) { if (stream->align_iframe || s->packet_number == 0){ int PES_bytes_to_fill = s->packet_size - size - 10; if (pts != AV_NOPTS_VALUE) { if (dts != pts) PES_bytes_to_fill -= 5 + 5; else PES_bytes_to_fill -= 5; } if (stream->bytes_to_iframe == 0 || s->packet_number == 0) { size = put_system_header(ctx, buf_ptr, 0); buf_ptr += size; size = buf_ptr - buffer; put_buffer(ctx->pb, buffer, size); put_be32(ctx->pb, PRIVATE_STREAM_2); put_be16(ctx->pb, 0x03d4); // length put_byte(ctx->pb, 0x00); // substream ID, 00=PCI for (i = 0; i < 979; i++) put_byte(ctx->pb, 0x00); put_be32(ctx->pb, PRIVATE_STREAM_2); put_be16(ctx->pb, 0x03fa); // length put_byte(ctx->pb, 0x01); // substream ID, 01=DSI for (i = 0; i < 1017; i++) put_byte(ctx->pb, 0x00); memset(buffer, 0, 128); buf_ptr = buffer; s->packet_number++; stream->align_iframe = 0; scr += s->packet_size*90000LL / (s->mux_rate*50LL); //FIXME rounding and first few bytes of each packet size = put_pack_header(ctx, buf_ptr, scr); s->last_scr= scr; buf_ptr += size; /* GOP Start */ } else if (stream->bytes_to_iframe < PES_bytes_to_fill) { pad_packet_bytes = PES_bytes_to_fill - stream->bytes_to_iframe; } } } else { if ((s->packet_number % 
s->system_header_freq) == 0) { size = put_system_header(ctx, buf_ptr, 0); buf_ptr += size; } } } size = buf_ptr - buffer; put_buffer(ctx->pb, buffer, size); packet_size = s->packet_size - size; if (s->is_vcd && (id & 0xe0) == AUDIO_ID) /* The VCD standard demands that 20 zero bytes follow each audio pack (see standard p. IV-8).*/ zero_trail_bytes += 20; if ((s->is_vcd && stream->packet_number==0) || (s->is_svcd && s->packet_number==0)) { /* for VCD the first pack of each stream contains only the pack header, the system header and lots of padding (see VCD standard p. IV-6). In the case of an audio pack, 20 zero bytes are also added at the end.*/ /* For SVCD we fill the very first pack to increase compatibility with some DVD players. Not mandated by the standard.*/ if (s->is_svcd) general_pack = 1; /* the system header refers to both streams and no stream data*/ pad_packet_bytes = packet_size - zero_trail_bytes; } packet_size -= pad_packet_bytes + zero_trail_bytes; if (packet_size > 0) { /* packet header size */ packet_size -= 6; /* packet header */ if (s->is_mpeg2) { header_len = 3; if (stream->packet_number==0) header_len += 3; /* PES extension */ header_len += 1; /* obligatory stuffing byte */ } else { header_len = 0; } if (pts != AV_NOPTS_VALUE) { if (dts != pts) header_len += 5 + 5; else header_len += 5; } else { if (!s->is_mpeg2) header_len++; } payload_size = packet_size - header_len; if (id < 0xc0) { startcode = PRIVATE_STREAM_1; payload_size -= 1; if (id >= 0x40) { payload_size -= 3; if (id >= 0xa0) payload_size -= 3; } } else { startcode = 0x100 + id; } stuffing_size = payload_size - av_fifo_size(stream->fifo); // first byte does not fit -> reset pts/dts + stuffing if(payload_size <= trailer_size && pts != AV_NOPTS_VALUE){ int timestamp_len=0; if(dts != pts) timestamp_len += 5; if(pts != AV_NOPTS_VALUE) timestamp_len += s->is_mpeg2 ? 
5 : 4; pts=dts= AV_NOPTS_VALUE; header_len -= timestamp_len; if (s->is_dvd && stream->align_iframe) { pad_packet_bytes += timestamp_len; packet_size -= timestamp_len; } else { payload_size += timestamp_len; } stuffing_size += timestamp_len; if(payload_size > trailer_size) stuffing_size += payload_size - trailer_size; } if (pad_packet_bytes > 0 && pad_packet_bytes <= 7) { // can't use padding, so use stuffing packet_size += pad_packet_bytes; payload_size += pad_packet_bytes; // undo the previous adjustment if (stuffing_size < 0) { stuffing_size = pad_packet_bytes; } else { stuffing_size += pad_packet_bytes; } pad_packet_bytes = 0; } if (stuffing_size < 0) stuffing_size = 0; if (stuffing_size > 16) { /*<=16 for MPEG-1, <=32 for MPEG-2*/ pad_packet_bytes += stuffing_size; packet_size -= stuffing_size; payload_size -= stuffing_size; stuffing_size = 0; } nb_frames= get_nb_frames(ctx, stream, payload_size - stuffing_size); put_be32(ctx->pb, startcode); put_be16(ctx->pb, packet_size); if (!s->is_mpeg2) for(i=0;i<stuffing_size;i++) put_byte(ctx->pb, 0xff); if (s->is_mpeg2) { put_byte(ctx->pb, 0x80); /* mpeg2 id */ pes_flags=0; if (pts != AV_NOPTS_VALUE) { pes_flags |= 0x80; if (dts != pts) pes_flags |= 0x40; } /* Both the MPEG-2 and the SVCD standards demand that the P-STD_buffer_size field be included in the first packet of every stream. (see SVCD standard p. 26 V.2.3.1 and V.2.3.2 and MPEG-2 standard 2.7.7) */ if (stream->packet_number == 0) pes_flags |= 0x01; put_byte(ctx->pb, pes_flags); /* flags */ put_byte(ctx->pb, header_len - 3 + stuffing_size); if (pes_flags & 0x80) /*write pts*/ put_timestamp(ctx->pb, (pes_flags & 0x40) ? 
0x03 : 0x02, pts); if (pes_flags & 0x40) /*write dts*/ put_timestamp(ctx->pb, 0x01, dts); if (pes_flags & 0x01) { /*write pes extension*/ put_byte(ctx->pb, 0x10); /* flags */ /* P-STD buffer info */ if ((id & 0xe0) == AUDIO_ID) put_be16(ctx->pb, 0x4000 | stream->max_buffer_size/ 128); else put_be16(ctx->pb, 0x6000 | stream->max_buffer_size/1024); } } else { if (pts != AV_NOPTS_VALUE) { if (dts != pts) { put_timestamp(ctx->pb, 0x03, pts); put_timestamp(ctx->pb, 0x01, dts); } else { put_timestamp(ctx->pb, 0x02, pts); } } else { put_byte(ctx->pb, 0x0f); } } if (s->is_mpeg2) { /* special stuffing byte that is always written to prevent accidental generation of start codes. */ put_byte(ctx->pb, 0xff); for(i=0;i<stuffing_size;i++) put_byte(ctx->pb, 0xff); } if (startcode == PRIVATE_STREAM_1) { put_byte(ctx->pb, id); if (id >= 0xa0) { /* LPCM (XXX: check nb_frames) */ put_byte(ctx->pb, 7); put_be16(ctx->pb, 4); /* skip 3 header bytes */ put_byte(ctx->pb, stream->lpcm_header[0]); put_byte(ctx->pb, stream->lpcm_header[1]); put_byte(ctx->pb, stream->lpcm_header[2]); } else if (id >= 0x40) { /* AC-3 */ put_byte(ctx->pb, nb_frames); put_be16(ctx->pb, trailer_size+1); } } /* output data */ assert(payload_size - stuffing_size <= av_fifo_size(stream->fifo)); av_fifo_generic_read(stream->fifo, ctx->pb, payload_size - stuffing_size, &put_buffer); stream->bytes_to_iframe -= payload_size - stuffing_size; }else{ payload_size= stuffing_size= 0; } if (pad_packet_bytes > 0) put_padding_packet(ctx,ctx->pb, pad_packet_bytes); for(i=0;i<zero_trail_bytes;i++) put_byte(ctx->pb, 0x00); put_flush_packet(ctx->pb); s->packet_number++; /* only increase the stream packet number if this pack actually contains something that is specific to this stream! I.e. 
a dedicated header or some data.*/ if (!general_pack) stream->packet_number++; return payload_size - stuffing_size; } static void put_vcd_padding_sector(AVFormatContext *ctx) { /* There are two ways to do this padding: writing a sector/pack of 0 values, or writing an MPEG padding pack. Both seem to work with most decoders, BUT the VCD standard only allows a 0-sector (see standard p. IV-4, IV-5). So a 0-sector it is...*/ MpegMuxContext *s = ctx->priv_data; int i; for(i=0;i<s->packet_size;i++) put_byte(ctx->pb, 0); s->vcd_padding_bytes_written += s->packet_size; put_flush_packet(ctx->pb); /* increasing the packet number is correct. The SCR of the following packs is calculated from the packet_number and it has to include the padding sector (it represents the sector index, not the MPEG pack index) (see VCD standard p. IV-6)*/ s->packet_number++; } #if 0 /* unused, remove? */ static int64_t get_vcd_scr(AVFormatContext *ctx,int stream_index,int64_t pts) { MpegMuxContext *s = ctx->priv_data; int64_t scr; /* Since the data delivery rate is constant, SCR is computed using the formula C + i * 1200 where C is the start constant and i is the pack index. It is recommended that SCR 0 is at the beginning of the VCD front margin (a sequence of empty Form 2 sectors on the CD). It is recommended that the front margin is 30 sectors long, so we use C = 30*1200 = 36000 (Note that even if the front margin is not 30 sectors the file will still be correct according to the standard. 
It just won't have the "recommended" value).*/ scr = 36000 + s->packet_number * 1200; return scr; } #endif static int remove_decoded_packets(AVFormatContext *ctx, int64_t scr){ // MpegMuxContext *s = ctx->priv_data; int i; for(i=0; i<ctx->nb_streams; i++){ AVStream *st = ctx->streams[i]; StreamInfo *stream = st->priv_data; PacketDesc *pkt_desc; while((pkt_desc= stream->predecode_packet) && scr > pkt_desc->dts){ //FIXME > vs >= if(stream->buffer_index < pkt_desc->size || stream->predecode_packet == stream->premux_packet){ av_log(ctx, AV_LOG_ERROR, "buffer underflow i=%d bufi=%d size=%d\n", i, stream->buffer_index, pkt_desc->size); break; } stream->buffer_index -= pkt_desc->size; stream->predecode_packet= pkt_desc->next; av_freep(&pkt_desc); } } return 0; } static int output_packet(AVFormatContext *ctx, int flush){ MpegMuxContext *s = ctx->priv_data; AVStream *st; StreamInfo *stream; int i, avail_space=0, es_size, trailer_size; int best_i= -1; int best_score= INT_MIN; int ignore_constraints=0; int64_t scr= s->last_scr; PacketDesc *timestamp_packet; const int64_t max_delay= av_rescale(ctx->max_delay, 90000, AV_TIME_BASE); retry: for(i=0; i<ctx->nb_streams; i++){ AVStream *st = ctx->streams[i]; StreamInfo *stream = st->priv_data; const int avail_data= av_fifo_size(stream->fifo); const int space= stream->max_buffer_size - stream->buffer_index; int rel_space= 1024*space / stream->max_buffer_size; PacketDesc *next_pkt= stream->premux_packet; /* for subtitle, a single PES packet must be generated, so we flush after every single subtitle packet */ if(s->packet_size > avail_data && !flush && st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE) return 0; if(avail_data==0) continue; assert(avail_data>0); if(space < s->packet_size && !ignore_constraints) continue; if(next_pkt && next_pkt->dts - scr > max_delay) continue; if(rel_space > best_score){ best_score= rel_space; best_i = i; avail_space= space; } } if(best_i < 0){ int64_t best_dts= INT64_MAX; for(i=0; i<ctx->nb_streams; 
i++){ AVStream *st = ctx->streams[i]; StreamInfo *stream = st->priv_data; PacketDesc *pkt_desc= stream->predecode_packet; if(pkt_desc && pkt_desc->dts < best_dts) best_dts= pkt_desc->dts; } #if 0 av_log(ctx, AV_LOG_DEBUG, "bumping scr, scr:%f, dts:%f\n", scr/90000.0, best_dts/90000.0); #endif if(best_dts == INT64_MAX) return 0; if(scr >= best_dts+1 && !ignore_constraints){ av_log(ctx, AV_LOG_ERROR, "packet too large, ignoring buffer limits to mux it\n"); ignore_constraints= 1; } scr= FFMAX(best_dts+1, scr); if(remove_decoded_packets(ctx, scr) < 0) return -1; goto retry; } assert(best_i >= 0); st = ctx->streams[best_i]; stream = st->priv_data; assert(av_fifo_size(stream->fifo) > 0); assert(avail_space >= s->packet_size || ignore_constraints); timestamp_packet= stream->premux_packet; if(timestamp_packet->unwritten_size == timestamp_packet->size){ trailer_size= 0; }else{ trailer_size= timestamp_packet->unwritten_size; timestamp_packet= timestamp_packet->next; } if(timestamp_packet){ //av_log(ctx, AV_LOG_DEBUG, "dts:%f pts:%f scr:%f stream:%d\n", timestamp_packet->dts/90000.0, timestamp_packet->pts/90000.0, scr/90000.0, best_i); es_size= flush_packet(ctx, best_i, timestamp_packet->pts, timestamp_packet->dts, scr, trailer_size); }else{ assert(av_fifo_size(stream->fifo) == trailer_size); es_size= flush_packet(ctx, best_i, AV_NOPTS_VALUE, AV_NOPTS_VALUE, scr, trailer_size); } if (s->is_vcd) { /* Write one or more padding sectors, if necessary, to reach the constant overall bitrate.*/ int vcd_pad_bytes; while((vcd_pad_bytes = get_vcd_padding_size(ctx,stream->premux_packet->pts) ) >= s->packet_size){ //FIXME pts cannot be correct here put_vcd_padding_sector(ctx); s->last_scr += s->packet_size*90000LL / (s->mux_rate*50LL); //FIXME rounding and first few bytes of each packet } } stream->buffer_index += es_size; s->last_scr += s->packet_size*90000LL / (s->mux_rate*50LL); //FIXME rounding and first few bytes of each packet while(stream->premux_packet && 
stream->premux_packet->unwritten_size <= es_size){ es_size -= stream->premux_packet->unwritten_size; stream->premux_packet= stream->premux_packet->next; } if(es_size) stream->premux_packet->unwritten_size -= es_size; if(remove_decoded_packets(ctx, s->last_scr) < 0) return -1; return 1; } static int mpeg_mux_write_packet(AVFormatContext *ctx, AVPacket *pkt) { MpegMuxContext *s = ctx->priv_data; int stream_index= pkt->stream_index; int size= pkt->size; uint8_t *buf= pkt->data; AVStream *st = ctx->streams[stream_index]; StreamInfo *stream = st->priv_data; int64_t pts, dts; PacketDesc *pkt_desc; const int preload= av_rescale(ctx->preload, 90000, AV_TIME_BASE); const int is_iframe = st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (pkt->flags & AV_PKT_FLAG_KEY); pts= pkt->pts; dts= pkt->dts; if(pts != AV_NOPTS_VALUE) pts += preload; if(dts != AV_NOPTS_VALUE) dts += preload; //av_log(ctx, AV_LOG_DEBUG, "dts:%f pts:%f flags:%d stream:%d nopts:%d\n", dts/90000.0, pts/90000.0, pkt->flags, pkt->stream_index, pts != AV_NOPTS_VALUE); if (!stream->premux_packet) stream->next_packet = &stream->premux_packet; *stream->next_packet= pkt_desc= av_mallocz(sizeof(PacketDesc)); pkt_desc->pts= pts; pkt_desc->dts= dts; pkt_desc->unwritten_size= pkt_desc->size= size; if(!stream->predecode_packet) stream->predecode_packet= pkt_desc; stream->next_packet= &pkt_desc->next; if (av_fifo_realloc2(stream->fifo, av_fifo_size(stream->fifo) + size) < 0) return -1; if (s->is_dvd){ if (is_iframe && (s->packet_number == 0 || (pts - stream->vobu_start_pts >= 36000))) { // min VOBU length 0.4 seconds (mpucoder) stream->bytes_to_iframe = av_fifo_size(stream->fifo); stream->align_iframe = 1; stream->vobu_start_pts = pts; } } av_fifo_generic_write(stream->fifo, buf, size, NULL); for(;;){ int ret= output_packet(ctx, 0); if(ret<=0) return ret; } } static int mpeg_mux_end(AVFormatContext *ctx) { // MpegMuxContext *s = ctx->priv_data; StreamInfo *stream; int i; for(;;){ int ret= output_packet(ctx, 1); if(ret<0) 
return ret; else if(ret==0) break; } /* End header according to MPEG1 systems standard. We do not write it as it is usually not needed by decoders and because it complicates MPEG stream concatenation. */ //put_be32(ctx->pb, ISO_11172_END_CODE); //put_flush_packet(ctx->pb); for(i=0;i<ctx->nb_streams;i++) { stream = ctx->streams[i]->priv_data; assert(av_fifo_size(stream->fifo) == 0); av_fifo_free(stream->fifo); } return 0; } #if CONFIG_MPEG1SYSTEM_MUXER AVOutputFormat mpeg1system_muxer = { "mpeg", NULL_IF_CONFIG_SMALL("MPEG-1 System format"), "video/mpeg", "mpg,mpeg", sizeof(MpegMuxContext), CODEC_ID_MP2, CODEC_ID_MPEG1VIDEO, mpeg_mux_init, mpeg_mux_write_packet, mpeg_mux_end, }; #endif #if CONFIG_MPEG1VCD_MUXER AVOutputFormat mpeg1vcd_muxer = { "vcd", NULL_IF_CONFIG_SMALL("MPEG-1 System format (VCD)"), "video/mpeg", NULL, sizeof(MpegMuxContext), CODEC_ID_MP2, CODEC_ID_MPEG1VIDEO, mpeg_mux_init, mpeg_mux_write_packet, mpeg_mux_end, }; #endif #if CONFIG_MPEG2VOB_MUXER AVOutputFormat mpeg2vob_muxer = { "vob", NULL_IF_CONFIG_SMALL("MPEG-2 PS format (VOB)"), "video/mpeg", "vob", sizeof(MpegMuxContext), CODEC_ID_MP2, CODEC_ID_MPEG2VIDEO, mpeg_mux_init, mpeg_mux_write_packet, mpeg_mux_end, }; #endif /* Same as mpeg2vob_mux except that the pack size is 2324 */ #if CONFIG_MPEG2SVCD_MUXER AVOutputFormat mpeg2svcd_muxer = { "svcd", NULL_IF_CONFIG_SMALL("MPEG-2 PS format (VOB)"), "video/mpeg", "vob", sizeof(MpegMuxContext), CODEC_ID_MP2, CODEC_ID_MPEG2VIDEO, mpeg_mux_init, mpeg_mux_write_packet, mpeg_mux_end, }; #endif /* Same as mpeg2vob_mux except the 'is_dvd' flag is set to produce NAV pkts */ #if CONFIG_MPEG2DVD_MUXER AVOutputFormat mpeg2dvd_muxer = { "dvd", NULL_IF_CONFIG_SMALL("MPEG-2 PS format (DVD VOB)"), "video/mpeg", "dvd", sizeof(MpegMuxContext), CODEC_ID_MP2, CODEC_ID_MPEG2VIDEO, mpeg_mux_init, mpeg_mux_write_packet, mpeg_mux_end, }; #endif
123linslouis-android-video-cutter
jni/libavformat/mpegenc.c
C
asf20
42,967
/*
 * TTA demuxer
 * Copyright (c) 2006 Alex Beregszaszi
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/get_bits.h"
#include "avformat.h"
#include "id3v2.h"
#include "id3v1.h"

typedef struct {
    int totalframes, currentframe; /* frame count from the header; next frame to demux */
} TTAContext;

/**
 * Probe for a True Audio file: optional leading ID3v2 tag followed
 * by the "TTA1" magic.
 */
static int tta_probe(AVProbeData *p)
{
    const uint8_t *d = p->buf;

    if (ff_id3v2_match(d))
        d += ff_id3v2_tag_len(d);
    if (d - p->buf >= p->buf_size)
        return 0;
    if (d[0] == 'T' && d[1] == 'T' && d[2] == 'A' && d[3] == '1')
        return 80;
    return 0;
}

/**
 * Parse the TTA header and seek table, build the frame index and set up
 * the single audio stream. The raw header bytes are kept as extradata
 * for the decoder.
 *
 * @return 0 on success, negative error code or -1 on malformed input
 */
static int tta_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    TTAContext *c = s->priv_data;
    AVStream *st;
    int i, channels, bps, samplerate, datalen, framelen;
    uint64_t framepos, start_offset;

    /* read metadata tags before the actual TTA header */
    ff_id3v2_read(s);
    if (!av_metadata_get(s->metadata, "", NULL, AV_METADATA_IGNORE_SUFFIX))
        ff_id3v1_read(s);

    start_offset = url_ftell(s->pb);
    if (get_le32(s->pb) != AV_RL32("TTA1"))
        return -1; // not tta file

    url_fskip(s->pb, 2); // FIXME: flags
    channels = get_le16(s->pb);
    bps = get_le16(s->pb);
    samplerate = get_le32(s->pb);
    if(samplerate <= 0 || samplerate > 1000000){
        av_log(s, AV_LOG_ERROR, "nonsense samplerate\n");
        return -1;
    }

    datalen = get_le32(s->pb);
    if(datalen < 0){
        av_log(s, AV_LOG_ERROR, "nonsense datalen\n");
        return -1;
    }

    url_fskip(s->pb, 4); // header crc

    /* one TTA frame covers samplerate*256/245 samples */
    framelen = samplerate*256/245;
    c->totalframes = datalen / framelen + ((datalen % framelen) ? 1 : 0);
    c->currentframe = 0;

    /* the seek table has one 32-bit size entry per frame */
    if(c->totalframes >= UINT_MAX/sizeof(uint32_t)){
        av_log(s, AV_LOG_ERROR, "totalframes too large\n");
        return -1;
    }

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    av_set_pts_info(st, 64, 1, samplerate);
    st->start_time = 0;
    st->duration = datalen;

    /* frame data starts right after the seek table (+4 for its CRC) */
    framepos = url_ftell(s->pb) + 4*c->totalframes + 4;

    for (i = 0; i < c->totalframes; i++) {
        uint32_t size = get_le32(s->pb);
        av_add_index_entry(st, framepos, i*framelen, size, 0, AVINDEX_KEYFRAME);
        framepos += size;
    }
    url_fskip(s->pb, 4); // seektable crc

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = CODEC_ID_TTA;
    st->codec->channels = channels;
    st->codec->sample_rate = samplerate;
    st->codec->bits_per_coded_sample = bps;

    st->codec->extradata_size = url_ftell(s->pb) - start_offset;
    if(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)st->codec->extradata_size){
        //this check is redundant as get_buffer should fail
        av_log(s, AV_LOG_ERROR, "extradata_size too large\n");
        return -1;
    }
    st->codec->extradata = av_mallocz(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE);
    if (!st->codec->extradata) {
        /* fix: the allocation result was previously used unchecked,
         * which would make get_buffer() write through a NULL pointer on OOM */
        st->codec->extradata_size = 0;
        return AVERROR(ENOMEM);
    }
    url_fseek(s->pb, start_offset, SEEK_SET);
    get_buffer(s->pb, st->codec->extradata, st->codec->extradata_size);

    return 0;
}

/**
 * Read the next TTA frame; size and timestamp come from the index
 * built in tta_read_header().
 */
static int tta_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    TTAContext *c = s->priv_data;
    AVStream *st = s->streams[0];
    int size, ret;

    // FIXME!
    /* fix: was '>', which allowed one read past the end of index_entries
     * (valid indices are 0 .. totalframes-1) */
    if (c->currentframe >= c->totalframes)
        return -1;

    size = st->index_entries[c->currentframe].size;

    ret = av_get_packet(s->pb, pkt, size);
    pkt->dts = st->index_entries[c->currentframe++].timestamp;
    return ret;
}

/** Seek by looking up the target timestamp in the frame index. */
static int tta_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    TTAContext *c = s->priv_data;
    AVStream *st = s->streams[stream_index];
    int index = av_index_search_timestamp(st, timestamp, flags);
    if (index < 0)
        return -1;

    c->currentframe = index;
    url_fseek(s->pb, st->index_entries[index].pos, SEEK_SET);

    return 0;
}

AVInputFormat tta_demuxer = {
    "tta",
    NULL_IF_CONFIG_SMALL("True Audio"),
    sizeof(TTAContext),
    tta_probe,
    tta_read_header,
    tta_read_packet,
    NULL,
    tta_read_seek,
    .extensions = "tta",
};
123linslouis-android-video-cutter
jni/libavformat/tta.c
C
asf20
4,686
/*
 * copyright (c) 2007 Luca Abeni
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* SDP (Session Description Protocol) generation for the RTP muxer. */

#include <string.h>
#include "libavutil/avstring.h"
#include "libavutil/base64.h"
#include "avformat.h"
#include "internal.h"
#include "avc.h"
#include "rtp.h"
#if CONFIG_NETWORK
#include "network.h"
#endif

#if CONFIG_RTP_MUXER
#define MAX_EXTRADATA_SIZE ((INT_MAX - 10) / 2)

struct sdp_session_level {
    int sdp_version;      /**< protocol version (currently 0) */
    int id;               /**< session ID */
    int version;          /**< session version */
    int start_time;       /**< session start time (NTP time, in seconds), or 0 in case of permanent session */
    int end_time;         /**< session end time (NTP time, in seconds), or 0 if the session is not bounded */
    int ttl;              /**< TTL, in case of multicast stream */
    const char *user;     /**< username of the session's creator */
    const char *src_addr; /**< IP address of the machine from which the session was created */
    const char *dst_addr; /**< destination IP address (can be multicast) */
    const char *name;     /**< session name (can be an empty string) */
};

/** Append a "c=" connection line for dest_addr (with "/ttl" suffix when ttl > 0). */
static void sdp_write_address(char *buff, int size, const char *dest_addr, int ttl)
{
    if (dest_addr) {
        if (ttl > 0) {
            av_strlcatf(buff, size, "c=IN IP4 %s/%d\r\n", dest_addr, ttl);
        } else {
            av_strlcatf(buff, size, "c=IN IP4 %s\r\n", dest_addr);
        }
    }
}

/** Append the session-level part of the SDP ("v=", "o=", "s=", "c=", "t=", "a=tool:"). */
static void sdp_write_header(char *buff, int size, struct sdp_session_level *s)
{
    av_strlcatf(buff, size, "v=%d\r\n"
                            "o=- %d %d IN IP4 %s\r\n"
                            "s=%s\r\n",
                            s->sdp_version,
                            s->id, s->version, s->src_addr,
                            s->name);
    sdp_write_address(buff, size, s->dst_addr, s->ttl);
    av_strlcatf(buff, size, "t=%d %d\r\n"
                            "a=tool:libavformat " AV_STRINGIFY(LIBAVFORMAT_VERSION) "\r\n",
                            s->start_time, s->end_time);
}

#if CONFIG_NETWORK
/**
 * Replace a host name in dest_addr (in place) with a numeric IPv4 address,
 * as required by the SDP "c=" line. On any resolution failure dest_addr is
 * left unchanged.
 */
static void resolve_destination(char *dest_addr, int size)
{
    struct addrinfo hints, *ai, *cur;

    if (!dest_addr[0])
        return;

    /* Resolve the destination, since it must be written
     * as a numeric IP address in the SDP. */

    memset(&hints, 0, sizeof(hints));
    /* We only support IPv4 addresses in the SDP at the moment. */
    hints.ai_family = AF_INET;
    if (getaddrinfo(dest_addr, NULL, &hints, &ai))
        return;
    for (cur = ai; cur; cur = cur->ai_next) {
        if (cur->ai_family == AF_INET) {
            getnameinfo(cur->ai_addr, cur->ai_addrlen, dest_addr, size,
                        NULL, 0, NI_NUMERICHOST);
            break;
        }
    }
    freeaddrinfo(ai);
}
#else
/* No-op stub when networking support is compiled out. */
static void resolve_destination(char *dest_addr, int size)
{
}
#endif

/**
 * Extract the destination address, port and TTL from an rtp:// URL.
 * For multicast URLs, "ttl" from the query string is used, defaulting to 5.
 *
 * @return the port, or 0 for non-rtp URLs
 */
static int sdp_get_address(char *dest_addr, int size, int *ttl, const char *url)
{
    int port;
    const char *p;
    char proto[32];

    ff_url_split(proto, sizeof(proto), NULL, 0, dest_addr, size, &port, NULL, 0, url);

    *ttl = 0;

    if (strcmp(proto, "rtp")) {
        /* The url isn't for the actual rtp sessions,
         * don't parse out anything else than the destination.
         */
        return 0;
    }

    p = strchr(url, '?');
    if (p) {
        char buff[64];
        int is_multicast = find_info_tag(buff, sizeof(buff), "multicast", p);

        if (is_multicast) {
            if (find_info_tag(buff, sizeof(buff), "ttl", p)) {
                *ttl = strtol(buff, NULL, 10);
            } else {
                *ttl = 5;
            }
        }
    }

    return port;
}

#define MAX_PSET_SIZE 1024
/**
 * Build an H.264 "; sprop-parameter-sets=..." fmtp fragment by Base64-encoding
 * the SPS and PPS NAL units found in the codec extradata.
 * The caller owns the returned buffer (av_free); returns NULL on error.
 * NOTE(review): assumes extradata is in Annex B start-code format — the NAL
 * scan relies on ff_avc_find_startcode; verify against the encoder's output.
 */
static char *extradata2psets(AVCodecContext *c)
{
    char *psets, *p;
    const uint8_t *r;
    const char *pset_string = "; sprop-parameter-sets=";

    if (c->extradata_size > MAX_EXTRADATA_SIZE) {
        av_log(c, AV_LOG_ERROR, "Too much extradata!\n");
        return NULL;
    }

    psets = av_mallocz(MAX_PSET_SIZE);
    if (psets == NULL) {
        av_log(c, AV_LOG_ERROR, "Cannot allocate memory for the parameter sets.\n");
        return NULL;
    }
    memcpy(psets, pset_string, strlen(pset_string));
    p = psets + strlen(pset_string);
    r = ff_avc_find_startcode(c->extradata, c->extradata + c->extradata_size);
    while (r < c->extradata + c->extradata_size) {
        const uint8_t *r1;
        uint8_t nal_type;

        while (!*(r++));  /* skip the 00..01 start code; r now points at the NAL header */
        nal_type = *r & 0x1f;
        r1 = ff_avc_find_startcode(r, c->extradata + c->extradata_size);
        if (nal_type != 7 && nal_type != 8) { /* Only output SPS and PPS */
            r = r1;
            continue;
        }
        if (p != (psets + strlen(pset_string))) {
            *p = ',';  /* comma-separate subsequent parameter sets */
            p++;
        }
        if (av_base64_encode(p, MAX_PSET_SIZE - (p - psets), r, r1 - r) == NULL) {
            av_log(c, AV_LOG_ERROR, "Cannot Base64-encode %td %td!\n", MAX_PSET_SIZE - (p - psets), r1 - r);
            av_free(psets);
            return NULL;
        }
        p += strlen(p);
        r = r1;
    }

    return psets;
}

/**
 * Build a "; config=<hex>" fmtp fragment from the codec extradata.
 * The caller owns the returned buffer (av_free); returns NULL on error.
 */
static char *extradata2config(AVCodecContext *c)
{
    char *config;

    if (c->extradata_size > MAX_EXTRADATA_SIZE) {
        av_log(c, AV_LOG_ERROR, "Too much extradata!\n");
        return NULL;
    }
    config = av_malloc(10 + c->extradata_size * 2);
    if (config == NULL) {
        av_log(c, AV_LOG_ERROR, "Cannot allocate memory for the config info.\n");
        return NULL;
    }
    memcpy(config, "; config=", 9);
    ff_data_to_hex(config + 9, c->extradata, c->extradata_size, 0);
    config[9 + c->extradata_size * 2] = 0;

    return config;
}

/**
 * Append the codec-specific "a=rtpmap:"/"a=fmtp:" attribute lines for one
 * stream. Returns buff, or NULL for AAC streams without global headers.
 */
static char *sdp_write_media_attributes(char *buff, int size, AVCodecContext *c, int payload_type)
{
    char *config = NULL;

    switch (c->codec_id) {
        case CODEC_ID_H264:
            if (c->extradata_size) {
                config = extradata2psets(c);
            }
            av_strlcatf(buff, size, "a=rtpmap:%d H264/90000\r\n"
                                    "a=fmtp:%d packetization-mode=1%s\r\n",
                                     payload_type,
                                     payload_type, config ? config : "");
            break;
        case CODEC_ID_H263:
        case CODEC_ID_H263P:
            av_strlcatf(buff, size, "a=rtpmap:%d H263-2000/90000\r\n", payload_type);
            break;
        case CODEC_ID_MPEG4:
            if (c->extradata_size) {
                config = extradata2config(c);
            }
            av_strlcatf(buff, size, "a=rtpmap:%d MP4V-ES/90000\r\n"
                                    "a=fmtp:%d profile-level-id=1%s\r\n",
                                     payload_type,
                                     payload_type, config ? config : "");
            break;
        case CODEC_ID_AAC:
            if (c->extradata_size) {
                config = extradata2config(c);
            } else {
                /* FIXME: maybe we can forge config information based on the
                 * codec parameters...
                 */
                av_log(c, AV_LOG_ERROR, "AAC with no global headers is currently not supported.\n");
                return NULL;
            }
            if (config == NULL) {
                return NULL;
            }
            av_strlcatf(buff, size, "a=rtpmap:%d MPEG4-GENERIC/%d/%d\r\n"
                                    "a=fmtp:%d profile-level-id=1;"
                                    "mode=AAC-hbr;sizelength=13;indexlength=3;"
                                    "indexdeltalength=3%s\r\n",
                                     payload_type, c->sample_rate, c->channels,
                                     payload_type, config);
            break;
        case CODEC_ID_PCM_S16BE:
            /* static payload types already imply the format; only emit rtpmap
             * for dynamic payload numbers */
            if (payload_type >= RTP_PT_PRIVATE)
                av_strlcatf(buff, size, "a=rtpmap:%d L16/%d/%d\r\n",
                                         payload_type,
                                         c->sample_rate, c->channels);
            break;
        case CODEC_ID_PCM_MULAW:
            if (payload_type >= RTP_PT_PRIVATE)
                av_strlcatf(buff, size, "a=rtpmap:%d PCMU/%d/%d\r\n",
                                         payload_type,
                                         c->sample_rate, c->channels);
            break;
        case CODEC_ID_PCM_ALAW:
            if (payload_type >= RTP_PT_PRIVATE)
                av_strlcatf(buff, size, "a=rtpmap:%d PCMA/%d/%d\r\n",
                                         payload_type,
                                         c->sample_rate, c->channels);
            break;
        case CODEC_ID_AMR_NB:
            av_strlcatf(buff, size, "a=rtpmap:%d AMR/%d/%d\r\n"
                                    "a=fmtp:%d octet-align=1\r\n",
                                     payload_type, c->sample_rate, c->channels,
                                     payload_type);
            break;
        case CODEC_ID_AMR_WB:
            av_strlcatf(buff, size, "a=rtpmap:%d AMR-WB/%d/%d\r\n"
                                    "a=fmtp:%d octet-align=1\r\n",
                                     payload_type, c->sample_rate, c->channels,
                                     payload_type);
            break;
        default:
            /* Nothing special to do here... */
            break;
    }

    av_free(config);

    return buff;
}

/**
 * Append one complete media description ("m=", "c=", "b=", attributes)
 * for stream c to buff.
 */
void ff_sdp_write_media(char *buff, int size, AVCodecContext *c, const char *dest_addr, int port, int ttl)
{
    const char *type;
    int payload_type;

    payload_type = ff_rtp_get_payload_type(c);
    if (payload_type < 0) {
        /* no static payload type: pick a dynamic one (audio gets the odd slot) */
        payload_type = RTP_PT_PRIVATE + (c->codec_type == AVMEDIA_TYPE_AUDIO);
    }

    switch (c->codec_type) {
        case AVMEDIA_TYPE_VIDEO   : type = "video"      ; break;
        case AVMEDIA_TYPE_AUDIO   : type = "audio"      ; break;
        case AVMEDIA_TYPE_SUBTITLE: type = "text"       ; break;
        default                   : type = "application"; break;
    }

    av_strlcatf(buff, size, "m=%s %d RTP/AVP %d\r\n", type, port, payload_type);
    sdp_write_address(buff, size, dest_addr, ttl);
    if (c->bit_rate) {
        av_strlcatf(buff, size, "b=AS:%d\r\n", c->bit_rate / 1000);
    }

    sdp_write_media_attributes(buff, size, c, payload_type);
}

/**
 * Write a complete SDP for the streams of the n_files muxer contexts in ac
 * into buff (at most size bytes). With a single context, the destination from
 * its rtp:// filename becomes a session-level "c=" line; otherwise each media
 * section carries its own connection data.
 *
 * @return 0 (always succeeds; overlong output is silently truncated by av_strlcatf)
 */
int avf_sdp_create(AVFormatContext *ac[], int n_files, char *buff, int size)
{
    AVMetadataTag *title = av_metadata_get(ac[0]->metadata, "title", NULL, 0);
    struct sdp_session_level s;
    int i, j, port, ttl;
    char dst[32];

    memset(buff, 0, size);
    memset(&s, 0, sizeof(struct sdp_session_level));
    s.user = "-";
    s.src_addr = "127.0.0.1";    /* FIXME: Properly set this */
    s.name = title ? title->value : "No Name";

    port = 0;
    ttl = 0;
    if (n_files == 1) {
        port = sdp_get_address(dst, sizeof(dst), &ttl, ac[0]->filename);
        resolve_destination(dst, sizeof(dst));
        if (dst[0]) {
            s.dst_addr = dst;
            s.ttl = ttl;
        }
    }
    sdp_write_header(buff, size, &s);

    dst[0] = 0;
    for (i = 0; i < n_files; i++) {
        if (n_files != 1) {
            port = sdp_get_address(dst, sizeof(dst), &ttl, ac[i]->filename);
            resolve_destination(dst, sizeof(dst));
        }
        for (j = 0; j < ac[i]->nb_streams; j++) {
            /* consecutive even ports per stream (RTP; RTCP uses port+1) */
            ff_sdp_write_media(buff, size,
                                   ac[i]->streams[j]->codec, dst[0] ? dst : NULL,
                                   (port > 0) ? port + j * 2 : 0, ttl);
            if (port <= 0) {
                av_strlcatf(buff, size,
                                   "a=control:streamid=%d\r\n", i + j);
            }
        }
    }

    return 0;
}
#else
/* Stubs used when the RTP muxer is compiled out. */
int avf_sdp_create(AVFormatContext *ac[], int n_files, char *buff, int size)
{
    return AVERROR(ENOSYS);
}

void ff_sdp_write_media(char *buff, int size, AVCodecContext *c, const char *dest_addr, int port, int ttl)
{
}
#endif
123linslouis-android-video-cutter
jni/libavformat/sdp.c
C
asf20
12,748
/*
 * raw FLAC muxer
 * Copyright (C) 2009 Justin Ruggles
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVFORMAT_FLACENC_H
#define AVFORMAT_FLACENC_H

#include "libavcodec/flac.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"

/**
 * Write a FLAC file header to pb for the given codec parameters.
 *
 * @param pb         output byte context
 * @param codec      codec context of the FLAC stream
 * @param last_block presumably marks the metadata block as the last one in
 *                   the header — TODO confirm against the definition in
 *                   flacenc.c (not visible here)
 * @return 0 on success, a negative value on error
 */
int ff_flac_write_header(ByteIOContext *pb, AVCodecContext *codec,
                         int last_block);

#endif /* AVFORMAT_FLACENC_H */
123linslouis-android-video-cutter
jni/libavformat/flacenc.h
C
asf20
1,106
/*
 * VC1 Test Bitstreams Format Demuxer
 * Copyright (c) 2006, 2008 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * VC1 test bitstream file demuxer
 * by Konstantin Shishkov
 * Format specified in SMPTE standard 421 Annex L
 */

#include "libavutil/intreadwrite.h"
#include "avformat.h"

#define VC1_EXTRADATA_SIZE 4

/**
 * Probe for the RCV container: byte 3 is 0xC5, the extradata size field
 * is 4 and the second header chunk size field is 0xC.
 */
static int vc1t_probe(AVProbeData *p)
{
    if (p->buf_size < 24)
        return 0;
    if (p->buf[3] != 0xC5 || AV_RL32(&p->buf[4]) != 4 || AV_RL32(&p->buf[20]) != 0xC)
        return 0;

    return AVPROBE_SCORE_MAX/2;
}

/**
 * Parse the RCV file header and set up the single WMV3 video stream.
 * The 4-byte sequence header is stored as codec extradata.
 *
 * @return 0 on success, negative error code or -1 on malformed input
 */
static int vc1t_read_header(AVFormatContext *s,
                            AVFormatParameters *ap)
{
    ByteIOContext *pb = s->pb;
    AVStream *st;
    int frames;
    uint32_t fps;

    frames = get_le24(pb);
    if(get_byte(pb) != 0xC5 || get_le32(pb) != 4)
        return -1;

    /* init video codec */
    st = av_new_stream(s, 0);
    if (!st)
        return -1;

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_WMV3;

    st->codec->extradata = av_malloc(VC1_EXTRADATA_SIZE);
    if (!st->codec->extradata)
        /* fix: the allocation result was previously used unchecked, which
         * would make get_buffer() write through a NULL pointer on OOM */
        return AVERROR(ENOMEM);
    st->codec->extradata_size = VC1_EXTRADATA_SIZE;
    get_buffer(pb, st->codec->extradata, VC1_EXTRADATA_SIZE);
    st->codec->height = get_le32(pb);
    st->codec->width = get_le32(pb);
    if(get_le32(pb) != 0xC)
        return -1;
    url_fskip(pb, 8);
    fps = get_le32(pb);
    if(fps == 0xFFFFFFFF)
        /* unknown frame rate: fall back to millisecond timestamps */
        av_set_pts_info(st, 32, 1, 1000);
    else{
        if (!fps) {
            av_log(s, AV_LOG_ERROR, "Zero FPS specified, defaulting to 1 FPS\n");
            fps = 1;
        }
        av_set_pts_info(st, 24, 1, fps);
        st->duration = frames;
    }

    return 0;
}

/**
 * Read one frame: 24-bit size, flags byte (bit 7 = keyframe), 32-bit pts,
 * then the payload.
 */
static int vc1t_read_packet(AVFormatContext *s,
                            AVPacket *pkt)
{
    ByteIOContext *pb = s->pb;
    int frame_size;
    int keyframe = 0;
    uint32_t pts;

    if(url_feof(pb))
        return AVERROR(EIO);

    frame_size = get_le24(pb);
    if(get_byte(pb) & 0x80)
        keyframe = 1;
    pts = get_le32(pb);
    if(av_get_packet(pb, pkt, frame_size) < 0)
        return AVERROR(EIO);
    if(s->streams[0]->time_base.den == 1000)
        pkt->pts = pts;
    pkt->flags |= keyframe ? AV_PKT_FLAG_KEY : 0;
    pkt->pos -= 8; /* point at the frame header, not the payload */

    return pkt->size;
}

AVInputFormat vc1t_demuxer = {
    "vc1test",
    NULL_IF_CONFIG_SMALL("VC-1 test bitstream format"),
    0,
    vc1t_probe,
    vc1t_read_header,
    vc1t_read_packet,
    .flags = AVFMT_GENERIC_INDEX,
};
123linslouis-android-video-cutter
jni/libavformat/vc1test.c
C
asf20
3,190
/*
 * ARMovie/RPL demuxer
 * Copyright (c) 2007 Christian Ohm, 2008 Eli Friedman
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avstring.h"
#include "avformat.h"
#include <stdlib.h>

#define RPL_SIGNATURE "ARMovie\x0A"
#define RPL_SIGNATURE_SIZE 8

/** 256 is arbitrary, but should be big enough for any reasonable file. */
#define RPL_LINE_LENGTH 256

/* RPL files start with the literal text "ARMovie\n"; no further probing
 * is needed, so a match scores maximum confidence. */
static int rpl_probe(AVProbeData *p)
{
    if (memcmp(p->buf, RPL_SIGNATURE, RPL_SIGNATURE_SIZE))
        return 0;

    return AVPROBE_SCORE_MAX;
}

typedef struct RPLContext {
    // RPL header data
    int32_t frames_per_chunk;

    // Stream position data
    uint32_t chunk_number;   // index of the chunk currently being demuxed
    uint32_t chunk_part;     // which stream's part of the chunk is next (0 = video)
    uint32_t frame_in_part;  // frame counter within a multi-frame video chunk
} RPLContext;

/* Read one NUL- or newline-terminated text line into 'line'.
 * Returns 0 when a newline was found, -1 on NUL terminator or when the
 * buffer filled up without a newline. The output is always terminated. */
static int read_line(ByteIOContext * pb, char* line, int bufsize)
{
    int i;
    for (i = 0; i < bufsize - 1; i++) {
        int b = get_byte(pb);
        if (b == 0)
            break;
        if (b == '\n') {
            line[i] = '\0';
            return 0;
        }
        line[i] = b;
    }
    line[i] = '\0';
    return -1;
}

/* Parse a non-negative decimal integer from 'line'; sets *error on
 * overflow past INT32_MAX and leaves *endptr at the first non-digit. */
static int32_t read_int(const char* line, const char** endptr, int* error)
{
    unsigned long result = 0;
    for (; *line>='0' && *line<='9'; line++) {
        if (result > (0x7FFFFFFF - 9) / 10)
            *error = -1;
        result = 10 * result + *line - '0';
    }
    *endptr = line;
    return result;
}

/* Convenience wrapper: read one header line and parse its leading integer. */
static int32_t read_line_and_int(ByteIOContext * pb, int* error)
{
    char line[RPL_LINE_LENGTH];
    const char *endptr;
    *error |= read_line(pb, line, sizeof(line));
    return read_int(line, &endptr, error);
}

/** Parsing for fps, which can be a fraction. Unfortunately,
  * the spec for the header leaves out a lot of details,
  * so this is mostly guessing.
  */
static AVRational read_fps(const char* line, int* error)
{
    int64_t num, den = 1;
    AVRational result;
    num = read_int(line, &line, error);
    if (*line == '.')
        line++;
    for (; *line>='0' && *line<='9'; line++) {
        // Truncate any numerator too large to fit into an int64_t
        if (num > (INT64_MAX - 9) / 10 || den > INT64_MAX / 10)
            break;
        num = 10 * num + *line - '0';
        den *= 10;
    }
    if (!num)
        *error = -1;
    av_reduce(&result.num, &result.den, num, den, 0x7FFFFFFF);
    return result;
}

/* Parse the 21-line text header, create the video (and optional audio)
 * streams, then load the chunk catalog into the index so packets can be
 * demuxed by seeking from index entries. Any parse failure is accumulated
 * in 'error' and reported once at the end as AVERROR(EIO). */
static int rpl_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    ByteIOContext *pb = s->pb;
    RPLContext *rpl = s->priv_data;
    AVStream *vst = NULL, *ast = NULL;
    int total_audio_size;
    int error = 0;

    uint32_t i;

    int32_t audio_format, chunk_catalog_offset, number_of_chunks;
    AVRational fps;

    char line[RPL_LINE_LENGTH];

    // The header for RPL/ARMovie files is 21 lines of text
    // containing the various header fields. The fields are always
    // in the same order, and other text besides the first
    // number usually isn't important.
    // (The spec says that there exists some significance
    // for the text in a few cases; samples needed.)
    error |= read_line(pb, line, sizeof(line));      // ARMovie
    error |= read_line(pb, line, sizeof(line));      // movie name
    av_metadata_set2(&s->metadata, "title"    , line, 0);
    error |= read_line(pb, line, sizeof(line));      // date/copyright
    av_metadata_set2(&s->metadata, "copyright", line, 0);
    error |= read_line(pb, line, sizeof(line));      // author and other
    av_metadata_set2(&s->metadata, "author"   , line, 0);

    // video headers
    vst = av_new_stream(s, 0);
    if (!vst)
        return AVERROR(ENOMEM);
    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_tag  = read_line_and_int(pb, &error);  // video format
    vst->codec->width      = read_line_and_int(pb, &error);  // video width
    vst->codec->height     = read_line_and_int(pb, &error);  // video height
    vst->codec->bits_per_coded_sample = read_line_and_int(pb, &error);  // video bits per sample
    error |= read_line(pb, line, sizeof(line));              // video frames per second
    fps = read_fps(line, &error);
    av_set_pts_info(vst, 32, fps.den, fps.num);

    // Figure out the video codec
    switch (vst->codec->codec_tag) {
#if 0
        case 122:
            vst->codec->codec_id = CODEC_ID_ESCAPE122;
            break;
#endif
        case 124:
            vst->codec->codec_id = CODEC_ID_ESCAPE124;
            // The header is wrong here, at least sometimes
            vst->codec->bits_per_coded_sample = 16;
            break;
#if 0
        case 130:
            vst->codec->codec_id = CODEC_ID_ESCAPE130;
            break;
#endif
        default:
            av_log(s, AV_LOG_WARNING,
                   "RPL video format %i not supported yet!\n",
                   vst->codec->codec_tag);
            vst->codec->codec_id = CODEC_ID_NONE;
    }

    // Audio headers

    // ARMovie supports multiple audio tracks; I don't have any
    // samples, though. This code will ignore additional tracks.
    audio_format = read_line_and_int(pb, &error);  // audio format ID
    if (audio_format) {
        ast = av_new_stream(s, 0);
        if (!ast)
            return AVERROR(ENOMEM);
        ast->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
        ast->codec->codec_tag   = audio_format;
        ast->codec->sample_rate = read_line_and_int(pb, &error);  // audio bitrate
        ast->codec->channels    = read_line_and_int(pb, &error);  // number of audio channels
        ast->codec->bits_per_coded_sample = read_line_and_int(pb, &error);  // audio bits per sample
        // At least one sample uses 0 for ADPCM, which is really 4 bits
        // per sample.
        if (ast->codec->bits_per_coded_sample == 0)
            ast->codec->bits_per_coded_sample = 4;

        ast->codec->bit_rate = ast->codec->sample_rate *
                               ast->codec->bits_per_coded_sample *
                               ast->codec->channels;

        ast->codec->codec_id = CODEC_ID_NONE;
        switch (audio_format) {
            case 1:
                if (ast->codec->bits_per_coded_sample == 16) {
                    // 16-bit audio is always signed
                    ast->codec->codec_id = CODEC_ID_PCM_S16LE;
                    break;
                }
                // There are some other formats listed as legal per the spec;
                // samples needed.
                break;
            case 101:
                if (ast->codec->bits_per_coded_sample == 8) {
                    // The samples with this kind of audio that I have
                    // are all unsigned.
                    ast->codec->codec_id = CODEC_ID_PCM_U8;
                    break;
                } else if (ast->codec->bits_per_coded_sample == 4) {
                    ast->codec->codec_id = CODEC_ID_ADPCM_IMA_EA_SEAD;
                    break;
                }
                break;
        }
        if (ast->codec->codec_id == CODEC_ID_NONE) {
            av_log(s, AV_LOG_WARNING,
                   "RPL audio format %i not supported yet!\n",
                   audio_format);
        }
        // audio pts are measured in bits, hence the bit_rate timebase
        av_set_pts_info(ast, 32, 1, ast->codec->bit_rate);
    } else {
        // no audio: skip the three unused audio header lines
        for (i = 0; i < 3; i++)
            error |= read_line(pb, line, sizeof(line));
    }

    rpl->frames_per_chunk = read_line_and_int(pb, &error);  // video frames per chunk
    if (rpl->frames_per_chunk > 1 && vst->codec->codec_tag != 124)
        av_log(s, AV_LOG_WARNING,
               "Don't know how to split frames for video format %i. "
               "Video stream will be broken!\n", vst->codec->codec_tag);

    number_of_chunks = read_line_and_int(pb, &error);  // number of chunks in the file
    // The number in the header is actually the index of the last chunk.
    number_of_chunks++;

    error |= read_line(pb, line, sizeof(line));  // "even" chunk size in bytes
    error |= read_line(pb, line, sizeof(line));  // "odd" chunk size in bytes
    chunk_catalog_offset =                       // offset of the "chunk catalog"
        read_line_and_int(pb, &error);           //   (file index)
    error |= read_line(pb, line, sizeof(line));  // offset to "helpful" sprite
    error |= read_line(pb, line, sizeof(line));  // size of "helpful" sprite
    error |= read_line(pb, line, sizeof(line));  // offset to key frame list

    // Read the index
    url_fseek(pb, chunk_catalog_offset, SEEK_SET);
    total_audio_size = 0;
    for (i = 0; i < number_of_chunks; i++) {
        int64_t offset, video_size, audio_size;
        error |= read_line(pb, line, sizeof(line));
        // Each catalog line is "offset , video_size ; audio_size"
        if (3 != sscanf(line, "%"PRId64" , %"PRId64" ; %"PRId64,
                        &offset, &video_size, &audio_size))
            error = -1;
        av_add_index_entry(vst, offset, i * rpl->frames_per_chunk,
                           video_size, rpl->frames_per_chunk, 0);
        if (ast)
            av_add_index_entry(ast, offset + video_size, total_audio_size,
                               audio_size, audio_size * 8, 0);
        total_audio_size += audio_size * 8;
    }

    if (error) return AVERROR(EIO);

    return 0;
}

/* Emit one packet per chunk part, alternating over the streams:
 * part 0 of each chunk is video (possibly split into one packet per frame
 * for Escape 124), the following part is audio if present. Positions come
 * from the index built in rpl_read_header(). */
static int rpl_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    RPLContext *rpl = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream* stream;
    AVIndexEntry* index_entry;
    uint32_t ret;

    if (rpl->chunk_part == s->nb_streams) {
        rpl->chunk_number++;
        rpl->chunk_part = 0;
    }

    stream = s->streams[rpl->chunk_part];

    if (rpl->chunk_number >= stream->nb_index_entries)
        return -1;

    index_entry = &stream->index_entries[rpl->chunk_number];

    if (rpl->frame_in_part == 0)
        if (url_fseek(pb, index_entry->pos, SEEK_SET) < 0)
            return AVERROR(EIO);

    if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
        stream->codec->codec_tag == 124) {
        // We have to split Escape 124 frames because there are
        // multiple frames per chunk in Escape 124 samples.
        uint32_t frame_size, frame_flags;

        frame_flags = get_le32(pb);
        frame_size = get_le32(pb);
        // rewind so the per-frame header is part of the packet payload
        if (url_fseek(pb, -8, SEEK_CUR) < 0)
            return AVERROR(EIO);

        ret = av_get_packet(pb, pkt, frame_size);
        if (ret != frame_size) {
            av_free_packet(pkt);
            return AVERROR(EIO);
        }
        pkt->duration = 1;
        pkt->pts = index_entry->timestamp + rpl->frame_in_part;
        pkt->stream_index = rpl->chunk_part;

        rpl->frame_in_part++;
        if (rpl->frame_in_part == rpl->frames_per_chunk) {
            rpl->frame_in_part = 0;
            rpl->chunk_part++;
        }
    } else {
        ret = av_get_packet(pb, pkt, index_entry->size);
        if (ret != index_entry->size) {
            av_free_packet(pkt);
            return AVERROR(EIO);
        }

        if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            // frames_per_chunk should always be one here; the header
            // parsing will warn if it isn't.
            pkt->duration = rpl->frames_per_chunk;
        } else {
            // All the audio codecs supported in this container
            // (at least so far) are constant-bitrate.
            pkt->duration = ret * 8;
        }
        pkt->pts = index_entry->timestamp;
        pkt->stream_index = rpl->chunk_part;
        rpl->chunk_part++;
    }

    // None of the Escape formats have keyframes, and the ADPCM
    // format used doesn't have keyframes.
    if (rpl->chunk_number == 0 && rpl->frame_in_part == 0)
        pkt->flags |= AV_PKT_FLAG_KEY;

    return ret;
}

AVInputFormat rpl_demuxer = {
    "rpl",
    NULL_IF_CONFIG_SMALL("RPL/ARMovie format"),
    sizeof(RPLContext),
    rpl_probe,
    rpl_read_header,
    rpl_read_packet,
};
123linslouis-android-video-cutter
jni/libavformat/rpl.c
C
asf20
12,452
/*
 * Copyright (C) 2010 David Conrad
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "oggdec.h"

/* Parse Ogg Skeleton header packets for stream 'idx'.
 * "fishead" carries the overall presentation start time (applied to the
 * skeleton stream itself); each "fisbone" carries the start granule of
 * another stream in the group, which is translated to that stream's
 * start_time. Returns 1 (header consumed) or -1 on malformed data. */
static int skeleton_header(AVFormatContext *s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    AVStream *st = s->streams[idx];
    uint8_t *buf = os->buf + os->pstart;
    int version_major, version_minor;
    int64_t start_num, start_den, start_granule;
    int target_idx, start_time;

    strcpy(st->codec->codec_name, "skeleton");
    st->codec->codec_type = AVMEDIA_TYPE_DATA;

    /* need at least the 8-byte magic (7 chars + NUL) before dispatching */
    if (os->psize < 8)
        return -1;

    if (!strncmp(buf, "fishead", 8)) {
        if (os->psize < 64)
            return -1;

        version_major = AV_RL16(buf+8);
        version_minor = AV_RL16(buf+10);

        if (version_major != 3) {
            av_log(s, AV_LOG_WARNING, "Unknown skeleton version %d.%d\n",
                   version_major, version_minor);
            return -1;
        }

        // This is the overall start time. We use it for the start time of
        // of the skeleton stream since if left unset lavf assumes 0,
        // which we don't want since skeleton is timeless
        // FIXME: the real meaning of this field is "start playback at
        // this time which can be in the middle of a packet
        start_num = AV_RL64(buf+12);
        start_den = AV_RL64(buf+20);

        if (start_den) {
            /* reduce num/den into a start time in a 1/den timebase */
            av_reduce(&start_time, &st->time_base.den, start_num, start_den, INT_MAX);
            st->time_base.num = 1;
            os->lastpts = st->start_time = start_time;
        }
    } else if (!strncmp(buf, "fisbone", 8)) {
        if (os->psize < 52)
            return -1;

        /* serial number at offset 12 identifies the described stream */
        target_idx = ogg_find_stream(ogg, AV_RL32(buf+12));
        start_granule = AV_RL64(buf+36);
        if (target_idx >= 0 && start_granule != -1) {
            ogg->streams[target_idx].lastpts =
            s->streams[target_idx]->start_time = ogg_gptopts(s, target_idx, start_granule, NULL);
        }
    }

    return 1;
}

const struct ogg_codec ff_skeleton_codec = {
    .magic = "fishead",
    .magicsize = 8,
    .header = skeleton_header,
};
123linslouis-android-video-cutter
jni/libavformat/oggparseskeleton.c
C
asf20
2,910
/*
 * id Quake II CIN File Demuxer
 * Copyright (c) 2003 The ffmpeg Project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * id Quake II CIN file demuxer by Mike Melanson (melanson@pcisys.net)
 * For more information about the id CIN format, visit:
 *   http://www.csse.monash.edu.au/~timf/
 *
 * CIN is a somewhat quirky and ill-defined format. Here are some notes
 * for anyone trying to understand the technical details of this format:
 *
 * The format has no definite file signature. This is problematic for a
 * general-purpose media player that wants to automatically detect file
 * types. However, a CIN file does start with 5 32-bit numbers that
 * specify audio and video parameters. This demuxer gets around the lack
 * of file signature by performing sanity checks on those parameters.
 * Probabalistically, this is a reasonable solution since the number of
 * valid combinations of the 5 parameters is a very small subset of the
 * total 160-bit number space.
 *
 * Refer to the function idcin_probe() for the precise A/V parameters
 * that this demuxer allows.
 *
 * Next, each audio and video frame has a duration of 1/14 sec. If the
 * audio sample rate is a multiple of the common frequency 22050 Hz it will
 * divide evenly by 14. However, if the sample rate is 11025 Hz:
 *   11025 (samples/sec) / 14 (frames/sec) = 787.5 (samples/frame)
 * The way the CIN stores audio in this case is by storing 787 sample
 * frames in the first audio frame and 788 sample frames in the second
 * audio frame. Therefore, the total number of bytes in an audio frame
 * is given as:
 *   audio frame #0: 787 * (bytes/sample) * (# channels) bytes in frame
 *   audio frame #1: 788 * (bytes/sample) * (# channels) bytes in frame
 *   audio frame #2: 787 * (bytes/sample) * (# channels) bytes in frame
 *   audio frame #3: 788 * (bytes/sample) * (# channels) bytes in frame
 *
 * Finally, not all id CIN creation tools agree on the resolution of the
 * color palette, apparently. Some creation tools specify red, green, and
 * blue palette components in terms of 6-bit VGA color DAC values which
 * range from 0..63. Other tools specify the RGB components as full 8-bit
 * values that range from 0..255. Since there are no markers in the file to
 * differentiate between the two variants, this demuxer uses the following
 * heuristic:
 *   - load the 768 palette bytes from disk
 *   - assume that they will need to be shifted left by 2 bits to
 *     transform them from 6-bit values to 8-bit values
 *   - scan through all 768 palette bytes
 *     - if any bytes exceed 63, do not shift the bytes at all before
 *       transmitting them to the video decoder
 */

#include "libavutil/intreadwrite.h"
#include "avformat.h"

#define HUFFMAN_TABLE_SIZE (64 * 1024)
#define IDCIN_FPS 14

typedef struct IdcinDemuxContext {
    int video_stream_index;
    int audio_stream_index;
    int audio_chunk_size1;   /* bytes in "short" audio frames (see file header) */
    int audio_chunk_size2;   /* bytes in "long" audio frames */

    /* demux state variables */
    int current_audio_chunk; /* alternates 0/1 to pick chunk_size1/2 */
    int next_chunk_is_video;
    int audio_present;

    int64_t pts;

    AVPaletteControl palctrl;
} IdcinDemuxContext;

/**
 * Probabilistic probe: sanity-check the 5 32-bit header fields
 * (width, height, sample rate, bytes/sample, channels) since CIN
 * has no file signature.
 */
static int idcin_probe(AVProbeData *p)
{
    unsigned int number;

    /*
     * This is what you could call a "probabilistic" file check: id CIN
     * files don't have a definite file signature. In lieu of such a marker,
     * perform sanity checks on the 5 32-bit header fields:
     *
     *  width, height: greater than 0, less than or equal to 1024
     * audio sample rate: greater than or equal to 8000, less than or
     *  equal to 48000, or 0 for no audio
     * audio sample width (bytes/sample): 0 for no audio, or 1 or 2
     * audio channels: 0 for no audio, or 1 or 2
     */

    /* check we have enough data to do all checks, otherwise the
       0-padding may cause a wrong recognition */
    if (p->buf_size < 20)
        return 0;

    /* check the video width */
    number = AV_RL32(&p->buf[0]);
    if ((number == 0) || (number > 1024))
       return 0;

    /* check the video height */
    number = AV_RL32(&p->buf[4]);
    if ((number == 0) || (number > 1024))
       return 0;

    /* check the audio sample rate */
    number = AV_RL32(&p->buf[8]);
    /* logical ||, not bitwise |: the intent is a range check */
    if ((number != 0) && ((number < 8000) || (number > 48000)))
        return 0;

    /* check the audio bytes/sample */
    number = AV_RL32(&p->buf[12]);
    if (number > 2)
        return 0;

    /* check the audio channels */
    number = AV_RL32(&p->buf[16]);
    if (number > 2)
        return 0;

    /* return half certainly since this check is a bit sketchy */
    return AVPROBE_SCORE_MAX / 2;
}

/**
 * Parse the 5-field header, load the 64 KB Huffman tables into video
 * extradata, and create the video stream plus an optional PCM audio
 * stream (sample_rate == 0 means no audio).
 */
static int idcin_read_header(AVFormatContext *s,
                             AVFormatParameters *ap)
{
    ByteIOContext *pb = s->pb;
    IdcinDemuxContext *idcin = s->priv_data;
    AVStream *st;
    unsigned int width, height;
    unsigned int sample_rate, bytes_per_sample, channels;

    /* get the 5 header parameters */
    width = get_le32(pb);
    height = get_le32(pb);
    sample_rate = get_le32(pb);
    bytes_per_sample = get_le32(pb);
    channels = get_le32(pb);

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    av_set_pts_info(st, 33, 1, IDCIN_FPS);
    idcin->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_IDCIN;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = width;
    st->codec->height = height;

    /* load up the Huffman tables into extradata */
    st->codec->extradata_size = HUFFMAN_TABLE_SIZE;
    st->codec->extradata = av_malloc(HUFFMAN_TABLE_SIZE);
    /* the original code passed a possibly-NULL buffer straight to get_buffer */
    if (!st->codec->extradata)
        return AVERROR(ENOMEM);
    if (get_buffer(pb, st->codec->extradata, HUFFMAN_TABLE_SIZE) !=
        HUFFMAN_TABLE_SIZE)
        return AVERROR(EIO);
    /* save a reference in order to transport the palette */
    st->codec->palctrl = &idcin->palctrl;

    /* if sample rate is 0, assume no audio */
    if (sample_rate) {
        idcin->audio_present = 1;
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        av_set_pts_info(st, 33, 1, IDCIN_FPS);
        idcin->audio_stream_index = st->index;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_tag = 1;
        st->codec->channels = channels;
        st->codec->sample_rate = sample_rate;
        st->codec->bits_per_coded_sample = bytes_per_sample * 8;
        st->codec->bit_rate = sample_rate * bytes_per_sample * 8 * channels;
        st->codec->block_align = bytes_per_sample * channels;
        if (bytes_per_sample == 1)
            st->codec->codec_id = CODEC_ID_PCM_U8;
        else
            st->codec->codec_id = CODEC_ID_PCM_S16LE;

        if (sample_rate % 14 != 0) {
            /* rate does not divide evenly by the frame rate: alternate
             * between floor and ceil sized chunks (see @file comment) */
            idcin->audio_chunk_size1 = (sample_rate / 14) *
            bytes_per_sample * channels;
            idcin->audio_chunk_size2 = (sample_rate / 14 + 1) *
                bytes_per_sample * channels;
        } else {
            idcin->audio_chunk_size1 = idcin->audio_chunk_size2 =
                (sample_rate / 14) * bytes_per_sample * channels;
        }
        idcin->current_audio_chunk = 0;
    } else
        /* BUG FIX: this was '= 1', which made the packet reader toggle
         * next_chunk_is_video and demux audio chunks for a stream that
         * was never created */
        idcin->audio_present = 0;

    idcin->next_chunk_is_video = 1;
    idcin->pts = 0;

    return 0;
}

/**
 * Read the next chunk. Video chunks start with a command long
 * (2 = EOF, 1 = palette change follows) and alternate with audio
 * chunks when audio is present; both streams share a 1/14 s clock.
 */
static int idcin_read_packet(AVFormatContext *s,
                             AVPacket *pkt)
{
    int ret;
    unsigned int command;
    unsigned int chunk_size;
    IdcinDemuxContext *idcin = s->priv_data;
    ByteIOContext *pb = s->pb;
    int i;
    int palette_scale;
    unsigned char r, g, b;
    unsigned char palette_buffer[768];

    if (url_feof(s->pb))
        return AVERROR(EIO);

    if (idcin->next_chunk_is_video) {
        command = get_le32(pb);
        if (command == 2) {
            return AVERROR(EIO);
        } else if (command == 1) {
            /* trigger a palette change */
            idcin->palctrl.palette_changed = 1;
            if (get_buffer(pb, palette_buffer, 768) != 768)
                return AVERROR(EIO);
            /* scale the palette as necessary: 6-bit VGA values need a
             * <<2 shift; any component > 63 means it is already 8-bit */
            palette_scale = 2;
            for (i = 0; i < 768; i++)
                if (palette_buffer[i] > 63) {
                    palette_scale = 0;
                    break;
                }

            for (i = 0; i < 256; i++) {
                r = palette_buffer[i * 3    ] << palette_scale;
                g = palette_buffer[i * 3 + 1] << palette_scale;
                b = palette_buffer[i * 3 + 2] << palette_scale;
                idcin->palctrl.palette[i] = (r << 16) | (g << 8) | (b);
            }
        }

        chunk_size = get_le32(pb);
        /* skip the number of decoded bytes (always equal to width * height) */
        url_fseek(pb, 4, SEEK_CUR);
        chunk_size -= 4;
        ret= av_get_packet(pb, pkt, chunk_size);
        if (ret < 0)
            return ret;
        pkt->stream_index = idcin->video_stream_index;
        pkt->pts = idcin->pts;
    } else {
        /* send out the audio chunk */
        if (idcin->current_audio_chunk)
            chunk_size = idcin->audio_chunk_size2;
        else
            chunk_size = idcin->audio_chunk_size1;
        ret= av_get_packet(pb, pkt, chunk_size);
        if (ret < 0)
            return ret;
        pkt->stream_index = idcin->audio_stream_index;
        pkt->pts = idcin->pts;

        idcin->current_audio_chunk ^= 1;
        idcin->pts++;
    }

    if (idcin->audio_present)
        idcin->next_chunk_is_video ^= 1;

    return ret;
}

AVInputFormat idcin_demuxer = {
    "idcin",
    NULL_IF_CONFIG_SMALL("id Cinematic format"),
    sizeof(IdcinDemuxContext),
    idcin_probe,
    idcin_read_header,
    idcin_read_packet,
};
123linslouis-android-video-cutter
jni/libavformat/idcin.c
C
asf20
10,359
/* * Cyril Comparon, Larbi Joubala, Resonate-MP4 2009 * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avlanguage.h" #include "libavutil/avstring.h" #include <stdlib.h> #include <stdint.h> #include <string.h> typedef struct LangEntry { const char str[4]; uint16_t next_equivalent; } LangEntry; static const uint16_t lang_table_counts[] = { 484, 20, 184 }; static const uint16_t lang_table_offsets[] = { 0, 484, 504 }; static const LangEntry lang_table[] = { /*----- AV_LANG_ISO639_2_BIBL entries (484) -----*/ /*0000*/ { "aar", 504 }, /*0001*/ { "abk", 505 }, /*0002*/ { "ace", 2 }, /*0003*/ { "ach", 3 }, /*0004*/ { "ada", 4 }, /*0005*/ { "ady", 5 }, /*0006*/ { "afa", 6 }, /*0007*/ { "afh", 7 }, /*0008*/ { "afr", 507 }, /*0009*/ { "ain", 9 }, /*0010*/ { "aka", 508 }, /*0011*/ { "akk", 11 }, /*0012*/ { "alb", 502 }, /*0013*/ { "ale", 13 }, /*0014*/ { "alg", 14 }, /*0015*/ { "alt", 15 }, /*0016*/ { "amh", 509 }, /*0017*/ { "ang", 17 }, /*0018*/ { "anp", 18 }, /*0019*/ { "apa", 19 }, /*0020*/ { "ara", 511 }, /*0021*/ { "arc", 21 }, /*0022*/ { "arg", 510 }, /*0023*/ { "arm", 492 }, /*0024*/ { "arn", 24 }, /*0025*/ { "arp", 25 }, /*0026*/ { "art", 26 }, /*0027*/ { "arw", 27 }, /*0028*/ { "asm", 512 }, /*0029*/ { "ast", 29 }, /*0030*/ { "ath", 30 }, /*0031*/ { "aus", 31 }, /*0032*/ { 
"ava", 513 }, /*0033*/ { "ave", 506 }, /*0034*/ { "awa", 34 }, /*0035*/ { "aym", 514 }, /*0036*/ { "aze", 515 }, /*0037*/ { "bad", 37 }, /*0038*/ { "bai", 38 }, /*0039*/ { "bak", 516 }, /*0040*/ { "bal", 40 }, /*0041*/ { "bam", 521 }, /*0042*/ { "ban", 42 }, /*0043*/ { "baq", 489 }, /*0044*/ { "bas", 44 }, /*0045*/ { "bat", 45 }, /*0046*/ { "bej", 46 }, /*0047*/ { "bel", 517 }, /*0048*/ { "bem", 48 }, /*0049*/ { "ben", 522 }, /*0050*/ { "ber", 50 }, /*0051*/ { "bho", 51 }, /*0052*/ { "bih", 519 }, /*0053*/ { "bik", 53 }, /*0054*/ { "bin", 54 }, /*0055*/ { "bis", 520 }, /*0056*/ { "bla", 56 }, /*0057*/ { "bnt", 57 }, /*0058*/ { "bos", 525 }, /*0059*/ { "bra", 59 }, /*0060*/ { "bre", 524 }, /*0061*/ { "btk", 61 }, /*0062*/ { "bua", 62 }, /*0063*/ { "bug", 63 }, /*0064*/ { "bul", 518 }, /*0065*/ { "bur", 498 }, /*0066*/ { "byn", 66 }, /*0067*/ { "cad", 67 }, /*0068*/ { "cai", 68 }, /*0069*/ { "car", 69 }, /*0070*/ { "cat", 526 }, /*0071*/ { "cau", 71 }, /*0072*/ { "ceb", 72 }, /*0073*/ { "cel", 73 }, /*0074*/ { "cha", 528 }, /*0075*/ { "chb", 75 }, /*0076*/ { "che", 527 }, /*0077*/ { "chg", 77 }, /*0078*/ { "chi", 503 }, /*0079*/ { "chk", 79 }, /*0080*/ { "chm", 80 }, /*0081*/ { "chn", 81 }, /*0082*/ { "cho", 82 }, /*0083*/ { "chp", 83 }, /*0084*/ { "chr", 84 }, /*0085*/ { "chu", 532 }, /*0086*/ { "chv", 533 }, /*0087*/ { "chy", 87 }, /*0088*/ { "cmc", 88 }, /*0089*/ { "cop", 89 }, /*0090*/ { "cor", 593 }, /*0091*/ { "cos", 529 }, /*0092*/ { "cpe", 92 }, /*0093*/ { "cpf", 93 }, /*0094*/ { "cpp", 94 }, /*0095*/ { "cre", 530 }, /*0096*/ { "crh", 96 }, /*0097*/ { "crp", 97 }, /*0098*/ { "csb", 98 }, /*0099*/ { "cus", 99 }, /*0100*/ { "cze", 485 }, /*0101*/ { "dak", 101 }, /*0102*/ { "dan", 535 }, /*0103*/ { "dar", 103 }, /*0104*/ { "day", 104 }, /*0105*/ { "del", 105 }, /*0106*/ { "den", 106 }, /*0107*/ { "dgr", 107 }, /*0108*/ { "din", 108 }, /*0109*/ { "div", 537 }, /*0110*/ { "doi", 110 }, /*0111*/ { "dra", 111 }, /*0112*/ { "dsb", 112 }, /*0113*/ { "dua", 113 }, 
/*0114*/ { "dum", 114 }, /*0115*/ { "dut", 499 }, /*0116*/ { "dyu", 116 }, /*0117*/ { "dzo", 538 }, /*0118*/ { "efi", 118 }, /*0119*/ { "egy", 119 }, /*0120*/ { "eka", 120 }, /*0121*/ { "elx", 121 }, /*0122*/ { "eng", 541 }, /*0123*/ { "enm", 123 }, /*0124*/ { "epo", 542 }, /*0125*/ { "est", 544 }, /*0126*/ { "ewe", 539 }, /*0127*/ { "ewo", 127 }, /*0128*/ { "fan", 128 }, /*0129*/ { "fao", 550 }, /*0130*/ { "fat", 130 }, /*0131*/ { "fij", 549 }, /*0132*/ { "fil", 132 }, /*0133*/ { "fin", 548 }, /*0134*/ { "fiu", 134 }, /*0135*/ { "fon", 135 }, /*0136*/ { "fre", 491 }, /*0137*/ { "frm", 137 }, /*0138*/ { "fro", 138 }, /*0139*/ { "frr", 139 }, /*0140*/ { "frs", 140 }, /*0141*/ { "fry", 552 }, /*0142*/ { "ful", 547 }, /*0143*/ { "fur", 143 }, /*0144*/ { "gaa", 144 }, /*0145*/ { "gay", 145 }, /*0146*/ { "gba", 146 }, /*0147*/ { "gem", 147 }, /*0148*/ { "geo", 494 }, /*0149*/ { "ger", 487 }, /*0150*/ { "gez", 150 }, /*0151*/ { "gil", 151 }, /*0152*/ { "gla", 554 }, /*0153*/ { "gle", 553 }, /*0154*/ { "glg", 555 }, /*0155*/ { "glv", 558 }, /*0156*/ { "gmh", 156 }, /*0157*/ { "goh", 157 }, /*0158*/ { "gon", 158 }, /*0159*/ { "gor", 159 }, /*0160*/ { "got", 160 }, /*0161*/ { "grb", 161 }, /*0162*/ { "grc", 162 }, /*0163*/ { "gre", 488 }, /*0164*/ { "grn", 556 }, /*0165*/ { "gsw", 165 }, /*0166*/ { "guj", 557 }, /*0167*/ { "gwi", 167 }, /*0168*/ { "hai", 168 }, /*0169*/ { "hat", 564 }, /*0170*/ { "hau", 559 }, /*0171*/ { "haw", 171 }, /*0172*/ { "heb", 560 }, /*0173*/ { "her", 567 }, /*0174*/ { "hil", 174 }, /*0175*/ { "him", 175 }, /*0176*/ { "hin", 561 }, /*0177*/ { "hit", 177 }, /*0178*/ { "hmn", 178 }, /*0179*/ { "hmo", 562 }, /*0180*/ { "hrv", 563 }, /*0181*/ { "hsb", 181 }, /*0182*/ { "hun", 565 }, /*0183*/ { "hup", 183 }, /*0184*/ { "iba", 184 }, /*0185*/ { "ibo", 571 }, /*0186*/ { "ice", 493 }, /*0187*/ { "ido", 574 }, /*0188*/ { "iii", 572 }, /*0189*/ { "ijo", 189 }, /*0190*/ { "iku", 577 }, /*0191*/ { "ile", 570 }, /*0192*/ { "ilo", 192 }, /*0193*/ { "ina", 568 }, 
/*0194*/ { "inc", 194 }, /*0195*/ { "ind", 569 }, /*0196*/ { "ine", 196 }, /*0197*/ { "inh", 197 }, /*0198*/ { "ipk", 573 }, /*0199*/ { "ira", 199 }, /*0200*/ { "iro", 200 }, /*0201*/ { "ita", 576 }, /*0202*/ { "jav", 579 }, /*0203*/ { "jbo", 203 }, /*0204*/ { "jpn", 578 }, /*0205*/ { "jpr", 205 }, /*0206*/ { "jrb", 206 }, /*0207*/ { "kaa", 207 }, /*0208*/ { "kab", 208 }, /*0209*/ { "kac", 209 }, /*0210*/ { "kal", 585 }, /*0211*/ { "kam", 211 }, /*0212*/ { "kan", 587 }, /*0213*/ { "kar", 213 }, /*0214*/ { "kas", 590 }, /*0215*/ { "kau", 589 }, /*0216*/ { "kaw", 216 }, /*0217*/ { "kaz", 584 }, /*0218*/ { "kbd", 218 }, /*0219*/ { "kha", 219 }, /*0220*/ { "khi", 220 }, /*0221*/ { "khm", 586 }, /*0222*/ { "kho", 222 }, /*0223*/ { "kik", 582 }, /*0224*/ { "kin", 640 }, /*0225*/ { "kir", 594 }, /*0226*/ { "kmb", 226 }, /*0227*/ { "kok", 227 }, /*0228*/ { "kom", 592 }, /*0229*/ { "kon", 581 }, /*0230*/ { "kor", 588 }, /*0231*/ { "kos", 231 }, /*0232*/ { "kpe", 232 }, /*0233*/ { "krc", 233 }, /*0234*/ { "krl", 234 }, /*0235*/ { "kro", 235 }, /*0236*/ { "kru", 236 }, /*0237*/ { "kua", 583 }, /*0238*/ { "kum", 238 }, /*0239*/ { "kur", 591 }, /*0240*/ { "kut", 240 }, /*0241*/ { "lad", 241 }, /*0242*/ { "lah", 242 }, /*0243*/ { "lam", 243 }, /*0244*/ { "lao", 600 }, /*0245*/ { "lat", 595 }, /*0246*/ { "lav", 603 }, /*0247*/ { "lez", 247 }, /*0248*/ { "lim", 598 }, /*0249*/ { "lin", 599 }, /*0250*/ { "lit", 601 }, /*0251*/ { "lol", 251 }, /*0252*/ { "loz", 252 }, /*0253*/ { "ltz", 596 }, /*0254*/ { "lua", 254 }, /*0255*/ { "lub", 602 }, /*0256*/ { "lug", 597 }, /*0257*/ { "lui", 257 }, /*0258*/ { "lun", 258 }, /*0259*/ { "luo", 259 }, /*0260*/ { "lus", 260 }, /*0261*/ { "mac", 495 }, /*0262*/ { "mad", 262 }, /*0263*/ { "mag", 263 }, /*0264*/ { "mah", 605 }, /*0265*/ { "mai", 265 }, /*0266*/ { "mak", 266 }, /*0267*/ { "mal", 608 }, /*0268*/ { "man", 268 }, /*0269*/ { "mao", 496 }, /*0270*/ { "map", 270 }, /*0271*/ { "mar", 610 }, /*0272*/ { "mas", 272 }, /*0273*/ { "may", 497 }, 
/*0274*/ { "mdf", 274 }, /*0275*/ { "mdr", 275 }, /*0276*/ { "men", 276 }, /*0277*/ { "mga", 277 }, /*0278*/ { "mic", 278 }, /*0279*/ { "min", 279 }, /*0280*/ { "mis", 280 }, /*0281*/ { "mkh", 281 }, /*0282*/ { "mlg", 604 }, /*0283*/ { "mlt", 612 }, /*0284*/ { "mnc", 284 }, /*0285*/ { "mni", 285 }, /*0286*/ { "mno", 286 }, /*0287*/ { "moh", 287 }, /*0288*/ { "mon", 609 }, /*0289*/ { "mos", 289 }, /*0290*/ { "mul", 290 }, /*0291*/ { "mun", 291 }, /*0292*/ { "mus", 292 }, /*0293*/ { "mwl", 293 }, /*0294*/ { "mwr", 294 }, /*0295*/ { "myn", 295 }, /*0296*/ { "myv", 296 }, /*0297*/ { "nah", 297 }, /*0298*/ { "nai", 298 }, /*0299*/ { "nap", 299 }, /*0300*/ { "nau", 614 }, /*0301*/ { "nav", 623 }, /*0302*/ { "nbl", 622 }, /*0303*/ { "nde", 616 }, /*0304*/ { "ndo", 618 }, /*0305*/ { "nds", 305 }, /*0306*/ { "nep", 617 }, /*0307*/ { "new", 307 }, /*0308*/ { "nia", 308 }, /*0309*/ { "nic", 309 }, /*0310*/ { "niu", 310 }, /*0311*/ { "nno", 620 }, /*0312*/ { "nob", 615 }, /*0313*/ { "nog", 313 }, /*0314*/ { "non", 314 }, /*0315*/ { "nor", 621 }, /*0316*/ { "nqo", 316 }, /*0317*/ { "nso", 317 }, /*0318*/ { "nub", 318 }, /*0319*/ { "nwc", 319 }, /*0320*/ { "nya", 624 }, /*0321*/ { "nym", 321 }, /*0322*/ { "nyn", 322 }, /*0323*/ { "nyo", 323 }, /*0324*/ { "nzi", 324 }, /*0325*/ { "oci", 625 }, /*0326*/ { "oji", 626 }, /*0327*/ { "ori", 628 }, /*0328*/ { "orm", 627 }, /*0329*/ { "osa", 329 }, /*0330*/ { "oss", 629 }, /*0331*/ { "ota", 331 }, /*0332*/ { "oto", 332 }, /*0333*/ { "paa", 333 }, /*0334*/ { "pag", 334 }, /*0335*/ { "pal", 335 }, /*0336*/ { "pam", 336 }, /*0337*/ { "pan", 630 }, /*0338*/ { "pap", 338 }, /*0339*/ { "pau", 339 }, /*0340*/ { "peo", 340 }, /*0341*/ { "per", 490 }, /*0342*/ { "phi", 342 }, /*0343*/ { "phn", 343 }, /*0344*/ { "pli", 631 }, /*0345*/ { "pol", 632 }, /*0346*/ { "pon", 346 }, /*0347*/ { "por", 634 }, /*0348*/ { "pra", 348 }, /*0349*/ { "pro", 349 }, /*0350*/ { "pus", 633 }, /*0351*/ { "que", 635 }, /*0352*/ { "raj", 352 }, /*0353*/ { "rap", 353 }, 
/*0354*/ { "rar", 354 }, /*0355*/ { "roa", 355 }, /*0356*/ { "roh", 636 }, /*0357*/ { "rom", 357 }, /*0358*/ { "rum", 500 }, /*0359*/ { "run", 637 }, /*0360*/ { "rup", 360 }, /*0361*/ { "rus", 639 }, /*0362*/ { "sad", 362 }, /*0363*/ { "sag", 645 }, /*0364*/ { "sah", 364 }, /*0365*/ { "sai", 365 }, /*0366*/ { "sal", 366 }, /*0367*/ { "sam", 367 }, /*0368*/ { "san", 641 }, /*0369*/ { "sas", 369 }, /*0370*/ { "sat", 370 }, /*0371*/ { "scn", 371 }, /*0372*/ { "sco", 372 }, /*0373*/ { "sel", 373 }, /*0374*/ { "sem", 374 }, /*0375*/ { "sga", 375 }, /*0376*/ { "sgn", 376 }, /*0377*/ { "shn", 377 }, /*0378*/ { "sid", 378 }, /*0379*/ { "sin", 646 }, /*0380*/ { "sio", 380 }, /*0381*/ { "sit", 381 }, /*0382*/ { "sla", 382 }, /*0383*/ { "slo", 501 }, /*0384*/ { "slv", 648 }, /*0385*/ { "sma", 385 }, /*0386*/ { "sme", 644 }, /*0387*/ { "smi", 387 }, /*0388*/ { "smj", 388 }, /*0389*/ { "smn", 389 }, /*0390*/ { "smo", 649 }, /*0391*/ { "sms", 391 }, /*0392*/ { "sna", 650 }, /*0393*/ { "snd", 643 }, /*0394*/ { "snk", 394 }, /*0395*/ { "sog", 395 }, /*0396*/ { "som", 651 }, /*0397*/ { "son", 397 }, /*0398*/ { "sot", 655 }, /*0399*/ { "spa", 543 }, /*0400*/ { "srd", 642 }, /*0401*/ { "srn", 401 }, /*0402*/ { "srp", 653 }, /*0403*/ { "srr", 403 }, /*0404*/ { "ssa", 404 }, /*0405*/ { "ssw", 654 }, /*0406*/ { "suk", 406 }, /*0407*/ { "sun", 656 }, /*0408*/ { "sus", 408 }, /*0409*/ { "sux", 409 }, /*0410*/ { "swa", 658 }, /*0411*/ { "swe", 657 }, /*0412*/ { "syc", 412 }, /*0413*/ { "syr", 413 }, /*0414*/ { "tah", 672 }, /*0415*/ { "tai", 415 }, /*0416*/ { "tam", 659 }, /*0417*/ { "tat", 670 }, /*0418*/ { "tel", 660 }, /*0419*/ { "tem", 419 }, /*0420*/ { "ter", 420 }, /*0421*/ { "tet", 421 }, /*0422*/ { "tgk", 661 }, /*0423*/ { "tgl", 665 }, /*0424*/ { "tha", 662 }, /*0425*/ { "tib", 484 }, /*0426*/ { "tig", 426 }, /*0427*/ { "tir", 663 }, /*0428*/ { "tiv", 428 }, /*0429*/ { "tkl", 429 }, /*0430*/ { "tlh", 430 }, /*0431*/ { "tli", 431 }, /*0432*/ { "tmh", 432 }, /*0433*/ { "tog", 433 }, 
/*0434*/ { "ton", 667 }, /*0435*/ { "tpi", 435 }, /*0436*/ { "tsi", 436 }, /*0437*/ { "tsn", 666 }, /*0438*/ { "tso", 669 }, /*0439*/ { "tuk", 664 }, /*0440*/ { "tum", 440 }, /*0441*/ { "tup", 441 }, /*0442*/ { "tur", 668 }, /*0443*/ { "tut", 443 }, /*0444*/ { "tvl", 444 }, /*0445*/ { "twi", 671 }, /*0446*/ { "tyv", 446 }, /*0447*/ { "udm", 447 }, /*0448*/ { "uga", 448 }, /*0449*/ { "uig", 673 }, /*0450*/ { "ukr", 674 }, /*0451*/ { "umb", 451 }, /*0452*/ { "und", 452 }, /*0453*/ { "urd", 675 }, /*0454*/ { "uzb", 676 }, /*0455*/ { "vai", 455 }, /*0456*/ { "ven", 677 }, /*0457*/ { "vie", 678 }, /*0458*/ { "vol", 679 }, /*0459*/ { "vot", 459 }, /*0460*/ { "wak", 460 }, /*0461*/ { "wal", 461 }, /*0462*/ { "war", 462 }, /*0463*/ { "was", 463 }, /*0464*/ { "wel", 486 }, /*0465*/ { "wen", 465 }, /*0466*/ { "wln", 680 }, /*0467*/ { "wol", 681 }, /*0468*/ { "xal", 468 }, /*0469*/ { "xho", 682 }, /*0470*/ { "yao", 470 }, /*0471*/ { "yap", 471 }, /*0472*/ { "yid", 683 }, /*0473*/ { "yor", 684 }, /*0474*/ { "ypk", 474 }, /*0475*/ { "zap", 475 }, /*0476*/ { "zbl", 476 }, /*0477*/ { "zen", 477 }, /*0478*/ { "zha", 685 }, /*0479*/ { "znd", 479 }, /*0480*/ { "zul", 687 }, /*0481*/ { "zun", 481 }, /*0482*/ { "zxx", 482 }, /*0483*/ { "zza", 483 }, /*----- AV_LANG_ISO639_2_TERM entries (20) -----*/ /*0484*/ { "bod", 523 }, /*0485*/ { "ces", 531 }, /*0486*/ { "cym", 534 }, /*0487*/ { "deu", 536 }, /*0488*/ { "ell", 540 }, /*0489*/ { "eus", 545 }, /*0490*/ { "fas", 546 }, /*0491*/ { "fra", 551 }, /*0492*/ { "hye", 566 }, /*0493*/ { "isl", 575 }, /*0494*/ { "kat", 580 }, /*0495*/ { "mkd", 607 }, /*0496*/ { "mri", 606 }, /*0497*/ { "msa", 611 }, /*0498*/ { "mya", 613 }, /*0499*/ { "nld", 619 }, /*0500*/ { "ron", 638 }, /*0501*/ { "slk", 647 }, /*0502*/ { "sqi", 652 }, /*0503*/ { "zho", 686 }, /*----- AV_LANG_ISO639_1 entries (184) -----*/ /*0504*/ { "aa" , 0 }, /*0505*/ { "ab" , 1 }, /*0506*/ { "ae" , 33 }, /*0507*/ { "af" , 8 }, /*0508*/ { "ak" , 10 }, /*0509*/ { "am" , 16 }, /*0510*/ { 
"an" , 22 }, /*0511*/ { "ar" , 20 }, /*0512*/ { "as" , 28 }, /*0513*/ { "av" , 32 }, /*0514*/ { "ay" , 35 }, /*0515*/ { "az" , 36 }, /*0516*/ { "ba" , 39 }, /*0517*/ { "be" , 47 }, /*0518*/ { "bg" , 64 }, /*0519*/ { "bh" , 52 }, /*0520*/ { "bi" , 55 }, /*0521*/ { "bm" , 41 }, /*0522*/ { "bn" , 49 }, /*0523*/ { "bo" , 425 }, /*0524*/ { "br" , 60 }, /*0525*/ { "bs" , 58 }, /*0526*/ { "ca" , 70 }, /*0527*/ { "ce" , 76 }, /*0528*/ { "ch" , 74 }, /*0529*/ { "co" , 91 }, /*0530*/ { "cr" , 95 }, /*0531*/ { "cs" , 100 }, /*0532*/ { "cu" , 85 }, /*0533*/ { "cv" , 86 }, /*0534*/ { "cy" , 464 }, /*0535*/ { "da" , 102 }, /*0536*/ { "de" , 149 }, /*0537*/ { "dv" , 109 }, /*0538*/ { "dz" , 117 }, /*0539*/ { "ee" , 126 }, /*0540*/ { "el" , 163 }, /*0541*/ { "en" , 122 }, /*0542*/ { "eo" , 124 }, /*0543*/ { "es" , 399 }, /*0544*/ { "et" , 125 }, /*0545*/ { "eu" , 43 }, /*0546*/ { "fa" , 341 }, /*0547*/ { "ff" , 142 }, /*0548*/ { "fi" , 133 }, /*0549*/ { "fj" , 131 }, /*0550*/ { "fo" , 129 }, /*0551*/ { "fr" , 136 }, /*0552*/ { "fy" , 141 }, /*0553*/ { "ga" , 153 }, /*0554*/ { "gd" , 152 }, /*0555*/ { "gl" , 154 }, /*0556*/ { "gn" , 164 }, /*0557*/ { "gu" , 166 }, /*0558*/ { "gv" , 155 }, /*0559*/ { "ha" , 170 }, /*0560*/ { "he" , 172 }, /*0561*/ { "hi" , 176 }, /*0562*/ { "ho" , 179 }, /*0563*/ { "hr" , 180 }, /*0564*/ { "ht" , 169 }, /*0565*/ { "hu" , 182 }, /*0566*/ { "hy" , 23 }, /*0567*/ { "hz" , 173 }, /*0568*/ { "ia" , 193 }, /*0569*/ { "id" , 195 }, /*0570*/ { "ie" , 191 }, /*0571*/ { "ig" , 185 }, /*0572*/ { "ii" , 188 }, /*0573*/ { "ik" , 198 }, /*0574*/ { "io" , 187 }, /*0575*/ { "is" , 186 }, /*0576*/ { "it" , 201 }, /*0577*/ { "iu" , 190 }, /*0578*/ { "ja" , 204 }, /*0579*/ { "jv" , 202 }, /*0580*/ { "ka" , 148 }, /*0581*/ { "kg" , 229 }, /*0582*/ { "ki" , 223 }, /*0583*/ { "kj" , 237 }, /*0584*/ { "kk" , 217 }, /*0585*/ { "kl" , 210 }, /*0586*/ { "km" , 221 }, /*0587*/ { "kn" , 212 }, /*0588*/ { "ko" , 230 }, /*0589*/ { "kr" , 215 }, /*0590*/ { "ks" , 214 }, /*0591*/ 
{ "ku" , 239 }, /*0592*/ { "kv" , 228 }, /*0593*/ { "kw" , 90 }, /*0594*/ { "ky" , 225 }, /*0595*/ { "la" , 245 }, /*0596*/ { "lb" , 253 }, /*0597*/ { "lg" , 256 }, /*0598*/ { "li" , 248 }, /*0599*/ { "ln" , 249 }, /*0600*/ { "lo" , 244 }, /*0601*/ { "lt" , 250 }, /*0602*/ { "lu" , 255 }, /*0603*/ { "lv" , 246 }, /*0604*/ { "mg" , 282 }, /*0605*/ { "mh" , 264 }, /*0606*/ { "mi" , 269 }, /*0607*/ { "mk" , 261 }, /*0608*/ { "ml" , 267 }, /*0609*/ { "mn" , 288 }, /*0610*/ { "mr" , 271 }, /*0611*/ { "ms" , 273 }, /*0612*/ { "mt" , 283 }, /*0613*/ { "my" , 65 }, /*0614*/ { "na" , 300 }, /*0615*/ { "nb" , 312 }, /*0616*/ { "nd" , 303 }, /*0617*/ { "ne" , 306 }, /*0618*/ { "ng" , 304 }, /*0619*/ { "nl" , 115 }, /*0620*/ { "nn" , 311 }, /*0621*/ { "no" , 315 }, /*0622*/ { "nr" , 302 }, /*0623*/ { "nv" , 301 }, /*0624*/ { "ny" , 320 }, /*0625*/ { "oc" , 325 }, /*0626*/ { "oj" , 326 }, /*0627*/ { "om" , 328 }, /*0628*/ { "or" , 327 }, /*0629*/ { "os" , 330 }, /*0630*/ { "pa" , 337 }, /*0631*/ { "pi" , 344 }, /*0632*/ { "pl" , 345 }, /*0633*/ { "ps" , 350 }, /*0634*/ { "pt" , 347 }, /*0635*/ { "qu" , 351 }, /*0636*/ { "rm" , 356 }, /*0637*/ { "rn" , 359 }, /*0638*/ { "ro" , 358 }, /*0639*/ { "ru" , 361 }, /*0640*/ { "rw" , 224 }, /*0641*/ { "sa" , 368 }, /*0642*/ { "sc" , 400 }, /*0643*/ { "sd" , 393 }, /*0644*/ { "se" , 386 }, /*0645*/ { "sg" , 363 }, /*0646*/ { "si" , 379 }, /*0647*/ { "sk" , 383 }, /*0648*/ { "sl" , 384 }, /*0649*/ { "sm" , 390 }, /*0650*/ { "sn" , 392 }, /*0651*/ { "so" , 396 }, /*0652*/ { "sq" , 12 }, /*0653*/ { "sr" , 402 }, /*0654*/ { "ss" , 405 }, /*0655*/ { "st" , 398 }, /*0656*/ { "su" , 407 }, /*0657*/ { "sv" , 411 }, /*0658*/ { "sw" , 410 }, /*0659*/ { "ta" , 416 }, /*0660*/ { "te" , 418 }, /*0661*/ { "tg" , 422 }, /*0662*/ { "th" , 424 }, /*0663*/ { "ti" , 427 }, /*0664*/ { "tk" , 439 }, /*0665*/ { "tl" , 423 }, /*0666*/ { "tn" , 437 }, /*0667*/ { "to" , 434 }, /*0668*/ { "tr" , 442 }, /*0669*/ { "ts" , 438 }, /*0670*/ { "tt" , 417 }, /*0671*/ { 
"tw" , 445 }, /*0672*/ { "ty" , 414 }, /*0673*/ { "ug" , 449 }, /*0674*/ { "uk" , 450 }, /*0675*/ { "ur" , 453 }, /*0676*/ { "uz" , 454 }, /*0677*/ { "ve" , 456 }, /*0678*/ { "vi" , 457 }, /*0679*/ { "vo" , 458 }, /*0680*/ { "wa" , 466 }, /*0681*/ { "wo" , 467 }, /*0682*/ { "xh" , 469 }, /*0683*/ { "yi" , 472 }, /*0684*/ { "yo" , 473 }, /*0685*/ { "za" , 478 }, /*0686*/ { "zh" , 78 }, /*0687*/ { "zu" , 480 }, { "", 0 } }; static int lang_table_compare(const void *lhs, const void *rhs) { return strcmp(lhs, ((const LangEntry *)rhs)->str); } const char *av_convert_lang_to(const char *lang, enum AVLangCodespace target_codespace) { int i; const LangEntry *entry = NULL; const int NB_CODESPACES = sizeof(lang_table_counts)/sizeof(*lang_table_counts); if (target_codespace >= NB_CODESPACES) return NULL; for (i=0; !entry && i<NB_CODESPACES; i++) entry = bsearch(lang, lang_table + lang_table_offsets[i], lang_table_counts[i], sizeof(LangEntry), lang_table_compare); if (!entry) return NULL; for (i=0; i<NB_CODESPACES; i++) if (entry >= lang_table + lang_table_offsets[target_codespace] && entry < lang_table + lang_table_offsets[target_codespace] + lang_table_counts[target_codespace]) return entry->str; else entry = lang_table + entry->next_equivalent; if (target_codespace == AV_LANG_ISO639_2_TERM) return av_convert_lang_to(lang, AV_LANG_ISO639_2_BIBL); return NULL; }
123linslouis-android-video-cutter
jni/libavformat/avlanguage.c
C
asf20
23,189
/* * AVI muxer * Copyright (c) 2000 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avformat.h" #include "avi.h" #include "riff.h" #include "libavutil/intreadwrite.h" /* * TODO: * - fill all fields if non streamed (nb_frames for example) */ typedef struct AVIIentry { unsigned int flags, pos, len; } AVIIentry; #define AVI_INDEX_CLUSTER_SIZE 16384 typedef struct AVIIndex { int64_t indx_start; int entry; int ents_allocated; AVIIentry** cluster; } AVIIndex; typedef struct { int64_t riff_start, movi_list, odml_list; int64_t frames_hdr_all; int riff_id; } AVIContext; typedef struct { int64_t frames_hdr_strm; int audio_strm_length; int packet_count; int entry; AVIIndex indexes; } AVIStream ; static inline AVIIentry* avi_get_ientry(AVIIndex* idx, int ent_id) { int cl = ent_id / AVI_INDEX_CLUSTER_SIZE; int id = ent_id % AVI_INDEX_CLUSTER_SIZE; return &idx->cluster[cl][id]; } static int64_t avi_start_new_riff(AVFormatContext *s, ByteIOContext *pb, const char* riff_tag, const char* list_tag) { AVIContext *avi= s->priv_data; int64_t loff; int i; avi->riff_id++; for (i=0; i<s->nb_streams; i++){ AVIStream *avist= s->streams[i]->priv_data; avist->indexes.entry = 0; } avi->riff_start = ff_start_tag(pb, "RIFF"); put_tag(pb, riff_tag); loff = ff_start_tag(pb, "LIST"); 
put_tag(pb, list_tag); return loff; } static char* avi_stream2fourcc(char* tag, int index, enum AVMediaType type) { tag[0] = '0'; tag[1] = '0' + index; if (type == AVMEDIA_TYPE_VIDEO) { tag[2] = 'd'; tag[3] = 'c'; } else if (type == AVMEDIA_TYPE_SUBTITLE) { // note: this is not an official code tag[2] = 's'; tag[3] = 'b'; } else { tag[2] = 'w'; tag[3] = 'b'; } tag[4] = '\0'; return tag; } static void avi_write_info_tag(ByteIOContext *pb, const char *tag, const char *str) { int len = strlen(str); if (len > 0) { len++; put_tag(pb, tag); put_le32(pb, len); put_strz(pb, str); if (len & 1) put_byte(pb, 0); } } static int avi_write_counters(AVFormatContext* s, int riff_id) { ByteIOContext *pb = s->pb; AVIContext *avi = s->priv_data; int n, au_byterate, au_ssize, au_scale, nb_frames = 0; int64_t file_size; AVCodecContext* stream; file_size = url_ftell(pb); for(n = 0; n < s->nb_streams; n++) { AVIStream *avist= s->streams[n]->priv_data; assert(avist->frames_hdr_strm); stream = s->streams[n]->codec; url_fseek(pb, avist->frames_hdr_strm, SEEK_SET); ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale); if(au_ssize == 0) { put_le32(pb, avist->packet_count); } else { put_le32(pb, avist->audio_strm_length / au_ssize); } if(stream->codec_type == AVMEDIA_TYPE_VIDEO) nb_frames = FFMAX(nb_frames, avist->packet_count); } if(riff_id == 1) { assert(avi->frames_hdr_all); url_fseek(pb, avi->frames_hdr_all, SEEK_SET); put_le32(pb, nb_frames); } url_fseek(pb, file_size, SEEK_SET); return 0; } static int avi_write_header(AVFormatContext *s) { AVIContext *avi = s->priv_data; ByteIOContext *pb = s->pb; int bitrate, n, i, nb_frames, au_byterate, au_ssize, au_scale; AVCodecContext *stream, *video_enc; int64_t list1, list2, strh, strf; AVMetadataTag *t = NULL; for(n=0;n<s->nb_streams;n++) { s->streams[n]->priv_data= av_mallocz(sizeof(AVIStream)); if(!s->streams[n]->priv_data) return AVERROR(ENOMEM); } /* header list */ avi->riff_id = 0; list1 = avi_start_new_riff(s, pb, "AVI ", 
"hdrl"); /* avi header */ put_tag(pb, "avih"); put_le32(pb, 14 * 4); bitrate = 0; video_enc = NULL; for(n=0;n<s->nb_streams;n++) { stream = s->streams[n]->codec; bitrate += stream->bit_rate; if (stream->codec_type == AVMEDIA_TYPE_VIDEO) video_enc = stream; } nb_frames = 0; if(video_enc){ put_le32(pb, (uint32_t)(INT64_C(1000000) * video_enc->time_base.num / video_enc->time_base.den)); } else { put_le32(pb, 0); } put_le32(pb, bitrate / 8); /* XXX: not quite exact */ put_le32(pb, 0); /* padding */ if (url_is_streamed(pb)) put_le32(pb, AVIF_TRUSTCKTYPE | AVIF_ISINTERLEAVED); /* flags */ else put_le32(pb, AVIF_TRUSTCKTYPE | AVIF_HASINDEX | AVIF_ISINTERLEAVED); /* flags */ avi->frames_hdr_all = url_ftell(pb); /* remember this offset to fill later */ put_le32(pb, nb_frames); /* nb frames, filled later */ put_le32(pb, 0); /* initial frame */ put_le32(pb, s->nb_streams); /* nb streams */ put_le32(pb, 1024 * 1024); /* suggested buffer size */ if(video_enc){ put_le32(pb, video_enc->width); put_le32(pb, video_enc->height); } else { put_le32(pb, 0); put_le32(pb, 0); } put_le32(pb, 0); /* reserved */ put_le32(pb, 0); /* reserved */ put_le32(pb, 0); /* reserved */ put_le32(pb, 0); /* reserved */ /* stream list */ for(i=0;i<n;i++) { AVIStream *avist= s->streams[i]->priv_data; list2 = ff_start_tag(pb, "LIST"); put_tag(pb, "strl"); stream = s->streams[i]->codec; /* stream generic header */ strh = ff_start_tag(pb, "strh"); switch(stream->codec_type) { case AVMEDIA_TYPE_SUBTITLE: // XSUB subtitles behave like video tracks, other subtitles // are not (yet) supported. 
if (stream->codec_id != CODEC_ID_XSUB) break; case AVMEDIA_TYPE_VIDEO: put_tag(pb, "vids"); break; case AVMEDIA_TYPE_AUDIO: put_tag(pb, "auds"); break; // case AVMEDIA_TYPE_TEXT : put_tag(pb, "txts"); break; case AVMEDIA_TYPE_DATA : put_tag(pb, "dats"); break; } if(stream->codec_type == AVMEDIA_TYPE_VIDEO || stream->codec_id == CODEC_ID_XSUB) put_le32(pb, stream->codec_tag); else put_le32(pb, 1); put_le32(pb, 0); /* flags */ put_le16(pb, 0); /* priority */ put_le16(pb, 0); /* language */ put_le32(pb, 0); /* initial frame */ ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale); put_le32(pb, au_scale); /* scale */ put_le32(pb, au_byterate); /* rate */ av_set_pts_info(s->streams[i], 64, au_scale, au_byterate); put_le32(pb, 0); /* start */ avist->frames_hdr_strm = url_ftell(pb); /* remember this offset to fill later */ if (url_is_streamed(pb)) put_le32(pb, AVI_MAX_RIFF_SIZE); /* FIXME: this may be broken, but who cares */ else put_le32(pb, 0); /* length, XXX: filled later */ /* suggested buffer size */ //FIXME set at the end to largest chunk if(stream->codec_type == AVMEDIA_TYPE_VIDEO) put_le32(pb, 1024 * 1024); else if(stream->codec_type == AVMEDIA_TYPE_AUDIO) put_le32(pb, 12 * 1024); else put_le32(pb, 0); put_le32(pb, -1); /* quality */ put_le32(pb, au_ssize); /* sample size */ put_le32(pb, 0); put_le16(pb, stream->width); put_le16(pb, stream->height); ff_end_tag(pb, strh); if(stream->codec_type != AVMEDIA_TYPE_DATA){ strf = ff_start_tag(pb, "strf"); switch(stream->codec_type) { case AVMEDIA_TYPE_SUBTITLE: // XSUB subtitles behave like video tracks, other subtitles // are not (yet) supported. 
if (stream->codec_id != CODEC_ID_XSUB) break; case AVMEDIA_TYPE_VIDEO: ff_put_bmp_header(pb, stream, ff_codec_bmp_tags, 0); break; case AVMEDIA_TYPE_AUDIO: if (ff_put_wav_header(pb, stream) < 0) { return -1; } break; default: return -1; } ff_end_tag(pb, strf); if ((t = av_metadata_get(s->streams[i]->metadata, "strn", NULL, 0))) { avi_write_info_tag(s->pb, t->key, t->value); t = NULL; } //FIXME a limitation of metadata conversion system else if ((t = av_metadata_get(s->streams[i]->metadata, "INAM", NULL, 0))) { avi_write_info_tag(s->pb, "strn", t->value); t = NULL; } } if (!url_is_streamed(pb)) { unsigned char tag[5]; int j; /* Starting to lay out AVI OpenDML master index. * We want to make it JUNK entry for now, since we'd * like to get away without making AVI an OpenDML one * for compatibility reasons. */ avist->indexes.entry = avist->indexes.ents_allocated = 0; avist->indexes.indx_start = ff_start_tag(pb, "JUNK"); put_le16(pb, 4); /* wLongsPerEntry */ put_byte(pb, 0); /* bIndexSubType (0 == frame index) */ put_byte(pb, 0); /* bIndexType (0 == AVI_INDEX_OF_INDEXES) */ put_le32(pb, 0); /* nEntriesInUse (will fill out later on) */ put_tag(pb, avi_stream2fourcc(&tag[0], i, stream->codec_type)); /* dwChunkId */ put_le64(pb, 0); /* dwReserved[3] put_le32(pb, 0); Must be 0. 
*/ for (j=0; j < AVI_MASTER_INDEX_SIZE * 2; j++) put_le64(pb, 0); ff_end_tag(pb, avist->indexes.indx_start); } if( stream->codec_type == AVMEDIA_TYPE_VIDEO && s->streams[i]->sample_aspect_ratio.num>0 && s->streams[i]->sample_aspect_ratio.den>0){ int vprp= ff_start_tag(pb, "vprp"); AVRational dar = av_mul_q(s->streams[i]->sample_aspect_ratio, (AVRational){stream->width, stream->height}); int num, den; av_reduce(&num, &den, dar.num, dar.den, 0xFFFF); put_le32(pb, 0); //video format = unknown put_le32(pb, 0); //video standard= unknown put_le32(pb, lrintf(1.0/av_q2d(stream->time_base))); put_le32(pb, stream->width ); put_le32(pb, stream->height); put_le16(pb, den); put_le16(pb, num); put_le32(pb, stream->width ); put_le32(pb, stream->height); put_le32(pb, 1); //progressive FIXME put_le32(pb, stream->height); put_le32(pb, stream->width ); put_le32(pb, stream->height); put_le32(pb, stream->width ); put_le32(pb, 0); put_le32(pb, 0); put_le32(pb, 0); put_le32(pb, 0); ff_end_tag(pb, vprp); } ff_end_tag(pb, list2); } if (!url_is_streamed(pb)) { /* AVI could become an OpenDML one, if it grows beyond 2Gb range */ avi->odml_list = ff_start_tag(pb, "JUNK"); put_tag(pb, "odml"); put_tag(pb, "dmlh"); put_le32(pb, 248); for (i = 0; i < 248; i+= 4) put_le32(pb, 0); ff_end_tag(pb, avi->odml_list); } ff_end_tag(pb, list1); list2 = ff_start_tag(pb, "LIST"); put_tag(pb, "INFO"); for (i = 0; *ff_avi_tags[i]; i++) { if ((t = av_metadata_get(s->metadata, ff_avi_tags[i], NULL, AV_METADATA_MATCH_CASE))) avi_write_info_tag(s->pb, t->key, t->value); } ff_end_tag(pb, list2); /* some padding for easier tag editing */ list2 = ff_start_tag(pb, "JUNK"); for (i = 0; i < 1016; i += 4) put_le32(pb, 0); ff_end_tag(pb, list2); avi->movi_list = ff_start_tag(pb, "LIST"); put_tag(pb, "movi"); put_flush_packet(pb); return 0; } static int avi_write_ix(AVFormatContext *s) { ByteIOContext *pb = s->pb; AVIContext *avi = s->priv_data; char tag[5]; char ix_tag[] = "ix00"; int i, j; assert(!url_is_streamed(pb)); 
if (avi->riff_id > AVI_MASTER_INDEX_SIZE) return -1; for (i=0;i<s->nb_streams;i++) { AVIStream *avist= s->streams[i]->priv_data; int64_t ix, pos; avi_stream2fourcc(&tag[0], i, s->streams[i]->codec->codec_type); ix_tag[3] = '0' + i; /* Writing AVI OpenDML leaf index chunk */ ix = url_ftell(pb); put_tag(pb, &ix_tag[0]); /* ix?? */ put_le32(pb, avist->indexes.entry * 8 + 24); /* chunk size */ put_le16(pb, 2); /* wLongsPerEntry */ put_byte(pb, 0); /* bIndexSubType (0 == frame index) */ put_byte(pb, 1); /* bIndexType (1 == AVI_INDEX_OF_CHUNKS) */ put_le32(pb, avist->indexes.entry); /* nEntriesInUse */ put_tag(pb, &tag[0]); /* dwChunkId */ put_le64(pb, avi->movi_list);/* qwBaseOffset */ put_le32(pb, 0); /* dwReserved_3 (must be 0) */ for (j=0; j<avist->indexes.entry; j++) { AVIIentry* ie = avi_get_ientry(&avist->indexes, j); put_le32(pb, ie->pos + 8); put_le32(pb, ((uint32_t)ie->len & ~0x80000000) | (ie->flags & 0x10 ? 0 : 0x80000000)); } put_flush_packet(pb); pos = url_ftell(pb); /* Updating one entry in the AVI OpenDML master index */ url_fseek(pb, avist->indexes.indx_start - 8, SEEK_SET); put_tag(pb, "indx"); /* enabling this entry */ url_fskip(pb, 8); put_le32(pb, avi->riff_id); /* nEntriesInUse */ url_fskip(pb, 16*avi->riff_id); put_le64(pb, ix); /* qwOffset */ put_le32(pb, pos - ix); /* dwSize */ put_le32(pb, avist->indexes.entry); /* dwDuration */ url_fseek(pb, pos, SEEK_SET); } return 0; } static int avi_write_idx1(AVFormatContext *s) { ByteIOContext *pb = s->pb; AVIContext *avi = s->priv_data; int64_t idx_chunk; int i; char tag[5]; if (!url_is_streamed(pb)) { AVIStream *avist; AVIIentry* ie = 0, *tie; int empty, stream_id = -1; idx_chunk = ff_start_tag(pb, "idx1"); for(i=0; i<s->nb_streams; i++){ avist= s->streams[i]->priv_data; avist->entry=0; } do { empty = 1; for (i=0; i<s->nb_streams; i++) { avist= s->streams[i]->priv_data; if (avist->indexes.entry <= avist->entry) continue; tie = avi_get_ientry(&avist->indexes, avist->entry); if (empty || tie->pos < 
ie->pos) { ie = tie; stream_id = i; } empty = 0; } if (!empty) { avist= s->streams[stream_id]->priv_data; avi_stream2fourcc(&tag[0], stream_id, s->streams[stream_id]->codec->codec_type); put_tag(pb, &tag[0]); put_le32(pb, ie->flags); put_le32(pb, ie->pos); put_le32(pb, ie->len); avist->entry++; } } while (!empty); ff_end_tag(pb, idx_chunk); avi_write_counters(s, avi->riff_id); } return 0; } static int avi_write_packet(AVFormatContext *s, AVPacket *pkt) { AVIContext *avi = s->priv_data; ByteIOContext *pb = s->pb; unsigned char tag[5]; unsigned int flags=0; const int stream_index= pkt->stream_index; AVIStream *avist= s->streams[stream_index]->priv_data; AVCodecContext *enc= s->streams[stream_index]->codec; int size= pkt->size; // av_log(s, AV_LOG_DEBUG, "%"PRId64" %d %d\n", pkt->dts, avi->packet_count[stream_index], stream_index); while(enc->block_align==0 && pkt->dts != AV_NOPTS_VALUE && pkt->dts > avist->packet_count){ AVPacket empty_packet; av_init_packet(&empty_packet); empty_packet.size= 0; empty_packet.data= NULL; empty_packet.stream_index= stream_index; avi_write_packet(s, &empty_packet); // av_log(s, AV_LOG_DEBUG, "dup %"PRId64" %d\n", pkt->dts, avi->packet_count[stream_index]); } avist->packet_count++; // Make sure to put an OpenDML chunk when the file size exceeds the limits if (!url_is_streamed(pb) && (url_ftell(pb) - avi->riff_start > AVI_MAX_RIFF_SIZE)) { avi_write_ix(s); ff_end_tag(pb, avi->movi_list); if (avi->riff_id == 1) avi_write_idx1(s); ff_end_tag(pb, avi->riff_start); avi->movi_list = avi_start_new_riff(s, pb, "AVIX", "movi"); } avi_stream2fourcc(&tag[0], stream_index, enc->codec_type); if(pkt->flags&AV_PKT_FLAG_KEY) flags = 0x10; if (enc->codec_type == AVMEDIA_TYPE_AUDIO) { avist->audio_strm_length += size; } if (!url_is_streamed(s->pb)) { AVIIndex* idx = &avist->indexes; int cl = idx->entry / AVI_INDEX_CLUSTER_SIZE; int id = idx->entry % AVI_INDEX_CLUSTER_SIZE; if (idx->ents_allocated <= idx->entry) { idx->cluster = av_realloc(idx->cluster, 
(cl+1)*sizeof(void*)); if (!idx->cluster) return -1; idx->cluster[cl] = av_malloc(AVI_INDEX_CLUSTER_SIZE*sizeof(AVIIentry)); if (!idx->cluster[cl]) return -1; idx->ents_allocated += AVI_INDEX_CLUSTER_SIZE; } idx->cluster[cl][id].flags = flags; idx->cluster[cl][id].pos = url_ftell(pb) - avi->movi_list; idx->cluster[cl][id].len = size; idx->entry++; } put_buffer(pb, tag, 4); put_le32(pb, size); put_buffer(pb, pkt->data, size); if (size & 1) put_byte(pb, 0); put_flush_packet(pb); return 0; } static int avi_write_trailer(AVFormatContext *s) { AVIContext *avi = s->priv_data; ByteIOContext *pb = s->pb; int res = 0; int i, j, n, nb_frames; int64_t file_size; if (!url_is_streamed(pb)){ if (avi->riff_id == 1) { ff_end_tag(pb, avi->movi_list); res = avi_write_idx1(s); ff_end_tag(pb, avi->riff_start); } else { avi_write_ix(s); ff_end_tag(pb, avi->movi_list); ff_end_tag(pb, avi->riff_start); file_size = url_ftell(pb); url_fseek(pb, avi->odml_list - 8, SEEK_SET); put_tag(pb, "LIST"); /* Making this AVI OpenDML one */ url_fskip(pb, 16); for (n=nb_frames=0;n<s->nb_streams;n++) { AVCodecContext *stream = s->streams[n]->codec; AVIStream *avist= s->streams[n]->priv_data; if (stream->codec_type == AVMEDIA_TYPE_VIDEO) { if (nb_frames < avist->packet_count) nb_frames = avist->packet_count; } else { if (stream->codec_id == CODEC_ID_MP2 || stream->codec_id == CODEC_ID_MP3) { nb_frames += avist->packet_count; } } } put_le32(pb, nb_frames); url_fseek(pb, file_size, SEEK_SET); avi_write_counters(s, avi->riff_id); } } put_flush_packet(pb); for (i=0; i<s->nb_streams; i++) { AVIStream *avist= s->streams[i]->priv_data; for (j=0; j<avist->indexes.ents_allocated/AVI_INDEX_CLUSTER_SIZE; j++) av_free(avist->indexes.cluster[j]); av_freep(&avist->indexes.cluster); avist->indexes.ents_allocated = avist->indexes.entry = 0; } return res; } AVOutputFormat avi_muxer = { "avi", NULL_IF_CONFIG_SMALL("AVI format"), "video/x-msvideo", "avi", sizeof(AVIContext), CODEC_ID_MP2, CODEC_ID_MPEG4, avi_write_header, 
avi_write_packet, avi_write_trailer, .codec_tag= (const AVCodecTag* const []){ff_codec_bmp_tags, ff_codec_wav_tags, 0}, .flags= AVFMT_VARIABLE_FPS, .metadata_conv = ff_avi_metadata_conv, };
123linslouis-android-video-cutter
jni/libavformat/avienc.c
C
asf20
21,167
/* * SSA/ASS muxer * Copyright (c) 2008 Michael Niedermayer * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avformat.h" typedef struct ASSContext{ unsigned int extra_index; }ASSContext; static int write_header(AVFormatContext *s) { ASSContext *ass = s->priv_data; AVCodecContext *avctx= s->streams[0]->codec; uint8_t *last= NULL; if(s->nb_streams != 1 || avctx->codec_id != CODEC_ID_SSA){ av_log(s, AV_LOG_ERROR, "Exactly one ASS/SSA stream is needed.\n"); return -1; } while(ass->extra_index < avctx->extradata_size){ uint8_t *p = avctx->extradata + ass->extra_index; uint8_t *end= strchr(p, '\n'); if(!end) end= avctx->extradata + avctx->extradata_size; else end++; put_buffer(s->pb, p, end-p); ass->extra_index += end-p; if(last && !memcmp(last, "[Events]", 8)) break; last=p; } put_flush_packet(s->pb); return 0; } static int write_packet(AVFormatContext *s, AVPacket *pkt) { put_buffer(s->pb, pkt->data, pkt->size); put_flush_packet(s->pb); return 0; } static int write_trailer(AVFormatContext *s) { ASSContext *ass = s->priv_data; AVCodecContext *avctx= s->streams[0]->codec; put_buffer(s->pb, avctx->extradata + ass->extra_index, avctx->extradata_size - ass->extra_index); put_flush_packet(s->pb); return 0; } AVOutputFormat ass_muxer = { "ass", NULL_IF_CONFIG_SMALL("SSA/ASS format"), 
NULL, "ass,ssa", sizeof(ASSContext), CODEC_ID_NONE, CODEC_ID_NONE, write_header, write_packet, write_trailer, .flags = AVFMT_GLOBALHEADER | AVFMT_NOTIMESTAMPS };
123linslouis-android-video-cutter
jni/libavformat/assenc.c
C
asf20
2,439
/* * Microsoft RTP/ASF support. * Copyright (c) 2008 Ronald S. Bultje * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVFORMAT_RTPDEC_ASF_H #define AVFORMAT_RTPDEC_ASF_H #include "avformat.h" #include "rtpdec.h" /** * Parse a Windows Media Server-specific SDP line * * @param s RTSP demux context * @param line the SDP line to be parsed */ void ff_wms_parse_sdp_a_line(AVFormatContext *s, const char *p); /** * Handlers for the x-asf-pf payloads (the payload ID for RTP/ASF). * Defined and implemented in rtp_asf.c, registered in rtpdec.c. */ extern RTPDynamicProtocolHandler ff_ms_rtp_asf_pfv_handler, ff_ms_rtp_asf_pfa_handler; #endif /* AVFORMAT_RTPDEC_ASF_H */
123linslouis-android-video-cutter
jni/libavformat/rtpdec_asf.h
C
asf20
1,436
/* * MOV, 3GP, MP4 muxer RTP hinting * Copyright (c) 2010 Martin Storsjo * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "movenc.h" #include "libavutil/intreadwrite.h" int ff_mov_init_hinting(AVFormatContext *s, int index, int src_index) { MOVMuxContext *mov = s->priv_data; MOVTrack *track = &mov->tracks[index]; MOVTrack *src_track = &mov->tracks[src_index]; AVStream *src_st = s->streams[src_index]; int ret = AVERROR(ENOMEM); AVOutputFormat *rtp_format = av_guess_format("rtp", NULL, NULL); track->tag = MKTAG('r','t','p',' '); track->src_track = src_index; if (!rtp_format) { ret = AVERROR(ENOENT); goto fail; } track->enc = avcodec_alloc_context(); if (!track->enc) goto fail; track->enc->codec_type = AVMEDIA_TYPE_DATA; track->enc->codec_tag = track->tag; track->rtp_ctx = avformat_alloc_context(); if (!track->rtp_ctx) goto fail; track->rtp_ctx->oformat = rtp_format; if (!av_new_stream(track->rtp_ctx, 0)) goto fail; /* Copy stream parameters */ track->rtp_ctx->streams[0]->sample_aspect_ratio = src_st->sample_aspect_ratio; /* Remove the allocated codec context, link to the original one * instead, to give the rtp muxer access to codec parameters. 
*/ av_free(track->rtp_ctx->streams[0]->codec); track->rtp_ctx->streams[0]->codec = src_st->codec; if ((ret = url_open_dyn_packet_buf(&track->rtp_ctx->pb, RTP_MAX_PACKET_SIZE)) < 0) goto fail; ret = av_write_header(track->rtp_ctx); if (ret) goto fail; /* Copy the RTP AVStream timebase back to the hint AVStream */ track->timescale = track->rtp_ctx->streams[0]->time_base.den; /* Mark the hinted track that packets written to it should be * sent to this track for hinting. */ src_track->hint_track = index; return 0; fail: av_log(s, AV_LOG_WARNING, "Unable to initialize hinting of stream %d\n", src_index); if (track->rtp_ctx && track->rtp_ctx->pb) { uint8_t *buf; url_close_dyn_buf(track->rtp_ctx->pb, &buf); av_free(buf); } if (track->rtp_ctx && track->rtp_ctx->streams[0]) { av_metadata_free(&track->rtp_ctx->streams[0]->metadata); av_free(track->rtp_ctx->streams[0]); } if (track->rtp_ctx) { av_metadata_free(&track->rtp_ctx->metadata); av_free(track->rtp_ctx->priv_data); av_freep(&track->rtp_ctx); } av_freep(&track->enc); /* Set a default timescale, to avoid crashes in dump_format */ track->timescale = 90000; return ret; } /** * Remove the first sample from the sample queue. */ static void sample_queue_pop(HintSampleQueue *queue) { if (queue->len <= 0) return; if (queue->samples[0].own_data) av_free(queue->samples[0].data); queue->len--; memmove(queue->samples, queue->samples + 1, sizeof(HintSample)*queue->len); } /** * Empty the sample queue, releasing all memory. */ static void sample_queue_free(HintSampleQueue *queue) { int i; for (i = 0; i < queue->len; i++) if (queue->samples[i].own_data) av_free(queue->samples[i].data); av_freep(&queue->samples); queue->len = 0; queue->size = 0; } /** * Add a reference to the sample data to the sample queue. The data is * not copied. sample_queue_retain should be called before pkt->data * is reused/freed. 
*/ static void sample_queue_push(HintSampleQueue *queue, AVPacket *pkt, int sample) { /* No need to keep track of smaller samples, since describing them * with immediates is more efficient. */ if (pkt->size <= 14) return; if (!queue->samples || queue->len >= queue->size) { HintSample* samples; queue->size += 10; samples = av_realloc(queue->samples, sizeof(HintSample)*queue->size); if (!samples) return; queue->samples = samples; } queue->samples[queue->len].data = pkt->data; queue->samples[queue->len].size = pkt->size; queue->samples[queue->len].sample_number = sample; queue->samples[queue->len].offset = 0; queue->samples[queue->len].own_data = 0; queue->len++; } /** * Make local copies of all referenced sample data in the queue. */ static void sample_queue_retain(HintSampleQueue *queue) { int i; for (i = 0; i < queue->len; ) { HintSample *sample = &queue->samples[i]; if (!sample->own_data) { uint8_t* ptr = av_malloc(sample->size); if (!ptr) { /* Unable to allocate memory for this one, remove it */ memmove(queue->samples + i, queue->samples + i + 1, sizeof(HintSample)*(queue->len - i - 1)); queue->len--; continue; } memcpy(ptr, sample->data, sample->size); sample->data = ptr; sample->own_data = 1; } i++; } } /** * Find matches of needle[n_pos ->] within haystack. If a sufficiently * large match is found, matching bytes before n_pos are included * in the match, too (within the limits of the arrays). 
* * @param haystack buffer that may contain parts of needle * @param h_len length of the haystack buffer * @param needle buffer containing source data that have been used to * construct haystack * @param n_pos start position in needle used for looking for matches * @param n_len length of the needle buffer * @param match_h_offset_ptr offset of the first matching byte within haystack * @param match_n_offset_ptr offset of the first matching byte within needle * @param match_len_ptr length of the matched segment * @return 0 if a match was found, < 0 if no match was found */ static int match_segments(const uint8_t *haystack, int h_len, const uint8_t *needle, int n_pos, int n_len, int *match_h_offset_ptr, int *match_n_offset_ptr, int *match_len_ptr) { int h_pos; for (h_pos = 0; h_pos < h_len; h_pos++) { int match_len = 0; int match_h_pos, match_n_pos; /* Check how many bytes match at needle[n_pos] and haystack[h_pos] */ while (h_pos + match_len < h_len && n_pos + match_len < n_len && needle[n_pos + match_len] == haystack[h_pos + match_len]) match_len++; if (match_len <= 8) continue; /* If a sufficiently large match was found, try to expand * the matched segment backwards. */ match_h_pos = h_pos; match_n_pos = n_pos; while (match_n_pos > 0 && match_h_pos > 0 && needle[match_n_pos - 1] == haystack[match_h_pos - 1]) { match_n_pos--; match_h_pos--; match_len++; } if (match_len <= 14) continue; *match_h_offset_ptr = match_h_pos; *match_n_offset_ptr = match_n_pos; *match_len_ptr = match_len; return 0; } return -1; } /** * Look for segments in samples in the sample queue matching the data * in ptr. Samples not matching are removed from the queue. If a match * is found, the next time it will look for matches starting from the * end of the previous matched segment. 
* * @param data data to find matches for in the sample queue * @param len length of the data buffer * @param queue samples used for looking for matching segments * @param pos the offset in data of the matched segment * @param match_sample the number of the sample that contained the match * @param match_offset the offset of the matched segment within the sample * @param match_len the length of the matched segment * @return 0 if a match was found, < 0 if no match was found */ static int find_sample_match(const uint8_t *data, int len, HintSampleQueue *queue, int *pos, int *match_sample, int *match_offset, int *match_len) { while (queue->len > 0) { HintSample *sample = &queue->samples[0]; /* If looking for matches in a new sample, skip the first 5 bytes, * since they often may be modified/removed in the output packet. */ if (sample->offset == 0 && sample->size > 5) sample->offset = 5; if (match_segments(data, len, sample->data, sample->offset, sample->size, pos, match_offset, match_len) == 0) { *match_sample = sample->sample_number; /* Next time, look for matches at this offset, with a little * margin to this match. */ sample->offset = *match_offset + *match_len + 5; if (sample->offset + 10 >= sample->size) sample_queue_pop(queue); /* Not enough useful data left */ return 0; } if (sample->offset < 10 && sample->size > 20) { /* No match found from the start of the sample, * try from the middle of the sample instead. 
*/ sample->offset = sample->size/2; } else { /* No match for this sample, remove it */ sample_queue_pop(queue); } } return -1; } static void output_immediate(const uint8_t *data, int size, ByteIOContext *out, int *entries) { while (size > 0) { int len = size; if (len > 14) len = 14; put_byte(out, 1); /* immediate constructor */ put_byte(out, len); /* amount of valid data */ put_buffer(out, data, len); data += len; size -= len; for (; len < 14; len++) put_byte(out, 0); (*entries)++; } } static void output_match(ByteIOContext *out, int match_sample, int match_offset, int match_len, int *entries) { put_byte(out, 2); /* sample constructor */ put_byte(out, 0); /* track reference */ put_be16(out, match_len); put_be32(out, match_sample); put_be32(out, match_offset); put_be16(out, 1); /* bytes per block */ put_be16(out, 1); /* samples per block */ (*entries)++; } static void describe_payload(const uint8_t *data, int size, ByteIOContext *out, int *entries, HintSampleQueue *queue) { /* Describe the payload using different constructors */ while (size > 0) { int match_sample, match_offset, match_len, pos; if (find_sample_match(data, size, queue, &pos, &match_sample, &match_offset, &match_len) < 0) break; output_immediate(data, pos, out, entries); data += pos; size -= pos; output_match(out, match_sample, match_offset, match_len, entries); data += match_len; size -= match_len; } output_immediate(data, size, out, entries); } /** * Write an RTP hint (that may contain one or more RTP packets) * for the packets in data. data contains one or more packets with a * BE32 size header. 
* * @param out buffer where the hints are written * @param data buffer containing RTP packets * @param size the size of the data buffer * @param trk the MOVTrack for the hint track * @param pts pointer where the timestamp for the written RTP hint is stored * @return the number of RTP packets in the written hint */ static int write_hint_packets(ByteIOContext *out, const uint8_t *data, int size, MOVTrack *trk, int64_t *pts) { int64_t curpos; int64_t count_pos, entries_pos; int count = 0, entries; count_pos = url_ftell(out); /* RTPsample header */ put_be16(out, 0); /* packet count */ put_be16(out, 0); /* reserved */ while (size > 4) { uint32_t packet_len = AV_RB32(data); uint16_t seq; uint32_t ts; data += 4; size -= 4; if (packet_len > size || packet_len <= 12) break; if (data[1] >= 200 && data[1] <= 204) { /* RTCP packet, just skip */ data += packet_len; size -= packet_len; continue; } if (packet_len > trk->max_packet_size) trk->max_packet_size = packet_len; seq = AV_RB16(&data[2]); ts = AV_RB32(&data[4]); if (trk->prev_rtp_ts == 0) trk->prev_rtp_ts = ts; /* Unwrap the 32-bit RTP timestamp that wraps around often * into a not (as often) wrapping 64-bit timestamp. 
*/ trk->cur_rtp_ts_unwrapped += (int32_t) (ts - trk->prev_rtp_ts); trk->prev_rtp_ts = ts; if (*pts == AV_NOPTS_VALUE) *pts = trk->cur_rtp_ts_unwrapped; count++; /* RTPpacket header */ put_be32(out, 0); /* relative_time */ put_buffer(out, data, 2); /* RTP header */ put_be16(out, seq); /* RTPsequenceseed */ put_be16(out, 0); /* reserved + flags */ entries_pos = url_ftell(out); put_be16(out, 0); /* entry count */ data += 12; size -= 12; packet_len -= 12; entries = 0; /* Write one or more constructors describing the payload data */ describe_payload(data, packet_len, out, &entries, &trk->sample_queue); data += packet_len; size -= packet_len; curpos = url_ftell(out); url_fseek(out, entries_pos, SEEK_SET); put_be16(out, entries); url_fseek(out, curpos, SEEK_SET); } curpos = url_ftell(out); url_fseek(out, count_pos, SEEK_SET); put_be16(out, count); url_fseek(out, curpos, SEEK_SET); return count; } int ff_mov_add_hinted_packet(AVFormatContext *s, AVPacket *pkt, int track_index, int sample) { MOVMuxContext *mov = s->priv_data; MOVTrack *trk = &mov->tracks[track_index]; AVFormatContext *rtp_ctx = trk->rtp_ctx; uint8_t *buf = NULL; int size; ByteIOContext *hintbuf = NULL; AVPacket hint_pkt; AVPacket local_pkt; int ret = 0, count; if (!rtp_ctx) return AVERROR(ENOENT); if (!rtp_ctx->pb) return AVERROR(ENOMEM); sample_queue_push(&trk->sample_queue, pkt, sample); /* Feed the packet to the RTP muxer */ local_pkt = *pkt; local_pkt.stream_index = 0; local_pkt.pts = av_rescale_q(pkt->pts, s->streams[pkt->stream_index]->time_base, rtp_ctx->streams[0]->time_base); local_pkt.dts = av_rescale_q(pkt->dts, s->streams[pkt->stream_index]->time_base, rtp_ctx->streams[0]->time_base); av_write_frame(rtp_ctx, &local_pkt); /* Fetch the output from the RTP muxer, open a new output buffer * for next time. 
*/ size = url_close_dyn_buf(rtp_ctx->pb, &buf); if ((ret = url_open_dyn_packet_buf(&rtp_ctx->pb, RTP_MAX_PACKET_SIZE)) < 0) goto done; if (size <= 0) goto done; /* Open a buffer for writing the hint */ if ((ret = url_open_dyn_buf(&hintbuf)) < 0) goto done; av_init_packet(&hint_pkt); count = write_hint_packets(hintbuf, buf, size, trk, &hint_pkt.dts); av_freep(&buf); /* Write the hint data into the hint track */ hint_pkt.size = size = url_close_dyn_buf(hintbuf, &buf); hint_pkt.data = buf; hint_pkt.pts = hint_pkt.dts; hint_pkt.stream_index = track_index; if (pkt->flags & AV_PKT_FLAG_KEY) hint_pkt.flags |= AV_PKT_FLAG_KEY; if (count > 0) ff_mov_write_packet(s, &hint_pkt); done: av_free(buf); sample_queue_retain(&trk->sample_queue); return ret; } void ff_mov_close_hinting(MOVTrack *track) { AVFormatContext* rtp_ctx = track->rtp_ctx; uint8_t *ptr; av_freep(&track->enc); sample_queue_free(&track->sample_queue); if (!rtp_ctx) return; if (rtp_ctx->pb) { av_write_trailer(rtp_ctx); url_close_dyn_buf(rtp_ctx->pb, &ptr); av_free(ptr); } av_metadata_free(&rtp_ctx->streams[0]->metadata); av_metadata_free(&rtp_ctx->metadata); av_free(rtp_ctx->streams[0]); av_freep(&rtp_ctx); }
123linslouis-android-video-cutter
jni/libavformat/movenchint.c
C
asf20
16,937
/* * General DV muxer/demuxer * Copyright (c) 2003 Roman Shaposhnik * * Many thanks to Dan Dennedy <dan@dennedy.org> for providing wealth * of DV technical info. * * Raw DV format * Copyright (c) 2002 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVFORMAT_DV_H #define AVFORMAT_DV_H #include "avformat.h" typedef struct DVDemuxContext DVDemuxContext; DVDemuxContext* dv_init_demux(AVFormatContext* s); int dv_get_packet(DVDemuxContext*, AVPacket *); int dv_produce_packet(DVDemuxContext*, AVPacket*, uint8_t*, int); void dv_offset_reset(DVDemuxContext *c, int64_t frame_offset); typedef struct DVMuxContext DVMuxContext; DVMuxContext* dv_init_mux(AVFormatContext* s); int dv_assemble_frame(DVMuxContext *c, AVStream*, uint8_t*, int, uint8_t**); void dv_delete_mux(DVMuxContext*); #endif /* AVFORMAT_DV_H */
123linslouis-android-video-cutter
jni/libavformat/dv.h
C
asf20
1,561
/* * Creative Voice File demuxer. * Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVFORMAT_VOC_H #define AVFORMAT_VOC_H #include "avformat.h" #include "riff.h" /* for CodecTag */ typedef struct voc_dec_context { int64_t remaining_size; } VocDecContext; typedef enum voc_type { VOC_TYPE_EOF = 0x00, VOC_TYPE_VOICE_DATA = 0x01, VOC_TYPE_VOICE_DATA_CONT = 0x02, VOC_TYPE_SILENCE = 0x03, VOC_TYPE_MARKER = 0x04, VOC_TYPE_ASCII = 0x05, VOC_TYPE_REPETITION_START = 0x06, VOC_TYPE_REPETITION_END = 0x07, VOC_TYPE_EXTENDED = 0x08, VOC_TYPE_NEW_VOICE_DATA = 0x09, } VocType; extern const unsigned char ff_voc_magic[21]; extern const AVCodecTag ff_voc_codec_tags[]; int voc_get_packet(AVFormatContext *s, AVPacket *pkt, AVStream *st, int max_size); #endif /* AVFORMAT_VOC_H */
123linslouis-android-video-cutter
jni/libavformat/voc.h
C
asf20
1,684
/* * copyright (c) 2001 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVFORMAT_AVFORMAT_H #define AVFORMAT_AVFORMAT_H #define LIBAVFORMAT_VERSION_MAJOR 52 #define LIBAVFORMAT_VERSION_MINOR 64 #define LIBAVFORMAT_VERSION_MICRO 2 #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ LIBAVFORMAT_VERSION_MINOR, \ LIBAVFORMAT_VERSION_MICRO) #define LIBAVFORMAT_VERSION AV_VERSION(LIBAVFORMAT_VERSION_MAJOR, \ LIBAVFORMAT_VERSION_MINOR, \ LIBAVFORMAT_VERSION_MICRO) #define LIBAVFORMAT_BUILD LIBAVFORMAT_VERSION_INT #define LIBAVFORMAT_IDENT "Lavf" AV_STRINGIFY(LIBAVFORMAT_VERSION) /** * I return the LIBAVFORMAT_VERSION_INT constant. You got * a fucking problem with that, douchebag? */ unsigned avformat_version(void); /** * Returns the libavformat build-time configuration. */ const char *avformat_configuration(void); /** * Returns the libavformat license. */ const char *avformat_license(void); #include <time.h> #include <stdio.h> /* FILE */ #include "libavcodec/avcodec.h" #include "avio.h" struct AVFormatContext; /* * Public Metadata API. * The metadata API allows libavformat to export metadata tags to a client * application using a sequence of key/value pairs. Like all strings in FFmpeg, * metadata must be stored as UTF-8 encoded Unicode. 
Note that metadata * exported by demuxers isn't checked to be valid UTF-8 in most cases. * Important concepts to keep in mind: * 1. Keys are unique; there can never be 2 tags with the same key. This is * also meant semantically, i.e., a demuxer should not knowingly produce * several keys that are literally different but semantically identical. * E.g., key=Author5, key=Author6. In this example, all authors must be * placed in the same tag. * 2. Metadata is flat, not hierarchical; there are no subtags. If you * want to store, e.g., the email address of the child of producer Alice * and actor Bob, that could have key=alice_and_bobs_childs_email_address. * 3. Several modifiers can be applied to the tag name. This is done by * appending a dash character ('-') and the modifier name in the order * they appear in the list below -- e.g. foo-eng-sort, not foo-sort-eng. * a) language -- a tag whose value is localized for a particular language * is appended with the ISO 639-2/B 3-letter language code. * For example: Author-ger=Michael, Author-eng=Mike * The original/default language is in the unqualified "Author" tag. * A demuxer should set a default if it sets any translated tag. * b) sorting -- a modified version of a tag that should be used for * sorting will have '-sort' appended. E.g. artist="The Beatles", * artist-sort="Beatles, The". * * 4. Tag names are normally exported exactly as stored in the container to * allow lossless remuxing to the same format. For container-independent * handling of metadata, av_metadata_conv() can convert it to ffmpeg generic * format. Follows a list of generic tag names: * * album -- name of the set this work belongs to * album_artist -- main creator of the set/album, if different from artist. * e.g. "Various Artists" for compilation albums. * artist -- main creator of the work * comment -- any additional description of the file. * composer -- who composed the work, if different from artist. * copyright -- name of copyright holder. 
* date -- date when the work was created, preferably in ISO 8601. * disc -- number of a subset, e.g. disc in a multi-disc collection. * encoder -- name/settings of the software/hardware that produced the file. * encoded_by -- person/group who created the file. * filename -- original name of the file. * genre -- <self-evident>. * language -- main language in which the work is performed, preferably * in ISO 639-2 format. * performer -- artist who performed the work, if different from artist. * E.g for "Also sprach Zarathustra", artist would be "Richard * Strauss" and performer "London Philharmonic Orchestra". * publisher -- name of the label/publisher. * title -- name of the work. * track -- number of this work in the set, can be in form current/total. */ #define AV_METADATA_MATCH_CASE 1 #define AV_METADATA_IGNORE_SUFFIX 2 #define AV_METADATA_DONT_STRDUP_KEY 4 #define AV_METADATA_DONT_STRDUP_VAL 8 #define AV_METADATA_DONT_OVERWRITE 16 ///< Don't overwrite existing tags. typedef struct { char *key; char *value; }AVMetadataTag; typedef struct AVMetadata AVMetadata; typedef struct AVMetadataConv AVMetadataConv; /** * Gets a metadata element with matching key. * @param prev Set to the previous matching element to find the next. * If set to NULL the first matching element is returned. * @param flags Allows case as well as suffix-insensitive comparisons. * @return Found tag or NULL, changing key or value leads to undefined behavior. */ AVMetadataTag * av_metadata_get(AVMetadata *m, const char *key, const AVMetadataTag *prev, int flags); #if LIBAVFORMAT_VERSION_MAJOR == 52 /** * Sets the given tag in m, overwriting an existing tag. * @param key tag key to add to m (will be av_strduped) * @param value tag value to add to m (will be av_strduped) * @return >= 0 on success otherwise an error code <0 * @deprecated Use av_metadata_set2() instead. 
*/ attribute_deprecated int av_metadata_set(AVMetadata **pm, const char *key, const char *value); #endif /** * Sets the given tag in m, overwriting an existing tag. * @param key tag key to add to m (will be av_strduped depending on flags) * @param value tag value to add to m (will be av_strduped depending on flags) * @return >= 0 on success otherwise an error code <0 */ int av_metadata_set2(AVMetadata **pm, const char *key, const char *value, int flags); /** * Converts all the metadata sets from ctx according to the source and * destination conversion tables. If one of the tables is NULL, then * tags are converted to/from ffmpeg generic tag names. * @param d_conv destination tags format conversion table * @param s_conv source tags format conversion table */ void av_metadata_conv(struct AVFormatContext *ctx,const AVMetadataConv *d_conv, const AVMetadataConv *s_conv); /** * Frees all the memory allocated for an AVMetadata struct. */ void av_metadata_free(AVMetadata **m); /* packet functions */ /** * Allocates and reads the payload of a packet and initializes its * fields with default values. * * @param pkt packet * @param size desired payload size * @return >0 (read size) if OK, AVERROR_xxx otherwise */ int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size); /*************************************************/ /* fractional numbers for exact pts handling */ /** * The exact value of the fractional number is: 'val + num / den'. * num is assumed to be 0 <= num < den. */ typedef struct AVFrac { int64_t val, num, den; } AVFrac; /*************************************************/ /* input/output formats */ struct AVCodecTag; /** This structure contains the data a format has to probe a file. */ typedef struct AVProbeData { const char *filename; unsigned char *buf; /**< Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero. 
*/ int buf_size; /**< Size of buf except extra allocated bytes */ } AVProbeData; #define AVPROBE_SCORE_MAX 100 ///< maximum score, half of that is used for file-extension-based detection #define AVPROBE_PADDING_SIZE 32 ///< extra allocated bytes at the end of the probe buffer typedef struct AVFormatParameters { AVRational time_base; int sample_rate; int channels; int width; int height; enum PixelFormat pix_fmt; int channel; /**< Used to select DV channel. */ const char *standard; /**< TV standard, NTSC, PAL, SECAM */ unsigned int mpeg2ts_raw:1; /**< Force raw MPEG-2 transport stream output, if possible. */ unsigned int mpeg2ts_compute_pcr:1; /**< Compute exact PCR for each transport stream packet (only meaningful if mpeg2ts_raw is TRUE). */ unsigned int initial_pause:1; /**< Do not begin to play the stream immediately (RTSP only). */ unsigned int prealloced_context:1; #if LIBAVFORMAT_VERSION_INT < (53<<16) enum CodecID video_codec_id; enum CodecID audio_codec_id; #endif } AVFormatParameters; //! Demuxer will use url_fopen, no opened file should be provided by the caller. #define AVFMT_NOFILE 0x0001 #define AVFMT_NEEDNUMBER 0x0002 /**< Needs '%d' in filename. */ #define AVFMT_SHOW_IDS 0x0008 /**< Show format stream IDs numbers. */ #define AVFMT_RAWPICTURE 0x0020 /**< Format wants AVPicture structure for raw picture data. */ #define AVFMT_GLOBALHEADER 0x0040 /**< Format wants global header. */ #define AVFMT_NOTIMESTAMPS 0x0080 /**< Format does not need / have any timestamps. */ #define AVFMT_GENERIC_INDEX 0x0100 /**< Use generic index building code. */ #define AVFMT_TS_DISCONT 0x0200 /**< Format allows timestamp discontinuities. */ #define AVFMT_VARIABLE_FPS 0x0400 /**< Format allows variable fps. */ #define AVFMT_NODIMENSIONS 0x0800 /**< Format does not need width/height */ typedef struct AVOutputFormat { const char *name; /** * Descriptive name for the format, meant to be more human-readable * than name. 
You should use the NULL_IF_CONFIG_SMALL() macro * to define it. */ const char *long_name; const char *mime_type; const char *extensions; /**< comma-separated filename extensions */ /** size of private data so that it can be allocated in the wrapper */ int priv_data_size; /* output support */ enum CodecID audio_codec; /**< default audio codec */ enum CodecID video_codec; /**< default video codec */ int (*write_header)(struct AVFormatContext *); int (*write_packet)(struct AVFormatContext *, AVPacket *pkt); int (*write_trailer)(struct AVFormatContext *); /** can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER */ int flags; /** Currently only used to set pixel format if not YUV420P. */ int (*set_parameters)(struct AVFormatContext *, AVFormatParameters *); int (*interleave_packet)(struct AVFormatContext *, AVPacket *out, AVPacket *in, int flush); /** * List of supported codec_id-codec_tag pairs, ordered by "better * choice first". The arrays are all terminated by CODEC_ID_NONE. */ const struct AVCodecTag * const *codec_tag; enum CodecID subtitle_codec; /**< default subtitle codec */ const AVMetadataConv *metadata_conv; /* private fields */ struct AVOutputFormat *next; } AVOutputFormat; typedef struct AVInputFormat { const char *name; /** * Descriptive name for the format, meant to be more human-readable * than name. You should use the NULL_IF_CONFIG_SMALL() macro * to define it. */ const char *long_name; /** Size of private data so that it can be allocated in the wrapper. */ int priv_data_size; /** * Tell if a given file has a chance of being parsed as this format. * The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes * big so you do not have to check for that unless you need more. */ int (*read_probe)(AVProbeData *); /** Read the format header and initialize the AVFormatContext structure. Return 0 if OK. 'ap' if non-NULL contains additional parameters. Only used in raw format right now. 
'av_new_stream' should be called to create new streams. */ int (*read_header)(struct AVFormatContext *, AVFormatParameters *ap); /** Read one packet and put it in 'pkt'. pts and flags are also set. 'av_new_stream' can be called only if the flag AVFMTCTX_NOHEADER is used. @return 0 on success, < 0 on error. When returning an error, pkt must not have been allocated or must be freed before returning */ int (*read_packet)(struct AVFormatContext *, AVPacket *pkt); /** Close the stream. The AVFormatContext and AVStreams are not freed by this function */ int (*read_close)(struct AVFormatContext *); #if LIBAVFORMAT_VERSION_MAJOR < 53 /** * Seek to a given timestamp relative to the frames in * stream component stream_index. * @param stream_index Must not be -1. * @param flags Selects which direction should be preferred if no exact * match is available. * @return >= 0 on success (but not necessarily the new offset) */ int (*read_seek)(struct AVFormatContext *, int stream_index, int64_t timestamp, int flags); #endif /** * Gets the next timestamp in stream[stream_index].time_base units. * @return the timestamp or AV_NOPTS_VALUE if an error occurred */ int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index, int64_t *pos, int64_t pos_limit); /** Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER. */ int flags; /** If extensions are defined, then no probe is done. You should usually not use extension format guessing because it is not reliable enough */ const char *extensions; /** General purpose read-only value that the format can use. */ int value; /** Starts/resumes playing - only meaningful if using a network-based format (RTSP). */ int (*read_play)(struct AVFormatContext *); /** Pauses playing - only meaningful if using a network-based format (RTSP). */ int (*read_pause)(struct AVFormatContext *); const struct AVCodecTag * const *codec_tag; /** * Seeks to timestamp ts. 
* Seeking will be done so that the point from which all active streams * can be presented successfully will be closest to ts and within min/max_ts. * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. */ int (*read_seek2)(struct AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); const AVMetadataConv *metadata_conv; /* private fields */ struct AVInputFormat *next; } AVInputFormat; enum AVStreamParseType { AVSTREAM_PARSE_NONE, AVSTREAM_PARSE_FULL, /**< full parsing and repack */ AVSTREAM_PARSE_HEADERS, /**< Only parse headers, do not repack. */ AVSTREAM_PARSE_TIMESTAMPS, /**< full parsing and interpolation of timestamps for frames not starting on a packet boundary */ }; typedef struct AVIndexEntry { int64_t pos; int64_t timestamp; #define AVINDEX_KEYFRAME 0x0001 int flags:2; int size:30; //Yeah, trying to keep the size of this small to reduce memory requirements (it is 24 vs. 32 bytes due to possible 8-byte alignment). int min_distance; /**< Minimum distance between this and the previous keyframe, used to avoid unneeded searching. */ } AVIndexEntry; #define AV_DISPOSITION_DEFAULT 0x0001 #define AV_DISPOSITION_DUB 0x0002 #define AV_DISPOSITION_ORIGINAL 0x0004 #define AV_DISPOSITION_COMMENT 0x0008 #define AV_DISPOSITION_LYRICS 0x0010 #define AV_DISPOSITION_KARAOKE 0x0020 /** * Stream structure. * New fields can be added to the end with minor version bumps. * Removal, reordering and changes to existing fields require a major * version bump. * sizeof(AVStream) must not be used outside libav*. */ typedef struct AVStream { int index; /**< stream index in AVFormatContext */ int id; /**< format-specific stream ID */ AVCodecContext *codec; /**< codec context */ /** * Real base framerate of the stream. * This is the lowest framerate with which all timestamps can be * represented accurately (it is the least common multiple of all * framerates in the stream). Note, this value is just a guess! 
* For example, if the time base is 1/90000 and all frames have either * approximately 3600 or 1800 timer ticks, then r_frame_rate will be 50/1. */ AVRational r_frame_rate; void *priv_data; /* internal data used in av_find_stream_info() */ int64_t first_dts; /** encoding: pts generation when outputting stream */ struct AVFrac pts; /** * This is the fundamental unit of time (in seconds) in terms * of which frame timestamps are represented. For fixed-fps content, * time base should be 1/framerate and timestamp increments should be 1. */ AVRational time_base; int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */ /* ffmpeg.c private use */ int stream_copy; /**< If set, just copy stream. */ enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed. //FIXME move stuff to a flags field? /** Quality, as it has been removed from AVCodecContext and put in AVVideoFrame. * MN: dunno if that is the right place for it */ float quality; /** * Decoding: pts of the first frame of the stream, in stream time base. * Only set this if you are absolutely 100% sure that the value you set * it to really is the pts of the first frame. * This may be undefined (AV_NOPTS_VALUE). * @note The ASF header does NOT contain a correct start_time the ASF * demuxer must NOT set this. */ int64_t start_time; /** * Decoding: duration of the stream, in stream time base. * If a source file does not specify a duration, but does specify * a bitrate, this value will be estimated from bitrate and file size. */ int64_t duration; #if LIBAVFORMAT_VERSION_INT < (53<<16) char language[4]; /** ISO 639-2/B 3-letter language code (empty string if undefined) */ #endif /* av_read_frame() support */ enum AVStreamParseType need_parsing; struct AVCodecParserContext *parser; int64_t cur_dts; int last_IP_duration; int64_t last_IP_pts; /* av_seek_frame() support */ AVIndexEntry *index_entries; /**< Only used if the format does not support seeking natively. 
*/ int nb_index_entries; unsigned int index_entries_allocated_size; int64_t nb_frames; ///< number of frames in this stream if known or 0 #if LIBAVFORMAT_VERSION_INT < (53<<16) int64_t unused[4+1]; char *filename; /**< source filename of the stream */ #endif int disposition; /**< AV_DISPOSITION_* bit field */ AVProbeData probe_data; #define MAX_REORDER_DELAY 16 int64_t pts_buffer[MAX_REORDER_DELAY+1]; /** * sample aspect ratio (0 if unknown) * - encoding: Set by user. * - decoding: Set by libavformat. */ AVRational sample_aspect_ratio; AVMetadata *metadata; /* av_read_frame() support */ const uint8_t *cur_ptr; int cur_len; AVPacket cur_pkt; // Timestamp generation support: /** * Timestamp corresponding to the last dts sync point. * * Initialized when AVCodecParserContext.dts_sync_point >= 0 and * a DTS is received from the underlying container. Otherwise set to * AV_NOPTS_VALUE by default. */ int64_t reference_dts; /** * Number of packets to buffer for codec probing * NOT PART OF PUBLIC API */ #define MAX_PROBE_PACKETS 2500 int probe_packets; /** * last packet in packet_buffer for this stream when muxing. * used internally, NOT PART OF PUBLIC API, dont read or write from outside of libav* */ struct AVPacketList *last_in_packet_buffer; /** * Average framerate */ AVRational avg_frame_rate; /** * Number of frames that have been demuxed during av_find_stream_info() */ int codec_info_nb_frames; } AVStream; #define AV_PROGRAM_RUNNING 1 /** * New fields can be added to the end with minor version bumps. * Removal, reordering and changes to existing fields require a major * version bump. * sizeof(AVProgram) must not be used outside libav*. 
*/ typedef struct AVProgram { int id; #if LIBAVFORMAT_VERSION_INT < (53<<16) char *provider_name; ///< network name for DVB streams char *name; ///< service name for DVB streams #endif int flags; enum AVDiscard discard; ///< selects which program to discard and which to feed to the caller unsigned int *stream_index; unsigned int nb_stream_indexes; AVMetadata *metadata; } AVProgram; #define AVFMTCTX_NOHEADER 0x0001 /**< signal that no header is present (streams are added dynamically) */ typedef struct AVChapter { int id; ///< unique ID to identify the chapter AVRational time_base; ///< time base in which the start/end timestamps are specified int64_t start, end; ///< chapter start/end time in time_base units #if LIBAVFORMAT_VERSION_INT < (53<<16) char *title; ///< chapter title #endif AVMetadata *metadata; } AVChapter; #if LIBAVFORMAT_VERSION_MAJOR < 53 #define MAX_STREAMS 20 #else #define MAX_STREAMS 100 #endif /** * Format I/O context. * New fields can be added to the end with minor version bumps. * Removal, reordering and changes to existing fields require a major * version bump. * sizeof(AVFormatContext) must not be used outside libav*. */ typedef struct AVFormatContext { const AVClass *av_class; /**< Set by avformat_alloc_context. */ /* Can only be iformat or oformat, not both at the same time. */ struct AVInputFormat *iformat; struct AVOutputFormat *oformat; void *priv_data; ByteIOContext *pb; unsigned int nb_streams; AVStream *streams[MAX_STREAMS]; char filename[1024]; /**< input or output filename */ /* stream info */ int64_t timestamp; #if LIBAVFORMAT_VERSION_INT < (53<<16) char title[512]; char author[512]; char copyright[512]; char comment[512]; char album[512]; int year; /**< ID3 year, 0 if none */ int track; /**< track number, 0 if none */ char genre[32]; /**< ID3 genre */ #endif int ctx_flags; /**< Format-specific flags, see AVFMTCTX_xx */ /* private data for pts handling (do not modify directly). 
*/ /** This buffer is only needed when packets were already buffered but not decoded, for example to get the codec parameters in MPEG streams. */ struct AVPacketList *packet_buffer; /** Decoding: position of the first frame of the component, in AV_TIME_BASE fractional seconds. NEVER set this value directly: It is deduced from the AVStream values. */ int64_t start_time; /** Decoding: duration of the stream, in AV_TIME_BASE fractional seconds. Only set this value if you know none of the individual stream durations and also dont set any of them. This is deduced from the AVStream values if not set. */ int64_t duration; /** decoding: total file size, 0 if unknown */ int64_t file_size; /** Decoding: total stream bitrate in bit/s, 0 if not available. Never set it directly if the file_size and the duration are known as FFmpeg can compute it automatically. */ int bit_rate; /* av_read_frame() support */ AVStream *cur_st; #if LIBAVFORMAT_VERSION_INT < (53<<16) const uint8_t *cur_ptr_deprecated; int cur_len_deprecated; AVPacket cur_pkt_deprecated; #endif /* av_seek_frame() support */ int64_t data_offset; /** offset of the first packet */ int index_built; int mux_rate; unsigned int packet_size; int preload; int max_delay; #define AVFMT_NOOUTPUTLOOP -1 #define AVFMT_INFINITEOUTPUTLOOP 0 /** number of times to loop output in formats that support it */ int loop_output; int flags; #define AVFMT_FLAG_GENPTS 0x0001 ///< Generate missing pts even if it requires parsing future frames. #define AVFMT_FLAG_IGNIDX 0x0002 ///< Ignore index. #define AVFMT_FLAG_NONBLOCK 0x0004 ///< Do not block when reading packets from input. 
#define AVFMT_FLAG_IGNDTS 0x0008 ///< Ignore DTS on frames that contain both DTS & PTS #define AVFMT_FLAG_NOFILLIN 0x0010 ///< Do not infer any values from other values, just return what is stored in the container #define AVFMT_FLAG_NOPARSE 0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. Also seeking to frames can not work if parsing to find frame boundaries has been disabled #define AVFMT_FLAG_RTP_HINT 0x0040 ///< Add RTP hinting to the output file int loop_input; /** decoding: size of data to probe; encoding: unused. */ unsigned int probesize; /** * Maximum time (in AV_TIME_BASE units) during which the input should * be analyzed in av_find_stream_info(). */ int max_analyze_duration; const uint8_t *key; int keylen; unsigned int nb_programs; AVProgram **programs; /** * Forced video codec_id. * Demuxing: Set by user. */ enum CodecID video_codec_id; /** * Forced audio codec_id. * Demuxing: Set by user. */ enum CodecID audio_codec_id; /** * Forced subtitle codec_id. * Demuxing: Set by user. */ enum CodecID subtitle_codec_id; /** * Maximum amount of memory in bytes to use for the index of each stream. * If the index exceeds this size, entries will be discarded as * needed to maintain a smaller size. This can lead to slower or less * accurate seeking (depends on demuxer). * Demuxers for which a full in-memory index is mandatory will ignore * this. * muxing : unused * demuxing: set by user */ unsigned int max_index_size; /** * Maximum amount of memory in bytes to use for buffering frames * obtained from realtime capture devices. */ unsigned int max_picture_buffer; unsigned int nb_chapters; AVChapter **chapters; /** * Flags to enable debugging. */ int debug; #define FF_FDEBUG_TS 0x0001 /** * Raw packets from the demuxer, prior to parsing and decoding. * This buffer is used for buffering packets until the codec can * be identified, as parsing cannot be done without knowing the * codec. 
*/ struct AVPacketList *raw_packet_buffer; struct AVPacketList *raw_packet_buffer_end; struct AVPacketList *packet_buffer_end; AVMetadata *metadata; /** * Remaining size available for raw_packet_buffer, in bytes. * NOT PART OF PUBLIC API */ #define RAW_PACKET_BUFFER_SIZE 2500000 int raw_packet_buffer_remaining_size; /** * Start time of the stream in real world time, in microseconds * since the unix epoch (00:00 1st January 1970). That is, pts=0 * in the stream was captured at this real world time. * - encoding: Set by user. * - decoding: Unused. */ int64_t start_time_realtime; } AVFormatContext; typedef struct AVPacketList { AVPacket pkt; struct AVPacketList *next; } AVPacketList; #if LIBAVFORMAT_VERSION_INT < (53<<16) extern AVInputFormat *first_iformat; extern AVOutputFormat *first_oformat; #endif /** * If f is NULL, returns the first registered input format, * if f is non-NULL, returns the next registered input format after f * or NULL if f is the last one. */ AVInputFormat *av_iformat_next(AVInputFormat *f); /** * If f is NULL, returns the first registered output format, * if f is non-NULL, returns the next registered output format after f * or NULL if f is the last one. */ AVOutputFormat *av_oformat_next(AVOutputFormat *f); enum CodecID av_guess_image2_codec(const char *filename); /* XXX: Use automatic init with either ELF sections or C file parser */ /* modules. */ /* utils.c */ void av_register_input_format(AVInputFormat *format); void av_register_output_format(AVOutputFormat *format); #if LIBAVFORMAT_VERSION_MAJOR < 53 attribute_deprecated AVOutputFormat *guess_stream_format(const char *short_name, const char *filename, const char *mime_type); /** * @deprecated Use av_guess_format() instead. 
*/ attribute_deprecated AVOutputFormat *guess_format(const char *short_name, const char *filename, const char *mime_type); #endif /** * Returns the output format in the list of registered output formats * which best matches the provided parameters, or returns NULL if * there is no match. * * @param short_name if non-NULL checks if short_name matches with the * names of the registered formats * @param filename if non-NULL checks if filename terminates with the * extensions of the registered formats * @param mime_type if non-NULL checks if mime_type matches with the * MIME type of the registered formats */ AVOutputFormat *av_guess_format(const char *short_name, const char *filename, const char *mime_type); /** * Guesses the codec ID based upon muxer and filename. */ enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name, const char *filename, const char *mime_type, enum AVMediaType type); /** * Sends a nice hexadecimal dump of a buffer to the specified file stream. * * @param f The file stream pointer where the dump should be sent to. * @param buf buffer * @param size buffer size * * @see av_hex_dump_log, av_pkt_dump, av_pkt_dump_log */ void av_hex_dump(FILE *f, uint8_t *buf, int size); /** * Sends a nice hexadecimal dump of a buffer to the log. * * @param avcl A pointer to an arbitrary struct of which the first field is a * pointer to an AVClass struct. * @param level The importance level of the message, lower values signifying * higher importance. * @param buf buffer * @param size buffer size * * @see av_hex_dump, av_pkt_dump, av_pkt_dump_log */ void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size); /** * Sends a nice dump of a packet to the specified file stream. * * @param f The file stream pointer where the dump should be sent to. * @param pkt packet to dump * @param dump_payload True if the payload must be displayed, too. */ void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload); /** * Sends a nice dump of a packet to the log. 
* * @param avcl A pointer to an arbitrary struct of which the first field is a * pointer to an AVClass struct. * @param level The importance level of the message, lower values signifying * higher importance. * @param pkt packet to dump * @param dump_payload True if the payload must be displayed, too. */ void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload); /** * Initializes libavformat and registers all the muxers, demuxers and * protocols. If you do not call this function, then you can select * exactly which formats you want to support. * * @see av_register_input_format() * @see av_register_output_format() * @see av_register_protocol() */ void av_register_all(void); /** * Gets the CodecID for the given codec tag tag. * If no codec id is found returns CODEC_ID_NONE. * * @param tags list of supported codec_id-codec_tag pairs, as stored * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag */ enum CodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned int tag); /** * Gets the codec tag for the given codec id id. * If no codec tag is found returns 0. * * @param tags list of supported codec_id-codec_tag pairs, as stored * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag */ unsigned int av_codec_get_tag(const struct AVCodecTag * const *tags, enum CodecID id); /* media file input */ /** * Finds AVInputFormat based on the short name of the input format. */ AVInputFormat *av_find_input_format(const char *short_name); /** * Guesses the file format. * * @param is_opened Whether the file is already opened; determines whether * demuxers with or without AVFMT_NOFILE are probed. */ AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened); /** * Guesses the file format. * * @param is_opened Whether the file is already opened; determines whether * demuxers with or without AVFMT_NOFILE are probed. 
* @param score_max A probe score larger that this is required to accept a * detection, the variable is set to the actual detection * score afterwards. * If the score is <= AVPROBE_SCORE_MAX / 4 it is recommended * to retry with a larger probe buffer. */ AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max); /** * Allocates all the structures needed to read an input stream. * This does not open the needed codecs for decoding the stream[s]. */ int av_open_input_stream(AVFormatContext **ic_ptr, ByteIOContext *pb, const char *filename, AVInputFormat *fmt, AVFormatParameters *ap); /** * Opens a media file as input. The codecs are not opened. Only the file * header (if present) is read. * * @param ic_ptr The opened media file handle is put here. * @param filename filename to open * @param fmt If non-NULL, force the file format to use. * @param buf_size optional buffer size (zero if default is OK) * @param ap Additional parameters needed when opening the file * (NULL if default). * @return 0 if OK, AVERROR_xxx otherwise */ int av_open_input_file(AVFormatContext **ic_ptr, const char *filename, AVInputFormat *fmt, int buf_size, AVFormatParameters *ap); #if LIBAVFORMAT_VERSION_MAJOR < 53 /** * @deprecated Use avformat_alloc_context() instead. */ attribute_deprecated AVFormatContext *av_alloc_format_context(void); #endif /** * Allocates an AVFormatContext. * Can be freed with av_free() but do not forget to free everything you * explicitly allocated as well! */ AVFormatContext *avformat_alloc_context(void); /** * Reads packets of a media file to get stream information. This * is useful for file formats with no headers such as MPEG. This * function also computes the real framerate in case of MPEG-2 repeat * frame mode. * The logical file position is not changed by this function; * examined packets may be buffered for later processing. 
* * @param ic media file handle * @return >=0 if OK, AVERROR_xxx on error * @todo Let the user decide somehow what information is needed so that * we do not waste time getting stuff the user does not need. */ int av_find_stream_info(AVFormatContext *ic); /** * Reads a transport packet from a media file. * * This function is obsolete and should never be used. * Use av_read_frame() instead. * * @param s media file handle * @param pkt is filled * @return 0 if OK, AVERROR_xxx on error */ int av_read_packet(AVFormatContext *s, AVPacket *pkt); /** * Returns the next frame of a stream. * * The returned packet is valid * until the next av_read_frame() or until av_close_input_file() and * must be freed with av_free_packet. For video, the packet contains * exactly one frame. For audio, it contains an integer number of * frames if each frame has a known fixed size (e.g. PCM or ADPCM * data). If the audio frames have a variable size (e.g. MPEG audio), * then it contains one frame. * * pkt->pts, pkt->dts and pkt->duration are always set to correct * values in AVStream.time_base units (and guessed if the format cannot * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format * has B-frames, so it is better to rely on pkt->dts if you do not * decompress the payload. * * @return 0 if OK, < 0 on error or end of file */ int av_read_frame(AVFormatContext *s, AVPacket *pkt); /** * Seeks to the keyframe at timestamp. * 'timestamp' in 'stream_index'. * @param stream_index If stream_index is (-1), a default * stream is selected, and timestamp is automatically converted * from AV_TIME_BASE units to the stream specific time_base. * @param timestamp Timestamp in AVStream.time_base units * or, if no stream is specified, in AV_TIME_BASE units. * @param flags flags which select direction and seeking mode * @return >= 0 on success */ int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags); /** * Seeks to timestamp ts. 
* Seeking will be done so that the point from which all active streams * can be presented successfully will be closest to ts and within min/max_ts. * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. * * If flags contain AVSEEK_FLAG_BYTE, then all timestamps are in bytes and * are the file position (this may not be supported by all demuxers). * If flags contain AVSEEK_FLAG_FRAME, then all timestamps are in frames * in the stream with stream_index (this may not be supported by all demuxers). * Otherwise all timestamps are in units of the stream selected by stream_index * or if stream_index is -1, in AV_TIME_BASE units. * If flags contain AVSEEK_FLAG_ANY, then non-keyframes are treated as * keyframes (this may not be supported by all demuxers). * * @param stream_index index of the stream which is used as time base reference * @param min_ts smallest acceptable timestamp * @param ts target timestamp * @param max_ts largest acceptable timestamp * @param flags flags * @return >=0 on success, error code otherwise * * @NOTE This is part of the new seek API which is still under construction. * Thus do not use this yet. It may change at any time, do not expect * ABI compatibility yet! */ int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); /** * Starts playing a network-based stream (e.g. RTSP stream) at the * current position. */ int av_read_play(AVFormatContext *s); /** * Pauses a network-based stream (e.g. RTSP stream). * * Use av_read_play() to resume it. */ int av_read_pause(AVFormatContext *s); /** * Frees a AVFormatContext allocated by av_open_input_stream. * @param s context to free */ void av_close_input_stream(AVFormatContext *s); /** * Closes a media file (but not its codecs). * * @param s media file handle */ void av_close_input_file(AVFormatContext *s); /** * Adds a new stream to a media file. * * Can only be called in the read_header() function. 
If the flag * AVFMTCTX_NOHEADER is in the format context, then new streams * can be added in read_packet too. * * @param s media file handle * @param id file-format-dependent stream ID */ AVStream *av_new_stream(AVFormatContext *s, int id); AVProgram *av_new_program(AVFormatContext *s, int id); /** * Adds a new chapter. * This function is NOT part of the public API * and should ONLY be used by demuxers. * * @param s media file handle * @param id unique ID for this chapter * @param start chapter start time in time_base units * @param end chapter end time in time_base units * @param title chapter title * * @return AVChapter or NULL on error */ AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title); /** * Sets the pts for a given stream. * * @param s stream * @param pts_wrap_bits number of bits effectively used by the pts * (used for wrap control, 33 is the value for MPEG) * @param pts_num numerator to convert to seconds (MPEG: 1) * @param pts_den denominator to convert to seconds (MPEG: 90000) */ void av_set_pts_info(AVStream *s, int pts_wrap_bits, unsigned int pts_num, unsigned int pts_den); #define AVSEEK_FLAG_BACKWARD 1 ///< seek backward #define AVSEEK_FLAG_BYTE 2 ///< seeking based on position in bytes #define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non-keyframes #define AVSEEK_FLAG_FRAME 8 ///< seeking based on frame number int av_find_default_stream_index(AVFormatContext *s); /** * Gets the index for a specific timestamp. 
* @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond * to the timestamp which is <= the requested one, if backward * is 0, then it will be >= * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise * @return < 0 if no such timestamp could be found */ int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags); /** * Ensures the index uses less memory than the maximum specified in * AVFormatContext.max_index_size by discarding entries if it grows * too large. * This function is not part of the public API and should only be called * by demuxers. */ void ff_reduce_index(AVFormatContext *s, int stream_index); /** * Adds an index entry into a sorted list. Updates the entry if the list * already contains it. * * @param timestamp timestamp in the time base of the given stream */ int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp, int size, int distance, int flags); /** * Does a binary search using av_index_search_timestamp() and * AVCodec.read_timestamp(). * This is not supposed to be called directly by a user application, * but by demuxers. * @param target_ts target timestamp in the time base of the given stream * @param stream_index stream number */ int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags); /** * Updates cur_dts of all streams based on the given timestamp and AVStream. * * Stream ref_st unchanged, others set cur_dts in their native time base. * Only needed for timestamp wrapping or if (dts not set and pts!=dts). * @param timestamp new dts expressed in time_base of param ref_st * @param ref_st reference stream giving time_base of param timestamp */ void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp); /** * Does a binary search using read_timestamp(). * This is not supposed to be called directly by a user application, * but by demuxers. 
* @param target_ts target timestamp in the time base of the given stream * @param stream_index stream number */ int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )); /** media file output */ int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap); /** * Allocates the stream private data and writes the stream header to an * output media file. * * @param s media file handle * @return 0 if OK, AVERROR_xxx on error */ int av_write_header(AVFormatContext *s); /** * Writes a packet to an output media file. * * The packet shall contain one audio or video frame. * The packet must be correctly interleaved according to the container * specification, if not then av_interleaved_write_frame must be used. * * @param s media file handle * @param pkt The packet, which contains the stream_index, buf/buf_size, dts/pts, ... * @return < 0 on error, = 0 if OK, 1 if end of stream wanted */ int av_write_frame(AVFormatContext *s, AVPacket *pkt); /** * Writes a packet to an output media file ensuring correct interleaving. * * The packet must contain one audio or video frame. * If the packets are already correctly interleaved, the application should * call av_write_frame() instead as it is slightly faster. It is also important * to keep in mind that completely non-interleaved input will need huge amounts * of memory to interleave with this, so it is preferable to interleave at the * demuxer level. * * @param s media file handle * @param pkt The packet, which contains the stream_index, buf/buf_size, dts/pts, ... * @return < 0 on error, = 0 if OK, 1 if end of stream wanted */ int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt); /** * Interleaves a packet per dts in an output media file. 
* * Packets with pkt->destruct == av_destruct_packet will be freed inside this * function, so they cannot be used after it. Note that calling av_free_packet() * on them is still safe. * * @param s media file handle * @param out the interleaved packet will be output here * @param in the input packet * @param flush 1 if no further packets are available as input and all * remaining packets should be output * @return 1 if a packet was output, 0 if no packet could be output, * < 0 if an error occurred */ int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush); /** * Writes the stream trailer to an output media file and frees the * file private data. * * May only be called after a successful call to av_write_header. * * @param s media file handle * @return 0 if OK, AVERROR_xxx on error */ int av_write_trailer(AVFormatContext *s); void dump_format(AVFormatContext *ic, int index, const char *url, int is_output); #if LIBAVFORMAT_VERSION_MAJOR < 53 /** * Parses width and height out of string str. * @deprecated Use av_parse_video_frame_size instead. */ attribute_deprecated int parse_image_size(int *width_ptr, int *height_ptr, const char *str); /** * Converts framerate from a string to a fraction. * @deprecated Use av_parse_video_frame_rate instead. */ attribute_deprecated int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg); #endif /** * Parses datestr and returns a corresponding number of microseconds. * @param datestr String representing a date or a duration. * - If a date the syntax is: * @code * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]} * @endcode * Time is local time unless Z is appended, in which case it is * interpreted as UTC. * If the year-month-day part is not specified it takes the current * year-month-day. * Returns the number of microseconds since 1st of January, 1970 up to * the time of the parsed date or INT64_MIN if datestr cannot be * successfully parsed. 
* - If a duration the syntax is: * @code * [-]HH[:MM[:SS[.m...]]] * [-]S+[.m...] * @endcode * Returns the number of microseconds contained in a time interval * with the specified duration or INT64_MIN if datestr cannot be * successfully parsed. * @param duration Flag which tells how to interpret datestr, if * not zero datestr is interpreted as a duration, otherwise as a * date. */ int64_t parse_date(const char *datestr, int duration); /** Gets the current time in microseconds. */ int64_t av_gettime(void); /* ffm-specific for ffserver */ #define FFM_PACKET_SIZE 4096 int64_t ffm_read_write_index(int fd); int ffm_write_write_index(int fd, int64_t pos); void ffm_set_write_index(AVFormatContext *s, int64_t pos, int64_t file_size); /** * Attempts to find a specific tag in a URL. * * syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done. * Return 1 if found. */ int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info); /** * Returns in 'buf' the path with '%d' replaced by a number. * * Also handles the '%0nd' format where 'n' is the total number * of digits and '%%'. * * @param buf destination buffer * @param buf_size destination buffer size * @param path numbered sequence string * @param number frame number * @return 0 if OK, -1 on format error */ int av_get_frame_filename(char *buf, int buf_size, const char *path, int number); /** * Checks whether filename actually is a numbered sequence generator. * * @param filename possible numbered sequence string * @return 1 if a valid numbered sequence string, 0 otherwise */ int av_filename_number_test(const char *filename); /** * Generates an SDP for an RTP session. * * @param ac array of AVFormatContexts describing the RTP streams. If the * array is composed by only one context, such context can contain * multiple AVStreams (one AVStream per RTP stream). Otherwise, * all the contexts in the array (an AVCodecContext per RTP stream) * must contain only one AVStream. 
* @param n_files number of AVCodecContexts contained in ac * @param buff buffer where the SDP will be stored (must be allocated by * the caller) * @param size the size of the buffer * @return 0 if OK, AVERROR_xxx on error */ int avf_sdp_create(AVFormatContext *ac[], int n_files, char *buff, int size); /** * Returns a positive value if the given filename has one of the given * extensions, 0 otherwise. * * @param extensions a comma-separated list of filename extensions */ int av_match_ext(const char *filename, const char *extensions); #endif /* AVFORMAT_AVFORMAT_H */
123linslouis-android-video-cutter
jni/libavformat/avformat.h
C
asf20
50,662
/*
 * HTTP authentication
 * Copyright (c) 2010 Martin Storsjo
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "httpauth.h"
#include "libavutil/base64.h"
#include "libavutil/avstring.h"
#include "internal.h"
#include "libavutil/random_seed.h"
#include "libavutil/md5.h"
#include "avformat.h"
#include <ctype.h>
#include <stdarg.h>

/**
 * Parse a list of key=value pairs (as found in WWW-Authenticate and
 * Authentication-Info headers). Values may be double-quoted, with
 * backslash escapes inside the quotes; unquoted values end at
 * whitespace or a comma.
 *
 * For every pair, callback_get_buf is invoked with the key (including
 * the trailing '=') and selects a destination buffer in state; the
 * value is then copied into it, truncated to the buffer size and
 * always NUL-terminated. Keys the callback does not recognize are
 * skipped (dest stays NULL).
 */
static void parse_key_value(const char *params,
                            void (*callback_get_buf)(HTTPAuthState *state,
                            const char *key, int key_len,
                            char **dest, int *dest_len),
                            HTTPAuthState *state)
{
    const char *ptr = params;

    /* Parse key=value pairs. */
    for (;;) {
        const char *key;
        char *dest = NULL, *dest_end;
        int key_len, dest_len = 0;

        /* Skip whitespace and potential commas.
         * Cast to unsigned char: passing a negative plain char to
         * isspace() is undefined behavior (CERT STR37-C). */
        while (*ptr && (isspace((unsigned char)*ptr) || *ptr == ','))
            ptr++;
        if (!*ptr)
            break;

        key = ptr;

        if (!(ptr = strchr(key, '=')))
            break;
        ptr++;
        key_len = ptr - key; /* length including the '=' */

        callback_get_buf(state, key, key_len, &dest, &dest_len);
        dest_end = dest + dest_len - 1; /* reserve room for the NUL */

        if (*ptr == '\"') {
            /* Quoted value: copy until the closing quote, resolving
             * backslash escapes. */
            ptr++;
            while (*ptr && *ptr != '\"') {
                if (*ptr == '\\') {
                    if (!ptr[1])
                        break;
                    if (dest && dest < dest_end)
                        *dest++ = ptr[1];
                    ptr += 2;
                } else {
                    if (dest && dest < dest_end)
                        *dest++ = *ptr;
                    ptr++;
                }
            }
            if (*ptr == '\"')
                ptr++;
        } else {
            /* Unquoted value: runs until whitespace or comma. */
            for (; *ptr && !(isspace((unsigned char)*ptr) || *ptr == ','); ptr++)
                if (dest && dest < dest_end)
                    *dest++ = *ptr;
        }
        if (dest)
            *dest = 0;
    }
}

/** Map Basic-auth parameter keys to their state buffers (realm only). */
static void handle_basic_params(HTTPAuthState *state, const char *key,
                                int key_len, char **dest, int *dest_len)
{
    /* Note: comparison is bounded by key_len (which includes the '='),
     * matching the original upstream behavior. */
    if (!strncmp(key, "realm=", key_len)) {
        *dest     = state->realm;
        *dest_len = sizeof(state->realm);
    }
}

/** Map Digest-auth challenge parameter keys to their state buffers. */
static void handle_digest_params(HTTPAuthState *state, const char *key,
                                 int key_len, char **dest, int *dest_len)
{
    DigestParams *digest = &state->digest_params;

    if (!strncmp(key, "realm=", key_len)) {
        *dest     = state->realm;
        *dest_len = sizeof(state->realm);
    } else if (!strncmp(key, "nonce=", key_len)) {
        *dest     = digest->nonce;
        *dest_len = sizeof(digest->nonce);
    } else if (!strncmp(key, "opaque=", key_len)) {
        *dest     = digest->opaque;
        *dest_len = sizeof(digest->opaque);
    } else if (!strncmp(key, "algorithm=", key_len)) {
        *dest     = digest->algorithm;
        *dest_len = sizeof(digest->algorithm);
    } else if (!strncmp(key, "qop=", key_len)) {
        *dest     = digest->qop;
        *dest_len = sizeof(digest->qop);
    }
}

/** Handle Authentication-Info updates (nextnonce replaces the nonce). */
static void handle_digest_update(HTTPAuthState *state, const char *key,
                                 int key_len, char **dest, int *dest_len)
{
    DigestParams *digest = &state->digest_params;

    if (!strncmp(key, "nextnonce=", key_len)) {
        *dest     = digest->nonce;
        *dest_len = sizeof(digest->nonce);
    }
}

/**
 * Reduce the server-supplied qop list to the single value we support.
 * If "auth" appears as a standalone token (delimited by start/end of
 * string, whitespace or commas), keep exactly "auth"; otherwise clear
 * qop so the plain RFC 2617 digest (without qop) is used.
 */
static void choose_qop(char *qop, int size)
{
    char *ptr = strstr(qop, "auth");

    /* Fix: only form 'end' after the NULL check -- pointer arithmetic
     * on a NULL strstr() result is undefined behavior. */
    if (ptr) {
        char *end = ptr + strlen("auth");

        if ((!*end || isspace((unsigned char)*end) || *end == ',') &&
            (ptr == qop || isspace((unsigned char)ptr[-1]) ||
             ptr[-1] == ',')) {
            av_strlcpy(qop, "auth", size);
            return;
        }
    }
    qop[0] = 0;
}

/**
 * Feed one HTTP response header to the authentication state machine.
 * WWW-Authenticate challenges upgrade the auth type (Basic -> Digest,
 * never downgrade) and reparse the parameters; Authentication-Info
 * only updates the digest state (nextnonce).
 */
void ff_http_auth_handle_header(HTTPAuthState *state, const char *key,
                                const char *value)
{
    if (!strcmp(key, "WWW-Authenticate")) {
        const char *p;
        if (av_stristart(value, "Basic ", &p) &&
            state->auth_type <= HTTP_AUTH_BASIC) {
            state->auth_type = HTTP_AUTH_BASIC;
            state->realm[0] = 0;
            parse_key_value(p, handle_basic_params, state);
        } else if (av_stristart(value, "Digest ", &p) &&
                   state->auth_type <= HTTP_AUTH_DIGEST) {
            state->auth_type = HTTP_AUTH_DIGEST;
            memset(&state->digest_params, 0, sizeof(DigestParams));
            state->realm[0] = 0;
            parse_key_value(p, handle_digest_params, state);
            choose_qop(state->digest_params.qop,
                       sizeof(state->digest_params.qop));
        }
    } else if (!strcmp(key, "Authentication-Info")) {
        parse_key_value(value, handle_digest_update, state);
    }
}

/** MD5-update a NULL-terminated varargs list of strings. */
static void update_md5_strings(struct AVMD5 *md5ctx, ...)
{
    va_list vl;

    va_start(vl, md5ctx);
    while (1) {
        const char* str = va_arg(vl, const char*);
        if (!str)
            break;
        av_md5_update(md5ctx, str, strlen(str));
    }
    va_end(vl);
}

/**
 * Generate a digest reply (Authorization: Digest header line,
 * CRLF-terminated), according to RFC 2617.
 *
 * Supports algorithm "" (default)/MD5/MD5-sess and qop ""/auth;
 * returns NULL (av_malloc'ed string otherwise -- caller frees) on
 * unsupported algorithm or qop, or on allocation failure.
 */
static char *make_digest_auth(HTTPAuthState *state, const char *username,
                              const char *password, const char *uri,
                              const char *method)
{
    DigestParams *digest = &state->digest_params;
    int len;
    uint32_t cnonce_buf[2];
    char cnonce[17];
    char nc[9];
    int i;
    char A1hash[33], A2hash[33], response[33];
    struct AVMD5 *md5ctx;
    uint8_t hash[16];
    char *authstr;

    digest->nc++;
    snprintf(nc, sizeof(nc), "%08x", digest->nc);

    /* Generate a client nonce. */
    for (i = 0; i < 2; i++)
        cnonce_buf[i] = ff_random_get_seed();
    ff_data_to_hex(cnonce, (const uint8_t*) cnonce_buf, sizeof(cnonce_buf), 1);
    cnonce[2*sizeof(cnonce_buf)] = 0;

    md5ctx = av_malloc(av_md5_size);
    if (!md5ctx)
        return NULL;

    /* A1 = MD5(username:realm:password) */
    av_md5_init(md5ctx);
    update_md5_strings(md5ctx, username, ":", state->realm, ":", password, NULL);
    av_md5_final(md5ctx, hash);
    ff_data_to_hex(A1hash, hash, 16, 1);
    A1hash[32] = 0;

    if (!strcmp(digest->algorithm, "") || !strcmp(digest->algorithm, "MD5")) {
        /* Plain MD5: A1 as computed above. */
    } else if (!strcmp(digest->algorithm, "MD5-sess")) {
        /* MD5-sess: A1 = MD5(MD5(user:realm:pass):nonce:cnonce) */
        av_md5_init(md5ctx);
        update_md5_strings(md5ctx, A1hash, ":", digest->nonce, ":", cnonce, NULL);
        av_md5_final(md5ctx, hash);
        ff_data_to_hex(A1hash, hash, 16, 1);
        A1hash[32] = 0;
    } else {
        /* Unsupported algorithm */
        av_free(md5ctx);
        return NULL;
    }

    /* A2 = MD5(method:uri) */
    av_md5_init(md5ctx);
    update_md5_strings(md5ctx, method, ":", uri, NULL);
    av_md5_final(md5ctx, hash);
    ff_data_to_hex(A2hash, hash, 16, 1);
    A2hash[32] = 0;

    /* response = MD5(A1:nonce[:nc:cnonce:qop]:A2) */
    av_md5_init(md5ctx);
    update_md5_strings(md5ctx, A1hash, ":", digest->nonce, NULL);
    if (!strcmp(digest->qop, "auth") || !strcmp(digest->qop, "auth-int")) {
        update_md5_strings(md5ctx, ":", nc, ":", cnonce, ":", digest->qop, NULL);
    }
    update_md5_strings(md5ctx, ":", A2hash, NULL);
    av_md5_final(md5ctx, hash);
    ff_data_to_hex(response, hash, 16, 1);
    response[32] = 0;

    av_free(md5ctx);

    if (!strcmp(digest->qop, "") || !strcmp(digest->qop, "auth")) {
        /* Supported qop values. */
    } else if (!strcmp(digest->qop, "auth-int")) {
        /* qop=auth-int not supported */
        return NULL;
    } else {
        /* Unsupported qop value. */
        return NULL;
    }

    /* 150 bytes of slack covers the fixed header text, key names,
     * quotes, commas and the trailing CRLF below. */
    len = strlen(username) + strlen(state->realm) + strlen(digest->nonce) +
              strlen(uri) + strlen(response) + strlen(digest->algorithm) +
              strlen(digest->opaque) + strlen(digest->qop) + strlen(cnonce) +
              strlen(nc) + 150;

    authstr = av_malloc(len);
    if (!authstr)
        return NULL;
    snprintf(authstr, len, "Authorization: Digest ");

    /* TODO: Escape the quoted strings properly. */
    av_strlcatf(authstr, len, "username=\"%s\"",   username);
    av_strlcatf(authstr, len, ",realm=\"%s\"",     state->realm);
    av_strlcatf(authstr, len, ",nonce=\"%s\"",     digest->nonce);
    av_strlcatf(authstr, len, ",uri=\"%s\"",       uri);
    av_strlcatf(authstr, len, ",response=\"%s\"",  response);
    if (digest->algorithm[0])
        av_strlcatf(authstr, len, ",algorithm=%s",  digest->algorithm);
    if (digest->opaque[0])
        av_strlcatf(authstr, len, ",opaque=\"%s\"", digest->opaque);
    if (digest->qop[0]) {
        av_strlcatf(authstr, len, ",qop=\"%s\"",    digest->qop);
        av_strlcatf(authstr, len, ",cnonce=\"%s\"", cnonce);
        av_strlcatf(authstr, len, ",nc=%s",         nc);
    }

    av_strlcatf(authstr, len, "\r\n");

    return authstr;
}

/**
 * Build the Authorization header line (CRLF-terminated, av_malloc'ed;
 * caller frees) for the current auth state.
 *
 * @param auth   "username:password" credential string; NULL or a string
 *               without ':' yields NULL.
 * @param path   request URI (used in the digest computation)
 * @param method HTTP method (used in the digest computation)
 * @return header string, or NULL if no/unsupported auth is required
 */
char *ff_http_auth_create_response(HTTPAuthState *state, const char *auth,
                                   const char *path, const char *method)
{
    char *authstr = NULL;

    if (!auth || !strchr(auth, ':'))
        return NULL;

    if (state->auth_type == HTTP_AUTH_BASIC) {
        int auth_b64_len = (strlen(auth) + 2) / 3 * 4 + 1;
        int len = auth_b64_len + 30;
        char *ptr;
        authstr = av_malloc(len);
        if (!authstr)
            return NULL;
        snprintf(authstr, len, "Authorization: Basic ");
        ptr = authstr + strlen(authstr);
        av_base64_encode(ptr, auth_b64_len, auth, strlen(auth));
        av_strlcat(ptr, "\r\n", len);
    } else if (state->auth_type == HTTP_AUTH_DIGEST) {
        char *username = av_strdup(auth), *password;

        if (!username)
            return NULL;

        if ((password = strchr(username, ':'))) {
            *password++ = 0;
            authstr = make_digest_auth(state, username, password, path, method);
        }
        av_free(username);
    }
    return authstr;
}
123linslouis-android-video-cutter
jni/libavformat/httpauth.c
C
asf20
10,435
/*
 * Realmedia RTSP protocol (RDT) support.
 * Copyright (c) 2007 Ronald S. Bultje
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * @brief Realmedia RTSP protocol (RDT) support
 * @author Ronald S. Bultje <rbultje@ronald.bitfreak.net>
 */

#include "avformat.h"
#include "libavutil/avstring.h"
#include "rtpdec.h"
#include "rdt.h"
#include "libavutil/base64.h"
#include "libavutil/md5.h"
#include "rm.h"
#include "internal.h"
#include "libavcodec/get_bits.h"

struct RDTDemuxContext {
    AVFormatContext *ic; /**< the containing (RTSP) demux context */
    /** Each RDT stream-set (represented by one RTSPStream) can contain
     * multiple streams (of the same content, but with possibly different
     * codecs/bitrates). Each such stream is represented by one AVStream
     * in the AVFormatContext, and this variable points to the offset in
     * that array such that the first is the first stream of this set. */
    AVStream **streams;
    int n_streams; /**< streams with identical content in this set */
    void *dynamic_protocol_context;
    DynamicPayloadPacketHandlerProc parse_packet;
    /* Previous-packet bookkeeping used to detect new frames/streams;
     * all initialized to -1 ("none seen yet") in ff_rdt_parse_open. */
    uint32_t prev_timestamp;
    int prev_set_id, prev_stream_id;
};

/**
 * Allocate an RDT demux context for one stream set.
 *
 * The set is taken to consist of the run of consecutive AVStreams,
 * starting at first_stream_of_set_idx, that share the same priv_data
 * pointer as the first one (alternative encodings of the same content;
 * see the do/while scan below).
 *
 * @param ic                      containing (RTSP) demux context
 * @param first_stream_of_set_idx index of the first AVStream of the set
 * @param priv_data               dynamic-protocol context handed to the
 *                                payload parser
 * @param handler                 optional dynamic payload handler whose
 *                                parse_packet callback is used
 * @return newly allocated context, or NULL on allocation failure
 */
RDTDemuxContext *
ff_rdt_parse_open(AVFormatContext *ic, int first_stream_of_set_idx,
                  void *priv_data, RTPDynamicProtocolHandler *handler)
{
    RDTDemuxContext *s = av_mallocz(sizeof(RDTDemuxContext));
    if (!s)
        return NULL;

    s->ic = ic;
    s->streams = &ic->streams[first_stream_of_set_idx];
    /* Count how many consecutive streams belong to this set (same
     * priv_data as the first one). */
    do {
        s->n_streams++;
    } while (first_stream_of_set_idx + s->n_streams < ic->nb_streams &&
             s->streams[s->n_streams]->priv_data == s->streams[0]->priv_data);
    s->prev_set_id    = -1;
    s->prev_stream_id = -1;
    s->prev_timestamp = -1; /* wraps to UINT32_MAX: "no timestamp yet" */
    s->parse_packet = handler ? handler->parse_packet : NULL;
    s->dynamic_protocol_context = priv_data;

    return s;
}

/**
 * Free an RDT demux context. Clears the shared priv_data pointer on all
 * but the first stream of the set (they all aliased the same object) and
 * releases the context itself.
 */
void
ff_rdt_parse_close(RDTDemuxContext *s)
{
    int i;

    for (i = 1; i < s->n_streams; i++)
        s->streams[i]->priv_data = NULL;

    av_free(s);
}

struct PayloadContext {
    AVFormatContext *rmctx; /* embedded RM demuxer context */
    RMStream *rmst[MAX_STREAMS];
    uint8_t *mlti_data;           /* raw MLTI chunk from the SDP */
    unsigned int mlti_data_size;
    char buffer[RTP_MAX_PACKET_LENGTH + FF_INPUT_BUFFER_PADDING_SIZE];
    int audio_pkt_cnt; /**< remaining audio packets in rmdec */
};

/**
 * Compute the RealChallenge response and checksum for a server-supplied
 * challenge string: the challenge is copied into a fixed 64-byte buffer
 * (after an 8-byte magic prefix), XORed with a constant table, MD5'ed
 * and hex-encoded; a fixed tail is appended and every 4th hex digit
 * forms the checksum.
 *
 * @param response out: 40 hex chars + NUL
 * @param chksum   out: 8 chars + NUL
 * @param challenge challenge string from the server
 */
void
ff_rdt_calc_response_and_checksum(char response[41], char chksum[9],
                                  const char *challenge)
{
    int ch_len = strlen (challenge), i;
    unsigned char zres[16],
        buf[64] = { 0xa1, 0xe9, 0x14, 0x9d, 0x0e, 0x6b, 0x3b, 0x59 };
#define XOR_TABLE_SIZE 37
    const unsigned char xor_table[XOR_TABLE_SIZE] = {
        0x05, 0x18, 0x74, 0xd0, 0x0d, 0x09, 0x02, 0x53,
        0xc0, 0x01, 0x05, 0x05, 0x67, 0x03, 0x19, 0x70,
        0x08, 0x27, 0x66, 0x10, 0x10, 0x72, 0x08, 0x09,
        0x63, 0x11, 0x03, 0x71, 0x08, 0x08, 0x70, 0x02,
        0x10, 0x57, 0x05, 0x18, 0x54 };

    /* some (length) checks */
    if (ch_len == 40) /* what a hack... */
        ch_len = 32;
    else if (ch_len > 56)
        ch_len = 56;
    memcpy(buf + 8, challenge, ch_len);

    /* xor challenge bytewise with xor_table */
    for (i = 0; i < XOR_TABLE_SIZE; i++)
        buf[8 + i] ^= xor_table[i];

    av_md5_sum(zres, buf, 64);
    ff_data_to_hex(response, zres, 16, 1);

    /* add tail */
    strcpy (response + 32, "01d0a8e3");

    /* calculate checksum: every 4th character of the response */
    for (i = 0; i < 8; i++)
        chksum[i] = response[i * 4];
    chksum[8] = 0;
}

/**
 * Locate the MDPR sub-chunk for rule_nr inside the cached MLTI data and
 * hand it to the RM demuxer to parse the stream-specific codec header.
 *
 * @param rdt     payload context holding mlti_data
 * @param st      stream the codec data belongs to
 * @param rule_nr index into the MLTI stream table
 * @return 0 on success, -1 on missing/invalid MLTI data or parse failure
 */
static int
rdt_load_mdpr (PayloadContext *rdt, AVStream *st, int rule_nr)
{
    ByteIOContext pb;
    int size;
    uint32_t tag;

    /**
     * Layout of the MLTI chunk:
     * 4:MLTI
     * 2:<number of streams>
     * Then for each stream ([number_of_streams] times):
     * 2:<mdpr index>
     * 2:<number of mdpr chunks>
     * Then for each mdpr chunk ([number_of_mdpr_chunks] times):
     * 4:<size>
     * [size]:<data>
     * we skip MDPR chunks until we reach the one of the stream
     * we're interested in, and forward that ([size]+[data]) to
     * the RM demuxer to parse the stream-specific header data.
     */
    if (!rdt->mlti_data)
        return -1;
    init_put_byte(&pb, rdt->mlti_data, rdt->mlti_data_size, 0,
                  NULL, NULL, NULL, NULL);
    tag = get_le32(&pb);
    if (tag == MKTAG('M', 'L', 'T', 'I')) {
        int num, chunk_nr;

        /* read index of MDPR chunk numbers */
        num = get_be16(&pb);
        if (rule_nr < 0 || rule_nr >= num)
            return -1;
        url_fskip(&pb, rule_nr * 2);
        chunk_nr = get_be16(&pb);
        url_fskip(&pb, (num - 1 - rule_nr) * 2);

        /* read MDPR chunks */
        num = get_be16(&pb);
        if (chunk_nr >= num)
            return -1;
        while (chunk_nr--)
            url_fskip(&pb, get_be32(&pb));
        size = get_be32(&pb);
    } else {
        /* no MLTI tag: treat the whole buffer as one codec-data blob */
        size = rdt->mlti_data_size;
        url_fseek(&pb, 0, SEEK_SET);
    }
    if (ff_rm_read_mdpr_codecdata(rdt->rmctx, &pb, st, rdt->rmst[st->index],
                                  size) < 0)
        return -1;

    return 0;
}

/**
 * Actual data handling.
*/ int ff_rdt_parse_header(const uint8_t *buf, int len, int *pset_id, int *pseq_no, int *pstream_id, int *pis_keyframe, uint32_t *ptimestamp) { GetBitContext gb; int consumed = 0, set_id, seq_no, stream_id, is_keyframe, len_included, need_reliable; uint32_t timestamp; /* skip status packets */ while (len >= 5 && buf[1] == 0xFF /* status packet */) { int pkt_len; if (!(buf[0] & 0x80)) return -1; /* not followed by a data packet */ pkt_len = AV_RB16(buf+3); buf += pkt_len; len -= pkt_len; consumed += pkt_len; } if (len < 16) return -1; /** * Layout of the header (in bits): * 1: len_included * Flag indicating whether this header includes a length field; * this can be used to concatenate multiple RDT packets in a * single UDP/TCP data frame and is used to precede RDT data * by stream status packets * 1: need_reliable * Flag indicating whether this header includes a "reliable * sequence number"; these are apparently sequence numbers of * data packets alone. For data packets, this flag is always * set, according to the Real documentation [1] * 5: set_id * ID of a set of streams of identical content, possibly with * different codecs or bitrates * 1: is_reliable * Flag set for certain streams deemed less tolerable for packet * loss * 16: seq_no * Packet sequence number; if >=0xFF00, this is a non-data packet * containing stream status info, the second byte indicates the * type of status packet (see wireshark docs / source code [2]) * if (len_included) { * 16: packet_len * } else { * packet_len = remainder of UDP/TCP frame * } * 1: is_back_to_back * Back-to-Back flag; used for timing, set for one in every 10 * packets, according to the Real documentation [1] * 1: is_slow_data * Slow-data flag; currently unused, according to Real docs [1] * 5: stream_id * ID of the stream within this particular set of streams * 1: is_no_keyframe * Non-keyframe flag (unset if packet belongs to a keyframe) * 32: timestamp (PTS) * if (set_id == 0x1F) { * 16: set_id (extended set-of-streams ID; 
see set_id) * } * if (need_reliable) { * 16: reliable_seq_no * Reliable sequence number (see need_reliable) * } * if (stream_id == 0x3F) { * 16: stream_id (extended stream ID; see stream_id) * } * [1] https://protocol.helixcommunity.org/files/2005/devdocs/RDT_Feature_Level_20.txt * [2] http://www.wireshark.org/docs/dfref/r/rdt.html and * http://anonsvn.wireshark.org/viewvc/trunk/epan/dissectors/packet-rdt.c */ init_get_bits(&gb, buf, len << 3); len_included = get_bits1(&gb); need_reliable = get_bits1(&gb); set_id = get_bits(&gb, 5); skip_bits(&gb, 1); seq_no = get_bits(&gb, 16); if (len_included) skip_bits(&gb, 16); skip_bits(&gb, 2); stream_id = get_bits(&gb, 5); is_keyframe = !get_bits1(&gb); timestamp = get_bits_long(&gb, 32); if (set_id == 0x1f) set_id = get_bits(&gb, 16); if (need_reliable) skip_bits(&gb, 16); if (stream_id == 0x1f) stream_id = get_bits(&gb, 16); if (pset_id) *pset_id = set_id; if (pseq_no) *pseq_no = seq_no; if (pstream_id) *pstream_id = stream_id; if (pis_keyframe) *pis_keyframe = is_keyframe; if (ptimestamp) *ptimestamp = timestamp; return consumed + (get_bits_count(&gb) >> 3); } /**< return 0 on packet, no more left, 1 on packet, 1 on partial packet... */ static int rdt_parse_packet (AVFormatContext *ctx, PayloadContext *rdt, AVStream *st, AVPacket *pkt, uint32_t *timestamp, const uint8_t *buf, int len, int flags) { int seq = 1, res; ByteIOContext pb; if (rdt->audio_pkt_cnt == 0) { int pos; init_put_byte(&pb, buf, len, 0, NULL, NULL, NULL, NULL); flags = (flags & RTP_FLAG_KEY) ? 
2 : 0; res = ff_rm_parse_packet (rdt->rmctx, &pb, st, rdt->rmst[st->index], len, pkt, &seq, flags, *timestamp); pos = url_ftell(&pb); if (res < 0) return res; if (res > 0) { if (st->codec->codec_id == CODEC_ID_AAC) { memcpy (rdt->buffer, buf + pos, len - pos); rdt->rmctx->pb = av_alloc_put_byte (rdt->buffer, len - pos, 0, NULL, NULL, NULL, NULL); } goto get_cache; } } else { get_cache: rdt->audio_pkt_cnt = ff_rm_retrieve_cache (rdt->rmctx, rdt->rmctx->pb, st, rdt->rmst[st->index], pkt); if (rdt->audio_pkt_cnt == 0 && st->codec->codec_id == CODEC_ID_AAC) av_freep(&rdt->rmctx->pb); } pkt->stream_index = st->index; pkt->pts = *timestamp; return rdt->audio_pkt_cnt > 0; } int ff_rdt_parse_packet(RDTDemuxContext *s, AVPacket *pkt, const uint8_t *buf, int len) { int seq_no, flags = 0, stream_id, set_id, is_keyframe; uint32_t timestamp; int rv= 0; if (!s->parse_packet) return -1; if (!buf && s->prev_stream_id != -1) { /* return the next packets, if any */ timestamp= 0; ///< Should not be used if buf is NULL, but should be set to the timestamp of the packet returned.... 
rv= s->parse_packet(s->ic, s->dynamic_protocol_context, s->streams[s->prev_stream_id], pkt, &timestamp, NULL, 0, flags); return rv; } if (len < 12) return -1; rv = ff_rdt_parse_header(buf, len, &set_id, &seq_no, &stream_id, &is_keyframe, &timestamp); if (rv < 0) return rv; if (is_keyframe && (set_id != s->prev_set_id || timestamp != s->prev_timestamp || stream_id != s->prev_stream_id)) { flags |= RTP_FLAG_KEY; s->prev_set_id = set_id; s->prev_timestamp = timestamp; } s->prev_stream_id = stream_id; buf += rv; len -= rv; if (s->prev_stream_id >= s->n_streams) { s->prev_stream_id = -1; return -1; } rv = s->parse_packet(s->ic, s->dynamic_protocol_context, s->streams[s->prev_stream_id], pkt, &timestamp, buf, len, flags); return rv; } void ff_rdt_subscribe_rule (char *cmd, int size, int stream_nr, int rule_nr) { av_strlcatf(cmd, size, "stream=%d;rule=%d,stream=%d;rule=%d", stream_nr, rule_nr * 2, stream_nr, rule_nr * 2 + 1); } static unsigned char * rdt_parse_b64buf (unsigned int *target_len, const char *p) { unsigned char *target; int len = strlen(p); if (*p == '\"') { p++; len -= 2; /* skip embracing " at start/end */ } *target_len = len * 3 / 4; target = av_mallocz(*target_len + FF_INPUT_BUFFER_PADDING_SIZE); av_base64_decode(target, p, *target_len); return target; } static int rdt_parse_sdp_line (AVFormatContext *s, int st_index, PayloadContext *rdt, const char *line) { AVStream *stream = s->streams[st_index]; const char *p = line; if (av_strstart(p, "OpaqueData:buffer;", &p)) { rdt->mlti_data = rdt_parse_b64buf(&rdt->mlti_data_size, p); } else if (av_strstart(p, "StartTime:integer;", &p)) stream->first_dts = atoi(p); else if (av_strstart(p, "ASMRuleBook:string;", &p)) { int n, first = -1; for (n = 0; n < s->nb_streams; n++) if (s->streams[n]->priv_data == stream->priv_data) { if (first == -1) first = n; rdt->rmst[s->streams[n]->index] = ff_rm_alloc_rmstream(); rdt_load_mdpr(rdt, s->streams[n], (n - first) * 2); if (s->streams[n]->codec->codec_id == CODEC_ID_AAC) 
s->streams[n]->codec->frame_size = 1; // FIXME } } return 0; } static void real_parse_asm_rule(AVStream *st, const char *p, const char *end) { do { /* can be either averagebandwidth= or AverageBandwidth= */ if (sscanf(p, " %*1[Aa]verage%*1[Bb]andwidth=%d", &st->codec->bit_rate) == 1) break; if (!(p = strchr(p, ',')) || p > end) p = end; p++; } while (p < end); } static AVStream * add_dstream(AVFormatContext *s, AVStream *orig_st) { AVStream *st; if (!(st = av_new_stream(s, 0))) return NULL; st->codec->codec_type = orig_st->codec->codec_type; st->priv_data = orig_st->priv_data; st->first_dts = orig_st->first_dts; return st; } static void real_parse_asm_rulebook(AVFormatContext *s, AVStream *orig_st, const char *p) { const char *end; int n_rules, odd = 0; AVStream *st; /** * The ASMRuleBook contains a list of comma-separated strings per rule, * and each rule is separated by a ;. The last one also has a ; at the * end so we can use it as delimiter. * Every rule occurs twice, once for when the RTSP packet header marker * is set and once for if it isn't. We only read the first because we * don't care much (that's what the "odd" variable is for). * Each rule contains a set of one or more statements, optionally * preceeded by a single condition. If there's a condition, the rule * starts with a '#'. Multiple conditions are merged between brackets, * so there are never multiple conditions spread out over separate * statements. Generally, these conditions are bitrate limits (min/max) * for multi-bitrate streams. 
*/ if (*p == '\"') p++; for (n_rules = 0; s->nb_streams < MAX_STREAMS;) { if (!(end = strchr(p, ';'))) break; if (!odd && end != p) { if (n_rules > 0) st = add_dstream(s, orig_st); else st = orig_st; real_parse_asm_rule(st, p, end); n_rules++; } p = end + 1; odd ^= 1; } } void ff_real_parse_sdp_a_line (AVFormatContext *s, int stream_index, const char *line) { const char *p = line; if (av_strstart(p, "ASMRuleBook:string;", &p)) real_parse_asm_rulebook(s, s->streams[stream_index], p); } static PayloadContext * rdt_new_context (void) { PayloadContext *rdt = av_mallocz(sizeof(PayloadContext)); av_open_input_stream(&rdt->rmctx, NULL, "", &rdt_demuxer, NULL); return rdt; } static void rdt_free_context (PayloadContext *rdt) { int i; for (i = 0; i < MAX_STREAMS; i++) if (rdt->rmst[i]) { ff_rm_free_rmstream(rdt->rmst[i]); av_freep(&rdt->rmst[i]); } if (rdt->rmctx) av_close_input_stream(rdt->rmctx); av_freep(&rdt->mlti_data); av_free(rdt); } #define RDT_HANDLER(n, s, t) \ static RTPDynamicProtocolHandler ff_rdt_ ## n ## _handler = { \ .enc_name = s, \ .codec_type = t, \ .codec_id = CODEC_ID_NONE, \ .parse_sdp_a_line = rdt_parse_sdp_line, \ .open = rdt_new_context, \ .close = rdt_free_context, \ .parse_packet = rdt_parse_packet \ }; RDT_HANDLER(live_video, "x-pn-multirate-realvideo-live", AVMEDIA_TYPE_VIDEO); RDT_HANDLER(live_audio, "x-pn-multirate-realaudio-live", AVMEDIA_TYPE_AUDIO); RDT_HANDLER(video, "x-pn-realvideo", AVMEDIA_TYPE_VIDEO); RDT_HANDLER(audio, "x-pn-realaudio", AVMEDIA_TYPE_AUDIO); void av_register_rdt_dynamic_payload_handlers(void) { ff_register_dynamic_payload_handler(&ff_rdt_video_handler); ff_register_dynamic_payload_handler(&ff_rdt_audio_handler); ff_register_dynamic_payload_handler(&ff_rdt_live_video_handler); ff_register_dynamic_payload_handler(&ff_rdt_live_audio_handler); }
123linslouis-android-video-cutter
jni/libavformat/rdt.c
C
asf20
18,122
/* * Westwood Studios Multimedia Formats Demuxer (VQA, AUD) * Copyright (c) 2003 The ffmpeg Project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Westwood Studios VQA & AUD file demuxers * by Mike Melanson (melanson@pcisys.net) * for more information on the Westwood file formats, visit: * http://www.pcisys.net/~melanson/codecs/ * http://www.geocities.com/SiliconValley/8682/aud3.txt * * Implementation note: There is no definite file signature for AUD files. * The demuxer uses a probabilistic strategy for content detection. This * entails performing sanity checks on certain header values in order to * qualify a file. Refer to wsaud_probe() for the precise parameters. 
*/ #include "libavutil/intreadwrite.h" #include "avformat.h" #define AUD_HEADER_SIZE 12 #define AUD_CHUNK_PREAMBLE_SIZE 8 #define AUD_CHUNK_SIGNATURE 0x0000DEAF #define FORM_TAG MKBETAG('F', 'O', 'R', 'M') #define WVQA_TAG MKBETAG('W', 'V', 'Q', 'A') #define VQHD_TAG MKBETAG('V', 'Q', 'H', 'D') #define FINF_TAG MKBETAG('F', 'I', 'N', 'F') #define SND0_TAG MKBETAG('S', 'N', 'D', '0') #define SND1_TAG MKBETAG('S', 'N', 'D', '1') #define SND2_TAG MKBETAG('S', 'N', 'D', '2') #define VQFR_TAG MKBETAG('V', 'Q', 'F', 'R') /* don't know what these tags are for, but acknowledge their existence */ #define CINF_TAG MKBETAG('C', 'I', 'N', 'F') #define CINH_TAG MKBETAG('C', 'I', 'N', 'H') #define CIND_TAG MKBETAG('C', 'I', 'N', 'D') #define PINF_TAG MKBETAG('P', 'I', 'N', 'F') #define PINH_TAG MKBETAG('P', 'I', 'N', 'H') #define PIND_TAG MKBETAG('P', 'I', 'N', 'D') #define CMDS_TAG MKBETAG('C', 'M', 'D', 'S') #define VQA_HEADER_SIZE 0x2A #define VQA_FRAMERATE 15 #define VQA_PREAMBLE_SIZE 8 typedef struct WsAudDemuxContext { int audio_samplerate; int audio_channels; int audio_bits; enum CodecID audio_type; int audio_stream_index; int64_t audio_frame_counter; } WsAudDemuxContext; typedef struct WsVqaDemuxContext { int audio_samplerate; int audio_channels; int audio_bits; int audio_stream_index; int video_stream_index; int64_t audio_frame_counter; } WsVqaDemuxContext; static int wsaud_probe(AVProbeData *p) { int field; /* Probabilistic content detection strategy: There is no file signature * so perform sanity checks on various header parameters: * 8000 <= sample rate (16 bits) <= 48000 ==> 40001 acceptable numbers * flags <= 0x03 (2 LSBs are used) ==> 4 acceptable numbers * compression type (8 bits) = 1 or 99 ==> 2 acceptable numbers * first audio chunk signature (32 bits) ==> 1 acceptable number * The number space contains 2^64 numbers. There are 40001 * 4 * 2 * 1 = * 320008 acceptable number combinations. 
*/ if (p->buf_size < AUD_HEADER_SIZE + AUD_CHUNK_PREAMBLE_SIZE) return 0; /* check sample rate */ field = AV_RL16(&p->buf[0]); if ((field < 8000) || (field > 48000)) return 0; /* enforce the rule that the top 6 bits of this flags field are reserved (0); * this might not be true, but enforce it until deemed unnecessary */ if (p->buf[10] & 0xFC) return 0; /* note: only check for WS IMA (type 99) right now since there is no * support for type 1 */ if (p->buf[11] != 99) return 0; /* read ahead to the first audio chunk and validate the first header signature */ if (AV_RL32(&p->buf[16]) != AUD_CHUNK_SIGNATURE) return 0; /* return 1/2 certainty since this file check is a little sketchy */ return AVPROBE_SCORE_MAX / 2; } static int wsaud_read_header(AVFormatContext *s, AVFormatParameters *ap) { WsAudDemuxContext *wsaud = s->priv_data; ByteIOContext *pb = s->pb; AVStream *st; unsigned char header[AUD_HEADER_SIZE]; if (get_buffer(pb, header, AUD_HEADER_SIZE) != AUD_HEADER_SIZE) return AVERROR(EIO); wsaud->audio_samplerate = AV_RL16(&header[0]); if (header[11] == 99) wsaud->audio_type = CODEC_ID_ADPCM_IMA_WS; else return AVERROR_INVALIDDATA; /* flag 0 indicates stereo */ wsaud->audio_channels = (header[10] & 0x1) + 1; /* flag 1 indicates 16 bit audio */ wsaud->audio_bits = (((header[10] & 0x2) >> 1) + 1) * 8; /* initialize the audio decoder stream */ st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); av_set_pts_info(st, 33, 1, wsaud->audio_samplerate); st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = wsaud->audio_type; st->codec->codec_tag = 0; /* no tag */ st->codec->channels = wsaud->audio_channels; st->codec->sample_rate = wsaud->audio_samplerate; st->codec->bits_per_coded_sample = wsaud->audio_bits; st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample / 4; st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample; wsaud->audio_stream_index = st->index; 
wsaud->audio_frame_counter = 0; return 0; } static int wsaud_read_packet(AVFormatContext *s, AVPacket *pkt) { WsAudDemuxContext *wsaud = s->priv_data; ByteIOContext *pb = s->pb; unsigned char preamble[AUD_CHUNK_PREAMBLE_SIZE]; unsigned int chunk_size; int ret = 0; if (get_buffer(pb, preamble, AUD_CHUNK_PREAMBLE_SIZE) != AUD_CHUNK_PREAMBLE_SIZE) return AVERROR(EIO); /* validate the chunk */ if (AV_RL32(&preamble[4]) != AUD_CHUNK_SIGNATURE) return AVERROR_INVALIDDATA; chunk_size = AV_RL16(&preamble[0]); ret= av_get_packet(pb, pkt, chunk_size); if (ret != chunk_size) return AVERROR(EIO); pkt->stream_index = wsaud->audio_stream_index; pkt->pts = wsaud->audio_frame_counter; pkt->pts /= wsaud->audio_samplerate; /* 2 samples/byte, 1 or 2 samples per frame depending on stereo */ wsaud->audio_frame_counter += (chunk_size * 2) / wsaud->audio_channels; return ret; } static int wsvqa_probe(AVProbeData *p) { /* need 12 bytes to qualify */ if (p->buf_size < 12) return 0; /* check for the VQA signatures */ if ((AV_RB32(&p->buf[0]) != FORM_TAG) || (AV_RB32(&p->buf[8]) != WVQA_TAG)) return 0; return AVPROBE_SCORE_MAX; } static int wsvqa_read_header(AVFormatContext *s, AVFormatParameters *ap) { WsVqaDemuxContext *wsvqa = s->priv_data; ByteIOContext *pb = s->pb; AVStream *st; unsigned char *header; unsigned char scratch[VQA_PREAMBLE_SIZE]; unsigned int chunk_tag; unsigned int chunk_size; /* initialize the video decoder stream */ st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); av_set_pts_info(st, 33, 1, VQA_FRAMERATE); wsvqa->video_stream_index = st->index; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_WS_VQA; st->codec->codec_tag = 0; /* no fourcc */ /* skip to the start of the VQA header */ url_fseek(pb, 20, SEEK_SET); /* the VQA header needs to go to the decoder */ st->codec->extradata_size = VQA_HEADER_SIZE; st->codec->extradata = av_mallocz(VQA_HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE); header = (unsigned char *)st->codec->extradata; if 
(get_buffer(pb, st->codec->extradata, VQA_HEADER_SIZE) != VQA_HEADER_SIZE) { av_free(st->codec->extradata); return AVERROR(EIO); } st->codec->width = AV_RL16(&header[6]); st->codec->height = AV_RL16(&header[8]); /* initialize the audio decoder stream for VQA v1 or nonzero samplerate */ if (AV_RL16(&header[24]) || (AV_RL16(&header[0]) == 1 && AV_RL16(&header[2]) == 1)) { st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); av_set_pts_info(st, 33, 1, VQA_FRAMERATE); st->codec->codec_type = AVMEDIA_TYPE_AUDIO; if (AV_RL16(&header[0]) == 1) st->codec->codec_id = CODEC_ID_WESTWOOD_SND1; else st->codec->codec_id = CODEC_ID_ADPCM_IMA_WS; st->codec->codec_tag = 0; /* no tag */ st->codec->sample_rate = AV_RL16(&header[24]); if (!st->codec->sample_rate) st->codec->sample_rate = 22050; st->codec->channels = header[26]; if (!st->codec->channels) st->codec->channels = 1; st->codec->bits_per_coded_sample = 16; st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample / 4; st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample; wsvqa->audio_stream_index = st->index; wsvqa->audio_samplerate = st->codec->sample_rate; wsvqa->audio_channels = st->codec->channels; wsvqa->audio_frame_counter = 0; } /* there are 0 or more chunks before the FINF chunk; iterate until * FINF has been skipped and the file will be ready to be demuxed */ do { if (get_buffer(pb, scratch, VQA_PREAMBLE_SIZE) != VQA_PREAMBLE_SIZE) { av_free(st->codec->extradata); return AVERROR(EIO); } chunk_tag = AV_RB32(&scratch[0]); chunk_size = AV_RB32(&scratch[4]); /* catch any unknown header tags, for curiousity */ switch (chunk_tag) { case CINF_TAG: case CINH_TAG: case CIND_TAG: case PINF_TAG: case PINH_TAG: case PIND_TAG: case FINF_TAG: case CMDS_TAG: break; default: av_log (s, AV_LOG_ERROR, " note: unknown chunk seen (%c%c%c%c)\n", scratch[0], scratch[1], scratch[2], scratch[3]); break; } url_fseek(pb, chunk_size, SEEK_CUR); } while (chunk_tag != 
FINF_TAG); return 0; } static int wsvqa_read_packet(AVFormatContext *s, AVPacket *pkt) { WsVqaDemuxContext *wsvqa = s->priv_data; ByteIOContext *pb = s->pb; int ret = -1; unsigned char preamble[VQA_PREAMBLE_SIZE]; unsigned int chunk_type; unsigned int chunk_size; int skip_byte; while (get_buffer(pb, preamble, VQA_PREAMBLE_SIZE) == VQA_PREAMBLE_SIZE) { chunk_type = AV_RB32(&preamble[0]); chunk_size = AV_RB32(&preamble[4]); skip_byte = chunk_size & 0x01; if ((chunk_type == SND1_TAG) || (chunk_type == SND2_TAG) || (chunk_type == VQFR_TAG)) { if (av_new_packet(pkt, chunk_size)) return AVERROR(EIO); ret = get_buffer(pb, pkt->data, chunk_size); if (ret != chunk_size) { av_free_packet(pkt); return AVERROR(EIO); } if (chunk_type == SND2_TAG) { pkt->stream_index = wsvqa->audio_stream_index; /* 2 samples/byte, 1 or 2 samples per frame depending on stereo */ wsvqa->audio_frame_counter += (chunk_size * 2) / wsvqa->audio_channels; } else if(chunk_type == SND1_TAG) { pkt->stream_index = wsvqa->audio_stream_index; /* unpacked size is stored in header */ wsvqa->audio_frame_counter += AV_RL16(pkt->data) / wsvqa->audio_channels; } else { pkt->stream_index = wsvqa->video_stream_index; } /* stay on 16-bit alignment */ if (skip_byte) url_fseek(pb, 1, SEEK_CUR); return ret; } else { switch(chunk_type){ case CMDS_TAG: case SND0_TAG: break; default: av_log(s, AV_LOG_INFO, "Skipping unknown chunk 0x%08X\n", chunk_type); } url_fseek(pb, chunk_size + skip_byte, SEEK_CUR); } } return ret; } #if CONFIG_WSAUD_DEMUXER AVInputFormat wsaud_demuxer = { "wsaud", NULL_IF_CONFIG_SMALL("Westwood Studios audio format"), sizeof(WsAudDemuxContext), wsaud_probe, wsaud_read_header, wsaud_read_packet, }; #endif #if CONFIG_WSVQA_DEMUXER AVInputFormat wsvqa_demuxer = { "wsvqa", NULL_IF_CONFIG_SMALL("Westwood Studios VQA format"), sizeof(WsVqaDemuxContext), wsvqa_probe, wsvqa_read_header, wsvqa_read_packet, }; #endif
123linslouis-android-video-cutter
jni/libavformat/westwood.c
C
asf20
13,060
/* * General DV muxer/demuxer * Copyright (c) 2003 Roman Shaposhnik * * Many thanks to Dan Dennedy <dan@dennedy.org> for providing wealth * of DV technical info. * * Raw DV format * Copyright (c) 2002 Fabrice Bellard * * 50 Mbps (DVCPRO50) support * Copyright (c) 2006 Daniel Maas <dmaas@maasdigital.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <time.h> #include <stdarg.h> #include "avformat.h" #include "internal.h" #include "libavcodec/dvdata.h" #include "dv.h" #include "libavutil/fifo.h" struct DVMuxContext { const DVprofile* sys; /* current DV profile, e.g.: 525/60, 625/50 */ int n_ast; /* number of stereo audio streams (up to 2) */ AVStream *ast[2]; /* stereo audio streams */ AVFifoBuffer *audio_data[2]; /* FIFO for storing excessive amounts of PCM */ int frames; /* current frame number */ time_t start_time; /* recording start time */ int has_audio; /* frame under contruction has audio */ int has_video; /* frame under contruction has video */ uint8_t frame_buf[DV_MAX_FRAME_SIZE]; /* frame under contruction */ }; static const int dv_aaux_packs_dist[12][9] = { { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, { 0x50, 0x51, 0x52, 0x53, 0xff, 
0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff }, }; static int dv_audio_frame_size(const DVprofile* sys, int frame) { return sys->audio_samples_dist[frame % (sizeof(sys->audio_samples_dist) / sizeof(sys->audio_samples_dist[0]))]; } static int dv_write_pack(enum dv_pack_type pack_id, DVMuxContext *c, uint8_t* buf, ...) { struct tm tc; time_t ct; int ltc_frame; va_list ap; buf[0] = (uint8_t)pack_id; switch (pack_id) { case dv_timecode: ct = (time_t)av_rescale_rnd(c->frames, c->sys->time_base.num, c->sys->time_base.den, AV_ROUND_DOWN); brktimegm(ct, &tc); /* * LTC drop-frame frame counter drops two frames (0 and 1) every * minute, unless it is exactly divisible by 10 */ ltc_frame = (c->frames + 2 * ct / 60 - 2 * ct / 600) % c->sys->ltc_divisor; buf[1] = (0 << 7) | /* color frame: 0 - unsync; 1 - sync mode */ (1 << 6) | /* drop frame timecode: 0 - nondrop; 1 - drop */ ((ltc_frame / 10) << 4) | /* tens of frames */ (ltc_frame % 10); /* units of frames */ buf[2] = (1 << 7) | /* biphase mark polarity correction: 0 - even; 1 - odd */ ((tc.tm_sec / 10) << 4) | /* tens of seconds */ (tc.tm_sec % 10); /* units of seconds */ buf[3] = (1 << 7) | /* binary group flag BGF0 */ ((tc.tm_min / 10) << 4) | /* tens of minutes */ (tc.tm_min % 10); /* units of minutes */ buf[4] = (1 << 7) | /* binary group flag BGF2 */ (1 << 6) | /* binary group flag BGF1 */ ((tc.tm_hour / 10) << 4) | /* tens of hours */ (tc.tm_hour % 10); /* units of hours */ break; case dv_audio_source: /* AAUX source pack */ va_start(ap, buf); buf[1] = (1 << 7) | /* locked mode -- 
SMPTE only supports locked mode */ (1 << 6) | /* reserved -- always 1 */ (dv_audio_frame_size(c->sys, c->frames) - c->sys->audio_min_samples[0]); /* # of samples */ buf[2] = (0 << 7) | /* multi-stereo */ (0 << 5) | /* #of audio channels per block: 0 -- 1 channel */ (0 << 4) | /* pair bit: 0 -- one pair of channels */ !!va_arg(ap, int); /* audio mode */ buf[3] = (1 << 7) | /* res */ (1 << 6) | /* multi-language flag */ (c->sys->dsf << 5) | /* system: 60fields/50fields */ (c->sys->n_difchan & 2); /* definition: 0 -- 25Mbps, 2 -- 50Mbps */ buf[4] = (1 << 7) | /* emphasis: 1 -- off */ (0 << 6) | /* emphasis time constant: 0 -- reserved */ (0 << 3) | /* frequency: 0 -- 48kHz, 1 -- 44,1kHz, 2 -- 32kHz */ 0; /* quantization: 0 -- 16bit linear, 1 -- 12bit nonlinear */ va_end(ap); break; case dv_audio_control: buf[1] = (0 << 6) | /* copy protection: 0 -- unrestricted */ (1 << 4) | /* input source: 1 -- digital input */ (3 << 2) | /* compression: 3 -- no information */ 0; /* misc. info/SMPTE emphasis off */ buf[2] = (1 << 7) | /* recording start point: 1 -- no */ (1 << 6) | /* recording end point: 1 -- no */ (1 << 3) | /* recording mode: 1 -- original */ 7; buf[3] = (1 << 7) | /* direction: 1 -- forward */ (c->sys->pix_fmt == PIX_FMT_YUV420P ? 0x20 : /* speed */ c->sys->ltc_divisor * 4); buf[4] = (1 << 7) | /* reserved -- always 1 */ 0x7f; /* genre category */ break; case dv_audio_recdate: case dv_video_recdate: /* VAUX recording date */ ct = c->start_time + av_rescale_rnd(c->frames, c->sys->time_base.num, c->sys->time_base.den, AV_ROUND_DOWN); brktimegm(ct, &tc); buf[1] = 0xff; /* ds, tm, tens of time zone, units of time zone */ /* 0xff is very likely to be "unknown" */ buf[2] = (3 << 6) | /* reserved -- always 1 */ ((tc.tm_mday / 10) << 4) | /* Tens of day */ (tc.tm_mday % 10); /* Units of day */ buf[3] = /* we set high 4 bits to 0, shouldn't we set them to week? 
*/ ((tc.tm_mon / 10) << 4) | /* Tens of month */ (tc.tm_mon % 10); /* Units of month */ buf[4] = (((tc.tm_year % 100) / 10) << 4) | /* Tens of year */ (tc.tm_year % 10); /* Units of year */ break; case dv_audio_rectime: /* AAUX recording time */ case dv_video_rectime: /* VAUX recording time */ ct = c->start_time + av_rescale_rnd(c->frames, c->sys->time_base.num, c->sys->time_base.den, AV_ROUND_DOWN); brktimegm(ct, &tc); buf[1] = (3 << 6) | /* reserved -- always 1 */ 0x3f; /* tens of frame, units of frame: 0x3f - "unknown" ? */ buf[2] = (1 << 7) | /* reserved -- always 1 */ ((tc.tm_sec / 10) << 4) | /* Tens of seconds */ (tc.tm_sec % 10); /* Units of seconds */ buf[3] = (1 << 7) | /* reserved -- always 1 */ ((tc.tm_min / 10) << 4) | /* Tens of minutes */ (tc.tm_min % 10); /* Units of minutes */ buf[4] = (3 << 6) | /* reserved -- always 1 */ ((tc.tm_hour / 10) << 4) | /* Tens of hours */ (tc.tm_hour % 10); /* Units of hours */ break; default: buf[1] = buf[2] = buf[3] = buf[4] = 0xff; } return 5; } static void dv_inject_audio(DVMuxContext *c, int channel, uint8_t* frame_ptr) { int i, j, d, of, size; size = 4 * dv_audio_frame_size(c->sys, c->frames); frame_ptr += channel * c->sys->difseg_size * 150 * 80; for (i = 0; i < c->sys->difseg_size; i++) { frame_ptr += 6 * 80; /* skip DIF segment header */ for (j = 0; j < 9; j++) { dv_write_pack(dv_aaux_packs_dist[i][j], c, &frame_ptr[3], i >= c->sys->difseg_size/2); for (d = 8; d < 80; d+=2) { of = c->sys->audio_shuffle[i][j] + (d - 8)/2 * c->sys->audio_stride; if (of*2 >= size) continue; frame_ptr[d] = av_fifo_peek(c->audio_data[channel], of*2+1); // FIXME: maybe we have to admit frame_ptr[d+1] = av_fifo_peek(c->audio_data[channel], of*2); // that DV is a big-endian PCM } frame_ptr += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */ } } } static void dv_inject_metadata(DVMuxContext *c, uint8_t* frame) { int j, k; uint8_t* buf; for (buf = frame; buf < frame + c->sys->frame_size; buf += 150 * 80) { /* DV subcode: 2nd and 3d DIFs */ 
for (j = 80; j < 80 * 3; j += 80) { for (k = 6; k < 6 * 8; k += 8) dv_write_pack(dv_timecode, c, &buf[j+k]); if (((long)(buf-frame)/(c->sys->frame_size/(c->sys->difseg_size*c->sys->n_difchan))%c->sys->difseg_size) > 5) { /* FIXME: is this really needed ? */ dv_write_pack(dv_video_recdate, c, &buf[j+14]); dv_write_pack(dv_video_rectime, c, &buf[j+22]); dv_write_pack(dv_video_recdate, c, &buf[j+38]); dv_write_pack(dv_video_rectime, c, &buf[j+46]); } } /* DV VAUX: 4th, 5th and 6th 3DIFs */ for (j = 80*3 + 3; j < 80*6; j += 80) { dv_write_pack(dv_video_recdate, c, &buf[j+5*2]); dv_write_pack(dv_video_rectime, c, &buf[j+5*3]); dv_write_pack(dv_video_recdate, c, &buf[j+5*11]); dv_write_pack(dv_video_rectime, c, &buf[j+5*12]); } } } /* * The following 3 functions constitute our interface to the world */ int dv_assemble_frame(DVMuxContext *c, AVStream* st, uint8_t* data, int data_size, uint8_t** frame) { int i, reqasize; *frame = &c->frame_buf[0]; reqasize = 4 * dv_audio_frame_size(c->sys, c->frames); switch (st->codec->codec_type) { case AVMEDIA_TYPE_VIDEO: /* FIXME: we have to have more sensible approach than this one */ if (c->has_video) av_log(st->codec, AV_LOG_ERROR, "Can't process DV frame #%d. Insufficient audio data or severe sync problem.\n", c->frames); memcpy(*frame, data, c->sys->frame_size); c->has_video = 1; break; case AVMEDIA_TYPE_AUDIO: for (i = 0; i < c->n_ast && st != c->ast[i]; i++); /* FIXME: we have to have more sensible approach than this one */ if (av_fifo_size(c->audio_data[i]) + data_size >= 100*AVCODEC_MAX_AUDIO_FRAME_SIZE) av_log(st->codec, AV_LOG_ERROR, "Can't process DV frame #%d. Insufficient video data or severe sync problem.\n", c->frames); av_fifo_generic_write(c->audio_data[i], data, data_size, NULL); /* Let us see if we've got enough audio for one DV frame. */ c->has_audio |= ((reqasize <= av_fifo_size(c->audio_data[i])) << i); break; default: break; } /* Let us see if we have enough data to construct one DV frame. 
*/ if (c->has_video == 1 && c->has_audio + 1 == 1 << c->n_ast) { dv_inject_metadata(c, *frame); c->has_audio = 0; for (i=0; i < c->n_ast; i++) { dv_inject_audio(c, i, *frame); av_fifo_drain(c->audio_data[i], reqasize); c->has_audio |= ((reqasize <= av_fifo_size(c->audio_data[i])) << i); } c->has_video = 0; c->frames++; return c->sys->frame_size; } return 0; } DVMuxContext* dv_init_mux(AVFormatContext* s) { DVMuxContext *c = s->priv_data; AVStream *vst = NULL; int i; /* we support at most 1 video and 2 audio streams */ if (s->nb_streams > 3) return NULL; c->n_ast = 0; c->ast[0] = c->ast[1] = NULL; /* We have to sort out where audio and where video stream is */ for (i=0; i<s->nb_streams; i++) { switch (s->streams[i]->codec->codec_type) { case AVMEDIA_TYPE_VIDEO: if (vst) return NULL; vst = s->streams[i]; break; case AVMEDIA_TYPE_AUDIO: if (c->n_ast > 1) return NULL; c->ast[c->n_ast++] = s->streams[i]; break; default: goto bail_out; } } /* Some checks -- DV format is very picky about its incoming streams */ if (!vst || vst->codec->codec_id != CODEC_ID_DVVIDEO) goto bail_out; for (i=0; i<c->n_ast; i++) { if (c->ast[i] && (c->ast[i]->codec->codec_id != CODEC_ID_PCM_S16LE || c->ast[i]->codec->sample_rate != 48000 || c->ast[i]->codec->channels != 2)) goto bail_out; } c->sys = ff_dv_codec_profile(vst->codec); if (!c->sys) goto bail_out; if ((c->n_ast > 1) && (c->sys->n_difchan < 2)) { /* only 1 stereo pair is allowed in 25Mbps mode */ goto bail_out; } /* Ok, everything seems to be in working order */ c->frames = 0; c->has_audio = 0; c->has_video = 0; c->start_time = (time_t)s->timestamp; for (i=0; i < c->n_ast; i++) { if (c->ast[i] && !(c->audio_data[i]=av_fifo_alloc(100*AVCODEC_MAX_AUDIO_FRAME_SIZE))) { while (i > 0) { i--; av_fifo_free(c->audio_data[i]); } goto bail_out; } } return c; bail_out: return NULL; } void dv_delete_mux(DVMuxContext *c) { int i; for (i=0; i < c->n_ast; i++) av_fifo_free(c->audio_data[i]); } #if CONFIG_DV_MUXER static int 
dv_write_header(AVFormatContext *s) { if (!dv_init_mux(s)) { av_log(s, AV_LOG_ERROR, "Can't initialize DV format!\n" "Make sure that you supply exactly two streams:\n" " video: 25fps or 29.97fps, audio: 2ch/48kHz/PCM\n" " (50Mbps allows an optional second audio stream)\n"); return -1; } return 0; } static int dv_write_packet(struct AVFormatContext *s, AVPacket *pkt) { uint8_t* frame; int fsize; fsize = dv_assemble_frame(s->priv_data, s->streams[pkt->stream_index], pkt->data, pkt->size, &frame); if (fsize > 0) { put_buffer(s->pb, frame, fsize); put_flush_packet(s->pb); } return 0; } /* * We might end up with some extra A/V data without matching counterpart. * E.g. video data without enough audio to write the complete frame. * Currently we simply drop the last frame. I don't know whether this * is the best strategy of all */ static int dv_write_trailer(struct AVFormatContext *s) { dv_delete_mux(s->priv_data); return 0; } AVOutputFormat dv_muxer = { "dv", NULL_IF_CONFIG_SMALL("DV video format"), NULL, "dv", sizeof(DVMuxContext), CODEC_ID_PCM_S16LE, CODEC_ID_DVVIDEO, dv_write_header, dv_write_packet, dv_write_trailer, }; #endif /* CONFIG_DV_MUXER */
123linslouis-android-video-cutter
jni/libavformat/dvenc.c
C
asf20
16,302
/* * MOV demuxer * Copyright (c) 2001 Fabrice Bellard * Copyright (c) 2009 Baptiste Coudurier <baptiste dot coudurier at gmail dot com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <limits.h> //#define DEBUG //#define DEBUG_METADATA //#define MOV_EXPORT_ALL_METADATA #include "libavutil/intreadwrite.h" #include "libavutil/avstring.h" #include "avformat.h" #include "riff.h" #include "isom.h" #include "libavcodec/mpeg4audio.h" #include "libavcodec/mpegaudiodata.h" #include "libavcodec/get_bits.h" #if CONFIG_ZLIB #include <zlib.h> #endif /* * First version by Francois Revol revol@free.fr * Seek function by Gael Chardon gael.dev@4now.net * * Features and limitations: * - reads most of the QT files I have (at least the structure), * Sample QuickTime files with mp3 audio can be found at: http://www.3ivx.com/showcase.html * - the code is quite ugly... 
maybe I won't do it recursive next time :-) * * Funny I didn't know about http://sourceforge.net/projects/qt-ffmpeg/ * when coding this :) (it's a writer anyway) * * Reference documents: * http://www.geocities.com/xhelmboyx/quicktime/formats/qtm-layout.txt * Apple: * http://developer.apple.com/documentation/QuickTime/QTFF/ * http://developer.apple.com/documentation/QuickTime/QTFF/qtff.pdf * QuickTime is a trademark of Apple (AFAIK :)) */ #include "qtpalette.h" #undef NDEBUG #include <assert.h> /* XXX: it's the first time I make a recursive parser I think... sorry if it's ugly :P */ /* those functions parse an atom */ /* return code: 0: continue to parse next atom <0: error occurred, exit */ /* links atom IDs to parse functions */ typedef struct MOVParseTableEntry { uint32_t type; int (*parse)(MOVContext *ctx, ByteIOContext *pb, MOVAtom atom); } MOVParseTableEntry; static const MOVParseTableEntry mov_default_parse_table[]; static int mov_metadata_trkn(MOVContext *c, ByteIOContext *pb, unsigned len) { char buf[16]; get_be16(pb); // unknown snprintf(buf, sizeof(buf), "%d", get_be16(pb)); av_metadata_set2(&c->fc->metadata, "track", buf, 0); get_be16(pb); // total tracks return 0; } static const uint32_t mac_to_unicode[128] = { 0x00C4,0x00C5,0x00C7,0x00C9,0x00D1,0x00D6,0x00DC,0x00E1, 0x00E0,0x00E2,0x00E4,0x00E3,0x00E5,0x00E7,0x00E9,0x00E8, 0x00EA,0x00EB,0x00ED,0x00EC,0x00EE,0x00EF,0x00F1,0x00F3, 0x00F2,0x00F4,0x00F6,0x00F5,0x00FA,0x00F9,0x00FB,0x00FC, 0x2020,0x00B0,0x00A2,0x00A3,0x00A7,0x2022,0x00B6,0x00DF, 0x00AE,0x00A9,0x2122,0x00B4,0x00A8,0x2260,0x00C6,0x00D8, 0x221E,0x00B1,0x2264,0x2265,0x00A5,0x00B5,0x2202,0x2211, 0x220F,0x03C0,0x222B,0x00AA,0x00BA,0x03A9,0x00E6,0x00F8, 0x00BF,0x00A1,0x00AC,0x221A,0x0192,0x2248,0x2206,0x00AB, 0x00BB,0x2026,0x00A0,0x00C0,0x00C3,0x00D5,0x0152,0x0153, 0x2013,0x2014,0x201C,0x201D,0x2018,0x2019,0x00F7,0x25CA, 0x00FF,0x0178,0x2044,0x20AC,0x2039,0x203A,0xFB01,0xFB02, 0x2021,0x00B7,0x201A,0x201E,0x2030,0x00C2,0x00CA,0x00C1, 
0x00CB,0x00C8,0x00CD,0x00CE,0x00CF,0x00CC,0x00D3,0x00D4, 0xF8FF,0x00D2,0x00DA,0x00DB,0x00D9,0x0131,0x02C6,0x02DC, 0x00AF,0x02D8,0x02D9,0x02DA,0x00B8,0x02DD,0x02DB,0x02C7, }; static int mov_read_mac_string(MOVContext *c, ByteIOContext *pb, int len, char *dst, int dstlen) { char *p = dst; char *end = dst+dstlen-1; int i; for (i = 0; i < len; i++) { uint8_t t, c = get_byte(pb); if (c < 0x80 && p < end) *p++ = c; else PUT_UTF8(mac_to_unicode[c-0x80], t, if (p < end) *p++ = t;); } *p = 0; return p - dst; } static int mov_read_udta_string(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { #ifdef MOV_EXPORT_ALL_METADATA char tmp_key[5]; #endif char str[1024], key2[16], language[4] = {0}; const char *key = NULL; uint16_t str_size, langcode = 0; uint32_t data_type = 0; int (*parse)(MOVContext*, ByteIOContext*, unsigned) = NULL; switch (atom.type) { case MKTAG(0xa9,'n','a','m'): key = "title"; break; case MKTAG(0xa9,'a','u','t'): case MKTAG(0xa9,'A','R','T'): key = "artist"; break; case MKTAG(0xa9,'w','r','t'): key = "composer"; break; case MKTAG( 'c','p','r','t'): case MKTAG(0xa9,'c','p','y'): key = "copyright"; break; case MKTAG(0xa9,'c','m','t'): case MKTAG(0xa9,'i','n','f'): key = "comment"; break; case MKTAG(0xa9,'a','l','b'): key = "album"; break; case MKTAG(0xa9,'d','a','y'): key = "date"; break; case MKTAG(0xa9,'g','e','n'): key = "genre"; break; case MKTAG(0xa9,'t','o','o'): case MKTAG(0xa9,'e','n','c'): key = "encoder"; break; case MKTAG( 'd','e','s','c'): key = "description";break; case MKTAG( 'l','d','e','s'): key = "synopsis"; break; case MKTAG( 't','v','s','h'): key = "show"; break; case MKTAG( 't','v','e','n'): key = "episode_id";break; case MKTAG( 't','v','n','n'): key = "network"; break; case MKTAG( 't','r','k','n'): key = "track"; parse = mov_metadata_trkn; break; } if (c->itunes_metadata && atom.size > 8) { int data_size = get_be32(pb); int tag = get_le32(pb); if (tag == MKTAG('d','a','t','a')) { data_type = get_be32(pb); // type get_be32(pb); // unknown 
str_size = data_size - 16; atom.size -= 16; } else return 0; } else if (atom.size > 4 && key && !c->itunes_metadata) { str_size = get_be16(pb); // string length langcode = get_be16(pb); ff_mov_lang_to_iso639(langcode, language); atom.size -= 4; } else str_size = atom.size; #ifdef MOV_EXPORT_ALL_METADATA if (!key) { snprintf(tmp_key, 5, "%.4s", (char*)&atom.type); key = tmp_key; } #endif if (!key) return 0; if (atom.size < 0) return -1; str_size = FFMIN3(sizeof(str)-1, str_size, atom.size); if (parse) parse(c, pb, str_size); else { if (data_type == 3 || (data_type == 0 && langcode < 0x800)) { // MAC Encoded mov_read_mac_string(c, pb, str_size, str, sizeof(str)); } else { get_buffer(pb, str, str_size); str[str_size] = 0; } av_metadata_set2(&c->fc->metadata, key, str, 0); if (*language && strcmp(language, "und")) { snprintf(key2, sizeof(key2), "%s-%s", key, language); av_metadata_set2(&c->fc->metadata, key2, str, 0); } } #ifdef DEBUG_METADATA av_log(c->fc, AV_LOG_DEBUG, "lang \"%3s\" ", language); av_log(c->fc, AV_LOG_DEBUG, "tag \"%s\" value \"%s\" atom \"%.4s\" %d %lld\n", key, str, (char*)&atom.type, str_size, atom.size); #endif return 0; } static int mov_read_chpl(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { int64_t start; int i, nb_chapters, str_len; char str[256+1]; if ((atom.size -= 5) < 0) return 0; get_be32(pb); // version + flags get_be32(pb); // ??? 
nb_chapters = get_byte(pb); for (i = 0; i < nb_chapters; i++) { if (atom.size < 9) return 0; start = get_be64(pb); str_len = get_byte(pb); if ((atom.size -= 9+str_len) < 0) return 0; get_buffer(pb, str, str_len); str[str_len] = 0; ff_new_chapter(c->fc, i, (AVRational){1,10000000}, start, AV_NOPTS_VALUE, str); } return 0; } static int mov_read_default(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { int64_t total_size = 0; MOVAtom a; int i; if (atom.size < 0) atom.size = INT64_MAX; while (total_size + 8 < atom.size && !url_feof(pb)) { int (*parse)(MOVContext*, ByteIOContext*, MOVAtom) = NULL; a.size = atom.size; a.type=0; if(atom.size >= 8) { a.size = get_be32(pb); a.type = get_le32(pb); } total_size += 8; dprintf(c->fc, "type: %08x %.4s sz: %"PRIx64" %"PRIx64" %"PRIx64"\n", a.type, (char*)&a.type, a.size, atom.size, total_size); if (a.size == 1) { /* 64 bit extended size */ a.size = get_be64(pb) - 8; total_size += 8; } if (a.size == 0) { a.size = atom.size - total_size; if (a.size <= 8) break; } a.size -= 8; if(a.size < 0) break; a.size = FFMIN(a.size, atom.size - total_size); for (i = 0; mov_default_parse_table[i].type; i++) if (mov_default_parse_table[i].type == a.type) { parse = mov_default_parse_table[i].parse; break; } // container is user data if (!parse && (atom.type == MKTAG('u','d','t','a') || atom.type == MKTAG('i','l','s','t'))) parse = mov_read_udta_string; if (!parse) { /* skip leaf atoms data */ url_fskip(pb, a.size); } else { int64_t start_pos = url_ftell(pb); int64_t left; int err = parse(c, pb, a); if (err < 0) return err; if (c->found_moov && c->found_mdat && (url_is_streamed(pb) || start_pos + a.size == url_fsize(pb))) return 0; left = a.size - url_ftell(pb) + start_pos; if (left > 0) /* skip garbage at atom end */ url_fskip(pb, left); } total_size += a.size; } if (total_size < atom.size && atom.size < 0x7ffff) url_fskip(pb, atom.size - total_size); return 0; } static int mov_read_dref(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { AVStream 
*st; MOVStreamContext *sc; int entries, i, j; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; sc = st->priv_data; get_be32(pb); // version + flags entries = get_be32(pb); if (entries >= UINT_MAX / sizeof(*sc->drefs)) return -1; sc->drefs = av_mallocz(entries * sizeof(*sc->drefs)); if (!sc->drefs) return AVERROR(ENOMEM); sc->drefs_count = entries; for (i = 0; i < sc->drefs_count; i++) { MOVDref *dref = &sc->drefs[i]; uint32_t size = get_be32(pb); int64_t next = url_ftell(pb) + size - 4; dref->type = get_le32(pb); get_be32(pb); // version + flags dprintf(c->fc, "type %.4s size %d\n", (char*)&dref->type, size); if (dref->type == MKTAG('a','l','i','s') && size > 150) { /* macintosh alias record */ uint16_t volume_len, len; int16_t type; url_fskip(pb, 10); volume_len = get_byte(pb); volume_len = FFMIN(volume_len, 27); get_buffer(pb, dref->volume, 27); dref->volume[volume_len] = 0; av_log(c->fc, AV_LOG_DEBUG, "volume %s, len %d\n", dref->volume, volume_len); url_fskip(pb, 12); len = get_byte(pb); len = FFMIN(len, 63); get_buffer(pb, dref->filename, 63); dref->filename[len] = 0; av_log(c->fc, AV_LOG_DEBUG, "filename %s, len %d\n", dref->filename, len); url_fskip(pb, 16); /* read next level up_from_alias/down_to_target */ dref->nlvl_from = get_be16(pb); dref->nlvl_to = get_be16(pb); av_log(c->fc, AV_LOG_DEBUG, "nlvl from %d, nlvl to %d\n", dref->nlvl_from, dref->nlvl_to); url_fskip(pb, 16); for (type = 0; type != -1 && url_ftell(pb) < next; ) { type = get_be16(pb); len = get_be16(pb); av_log(c->fc, AV_LOG_DEBUG, "type %d, len %d\n", type, len); if (len&1) len += 1; if (type == 2) { // absolute path av_free(dref->path); dref->path = av_mallocz(len+1); if (!dref->path) return AVERROR(ENOMEM); get_buffer(pb, dref->path, len); if (len > volume_len && !strncmp(dref->path, dref->volume, volume_len)) { len -= volume_len; memmove(dref->path, dref->path+volume_len, len); dref->path[len] = 0; } for (j = 0; j < len; j++) if (dref->path[j] == ':') 
dref->path[j] = '/'; av_log(c->fc, AV_LOG_DEBUG, "path %s\n", dref->path); } else if (type == 0) { // directory name av_free(dref->dir); dref->dir = av_malloc(len+1); if (!dref->dir) return AVERROR(ENOMEM); get_buffer(pb, dref->dir, len); dref->dir[len] = 0; for (j = 0; j < len; j++) if (dref->dir[j] == ':') dref->dir[j] = '/'; av_log(c->fc, AV_LOG_DEBUG, "dir %s\n", dref->dir); } else url_fskip(pb, len); } } url_fseek(pb, next, SEEK_SET); } return 0; } static int mov_read_hdlr(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { AVStream *st; uint32_t type; uint32_t ctype; if (c->fc->nb_streams < 1) // meta before first trak return 0; st = c->fc->streams[c->fc->nb_streams-1]; get_byte(pb); /* version */ get_be24(pb); /* flags */ /* component type */ ctype = get_le32(pb); type = get_le32(pb); /* component subtype */ dprintf(c->fc, "ctype= %.4s (0x%08x)\n", (char*)&ctype, ctype); dprintf(c->fc, "stype= %.4s\n", (char*)&type); if (type == MKTAG('v','i','d','e')) st->codec->codec_type = AVMEDIA_TYPE_VIDEO; else if(type == MKTAG('s','o','u','n')) st->codec->codec_type = AVMEDIA_TYPE_AUDIO; else if(type == MKTAG('m','1','a',' ')) st->codec->codec_id = CODEC_ID_MP2; else if(type == MKTAG('s','u','b','p')) st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE; get_be32(pb); /* component manufacture */ get_be32(pb); /* component flags */ get_be32(pb); /* component flags mask */ return 0; } int ff_mp4_read_descr_len(ByteIOContext *pb) { int len = 0; int count = 4; while (count--) { int c = get_byte(pb); len = (len << 7) | (c & 0x7f); if (!(c & 0x80)) break; } return len; } static int mp4_read_descr(AVFormatContext *fc, ByteIOContext *pb, int *tag) { int len; *tag = get_byte(pb); len = ff_mp4_read_descr_len(pb); dprintf(fc, "MPEG4 description: tag=0x%02x len=%d\n", *tag, len); return len; } #define MP4ESDescrTag 0x03 #define MP4DecConfigDescrTag 0x04 #define MP4DecSpecificDescrTag 0x05 static const AVCodecTag mp4_audio_types[] = { { CODEC_ID_MP3ON4, AOT_PS }, /* old mp3on4 draft */ { 
CODEC_ID_MP3ON4, AOT_L1 }, /* layer 1 */ { CODEC_ID_MP3ON4, AOT_L2 }, /* layer 2 */ { CODEC_ID_MP3ON4, AOT_L3 }, /* layer 3 */ { CODEC_ID_MP4ALS, AOT_ALS }, /* MPEG-4 ALS */ { CODEC_ID_NONE, AOT_NULL }, }; int ff_mov_read_esds(AVFormatContext *fc, ByteIOContext *pb, MOVAtom atom) { AVStream *st; int tag, len; if (fc->nb_streams < 1) return 0; st = fc->streams[fc->nb_streams-1]; get_be32(pb); /* version + flags */ len = mp4_read_descr(fc, pb, &tag); if (tag == MP4ESDescrTag) { get_be16(pb); /* ID */ get_byte(pb); /* priority */ } else get_be16(pb); /* ID */ len = mp4_read_descr(fc, pb, &tag); if (tag == MP4DecConfigDescrTag) { int object_type_id = get_byte(pb); get_byte(pb); /* stream type */ get_be24(pb); /* buffer size db */ get_be32(pb); /* max bitrate */ get_be32(pb); /* avg bitrate */ st->codec->codec_id= ff_codec_get_id(ff_mp4_obj_type, object_type_id); dprintf(fc, "esds object type id 0x%02x\n", object_type_id); len = mp4_read_descr(fc, pb, &tag); if (tag == MP4DecSpecificDescrTag) { dprintf(fc, "Specific MPEG4 header len=%d\n", len); if((uint64_t)len > (1<<30)) return -1; st->codec->extradata = av_mallocz(len + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); get_buffer(pb, st->codec->extradata, len); st->codec->extradata_size = len; if (st->codec->codec_id == CODEC_ID_AAC) { MPEG4AudioConfig cfg; ff_mpeg4audio_get_config(&cfg, st->codec->extradata, st->codec->extradata_size); st->codec->channels = cfg.channels; if (cfg.object_type == 29 && cfg.sampling_index < 3) // old mp3on4 st->codec->sample_rate = ff_mpa_freq_tab[cfg.sampling_index]; else st->codec->sample_rate = cfg.sample_rate; // ext sample rate ? 
dprintf(fc, "mp4a config channels %d obj %d ext obj %d " "sample rate %d ext sample rate %d\n", st->codec->channels, cfg.object_type, cfg.ext_object_type, cfg.sample_rate, cfg.ext_sample_rate); if (!(st->codec->codec_id = ff_codec_get_id(mp4_audio_types, cfg.object_type))) st->codec->codec_id = CODEC_ID_AAC; } } } return 0; } static int mov_read_esds(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { return ff_mov_read_esds(c->fc, pb, atom); } static int mov_read_pasp(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { const int num = get_be32(pb); const int den = get_be32(pb); AVStream *st; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; if (den != 0) { if ((st->sample_aspect_ratio.den != 1 || st->sample_aspect_ratio.num) && // default (den != st->sample_aspect_ratio.den || num != st->sample_aspect_ratio.num)) av_log(c->fc, AV_LOG_WARNING, "sample aspect ratio already set to %d:%d, overriding by 'pasp' atom\n", st->sample_aspect_ratio.num, st->sample_aspect_ratio.den); st->sample_aspect_ratio.num = num; st->sample_aspect_ratio.den = den; } return 0; } /* this atom contains actual media data */ static int mov_read_mdat(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { if(atom.size == 0) /* wrong one (MP4) */ return 0; c->found_mdat=1; return 0; /* now go for moov */ } /* read major brand, minor version and compatible brands and store them as metadata */ static int mov_read_ftyp(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { uint32_t minor_ver; int comp_brand_size; char minor_ver_str[11]; /* 32 bit integer -> 10 digits + null */ char* comp_brands_str; uint8_t type[5] = {0}; get_buffer(pb, type, 4); if (strcmp(type, "qt ")) c->isom = 1; av_log(c->fc, AV_LOG_DEBUG, "ISO: File Type Major Brand: %.4s\n",(char *)&type); av_metadata_set2(&c->fc->metadata, "major_brand", type, 0); minor_ver = get_be32(pb); /* minor version */ snprintf(minor_ver_str, sizeof(minor_ver_str), "%d", minor_ver); av_metadata_set2(&c->fc->metadata, "minor_version", 
minor_ver_str, 0); comp_brand_size = atom.size - 8; if (comp_brand_size < 0) return -1; comp_brands_str = av_malloc(comp_brand_size + 1); /* Add null terminator */ if (!comp_brands_str) return AVERROR(ENOMEM); get_buffer(pb, comp_brands_str, comp_brand_size); comp_brands_str[comp_brand_size] = 0; av_metadata_set2(&c->fc->metadata, "compatible_brands", comp_brands_str, 0); av_freep(&comp_brands_str); return 0; } /* this atom should contain all header atoms */ static int mov_read_moov(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { if (mov_read_default(c, pb, atom) < 0) return -1; /* we parsed the 'moov' atom, we can terminate the parsing as soon as we find the 'mdat' */ /* so we don't parse the whole file if over a network */ c->found_moov=1; return 0; /* now go for mdat */ } static int mov_read_moof(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { c->fragment.moof_offset = url_ftell(pb) - 8; dprintf(c->fc, "moof offset %llx\n", c->fragment.moof_offset); return mov_read_default(c, pb, atom); } static int mov_read_mdhd(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { AVStream *st; MOVStreamContext *sc; int version; char language[4] = {0}; unsigned lang; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; sc = st->priv_data; version = get_byte(pb); if (version > 1) return -1; /* unsupported */ get_be24(pb); /* flags */ if (version == 1) { get_be64(pb); get_be64(pb); } else { get_be32(pb); /* creation time */ get_be32(pb); /* modification time */ } sc->time_scale = get_be32(pb); st->duration = (version == 1) ? 
get_be64(pb) : get_be32(pb); /* duration */ lang = get_be16(pb); /* language */ if (ff_mov_lang_to_iso639(lang, language)) av_metadata_set2(&st->metadata, "language", language, 0); get_be16(pb); /* quality */ return 0; } static int mov_read_mvhd(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { int version = get_byte(pb); /* version */ get_be24(pb); /* flags */ if (version == 1) { get_be64(pb); get_be64(pb); } else { get_be32(pb); /* creation time */ get_be32(pb); /* modification time */ } c->time_scale = get_be32(pb); /* time scale */ dprintf(c->fc, "time scale = %i\n", c->time_scale); c->duration = (version == 1) ? get_be64(pb) : get_be32(pb); /* duration */ get_be32(pb); /* preferred scale */ get_be16(pb); /* preferred volume */ url_fskip(pb, 10); /* reserved */ url_fskip(pb, 36); /* display matrix */ get_be32(pb); /* preview time */ get_be32(pb); /* preview duration */ get_be32(pb); /* poster time */ get_be32(pb); /* selection time */ get_be32(pb); /* selection duration */ get_be32(pb); /* current time */ get_be32(pb); /* next track ID */ return 0; } static int mov_read_smi(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { AVStream *st; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; if((uint64_t)atom.size > (1<<30)) return -1; // currently SVQ3 decoder expect full STSD header - so let's fake it // this should be fixed and just SMI header should be passed av_free(st->codec->extradata); st->codec->extradata = av_mallocz(atom.size + 0x5a + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); st->codec->extradata_size = 0x5a + atom.size; memcpy(st->codec->extradata, "SVQ3", 4); // fake get_buffer(pb, st->codec->extradata + 0x5a, atom.size); dprintf(c->fc, "Reading SMI %"PRId64" %s\n", atom.size, st->codec->extradata + 0x5a); return 0; } static int mov_read_enda(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { AVStream *st; int little_endian; if (c->fc->nb_streams < 1) return 0; st = 
c->fc->streams[c->fc->nb_streams-1]; little_endian = get_be16(pb); dprintf(c->fc, "enda %d\n", little_endian); if (little_endian == 1) { switch (st->codec->codec_id) { case CODEC_ID_PCM_S24BE: st->codec->codec_id = CODEC_ID_PCM_S24LE; break; case CODEC_ID_PCM_S32BE: st->codec->codec_id = CODEC_ID_PCM_S32LE; break; case CODEC_ID_PCM_F32BE: st->codec->codec_id = CODEC_ID_PCM_F32LE; break; case CODEC_ID_PCM_F64BE: st->codec->codec_id = CODEC_ID_PCM_F64LE; break; default: break; } } return 0; } /* FIXME modify qdm2/svq3/h264 decoders to take full atom as extradata */ static int mov_read_extradata(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { AVStream *st; uint64_t size; uint8_t *buf; if (c->fc->nb_streams < 1) // will happen with jp2 files return 0; st= c->fc->streams[c->fc->nb_streams-1]; size= (uint64_t)st->codec->extradata_size + atom.size + 8 + FF_INPUT_BUFFER_PADDING_SIZE; if(size > INT_MAX || (uint64_t)atom.size > INT_MAX) return -1; buf= av_realloc(st->codec->extradata, size); if(!buf) return -1; st->codec->extradata= buf; buf+= st->codec->extradata_size; st->codec->extradata_size= size - FF_INPUT_BUFFER_PADDING_SIZE; AV_WB32( buf , atom.size + 8); AV_WL32( buf + 4, atom.type); get_buffer(pb, buf + 8, atom.size); return 0; } static int mov_read_wave(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { AVStream *st; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; if((uint64_t)atom.size > (1<<30)) return -1; if (st->codec->codec_id == CODEC_ID_QDM2) { // pass all frma atom to codec, needed at least for QDM2 av_free(st->codec->extradata); st->codec->extradata = av_mallocz(atom.size + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); st->codec->extradata_size = atom.size; get_buffer(pb, st->codec->extradata, atom.size); } else if (atom.size > 8) { /* to read frma, esds atoms */ if (mov_read_default(c, pb, atom) < 0) return -1; } else url_fskip(pb, atom.size); return 0; } /** * This function reads atom 
content and puts data in extradata without tag * nor size unlike mov_read_extradata. */ static int mov_read_glbl(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { AVStream *st; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; if((uint64_t)atom.size > (1<<30)) return -1; av_free(st->codec->extradata); st->codec->extradata = av_mallocz(atom.size + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); st->codec->extradata_size = atom.size; get_buffer(pb, st->codec->extradata, atom.size); return 0; } /** * An strf atom is a BITMAPINFOHEADER struct. This struct is 40 bytes itself, * but can have extradata appended at the end after the 40 bytes belonging * to the struct. */ static int mov_read_strf(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { AVStream *st; if (c->fc->nb_streams < 1) return 0; if (atom.size <= 40) return 0; st = c->fc->streams[c->fc->nb_streams-1]; if((uint64_t)atom.size > (1<<30)) return -1; av_free(st->codec->extradata); st->codec->extradata = av_mallocz(atom.size - 40 + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); st->codec->extradata_size = atom.size - 40; url_fskip(pb, 40); get_buffer(pb, st->codec->extradata, atom.size - 40); return 0; } static int mov_read_stco(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { AVStream *st; MOVStreamContext *sc; unsigned int i, entries; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; sc = st->priv_data; get_byte(pb); /* version */ get_be24(pb); /* flags */ entries = get_be32(pb); if(entries >= UINT_MAX/sizeof(int64_t)) return -1; sc->chunk_offsets = av_malloc(entries * sizeof(int64_t)); if (!sc->chunk_offsets) return AVERROR(ENOMEM); sc->chunk_count = entries; if (atom.type == MKTAG('s','t','c','o')) for(i=0; i<entries; i++) sc->chunk_offsets[i] = get_be32(pb); else if (atom.type == MKTAG('c','o','6','4')) for(i=0; i<entries; i++) sc->chunk_offsets[i] = get_be64(pb); else return -1; 
return 0; } /** * Compute codec id for 'lpcm' tag. * See CoreAudioTypes and AudioStreamBasicDescription at Apple. */ enum CodecID ff_mov_get_lpcm_codec_id(int bps, int flags) { if (flags & 1) { // floating point if (flags & 2) { // big endian if (bps == 32) return CODEC_ID_PCM_F32BE; else if (bps == 64) return CODEC_ID_PCM_F64BE; } else { if (bps == 32) return CODEC_ID_PCM_F32LE; else if (bps == 64) return CODEC_ID_PCM_F64LE; } } else { if (flags & 2) { if (bps == 8) // signed integer if (flags & 4) return CODEC_ID_PCM_S8; else return CODEC_ID_PCM_U8; else if (bps == 16) return CODEC_ID_PCM_S16BE; else if (bps == 24) return CODEC_ID_PCM_S24BE; else if (bps == 32) return CODEC_ID_PCM_S32BE; } else { if (bps == 8) if (flags & 4) return CODEC_ID_PCM_S8; else return CODEC_ID_PCM_U8; else if (bps == 16) return CODEC_ID_PCM_S16LE; else if (bps == 24) return CODEC_ID_PCM_S24LE; else if (bps == 32) return CODEC_ID_PCM_S32LE; } } return CODEC_ID_NONE; } static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { AVStream *st; MOVStreamContext *sc; int j, entries, pseudo_stream_id; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; sc = st->priv_data; get_byte(pb); /* version */ get_be24(pb); /* flags */ entries = get_be32(pb); for(pseudo_stream_id=0; pseudo_stream_id<entries; pseudo_stream_id++) { //Parsing Sample description table enum CodecID id; int dref_id = 1; MOVAtom a = { 0 }; int64_t start_pos = url_ftell(pb); int size = get_be32(pb); /* size */ uint32_t format = get_le32(pb); /* data format */ if (size >= 16) { get_be32(pb); /* reserved */ get_be16(pb); /* reserved */ dref_id = get_be16(pb); } if (st->codec->codec_tag && st->codec->codec_tag != format && (c->fc->video_codec_id ? ff_codec_get_id(codec_movvideo_tags, format) != c->fc->video_codec_id : st->codec->codec_tag != MKTAG('j','p','e','g')) ){ /* Multiple fourcc, we skip JPEG. 
This is not correct, we should * export it as a separate AVStream but this needs a few changes * in the MOV demuxer, patch welcome. */ av_log(c->fc, AV_LOG_WARNING, "multiple fourcc not supported\n"); url_fskip(pb, size - (url_ftell(pb) - start_pos)); continue; } sc->pseudo_stream_id = st->codec->codec_tag ? -1 : pseudo_stream_id; sc->dref_id= dref_id; st->codec->codec_tag = format; id = ff_codec_get_id(codec_movaudio_tags, format); if (id<=0 && ((format&0xFFFF) == 'm'+('s'<<8) || (format&0xFFFF) == 'T'+('S'<<8))) id = ff_codec_get_id(ff_codec_wav_tags, bswap_32(format)&0xFFFF); if (st->codec->codec_type != AVMEDIA_TYPE_VIDEO && id > 0) { st->codec->codec_type = AVMEDIA_TYPE_AUDIO; } else if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO && /* do not overwrite codec type */ format && format != MKTAG('m','p','4','s')) { /* skip old asf mpeg4 tag */ id = ff_codec_get_id(codec_movvideo_tags, format); if (id <= 0) id = ff_codec_get_id(ff_codec_bmp_tags, format); if (id > 0) st->codec->codec_type = AVMEDIA_TYPE_VIDEO; else if(st->codec->codec_type == AVMEDIA_TYPE_DATA){ id = ff_codec_get_id(ff_codec_movsubtitle_tags, format); if(id > 0) st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE; } } dprintf(c->fc, "size=%d 4CC= %c%c%c%c codec_type=%d\n", size, (format >> 0) & 0xff, (format >> 8) & 0xff, (format >> 16) & 0xff, (format >> 24) & 0xff, st->codec->codec_type); if(st->codec->codec_type==AVMEDIA_TYPE_VIDEO) { unsigned int color_depth, len; int color_greyscale; st->codec->codec_id = id; get_be16(pb); /* version */ get_be16(pb); /* revision level */ get_be32(pb); /* vendor */ get_be32(pb); /* temporal quality */ get_be32(pb); /* spatial quality */ st->codec->width = get_be16(pb); /* width */ st->codec->height = get_be16(pb); /* height */ get_be32(pb); /* horiz resolution */ get_be32(pb); /* vert resolution */ get_be32(pb); /* data size, always 0 */ get_be16(pb); /* frames per samples */ len = get_byte(pb); /* codec name, pascal string */ if (len > 31) len = 31; 
mov_read_mac_string(c, pb, len, st->codec->codec_name, 32);
            if (len < 31)
                url_fskip(pb, 31 - len); /* name field is a fixed 31 bytes */
            /* codec_tag YV12 triggers an UV swap in rawdec.c */
            if (!memcmp(st->codec->codec_name, "Planar Y'CbCr 8-bit 4:2:0", 25))
                st->codec->codec_tag=MKTAG('I', '4', '2', '0');

            st->codec->bits_per_coded_sample = get_be16(pb); /* depth */
            st->codec->color_table_id = get_be16(pb); /* colortable id */
            dprintf(c->fc, "depth %d, ctab id %d\n",
                    st->codec->bits_per_coded_sample, st->codec->color_table_id);
            /* figure out the palette situation */
            color_depth = st->codec->bits_per_coded_sample & 0x1F;
            color_greyscale = st->codec->bits_per_coded_sample & 0x20;

            /* if the depth is 2, 4, or 8 bpp, file is palettized */
            if ((color_depth == 2) || (color_depth == 4) || (color_depth == 8)) {
                /* for palette traversal */
                unsigned int color_start, color_count, color_end;
                unsigned char r, g, b;

                /* NOTE(review): allocation result is not checked here — a
                 * failure would crash below; upstream later hardened this */
                st->codec->palctrl = av_malloc(sizeof(*st->codec->palctrl));
                if (color_greyscale) {
                    int color_index, color_dec;
                    /* compute the greyscale palette */
                    st->codec->bits_per_coded_sample = color_depth;
                    color_count = 1 << color_depth;
                    color_index = 255;
                    color_dec = 256 / (color_count - 1);
                    for (j = 0; j < color_count; j++) {
                        r = g = b = color_index;
                        st->codec->palctrl->palette[j] =
                            (r << 16) | (g << 8) | (b);
                        color_index -= color_dec;
                        if (color_index < 0)
                            color_index = 0;
                    }
                } else if (st->codec->color_table_id) {
                    const uint8_t *color_table;
                    /* if flag bit 3 is set, use the default palette */
                    color_count = 1 << color_depth;
                    if (color_depth == 2)
                        color_table = ff_qt_default_palette_4;
                    else if (color_depth == 4)
                        color_table = ff_qt_default_palette_16;
                    else
                        color_table = ff_qt_default_palette_256;

                    for (j = 0; j < color_count; j++) {
                        r = color_table[j * 3 + 0];
                        g = color_table[j * 3 + 1];
                        b = color_table[j * 3 + 2];
                        st->codec->palctrl->palette[j] =
                            (r << 16) | (g << 8) | (b);
                    }
                } else {
                    /* load the palette from the file */
                    color_start = get_be32(pb);
                    color_count = get_be16(pb);
                    color_end = get_be16(pb);
                    if
((color_start <= 255) &&
                        (color_end <= 255)) {
                        for (j = color_start; j <= color_end; j++) {
                            /* each R, G, or B component is 16 bits;
                             * only use the top 8 bits; skip alpha bytes
                             * up front */
                            get_byte(pb);
                            get_byte(pb);
                            r = get_byte(pb);
                            get_byte(pb);
                            g = get_byte(pb);
                            get_byte(pb);
                            b = get_byte(pb);
                            get_byte(pb);
                            st->codec->palctrl->palette[j] =
                                (r << 16) | (g << 8) | (b);
                        }
                    }
                }
                st->codec->palctrl->palette_changed = 1;
            }
        } else if(st->codec->codec_type==AVMEDIA_TYPE_AUDIO) {
            int bits_per_sample, flags;
            uint16_t version = get_be16(pb);

            st->codec->codec_id = id;
            get_be16(pb); /* revision level */
            get_be32(pb); /* vendor */

            st->codec->channels = get_be16(pb); /* channel count */
            dprintf(c->fc, "audio channels %d\n", st->codec->channels);
            st->codec->bits_per_coded_sample = get_be16(pb); /* sample size */

            sc->audio_cid = get_be16(pb);
            get_be16(pb); /* packet size = 0 */

            /* sample rate is 16.16 fixed point; keep the integer part */
            st->codec->sample_rate = ((get_be32(pb) >> 16));

            //Read QT version 1 fields. In version 0 these do not exist.
            dprintf(c->fc, "version =%d, isom =%d\n",version,c->isom);
            if(!c->isom) {
                if(version==1) {
                    sc->samples_per_frame = get_be32(pb);
                    get_be32(pb); /* bytes per packet */
                    sc->bytes_per_frame = get_be32(pb);
                    get_be32(pb); /* bytes per sample */
                } else if(version==2) {
                    get_be32(pb); /* sizeof struct only */
                    st->codec->sample_rate = av_int2dbl(get_be64(pb)); /* float 64 */
                    st->codec->channels = get_be32(pb);
                    get_be32(pb); /* always 0x7F000000 */
                    st->codec->bits_per_coded_sample = get_be32(pb); /* bits per channel if sound is uncompressed */
                    flags = get_be32(pb); /* lpcm format specific flag */
                    sc->bytes_per_frame = get_be32(pb); /* bytes per audio packet if constant */
                    sc->samples_per_frame = get_be32(pb); /* lpcm frames per audio packet if constant */
                    if (format == MKTAG('l','p','c','m'))
                        st->codec->codec_id = ff_mov_get_lpcm_codec_id(st->codec->bits_per_coded_sample, flags);
                }
            }

            /* fix up codec ids where the sample size contradicts the tag */
            switch (st->codec->codec_id) {
            case CODEC_ID_PCM_S8:
            case CODEC_ID_PCM_U8:
                if (st->codec->bits_per_coded_sample == 16)
st->codec->codec_id = CODEC_ID_PCM_S16BE;
                break;
            case CODEC_ID_PCM_S16LE:
            case CODEC_ID_PCM_S16BE:
                if (st->codec->bits_per_coded_sample == 8)
                    st->codec->codec_id = CODEC_ID_PCM_S8;
                else if (st->codec->bits_per_coded_sample == 24)
                    /* keep the endianness implied by the 16-bit id */
                    st->codec->codec_id =
                        st->codec->codec_id == CODEC_ID_PCM_S16BE ?
                        CODEC_ID_PCM_S24BE : CODEC_ID_PCM_S24LE;
                break;
            /* set values for old format before stsd version 1 appeared */
            case CODEC_ID_MACE3:
                sc->samples_per_frame = 6;
                sc->bytes_per_frame = 2*st->codec->channels;
                break;
            case CODEC_ID_MACE6:
                sc->samples_per_frame = 6;
                sc->bytes_per_frame = 1*st->codec->channels;
                break;
            case CODEC_ID_ADPCM_IMA_QT:
                sc->samples_per_frame = 64;
                sc->bytes_per_frame = 34*st->codec->channels;
                break;
            case CODEC_ID_GSM:
                sc->samples_per_frame = 160;
                sc->bytes_per_frame = 33;
                break;
            default:
                break;
            }

            bits_per_sample = av_get_bits_per_sample(st->codec->codec_id);
            if (bits_per_sample) {
                st->codec->bits_per_coded_sample = bits_per_sample;
                sc->sample_size = (bits_per_sample >> 3) * st->codec->channels;
            }
        } else if(st->codec->codec_type==AVMEDIA_TYPE_SUBTITLE){
            // ttxt stsd contains display flags, justification, background
            // color, fonts, and default styles, so fake an atom to read it
            MOVAtom fake_atom = { .size = size - (url_ftell(pb) - start_pos) };
            if (format != AV_RL32("mp4s")) // mp4s contains a regular esds atom
                mov_read_glbl(c, pb, fake_atom);
            st->codec->codec_id= id;
            st->codec->width = sc->width;
            st->codec->height = sc->height;
        } else {
            /* other codec type, just skip (rtp, mp4s, tmcd ...) */
            url_fskip(pb, size - (url_ftell(pb) - start_pos));
        }
        /* this will read extra atoms at the end (wave, alac, damr, avcC, SMI ...)
*/
        a.size = size - (url_ftell(pb) - start_pos);
        if (a.size > 8) {
            if (mov_read_default(c, pb, a) < 0)
                return -1;
        } else if (a.size > 0)
            url_fskip(pb, a.size);
    }

    /* fall back to the media timescale when no sample rate was stored */
    if(st->codec->codec_type==AVMEDIA_TYPE_AUDIO && st->codec->sample_rate==0 && sc->time_scale>1)
        st->codec->sample_rate= sc->time_scale;

    /* special codec parameters handling */
    switch (st->codec->codec_id) {
#if CONFIG_DV_DEMUXER
    case CODEC_ID_DVAUDIO:
        c->dv_fctx = avformat_alloc_context();
        c->dv_demux = dv_init_demux(c->dv_fctx);
        if (!c->dv_demux) {
            av_log(c->fc, AV_LOG_ERROR, "dv demux context init error\n");
            return -1;
        }
        sc->dv_audio_container = 1;
        st->codec->codec_id = CODEC_ID_PCM_S16LE;
        break;
#endif
    /* no ifdef since parameters are always those */
    case CODEC_ID_QCELP:
        // force sample rate for qcelp when not stored in mov
        if (st->codec->codec_tag != MKTAG('Q','c','l','p'))
            st->codec->sample_rate = 8000;
        st->codec->frame_size= 160;
        st->codec->channels= 1; /* really needed */
        break;
    case CODEC_ID_AMR_NB:
    case CODEC_ID_AMR_WB:
        st->codec->frame_size= sc->samples_per_frame;
        st->codec->channels= 1; /* really needed */
        /* force sample rate for amr, stsd in 3gp does not store sample rate */
        if (st->codec->codec_id == CODEC_ID_AMR_NB)
            st->codec->sample_rate = 8000;
        else if (st->codec->codec_id == CODEC_ID_AMR_WB)
            st->codec->sample_rate = 16000;
        break;
    case CODEC_ID_MP2:
    case CODEC_ID_MP3:
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO; /* force type after stsd for m1a hdlr */
        st->need_parsing = AVSTREAM_PARSE_FULL;
        break;
    case CODEC_ID_GSM:
    case CODEC_ID_ADPCM_MS:
    case CODEC_ID_ADPCM_IMA_WAV:
        st->codec->block_align = sc->bytes_per_frame;
        break;
    case CODEC_ID_ALAC:
        /* frame size / channel count live inside the alac extradata */
        if (st->codec->extradata_size == 36) {
            st->codec->frame_size = AV_RB32(st->codec->extradata+12);
            st->codec->channels = AV_RB8 (st->codec->extradata+21);
        }
        break;
    default:
        break;
    }

    return 0;
}

/* Parse the 'stsc' (sample-to-chunk) atom into sc->stsc_data. */
static int mov_read_stsc(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    unsigned int i, entries;

    if (c->fc->nb_streams < 1)
return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    get_byte(pb); /* version */
    get_be24(pb); /* flags */

    entries = get_be32(pb);

    dprintf(c->fc, "track[%i].stsc.entries = %i\n", c->fc->nb_streams-1, entries);

    /* overflow guard before sizing the allocation */
    if(entries >= UINT_MAX / sizeof(*sc->stsc_data))
        return -1;
    sc->stsc_data = av_malloc(entries * sizeof(*sc->stsc_data));
    if (!sc->stsc_data)
        return AVERROR(ENOMEM);
    sc->stsc_count = entries;

    for(i=0; i<entries; i++) {
        sc->stsc_data[i].first = get_be32(pb);
        sc->stsc_data[i].count = get_be32(pb);
        sc->stsc_data[i].id = get_be32(pb);
    }
    return 0;
}

/* Parse the 'stps' (partial sync sample) atom into sc->stps_data. */
static int mov_read_stps(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    unsigned i, entries;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    get_be32(pb); // version + flags

    entries = get_be32(pb);
    if (entries >= UINT_MAX / sizeof(*sc->stps_data))
        return -1;
    sc->stps_data = av_malloc(entries * sizeof(*sc->stps_data));
    if (!sc->stps_data)
        return AVERROR(ENOMEM);
    sc->stps_count = entries;

    for (i = 0; i < entries; i++) {
        sc->stps_data[i] = get_be32(pb);
        //dprintf(c->fc, "stps %d\n", sc->stps_data[i]);
    }
    return 0;
}

/* Parse the 'stss' (sync sample / keyframe list) atom into sc->keyframes. */
static int mov_read_stss(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    unsigned int i, entries;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    get_byte(pb); /* version */
    get_be24(pb); /* flags */

    entries = get_be32(pb);

    dprintf(c->fc, "keyframe_count = %d\n", entries);

    if(entries >= UINT_MAX / sizeof(int))
        return -1;
    sc->keyframes = av_malloc(entries * sizeof(int));
    if (!sc->keyframes)
        return AVERROR(ENOMEM);
    sc->keyframe_count = entries;

    for(i=0; i<entries; i++) {
        sc->keyframes[i] = get_be32(pb);
        //dprintf(c->fc, "keyframes[]=%d\n", sc->keyframes[i]);
    }
    return 0;
}

/* Parse the 'stsz' or compact 'stz2' (sample size) atom; fills
 * sc->sample_sizes unless a constant sample size applies. */
static int mov_read_stsz(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    unsigned int i, entries, sample_size,
field_size, num_bytes;
    GetBitContext gb;
    unsigned char* buf;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    get_byte(pb); /* version */
    get_be24(pb); /* flags */

    if (atom.type == MKTAG('s','t','s','z')) {
        sample_size = get_be32(pb);
        if (!sc->sample_size) /* do not overwrite value computed in stsd */
            sc->sample_size = sample_size;
        field_size = 32;
    } else {
        /* 'stz2': per-sample sizes packed at 4/8/16 bits each */
        sample_size = 0;
        get_be24(pb); /* reserved */
        field_size = get_byte(pb);
    }
    entries = get_be32(pb);

    dprintf(c->fc, "sample_size = %d sample_count = %d\n", sc->sample_size, entries);

    sc->sample_count = entries;
    if (sample_size)
        return 0; /* constant sample size, no table needed */

    if (field_size != 4 && field_size != 8 && field_size != 16 && field_size != 32) {
        av_log(c->fc, AV_LOG_ERROR, "Invalid sample field size %d\n", field_size);
        return -1;
    }

    if (entries >= UINT_MAX / sizeof(int) || entries >= (UINT_MAX - 4) / field_size)
        return -1;
    sc->sample_sizes = av_malloc(entries * sizeof(int));
    if (!sc->sample_sizes)
        return AVERROR(ENOMEM);

    /* +4 bits rounds up for the 4-bit packed case */
    num_bytes = (entries*field_size+4)>>3;

    buf = av_malloc(num_bytes+FF_INPUT_BUFFER_PADDING_SIZE);
    if (!buf) {
        av_freep(&sc->sample_sizes);
        return AVERROR(ENOMEM);
    }

    if (get_buffer(pb, buf, num_bytes) < num_bytes) {
        av_freep(&sc->sample_sizes);
        av_free(buf);
        return -1;
    }

    init_get_bits(&gb, buf, 8*num_bytes);

    for(i=0; i<entries; i++)
        sc->sample_sizes[i] = get_bits_long(&gb, field_size);

    av_free(buf);
    return 0;
}

/* Parse the 'stts' (decoding time-to-sample) atom; also derives the
 * stream duration and total frame count. */
static int mov_read_stts(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    unsigned int i, entries;
    int64_t duration=0;
    int64_t total_sample_count=0;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    get_byte(pb); /* version */
    get_be24(pb); /* flags */
    entries = get_be32(pb);

    dprintf(c->fc, "track[%i].stts.entries = %i\n", c->fc->nb_streams-1, entries);

    if(entries >= UINT_MAX / sizeof(*sc->stts_data))
        return -1;
    sc->stts_data = av_malloc(entries * sizeof(*sc->stts_data));
    if
(!sc->stts_data)
        return AVERROR(ENOMEM);
    sc->stts_count = entries;

    for(i=0; i<entries; i++) {
        int sample_duration;
        int sample_count;

        sample_count=get_be32(pb);
        sample_duration = get_be32(pb);
        sc->stts_data[i].count= sample_count;
        sc->stts_data[i].duration= sample_duration;

        dprintf(c->fc, "sample_count=%d, sample_duration=%d\n",sample_count,sample_duration);

        duration+=(int64_t)sample_duration*sample_count;
        total_sample_count+=sample_count;
    }

    st->nb_frames= total_sample_count;
    if(duration)
        st->duration= duration;
    return 0;
}

/* Parse the 'ctts' (composition time offset) atom; also tracks the
 * largest negative offset as sc->dts_shift. */
static int mov_read_ctts(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    unsigned int i, entries;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    get_byte(pb); /* version */
    get_be24(pb); /* flags */
    entries = get_be32(pb);

    dprintf(c->fc, "track[%i].ctts.entries = %i\n", c->fc->nb_streams-1, entries);

    if(entries >= UINT_MAX / sizeof(*sc->ctts_data))
        return -1;
    sc->ctts_data = av_malloc(entries * sizeof(*sc->ctts_data));
    if (!sc->ctts_data)
        return AVERROR(ENOMEM);
    sc->ctts_count = entries;

    for(i=0; i<entries; i++) {
        int count =get_be32(pb);
        int duration =get_be32(pb);

        sc->ctts_data[i].count = count;
        sc->ctts_data[i].duration= duration;
        if (duration < 0)
            sc->dts_shift = FFMAX(sc->dts_shift, -duration);
    }

    dprintf(c->fc, "dts shift %d\n", sc->dts_shift);

    return 0;
}

/* Build the seek index for a stream from the parsed stbl tables
 * (chunk offsets, stsc, stts, stss/stps, sample sizes). */
static void mov_build_index(MOVContext *mov, AVStream *st)
{
    MOVStreamContext *sc = st->priv_data;
    int64_t current_offset;
    int64_t current_dts = 0;
    unsigned int stts_index = 0;
    unsigned int stsc_index = 0;
    unsigned int stss_index = 0;
    unsigned int stps_index = 0;
    unsigned int i, j;
    uint64_t stream_size = 0;

    /* adjust first dts according to edit list */
    if (sc->time_offset) {
        int rescaled = sc->time_offset < 0 ?
av_rescale(sc->time_offset, sc->time_scale, mov->time_scale) : sc->time_offset;
        current_dts = -rescaled;
        if (sc->ctts_data && sc->ctts_data[0].duration / sc->stts_data[0].duration > 16) {
            /* more than 16 frames delay, dts are likely wrong
               this happens with files created by iMovie */
            sc->wrong_dts = 1;
            st->codec->has_b_frames = 1;
        }
    }

    /* only use old uncompressed audio chunk demuxing when stts specifies it */
    if (!(st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
          sc->stts_count == 1 && sc->stts_data[0].duration == 1)) {
        unsigned int current_sample = 0;
        unsigned int stts_sample = 0;
        unsigned int sample_size;
        unsigned int distance = 0;
        /* some files number sync samples from 1 instead of 0 */
        int key_off = sc->keyframes && sc->keyframes[0] == 1;

        current_dts -= sc->dts_shift;

        if (sc->sample_count >= UINT_MAX / sizeof(*st->index_entries))
            return;
        st->index_entries = av_malloc(sc->sample_count*sizeof(*st->index_entries));
        if (!st->index_entries)
            return;
        st->index_entries_allocated_size = sc->sample_count*sizeof(*st->index_entries);

        /* walk chunks, expanding each into its per-sample index entries */
        for (i = 0; i < sc->chunk_count; i++) {
            current_offset = sc->chunk_offsets[i];
            if (stsc_index + 1 < sc->stsc_count &&
                i + 1 == sc->stsc_data[stsc_index + 1].first)
                stsc_index++;
            for (j = 0; j < sc->stsc_data[stsc_index].count; j++) {
                int keyframe = 0;
                if (current_sample >= sc->sample_count) {
                    av_log(mov->fc, AV_LOG_ERROR, "wrong sample count\n");
                    return;
                }

                if (!sc->keyframe_count || current_sample+key_off == sc->keyframes[stss_index]) {
                    keyframe = 1;
                    if (stss_index + 1 < sc->keyframe_count)
                        stss_index++;
                } else if (sc->stps_count && current_sample+key_off == sc->stps_data[stps_index]) {
                    keyframe = 1;
                    if (stps_index + 1 < sc->stps_count)
                        stps_index++;
                }
                if (keyframe)
                    distance = 0;
                sample_size = sc->sample_size > 0 ?
sc->sample_size : sc->sample_sizes[current_sample];
                /* only index samples that belong to this pseudo stream */
                if(sc->pseudo_stream_id == -1 ||
                   sc->stsc_data[stsc_index].id - 1 == sc->pseudo_stream_id) {
                    AVIndexEntry *e = &st->index_entries[st->nb_index_entries++];
                    e->pos = current_offset;
                    e->timestamp = current_dts;
                    e->size = sample_size;
                    e->min_distance = distance;
                    e->flags = keyframe ? AVINDEX_KEYFRAME : 0;
                    dprintf(mov->fc, "AVIndex stream %d, sample %d, offset %"PRIx64", dts %"PRId64", "
                            "size %d, distance %d, keyframe %d\n", st->index, current_sample,
                            current_offset, current_dts, sample_size, distance, keyframe);
                }

                current_offset += sample_size;
                stream_size += sample_size;
                current_dts += sc->stts_data[stts_index].duration;
                distance++;
                stts_sample++;
                current_sample++;
                if (stts_index + 1 < sc->stts_count && stts_sample == sc->stts_data[stts_index].count) {
                    stts_sample = 0;
                    stts_index++;
                }
            }
        }
        if (st->duration > 0)
            st->codec->bit_rate = stream_size*8*sc->time_scale/st->duration;
    } else {
        /* old-style uncompressed audio: index whole (sub)chunks instead
         * of individual one-sample "frames" */
        unsigned chunk_samples, total = 0;

        // compute total chunk count
        for (i = 0; i < sc->stsc_count; i++) {
            unsigned count, chunk_count;

            chunk_samples = sc->stsc_data[i].count;
            if (sc->samples_per_frame && chunk_samples % sc->samples_per_frame) {
                av_log(mov->fc, AV_LOG_ERROR, "error unaligned chunk\n");
                return;
            }
            if (sc->samples_per_frame >= 160) { // gsm
                count = chunk_samples / sc->samples_per_frame;
            } else if (sc->samples_per_frame > 1) {
                /* split chunks into packets of at most ~1024 samples */
                unsigned samples = (1024/sc->samples_per_frame)*sc->samples_per_frame;
                count = (chunk_samples+samples-1) / samples;
            } else {
                count = (chunk_samples+1023) / 1024;
            }

            if (i < sc->stsc_count - 1)
                chunk_count = sc->stsc_data[i+1].first - sc->stsc_data[i].first;
            else
                chunk_count = sc->chunk_count - (sc->stsc_data[i].first - 1);
            total += chunk_count * count;
        }

        dprintf(mov->fc, "chunk count %d\n", total);
        if (total >= UINT_MAX / sizeof(*st->index_entries))
            return;
        st->index_entries = av_malloc(total*sizeof(*st->index_entries));
        if (!st->index_entries)
            return;
        st->index_entries_allocated_size =
total*sizeof(*st->index_entries);

        // populate index
        for (i = 0; i < sc->chunk_count; i++) {
            current_offset = sc->chunk_offsets[i];
            if (stsc_index + 1 < sc->stsc_count &&
                i + 1 == sc->stsc_data[stsc_index + 1].first)
                stsc_index++;
            chunk_samples = sc->stsc_data[stsc_index].count;

            while (chunk_samples > 0) {
                AVIndexEntry *e;
                unsigned size, samples;

                if (sc->samples_per_frame >= 160) { // gsm
                    samples = sc->samples_per_frame;
                    size = sc->bytes_per_frame;
                } else {
                    if (sc->samples_per_frame > 1) {
                        samples = FFMIN((1024 / sc->samples_per_frame)*
                                        sc->samples_per_frame, chunk_samples);
                        size = (samples / sc->samples_per_frame) * sc->bytes_per_frame;
                    } else {
                        samples = FFMIN(1024, chunk_samples);
                        size = samples * sc->sample_size;
                    }
                }

                if (st->nb_index_entries >= total) {
                    av_log(mov->fc, AV_LOG_ERROR, "wrong chunk count %d\n", total);
                    return;
                }
                e = &st->index_entries[st->nb_index_entries++];
                e->pos = current_offset;
                e->timestamp = current_dts;
                e->size = size;
                e->min_distance = 0;
                e->flags = AVINDEX_KEYFRAME; /* raw audio: every packet seekable */
                dprintf(mov->fc, "AVIndex stream %d, chunk %d, offset %"PRIx64", dts %"PRId64", "
                        "size %d, duration %d\n", st->index, i, current_offset, current_dts,
                        size, samples);

                current_offset += size;
                current_dts += samples;
                chunk_samples -= samples;
            }
        }
    }
}

/* Open the external file named by a data reference ('dref') entry;
 * returns 0 on success, AVERROR(ENOENT) otherwise. */
static int mov_open_dref(ByteIOContext **pb, char *src, MOVDref *ref)
{
    /* try relative path, we do not try the absolute because it can leak information about our
       system to an attacker */
    if (ref->nlvl_to > 0 && ref->nlvl_from > 0) {
        char filename[1024];
        char *src_path;
        int i, l;

        /* find a source dir */
        src_path = strrchr(src, '/');
        if (src_path)
            src_path++;
        else
            src_path = src;

        /* find a next level down to target */
        for (i = 0, l = strlen(ref->path) - 1; l >= 0; l--)
            if (ref->path[l] == '/') {
                if (i == ref->nlvl_to - 1)
                    break;
                else
                    i++;
            }

        /* compose filename if next level down to target was found */
        if (i == ref->nlvl_to - 1 && src_path - src < sizeof(filename)) {
            memcpy(filename, src, src_path - src);
            filename[src_path - src] = 0;
for (i = 1; i < ref->nlvl_from; i++)
                av_strlcat(filename, "../", 1024);

            av_strlcat(filename, ref->path + l + 1, 1024);

            if (!url_fopen(pb, filename, URL_RDONLY))
                return 0;
        }
    }

    return AVERROR(ENOENT);
};

/* Parse a 'trak' atom: create the AVStream + MOVStreamContext, read
 * sub-atoms, sanity-check the sample tables and build the seek index. */
static int mov_read_trak(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    int ret;

    st = av_new_stream(c->fc, c->fc->nb_streams);
    if (!st) return AVERROR(ENOMEM);
    sc = av_mallocz(sizeof(MOVStreamContext));
    if (!sc) return AVERROR(ENOMEM);

    st->priv_data = sc;
    st->codec->codec_type = AVMEDIA_TYPE_DATA; /* refined by stsd/hdlr */
    sc->ffindex = st->index;

    if ((ret = mov_read_default(c, pb, atom)) < 0)
        return ret;

    /* sanity checks */
    if (sc->chunk_count && (!sc->stts_count || !sc->stsc_count ||
                            (!sc->sample_size && !sc->sample_count))) {
        av_log(c->fc, AV_LOG_ERROR, "stream %d, missing mandatory atoms, broken header\n",
               st->index);
        return 0;
    }

    if (!sc->time_scale) {
        av_log(c->fc, AV_LOG_WARNING, "stream %d, timescale not set\n", st->index);
        sc->time_scale = c->time_scale;
        if (!sc->time_scale)
            sc->time_scale = 1;
    }

    av_set_pts_info(st, 64, 1, sc->time_scale);

    if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
        !st->codec->frame_size && sc->stts_count == 1) {
        st->codec->frame_size = av_rescale(sc->stts_data[0].duration,
                                           st->codec->sample_rate, sc->time_scale);
        dprintf(c->fc, "frame size %d\n", st->codec->frame_size);
    }

    mov_build_index(c, st);

    /* samples may live in an external file referenced by 'dref' */
    if (sc->dref_id-1 < sc->drefs_count && sc->drefs[sc->dref_id-1].path) {
        MOVDref *dref = &sc->drefs[sc->dref_id - 1];
        if (mov_open_dref(&sc->pb, c->fc->filename, dref) < 0)
            av_log(c->fc, AV_LOG_ERROR,
                   "stream %d, error opening alias: path='%s', dir='%s', "
                   "filename='%s', volume='%s', nlvl_from=%d, nlvl_to=%d\n",
                   st->index, dref->path, dref->dir, dref->filename,
                   dref->volume, dref->nlvl_from, dref->nlvl_to);
    } else
        sc->pb = c->fc->pb;

    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        if (st->codec->width != sc->width || st->codec->height != sc->height) {
            AVRational r = av_d2q(((double)st->codec->height * sc->width) /
((double)st->codec->width * sc->height), INT_MAX);
            if (st->sample_aspect_ratio.num)
                st->sample_aspect_ratio = av_mul_q(st->sample_aspect_ratio, r);
            else
                st->sample_aspect_ratio = r;
        }

        av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
                  sc->time_scale*st->nb_frames, st->duration, INT_MAX);
    }

    switch (st->codec->codec_id) {
#if CONFIG_H261_DECODER
    case CODEC_ID_H261:
#endif
#if CONFIG_H263_DECODER
    case CODEC_ID_H263:
#endif
#if CONFIG_H264_DECODER
    case CODEC_ID_H264:
#endif
#if CONFIG_MPEG4_DECODER
    case CODEC_ID_MPEG4:
#endif
        st->codec->width = 0; /* let decoder init width/height */
        st->codec->height= 0;
        break;
    }

    /* Do not need those anymore. */
    av_freep(&sc->chunk_offsets);
    av_freep(&sc->stsc_data);
    av_freep(&sc->sample_sizes);
    av_freep(&sc->keyframes);
    av_freep(&sc->stts_data);
    av_freep(&sc->stps_data);

    return 0;
}

/* Parse the iTunes 'ilst' metadata atom: run the default parser with
 * the itunes_metadata flag set so children are interpreted as tags. */
static int mov_read_ilst(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
    int ret;
    c->itunes_metadata = 1;
    ret = mov_read_default(c, pb, atom);
    c->itunes_metadata = 0;
    return ret;
}

/* Parse a 'meta' atom: scan forward for the 'hdlr' child, then rewind
 * and hand the remainder to the default parser. */
static int mov_read_meta(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
    while (atom.size > 8) {
        uint32_t tag = get_le32(pb);
        atom.size -= 4;
        if (tag == MKTAG('h','d','l','r')) {
            url_fseek(pb, -8, SEEK_CUR); /* back up over size+tag */
            atom.size += 8;
            return mov_read_default(c, pb, atom);
        }
    }
    return 0;
}

/* Parse the 'tkhd' (track header) atom: track id, display matrix and
 * track dimensions; derives the sample aspect ratio from the matrix. */
static int mov_read_tkhd(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
    int i;
    int width;
    int height;
    int64_t disp_transform[2];
    int display_matrix[3][2];
    AVStream *st;
    MOVStreamContext *sc;
    int version;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    version = get_byte(pb);
    get_be24(pb); /* flags */
    /*
    MOV_TRACK_ENABLED 0x0001
    MOV_TRACK_IN_MOVIE 0x0002
    MOV_TRACK_IN_PREVIEW 0x0004
    MOV_TRACK_IN_POSTER 0x0008
    */

    if (version == 1) {
        get_be64(pb);
        get_be64(pb);
    } else {
        get_be32(pb); /* creation time */
        get_be32(pb); /* modification time */
    }
    st->id = (int)get_be32(pb); /* track id (NOT 0 !)*/
    get_be32(pb); /* reserved */

    /* highlevel 
(considering edits) duration in movie timebase */
    (version == 1) ? get_be64(pb) : get_be32(pb);
    get_be32(pb); /* reserved */
    get_be32(pb); /* reserved */

    get_be16(pb); /* layer */
    get_be16(pb); /* alternate group */
    get_be16(pb); /* volume */
    get_be16(pb); /* reserved */

    //read in the display matrix (outlined in ISO 14496-12, Section 6.2.2)
    // they're kept in fixed point format through all calculations
    // ignore u,v,z b/c we don't need the scale factor to calc aspect ratio
    for (i = 0; i < 3; i++) {
        display_matrix[i][0] = get_be32(pb); // 16.16 fixed point
        display_matrix[i][1] = get_be32(pb); // 16.16 fixed point
        get_be32(pb); // 2.30 fixed point (not used)
    }

    width = get_be32(pb);  // 16.16 fixed point track width
    height = get_be32(pb); // 16.16 fixed point track height
    sc->width = width >> 16;
    sc->height = height >> 16;

    // transform the display width/height according to the matrix
    // skip this if the display matrix is the default identity matrix
    // or if it is rotating the picture, ex iPhone 3GS
    // to keep the same scale, use [width height 1<<16]
    if (width && height &&
        ((display_matrix[0][0] != 65536 ||
          display_matrix[1][1] != 65536) &&
         !display_matrix[0][1] &&
         !display_matrix[1][0] &&
         !display_matrix[2][0] && !display_matrix[2][1])) {
        for (i = 0; i < 2; i++)
            disp_transform[i] =
                (int64_t) width  * display_matrix[0][i] +
                (int64_t) height * display_matrix[1][i] +
                ((int64_t) display_matrix[2][i] << 16);

        //sample aspect ratio is new width/height divided by old width/height
        st->sample_aspect_ratio = av_d2q(
            ((double) disp_transform[0] * height) /
            ((double) disp_transform[1] * width), INT_MAX);
    }
    return 0;
}

/* Parse the 'tfhd' (track fragment header) atom: load per-fragment
 * defaults, falling back to the matching 'trex' entry. */
static int mov_read_tfhd(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
    MOVFragment *frag = &c->fragment;
    MOVTrackExt *trex = NULL;
    int flags, track_id, i;

    get_byte(pb); /* version */
    flags = get_be24(pb);

    track_id = get_be32(pb);
    if (!track_id)
        return -1;
    frag->track_id = track_id;
    for (i = 0; i < c->trex_count; i++)
        if (c->trex_data[i].track_id == frag->track_id)
{
            trex = &c->trex_data[i];
            break;
        }
    if (!trex) {
        av_log(c->fc, AV_LOG_ERROR, "could not find corresponding trex\n");
        return -1;
    }

    /* each flag bit selects "value present in tfhd" vs. trex default */
    if (flags & 0x01) frag->base_data_offset = get_be64(pb);
    else              frag->base_data_offset = frag->moof_offset;
    if (flags & 0x02) frag->stsd_id = get_be32(pb);
    else              frag->stsd_id = trex->stsd_id;

    frag->duration = flags & 0x08 ? get_be32(pb) : trex->duration;
    frag->size     = flags & 0x10 ? get_be32(pb) : trex->size;
    frag->flags    = flags & 0x20 ? get_be32(pb) : trex->flags;
    dprintf(c->fc, "frag flags 0x%x\n", frag->flags);
    return 0;
}

/* Parse the 'chap' atom: remember which track id carries chapters. */
static int mov_read_chap(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
    c->chapter_track = get_be32(pb);
    return 0;
}

/* Parse a 'trex' (track extends) atom: per-track fragment defaults,
 * appended to c->trex_data. */
static int mov_read_trex(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
    MOVTrackExt *trex;

    if ((uint64_t)c->trex_count+1 >= UINT_MAX / sizeof(*c->trex_data))
        return -1;
    trex = av_realloc(c->trex_data, (c->trex_count+1)*sizeof(*c->trex_data));
    if (!trex)
        return AVERROR(ENOMEM);
    c->trex_data = trex;
    trex = &c->trex_data[c->trex_count++];
    get_byte(pb); /* version */
    get_be24(pb); /* flags */
    trex->track_id = get_be32(pb);
    trex->stsd_id = get_be32(pb);
    trex->duration = get_be32(pb);
    trex->size = get_be32(pb);
    trex->flags = get_be32(pb);
    return 0;
}

/* Parse a 'trun' (track fragment run) atom and append its samples to
 * the index of the matching stream. */
static int mov_read_trun(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
    MOVFragment *frag = &c->fragment;
    AVStream *st = NULL;
    MOVStreamContext *sc;
    uint64_t offset;
    int64_t dts;
    int data_offset = 0;
    unsigned entries, first_sample_flags = frag->flags;
    int flags, distance, i;

    for (i = 0; i < c->fc->nb_streams; i++) {
        if (c->fc->streams[i]->id == frag->track_id) {
            st = c->fc->streams[i];
            break;
        }
    }
    if (!st) {
        av_log(c->fc, AV_LOG_ERROR, "could not find corresponding track id %d\n", frag->track_id);
        return -1;
    }
    sc = st->priv_data;
    if (sc->pseudo_stream_id+1 != frag->stsd_id)
        return 0;
    get_byte(pb); /* version */
    flags = get_be24(pb);
    entries = get_be32(pb);
    dprintf(c->fc, "flags 0x%x entries %d\n", flags, entries);
    if (flags & 0x001) data_offset =
get_be32(pb);
    if (flags & 0x004) first_sample_flags = get_be32(pb);
    if (flags & 0x800) {
        /* per-sample composition offsets present: grow the ctts table */
        MOVStts *ctts_data;
        if ((uint64_t)entries+sc->ctts_count >= UINT_MAX/sizeof(*sc->ctts_data))
            return -1;
        ctts_data = av_realloc(sc->ctts_data,
                               (entries+sc->ctts_count)*sizeof(*sc->ctts_data));
        if (!ctts_data)
            return AVERROR(ENOMEM);
        sc->ctts_data = ctts_data;
    }
    dts = st->duration;
    offset = frag->base_data_offset + data_offset;
    distance = 0;
    dprintf(c->fc, "first sample flags 0x%x\n", first_sample_flags);
    for (i = 0; i < entries; i++) {
        unsigned sample_size = frag->size;
        int sample_flags = i ? frag->flags : first_sample_flags;
        unsigned sample_duration = frag->duration;
        int keyframe;

        if (flags & 0x100) sample_duration = get_be32(pb);
        if (flags & 0x200) sample_size = get_be32(pb);
        if (flags & 0x400) sample_flags = get_be32(pb);
        if (flags & 0x800) {
            sc->ctts_data[sc->ctts_count].count = 1;
            sc->ctts_data[sc->ctts_count].duration = get_be32(pb);
            sc->ctts_count++;
        }
        /* audio is all-keyframe; otherwise use the sample flag bits */
        if ((keyframe = st->codec->codec_type == AVMEDIA_TYPE_AUDIO ||
             (flags & 0x004 && !i && !sample_flags) || sample_flags & 0x2000000))
            distance = 0;
        av_add_index_entry(st, offset, dts, sample_size, distance,
                           keyframe ? AVINDEX_KEYFRAME : 0);
        dprintf(c->fc, "AVIndex stream %d, sample %d, offset %"PRIx64", dts %"PRId64", "
                "size %d, distance %d, keyframe %d\n", st->index, sc->sample_count+i,
                offset, dts, sample_size, distance, keyframe);
        distance++;
        dts += sample_duration;
        offset += sample_size;
    }
    frag->moof_offset = offset;
    st->duration = dts;
    return 0;
}

/* this atom should be null (from specs), but some buggy files put the 'moov' atom inside it... */
/* like the files created with Adobe Premiere 5.0, for samples see */
/* http://graphics.tudelft.nl/~wouter/publications/soundtests/ */
static int mov_read_wide(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
    int err;

    if (atom.size < 8)
        return 0; /* continue */
    if (get_be32(pb) != 0) { /* 0 sized mdat atom... 
use the 'wide' atom size */ url_fskip(pb, atom.size - 4); return 0; } atom.type = get_le32(pb); atom.size -= 8; if (atom.type != MKTAG('m','d','a','t')) { url_fskip(pb, atom.size); return 0; } err = mov_read_mdat(c, pb, atom); return err; } static int mov_read_cmov(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { #if CONFIG_ZLIB ByteIOContext ctx; uint8_t *cmov_data; uint8_t *moov_data; /* uncompressed data */ long cmov_len, moov_len; int ret = -1; get_be32(pb); /* dcom atom */ if (get_le32(pb) != MKTAG('d','c','o','m')) return -1; if (get_le32(pb) != MKTAG('z','l','i','b')) { av_log(c->fc, AV_LOG_ERROR, "unknown compression for cmov atom !"); return -1; } get_be32(pb); /* cmvd atom */ if (get_le32(pb) != MKTAG('c','m','v','d')) return -1; moov_len = get_be32(pb); /* uncompressed size */ cmov_len = atom.size - 6 * 4; cmov_data = av_malloc(cmov_len); if (!cmov_data) return AVERROR(ENOMEM); moov_data = av_malloc(moov_len); if (!moov_data) { av_free(cmov_data); return AVERROR(ENOMEM); } get_buffer(pb, cmov_data, cmov_len); if(uncompress (moov_data, (uLongf *) &moov_len, (const Bytef *)cmov_data, cmov_len) != Z_OK) goto free_and_return; if(init_put_byte(&ctx, moov_data, moov_len, 0, NULL, NULL, NULL, NULL) != 0) goto free_and_return; atom.type = MKTAG('m','o','o','v'); atom.size = moov_len; #ifdef DEBUG // { int fd = open("/tmp/uncompheader.mov", O_WRONLY | O_CREAT); write(fd, moov_data, moov_len); close(fd); } #endif ret = mov_read_default(c, &ctx, atom); free_and_return: av_free(moov_data); av_free(cmov_data); return ret; #else av_log(c->fc, AV_LOG_ERROR, "this file requires zlib support compiled in\n"); return -1; #endif } /* edit list atom */ static int mov_read_elst(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { MOVStreamContext *sc; int i, edit_count; if (c->fc->nb_streams < 1) return 0; sc = c->fc->streams[c->fc->nb_streams-1]->priv_data; get_byte(pb); /* version */ get_be24(pb); /* flags */ edit_count = get_be32(pb); /* entries */ 
if((uint64_t)edit_count*12+8 > atom.size) return -1; for(i=0; i<edit_count; i++){ int time; int duration = get_be32(pb); /* Track duration */ time = get_be32(pb); /* Media time */ get_be32(pb); /* Media rate */ if (i == 0 && time >= -1) { sc->time_offset = time != -1 ? time : -duration; } } if(edit_count > 1) av_log(c->fc, AV_LOG_WARNING, "multiple edit list entries, " "a/v desync might occur, patch welcome\n"); dprintf(c->fc, "track[%i].edit_count = %i\n", c->fc->nb_streams-1, edit_count); return 0; } static const MOVParseTableEntry mov_default_parse_table[] = { { MKTAG('a','v','s','s'), mov_read_extradata }, { MKTAG('c','h','p','l'), mov_read_chpl }, { MKTAG('c','o','6','4'), mov_read_stco }, { MKTAG('c','t','t','s'), mov_read_ctts }, /* composition time to sample */ { MKTAG('d','i','n','f'), mov_read_default }, { MKTAG('d','r','e','f'), mov_read_dref }, { MKTAG('e','d','t','s'), mov_read_default }, { MKTAG('e','l','s','t'), mov_read_elst }, { MKTAG('e','n','d','a'), mov_read_enda }, { MKTAG('f','i','e','l'), mov_read_extradata }, { MKTAG('f','t','y','p'), mov_read_ftyp }, { MKTAG('g','l','b','l'), mov_read_glbl }, { MKTAG('h','d','l','r'), mov_read_hdlr }, { MKTAG('i','l','s','t'), mov_read_ilst }, { MKTAG('j','p','2','h'), mov_read_extradata }, { MKTAG('m','d','a','t'), mov_read_mdat }, { MKTAG('m','d','h','d'), mov_read_mdhd }, { MKTAG('m','d','i','a'), mov_read_default }, { MKTAG('m','e','t','a'), mov_read_meta }, { MKTAG('m','i','n','f'), mov_read_default }, { MKTAG('m','o','o','f'), mov_read_moof }, { MKTAG('m','o','o','v'), mov_read_moov }, { MKTAG('m','v','e','x'), mov_read_default }, { MKTAG('m','v','h','d'), mov_read_mvhd }, { MKTAG('S','M','I',' '), mov_read_smi }, /* Sorenson extension ??? 
*/ { MKTAG('a','l','a','c'), mov_read_extradata }, /* alac specific atom */ { MKTAG('a','v','c','C'), mov_read_glbl }, { MKTAG('p','a','s','p'), mov_read_pasp }, { MKTAG('s','t','b','l'), mov_read_default }, { MKTAG('s','t','c','o'), mov_read_stco }, { MKTAG('s','t','p','s'), mov_read_stps }, { MKTAG('s','t','r','f'), mov_read_strf }, { MKTAG('s','t','s','c'), mov_read_stsc }, { MKTAG('s','t','s','d'), mov_read_stsd }, /* sample description */ { MKTAG('s','t','s','s'), mov_read_stss }, /* sync sample */ { MKTAG('s','t','s','z'), mov_read_stsz }, /* sample size */ { MKTAG('s','t','t','s'), mov_read_stts }, { MKTAG('s','t','z','2'), mov_read_stsz }, /* compact sample size */ { MKTAG('t','k','h','d'), mov_read_tkhd }, /* track header */ { MKTAG('t','f','h','d'), mov_read_tfhd }, /* track fragment header */ { MKTAG('t','r','a','k'), mov_read_trak }, { MKTAG('t','r','a','f'), mov_read_default }, { MKTAG('t','r','e','f'), mov_read_default }, { MKTAG('c','h','a','p'), mov_read_chap }, { MKTAG('t','r','e','x'), mov_read_trex }, { MKTAG('t','r','u','n'), mov_read_trun }, { MKTAG('u','d','t','a'), mov_read_default }, { MKTAG('w','a','v','e'), mov_read_wave }, { MKTAG('e','s','d','s'), mov_read_esds }, { MKTAG('w','i','d','e'), mov_read_wide }, /* place holder */ { MKTAG('c','m','o','v'), mov_read_cmov }, { 0, NULL } }; static int mov_probe(AVProbeData *p) { unsigned int offset; uint32_t tag; int score = 0; /* check file header */ offset = 0; for(;;) { /* ignore invalid offset */ if ((offset + 8) > (unsigned int)p->buf_size) return score; tag = AV_RL32(p->buf + offset + 4); switch(tag) { /* check for obvious tags */ case MKTAG('j','P',' ',' '): /* jpeg 2000 signature */ case MKTAG('m','o','o','v'): case MKTAG('m','d','a','t'): case MKTAG('p','n','o','t'): /* detect movs with preview pics like ew.mov and april.mov */ case MKTAG('u','d','t','a'): /* Packet Video PVAuthor adds this and a lot of more junk */ case MKTAG('f','t','y','p'): return AVPROBE_SCORE_MAX; /* those are more 
common words, so rate then a bit less */ case MKTAG('e','d','i','w'): /* xdcam files have reverted first tags */ case MKTAG('w','i','d','e'): case MKTAG('f','r','e','e'): case MKTAG('j','u','n','k'): case MKTAG('p','i','c','t'): return AVPROBE_SCORE_MAX - 5; case MKTAG(0x82,0x82,0x7f,0x7d): case MKTAG('s','k','i','p'): case MKTAG('u','u','i','d'): case MKTAG('p','r','f','l'): offset = AV_RB32(p->buf+offset) + offset; /* if we only find those cause probedata is too small at least rate them */ score = AVPROBE_SCORE_MAX - 50; break; default: /* unrecognized tag */ return score; } } return score; } // must be done after parsing all trak because there's no order requirement static void mov_read_chapters(AVFormatContext *s) { MOVContext *mov = s->priv_data; AVStream *st = NULL; MOVStreamContext *sc; int64_t cur_pos; uint8_t *title = NULL; int i, len, i8, i16; for (i = 0; i < s->nb_streams; i++) if (s->streams[i]->id == mov->chapter_track) { st = s->streams[i]; break; } if (!st) { av_log(s, AV_LOG_ERROR, "Referenced QT chapter track not found\n"); return; } st->discard = AVDISCARD_ALL; sc = st->priv_data; cur_pos = url_ftell(sc->pb); for (i = 0; i < st->nb_index_entries; i++) { AVIndexEntry *sample = &st->index_entries[i]; int64_t end = i+1 < st->nb_index_entries ? st->index_entries[i+1].timestamp : st->duration; if (url_fseek(sc->pb, sample->pos, SEEK_SET) != sample->pos) { av_log(s, AV_LOG_ERROR, "Chapter %d not found in file\n", i); goto finish; } title = av_malloc(sample->size+2); get_buffer(sc->pb, title, sample->size); // the first two bytes are the length of the title len = AV_RB16(title); if (len > sample->size-2) continue; // The samples could theoretically be in any encoding if there's an encd // atom following, but in practice are only utf-8 or utf-16, distinguished // instead by the presence of a BOM if (AV_RB16(title+2) == 0xfeff) { uint8_t *utf8 = av_malloc(2*len+3); i8 = i16 = 0; while (i16 < len) { uint32_t ch; uint8_t tmp; GET_UTF16(ch, i16 < len ? 
AV_RB16(title + (i16+=2)) : 0, break;) PUT_UTF8(ch, tmp, if (i8 < 2*len) utf8[2+i8++] = tmp;) } utf8[2+i8] = 0; av_freep(&title); title = utf8; } ff_new_chapter(s, i, st->time_base, sample->timestamp, end, title+2); av_freep(&title); } finish: av_free(title); url_fseek(sc->pb, cur_pos, SEEK_SET); } static int mov_read_header(AVFormatContext *s, AVFormatParameters *ap) { MOVContext *mov = s->priv_data; ByteIOContext *pb = s->pb; int err; MOVAtom atom = { 0 }; mov->fc = s; /* .mov and .mp4 aren't streamable anyway (only progressive download if moov is before mdat) */ if(!url_is_streamed(pb)) atom.size = url_fsize(pb); else atom.size = INT64_MAX; /* check MOV header */ if ((err = mov_read_default(mov, pb, atom)) < 0) { av_log(s, AV_LOG_ERROR, "error reading header: %d\n", err); return err; } if (!mov->found_moov) { av_log(s, AV_LOG_ERROR, "moov atom not found\n"); return -1; } dprintf(mov->fc, "on_parse_exit_offset=%lld\n", url_ftell(pb)); if (!url_is_streamed(pb) && mov->chapter_track > 0) mov_read_chapters(s); return 0; } static AVIndexEntry *mov_find_next_sample(AVFormatContext *s, AVStream **st) { AVIndexEntry *sample = NULL; int64_t best_dts = INT64_MAX; int i; for (i = 0; i < s->nb_streams; i++) { AVStream *avst = s->streams[i]; MOVStreamContext *msc = avst->priv_data; if (msc->pb && msc->current_sample < avst->nb_index_entries) { AVIndexEntry *current_sample = &avst->index_entries[msc->current_sample]; int64_t dts = av_rescale(current_sample->timestamp, AV_TIME_BASE, msc->time_scale); dprintf(s, "stream %d, sample %d, dts %"PRId64"\n", i, msc->current_sample, dts); if (!sample || (url_is_streamed(s->pb) && current_sample->pos < sample->pos) || (!url_is_streamed(s->pb) && ((msc->pb != s->pb && dts < best_dts) || (msc->pb == s->pb && ((FFABS(best_dts - dts) <= AV_TIME_BASE && current_sample->pos < sample->pos) || (FFABS(best_dts - dts) > AV_TIME_BASE && dts < best_dts)))))) { sample = current_sample; best_dts = dts; *st = avst; } } } return sample; } static int 
mov_read_packet(AVFormatContext *s, AVPacket *pkt) { MOVContext *mov = s->priv_data; MOVStreamContext *sc; AVIndexEntry *sample; AVStream *st = NULL; int ret; retry: sample = mov_find_next_sample(s, &st); if (!sample) { mov->found_mdat = 0; if (!url_is_streamed(s->pb) || mov_read_default(mov, s->pb, (MOVAtom){ 0, INT64_MAX }) < 0 || url_feof(s->pb)) return AVERROR_EOF; dprintf(s, "read fragments, offset 0x%llx\n", url_ftell(s->pb)); goto retry; } sc = st->priv_data; /* must be done just before reading, to avoid infinite loop on sample */ sc->current_sample++; if (st->discard != AVDISCARD_ALL) { if (url_fseek(sc->pb, sample->pos, SEEK_SET) != sample->pos) { av_log(mov->fc, AV_LOG_ERROR, "stream %d, offset 0x%"PRIx64": partial file\n", sc->ffindex, sample->pos); return -1; } ret = av_get_packet(sc->pb, pkt, sample->size); if (ret < 0) return ret; #if CONFIG_DV_DEMUXER if (mov->dv_demux && sc->dv_audio_container) { dv_produce_packet(mov->dv_demux, pkt, pkt->data, pkt->size); av_free(pkt->data); pkt->size = 0; ret = dv_get_packet(mov->dv_demux, pkt); if (ret < 0) return ret; } #endif } pkt->stream_index = sc->ffindex; pkt->dts = sample->timestamp; if (sc->ctts_data) { pkt->pts = pkt->dts + sc->dts_shift + sc->ctts_data[sc->ctts_index].duration; /* update ctts context */ sc->ctts_sample++; if (sc->ctts_index < sc->ctts_count && sc->ctts_data[sc->ctts_index].count == sc->ctts_sample) { sc->ctts_index++; sc->ctts_sample = 0; } if (sc->wrong_dts) pkt->dts = AV_NOPTS_VALUE; } else { int64_t next_dts = (sc->current_sample < st->nb_index_entries) ? st->index_entries[sc->current_sample].timestamp : st->duration; pkt->duration = next_dts - pkt->dts; pkt->pts = pkt->dts; } if (st->discard == AVDISCARD_ALL) goto retry; pkt->flags |= sample->flags & AVINDEX_KEYFRAME ? 
AV_PKT_FLAG_KEY : 0; pkt->pos = sample->pos; dprintf(s, "stream %d, pts %"PRId64", dts %"PRId64", pos 0x%"PRIx64", duration %d\n", pkt->stream_index, pkt->pts, pkt->dts, pkt->pos, pkt->duration); return 0; } static int mov_seek_stream(AVFormatContext *s, AVStream *st, int64_t timestamp, int flags) { MOVStreamContext *sc = st->priv_data; int sample, time_sample; int i; sample = av_index_search_timestamp(st, timestamp, flags); dprintf(s, "stream %d, timestamp %"PRId64", sample %d\n", st->index, timestamp, sample); if (sample < 0) /* not sure what to do */ return -1; sc->current_sample = sample; dprintf(s, "stream %d, found sample %d\n", st->index, sc->current_sample); /* adjust ctts index */ if (sc->ctts_data) { time_sample = 0; for (i = 0; i < sc->ctts_count; i++) { int next = time_sample + sc->ctts_data[i].count; if (next > sc->current_sample) { sc->ctts_index = i; sc->ctts_sample = sc->current_sample - time_sample; break; } time_sample = next; } } return sample; } static int mov_read_seek(AVFormatContext *s, int stream_index, int64_t sample_time, int flags) { AVStream *st; int64_t seek_timestamp, timestamp; int sample; int i; if (stream_index >= s->nb_streams) return -1; if (sample_time < 0) sample_time = 0; st = s->streams[stream_index]; sample = mov_seek_stream(s, st, sample_time, flags); if (sample < 0) return -1; /* adjust seek timestamp to found sample timestamp */ seek_timestamp = st->index_entries[sample].timestamp; for (i = 0; i < s->nb_streams; i++) { st = s->streams[i]; if (stream_index == i) continue; timestamp = av_rescale_q(seek_timestamp, s->streams[stream_index]->time_base, st->time_base); mov_seek_stream(s, st, timestamp, flags); } return 0; } static int mov_read_close(AVFormatContext *s) { MOVContext *mov = s->priv_data; int i, j; for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; MOVStreamContext *sc = st->priv_data; av_freep(&sc->ctts_data); for (j = 0; j < sc->drefs_count; j++) { av_freep(&sc->drefs[j].path); 
av_freep(&sc->drefs[j].dir); } av_freep(&sc->drefs); if (sc->pb && sc->pb != s->pb) url_fclose(sc->pb); av_freep(&st->codec->palctrl); } if (mov->dv_demux) { for(i = 0; i < mov->dv_fctx->nb_streams; i++) { av_freep(&mov->dv_fctx->streams[i]->codec); av_freep(&mov->dv_fctx->streams[i]); } av_freep(&mov->dv_fctx); av_freep(&mov->dv_demux); } av_freep(&mov->trex_data); return 0; } AVInputFormat mov_demuxer = { "mov,mp4,m4a,3gp,3g2,mj2", NULL_IF_CONFIG_SMALL("QuickTime/MPEG-4/Motion JPEG 2000 format"), sizeof(MOVContext), mov_probe, mov_read_header, mov_read_packet, mov_read_close, mov_read_seek, };
123linslouis-android-video-cutter
jni/libavformat/mov.c
C
asf20
88,726
/*
 * RTMP definitions
 * Copyright (c) 2009 Kostya Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVFORMAT_RTMP_H
#define AVFORMAT_RTMP_H

#include "avformat.h"

/** TCP port used when an rtmp:// URL does not specify one explicitly. */
#define RTMP_DEFAULT_PORT 1935

/** Size in bytes of each of the C1/C2/S1/S2 handshake packets. */
#define RTMP_HANDSHAKE_PACKET_SIZE 1536

/**
 * emulated Flash client version - 9.0.124.2 on Linux
 * @{
 */
#define RTMP_CLIENT_PLATFORM "LNX"
#define RTMP_CLIENT_VER1 9
#define RTMP_CLIENT_VER2 0
#define RTMP_CLIENT_VER3 124
#define RTMP_CLIENT_VER4 2
/** @} */ //version defines

#endif /* AVFORMAT_RTMP_H */
123linslouis-android-video-cutter
jni/libavformat/rtmp.h
C
asf20
1,251
/*
 * RTP H.263 Depacketizer, RFC 4629
 * Copyright (c) 2010 Martin Storsjo
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avformat.h"
#include "rtpdec_h263.h"
#include "libavutil/intreadwrite.h"

/**
 * Depacketize one RTP payload in the RFC 4629 H.263 format into an AVPacket.
 *
 * The payload starts with a 16 bit header (RFC 4629, section 5.1):
 * - 5 reserved bits (RR), ignored.
 * - P: picture / picture-segment start or video sequence end; when set,
 *   two zero bytes (the start-code prefix elided by the packetizer) must
 *   be prepended to the payload.
 * - V: an 8 bit Video Redundancy Coding byte follows the header.
 * - PLEN (6 bits): length in bytes of an extra picture header after VRC.
 * - PEBIT (3 bits): bits to ignore in the last extra-header byte (unused).
 *
 * Returns 0 on success or a negative AVERROR code.
 */
static int h263_handle_packet(AVFormatContext *ctx, PayloadContext *data,
                              AVStream *st, AVPacket * pkt, uint32_t * timestamp,
                              const uint8_t * buf, int len, int flags)
{
    uint16_t payload_hdr;
    int startcode, vrc, plen;
    uint8_t *out;

    if (len < 2) {
        av_log(ctx, AV_LOG_ERROR, "Too short H.263 RTP packet\n");
        return AVERROR_INVALIDDATA;
    }

    payload_hdr = AV_RB16(buf);
    /* P bit set -> restore the two elided zero bytes of the start code */
    startcode   = (payload_hdr & 0x0400) ? 2 : 0;
    vrc         =  payload_hdr & 0x0200;
    plen        = (payload_hdr & 0x01f8) >> 3;
    buf += 2;
    len -= 2;

    if (vrc) {
        /* One VRC byte follows the header; not used at the moment. */
        buf++;
        len--;
    }
    if (plen) {
        /* Extra picture header; not used at the moment. */
        buf += plen;
        len -= plen;
    }
    /* The skips above may have consumed more than was present. */
    if (len < 0) {
        av_log(ctx, AV_LOG_ERROR, "Too short H.263 RTP packet\n");
        return AVERROR_INVALIDDATA;
    }

    if (av_new_packet(pkt, len + startcode)) {
        av_log(ctx, AV_LOG_ERROR, "Out of memory\n");
        return AVERROR(ENOMEM);
    }
    pkt->stream_index = st->index;

    out = pkt->data;
    while (startcode-- > 0)
        *out++ = 0;
    memcpy(out, buf, len);

    return 0;
}

RTPDynamicProtocolHandler ff_h263_1998_dynamic_handler = {
    .enc_name         = "H263-1998",
    .codec_type       = AVMEDIA_TYPE_VIDEO,
    .codec_id         = CODEC_ID_H263,
    .parse_packet     = h263_handle_packet,
};

RTPDynamicProtocolHandler ff_h263_2000_dynamic_handler = {
    .enc_name         = "H263-2000",
    .codec_type       = AVMEDIA_TYPE_VIDEO,
    .codec_id         = CODEC_ID_H263,
    .parse_packet     = h263_handle_packet,
};
123linslouis-android-video-cutter
jni/libavformat/rtpdec_h263.c
C
asf20
3,602
/*
 * NC camera feed demuxer
 * Copyright (c) 2009  Nicolas Martin (martinic at iro dot umontreal dot ca)
 *                     Edouard Auvinet
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "avformat.h"

/* 32 bit big-endian marker that starts every NC chunk. */
#define NC_VIDEO_FLAG 0x1A5

/**
 * Probe for the NC camera feed format: a chunk marker at offset 0 and,
 * using the payload size at offset 5, a second marker right after the
 * first chunk.  Returns an AVPROBE_SCORE_* confidence value.
 */
static int nc_probe(AVProbeData *probe_packet)
{
    int size;

    if (AV_RB32(probe_packet->buf) != NC_VIDEO_FLAG)
        return 0;

    size = AV_RL16(probe_packet->buf + 5);

    /* Second chunk lies beyond the probe buffer: marker found but
     * unconfirmed, so return reduced confidence. */
    if (size + 20 > probe_packet->buf_size)
        return AVPROBE_SCORE_MAX/4;

    if (AV_RB32(probe_packet->buf+16+size) == NC_VIDEO_FLAG)
        return AVPROBE_SCORE_MAX;

    return 0;
}

/**
 * Create the single MPEG-4 video stream.  There is no global header to
 * parse; all timing/frame info is recovered by the full parser.
 * Returns 0 on success or a negative AVERROR code.
 */
static int nc_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AVStream *st = av_new_stream(s, 0);

    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = CODEC_ID_MPEG4;
    st->need_parsing      = AVSTREAM_PARSE_FULL;

    /* 100 Hz timestamp resolution */
    av_set_pts_info(st, 64, 1, 100);

    return 0;
}

/**
 * Read one chunk: scan byte-by-byte for the NC_VIDEO_FLAG marker, read
 * the 16 bit little-endian payload size, skip 9 header bytes
 * (presumably per-frame metadata -- meaning not documented here), then
 * read the payload into @pkt.
 * Returns the payload size, AVERROR(EAGAIN) for an empty chunk, or a
 * negative AVERROR code on EOF / short read.
 */
static int nc_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int size;
    int ret;

    /* Resynchronize on the 32 bit marker; state starts as all-ones so
     * it cannot accidentally match before 4 bytes are shifted in. */
    uint32_t state=-1;
    while (state != NC_VIDEO_FLAG) {
        if (url_feof(s->pb))
            return AVERROR(EIO);
        state = (state<<8) + get_byte(s->pb);
    }

    get_byte(s->pb);
    size = get_le16(s->pb);
    url_fskip(s->pb, 9);

    if (size == 0) {
        av_log(s, AV_LOG_DEBUG, "Next packet size is zero\n");
        return AVERROR(EAGAIN);
    }

    ret = av_get_packet(s->pb, pkt, size);
    if (ret != size) {
        /* Partial read: drop the truncated packet before failing. */
        if (ret > 0) av_free_packet(pkt);
        return AVERROR(EIO);
    }

    pkt->stream_index = 0;
    return size;
}

AVInputFormat nc_demuxer = {
    "nc",
    NULL_IF_CONFIG_SMALL("NC camera feed format"),
    0,
    nc_probe,
    nc_read_header,
    nc_read_packet,
    .extensions = "v",
};
123linslouis-android-video-cutter
jni/libavformat/ncdec.c
C
asf20
2,559
/*
 * American Laser Games MM Format Demuxer
 * Copyright (c) 2006 Peter Ross
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * American Laser Games MM Format Demuxer
 * by Peter Ross (pross@xvid.org)
 *
 * The MM format was used by IBM-PC ports of ALG's "arcade shooter" games,
 * including Mad Dog McCree and Crime Patrol.
 *
 * Technical details here:
 *  http://wiki.multimedia.cx/index.php?title=American_Laser_Games_MM
 */

#include "libavutil/intreadwrite.h"
#include "avformat.h"

/* Every chunk begins with a 6 byte preamble: LE16 type, LE32 length. */
#define MM_PREAMBLE_SIZE    6

#define MM_TYPE_HEADER      0x0
#define MM_TYPE_INTER       0x5
#define MM_TYPE_INTRA       0x8
#define MM_TYPE_INTRA_HH    0xc
#define MM_TYPE_INTER_HH    0xd
#define MM_TYPE_INTRA_HHV   0xe
#define MM_TYPE_INTER_HHV   0xf
#define MM_TYPE_AUDIO       0x15
#define MM_TYPE_PALETTE     0x31

#define MM_HEADER_LEN_V     0x16    /* video only */
#define MM_HEADER_LEN_AV    0x18    /* video + audio */

#define MM_PALETTE_COUNT    128
#define MM_PALETTE_SIZE     (MM_PALETTE_COUNT*3)

typedef struct {
  /* Monotonic frame counters used directly as packet timestamps. */
  unsigned int audio_pts, video_pts;
} MmDemuxContext;

/**
 * Probe for the MM format: a header chunk at offset 0 with one of the
 * two known header lengths, plausible fps/width/height fields, and a
 * valid chunk type following the header.
 * Returns AVPROBE_SCORE_MAX/2 at most, since these checks are weak.
 */
static int probe(AVProbeData *p)
{
    int len, type, fps, w, h;
    if (p->buf_size < MM_HEADER_LEN_AV + MM_PREAMBLE_SIZE)
        return 0;
    /* the first chunk is always the header */
    if (AV_RL16(&p->buf[0]) != MM_TYPE_HEADER)
        return 0;
    len = AV_RL32(&p->buf[2]);
    if (len != MM_HEADER_LEN_V && len != MM_HEADER_LEN_AV)
        return 0;
    fps = AV_RL16(&p->buf[8]);
    w = AV_RL16(&p->buf[12]);
    h = AV_RL16(&p->buf[14]);
    if (!fps || fps > 60 || !w || w > 2048 || !h || h > 2048)
        return 0;
    type = AV_RL16(&p->buf[len]);
    if (!type || type > 0x31)
        return 0;

    /* only return half certainty since this check is a bit sketchy */
    return AVPROBE_SCORE_MAX / 2;
}

/**
 * Parse the header chunk and create the video stream, plus an audio
 * stream (8 kHz mono unsigned 8 bit PCM) when the header length
 * indicates audio is present.
 * Returns 0 on success or a negative AVERROR code.
 */
static int read_header(AVFormatContext *s,
                       AVFormatParameters *ap)
{
    MmDemuxContext *mm = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st;

    unsigned int type, length;
    unsigned int frame_rate, width, height;

    type = get_le16(pb);
    length = get_le32(pb);

    if (type != MM_TYPE_HEADER)
        return AVERROR_INVALIDDATA;

    /* read header */
    get_le16(pb);   /* total number of chunks */
    frame_rate = get_le16(pb);
    get_le16(pb);   /* ibm-pc video bios mode */
    width = get_le16(pb);
    height = get_le16(pb);
    url_fseek(pb, length - 10, SEEK_CUR);  /* unknown data */

    /* video stream */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_MMVIDEO;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = width;
    st->codec->height = height;
    /* one tick per frame: pts values are plain frame counters */
    av_set_pts_info(st, 64, 1, frame_rate);

    /* audio stream */
    if (length == MM_HEADER_LEN_AV) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_tag = 0; /* no fourcc */
        st->codec->codec_id = CODEC_ID_PCM_U8;
        st->codec->channels = 1;
        st->codec->sample_rate = 8000;
        av_set_pts_info(st, 64, 1, 8000); /* 8000 hz */
    }

    mm->audio_pts = 0;
    mm->video_pts = 0;
    return 0;
}

/**
 * Read chunks until a video, palette or audio chunk is found and return
 * it as a packet.  Video/palette packets keep their 6 byte preamble so
 * the decoder can see the chunk type; palette chunks share the current
 * video pts (they do not advance the frame counter).  Unknown chunk
 * types are logged and skipped.
 * Returns 0 on success or a negative AVERROR code.
 */
static int read_packet(AVFormatContext *s,
                       AVPacket *pkt)
{
    MmDemuxContext *mm = s->priv_data;
    ByteIOContext *pb = s->pb;
    unsigned char preamble[MM_PREAMBLE_SIZE];
    unsigned int type, length;

    while(1) {

        if (get_buffer(pb, preamble, MM_PREAMBLE_SIZE) != MM_PREAMBLE_SIZE) {
            return AVERROR(EIO);
        }

        type = AV_RL16(&preamble[0]);
        length = AV_RL16(&preamble[2]);

        switch(type) {
        case MM_TYPE_PALETTE :
        case MM_TYPE_INTER :
        case MM_TYPE_INTRA :
        case MM_TYPE_INTRA_HH :
        case MM_TYPE_INTER_HH :
        case MM_TYPE_INTRA_HHV :
        case MM_TYPE_INTER_HHV :
            /* output preamble + data */
            if (av_new_packet(pkt, length + MM_PREAMBLE_SIZE))
                return AVERROR(ENOMEM);
            memcpy(pkt->data, preamble, MM_PREAMBLE_SIZE);
            if (get_buffer(pb, pkt->data + MM_PREAMBLE_SIZE, length) != length)
                return AVERROR(EIO);
            pkt->size = length + MM_PREAMBLE_SIZE;
            pkt->stream_index = 0;
            pkt->pts = mm->video_pts;
            /* palette chunks are metadata, not frames */
            if (type!=MM_TYPE_PALETTE)
                mm->video_pts++;
            return 0;

        case MM_TYPE_AUDIO :
            if (av_get_packet(s->pb, pkt, length)<0)
                return AVERROR(ENOMEM);
            pkt->size = length;
            pkt->stream_index = 1;
            pkt->pts = mm->audio_pts++;
            return 0;

        default :
            av_log(s, AV_LOG_INFO, "unknown chunk type 0x%x\n", type);
            url_fseek(pb, length, SEEK_CUR);
        }
    }

    return 0;
}

AVInputFormat mm_demuxer = {
    "mm",
    NULL_IF_CONFIG_SMALL("American Laser Games MM format"),
    sizeof(MmDemuxContext),
    probe,
    read_header,
    read_packet,
};
123linslouis-android-video-cutter
jni/libavformat/mm.c
C
asf20
5,847
/*
 * Gopher protocol
 *
 * Copyright (c) 2009 Toshimitsu Kimura
 *
 * based on libavformat/http.c, Copyright (c) 2000, 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avstring.h"
#include "avformat.h"
#include "internal.h"
#include "network.h"

typedef struct {
    URLContext *hd;  /* underlying TCP connection */
} GopherContext;

/* Write @size bytes from @buf to the underlying TCP connection. */
static int gopher_write(URLContext *h, uint8_t *buf, int size)
{
    GopherContext *s = h->priv_data;
    return url_write(s->hd, buf, size);
}

/**
 * Send the gopher selector for @path over the already-open connection.
 * The first character after the leading '/' is the gopher item type;
 * only types '5' and '9' are accepted, with the actual selector being
 * everything from the next '/' on.
 * Returns 0 on success or a negative AVERROR code.
 */
static int gopher_connect(URLContext *h, const char *path)
{
    char buffer[1024];

    if (!*path) return AVERROR(EINVAL);
    switch (*++path) {
        case '5':
        case '9':
            path = strchr(path, '/');
            if (!path) return AVERROR(EINVAL);
            break;
        default:
            av_log(NULL, AV_LOG_WARNING,
                   "Gopher protocol type '%c' not supported yet!\n",
                   *path);
            return AVERROR(EINVAL);
    }

    /* send gopher sector */
    snprintf(buffer, sizeof(buffer), "%s\r\n", path);

    if (gopher_write(h, buffer, strlen(buffer)) < 0)
        return AVERROR(EIO);

    return 0;
}

/* Close the TCP connection (if open) and free the private context.
 * Safe to call from the gopher_open() failure path. */
static int gopher_close(URLContext *h)
{
    GopherContext *s = h->priv_data;
    if (s->hd) {
        url_close(s->hd);
        s->hd = NULL;
    }
    av_freep(&h->priv_data);
    return 0;
}

/**
 * Open a gopher:// @uri: split it into host/port/path, open a TCP
 * connection (default port 70) and send the selector.
 * Returns 0 on success or a negative AVERROR code.
 */
static int gopher_open(URLContext *h, const char *uri, int flags)
{
    GopherContext *s;
    char hostname[1024], auth[1024], path[1024], buf[1024];
    int port, err;

    h->is_streamed = 1;

    s = av_malloc(sizeof(GopherContext));
    if (!s) {
        return AVERROR(ENOMEM);
    }
    h->priv_data = s;

    /* needed in any case to build the host string */
    ff_url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
                 path, sizeof(path), uri);

    if (port < 0)
        port = 70;

    ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);

    s->hd = NULL;
    err = url_open(&s->hd, buf, URL_RDWR);
    if (err < 0)
        goto fail;

    if ((err = gopher_connect(h, path)) < 0)
        goto fail;
    return 0;

fail:
    gopher_close(h);
    return err;
}

/* Read up to @size bytes of the server response into @buf. */
static int gopher_read(URLContext *h, uint8_t *buf, int size)
{
    GopherContext *s = h->priv_data;
    int len = url_read(s->hd, buf, size);
    return len;
}

URLProtocol gopher_protocol = {
    "gopher",
    gopher_open,
    gopher_read,
    gopher_write,
    NULL, /*seek*/
    gopher_close,
};
123linslouis-android-video-cutter
jni/libavformat/gopher.c
C
asf20
3,191
/*
 * General DV muxer/demuxer
 * Copyright (c) 2003 Roman Shaposhnik
 *
 * Many thanks to Dan Dennedy <dan@dennedy.org> for providing wealth
 * of DV technical info.
 *
 * Raw DV format
 * Copyright (c) 2002 Fabrice Bellard
 *
 * 50 Mbps (DVCPRO50) and 100 Mbps (DVCPRO HD) support
 * Copyright (c) 2006 Daniel Maas <dmaas@maasdigital.com>
 * Funded by BBC Research & Development
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <time.h>
#include "avformat.h"
#include "libavcodec/dvdata.h"
#include "libavutil/intreadwrite.h"
#include "dv.h"

/*
 * Demuxer state. Shared between the raw DV demuxer below and other
 * containers (e.g. AVI/MOV) that embed DV frames and call
 * dv_init_demux()/dv_produce_packet() directly.
 */
struct DVDemuxContext {
    const DVprofile*  sys;           /* Current DV profile. E.g.: 525/60, 625/50 */
    AVFormatContext*  fctx;          /* back-pointer to the owning format context */
    AVStream*         vst;           /* the single video stream */
    AVStream*         ast[4];        /* up to 4 stereo audio streams (lazily created) */
    AVPacket          audio_pkt[4];  /* pending audio packet per stream */
    uint8_t           audio_buf[4][8192]; /* PCM staging buffer backing audio_pkt[i].data */
    int               ach;           /* number of active audio (stereo pair) streams */
    int               frames;        /* video frames produced so far (used as video pts) */
    uint64_t          abytes;        /* audio bytes produced so far (used to derive audio pts) */
};

/*
 * Expand one 12-bit nonlinear DV audio sample to a 16-bit linear one
 * using the segment/shift companding scheme of the DV spec.
 */
static inline uint16_t dv_audio_12to16(uint16_t sample)
{
    uint16_t shift, result;

    /* sign-extend the 12-bit value into 16 bits */
    sample = (sample < 0x800) ? sample : sample | 0xf000;
    shift  = (sample & 0xf00) >> 8;

    if (shift < 0x2 || shift > 0xd) {
        /* small magnitudes are already linear */
        result = sample;
    } else if (shift < 0x8) {
        shift--;
        result = (sample - (256 * shift)) << shift;
    } else {
        shift  = 0xe - shift;
        result = ((sample + ((256 * shift) + 1)) << shift) - 1;
    }

    return result;
}

/*
 * This is the dumbest implementation of all -- it simply looks at
 * a fixed offset and if pack isn't there -- fails. We might want
 * to have a fallback mechanism for complete search of missing packs.
 */
static const uint8_t* dv_extract_pack(uint8_t* frame, enum dv_pack_type t)
{
    int offs;

    /* fixed byte offsets of the packs within the first DIF channel */
    switch (t) {
    case dv_audio_source:
        offs = (80*6 + 80*16*3 + 3);
        break;
    case dv_audio_control:
        offs = (80*6 + 80*16*4 + 3);
        break;
    case dv_video_control:
        offs = (80*5 + 48 + 5);
        break;
    default:
        return NULL;
    }

    /* first byte of a pack is its type id; verify before returning */
    return frame[offs] == t ? &frame[offs] : NULL;
}

/*
 * Deshuffle the audio samples of one DV frame into the per-stream
 * PCM buffers ppcm[] (16-bit little-endian interleaved stereo).
 * Returns the byte size written per stereo stream, 0 if the frame
 * carries no audio, or -1 on unsupported quantization.
 *
 * There's a couple of assumptions being made here:
 * 1. By default we silence erroneous (0x8000/16bit 0x800/12bit) audio samples.
 *    We can pass them upwards when ffmpeg will be ready to deal with them.
 * 2. We don't do software emphasis.
 * 3. Audio is always returned as 16bit linear samples: 12bit nonlinear samples
 *    are converted into 16bit linear ones.
 */
static int dv_extract_audio(uint8_t* frame, uint8_t* ppcm[4],
                            const DVprofile *sys)
{
    int size, chan, i, j, d, of, smpls, freq, quant, half_ch;
    uint16_t lc, rc;
    const uint8_t* as_pack;
    uint8_t *pcm, ipcm;

    as_pack = dv_extract_pack(frame, dv_audio_source);
    if (!as_pack)    /* No audio ? */
        return 0;

    smpls = as_pack[1] & 0x3f;        /* samples in this frame - min. samples */
    freq  = (as_pack[4] >> 3) & 0x07; /* 0 - 48kHz, 1 - 44,1kHz, 2 - 32kHz */
    quant = as_pack[4] & 0x07;        /* 0 - 16bit linear, 1 - 12bit nonlinear */

    if (quant > 1)
        return -1;  /* unsupported quantization */

    size    = (sys->audio_min_samples[freq] + smpls) * 4; /* 2ch, 2bytes */
    half_ch = sys->difseg_size / 2;

    /* We work with 720p frames split in half, thus even frames have
     * channels 0,1 and odd 2,3. */
    ipcm = (sys->height == 720 && !(frame[1] & 0x0C)) ? 2 : 0;
    pcm  = ppcm[ipcm++];

    /* for each DIF channel */
    for (chan = 0; chan < sys->n_difchan; chan++) {
        /* for each DIF segment */
        for (i = 0; i < sys->difseg_size; i++) {
            frame += 6 * 80; /* skip DIF segment header */
            if (quant == 1 && i == half_ch) {
                /* next stereo channel (12bit mode only) */
                pcm = ppcm[ipcm++];
                if (!pcm)
                    break;
            }

            /* for each AV sequence */
            for (j = 0; j < 9; j++) {
                for (d = 8; d < 80; d += 2) {
                    if (quant == 0) {  /* 16bit quantization */
                        of = sys->audio_shuffle[i][j] + (d - 8) / 2 * sys->audio_stride;
                        if (of*2 >= size)
                            continue;

                        pcm[of*2]   = frame[d+1]; // FIXME: maybe we have to admit
                        pcm[of*2+1] = frame[d];   //        that DV is a big-endian PCM
                        /* silence the erroneous sample value 0x8000 */
                        if (pcm[of*2+1] == 0x80 && pcm[of*2] == 0x00)
                            pcm[of*2+1] = 0;
                    } else {           /* 12bit quantization */
                        /* two 12-bit samples are packed into 3 bytes */
                        lc = ((uint16_t)frame[d]   << 4) |
                             ((uint16_t)frame[d+2] >> 4);
                        rc = ((uint16_t)frame[d+1] << 4) |
                             ((uint16_t)frame[d+2] & 0x0f);
                        /* 0x800 is the erroneous-sample marker -> silence */
                        lc = (lc == 0x800 ? 0 : dv_audio_12to16(lc));
                        rc = (rc == 0x800 ? 0 : dv_audio_12to16(rc));

                        of = sys->audio_shuffle[i%half_ch][j] + (d - 8) / 3 * sys->audio_stride;
                        if (of*2 >= size)
                            continue;

                        pcm[of*2]   = lc & 0xff; // FIXME: maybe we have to admit
                        pcm[of*2+1] = lc >> 8;   //        that DV is a big-endian PCM
                        of = sys->audio_shuffle[i%half_ch+half_ch][j] +
                             (d - 8) / 3 * sys->audio_stride;
                        pcm[of*2]   = rc & 0xff; // FIXME: maybe we have to admit
                        pcm[of*2+1] = rc >> 8;   //        that DV is a big-endian PCM
                        ++d;
                    }
                }

                frame += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */
            }
        }

        /* next stereo channel (50Mbps and 100Mbps only) */
        pcm = ppcm[ipcm++];
        if (!pcm)
            break;
    }

    return size;
}

/*
 * Parse the audio-source pack of a frame, (re)create the matching
 * AVStreams and pending audio packets, and return the audio payload
 * size in bytes per stereo stream (0 if the frame has no audio).
 */
static int dv_extract_audio_info(DVDemuxContext* c, uint8_t* frame)
{
    const uint8_t* as_pack;
    int freq, stype, smpls, quant, i, ach;

    as_pack = dv_extract_pack(frame, dv_audio_source);
    if (!as_pack || !c->sys) {    /* No audio ? */
        c->ach = 0;
        return 0;
    }

    smpls = as_pack[1] & 0x3f;        /* samples in this frame - min. samples */
    freq  = (as_pack[4] >> 3) & 0x07; /* 0 - 48kHz, 1 - 44,1kHz, 2 - 32kHz */
    stype = (as_pack[3] & 0x1f);      /* 0 - 2CH, 2 - 4CH, 3 - 8CH */
    quant = as_pack[4] & 0x07;        /* 0 - 16bit linear, 1 - 12bit nonlinear */

    /* note: ach counts PAIRS of channels (i.e. stereo channels) */
    ach = ((int[4]){ 1, 0, 2, 4})[stype];
    if (ach == 1 && quant && freq == 2)
        ach = 2;

    /* Dynamic handling of the audio streams in DV */
    for (i = 0; i < ach; i++) {
        if (!c->ast[i]) {
            c->ast[i] = av_new_stream(c->fctx, 0);
            if (!c->ast[i])
                break;
            av_set_pts_info(c->ast[i], 64, 1, 30000);
            c->ast[i]->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            c->ast[i]->codec->codec_id   = CODEC_ID_PCM_S16LE;

            av_init_packet(&c->audio_pkt[i]);
            c->audio_pkt[i].size         = 0;
            c->audio_pkt[i].data         = c->audio_buf[i];
            c->audio_pkt[i].stream_index = c->ast[i]->index;
            c->audio_pkt[i].flags       |= AV_PKT_FLAG_KEY;
        }
        c->ast[i]->codec->sample_rate = dv_audio_frequency[freq];
        c->ast[i]->codec->channels    = 2;
        c->ast[i]->codec->bit_rate    = 2 * dv_audio_frequency[freq] * 16;
        c->ast[i]->start_time         = 0;
    }
    c->ach = i;

    return (c->sys->audio_min_samples[freq] + smpls) * 4; /* 2ch, 2bytes */;
}

/*
 * Fill in the video stream parameters (timebase, dimensions, pixel
 * format, SAR, bitrate) from the current profile and the frame's
 * video-control pack. Returns the frame size in bytes (0 if no
 * profile has been detected yet).
 */
static int dv_extract_video_info(DVDemuxContext *c, uint8_t* frame)
{
    const uint8_t* vsc_pack;
    AVCodecContext* avctx;
    int apt, is16_9;
    int size = 0;

    if (c->sys) {
        avctx = c->vst->codec;

        av_set_pts_info(c->vst, 64, c->sys->time_base.num,
                        c->sys->time_base.den);
        avctx->time_base= c->sys->time_base;
        if (!avctx->width){
            avctx->width = c->sys->width;
            avctx->height = c->sys->height;
        }
        avctx->pix_fmt = c->sys->pix_fmt;

        /* finding out SAR is a little bit messy */
        vsc_pack = dv_extract_pack(frame, dv_video_control);
        apt      = frame[4] & 0x07;
        is16_9   = (vsc_pack && ((vsc_pack[2] & 0x07) == 0x02 ||
                                 (!apt && (vsc_pack[2] & 0x07) == 0x07)));
        c->vst->sample_aspect_ratio = c->sys->sar[is16_9];
        avctx->bit_rate = av_rescale_q(c->sys->frame_size, (AVRational){8,1},
                                       c->sys->time_base);
        size = c->sys->frame_size;
    }
    return size;
}

/*
 * The following 3 functions constitute our interface to the world
 */

/* Allocate a DVDemuxContext and its video stream; returns NULL on OOM. */
DVDemuxContext* dv_init_demux(AVFormatContext *s)
{
    DVDemuxContext *c;

    c = av_mallocz(sizeof(DVDemuxContext));
    if (!c)
        return NULL;

    c->vst = av_new_stream(s, 0);
    if (!c->vst) {
        av_free(c);
        return NULL;
    }

    c->sys  = NULL;
    c->fctx = s;
    memset(c->ast, 0, sizeof(c->ast));
    c->ach    = 0;
    c->frames = 0;
    c->abytes = 0;

    c->vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    c->vst->codec->codec_id   = CODEC_ID_DVVIDEO;
    c->vst->codec->bit_rate   = 25000000;
    c->vst->start_time        = 0;

    return c;
}

/*
 * Hand out the first pending audio packet, if any.
 * Returns its size, or -1 when no audio packet is queued.
 */
int dv_get_packet(DVDemuxContext *c, AVPacket *pkt)
{
    int size = -1;
    int i;

    for (i = 0; i < c->ach; i++) {
        if (c->ast[i] && c->audio_pkt[i].size) {
            *pkt = c->audio_pkt[i];
            c->audio_pkt[i].size = 0; /* mark as consumed */
            size = pkt->size;
            break;
        }
    }

    return size;
}

/*
 * Demux one complete DV frame from buf: queue the audio packets and
 * return the video packet in *pkt. Returns the video packet size, or
 * -1 on a broken/short frame. pkt->data aliases buf (no copy).
 */
int dv_produce_packet(DVDemuxContext *c, AVPacket *pkt,
                      uint8_t* buf, int buf_size)
{
    int size, i;
    uint8_t *ppcm[4] = {0};

    if (buf_size < DV_PROFILE_BYTES ||
        !(c->sys = ff_dv_frame_profile(c->sys, buf, buf_size)) ||
        buf_size < c->sys->frame_size) {
          return -1;   /* Broken frame, or not enough data */
    }

    /* Queueing audio packet */
    /* FIXME: in case of no audio/bad audio we have to do something */
    size = dv_extract_audio_info(c, buf);
    for (i = 0; i < c->ach; i++) {
        c->audio_pkt[i].size = size;
        /* pts in 1/30000 units, derived from total audio bytes so far */
        c->audio_pkt[i].pts  = c->abytes * 30000*8 / c->ast[i]->codec->bit_rate;
        ppcm[i] = c->audio_buf[i];
    }
    dv_extract_audio(buf, ppcm, c->sys);

    /* We work with 720p frames split in half, thus even frames have
     * channels 0,1 and odd 2,3. */
    if (c->sys->height == 720) {
        if (buf[1] & 0x0C) {
            /* odd half-frame: suppress channels 2,3 */
            c->audio_pkt[2].size = c->audio_pkt[3].size = 0;
        } else {
            /* even half-frame: suppress channels 0,1; count bytes once per pair */
            c->audio_pkt[0].size = c->audio_pkt[1].size = 0;
            c->abytes += size;
        }
    } else {
        c->abytes += size;
    }

    /* Now it's time to return video packet */
    size = dv_extract_video_info(c, buf);
    av_init_packet(pkt);
    pkt->data         = buf;
    pkt->size         = size;
    pkt->flags       |= AV_PKT_FLAG_KEY;
    pkt->stream_index = c->vst->id;
    pkt->pts          = c->frames;

    c->frames++;

    return size;
}

/*
 * Convert a frame-number timestamp into a byte offset, clamped to
 * [0, last whole frame in the file].
 */
static int64_t dv_frame_offset(AVFormatContext *s, DVDemuxContext *c,
                               int64_t timestamp, int flags)
{
    // FIXME: sys may be wrong if last dv_read_packet() failed (buffer is junk)
    const DVprofile* sys = ff_dv_codec_profile(c->vst->codec);
    int64_t offset;
    int64_t size = url_fsize(s->pb);
    int64_t max_offset = ((size-1) / sys->frame_size) * sys->frame_size;

    offset = sys->frame_size * timestamp;

    if (size >= 0 && offset > max_offset) offset = max_offset;
    else if (offset < 0) offset = 0;

    return offset;
}

/*
 * Reset the demuxer counters after a seek to the given frame number
 * and drop any pending audio packets.
 */
void dv_offset_reset(DVDemuxContext *c, int64_t frame_offset)
{
    c->frames= frame_offset;
    if (c->ach)
        c->abytes= av_rescale_q(c->frames, c->sys->time_base,
                                (AVRational){8, c->ast[0]->codec->bit_rate});
    c->audio_pkt[0].size = c->audio_pkt[1].size = 0;
    c->audio_pkt[2].size = c->audio_pkt[3].size = 0;
}

/************************************************************
 * Implementation of the easiest DV storage of all -- raw DV.
 ************************************************************/

typedef struct RawDVContext {
    DVDemuxContext* dv_demux;
    uint8_t         buf[DV_MAX_FRAME_SIZE]; /* one-frame staging buffer */
} RawDVContext;

/*
 * Scan for the first DV section header, read one frame header to
 * detect the profile, and rewind so the first packet read starts at
 * the frame boundary.
 */
static int dv_read_header(AVFormatContext *s,
                          AVFormatParameters *ap)
{
    unsigned state, marker_pos = 0;
    RawDVContext *c = s->priv_data;

    c->dv_demux = dv_init_demux(s);
    if (!c->dv_demux)
        return -1;

    state = get_be32(s->pb);
    while ((state & 0xffffff7f) != 0x1f07003f) {
        if (url_feof(s->pb)) {
            av_log(s, AV_LOG_ERROR, "Cannot find DV header.\n");
            return -1;
        }
        if (state == 0x003f0700 || state == 0xff3f0700)
            marker_pos = url_ftell(s->pb);
        if (state == 0xff3f0701 && url_ftell(s->pb) - marker_pos == 80) {
            url_fseek(s->pb, -163, SEEK_CUR);
            state = get_be32(s->pb);
            break;
        }
        state = (state << 8) | get_byte(s->pb);
    }
    AV_WB32(c->buf, state);

    if (get_buffer(s->pb, c->buf + 4, DV_PROFILE_BYTES - 4) <= 0 ||
        url_fseek(s->pb, -DV_PROFILE_BYTES, SEEK_CUR) < 0)
        return AVERROR(EIO);

    c->dv_demux->sys = ff_dv_frame_profile(c->dv_demux->sys, c->buf, DV_PROFILE_BYTES);
    if (!c->dv_demux->sys) {
        av_log(s, AV_LOG_ERROR, "Can't determine profile of DV input stream.\n");
        return -1;
    }

    s->bit_rate = av_rescale_q(c->dv_demux->sys->frame_size, (AVRational){8,1},
                               c->dv_demux->sys->time_base);

    return 0;
}

/*
 * Return a queued audio packet if one is pending, otherwise read the
 * next full frame from the input and demux it.
 */
static int dv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int size;
    RawDVContext *c = s->priv_data;

    size = dv_get_packet(c->dv_demux, pkt);

    if (size < 0) {
        if (!c->dv_demux->sys)
            return AVERROR(EIO);
        size = c->dv_demux->sys->frame_size;
        if (get_buffer(s->pb, c->buf, size) <= 0)
            return AVERROR(EIO);

        size = dv_produce_packet(c->dv_demux, pkt, c->buf, size);
    }

    return size;
}

/* Seek to the frame-aligned byte offset for the requested timestamp. */
static int dv_read_seek(AVFormatContext *s, int stream_index,
                       int64_t timestamp, int flags)
{
    RawDVContext *r   = s->priv_data;
    DVDemuxContext *c = r->dv_demux;
    int64_t offset    = dv_frame_offset(s, c, timestamp, flags);

    dv_offset_reset(c, offset / c->sys->frame_size);

    offset = url_fseek(s->pb, offset, SEEK_SET);
    return (offset < 0) ? offset : 0;
}

static int dv_read_close(AVFormatContext *s)
{
    RawDVContext *c = s->priv_data;
    av_free(c->dv_demux);
    return 0;
}

/*
 * Probe by counting DV section headers in the buffer; a confident
 * match requires several headers at a plausible spacing.
 */
static int dv_probe(AVProbeData *p)
{
    unsigned state, marker_pos = 0;
    int i;
    int matches = 0;
    int secondary_matches = 0;

    if (p->buf_size < 5)
        return 0;

    state = AV_RB32(p->buf);
    for (i = 4; i < p->buf_size; i++) {
        if ((state & 0xffffff7f) == 0x1f07003f)
            matches++;
        // any section header, also with seq/chan num != 0,
        // should appear around every 12000 bytes, at least 10 per frame
        if ((state & 0xff07ff7f) == 0x1f07003f)
            secondary_matches++;
        if (state == 0x003f0700 || state == 0xff3f0700)
            marker_pos = i;
        if (state == 0xff3f0701 && i - marker_pos == 80)
            matches++;
        state = (state << 8) | p->buf[i];
    }

    if (matches && p->buf_size / matches < 1024*1024) {
        if (matches > 4 || (secondary_matches >= 10 && p->buf_size / secondary_matches < 24000))
            return AVPROBE_SCORE_MAX*3/4; // not max to avoid dv in mov to match
        return AVPROBE_SCORE_MAX/4;
    }
    return 0;
}

#if CONFIG_DV_DEMUXER
AVInputFormat dv_demuxer = {
    "dv",
    NULL_IF_CONFIG_SMALL("DV video format"),
    sizeof(RawDVContext),
    dv_probe,
    dv_read_header,
    dv_read_packet,
    dv_read_close,
    dv_read_seek,
    .extensions = "dv,dif",
};
#endif
123linslouis-android-video-cutter
jni/libavformat/dv.c
C
asf20
16,783
/*
 * RAW muxer and demuxer
 * Copyright (C) 2007 Aurelien Jacobs <aurel@gnuage.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVFORMAT_RAW_H
#define AVFORMAT_RAW_H

#include "avformat.h"

/* Seek helper shared by raw/PCM-style demuxers; presumably converts a
 * timestamp into a byte position for constant-bitrate data --
 * implementation lives in the corresponding .c file (TODO confirm). */
int pcm_read_seek(AVFormatContext *s,
                  int stream_index, int64_t timestamp, int flags);

/* Read up to one partial packet from a raw byte stream; shared by
 * several raw demuxers (implementation not in this header). */
int ff_raw_read_partial_packet(AVFormatContext *s, AVPacket *pkt);

#endif /* AVFORMAT_RAW_H */
123linslouis-android-video-cutter
jni/libavformat/raw.h
C
asf20
1,122
/*
 * Flash Compatible Streaming Format common header.
 * Copyright (c) 2000 Fabrice Bellard
 * Copyright (c) 2003 Tinic Uro
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVFORMAT_SWF_H
#define AVFORMAT_SWF_H

#include "libavutil/fifo.h"
#include "avformat.h"
#include "avio.h"
#include "riff.h"    /* for CodecTag */

/* should have a generic way to indicate probable size */
#define DUMMY_FILE_SIZE   (100 * 1024 * 1024)
#define DUMMY_DURATION    600 /* in seconds */

/* SWF tag codes */
#define TAG_END            0
#define TAG_SHOWFRAME      1
#define TAG_DEFINESHAPE    2
#define TAG_FREECHARACTER  3
#define TAG_PLACEOBJECT    4
#define TAG_REMOVEOBJECT   5
#define TAG_STREAMHEAD     18
#define TAG_STREAMBLOCK    19
#define TAG_JPEG2          21
#define TAG_PLACEOBJECT2   26
#define TAG_STREAMHEAD2    45
#define TAG_VIDEOSTREAM    60
#define TAG_VIDEOFRAME     61
#define TAG_FILEATTRIBUTES 69

#define TAG_LONG           0x100 /* marker for long-form tag headers */

/* flags for shape definition */
#define FLAG_MOVETO        0x01
#define FLAG_SETFILL0      0x02
#define FLAG_SETFILL1      0x04

#define AUDIO_FIFO_SIZE 65536

/* character id used */
#define BITMAP_ID 0
#define VIDEO_ID 0
#define SHAPE_ID  1

#undef NDEBUG
#include <assert.h>

/* Shared (de)muxer state for the SWF format. */
typedef struct {
    int64_t duration_pos;       /* file position of the duration field, patched at close */
    int64_t tag_pos;            /* file position of the current tag header */
    int64_t vframes_pos;        /* file position of the video frame count field */
    int samples_per_frame;      /* audio samples per video frame */
    int sound_samples;          /* audio samples written/read so far */
    int swf_frame_number;
    int video_frame_number;
    int frame_rate;
    int tag;                    /* current tag code */
    AVFifoBuffer *audio_fifo;   /* buffers audio between video frames */
    AVCodecContext *audio_enc, *video_enc;
} SWFContext;

/* Mapping of SWF video codec ids to FFmpeg codec ids. */
static const AVCodecTag swf_codec_tags[] = {
    {CODEC_ID_FLV1, 0x02},
    {CODEC_ID_VP6F, 0x04},
    {CODEC_ID_NONE, 0},
};

/* Mapping of SWF audio codec ids to FFmpeg codec ids. */
static const AVCodecTag swf_audio_codec_tags[] = {
    {CODEC_ID_PCM_S16LE,  0x00},
    {CODEC_ID_ADPCM_SWF,  0x01},
    {CODEC_ID_MP3,        0x02},
    {CODEC_ID_PCM_S16LE,  0x03},
    //{CODEC_ID_NELLYMOSER, 0x06},
    {CODEC_ID_NONE,       0},
};

#endif /* AVFORMAT_SWF_H */
123linslouis-android-video-cutter
jni/libavformat/swf.h
C
asf20
2,630
/* * Unbuffered io for ffmpeg system * Copyright (c) 2001 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* needed for usleep() */ #define _XOPEN_SOURCE 600 #include <unistd.h> #include "libavutil/avstring.h" #include "libavcodec/opt.h" #include "os_support.h" #include "avformat.h" #if CONFIG_NETWORK #include "network.h" #endif #if LIBAVFORMAT_VERSION_MAJOR >= 53 /** @name Logging context. 
*/ /*@{*/ static const char *urlcontext_to_name(void *ptr) { URLContext *h = (URLContext *)ptr; if(h->prot) return h->prot->name; else return "NULL"; } static const AVOption options[] = {{NULL}}; static const AVClass urlcontext_class = { "URLContext", urlcontext_to_name, options, LIBAVUTIL_VERSION_INT }; /*@}*/ #endif static int default_interrupt_cb(void); URLProtocol *first_protocol = NULL; URLInterruptCB *url_interrupt_cb = default_interrupt_cb; URLProtocol *av_protocol_next(URLProtocol *p) { if(p) return p->next; else return first_protocol; } int av_register_protocol(URLProtocol *protocol) { URLProtocol **p; p = &first_protocol; while (*p != NULL) p = &(*p)->next; *p = protocol; protocol->next = NULL; return 0; } #if LIBAVFORMAT_VERSION_MAJOR < 53 int register_protocol(URLProtocol *protocol) { return av_register_protocol(protocol); } #endif int url_open_protocol (URLContext **puc, struct URLProtocol *up, const char *filename, int flags) { URLContext *uc; int err; #if CONFIG_NETWORK if (!ff_network_init()) return AVERROR(EIO); #endif uc = av_mallocz(sizeof(URLContext) + strlen(filename) + 1); if (!uc) { err = AVERROR(ENOMEM); goto fail; } #if LIBAVFORMAT_VERSION_MAJOR >= 53 uc->av_class = &urlcontext_class; #endif uc->filename = (char *) &uc[1]; strcpy(uc->filename, filename); uc->prot = up; uc->flags = flags; uc->is_streamed = 0; /* default = not streamed */ uc->max_packet_size = 0; /* default: stream file */ err = up->url_open(uc, filename, flags); if (err < 0) { av_free(uc); goto fail; } //We must be careful here as url_seek() could be slow, for example for http if( (flags & (URL_WRONLY | URL_RDWR)) || !strcmp(up->name, "file")) if(!uc->is_streamed && url_seek(uc, 0, SEEK_SET) < 0) uc->is_streamed= 1; *puc = uc; return 0; fail: *puc = NULL; #if CONFIG_NETWORK ff_network_close(); #endif return err; } int url_open(URLContext **puc, const char *filename, int flags) { URLProtocol *up; const char *p; char proto_str[128], *q; p = filename; q = proto_str; while (*p 
!= '\0' && *p != ':') { /* protocols can only contain alphabetic chars */ if (!isalpha(*p)) goto file_proto; if ((q - proto_str) < sizeof(proto_str) - 1) *q++ = *p; p++; } /* if the protocol has length 1, we consider it is a dos drive */ if (*p == '\0' || is_dos_path(filename)) { file_proto: strcpy(proto_str, "file"); } else { *q = '\0'; } up = first_protocol; while (up != NULL) { if (!strcmp(proto_str, up->name)) return url_open_protocol (puc, up, filename, flags); up = up->next; } *puc = NULL; return AVERROR(ENOENT); } int url_read(URLContext *h, unsigned char *buf, int size) { int ret; if (h->flags & URL_WRONLY) return AVERROR(EIO); ret = h->prot->url_read(h, buf, size); return ret; } int url_read_complete(URLContext *h, unsigned char *buf, int size) { int ret, len; int fast_retries = 5; len = 0; while (len < size) { ret = url_read(h, buf+len, size-len); if (ret == AVERROR(EAGAIN)) { ret = 0; if (fast_retries) fast_retries--; else usleep(1000); } else if (ret < 1) return ret < 0 ? ret : len; if (ret) fast_retries = FFMAX(fast_retries, 2); len += ret; } return len; } int url_write(URLContext *h, unsigned char *buf, int size) { int ret; if (!(h->flags & (URL_WRONLY | URL_RDWR))) return AVERROR(EIO); /* avoid sending too big packets */ if (h->max_packet_size && size > h->max_packet_size) return AVERROR(EIO); ret = h->prot->url_write(h, buf, size); return ret; } int64_t url_seek(URLContext *h, int64_t pos, int whence) { int64_t ret; if (!h->prot->url_seek) return AVERROR(ENOSYS); ret = h->prot->url_seek(h, pos, whence & ~AVSEEK_FORCE); return ret; } int url_close(URLContext *h) { int ret = 0; if (!h) return 0; /* can happen when url_open fails */ if (h->prot->url_close) ret = h->prot->url_close(h); #if CONFIG_NETWORK ff_network_close(); #endif av_free(h); return ret; } int url_exist(const char *filename) { URLContext *h; if (url_open(&h, filename, URL_RDONLY) < 0) return 0; url_close(h); return 1; } int64_t url_filesize(URLContext *h) { int64_t pos, size; size= 
url_seek(h, 0, AVSEEK_SIZE); if(size<0){ pos = url_seek(h, 0, SEEK_CUR); if ((size = url_seek(h, -1, SEEK_END)) < 0) return size; size++; url_seek(h, pos, SEEK_SET); } return size; } int url_get_file_handle(URLContext *h) { if (!h->prot->url_get_file_handle) return -1; return h->prot->url_get_file_handle(h); } int url_get_max_packet_size(URLContext *h) { return h->max_packet_size; } void url_get_filename(URLContext *h, char *buf, int buf_size) { av_strlcpy(buf, h->filename, buf_size); } static int default_interrupt_cb(void) { return 0; } void url_set_interrupt_cb(URLInterruptCB *interrupt_cb) { if (!interrupt_cb) interrupt_cb = default_interrupt_cb; url_interrupt_cb = interrupt_cb; } int av_url_read_pause(URLContext *h, int pause) { if (!h->prot->url_read_pause) return AVERROR(ENOSYS); return h->prot->url_read_pause(h, pause); } int64_t av_url_read_seek(URLContext *h, int stream_index, int64_t timestamp, int flags) { if (!h->prot->url_read_seek) return AVERROR(ENOSYS); return h->prot->url_read_seek(h, stream_index, timestamp, flags); }
123linslouis-android-video-cutter
jni/libavformat/avio.c
C
asf20
7,068
/* * NSV demuxer * Copyright (c) 2004 The FFmpeg Project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avformat.h" #include "riff.h" //#define DEBUG //#define DEBUG_DUMP_INDEX // XXX dumbdriving-271.nsv breaks with it commented!! //#define DEBUG_SEEK #define CHECK_SUBSEQUENT_NSVS //#define DISABLE_AUDIO /* max bytes to crawl for trying to resync * stupid streaming servers don't start at chunk boundaries... */ #define NSV_MAX_RESYNC (500*1024) #define NSV_MAX_RESYNC_TRIES 300 /* * First version by Francois Revol - revol@free.fr * References: * (1) http://www.multimedia.cx/nsv-format.txt * seems someone came to the same conclusions as me, and updated it: * (2) http://www.stud.ktu.lt/~vitslav/nsv/nsv-format.txt * http://www.stud.ktu.lt/~vitslav/nsv/ * official docs * (3) http://ultravox.aol.com/NSVFormat.rtf * Sample files: * (S1) http://www.nullsoft.com/nsv/samples/ * http://www.nullsoft.com/nsv/samples/faster.nsv * http://streamripper.sourceforge.net/openbb/read.php?TID=492&page=4 */ /* * notes on the header (Francois Revol): * * It is followed by strings, then a table, but nothing tells * where the table begins according to (1). 
After checking faster.nsv, * I believe NVSf[16-19] gives the size of the strings data * (that is the offset of the data table after the header). * After checking all samples from (S1) all confirms this. * * Then, about NSVf[12-15], faster.nsf has 179700. When veiwing it in VLC, * I noticed there was about 1 NVSs chunk/s, so I ran * strings faster.nsv | grep NSVs | wc -l * which gave me 180. That leads me to think that NSVf[12-15] might be the * file length in milliseconds. * Let's try that: * for f in *.nsv; do HTIME="$(od -t x4 "$f" | head -1 | sed 's/.* //')"; echo "'$f' $((0x$HTIME))s = $((0x$HTIME/1000/60)):$((0x$HTIME/1000%60))"; done * except for nstrailer (which doesn't have an NSVf header), it repports correct time. * * nsvtrailer.nsv (S1) does not have any NSVf header, only NSVs chunks, * so the header seems to not be mandatory. (for streaming). * * index slice duration check (excepts nsvtrailer.nsv): * for f in [^n]*.nsv; do DUR="$(ffmpeg -i "$f" 2>/dev/null | grep 'NSVf duration' | cut -d ' ' -f 4)"; IC="$(ffmpeg -i "$f" 2>/dev/null | grep 'INDEX ENTRIES' | cut -d ' ' -f 2)"; echo "duration $DUR, slite time $(($DUR/$IC))"; done */ /* * TODO: * - handle timestamps !!! * - use index * - mime-type in probe() * - seek */ #ifdef DEBUG #define PRINT(_v) printf _v #else #define PRINT(_v) #endif #if 0 struct NSVf_header { uint32_t chunk_tag; /* 'NSVf' */ uint32_t chunk_size; uint32_t file_size; /* max 4GB ??? no one learns anything it seems :^) */ uint32_t file_length; //unknown1; /* what about MSB of file_size ? 
*/ uint32_t info_strings_size; /* size of the info strings */ //unknown2; uint32_t table_entries; uint32_t table_entries_used; /* the left ones should be -1 */ }; struct NSVs_header { uint32_t chunk_tag; /* 'NSVs' */ uint32_t v4cc; /* or 'NONE' */ uint32_t a4cc; /* or 'NONE' */ uint16_t vwidth; /* assert(vwidth%16==0) */ uint16_t vheight; /* assert(vheight%16==0) */ uint8_t framerate; /* value = (framerate&0x80)?frtable[frameratex0x7f]:framerate */ uint16_t unknown; }; struct nsv_avchunk_header { uint8_t vchunk_size_lsb; uint16_t vchunk_size_msb; /* value = (vchunk_size_msb << 4) | (vchunk_size_lsb >> 4) */ uint16_t achunk_size; }; struct nsv_pcm_header { uint8_t bits_per_sample; uint8_t channel_count; uint16_t sample_rate; }; #endif /* variation from avi.h */ /*typedef struct CodecTag { int id; unsigned int tag; } CodecTag;*/ /* tags */ #define T_NSVF MKTAG('N', 'S', 'V', 'f') /* file header */ #define T_NSVS MKTAG('N', 'S', 'V', 's') /* chunk header */ #define T_TOC2 MKTAG('T', 'O', 'C', '2') /* extra index marker */ #define T_NONE MKTAG('N', 'O', 'N', 'E') /* null a/v 4CC */ #define T_SUBT MKTAG('S', 'U', 'B', 'T') /* subtitle aux data */ #define T_ASYN MKTAG('A', 'S', 'Y', 'N') /* async a/v aux marker */ #define T_KEYF MKTAG('K', 'E', 'Y', 'F') /* video keyframe aux marker (addition) */ #define TB_NSVF MKBETAG('N', 'S', 'V', 'f') #define TB_NSVS MKBETAG('N', 'S', 'V', 's') /* hardcoded stream indexes */ #define NSV_ST_VIDEO 0 #define NSV_ST_AUDIO 1 #define NSV_ST_SUBT 2 enum NSVStatus { NSV_UNSYNC, NSV_FOUND_NSVF, NSV_HAS_READ_NSVF, NSV_FOUND_NSVS, NSV_HAS_READ_NSVS, NSV_FOUND_BEEF, NSV_GOT_VIDEO, NSV_GOT_AUDIO, }; typedef struct NSVStream { int frame_offset; /* current frame (video) or byte (audio) counter (used to compute the pts) */ int scale; int rate; int sample_size; /* audio only data */ int start; int new_frame_offset; /* temporary storage (used during seek) */ int cum_len; /* temporary storage (used during seek) */ } NSVStream; typedef struct { int 
base_offset; int NSVf_end; uint32_t *nsvs_file_offset; int index_entries; enum NSVStatus state; AVPacket ahead[2]; /* [v, a] if .data is !NULL there is something */ /* cached */ int64_t duration; uint32_t vtag, atag; uint16_t vwidth, vheight; int16_t avsync; AVRational framerate; uint32_t *nsvs_timestamps; //DVDemuxContext* dv_demux; } NSVContext; static const AVCodecTag nsv_codec_video_tags[] = { { CODEC_ID_VP3, MKTAG('V', 'P', '3', ' ') }, { CODEC_ID_VP3, MKTAG('V', 'P', '3', '0') }, { CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') }, { CODEC_ID_VP5, MKTAG('V', 'P', '5', ' ') }, { CODEC_ID_VP5, MKTAG('V', 'P', '5', '0') }, { CODEC_ID_VP6, MKTAG('V', 'P', '6', ' ') }, { CODEC_ID_VP6, MKTAG('V', 'P', '6', '0') }, { CODEC_ID_VP6, MKTAG('V', 'P', '6', '1') }, { CODEC_ID_VP6, MKTAG('V', 'P', '6', '2') }, /* { CODEC_ID_VP4, MKTAG('V', 'P', '4', ' ') }, { CODEC_ID_VP4, MKTAG('V', 'P', '4', '0') }, */ { CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D') }, /* cf sample xvid decoder from nsv_codec_sdk.zip */ { CODEC_ID_RAWVIDEO, MKTAG('R', 'G', 'B', '3') }, { CODEC_ID_NONE, 0 }, }; static const AVCodecTag nsv_codec_audio_tags[] = { { CODEC_ID_MP3, MKTAG('M', 'P', '3', ' ') }, { CODEC_ID_AAC, MKTAG('A', 'A', 'C', ' ') }, { CODEC_ID_AAC, MKTAG('A', 'A', 'C', 'P') }, { CODEC_ID_SPEEX, MKTAG('S', 'P', 'X', ' ') }, { CODEC_ID_PCM_U16LE, MKTAG('P', 'C', 'M', ' ') }, { CODEC_ID_NONE, 0 }, }; //static int nsv_load_index(AVFormatContext *s); static int nsv_read_chunk(AVFormatContext *s, int fill_header); #ifdef DEBUG static void print_tag(const char *str, unsigned int tag, int size) { printf("%s: tag=%c%c%c%c\n", str, tag & 0xff, (tag >> 8) & 0xff, (tag >> 16) & 0xff, (tag >> 24) & 0xff); } #endif /* try to find something we recognize, and set the state accordingly */ static int nsv_resync(AVFormatContext *s) { NSVContext *nsv = s->priv_data; ByteIOContext *pb = s->pb; uint32_t v = 0; int i; PRINT(("%s(), offset = %"PRId64", state = %d\n", __FUNCTION__, url_ftell(pb), nsv->state)); //nsv->state = 
NSV_UNSYNC; for (i = 0; i < NSV_MAX_RESYNC; i++) { if (url_feof(pb)) { PRINT(("NSV EOF\n")); nsv->state = NSV_UNSYNC; return -1; } v <<= 8; v |= get_byte(pb); /* if (i < 8) { PRINT(("NSV resync: [%d] = %02x\n", i, v & 0x0FF)); } */ if ((v & 0x0000ffff) == 0xefbe) { /* BEEF */ PRINT(("NSV resynced on BEEF after %d bytes\n", i+1)); nsv->state = NSV_FOUND_BEEF; return 0; } /* we read as big endian, thus the MK*BE* */ if (v == TB_NSVF) { /* NSVf */ PRINT(("NSV resynced on NSVf after %d bytes\n", i+1)); nsv->state = NSV_FOUND_NSVF; return 0; } if (v == MKBETAG('N', 'S', 'V', 's')) { /* NSVs */ PRINT(("NSV resynced on NSVs after %d bytes\n", i+1)); nsv->state = NSV_FOUND_NSVS; return 0; } } PRINT(("NSV sync lost\n")); return -1; } static int nsv_parse_NSVf_header(AVFormatContext *s, AVFormatParameters *ap) { NSVContext *nsv = s->priv_data; ByteIOContext *pb = s->pb; unsigned int file_size, size; int64_t duration; int strings_size; int table_entries; int table_entries_used; PRINT(("%s()\n", __FUNCTION__)); nsv->state = NSV_UNSYNC; /* in case we fail */ size = get_le32(pb); if (size < 28) return -1; nsv->NSVf_end = size; //s->file_size = (uint32_t)get_le32(pb); file_size = (uint32_t)get_le32(pb); PRINT(("NSV NSVf chunk_size %u\n", size)); PRINT(("NSV NSVf file_size %u\n", file_size)); nsv->duration = duration = get_le32(pb); /* in ms */ PRINT(("NSV NSVf duration %"PRId64" ms\n", duration)); // XXX: store it in AVStreams strings_size = get_le32(pb); table_entries = get_le32(pb); table_entries_used = get_le32(pb); PRINT(("NSV NSVf info-strings size: %d, table entries: %d, bis %d\n", strings_size, table_entries, table_entries_used)); if (url_feof(pb)) return -1; PRINT(("NSV got header; filepos %"PRId64"\n", url_ftell(pb))); if (strings_size > 0) { char *strings; /* last byte will be '\0' to play safe with str*() */ char *p, *endp; char *token, *value; char quote; p = strings = av_mallocz(strings_size + 1); endp = strings + strings_size; get_buffer(pb, strings, strings_size); 
while (p < endp) { while (*p == ' ') p++; /* strip out spaces */ if (p >= endp-2) break; token = p; p = strchr(p, '='); if (!p || p >= endp-2) break; *p++ = '\0'; quote = *p++; value = p; p = strchr(p, quote); if (!p || p >= endp) break; *p++ = '\0'; PRINT(("NSV NSVf INFO: %s='%s'\n", token, value)); av_metadata_set2(&s->metadata, token, value, 0); } av_free(strings); } if (url_feof(pb)) return -1; PRINT(("NSV got infos; filepos %"PRId64"\n", url_ftell(pb))); if (table_entries_used > 0) { int i; nsv->index_entries = table_entries_used; if((unsigned)table_entries_used >= UINT_MAX / sizeof(uint32_t)) return -1; nsv->nsvs_file_offset = av_malloc((unsigned)table_entries_used * sizeof(uint32_t)); for(i=0;i<table_entries_used;i++) nsv->nsvs_file_offset[i] = get_le32(pb) + size; if(table_entries > table_entries_used && get_le32(pb) == MKTAG('T','O','C','2')) { nsv->nsvs_timestamps = av_malloc((unsigned)table_entries_used*sizeof(uint32_t)); for(i=0;i<table_entries_used;i++) { nsv->nsvs_timestamps[i] = get_le32(pb); } } } PRINT(("NSV got index; filepos %"PRId64"\n", url_ftell(pb))); #ifdef DEBUG_DUMP_INDEX #define V(v) ((v<0x20 || v > 127)?'.':v) /* dump index */ PRINT(("NSV %d INDEX ENTRIES:\n", table_entries)); PRINT(("NSV [dataoffset][fileoffset]\n", table_entries)); for (i = 0; i < table_entries; i++) { unsigned char b[8]; url_fseek(pb, size + nsv->nsvs_file_offset[i], SEEK_SET); get_buffer(pb, b, 8); PRINT(("NSV [0x%08lx][0x%08lx]: %02x %02x %02x %02x %02x %02x %02x %02x" "%c%c%c%c%c%c%c%c\n", nsv->nsvs_file_offset[i], size + nsv->nsvs_file_offset[i], b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7], V(b[0]), V(b[1]), V(b[2]), V(b[3]), V(b[4]), V(b[5]), V(b[6]), V(b[7]) )); } //url_fseek(pb, size, SEEK_SET); /* go back to end of header */ #undef V #endif url_fseek(pb, nsv->base_offset + size, SEEK_SET); /* required for dumbdriving-271.nsv (2 extra bytes) */ if (url_feof(pb)) return -1; nsv->state = NSV_HAS_READ_NSVF; return 0; } static int 
nsv_parse_NSVs_header(AVFormatContext *s, AVFormatParameters *ap) { NSVContext *nsv = s->priv_data; ByteIOContext *pb = s->pb; uint32_t vtag, atag; uint16_t vwidth, vheight; AVRational framerate; int i; AVStream *st; NSVStream *nst; PRINT(("%s()\n", __FUNCTION__)); vtag = get_le32(pb); atag = get_le32(pb); vwidth = get_le16(pb); vheight = get_le16(pb); i = get_byte(pb); PRINT(("NSV NSVs framerate code %2x\n", i)); if(i&0x80) { /* odd way of giving native framerates from docs */ int t=(i & 0x7F)>>2; if(t<16) framerate = (AVRational){1, t+1}; else framerate = (AVRational){t-15, 1}; if(i&1){ framerate.num *= 1000; framerate.den *= 1001; } if((i&3)==3) framerate.num *= 24; else if((i&3)==2) framerate.num *= 25; else framerate.num *= 30; } else framerate= (AVRational){i, 1}; nsv->avsync = get_le16(pb); nsv->framerate = framerate; #ifdef DEBUG print_tag("NSV NSVs vtag", vtag, 0); print_tag("NSV NSVs atag", atag, 0); PRINT(("NSV NSVs vsize %dx%d\n", vwidth, vheight)); #endif /* XXX change to ap != NULL ? 
*/ if (s->nb_streams == 0) { /* streams not yet published, let's do that */ nsv->vtag = vtag; nsv->atag = atag; nsv->vwidth = vwidth; nsv->vheight = vwidth; if (vtag != T_NONE) { int i; st = av_new_stream(s, NSV_ST_VIDEO); if (!st) goto fail; nst = av_mallocz(sizeof(NSVStream)); if (!nst) goto fail; st->priv_data = nst; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_tag = vtag; st->codec->codec_id = ff_codec_get_id(nsv_codec_video_tags, vtag); st->codec->width = vwidth; st->codec->height = vheight; st->codec->bits_per_coded_sample = 24; /* depth XXX */ av_set_pts_info(st, 64, framerate.den, framerate.num); st->start_time = 0; st->duration = av_rescale(nsv->duration, framerate.num, 1000*framerate.den); for(i=0;i<nsv->index_entries;i++) { if(nsv->nsvs_timestamps) { av_add_index_entry(st, nsv->nsvs_file_offset[i], nsv->nsvs_timestamps[i], 0, 0, AVINDEX_KEYFRAME); } else { int64_t ts = av_rescale(i*nsv->duration/nsv->index_entries, framerate.num, 1000*framerate.den); av_add_index_entry(st, nsv->nsvs_file_offset[i], ts, 0, 0, AVINDEX_KEYFRAME); } } } if (atag != T_NONE) { #ifndef DISABLE_AUDIO st = av_new_stream(s, NSV_ST_AUDIO); if (!st) goto fail; nst = av_mallocz(sizeof(NSVStream)); if (!nst) goto fail; st->priv_data = nst; st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_tag = atag; st->codec->codec_id = ff_codec_get_id(nsv_codec_audio_tags, atag); st->need_parsing = AVSTREAM_PARSE_FULL; /* for PCM we will read a chunk later and put correct info */ /* set timebase to common denominator of ms and framerate */ av_set_pts_info(st, 64, 1, framerate.num*1000); st->start_time = 0; st->duration = (int64_t)nsv->duration * framerate.num; #endif } #ifdef CHECK_SUBSEQUENT_NSVS } else { if (nsv->vtag != vtag || nsv->atag != atag || nsv->vwidth != vwidth || nsv->vheight != vwidth) { PRINT(("NSV NSVs header values differ from the first one!!!\n")); //return -1; } #endif /* CHECK_SUBSEQUENT_NSVS */ } nsv->state = NSV_HAS_READ_NSVS; return 0; fail: /* XXX 
*/ nsv->state = NSV_UNSYNC; return -1; } static int nsv_read_header(AVFormatContext *s, AVFormatParameters *ap) { NSVContext *nsv = s->priv_data; int i, err; PRINT(("%s()\n", __FUNCTION__)); PRINT(("filename '%s'\n", s->filename)); nsv->state = NSV_UNSYNC; nsv->ahead[0].data = nsv->ahead[1].data = NULL; for (i = 0; i < NSV_MAX_RESYNC_TRIES; i++) { if (nsv_resync(s) < 0) return -1; if (nsv->state == NSV_FOUND_NSVF) err = nsv_parse_NSVf_header(s, ap); /* we need the first NSVs also... */ if (nsv->state == NSV_FOUND_NSVS) { err = nsv_parse_NSVs_header(s, ap); break; /* we just want the first one */ } } if (s->nb_streams < 1) /* no luck so far */ return -1; /* now read the first chunk, so we can attempt to decode more info */ err = nsv_read_chunk(s, 1); PRINT(("parsed header\n")); return 0; } static int nsv_read_chunk(AVFormatContext *s, int fill_header) { NSVContext *nsv = s->priv_data; ByteIOContext *pb = s->pb; AVStream *st[2] = {NULL, NULL}; NSVStream *nst; AVPacket *pkt; int i, err = 0; uint8_t auxcount; /* number of aux metadata, also 4 bits of vsize */ uint32_t vsize; uint16_t asize; uint16_t auxsize; uint32_t auxtag; PRINT(("%s(%d)\n", __FUNCTION__, fill_header)); if (nsv->ahead[0].data || nsv->ahead[1].data) return 0; //-1; /* hey! eat what you've in your plate first! 
*/ null_chunk_retry: if (url_feof(pb)) return -1; for (i = 0; i < NSV_MAX_RESYNC_TRIES && nsv->state < NSV_FOUND_NSVS && !err; i++) err = nsv_resync(s); if (err < 0) return err; if (nsv->state == NSV_FOUND_NSVS) err = nsv_parse_NSVs_header(s, NULL); if (err < 0) return err; if (nsv->state != NSV_HAS_READ_NSVS && nsv->state != NSV_FOUND_BEEF) return -1; auxcount = get_byte(pb); vsize = get_le16(pb); asize = get_le16(pb); vsize = (vsize << 4) | (auxcount >> 4); auxcount &= 0x0f; PRINT(("NSV CHUNK %d aux, %u bytes video, %d bytes audio\n", auxcount, vsize, asize)); /* skip aux stuff */ for (i = 0; i < auxcount; i++) { auxsize = get_le16(pb); auxtag = get_le32(pb); PRINT(("NSV aux data: '%c%c%c%c', %d bytes\n", (auxtag & 0x0ff), ((auxtag >> 8) & 0x0ff), ((auxtag >> 16) & 0x0ff), ((auxtag >> 24) & 0x0ff), auxsize)); url_fskip(pb, auxsize); vsize -= auxsize + sizeof(uint16_t) + sizeof(uint32_t); /* that's becoming braindead */ } if (url_feof(pb)) return -1; if (!vsize && !asize) { nsv->state = NSV_UNSYNC; goto null_chunk_retry; } /* map back streams to v,a */ if (s->streams[0]) st[s->streams[0]->id] = s->streams[0]; if (s->streams[1]) st[s->streams[1]->id] = s->streams[1]; if (vsize/* && st[NSV_ST_VIDEO]*/) { nst = st[NSV_ST_VIDEO]->priv_data; pkt = &nsv->ahead[NSV_ST_VIDEO]; av_get_packet(pb, pkt, vsize); pkt->stream_index = st[NSV_ST_VIDEO]->index;//NSV_ST_VIDEO; pkt->dts = nst->frame_offset; pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */ /* for (i = 0; i < MIN(8, vsize); i++) PRINT(("NSV video: [%d] = %02x\n", i, pkt->data[i])); */ } if(st[NSV_ST_VIDEO]) ((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset++; if (asize/*st[NSV_ST_AUDIO]*/) { nst = st[NSV_ST_AUDIO]->priv_data; pkt = &nsv->ahead[NSV_ST_AUDIO]; /* read raw audio specific header on the first audio chunk... */ /* on ALL audio chunks ?? seems so! 
*/ if (asize && st[NSV_ST_AUDIO]->codec->codec_tag == MKTAG('P', 'C', 'M', ' ')/* && fill_header*/) { uint8_t bps; uint8_t channels; uint16_t samplerate; bps = get_byte(pb); channels = get_byte(pb); samplerate = get_le16(pb); asize-=4; PRINT(("NSV RAWAUDIO: bps %d, nchan %d, srate %d\n", bps, channels, samplerate)); if (fill_header) { st[NSV_ST_AUDIO]->need_parsing = AVSTREAM_PARSE_NONE; /* we know everything */ if (bps != 16) { PRINT(("NSV AUDIO bit/sample != 16 (%d)!!!\n", bps)); } bps /= channels; // ??? if (bps == 8) st[NSV_ST_AUDIO]->codec->codec_id = CODEC_ID_PCM_U8; samplerate /= 4;/* UGH ??? XXX */ channels = 1; st[NSV_ST_AUDIO]->codec->channels = channels; st[NSV_ST_AUDIO]->codec->sample_rate = samplerate; PRINT(("NSV RAWAUDIO: bps %d, nchan %d, srate %d\n", bps, channels, samplerate)); } } av_get_packet(pb, pkt, asize); pkt->stream_index = st[NSV_ST_AUDIO]->index;//NSV_ST_AUDIO; pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */ if( nsv->state == NSV_HAS_READ_NSVS && st[NSV_ST_VIDEO] ) { /* on a nsvs frame we have new information on a/v sync */ pkt->dts = (((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset-1); pkt->dts *= (int64_t)1000 * nsv->framerate.den; pkt->dts += (int64_t)nsv->avsync * nsv->framerate.num; PRINT(("NSV AUDIO: sync:%d, dts:%"PRId64, nsv->avsync, pkt->dts)); } nst->frame_offset++; } nsv->state = NSV_UNSYNC; return 0; } static int nsv_read_packet(AVFormatContext *s, AVPacket *pkt) { NSVContext *nsv = s->priv_data; int i, err = 0; PRINT(("%s()\n", __FUNCTION__)); /* in case we don't already have something to eat ... 
*/
    /* Refill the one-packet-per-stream lookahead cache if it is empty;
     * nsv_read_chunk() may fill either slot (video and/or audio). */
    if (nsv->ahead[0].data == NULL && nsv->ahead[1].data == NULL)
        err = nsv_read_chunk(s, 0);
    if (err < 0)
        return err;

    /* now pick one of the plates */
    for (i = 0; i < 2; i++) {
        if (nsv->ahead[i].data) {
            PRINT(("%s: using cached packet[%d]\n", __FUNCTION__, i));
            /* avoid the cost of new_packet + memcpy(->data) */
            /* shallow-copy the cached packet; ownership of ->data moves
             * to the caller, so clear our reference afterwards */
            memcpy(pkt, &nsv->ahead[i], sizeof(AVPacket));
            nsv->ahead[i].data = NULL; /* we ate that one */
            return pkt->size;
        }
    }

    /* this restaurant is not approvisionned :^] */
    return -1;
}

/* Seek by looking up the timestamp in the index built from the NSVf TOC,
 * then force a resync so the next read re-parses chunk headers. */
static int nsv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    NSVContext *nsv = s->priv_data;
    AVStream *st = s->streams[stream_index];
    NSVStream *nst = st->priv_data;
    int index;

    index = av_index_search_timestamp(st, timestamp, flags);
    if(index < 0)
        return -1;

    url_fseek(s->pb, st->index_entries[index].pos, SEEK_SET);
    /* index entries store the frame number as their timestamp */
    nst->frame_offset = st->index_entries[index].timestamp;
    nsv->state = NSV_UNSYNC;
    return 0;
}

/* Release the TOC arrays and any packets still sitting in the
 * lookahead cache. */
static int nsv_read_close(AVFormatContext *s)
{
/*     int i; */
    NSVContext *nsv = s->priv_data;

    av_freep(&nsv->nsvs_file_offset);
    av_freep(&nsv->nsvs_timestamps);
    if (nsv->ahead[0].data)
        av_free_packet(&nsv->ahead[0]);
    if (nsv->ahead[1].data)
        av_free_packet(&nsv->ahead[1]);

#if 0
    for(i=0;i<s->nb_streams;i++) {
        AVStream *st = s->streams[i];
        NSVStream *ast = st->priv_data;
        if(ast){
            av_free(ast->index_entries);
            av_free(ast);
        }
        av_free(st->codec->palctrl);
    }
#endif

    return 0;
}

/* Probe: accept files starting with the "NSVf"/"NSVs" magic outright;
 * streamed captures may start mid-stream, handled below. */
static int nsv_probe(AVProbeData *p)
{
    int i;
//    PRINT(("nsv_probe(), buf_size %d\n", p->buf_size));
    /* check file header */
    /* streamed files might not have any header */
    if (p->buf[0] == 'N' && p->buf[1] == 'S' &&
        p->buf[2] == 'V' && (p->buf[3] == 'f' || p->buf[3] == 's'))
        return AVPROBE_SCORE_MAX;
    /* XXX: do streamed files always start at chunk boundary ?? */
    /* or do we need to search NSVs in the byte stream ? */
    /* seems the servers don't bother starting clean chunks...
*/ /* sometimes even the first header is at 9KB or something :^) */ for (i = 1; i < p->buf_size - 3; i++) { if (p->buf[i+0] == 'N' && p->buf[i+1] == 'S' && p->buf[i+2] == 'V' && p->buf[i+3] == 's') return AVPROBE_SCORE_MAX-20; } /* so we'll have more luck on extension... */ if (av_match_ext(p->filename, "nsv")) return AVPROBE_SCORE_MAX/2; /* FIXME: add mime-type check */ return 0; } AVInputFormat nsv_demuxer = { "nsv", NULL_IF_CONFIG_SMALL("Nullsoft Streaming Video"), sizeof(NSVContext), nsv_probe, nsv_read_header, nsv_read_packet, nsv_read_close, nsv_read_seek, };
123linslouis-android-video-cutter
jni/libavformat/nsvdec.c
C
asf20
25,718
/* * SSA/ASS demuxer * Copyright (c) 2008 Michael Niedermayer * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avformat.h" #define MAX_LINESIZE 2000 typedef struct ASSContext{ uint8_t *event_buffer; uint8_t **event; unsigned int event_count; unsigned int event_index; }ASSContext; static void get_line(ByteIOContext *s, char *buf, int maxlen) { int i = 0; char c; do{ c = get_byte(s); if (i < maxlen-1) buf[i++] = c; }while(c != '\n' && c); buf[i] = 0; } static int probe(AVProbeData *p) { const char *header= "[Script Info]"; if( !memcmp(p->buf , header, strlen(header)) || !memcmp(p->buf+3, header, strlen(header))) return AVPROBE_SCORE_MAX; return 0; } static int read_close(AVFormatContext *s) { ASSContext *ass = s->priv_data; av_freep(&ass->event_buffer); av_freep(&ass->event); return 0; } static int64_t get_pts(const uint8_t *p) { int hour, min, sec, hsec; if(sscanf(p, "%*[^,],%d:%d:%d%*c%d", &hour, &min, &sec, &hsec) != 4) return AV_NOPTS_VALUE; // av_log(NULL, AV_LOG_ERROR, "%d %d %d %d %d [%s]\n", i, hour, min, sec, hsec, p); min+= 60*hour; sec+= 60*min; return sec*100+hsec; } static int event_cmp(uint8_t **a, uint8_t **b) { return get_pts(*a) - get_pts(*b); } static int read_header(AVFormatContext *s, AVFormatParameters *ap) { int i, header_remaining; ASSContext *ass = 
s->priv_data; ByteIOContext *pb = s->pb; AVStream *st; int allocated[2]={0}; uint8_t *p, **dst[2]={0}; int pos[2]={0}; st = av_new_stream(s, 0); if (!st) return -1; av_set_pts_info(st, 64, 1, 100); st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE; st->codec->codec_id= CODEC_ID_SSA; header_remaining= INT_MAX; dst[0] = &st->codec->extradata; dst[1] = &ass->event_buffer; while(!url_feof(pb)){ uint8_t line[MAX_LINESIZE]; get_line(pb, line, sizeof(line)); if(!memcmp(line, "[Events]", 8)) header_remaining= 2; else if(line[0]=='[') header_remaining= INT_MAX; i= header_remaining==0; if(i && get_pts(line) == AV_NOPTS_VALUE) continue; p = av_fast_realloc(*(dst[i]), &allocated[i], pos[i]+MAX_LINESIZE); if(!p) goto fail; *(dst[i])= p; memcpy(p + pos[i], line, strlen(line)+1); pos[i] += strlen(line); if(i) ass->event_count++; else header_remaining--; } st->codec->extradata_size= pos[0]; if(ass->event_count >= UINT_MAX / sizeof(*ass->event)) goto fail; ass->event= av_malloc(ass->event_count * sizeof(*ass->event)); p= ass->event_buffer; for(i=0; i<ass->event_count; i++){ ass->event[i]= p; while(*p && *p != '\n') p++; p++; } qsort(ass->event, ass->event_count, sizeof(*ass->event), (void*)event_cmp); return 0; fail: read_close(s); return -1; } static int read_packet(AVFormatContext *s, AVPacket *pkt) { ASSContext *ass = s->priv_data; uint8_t *p, *end; if(ass->event_index >= ass->event_count) return AVERROR(EIO); p= ass->event[ ass->event_index ]; end= strchr(p, '\n'); av_new_packet(pkt, end ? end-p+1 : strlen(p)); pkt->flags |= AV_PKT_FLAG_KEY; pkt->pos= p - ass->event_buffer + s->streams[0]->codec->extradata_size; pkt->pts= pkt->dts= get_pts(p); memcpy(pkt->data, p, pkt->size); ass->event_index++; return 0; } AVInputFormat ass_demuxer = { "ass", NULL_IF_CONFIG_SMALL("SSA/ASS format"), sizeof(ASSContext), probe, read_header, read_packet, read_close, // read_seek, };
123linslouis-android-video-cutter
jni/libavformat/assdec.c
C
asf20
4,480
/* * copyright (c) 2001 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVFORMAT_INTERNAL_H #define AVFORMAT_INTERNAL_H #include <stdint.h> #include "avformat.h" void ff_dynarray_add(intptr_t **tab_ptr, int *nb_ptr, intptr_t elem); #ifdef __GNUC__ #define dynarray_add(tab, nb_ptr, elem)\ do {\ __typeof__(tab) _tab = (tab);\ __typeof__(elem) _elem = (elem);\ (void)sizeof(**_tab == _elem); /* check that types are compatible */\ ff_dynarray_add((intptr_t **)_tab, nb_ptr, (intptr_t)_elem);\ } while(0) #else #define dynarray_add(tab, nb_ptr, elem)\ do {\ ff_dynarray_add((intptr_t **)(tab), nb_ptr, (intptr_t)(elem));\ } while(0) #endif time_t mktimegm(struct tm *tm); struct tm *brktimegm(time_t secs, struct tm *tm); const char *small_strptime(const char *p, const char *fmt, struct tm *dt); char *ff_data_to_hex(char *buf, const uint8_t *src, int size, int lowercase); void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx); /** * Add packet to AVFormatContext->packet_buffer list, determining its * interleaved position using compare() function argument. 
*/ void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt, int (*compare)(AVFormatContext *, AVPacket *, AVPacket *)); void ff_read_frame_flush(AVFormatContext *s); #define NTP_OFFSET 2208988800ULL #define NTP_OFFSET_US (NTP_OFFSET * 1000000ULL) /** Gets the current time since NTP epoch in microseconds. */ uint64_t ff_ntp_time(void); /** * Probes a bytestream to determine the input format. Each time a probe returns * with a score that is too low, the probe buffer size is increased and another * attempt is made. When the maximum probe size is reached, the input format * with the highest score is returned. * * @param pb the bytestream to probe, it may be closed and opened again * @param fmt the input format is put here * @param filename the filename of the stream * @param logctx the log context * @param offset the offset within the bytestream to probe from * @param max_probe_size the maximum probe buffer size (zero for default) * @return 0 in case of success, a negative value corresponding to an * AVERROR code otherwise */ int ff_probe_input_buffer(ByteIOContext **pb, AVInputFormat **fmt, const char *filename, void *logctx, unsigned int offset, unsigned int max_probe_size); /** * Splits a URL string into components. To reassemble components back into * a URL, use ff_url_join instead of using snprintf directly. * * The pointers to buffers for storing individual components may be null, * in order to ignore that component. Buffers for components not found are * set to empty strings. If the port isn't found, it is set to a negative * value. 
* * @see ff_url_join * * @param proto the buffer for the protocol * @param proto_size the size of the proto buffer * @param authorization the buffer for the authorization * @param authorization_size the size of the authorization buffer * @param hostname the buffer for the host name * @param hostname_size the size of the hostname buffer * @param port_ptr a pointer to store the port number in * @param path the buffer for the path * @param path_size the size of the path buffer * @param url the URL to split */ void ff_url_split(char *proto, int proto_size, char *authorization, int authorization_size, char *hostname, int hostname_size, int *port_ptr, char *path, int path_size, const char *url); /** * Assembles a URL string from components. This is the reverse operation * of ff_url_split. * * Note, this requires networking to be initialized, so the caller must * ensure ff_network_init has been called. * * @see ff_url_split * * @param str the buffer to fill with the url * @param size the size of the str buffer * @param proto the protocol identifier, if null, the separator * after the identifier is left out, too * @param authorization an optional authorization string, may be null * @param hostname the host name string * @param port the port number, left out from the string if negative * @param fmt a generic format string for everything to add after the * host/port, may be null * @return the number of characters written to the destination buffer */ int ff_url_join(char *str, int size, const char *proto, const char *authorization, const char *hostname, int port, const char *fmt, ...); /** * Appends the media-specific SDP fragment for the media stream c * to the buffer buff. * * Note, the buffer needs to be initialized, since it is appended to * existing content. 
* * @param buff the buffer to append the SDP fragment to * @param size the size of the buff buffer * @param c the AVCodecContext of the media to describe * @param dest_addr the destination address of the media stream, may be NULL * @param port the destination port of the media stream, 0 if unknown * @param ttl the time to live of the stream, 0 if not multicast */ void ff_sdp_write_media(char *buff, int size, AVCodecContext *c, const char *dest_addr, int port, int ttl); #endif /* AVFORMAT_INTERNAL_H */
123linslouis-android-video-cutter
jni/libavformat/internal.h
C
asf20
6,074
/* * Brute Force & Ignorance (BFI) demuxer * Copyright (c) 2008 Sisir Koppaka * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * @brief Brute Force & Ignorance (.bfi) file demuxer * @author Sisir Koppaka ( sisir.koppaka at gmail dot com ) * @sa http://wiki.multimedia.cx/index.php?title=BFI */ #include "libavutil/intreadwrite.h" #include "avformat.h" typedef struct BFIContext { int nframes; int audio_frame; int video_frame; int video_size; int avflag; } BFIContext; static int bfi_probe(AVProbeData * p) { /* Check file header */ if (AV_RL32(p->buf) == MKTAG('B', 'F', '&', 'I')) return AVPROBE_SCORE_MAX; else return 0; } static int bfi_read_header(AVFormatContext * s, AVFormatParameters * ap) { BFIContext *bfi = s->priv_data; ByteIOContext *pb = s->pb; AVStream *vstream; AVStream *astream; int fps, chunk_header; /* Initialize the video codec... */ vstream = av_new_stream(s, 0); if (!vstream) return AVERROR(ENOMEM); /* Initialize the audio codec... */ astream = av_new_stream(s, 0); if (!astream) return AVERROR(ENOMEM); /* Set the total number of frames. 
*/ url_fskip(pb, 8); chunk_header = get_le32(pb); bfi->nframes = get_le32(pb); get_le32(pb); get_le32(pb); get_le32(pb); fps = get_le32(pb); url_fskip(pb, 12); vstream->codec->width = get_le32(pb); vstream->codec->height = get_le32(pb); /*Load the palette to extradata */ url_fskip(pb, 8); vstream->codec->extradata = av_malloc(768); vstream->codec->extradata_size = 768; get_buffer(pb, vstream->codec->extradata, vstream->codec->extradata_size); astream->codec->sample_rate = get_le32(pb); /* Set up the video codec... */ av_set_pts_info(vstream, 32, 1, fps); vstream->codec->codec_type = AVMEDIA_TYPE_VIDEO; vstream->codec->codec_id = CODEC_ID_BFI; vstream->codec->pix_fmt = PIX_FMT_PAL8; /* Set up the audio codec now... */ astream->codec->codec_type = AVMEDIA_TYPE_AUDIO; astream->codec->codec_id = CODEC_ID_PCM_U8; astream->codec->channels = 1; astream->codec->bits_per_coded_sample = 8; astream->codec->bit_rate = astream->codec->sample_rate * astream->codec->bits_per_coded_sample; url_fseek(pb, chunk_header - 3, SEEK_SET); av_set_pts_info(astream, 64, 1, astream->codec->sample_rate); return 0; } static int bfi_read_packet(AVFormatContext * s, AVPacket * pkt) { BFIContext *bfi = s->priv_data; ByteIOContext *pb = s->pb; int ret, audio_offset, video_offset, chunk_size, audio_size = 0; if (bfi->nframes == 0 || url_feof(pb)) { return AVERROR(EIO); } /* If all previous chunks were completely read, then find a new one... */ if (!bfi->avflag) { uint32_t state = 0; while(state != MKTAG('S','A','V','I')){ if (url_feof(pb)) return AVERROR(EIO); state = 256*state + get_byte(pb); } /* Now that the chunk's location is confirmed, we proceed... */ chunk_size = get_le32(pb); get_le32(pb); audio_offset = get_le32(pb); get_le32(pb); video_offset = get_le32(pb); audio_size = video_offset - audio_offset; bfi->video_size = chunk_size - video_offset; //Tossing an audio packet at the audio decoder. 
ret = av_get_packet(pb, pkt, audio_size); if (ret < 0) return ret; pkt->pts = bfi->audio_frame; bfi->audio_frame += ret; } else { //Tossing a video packet at the video decoder. ret = av_get_packet(pb, pkt, bfi->video_size); if (ret < 0) return ret; pkt->pts = bfi->video_frame; bfi->video_frame += ret / bfi->video_size; /* One less frame to read. A cursory decrement. */ bfi->nframes--; } bfi->avflag = !bfi->avflag; pkt->stream_index = bfi->avflag; return ret; } AVInputFormat bfi_demuxer = { "bfi", NULL_IF_CONFIG_SMALL("Brute Force & Ignorance"), sizeof(BFIContext), bfi_probe, bfi_read_header, bfi_read_packet, };
123linslouis-android-video-cutter
jni/libavformat/bfi.c
C
asf20
5,045
/* * Copyright (c) 2000, 2001, 2002 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avformat.h" #include "libavcodec/opt.h" /** * @file * Options definition for AVFormatContext. */ static const char* format_to_name(void* ptr) { AVFormatContext* fc = (AVFormatContext*) ptr; if(fc->iformat) return fc->iformat->name; else if(fc->oformat) return fc->oformat->name; else return "NULL"; } #define OFFSET(x) offsetof(AVFormatContext,x) #define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C //these names are too long to be readable #define E AV_OPT_FLAG_ENCODING_PARAM #define D AV_OPT_FLAG_DECODING_PARAM static const AVOption options[]={ {"probesize", "set probing size", OFFSET(probesize), FF_OPT_TYPE_INT, 5000000, 32, INT_MAX, D}, {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E}, {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E}, {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"}, {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"}, {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"}, 
{"nofillin", "do not fill in missing values that can be exactly calculated", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_NOFILLIN, INT_MIN, INT_MAX, D, "fflags"}, {"noparse", "disable AVParsers, this needs nofillin too", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_NOPARSE, INT_MIN, INT_MAX, D, "fflags"}, {"igndts", "ingore dts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNDTS, INT_MIN, INT_MAX, D, "fflags"}, {"rtphint", "add rtp hinting", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_RTP_HINT, INT_MIN, INT_MAX, E, "fflags"}, #if LIBAVFORMAT_VERSION_INT < (53<<16) {"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E}, {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E}, #endif {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 5*AV_TIME_BASE, 0, INT_MAX, D}, {"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D}, {"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, 1<<20, 0, INT_MAX, D}, {"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, 3041280, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */ {"fdebug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, DEFAULT, 0, INT_MAX, E|D, "fdebug"}, {"ts", NULL, 0, FF_OPT_TYPE_CONST, FF_FDEBUG_TS, INT_MIN, INT_MAX, E|D, "fdebug"}, {NULL}, }; #undef E #undef D #undef DEFAULT static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options, LIBAVUTIL_VERSION_INT }; static void avformat_get_context_defaults(AVFormatContext *s) { memset(s, 0, sizeof(AVFormatContext)); s->av_class = &av_format_context_class; av_opt_set_defaults(s); } AVFormatContext *avformat_alloc_context(void) { AVFormatContext *ic; ic = av_malloc(sizeof(AVFormatContext)); if (!ic) return ic; avformat_get_context_defaults(ic); ic->av_class = &av_format_context_class; 
return ic; } #if LIBAVFORMAT_VERSION_MAJOR < 53 AVFormatContext *av_alloc_format_context(void) { return avformat_alloc_context(); } #endif
123linslouis-android-video-cutter
jni/libavformat/options.c
C
asf20
4,216
/* * Creative Voice File demuxer. * Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/intreadwrite.h" #include "voc.h" static int voc_probe(AVProbeData *p) { int version, check; if (memcmp(p->buf, ff_voc_magic, sizeof(ff_voc_magic) - 1)) return 0; version = AV_RL16(p->buf + 22); check = AV_RL16(p->buf + 24); if (~version + 0x1234 != check) return 10; return AVPROBE_SCORE_MAX; } static int voc_read_header(AVFormatContext *s, AVFormatParameters *ap) { VocDecContext *voc = s->priv_data; ByteIOContext *pb = s->pb; int header_size; AVStream *st; url_fskip(pb, 20); header_size = get_le16(pb) - 22; if (header_size != 4) { av_log(s, AV_LOG_ERROR, "unknown header size: %d\n", header_size); return AVERROR(ENOSYS); } url_fskip(pb, header_size); st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); st->codec->codec_type = AVMEDIA_TYPE_AUDIO; voc->remaining_size = 0; return 0; } int voc_get_packet(AVFormatContext *s, AVPacket *pkt, AVStream *st, int max_size) { VocDecContext *voc = s->priv_data; AVCodecContext *dec = st->codec; ByteIOContext *pb = s->pb; VocType type; int size; int sample_rate = 0; int channels = 1; while (!voc->remaining_size) { type = get_byte(pb); if (type == VOC_TYPE_EOF) return AVERROR(EIO); 
voc->remaining_size = get_le24(pb); if (!voc->remaining_size) { if (url_is_streamed(s->pb)) return AVERROR(EIO); voc->remaining_size = url_fsize(pb) - url_ftell(pb); } max_size -= 4; switch (type) { case VOC_TYPE_VOICE_DATA: dec->sample_rate = 1000000 / (256 - get_byte(pb)); if (sample_rate) dec->sample_rate = sample_rate; dec->channels = channels; dec->codec_id = ff_codec_get_id(ff_voc_codec_tags, get_byte(pb)); dec->bits_per_coded_sample = av_get_bits_per_sample(dec->codec_id); voc->remaining_size -= 2; max_size -= 2; channels = 1; break; case VOC_TYPE_VOICE_DATA_CONT: break; case VOC_TYPE_EXTENDED: sample_rate = get_le16(pb); get_byte(pb); channels = get_byte(pb) + 1; sample_rate = 256000000 / (channels * (65536 - sample_rate)); voc->remaining_size = 0; max_size -= 4; break; case VOC_TYPE_NEW_VOICE_DATA: dec->sample_rate = get_le32(pb); dec->bits_per_coded_sample = get_byte(pb); dec->channels = get_byte(pb); dec->codec_id = ff_codec_get_id(ff_voc_codec_tags, get_le16(pb)); url_fskip(pb, 4); voc->remaining_size -= 12; max_size -= 12; break; default: url_fskip(pb, voc->remaining_size); max_size -= voc->remaining_size; voc->remaining_size = 0; break; } } dec->bit_rate = dec->sample_rate * dec->bits_per_coded_sample; if (max_size <= 0) max_size = 2048; size = FFMIN(voc->remaining_size, max_size); voc->remaining_size -= size; return av_get_packet(pb, pkt, size); } static int voc_read_packet(AVFormatContext *s, AVPacket *pkt) { return voc_get_packet(s, pkt, s->streams[0], 0); } AVInputFormat voc_demuxer = { "voc", NULL_IF_CONFIG_SMALL("Creative Voice file format"), sizeof(VocDecContext), voc_probe, voc_read_header, voc_read_packet, .codec_tag=(const AVCodecTag* const []){ff_voc_codec_tags, 0}, };
123linslouis-android-video-cutter
jni/libavformat/vocdec.c
C
asf20
4,493
/* * Matroska muxer * Copyright (c) 2007 David Conrad * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avformat.h" #include "riff.h" #include "isom.h" #include "matroska.h" #include "avc.h" #include "flacenc.h" #include "libavutil/intreadwrite.h" #include "libavutil/md5.h" #include "libavcodec/xiph.h" #include "libavcodec/mpeg4audio.h" typedef struct ebml_master { int64_t pos; ///< absolute offset in the file where the master's elements start int sizebytes; ///< how many bytes were reserved for the size } ebml_master; typedef struct mkv_seekhead_entry { unsigned int elementid; uint64_t segmentpos; } mkv_seekhead_entry; typedef struct mkv_seekhead { int64_t filepos; int64_t segment_offset; ///< the file offset to the beginning of the segment int reserved_size; ///< -1 if appending to file int max_entries; mkv_seekhead_entry *entries; int num_entries; } mkv_seekhead; typedef struct { uint64_t pts; int tracknum; int64_t cluster_pos; ///< file offset of the cluster containing the block } mkv_cuepoint; typedef struct { int64_t segment_offset; mkv_cuepoint *entries; int num_entries; } mkv_cues; typedef struct { int write_dts; } mkv_track; #define MODE_MATROSKAv2 0x01 #define MODE_WEBM 0x02 typedef struct MatroskaMuxContext { int mode; ByteIOContext *dyn_bc; ebml_master segment; 
int64_t segment_offset; int64_t segment_uid; ebml_master cluster; int64_t cluster_pos; ///< file offset of the current cluster int64_t cluster_pts; int64_t duration_offset; int64_t duration; mkv_seekhead *main_seekhead; mkv_seekhead *cluster_seekhead; mkv_cues *cues; mkv_track *tracks; struct AVMD5 *md5_ctx; } MatroskaMuxContext; /** 2 bytes * 3 for EBML IDs, 3 1-byte EBML lengths, 8 bytes for 64 bit * offset, 4 bytes for target EBML ID */ #define MAX_SEEKENTRY_SIZE 21 /** per-cuepoint-track - 3 1-byte EBML IDs, 3 1-byte EBML sizes, 2 * 8-byte uint max */ #define MAX_CUETRACKPOS_SIZE 22 /** per-cuepoint - 2 1-byte EBML IDs, 2 1-byte EBML sizes, 8-byte uint max */ #define MAX_CUEPOINT_SIZE(num_tracks) 12 + MAX_CUETRACKPOS_SIZE*num_tracks static int ebml_id_size(unsigned int id) { return (av_log2(id+1)-1)/7+1; } static void put_ebml_id(ByteIOContext *pb, unsigned int id) { int i = ebml_id_size(id); while (i--) put_byte(pb, id >> (i*8)); } /** * Write an EBML size meaning "unknown size". * * @param bytes The number of bytes the size should occupy (maximum: 8). */ static void put_ebml_size_unknown(ByteIOContext *pb, int bytes) { assert(bytes <= 8); put_byte(pb, 0x1ff >> bytes); while (--bytes) put_byte(pb, 0xff); } /** * Calculate how many bytes are needed to represent a given number in EBML. */ static int ebml_num_size(uint64_t num) { int bytes = 1; while ((num+1) >> bytes*7) bytes++; return bytes; } /** * Write a number in EBML variable length format. * * @param bytes The number of bytes that need to be used to write the number. * If zero, any number of bytes can be used. 
*/ static void put_ebml_num(ByteIOContext *pb, uint64_t num, int bytes) { int i, needed_bytes = ebml_num_size(num); // sizes larger than this are currently undefined in EBML assert(num < (1ULL<<56)-1); if (bytes == 0) // don't care how many bytes are used, so use the min bytes = needed_bytes; // the bytes needed to write the given size would exceed the bytes // that we need to use, so write unknown size. This shouldn't happen. assert(bytes >= needed_bytes); num |= 1ULL << bytes*7; for (i = bytes - 1; i >= 0; i--) put_byte(pb, num >> i*8); } static void put_ebml_uint(ByteIOContext *pb, unsigned int elementid, uint64_t val) { int i, bytes = 1; uint64_t tmp = val; while (tmp>>=8) bytes++; put_ebml_id(pb, elementid); put_ebml_num(pb, bytes, 0); for (i = bytes - 1; i >= 0; i--) put_byte(pb, val >> i*8); } static void put_ebml_float(ByteIOContext *pb, unsigned int elementid, double val) { put_ebml_id(pb, elementid); put_ebml_num(pb, 8, 0); put_be64(pb, av_dbl2int(val)); } static void put_ebml_binary(ByteIOContext *pb, unsigned int elementid, const uint8_t *buf, int size) { put_ebml_id(pb, elementid); put_ebml_num(pb, size, 0); put_buffer(pb, buf, size); } static void put_ebml_string(ByteIOContext *pb, unsigned int elementid, const char *str) { put_ebml_binary(pb, elementid, str, strlen(str)); } /** * Writes a void element of a given size. Useful for reserving space in * the file to be written to later. * * @param size The number of bytes to reserve, which must be at least 2. 
*/ static void put_ebml_void(ByteIOContext *pb, uint64_t size) { int64_t currentpos = url_ftell(pb); assert(size >= 2); put_ebml_id(pb, EBML_ID_VOID); // we need to subtract the length needed to store the size from the // size we need to reserve so 2 cases, we use 8 bytes to store the // size if possible, 1 byte otherwise if (size < 10) put_ebml_num(pb, size-1, 0); else put_ebml_num(pb, size-9, 8); while(url_ftell(pb) < currentpos + size) put_byte(pb, 0); } static ebml_master start_ebml_master(ByteIOContext *pb, unsigned int elementid, uint64_t expectedsize) { int bytes = expectedsize ? ebml_num_size(expectedsize) : 8; put_ebml_id(pb, elementid); put_ebml_size_unknown(pb, bytes); return (ebml_master){ url_ftell(pb), bytes }; } static void end_ebml_master(ByteIOContext *pb, ebml_master master) { int64_t pos = url_ftell(pb); if (url_fseek(pb, master.pos - master.sizebytes, SEEK_SET) < 0) return; put_ebml_num(pb, pos - master.pos, master.sizebytes); url_fseek(pb, pos, SEEK_SET); } static void put_xiph_size(ByteIOContext *pb, int size) { int i; for (i = 0; i < size / 255; i++) put_byte(pb, 255); put_byte(pb, size % 255); } /** * Initialize a mkv_seekhead element to be ready to index level 1 Matroska * elements. If a maximum number of elements is specified, enough space * will be reserved at the current file location to write a seek head of * that size. * * @param segment_offset The absolute offset to the position in the file * where the segment begins. * @param numelements The maximum number of elements that will be indexed * by this seek head, 0 if unlimited. 
*/ static mkv_seekhead * mkv_start_seekhead(ByteIOContext *pb, int64_t segment_offset, int numelements) { mkv_seekhead *new_seekhead = av_mallocz(sizeof(mkv_seekhead)); if (new_seekhead == NULL) return NULL; new_seekhead->segment_offset = segment_offset; if (numelements > 0) { new_seekhead->filepos = url_ftell(pb); // 21 bytes max for a seek entry, 10 bytes max for the SeekHead ID // and size, and 3 bytes to guarantee that an EBML void element // will fit afterwards new_seekhead->reserved_size = numelements * MAX_SEEKENTRY_SIZE + 13; new_seekhead->max_entries = numelements; put_ebml_void(pb, new_seekhead->reserved_size); } return new_seekhead; } static int mkv_add_seekhead_entry(mkv_seekhead *seekhead, unsigned int elementid, uint64_t filepos) { mkv_seekhead_entry *entries = seekhead->entries; // don't store more elements than we reserved space for if (seekhead->max_entries > 0 && seekhead->max_entries <= seekhead->num_entries) return -1; entries = av_realloc(entries, (seekhead->num_entries + 1) * sizeof(mkv_seekhead_entry)); if (entries == NULL) return AVERROR(ENOMEM); entries[seekhead->num_entries ].elementid = elementid; entries[seekhead->num_entries++].segmentpos = filepos - seekhead->segment_offset; seekhead->entries = entries; return 0; } /** * Write the seek head to the file and free it. If a maximum number of * elements was specified to mkv_start_seekhead(), the seek head will * be written at the location reserved for it. Otherwise, it is written * at the current location in the file. * * @return The file offset where the seekhead was written, * -1 if an error occurred. 
*/ static int64_t mkv_write_seekhead(ByteIOContext *pb, mkv_seekhead *seekhead) { ebml_master metaseek, seekentry; int64_t currentpos; int i; currentpos = url_ftell(pb); if (seekhead->reserved_size > 0) if (url_fseek(pb, seekhead->filepos, SEEK_SET) < 0) return -1; metaseek = start_ebml_master(pb, MATROSKA_ID_SEEKHEAD, seekhead->reserved_size); for (i = 0; i < seekhead->num_entries; i++) { mkv_seekhead_entry *entry = &seekhead->entries[i]; seekentry = start_ebml_master(pb, MATROSKA_ID_SEEKENTRY, MAX_SEEKENTRY_SIZE); put_ebml_id(pb, MATROSKA_ID_SEEKID); put_ebml_num(pb, ebml_id_size(entry->elementid), 0); put_ebml_id(pb, entry->elementid); put_ebml_uint(pb, MATROSKA_ID_SEEKPOSITION, entry->segmentpos); end_ebml_master(pb, seekentry); } end_ebml_master(pb, metaseek); if (seekhead->reserved_size > 0) { uint64_t remaining = seekhead->filepos + seekhead->reserved_size - url_ftell(pb); put_ebml_void(pb, remaining); url_fseek(pb, currentpos, SEEK_SET); currentpos = seekhead->filepos; } av_free(seekhead->entries); av_free(seekhead); return currentpos; } static mkv_cues * mkv_start_cues(int64_t segment_offset) { mkv_cues *cues = av_mallocz(sizeof(mkv_cues)); if (cues == NULL) return NULL; cues->segment_offset = segment_offset; return cues; } static int mkv_add_cuepoint(mkv_cues *cues, int stream, int64_t ts, int64_t cluster_pos) { mkv_cuepoint *entries = cues->entries; entries = av_realloc(entries, (cues->num_entries + 1) * sizeof(mkv_cuepoint)); if (entries == NULL) return AVERROR(ENOMEM); if (ts < 0) return 0; entries[cues->num_entries ].pts = ts; entries[cues->num_entries ].tracknum = stream + 1; entries[cues->num_entries++].cluster_pos = cluster_pos - cues->segment_offset; cues->entries = entries; return 0; } static int64_t mkv_write_cues(ByteIOContext *pb, mkv_cues *cues, int num_tracks) { ebml_master cues_element; int64_t currentpos; int i, j; currentpos = url_ftell(pb); cues_element = start_ebml_master(pb, MATROSKA_ID_CUES, 0); for (i = 0; i < cues->num_entries; i++) 
{ ebml_master cuepoint, track_positions; mkv_cuepoint *entry = &cues->entries[i]; uint64_t pts = entry->pts; cuepoint = start_ebml_master(pb, MATROSKA_ID_POINTENTRY, MAX_CUEPOINT_SIZE(num_tracks)); put_ebml_uint(pb, MATROSKA_ID_CUETIME, pts); // put all the entries from different tracks that have the exact same // timestamp into the same CuePoint for (j = 0; j < cues->num_entries - i && entry[j].pts == pts; j++) { track_positions = start_ebml_master(pb, MATROSKA_ID_CUETRACKPOSITION, MAX_CUETRACKPOS_SIZE); put_ebml_uint(pb, MATROSKA_ID_CUETRACK , entry[j].tracknum ); put_ebml_uint(pb, MATROSKA_ID_CUECLUSTERPOSITION, entry[j].cluster_pos); end_ebml_master(pb, track_positions); } i += j - 1; end_ebml_master(pb, cuepoint); } end_ebml_master(pb, cues_element); av_free(cues->entries); av_free(cues); return currentpos; } static int put_xiph_codecpriv(AVFormatContext *s, ByteIOContext *pb, AVCodecContext *codec) { uint8_t *header_start[3]; int header_len[3]; int first_header_size; int j; if (codec->codec_id == CODEC_ID_VORBIS) first_header_size = 30; else first_header_size = 42; if (ff_split_xiph_headers(codec->extradata, codec->extradata_size, first_header_size, header_start, header_len) < 0) { av_log(s, AV_LOG_ERROR, "Extradata corrupt.\n"); return -1; } put_byte(pb, 2); // number packets - 1 for (j = 0; j < 2; j++) { put_xiph_size(pb, header_len[j]); } for (j = 0; j < 3; j++) put_buffer(pb, header_start[j], header_len[j]); return 0; } static void get_aac_sample_rates(AVFormatContext *s, AVCodecContext *codec, int *sample_rate, int *output_sample_rate) { int sri; if (codec->extradata_size < 2) { av_log(s, AV_LOG_WARNING, "No AAC extradata, unable to determine samplerate.\n"); return; } sri = ((codec->extradata[0] << 1) & 0xE) | (codec->extradata[1] >> 7); if (sri > 12) { av_log(s, AV_LOG_WARNING, "AAC samplerate index out of bounds\n"); return; } *sample_rate = ff_mpeg4audio_sample_rates[sri]; // if sbr, get output sample rate as well if (codec->extradata_size == 5) { 
sri = (codec->extradata[4] >> 3) & 0xF; if (sri > 12) { av_log(s, AV_LOG_WARNING, "AAC output samplerate index out of bounds\n"); return; } *output_sample_rate = ff_mpeg4audio_sample_rates[sri]; } } static int mkv_write_codecprivate(AVFormatContext *s, ByteIOContext *pb, AVCodecContext *codec, int native_id, int qt_id) { ByteIOContext *dyn_cp; uint8_t *codecpriv; int ret, codecpriv_size; ret = url_open_dyn_buf(&dyn_cp); if(ret < 0) return ret; if (native_id) { if (codec->codec_id == CODEC_ID_VORBIS || codec->codec_id == CODEC_ID_THEORA) ret = put_xiph_codecpriv(s, dyn_cp, codec); else if (codec->codec_id == CODEC_ID_FLAC) ret = ff_flac_write_header(dyn_cp, codec, 1); else if (codec->codec_id == CODEC_ID_H264) ret = ff_isom_write_avcc(dyn_cp, codec->extradata, codec->extradata_size); else if (codec->extradata_size) put_buffer(dyn_cp, codec->extradata, codec->extradata_size); } else if (codec->codec_type == AVMEDIA_TYPE_VIDEO) { if (qt_id) { if (!codec->codec_tag) codec->codec_tag = ff_codec_get_tag(codec_movvideo_tags, codec->codec_id); if (codec->extradata_size) put_buffer(dyn_cp, codec->extradata, codec->extradata_size); } else { if (!codec->codec_tag) codec->codec_tag = ff_codec_get_tag(ff_codec_bmp_tags, codec->codec_id); if (!codec->codec_tag) { av_log(s, AV_LOG_ERROR, "No bmp codec ID found.\n"); ret = -1; } ff_put_bmp_header(dyn_cp, codec, ff_codec_bmp_tags, 0); } } else if (codec->codec_type == AVMEDIA_TYPE_AUDIO) { unsigned int tag; tag = ff_codec_get_tag(ff_codec_wav_tags, codec->codec_id); if (!tag) { av_log(s, AV_LOG_ERROR, "No wav codec ID found.\n"); ret = -1; } if (!codec->codec_tag) codec->codec_tag = tag; ff_put_wav_header(dyn_cp, codec); } codecpriv_size = url_close_dyn_buf(dyn_cp, &codecpriv); if (codecpriv_size) put_ebml_binary(pb, MATROSKA_ID_CODECPRIVATE, codecpriv, codecpriv_size); av_free(codecpriv); return ret; } static int mkv_write_tracks(AVFormatContext *s) { MatroskaMuxContext *mkv = s->priv_data; ByteIOContext *pb = s->pb; ebml_master 
tracks; int i, j, ret; ret = mkv_add_seekhead_entry(mkv->main_seekhead, MATROSKA_ID_TRACKS, url_ftell(pb)); if (ret < 0) return ret; tracks = start_ebml_master(pb, MATROSKA_ID_TRACKS, 0); for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; AVCodecContext *codec = st->codec; ebml_master subinfo, track; int native_id = 0; int qt_id = 0; int bit_depth = av_get_bits_per_sample(codec->codec_id); int sample_rate = codec->sample_rate; int output_sample_rate = 0; AVMetadataTag *tag; if (!bit_depth) bit_depth = av_get_bits_per_sample_format(codec->sample_fmt); if (codec->codec_id == CODEC_ID_AAC) get_aac_sample_rates(s, codec, &sample_rate, &output_sample_rate); track = start_ebml_master(pb, MATROSKA_ID_TRACKENTRY, 0); put_ebml_uint (pb, MATROSKA_ID_TRACKNUMBER , i + 1); put_ebml_uint (pb, MATROSKA_ID_TRACKUID , i + 1); put_ebml_uint (pb, MATROSKA_ID_TRACKFLAGLACING , 0); // no lacing (yet) if ((tag = av_metadata_get(st->metadata, "title", NULL, 0))) put_ebml_string(pb, MATROSKA_ID_TRACKNAME, tag->value); tag = av_metadata_get(st->metadata, "language", NULL, 0); put_ebml_string(pb, MATROSKA_ID_TRACKLANGUAGE, tag ? 
tag->value:"und"); if (st->disposition) put_ebml_uint(pb, MATROSKA_ID_TRACKFLAGDEFAULT, !!(st->disposition & AV_DISPOSITION_DEFAULT)); // look for a codec ID string specific to mkv to use, // if none are found, use AVI codes for (j = 0; ff_mkv_codec_tags[j].id != CODEC_ID_NONE; j++) { if (ff_mkv_codec_tags[j].id == codec->codec_id) { put_ebml_string(pb, MATROSKA_ID_CODECID, ff_mkv_codec_tags[j].str); native_id = 1; break; } } if (mkv->mode == MODE_WEBM && !(codec->codec_id == CODEC_ID_VP8 || codec->codec_id == CODEC_ID_VORBIS)) { av_log(s, AV_LOG_ERROR, "Only VP8 video and Vorbis audio are supported for WebM.\n"); return AVERROR(EINVAL); } switch (codec->codec_type) { case AVMEDIA_TYPE_VIDEO: put_ebml_uint(pb, MATROSKA_ID_TRACKTYPE, MATROSKA_TRACK_TYPE_VIDEO); put_ebml_uint(pb, MATROSKA_ID_TRACKDEFAULTDURATION, av_q2d(codec->time_base)*1E9); if (!native_id && ff_codec_get_tag(codec_movvideo_tags, codec->codec_id) && (!ff_codec_get_tag(ff_codec_bmp_tags, codec->codec_id) || codec->codec_id == CODEC_ID_SVQ1 || codec->codec_id == CODEC_ID_SVQ3 || codec->codec_id == CODEC_ID_CINEPAK)) qt_id = 1; if (qt_id) put_ebml_string(pb, MATROSKA_ID_CODECID, "V_QUICKTIME"); else if (!native_id) { // if there is no mkv-specific codec ID, use VFW mode put_ebml_string(pb, MATROSKA_ID_CODECID, "V_MS/VFW/FOURCC"); mkv->tracks[i].write_dts = 1; } subinfo = start_ebml_master(pb, MATROSKA_ID_TRACKVIDEO, 0); // XXX: interlace flag? 
put_ebml_uint (pb, MATROSKA_ID_VIDEOPIXELWIDTH , codec->width); put_ebml_uint (pb, MATROSKA_ID_VIDEOPIXELHEIGHT, codec->height); if (st->sample_aspect_ratio.num) { int d_width = codec->width*av_q2d(st->sample_aspect_ratio); put_ebml_uint(pb, MATROSKA_ID_VIDEODISPLAYWIDTH , d_width); put_ebml_uint(pb, MATROSKA_ID_VIDEODISPLAYHEIGHT, codec->height); } end_ebml_master(pb, subinfo); break; case AVMEDIA_TYPE_AUDIO: put_ebml_uint(pb, MATROSKA_ID_TRACKTYPE, MATROSKA_TRACK_TYPE_AUDIO); if (!native_id) // no mkv-specific ID, use ACM mode put_ebml_string(pb, MATROSKA_ID_CODECID, "A_MS/ACM"); subinfo = start_ebml_master(pb, MATROSKA_ID_TRACKAUDIO, 0); put_ebml_uint (pb, MATROSKA_ID_AUDIOCHANNELS , codec->channels); put_ebml_float (pb, MATROSKA_ID_AUDIOSAMPLINGFREQ, sample_rate); if (output_sample_rate) put_ebml_float(pb, MATROSKA_ID_AUDIOOUTSAMPLINGFREQ, output_sample_rate); if (bit_depth) put_ebml_uint(pb, MATROSKA_ID_AUDIOBITDEPTH, bit_depth); end_ebml_master(pb, subinfo); break; case AVMEDIA_TYPE_SUBTITLE: put_ebml_uint(pb, MATROSKA_ID_TRACKTYPE, MATROSKA_TRACK_TYPE_SUBTITLE); break; default: av_log(s, AV_LOG_ERROR, "Only audio, video, and subtitles are supported for Matroska."); break; } ret = mkv_write_codecprivate(s, pb, codec, native_id, qt_id); if (ret < 0) return ret; end_ebml_master(pb, track); // ms precision is the de-facto standard timescale for mkv files av_set_pts_info(st, 64, 1, 1000); } end_ebml_master(pb, tracks); return 0; } static int mkv_write_chapters(AVFormatContext *s) { MatroskaMuxContext *mkv = s->priv_data; ByteIOContext *pb = s->pb; ebml_master chapters, editionentry; AVRational scale = {1, 1E9}; int i, ret; if (!s->nb_chapters) return 0; ret = mkv_add_seekhead_entry(mkv->main_seekhead, MATROSKA_ID_CHAPTERS, url_ftell(pb)); if (ret < 0) return ret; chapters = start_ebml_master(pb, MATROSKA_ID_CHAPTERS , 0); editionentry = start_ebml_master(pb, MATROSKA_ID_EDITIONENTRY, 0); put_ebml_uint(pb, MATROSKA_ID_EDITIONFLAGDEFAULT, 1); put_ebml_uint(pb, 
MATROSKA_ID_EDITIONFLAGHIDDEN , 0); for (i = 0; i < s->nb_chapters; i++) { ebml_master chapteratom, chapterdisplay; AVChapter *c = s->chapters[i]; AVMetadataTag *t = NULL; chapteratom = start_ebml_master(pb, MATROSKA_ID_CHAPTERATOM, 0); put_ebml_uint(pb, MATROSKA_ID_CHAPTERUID, c->id); put_ebml_uint(pb, MATROSKA_ID_CHAPTERTIMESTART, av_rescale_q(c->start, c->time_base, scale)); put_ebml_uint(pb, MATROSKA_ID_CHAPTERTIMEEND, av_rescale_q(c->end, c->time_base, scale)); put_ebml_uint(pb, MATROSKA_ID_CHAPTERFLAGHIDDEN , 0); put_ebml_uint(pb, MATROSKA_ID_CHAPTERFLAGENABLED, 1); if ((t = av_metadata_get(c->metadata, "title", NULL, 0))) { chapterdisplay = start_ebml_master(pb, MATROSKA_ID_CHAPTERDISPLAY, 0); put_ebml_string(pb, MATROSKA_ID_CHAPSTRING, t->value); put_ebml_string(pb, MATROSKA_ID_CHAPLANG , "und"); end_ebml_master(pb, chapterdisplay); } end_ebml_master(pb, chapteratom); } end_ebml_master(pb, editionentry); end_ebml_master(pb, chapters); return 0; } static int mkv_write_header(AVFormatContext *s) { MatroskaMuxContext *mkv = s->priv_data; ByteIOContext *pb = s->pb; ebml_master ebml_header, segment_info; AVMetadataTag *tag; int ret; if (!strcmp(s->oformat->name, "webm")) mkv->mode = MODE_WEBM; else mkv->mode = MODE_MATROSKAv2; mkv->md5_ctx = av_mallocz(av_md5_size); av_md5_init(mkv->md5_ctx); mkv->tracks = av_mallocz(s->nb_streams * sizeof(*mkv->tracks)); ebml_header = start_ebml_master(pb, EBML_ID_HEADER, 0); put_ebml_uint (pb, EBML_ID_EBMLVERSION , 1); put_ebml_uint (pb, EBML_ID_EBMLREADVERSION , 1); put_ebml_uint (pb, EBML_ID_EBMLMAXIDLENGTH , 4); put_ebml_uint (pb, EBML_ID_EBMLMAXSIZELENGTH , 8); put_ebml_string (pb, EBML_ID_DOCTYPE , s->oformat->name); put_ebml_uint (pb, EBML_ID_DOCTYPEVERSION , 2); put_ebml_uint (pb, EBML_ID_DOCTYPEREADVERSION , 2); end_ebml_master(pb, ebml_header); mkv->segment = start_ebml_master(pb, MATROSKA_ID_SEGMENT, 0); mkv->segment_offset = url_ftell(pb); // we write 2 seek heads - one at the end of the file to point to each // 
cluster, and one at the beginning to point to all other level one // elements (including the seek head at the end of the file), which // isn't more than 10 elements if we only write one of each other // currently defined level 1 element mkv->main_seekhead = mkv_start_seekhead(pb, mkv->segment_offset, 10); mkv->cluster_seekhead = mkv_start_seekhead(pb, mkv->segment_offset, 0); if (mkv->main_seekhead == NULL || mkv->cluster_seekhead == NULL) return AVERROR(ENOMEM); ret = mkv_add_seekhead_entry(mkv->main_seekhead, MATROSKA_ID_INFO, url_ftell(pb)); if (ret < 0) return ret; segment_info = start_ebml_master(pb, MATROSKA_ID_INFO, 0); put_ebml_uint(pb, MATROSKA_ID_TIMECODESCALE, 1000000); if ((tag = av_metadata_get(s->metadata, "title", NULL, 0))) put_ebml_string(pb, MATROSKA_ID_TITLE, tag->value); if (!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) { put_ebml_string(pb, MATROSKA_ID_MUXINGAPP , LIBAVFORMAT_IDENT); put_ebml_string(pb, MATROSKA_ID_WRITINGAPP, LIBAVFORMAT_IDENT); // reserve space to write the segment UID later mkv->segment_uid = url_ftell(pb); put_ebml_void(pb, 19); } // reserve space for the duration mkv->duration = 0; mkv->duration_offset = url_ftell(pb); put_ebml_void(pb, 11); // assumes double-precision float to be written end_ebml_master(pb, segment_info); ret = mkv_write_tracks(s); if (ret < 0) return ret; if (mkv->mode != MODE_WEBM) { ret = mkv_write_chapters(s); if (ret < 0) return ret; } if (url_is_streamed(s->pb)) mkv_write_seekhead(pb, mkv->main_seekhead); mkv->cues = mkv_start_cues(mkv->segment_offset); if (mkv->cues == NULL) return AVERROR(ENOMEM); put_flush_packet(pb); return 0; } static int mkv_blockgroup_size(int pkt_size) { int size = pkt_size + 4; size += ebml_num_size(size); size += 2; // EBML ID for block and block duration size += 8; // max size of block duration size += ebml_num_size(size); size += 1; // blockgroup EBML ID return size; } static int ass_get_duration(const uint8_t *p) { int sh, sm, ss, sc, eh, em, es, ec; uint64_t 
start, end; if (sscanf(p, "%*[^,],%d:%d:%d%*c%d,%d:%d:%d%*c%d", &sh, &sm, &ss, &sc, &eh, &em, &es, &ec) != 8) return 0; start = 3600000*sh + 60000*sm + 1000*ss + 10*sc; end = 3600000*eh + 60000*em + 1000*es + 10*ec; return end - start; } static int mkv_write_ass_blocks(AVFormatContext *s, ByteIOContext *pb, AVPacket *pkt) { MatroskaMuxContext *mkv = s->priv_data; int i, layer = 0, max_duration = 0, size, line_size, data_size = pkt->size; uint8_t *start, *end, *data = pkt->data; ebml_master blockgroup; char buffer[2048]; while (data_size) { int duration = ass_get_duration(data); max_duration = FFMAX(duration, max_duration); end = memchr(data, '\n', data_size); size = line_size = end ? end-data+1 : data_size; size -= end ? (end[-1]=='\r')+1 : 0; start = data; for (i=0; i<3; i++, start++) if (!(start = memchr(start, ',', size-(start-data)))) return max_duration; size -= start - data; sscanf(data, "Dialogue: %d,", &layer); i = snprintf(buffer, sizeof(buffer), "%"PRId64",%d,", s->streams[pkt->stream_index]->nb_frames++, layer); size = FFMIN(i+size, sizeof(buffer)); memcpy(buffer+i, start, size-i); av_log(s, AV_LOG_DEBUG, "Writing block at offset %" PRIu64 ", size %d, " "pts %" PRId64 ", duration %d\n", url_ftell(pb), size, pkt->pts, duration); blockgroup = start_ebml_master(pb, MATROSKA_ID_BLOCKGROUP, mkv_blockgroup_size(size)); put_ebml_id(pb, MATROSKA_ID_BLOCK); put_ebml_num(pb, size+4, 0); put_byte(pb, 0x80 | (pkt->stream_index + 1)); // this assumes stream_index is less than 126 put_be16(pb, pkt->pts - mkv->cluster_pts); put_byte(pb, 0); put_buffer(pb, buffer, size); put_ebml_uint(pb, MATROSKA_ID_BLOCKDURATION, duration); end_ebml_master(pb, blockgroup); data += line_size; data_size -= line_size; } return max_duration; } static void mkv_write_block(AVFormatContext *s, ByteIOContext *pb, unsigned int blockid, AVPacket *pkt, int flags) { MatroskaMuxContext *mkv = s->priv_data; AVCodecContext *codec = s->streams[pkt->stream_index]->codec; uint8_t *data = NULL; int size 
= pkt->size; int64_t ts = mkv->tracks[pkt->stream_index].write_dts ? pkt->dts : pkt->pts; av_log(s, AV_LOG_DEBUG, "Writing block at offset %" PRIu64 ", size %d, " "pts %" PRId64 ", dts %" PRId64 ", duration %d, flags %d\n", url_ftell(pb), pkt->size, pkt->pts, pkt->dts, pkt->duration, flags); if (codec->codec_id == CODEC_ID_H264 && codec->extradata_size > 0 && (AV_RB24(codec->extradata) == 1 || AV_RB32(codec->extradata) == 1)) ff_avc_parse_nal_units_buf(pkt->data, &data, &size); else data = pkt->data; put_ebml_id(pb, blockid); put_ebml_num(pb, size+4, 0); put_byte(pb, 0x80 | (pkt->stream_index + 1)); // this assumes stream_index is less than 126 put_be16(pb, ts - mkv->cluster_pts); put_byte(pb, flags); put_buffer(pb, data, size); if (data != pkt->data) av_free(data); } static void mkv_flush_dynbuf(AVFormatContext *s) { MatroskaMuxContext *mkv = s->priv_data; int bufsize; uint8_t *dyn_buf; if (!mkv->dyn_bc) return; bufsize = url_close_dyn_buf(mkv->dyn_bc, &dyn_buf); put_buffer(s->pb, dyn_buf, bufsize); av_free(dyn_buf); mkv->dyn_bc = NULL; } static int mkv_write_packet(AVFormatContext *s, AVPacket *pkt) { MatroskaMuxContext *mkv = s->priv_data; ByteIOContext *pb = s->pb; AVCodecContext *codec = s->streams[pkt->stream_index]->codec; int keyframe = !!(pkt->flags & AV_PKT_FLAG_KEY); int duration = pkt->duration; int ret; int64_t ts = mkv->tracks[pkt->stream_index].write_dts ? 
pkt->dts : pkt->pts; if (ts == AV_NOPTS_VALUE) { av_log(s, AV_LOG_ERROR, "Can't write packet with unknown timestamp\n"); return AVERROR(EINVAL); } if (url_is_streamed(s->pb)) { if (!mkv->dyn_bc) url_open_dyn_buf(&mkv->dyn_bc); pb = mkv->dyn_bc; } if (!mkv->cluster_pos) { ret = mkv_add_seekhead_entry(mkv->cluster_seekhead, MATROSKA_ID_CLUSTER, url_ftell(pb)); if (ret < 0) return ret; mkv->cluster_pos = url_ftell(s->pb); mkv->cluster = start_ebml_master(pb, MATROSKA_ID_CLUSTER, 0); put_ebml_uint(pb, MATROSKA_ID_CLUSTERTIMECODE, FFMAX(0, ts)); mkv->cluster_pts = FFMAX(0, ts); av_md5_update(mkv->md5_ctx, pkt->data, FFMIN(200, pkt->size)); } if (codec->codec_type != AVMEDIA_TYPE_SUBTITLE) { mkv_write_block(s, pb, MATROSKA_ID_SIMPLEBLOCK, pkt, keyframe << 7); } else if (codec->codec_id == CODEC_ID_SSA) { duration = mkv_write_ass_blocks(s, pb, pkt); } else { ebml_master blockgroup = start_ebml_master(pb, MATROSKA_ID_BLOCKGROUP, mkv_blockgroup_size(pkt->size)); duration = pkt->convergence_duration; mkv_write_block(s, pb, MATROSKA_ID_BLOCK, pkt, 0); put_ebml_uint(pb, MATROSKA_ID_BLOCKDURATION, duration); end_ebml_master(pb, blockgroup); } if (codec->codec_type == AVMEDIA_TYPE_VIDEO && keyframe) { ret = mkv_add_cuepoint(mkv->cues, pkt->stream_index, ts, mkv->cluster_pos); if (ret < 0) return ret; } // start a new cluster every 5 MB or 5 sec, or 32k / 1 sec for streaming if ((url_is_streamed(s->pb) && (url_ftell(pb) > 32*1024 || ts > mkv->cluster_pts + 1000)) || url_ftell(pb) > mkv->cluster_pos + 5*1024*1024 || ts > mkv->cluster_pts + 5000) { av_log(s, AV_LOG_DEBUG, "Starting new cluster at offset %" PRIu64 " bytes, pts %" PRIu64 "\n", url_ftell(pb), ts); end_ebml_master(pb, mkv->cluster); mkv->cluster_pos = 0; if (mkv->dyn_bc) mkv_flush_dynbuf(s); } mkv->duration = FFMAX(mkv->duration, ts + duration); return 0; } static int mkv_write_trailer(AVFormatContext *s) { MatroskaMuxContext *mkv = s->priv_data; ByteIOContext *pb = s->pb; int64_t currentpos, second_seekhead, cuespos; 
int ret; if (mkv->dyn_bc) { end_ebml_master(mkv->dyn_bc, mkv->cluster); mkv_flush_dynbuf(s); } else if (mkv->cluster_pos) { end_ebml_master(pb, mkv->cluster); } if (!url_is_streamed(pb)) { cuespos = mkv_write_cues(pb, mkv->cues, s->nb_streams); second_seekhead = mkv_write_seekhead(pb, mkv->cluster_seekhead); ret = mkv_add_seekhead_entry(mkv->main_seekhead, MATROSKA_ID_CUES , cuespos); if (ret < 0) return ret; if (second_seekhead >= 0) { ret = mkv_add_seekhead_entry(mkv->main_seekhead, MATROSKA_ID_SEEKHEAD, second_seekhead); if (ret < 0) return ret; } mkv_write_seekhead(pb, mkv->main_seekhead); // update the duration av_log(s, AV_LOG_DEBUG, "end duration = %" PRIu64 "\n", mkv->duration); currentpos = url_ftell(pb); url_fseek(pb, mkv->duration_offset, SEEK_SET); put_ebml_float(pb, MATROSKA_ID_DURATION, mkv->duration); // write the md5sum of some frames as the segment UID if (!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) { uint8_t segment_uid[16]; av_md5_final(mkv->md5_ctx, segment_uid); url_fseek(pb, mkv->segment_uid, SEEK_SET); put_ebml_binary(pb, MATROSKA_ID_SEGMENTUID, segment_uid, 16); } url_fseek(pb, currentpos, SEEK_SET); } end_ebml_master(pb, mkv->segment); av_free(mkv->md5_ctx); av_free(mkv->tracks); put_flush_packet(pb); return 0; } #if CONFIG_MATROSKA_MUXER AVOutputFormat matroska_muxer = { "matroska", NULL_IF_CONFIG_SMALL("Matroska file format"), "video/x-matroska", "mkv", sizeof(MatroskaMuxContext), CODEC_ID_MP2, CODEC_ID_MPEG4, mkv_write_header, mkv_write_packet, mkv_write_trailer, .flags = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS, .codec_tag = (const AVCodecTag* const []){ff_codec_bmp_tags, ff_codec_wav_tags, 0}, .subtitle_codec = CODEC_ID_TEXT, }; #endif #if CONFIG_WEBM_MUXER AVOutputFormat webm_muxer = { "webm", NULL_IF_CONFIG_SMALL("WebM file format"), "video/webm", "webm", sizeof(MatroskaMuxContext), CODEC_ID_VORBIS, CODEC_ID_VP8, mkv_write_header, mkv_write_packet, mkv_write_trailer, .flags = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS, }; 
#endif #if CONFIG_MATROSKA_AUDIO_MUXER AVOutputFormat matroska_audio_muxer = { "matroska", NULL_IF_CONFIG_SMALL("Matroska file format"), "audio/x-matroska", "mka", sizeof(MatroskaMuxContext), CODEC_ID_MP2, CODEC_ID_NONE, mkv_write_header, mkv_write_packet, mkv_write_trailer, .flags = AVFMT_GLOBALHEADER, .codec_tag = (const AVCodecTag* const []){ff_codec_wav_tags, 0}, }; #endif
123linslouis-android-video-cutter
jni/libavformat/matroskaenc.c
C
asf20
36,344
/* * RTP AMR Depacketizer, RFC 3267 * Copyright (c) 2010 Martin Storsjo * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avformat.h" #include "rtpdec_amr.h" #include "libavutil/avstring.h" static const uint8_t frame_sizes_nb[16] = { 12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0 }; static const uint8_t frame_sizes_wb[16] = { 17, 23, 32, 36, 40, 46, 50, 58, 60, 5, 5, 0, 0, 0, 0, 0 }; static int amr_handle_packet(AVFormatContext *ctx, PayloadContext *data, AVStream *st, AVPacket * pkt, uint32_t * timestamp, const uint8_t * buf, int len, int flags) { const uint8_t *frame_sizes = NULL; int frames; int i; const uint8_t *speech_data; uint8_t *ptr; if (st->codec->codec_id == CODEC_ID_AMR_NB) { frame_sizes = frame_sizes_nb; } else if (st->codec->codec_id == CODEC_ID_AMR_WB) { frame_sizes = frame_sizes_wb; } else { av_log(ctx, AV_LOG_ERROR, "Bad codec ID\n"); return AVERROR_INVALIDDATA; } if (st->codec->channels != 1) { av_log(ctx, AV_LOG_ERROR, "Only mono AMR is supported\n"); return AVERROR_INVALIDDATA; } /* The AMR RTP packet consists of one header byte, followed * by one TOC byte for each AMR frame in the packet, followed * by the speech data for all the AMR frames. 
* * The header byte contains only a codec mode request, for * requesting what kind of AMR data the sender wants to * receive. Not used at the moment. */ /* Count the number of frames in the packet. The highest bit * is set in a TOC byte if there are more frames following. */ for (frames = 1; frames < len && (buf[frames] & 0x80); frames++) ; if (1 + frames >= len) { /* We hit the end of the packet while counting frames. */ av_log(ctx, AV_LOG_ERROR, "No speech data found\n"); return AVERROR_INVALIDDATA; } speech_data = buf + 1 + frames; /* Everything except the codec mode request byte should be output. */ if (av_new_packet(pkt, len - 1)) { av_log(ctx, AV_LOG_ERROR, "Out of memory\n"); return AVERROR(ENOMEM); } pkt->stream_index = st->index; ptr = pkt->data; for (i = 0; i < frames; i++) { uint8_t toc = buf[1 + i]; int frame_size = frame_sizes[(toc >> 3) & 0x0f]; if (speech_data + frame_size > buf + len) { /* Too little speech data */ av_log(ctx, AV_LOG_WARNING, "Too little speech data in the RTP packet\n"); /* Set the unwritten part of the packet to zero. */ memset(ptr, 0, pkt->data + pkt->size - ptr); pkt->size = ptr - pkt->data; return 0; } /* Extract the AMR frame mode from the TOC byte */ *ptr++ = toc & 0x7C; /* Copy the speech data */ memcpy(ptr, speech_data, frame_size); speech_data += frame_size; ptr += frame_size; } if (speech_data < buf + len) { av_log(ctx, AV_LOG_WARNING, "Too much speech data in the RTP packet?\n"); /* Set the unwritten part of the packet to zero. */ memset(ptr, 0, pkt->data + pkt->size - ptr); pkt->size = ptr - pkt->data; } return 0; } static int amr_parse_sdp_line(AVFormatContext *s, int st_index, PayloadContext *data, const char *line) { const char *p; char attr[25], value[25]; /* Parse an fmtp line this one: * a=fmtp:97 octet-align=1; interleaving=0 * That is, a normal fmtp: line followed by semicolon & space * separated key/value pairs. 
*/ if (av_strstart(line, "fmtp:", &p)) { int octet_align = 0; int crc = 0; int interleaving = 0; int channels = 1; while (*p && *p == ' ') p++; /* strip spaces */ while (*p && *p != ' ') p++; /* eat protocol identifier */ while (*p && *p == ' ') p++; /* strip trailing spaces */ while (ff_rtsp_next_attr_and_value(&p, attr, sizeof(attr), value, sizeof(value))) { /* Some AMR SDP configurations contain "octet-align", without * the trailing =1. Therefore, if the value is empty, * interpret it as "1". */ if (!strcmp(value, "")) { av_log(s, AV_LOG_WARNING, "AMR fmtp attribute %s had " "nonstandard empty value\n", attr); strcpy(value, "1"); } if (!strcmp(attr, "octet-align")) octet_align = atoi(value); else if (!strcmp(attr, "crc")) crc = atoi(value); else if (!strcmp(attr, "interleaving")) interleaving = atoi(value); else if (!strcmp(attr, "channels")) channels = atoi(value); } if (!octet_align || crc || interleaving || channels != 1) { av_log(s, AV_LOG_ERROR, "Unsupported RTP/AMR configuration!\n"); return -1; } } return 0; } RTPDynamicProtocolHandler ff_amr_nb_dynamic_handler = { .enc_name = "AMR", .codec_type = AVMEDIA_TYPE_AUDIO, .codec_id = CODEC_ID_AMR_NB, .parse_sdp_a_line = amr_parse_sdp_line, .parse_packet = amr_handle_packet, }; RTPDynamicProtocolHandler ff_amr_wb_dynamic_handler = { .enc_name = "AMR-WB", .codec_type = AVMEDIA_TYPE_AUDIO, .codec_id = CODEC_ID_AMR_WB, .parse_sdp_a_line = amr_parse_sdp_line, .parse_packet = amr_handle_packet, };
123linslouis-android-video-cutter
jni/libavformat/rtpdec_amr.c
C
asf20
6,453
/*
 * amr file format
 * Copyright (c) 2001 ffmpeg project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
Write and read amr data according to RFC3267,
http://www.ietf.org/rfc/rfc3267.txt?number=3267

Only mono files are supported.
*/

#include "avformat.h"

static const char AMR_header[]   = "#!AMR\n";
static const char AMRWB_header[] = "#!AMR-WB\n";

#if CONFIG_AMR_MUXER
/* Write the magic line identifying the AMR flavour; only AMR-NB and
 * AMR-WB streams are accepted, anything else is rejected. */
static int amr_write_header(AVFormatContext *s)
{
    ByteIOContext  *pb  = s->pb;
    AVCodecContext *enc = s->streams[0]->codec;

    s->priv_data = NULL;

    if (enc->codec_id == CODEC_ID_AMR_NB) {
        put_tag(pb, AMR_header);   /* magic number */
    } else if (enc->codec_id == CODEC_ID_AMR_WB) {
        put_tag(pb, AMRWB_header); /* magic number */
    } else {
        return -1;
    }
    put_flush_packet(pb);
    return 0;
}

/* AMR storage format: frames are simply stored back to back, so a
 * packet is written out verbatim. */
static int amr_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    put_buffer(s->pb, pkt->data, pkt->size);
    put_flush_packet(s->pb);
    return 0;
}
#endif /* CONFIG_AMR_MUXER */

static int amr_probe(AVProbeData *p)
{
    // Only check for "#!AMR" which could be amr-wb, amr-nb.
    // This will also trigger multichannel files: "#!AMR_MC1.0\n" and
    // "#!AMR-WB_MC1.0\n" (not supported)

    if (memcmp(p->buf, AMR_header, 5) == 0)
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}

/* amr input */
static int amr_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    ByteIOContext *pb = s->pb;
    AVStream *st;
    uint8_t header[9];

    get_buffer(pb, header, 6);

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    if (memcmp(header, AMR_header, 6) != 0) {
        /* not plain AMR-NB: the only other accepted magic is AMR-WB,
         * whose magic line is three bytes longer */
        get_buffer(pb, header + 6, 3);
        if (memcmp(header, AMRWB_header, 9) != 0)
            return -1;

        st->codec->codec_tag   = MKTAG('s', 'a', 'w', 'b');
        st->codec->codec_id    = CODEC_ID_AMR_WB;
        st->codec->sample_rate = 16000;
    } else {
        st->codec->codec_tag   = MKTAG('s', 'a', 'm', 'r');
        st->codec->codec_id    = CODEC_ID_AMR_NB;
        st->codec->sample_rate = 8000;
    }
    st->codec->channels   = 1;
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    av_set_pts_info(st, 64, 1, st->codec->sample_rate);

    return 0;
}

static int amr_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *enc = s->streams[0]->codec;
    int read, size = 0, toc, mode;

    if (url_feof(s->pb))
        return AVERROR(EIO);

    // FIXME this is wrong, this should rather be in a AVParset
    toc  = get_byte(s->pb);
    mode = (toc >> 3) & 0x0F;

    if (enc->codec_id == CODEC_ID_AMR_NB) {
        /* per-mode frame sizes in bytes, excluding the TOC byte */
        static const uint8_t packed_size[16] = {
            12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0
        };

        size = packed_size[mode] + 1;
    } else if (enc->codec_id == CODEC_ID_AMR_WB) {
        /* const-qualified like the NB table: read-only lookup data */
        static const uint8_t packed_size[16] = {
            18, 24, 33, 37, 41, 47, 51, 59, 61, 6, 6, 0, 0, 0, 1, 1
        };

        size = packed_size[mode];
    } else {
        assert(0);
    }

    /* a zero size marks an invalid mode for this flavour */
    if ((size == 0) || av_new_packet(pkt, size))
        return AVERROR(EIO);

    pkt->stream_index = 0;
    pkt->pos          = url_ftell(s->pb);
    pkt->data[0]      = toc;
    /* each AMR-NB frame covers 160 samples, each AMR-WB frame 320 */
    pkt->duration     = enc->codec_id == CODEC_ID_AMR_NB ? 160 : 320;
    read              = get_buffer(s->pb, pkt->data + 1, size - 1);

    if (read != size - 1) {
        av_free_packet(pkt);
        return AVERROR(EIO);
    }

    return 0;
}

#if CONFIG_AMR_DEMUXER
AVInputFormat amr_demuxer = {
    "amr",
    NULL_IF_CONFIG_SMALL("3GPP AMR file format"),
    0, /*priv_data_size*/
    amr_probe,
    amr_read_header,
    amr_read_packet,
    NULL,
};
#endif

#if CONFIG_AMR_MUXER
AVOutputFormat amr_muxer = {
    "amr",
    NULL_IF_CONFIG_SMALL("3GPP AMR file format"),
    "audio/amr",
    "amr",
    0,
    CODEC_ID_AMR_NB,
    CODEC_ID_NONE,
    amr_write_header,
    amr_write_packet,
};
#endif
123linslouis-android-video-cutter
jni/libavformat/amr.c
C
asf20
4,650
/* * IFF (.iff) file demuxer * Copyright (c) 2008 Jaikrishnan Menon <realityman@gmx.net> * Copyright (c) 2010 Peter Ross <pross@xvid.org> * Copyright (c) 2010 Sebastian Vater <cdgs.basty@googlemail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * IFF file demuxer * by Jaikrishnan Menon * for more information on the .iff file format, visit: * http://wiki.multimedia.cx/index.php?title=IFF */ #include "libavutil/intreadwrite.h" #include "libavcodec/iff.h" #include "avformat.h" #define ID_8SVX MKTAG('8','S','V','X') #define ID_VHDR MKTAG('V','H','D','R') #define ID_ATAK MKTAG('A','T','A','K') #define ID_RLSE MKTAG('R','L','S','E') #define ID_CHAN MKTAG('C','H','A','N') #define ID_PBM MKTAG('P','B','M',' ') #define ID_ILBM MKTAG('I','L','B','M') #define ID_BMHD MKTAG('B','M','H','D') #define ID_CMAP MKTAG('C','M','A','P') #define ID_FORM MKTAG('F','O','R','M') #define ID_ANNO MKTAG('A','N','N','O') #define ID_AUTH MKTAG('A','U','T','H') #define ID_CHRS MKTAG('C','H','R','S') #define ID_COPYRIGHT MKTAG('(','c',')',' ') #define ID_CSET MKTAG('C','S','E','T') #define ID_FVER MKTAG('F','V','E','R') #define ID_NAME MKTAG('N','A','M','E') #define ID_TEXT MKTAG('T','E','X','T') #define ID_BODY MKTAG('B','O','D','Y') #define ID_ANNO MKTAG('A','N','N','O') #define LEFT 2 
#define RIGHT 4 #define STEREO 6 #define PACKET_SIZE 1024 typedef enum { COMP_NONE, COMP_FIB, COMP_EXP } svx8_compression_type; typedef enum { BITMAP_RAW, BITMAP_BYTERUN1 } bitmap_compression_type; typedef struct { uint64_t body_pos; uint32_t body_size; uint32_t sent_bytes; uint32_t audio_frame_count; } IffDemuxContext; static void interleave_stereo(const uint8_t *src, uint8_t *dest, int size) { uint8_t *end = dest + size; size = size>>1; while(dest < end) { *dest++ = *src; *dest++ = *(src+size); src++; } } static int iff_probe(AVProbeData *p) { const uint8_t *d = p->buf; if ( AV_RL32(d) == ID_FORM && (AV_RL32(d+8) == ID_8SVX || AV_RL32(d+8) == ID_PBM || AV_RL32(d+8) == ID_ILBM) ) return AVPROBE_SCORE_MAX; return 0; } static int iff_read_header(AVFormatContext *s, AVFormatParameters *ap) { IffDemuxContext *iff = s->priv_data; ByteIOContext *pb = s->pb; AVStream *st; uint32_t chunk_id, data_size; int compression = -1; char *buf; st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); st->codec->channels = 1; url_fskip(pb, 8); // codec_tag used by ByteRun1 decoder to distinguish progressive (PBM) and interlaced (ILBM) content st->codec->codec_tag = get_le32(pb); while(!url_feof(pb)) { uint64_t orig_pos; chunk_id = get_le32(pb); data_size = get_be32(pb); orig_pos = url_ftell(pb); switch(chunk_id) { case ID_VHDR: st->codec->codec_type = AVMEDIA_TYPE_AUDIO; if (data_size < 14) return AVERROR_INVALIDDATA; url_fskip(pb, 12); st->codec->sample_rate = get_be16(pb); if (data_size >= 16) { url_fskip(pb, 1); compression = get_byte(pb); } break; case ID_BODY: iff->body_pos = url_ftell(pb); iff->body_size = data_size; break; case ID_CHAN: if (data_size < 4) return AVERROR_INVALIDDATA; st->codec->channels = (get_be32(pb) < 6) ? 
1 : 2; break; case ID_CMAP: st->codec->extradata_size = data_size; st->codec->extradata = av_malloc(data_size); if (!st->codec->extradata) return AVERROR(ENOMEM); if (get_buffer(pb, st->codec->extradata, data_size) < 0) return AVERROR(EIO); break; case ID_BMHD: st->codec->codec_type = AVMEDIA_TYPE_VIDEO; if (data_size <= 8) return AVERROR_INVALIDDATA; st->codec->width = get_be16(pb); st->codec->height = get_be16(pb); url_fskip(pb, 4); // x, y offset st->codec->bits_per_coded_sample = get_byte(pb); if (data_size >= 11) { url_fskip(pb, 1); // masking compression = get_byte(pb); } if (data_size >= 16) { url_fskip(pb, 3); // paddding, transparent st->sample_aspect_ratio.num = get_byte(pb); st->sample_aspect_ratio.den = get_byte(pb); } break; case ID_ANNO: buf = av_malloc(data_size + 1); if (!buf) break; get_buffer(pb, buf, data_size); buf[data_size] = 0; av_metadata_set2(&s->metadata, "comment", buf, AV_METADATA_DONT_STRDUP_VAL); break; } url_fskip(pb, data_size - (url_ftell(pb) - orig_pos) + (data_size & 1)); } url_fseek(pb, iff->body_pos, SEEK_SET); switch(st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: av_set_pts_info(st, 32, 1, st->codec->sample_rate); switch(compression) { case COMP_NONE: st->codec->codec_id = CODEC_ID_PCM_S8; break; case COMP_FIB: st->codec->codec_id = CODEC_ID_8SVX_FIB; break; case COMP_EXP: st->codec->codec_id = CODEC_ID_8SVX_EXP; break; default: av_log(s, AV_LOG_ERROR, "iff: unknown compression method\n"); return -1; } st->codec->bits_per_coded_sample = 8; st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample; st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample; break; case AVMEDIA_TYPE_VIDEO: switch (compression) { case BITMAP_RAW: if (st->codec->codec_tag == ID_ILBM) { st->codec->codec_id = CODEC_ID_IFF_ILBM; } else { st->codec->codec_id = CODEC_ID_RAWVIDEO; st->codec->pix_fmt = PIX_FMT_PAL8; st->codec->codec_tag = 0; } break; case BITMAP_BYTERUN1: 
st->codec->codec_id = CODEC_ID_IFF_BYTERUN1; break; default: av_log(s, AV_LOG_ERROR, "unknown compression method\n"); return AVERROR_INVALIDDATA; } break; default: return -1; } return 0; } static int iff_read_packet(AVFormatContext *s, AVPacket *pkt) { IffDemuxContext *iff = s->priv_data; ByteIOContext *pb = s->pb; AVStream *st = s->streams[0]; int ret; if(iff->sent_bytes >= iff->body_size) return AVERROR(EIO); if(s->streams[0]->codec->channels == 2) { uint8_t sample_buffer[PACKET_SIZE]; ret = get_buffer(pb, sample_buffer, PACKET_SIZE); if(av_new_packet(pkt, PACKET_SIZE) < 0) { av_log(s, AV_LOG_ERROR, "iff: cannot allocate packet \n"); return AVERROR(ENOMEM); } interleave_stereo(sample_buffer, pkt->data, PACKET_SIZE); } else if (s->streams[0]->codec->codec_id == CODEC_ID_RAWVIDEO) { if(av_new_packet(pkt, iff->body_size + AVPALETTE_SIZE) < 0) { return AVERROR(ENOMEM); } ret = ff_cmap_read_palette(st->codec, (uint32_t*)(pkt->data + iff->body_size)); if (ret < 0) return ret; av_freep(&st->codec->extradata); st->codec->extradata_size = 0; ret = get_buffer(pb, pkt->data, iff->body_size); } else if (s->streams[0]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { ret = av_get_packet(pb, pkt, iff->body_size); } else { ret = av_get_packet(pb, pkt, PACKET_SIZE); } if(iff->sent_bytes == 0) pkt->flags |= AV_PKT_FLAG_KEY; if(s->streams[0]->codec->codec_type == AVMEDIA_TYPE_AUDIO) { iff->sent_bytes += PACKET_SIZE; } else { iff->sent_bytes = iff->body_size; } pkt->stream_index = 0; if(s->streams[0]->codec->codec_type == AVMEDIA_TYPE_AUDIO) { pkt->pts = iff->audio_frame_count; iff->audio_frame_count += ret / s->streams[0]->codec->channels; } return ret; } AVInputFormat iff_demuxer = { "IFF", NULL_IF_CONFIG_SMALL("IFF format"), sizeof(IffDemuxContext), iff_probe, iff_read_header, iff_read_packet, };
123linslouis-android-video-cutter
jni/libavformat/iff.c
C
asf20
9,488
/*
 * VC-1 test bitstreams format muxer.
 * Copyright (c) 2008 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avformat.h"

typedef struct RCVContext {
    int frames; /* number of frames written so far, patched into the header */
} RCVContext;

static int vc1test_write_header(AVFormatContext *s)
{
    AVCodecContext *avc = s->streams[0]->codec;
    ByteIOContext  *pb  = s->pb;

    if (avc->codec_id != CODEC_ID_WMV3) {
        av_log(s, AV_LOG_ERROR, "Only WMV3 is accepted!\n");
        return -1;
    }
    put_le24(pb, 0); //frames count will be here
    put_byte(pb, 0xC5);
    put_le32(pb, 4);
    put_buffer(pb, avc->extradata, 4);
    put_le32(pb, avc->height);
    put_le32(pb, avc->width);
    put_le32(pb, 0xC);
    put_le24(pb, 0);    // hrd_buffer
    put_byte(pb, 0x80); // level|cbr|res1
    put_le32(pb, 0);    // hrd_rate
    if (s->streams[0]->r_frame_rate.den &&
        s->streams[0]->r_frame_rate.num == 1)
        put_le32(pb, s->streams[0]->r_frame_rate.den);
    else
        put_le32(pb, 0xFFFFFFFF); //variable framerate
    return 0;
}

static int vc1test_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    RCVContext    *ctx = s->priv_data;
    ByteIOContext *pb  = s->pb;

    if (!pkt->size)
        return 0;
    /* top bit of the size word flags a keyframe */
    put_le32(pb, pkt->size | ((pkt->flags & AV_PKT_FLAG_KEY) ? 0x80000000 : 0));
    put_le32(pb, pkt->pts);
    put_buffer(pb, pkt->data, pkt->size);
    put_flush_packet(pb);
    ctx->frames++;
    return 0;
}

static int vc1test_write_trailer(AVFormatContext *s)
{
    RCVContext    *ctx = s->priv_data;
    ByteIOContext *pb  = s->pb;

    /* patch the frame count placeholder written in the header;
     * only possible on seekable output */
    if (!url_is_streamed(s->pb)) {
        url_fseek(pb, 0, SEEK_SET);
        put_le24(pb, ctx->frames);
        put_flush_packet(pb);
    }
    return 0;
}

AVOutputFormat vc1t_muxer = {
    "rcv",
    NULL_IF_CONFIG_SMALL("VC-1 test bitstream"),
    "",
    "rcv",
    sizeof(RCVContext),
    CODEC_ID_NONE,
    CODEC_ID_WMV3,
    vc1test_write_header,
    vc1test_write_packet,
    vc1test_write_trailer,
};
123linslouis-android-video-cutter
jni/libavformat/vc1testenc.c
C
asf20
2,659
/*
 * AVI common data
 * Copyright (c) 2010 Anton Khirnov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avi.h"

/* mapping from RIFF INFO chunk tags to generic metadata keys */
const AVMetadataConv ff_avi_metadata_conv[] = {
    { "IART", "artist"     },
    { "ICMT", "comment"    },
    { "ICOP", "copyright"  },
    { "ICRD", "date"       },
    { "IGNR", "genre"      },
    { "ILNG", "language"   },
    { "INAM", "title"      },
    { "IPRD", "album"      },
    { "IPRT", "track"      },
    { "ISFT", "encoder"    },
    { "ITCH", "encoded_by" },
    { "strn", "title"      },
    { 0 },
};

/* known four-character INFO tags; list is terminated by an empty entry */
const char ff_avi_tags[][5] = {
    "IARL", "IART", "ICMS", "ICMT", "ICOP", "ICRD", "ICRP", "IDIM", "IDPI",
    "IENG", "IGNR", "IKEY", "ILGT", "ILNG", "IMED", "INAM", "IPLT", "IPRD",
    "IPRT", "ISBJ", "ISFT", "ISHP", "ISRC", "ISRF", "ITCH",
    {0}
};
123linslouis-android-video-cutter
jni/libavformat/avi.c
C
asf20
1,506
/* * Delay Locked Loop based time filter * Copyright (c) 2009 Samalyse * Copyright (c) 2009 Michael Niedermayer * Author: Olivier Guilyardi <olivier samalyse com> * Michael Niedermayer <michaelni gmx at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "config.h" #include "avformat.h" #include "timefilter.h" struct TimeFilter { /// Delay Locked Loop data. 
These variables refer to mathematical /// concepts described in: http://www.kokkinizita.net/papers/usingdll.pdf double cycle_time; double feedback2_factor; double feedback3_factor; double clock_period; int count; }; TimeFilter * ff_timefilter_new(double clock_period, double feedback2_factor, double feedback3_factor) { TimeFilter *self = av_mallocz(sizeof(TimeFilter)); self->clock_period = clock_period; self->feedback2_factor = feedback2_factor; self->feedback3_factor = feedback3_factor; return self; } void ff_timefilter_destroy(TimeFilter *self) { av_freep(&self); } void ff_timefilter_reset(TimeFilter *self) { self->count = 0; } double ff_timefilter_update(TimeFilter *self, double system_time, double period) { self->count++; if (self->count==1) { /// init loop self->cycle_time = system_time; } else { double loop_error; self->cycle_time += self->clock_period * period; /// calculate loop error loop_error = system_time - self->cycle_time; /// update loop self->cycle_time += FFMAX(self->feedback2_factor, 1.0/(self->count)) * loop_error; self->clock_period += self->feedback3_factor * loop_error / period; } return self->cycle_time; } #ifdef TEST #include "libavutil/lfg.h" #define LFG_MAX ((1LL << 32) - 1) int main(void) { AVLFG prng; double n0,n1; #define SAMPLES 1000 double ideal[SAMPLES]; double samples[SAMPLES]; #if 1 for(n0= 0; n0<40; n0=2*n0+1){ for(n1= 0; n1<10; n1=2*n1+1){ #else {{ n0=7; n1=1; #endif double best_error= 1000000000; double bestpar0=1; double bestpar1=0.001; int better, i; av_lfg_init(&prng, 123); for(i=0; i<SAMPLES; i++){ ideal[i] = 10 + i + n1*i/(1000); samples[i] = ideal[i] + n0 * (av_lfg_get(&prng) - LFG_MAX / 2) / (LFG_MAX * 10LL); } do{ double par0, par1; better=0; for(par0= bestpar0*0.8; par0<=bestpar0*1.21; par0+=bestpar0*0.05){ for(par1= bestpar1*0.8; par1<=bestpar1*1.21; par1+=bestpar1*0.05){ double error=0; TimeFilter *tf= ff_timefilter_new(1, par0, par1); for(i=0; i<SAMPLES; i++){ double filtered; filtered= ff_timefilter_update(tf, 
samples[i], 1); error += (filtered - ideal[i]) * (filtered - ideal[i]); } ff_timefilter_destroy(tf); if(error < best_error){ best_error= error; bestpar0= par0; bestpar1= par1; better=1; } } } }while(better); #if 0 double lastfil=9; TimeFilter *tf= ff_timefilter_new(1, bestpar0, bestpar1); for(i=0; i<SAMPLES; i++){ double filtered; filtered= ff_timefilter_update(tf, samples[i], 1); printf("%f %f %f %f\n", i - samples[i] + 10, filtered - samples[i], samples[FFMAX(i, 1)] - samples[FFMAX(i-1, 0)], filtered - lastfil); lastfil= filtered; } ff_timefilter_destroy(tf); #else printf(" [%f %f %9f]", bestpar0, bestpar1, best_error); #endif } printf("\n"); } return 0; } #endif
123linslouis-android-video-cutter
jni/libavformat/timefilter.c
C
asf20
4,818
/*
 * RTP packetization for H.264 (RFC3984)
 * Copyright (c) 2008 Luca Abeni
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * @brief H.264 packetization
 * @author Luca Abeni <lucabe72@email.it>
 */

#include "avformat.h"
#include "avc.h"
#include "rtpenc.h"

/* Send one NAL unit, either as a single RTP packet or, when it exceeds
 * the payload limit, split into FU-A fragments (RFC 3984). */
static void nal_send(AVFormatContext *s1, const uint8_t *buf, int size, int last)
{
    RTPMuxContext *s = s1->priv_data;

    av_log(s1, AV_LOG_DEBUG, "Sending NAL %x of len %d M=%d\n",
           buf[0] & 0x1F, size, last);

    if (size <= s->max_payload_size) {
        ff_rtp_send_data(s1, buf, size, last);
        return;
    }

    {
        uint8_t type = buf[0] & 0x1F;
        uint8_t nri  = buf[0] & 0x60;

        av_log(s1, AV_LOG_DEBUG, "NAL size %d > %d\n", size,
               s->max_payload_size);

        s->buf[0]  = 28;        /* FU Indicator; Type = 28 ---> FU-A */
        s->buf[0] |= nri;
        s->buf[1]  = type;
        s->buf[1] |= 1 << 7;    /* S bit set on the first fragment */

        /* the NAL header byte is carried in the FU header instead */
        buf  += 1;
        size -= 1;

        while (size + 2 > s->max_payload_size) {
            memcpy(&s->buf[2], buf, s->max_payload_size - 2);
            ff_rtp_send_data(s1, s->buf, s->max_payload_size, 0);
            buf  += s->max_payload_size - 2;
            size -= s->max_payload_size - 2;
            s->buf[1] &= ~(1 << 7);  /* clear S bit after the first packet */
        }
        s->buf[1] |= 1 << 6;         /* E bit marks the final fragment */
        memcpy(&s->buf[2], buf, size);
        ff_rtp_send_data(s1, s->buf, size + 2, last);
    }
}

void ff_rtp_send_h264(AVFormatContext *s1, const uint8_t *buf1, int size)
{
    RTPMuxContext *s = s1->priv_data;
    const uint8_t *nal_start;
    const uint8_t *end = buf1 + size;

    s->timestamp = s->cur_timestamp;

    nal_start = ff_avc_find_startcode(buf1, end);
    while (nal_start < end) {
        const uint8_t *nal_end;

        /* step over the zero bytes of the start code */
        while (!*(nal_start++))
            ;
        nal_end = ff_avc_find_startcode(nal_start, end);
        nal_send(s1, nal_start, nal_end - nal_start, (nal_end == end));
        nal_start = nal_end;
    }
}
123linslouis-android-video-cutter
jni/libavformat/rtpenc_h264.c
C
asf20
2,532
/* * FLV muxer * Copyright (c) 2003 The FFmpeg Project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avformat.h" #include "flv.h" #include "riff.h" #include "avc.h" #undef NDEBUG #include <assert.h> static const AVCodecTag flv_video_codec_ids[] = { {CODEC_ID_FLV1, FLV_CODECID_H263 }, {CODEC_ID_FLASHSV, FLV_CODECID_SCREEN}, {CODEC_ID_FLASHSV2, FLV_CODECID_SCREEN2}, {CODEC_ID_VP6F, FLV_CODECID_VP6 }, {CODEC_ID_VP6, FLV_CODECID_VP6 }, {CODEC_ID_H264, FLV_CODECID_H264 }, {CODEC_ID_NONE, 0} }; static const AVCodecTag flv_audio_codec_ids[] = { {CODEC_ID_MP3, FLV_CODECID_MP3 >> FLV_AUDIO_CODECID_OFFSET}, {CODEC_ID_PCM_U8, FLV_CODECID_PCM >> FLV_AUDIO_CODECID_OFFSET}, {CODEC_ID_PCM_S16BE, FLV_CODECID_PCM >> FLV_AUDIO_CODECID_OFFSET}, {CODEC_ID_PCM_S16LE, FLV_CODECID_PCM_LE >> FLV_AUDIO_CODECID_OFFSET}, {CODEC_ID_ADPCM_SWF, FLV_CODECID_ADPCM >> FLV_AUDIO_CODECID_OFFSET}, {CODEC_ID_AAC, FLV_CODECID_AAC >> FLV_AUDIO_CODECID_OFFSET}, {CODEC_ID_NELLYMOSER, FLV_CODECID_NELLYMOSER >> FLV_AUDIO_CODECID_OFFSET}, {CODEC_ID_SPEEX, FLV_CODECID_SPEEX >> FLV_AUDIO_CODECID_OFFSET}, {CODEC_ID_NONE, 0} }; typedef struct FLVContext { int reserved; int64_t duration_offset; int64_t filesize_offset; int64_t duration; int delay; ///< first dts delay for AVC } FLVContext; static int 
get_audio_flags(AVCodecContext *enc){ int flags = (enc->bits_per_coded_sample == 16) ? FLV_SAMPLESSIZE_16BIT : FLV_SAMPLESSIZE_8BIT; if (enc->codec_id == CODEC_ID_AAC) // specs force these parameters return FLV_CODECID_AAC | FLV_SAMPLERATE_44100HZ | FLV_SAMPLESSIZE_16BIT | FLV_STEREO; else if (enc->codec_id == CODEC_ID_SPEEX) { if (enc->sample_rate != 16000) { av_log(enc, AV_LOG_ERROR, "flv only supports wideband (16kHz) Speex audio\n"); return -1; } if (enc->channels != 1) { av_log(enc, AV_LOG_ERROR, "flv only supports mono Speex audio\n"); return -1; } if (enc->frame_size / 320 > 8) { av_log(enc, AV_LOG_WARNING, "Warning: Speex stream has more than " "8 frames per packet. Adobe Flash " "Player cannot handle this!\n"); } return FLV_CODECID_SPEEX | FLV_SAMPLERATE_11025HZ | FLV_SAMPLESSIZE_16BIT; } else { switch (enc->sample_rate) { case 44100: flags |= FLV_SAMPLERATE_44100HZ; break; case 22050: flags |= FLV_SAMPLERATE_22050HZ; break; case 11025: flags |= FLV_SAMPLERATE_11025HZ; break; case 8000: //nellymoser only case 5512: //not mp3 if(enc->codec_id != CODEC_ID_MP3){ flags |= FLV_SAMPLERATE_SPECIAL; break; } default: av_log(enc, AV_LOG_ERROR, "flv does not support that sample rate, choose from (44100, 22050, 11025).\n"); return -1; } } if (enc->channels > 1) { flags |= FLV_STEREO; } switch(enc->codec_id){ case CODEC_ID_MP3: flags |= FLV_CODECID_MP3 | FLV_SAMPLESSIZE_16BIT; break; case CODEC_ID_PCM_U8: flags |= FLV_CODECID_PCM | FLV_SAMPLESSIZE_8BIT; break; case CODEC_ID_PCM_S16BE: flags |= FLV_CODECID_PCM | FLV_SAMPLESSIZE_16BIT; break; case CODEC_ID_PCM_S16LE: flags |= FLV_CODECID_PCM_LE | FLV_SAMPLESSIZE_16BIT; break; case CODEC_ID_ADPCM_SWF: flags |= FLV_CODECID_ADPCM | FLV_SAMPLESSIZE_16BIT; break; case CODEC_ID_NELLYMOSER: if (enc->sample_rate == 8000) { flags |= FLV_CODECID_NELLYMOSER_8KHZ_MONO | FLV_SAMPLESSIZE_16BIT; } else { flags |= FLV_CODECID_NELLYMOSER | FLV_SAMPLESSIZE_16BIT; } break; case 0: flags |= enc->codec_tag<<4; break; default: av_log(enc, 
AV_LOG_ERROR, "codec not compatible with flv\n"); return -1; } return flags; } static void put_amf_string(ByteIOContext *pb, const char *str) { size_t len = strlen(str); put_be16(pb, len); put_buffer(pb, str, len); } static void put_amf_double(ByteIOContext *pb, double d) { put_byte(pb, AMF_DATA_TYPE_NUMBER); put_be64(pb, av_dbl2int(d)); } static void put_amf_bool(ByteIOContext *pb, int b) { put_byte(pb, AMF_DATA_TYPE_BOOL); put_byte(pb, !!b); } static int flv_write_header(AVFormatContext *s) { ByteIOContext *pb = s->pb; FLVContext *flv = s->priv_data; AVCodecContext *audio_enc = NULL, *video_enc = NULL; int i; double framerate = 0.0; int metadata_size_pos, data_size; for(i=0; i<s->nb_streams; i++){ AVCodecContext *enc = s->streams[i]->codec; if (enc->codec_type == AVMEDIA_TYPE_VIDEO) { if (s->streams[i]->r_frame_rate.den && s->streams[i]->r_frame_rate.num) { framerate = av_q2d(s->streams[i]->r_frame_rate); } else { framerate = 1/av_q2d(s->streams[i]->codec->time_base); } video_enc = enc; if(enc->codec_tag == 0) { av_log(enc, AV_LOG_ERROR, "video codec not compatible with flv\n"); return -1; } } else { audio_enc = enc; if(get_audio_flags(enc)<0) return -1; } av_set_pts_info(s->streams[i], 32, 1, 1000); /* 32 bit pts in ms */ } put_tag(pb,"FLV"); put_byte(pb,1); put_byte(pb, FLV_HEADER_FLAG_HASAUDIO * !!audio_enc + FLV_HEADER_FLAG_HASVIDEO * !!video_enc); put_be32(pb,9); put_be32(pb,0); for(i=0; i<s->nb_streams; i++){ if(s->streams[i]->codec->codec_tag == 5){ put_byte(pb,8); // message type put_be24(pb,0); // include flags put_be24(pb,0); // time stamp put_be32(pb,0); // reserved put_be32(pb,11); // size flv->reserved=5; } } /* write meta_tag */ put_byte(pb, 18); // tag type META metadata_size_pos= url_ftell(pb); put_be24(pb, 0); // size of data part (sum of all parts below) put_be24(pb, 0); // time stamp put_be32(pb, 0); // reserved /* now data of data_size size */ /* first event name as a string */ put_byte(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, 
"onMetaData"); // 12 bytes /* mixed array (hash) with size and string/type/data tuples */ put_byte(pb, AMF_DATA_TYPE_MIXEDARRAY); put_be32(pb, 5*!!video_enc + 5*!!audio_enc + 2); // +2 for duration and file size put_amf_string(pb, "duration"); flv->duration_offset= url_ftell(pb); put_amf_double(pb, s->duration / AV_TIME_BASE); // fill in the guessed duration, it'll be corrected later if incorrect if(video_enc){ put_amf_string(pb, "width"); put_amf_double(pb, video_enc->width); put_amf_string(pb, "height"); put_amf_double(pb, video_enc->height); put_amf_string(pb, "videodatarate"); put_amf_double(pb, video_enc->bit_rate / 1024.0); put_amf_string(pb, "framerate"); put_amf_double(pb, framerate); put_amf_string(pb, "videocodecid"); put_amf_double(pb, video_enc->codec_tag); } if(audio_enc){ put_amf_string(pb, "audiodatarate"); put_amf_double(pb, audio_enc->bit_rate / 1024.0); put_amf_string(pb, "audiosamplerate"); put_amf_double(pb, audio_enc->sample_rate); put_amf_string(pb, "audiosamplesize"); put_amf_double(pb, audio_enc->codec_id == CODEC_ID_PCM_U8 ? 8 : 16); put_amf_string(pb, "stereo"); put_amf_bool(pb, audio_enc->channels == 2); put_amf_string(pb, "audiocodecid"); put_amf_double(pb, audio_enc->codec_tag); } put_amf_string(pb, "filesize"); flv->filesize_offset= url_ftell(pb); put_amf_double(pb, 0); // delayed write put_amf_string(pb, ""); put_byte(pb, AMF_END_OF_OBJECT); /* write total size of tag */ data_size= url_ftell(pb) - metadata_size_pos - 10; url_fseek(pb, metadata_size_pos, SEEK_SET); put_be24(pb, data_size); url_fseek(pb, data_size + 10 - 3, SEEK_CUR); put_be32(pb, data_size + 11); for (i = 0; i < s->nb_streams; i++) { AVCodecContext *enc = s->streams[i]->codec; if (enc->codec_id == CODEC_ID_AAC || enc->codec_id == CODEC_ID_H264) { int64_t pos; put_byte(pb, enc->codec_type == AVMEDIA_TYPE_VIDEO ? 
FLV_TAG_TYPE_VIDEO : FLV_TAG_TYPE_AUDIO); put_be24(pb, 0); // size patched later put_be24(pb, 0); // ts put_byte(pb, 0); // ts ext put_be24(pb, 0); // streamid pos = url_ftell(pb); if (enc->codec_id == CODEC_ID_AAC) { put_byte(pb, get_audio_flags(enc)); put_byte(pb, 0); // AAC sequence header put_buffer(pb, enc->extradata, enc->extradata_size); } else { put_byte(pb, enc->codec_tag | FLV_FRAME_KEY); // flags put_byte(pb, 0); // AVC sequence header put_be24(pb, 0); // composition time ff_isom_write_avcc(pb, enc->extradata, enc->extradata_size); } data_size = url_ftell(pb) - pos; url_fseek(pb, -data_size - 10, SEEK_CUR); put_be24(pb, data_size); url_fseek(pb, data_size + 10 - 3, SEEK_CUR); put_be32(pb, data_size + 11); // previous tag size } } return 0; } static int flv_write_trailer(AVFormatContext *s) { int64_t file_size; ByteIOContext *pb = s->pb; FLVContext *flv = s->priv_data; file_size = url_ftell(pb); /* update informations */ url_fseek(pb, flv->duration_offset, SEEK_SET); put_amf_double(pb, flv->duration / (double)1000); url_fseek(pb, flv->filesize_offset, SEEK_SET); put_amf_double(pb, file_size); url_fseek(pb, file_size, SEEK_SET); return 0; } static int flv_write_packet(AVFormatContext *s, AVPacket *pkt) { ByteIOContext *pb = s->pb; AVCodecContext *enc = s->streams[pkt->stream_index]->codec; FLVContext *flv = s->priv_data; unsigned ts; int size= pkt->size; uint8_t *data= NULL; int flags, flags_size; // av_log(s, AV_LOG_DEBUG, "type:%d pts: %"PRId64" size:%d\n", enc->codec_type, timestamp, size); if(enc->codec_id == CODEC_ID_VP6 || enc->codec_id == CODEC_ID_VP6F || enc->codec_id == CODEC_ID_AAC) flags_size= 2; else if(enc->codec_id == CODEC_ID_H264) flags_size= 5; else flags_size= 1; if (enc->codec_type == AVMEDIA_TYPE_VIDEO) { put_byte(pb, FLV_TAG_TYPE_VIDEO); flags = enc->codec_tag; if(flags == 0) { av_log(enc, AV_LOG_ERROR, "video codec %X not compatible with flv\n",enc->codec_id); return -1; } flags |= pkt->flags & AV_PKT_FLAG_KEY ? 
FLV_FRAME_KEY : FLV_FRAME_INTER; } else { assert(enc->codec_type == AVMEDIA_TYPE_AUDIO); flags = get_audio_flags(enc); assert(size); put_byte(pb, FLV_TAG_TYPE_AUDIO); } if (enc->codec_id == CODEC_ID_H264) { /* check if extradata looks like mp4 formated */ if (enc->extradata_size > 0 && *(uint8_t*)enc->extradata != 1) { if (ff_avc_parse_nal_units_buf(pkt->data, &data, &size) < 0) return -1; } if (!flv->delay && pkt->dts < 0) flv->delay = -pkt->dts; } ts = pkt->dts + flv->delay; // add delay to force positive dts put_be24(pb,size + flags_size); put_be24(pb,ts); put_byte(pb,(ts >> 24) & 0x7F); // timestamps are 32bits _signed_ put_be24(pb,flv->reserved); put_byte(pb,flags); if (enc->codec_id == CODEC_ID_VP6) put_byte(pb,0); if (enc->codec_id == CODEC_ID_VP6F) put_byte(pb, enc->extradata_size ? enc->extradata[0] : 0); else if (enc->codec_id == CODEC_ID_AAC) put_byte(pb,1); // AAC raw else if (enc->codec_id == CODEC_ID_H264) { put_byte(pb,1); // AVC NALU put_be24(pb,pkt->pts - pkt->dts); } put_buffer(pb, data ? data : pkt->data, size); put_be32(pb,size+flags_size+11); // previous tag size flv->duration = FFMAX(flv->duration, pkt->pts + flv->delay + pkt->duration); put_flush_packet(pb); av_free(data); return 0; } AVOutputFormat flv_muxer = { "flv", NULL_IF_CONFIG_SMALL("FLV format"), "video/x-flv", "flv", sizeof(FLVContext), #if CONFIG_LIBMP3LAME CODEC_ID_MP3, #else // CONFIG_LIBMP3LAME CODEC_ID_ADPCM_SWF, #endif // CONFIG_LIBMP3LAME CODEC_ID_FLV1, flv_write_header, flv_write_packet, flv_write_trailer, .codec_tag= (const AVCodecTag* const []){flv_video_codec_ids, flv_audio_codec_ids, 0}, .flags= AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS, };
123linslouis-android-video-cutter
jni/libavformat/flvenc.c
C
asf20
13,895
/* * Xiph RTP Protocols * Based off RFC 5215 (Vorbis RTP) and the Theora RTP draft. * Copyright (c) 2009 Colin McQuillian * Copyright (c) 2010 Josh Allmann * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVFORMAT_RTPDEC_XIPH_H #define AVFORMAT_RTPDEC_XIPH_H #include "libavcodec/avcodec.h" #include "rtpdec.h" /** * Theora RTP callbacks. */ extern RTPDynamicProtocolHandler ff_theora_dynamic_handler; /** * Vorbis RTP callbacks. */ extern RTPDynamicProtocolHandler ff_vorbis_dynamic_handler; #endif /* AVFORMAT_RTPDEC_XIPH_H */
123linslouis-android-video-cutter
jni/libavformat/rtpdec_xiph.h
C
asf20
1,264
/* * Creative Voice File common data. * Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "voc.h" const unsigned char ff_voc_magic[21] = "Creative Voice File\x1A"; const AVCodecTag ff_voc_codec_tags[] = { {CODEC_ID_PCM_U8, 0x00}, {CODEC_ID_ADPCM_SBPRO_4, 0x01}, {CODEC_ID_ADPCM_SBPRO_3, 0x02}, {CODEC_ID_ADPCM_SBPRO_2, 0x03}, {CODEC_ID_PCM_S16LE, 0x04}, {CODEC_ID_PCM_ALAW, 0x06}, {CODEC_ID_PCM_MULAW, 0x07}, {CODEC_ID_ADPCM_CT, 0x0200}, {CODEC_ID_NONE, 0}, };
123linslouis-android-video-cutter
jni/libavformat/voc.c
C
asf20
1,314
/* * Realmedia RTSP (RDT) definitions * Copyright (c) 2007 Ronald S. Bultje <rbultje@ronald.bitfreak.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVFORMAT_RDT_H #define AVFORMAT_RDT_H #include <stdint.h> #include "avformat.h" #include "rtpdec.h" typedef struct RDTDemuxContext RDTDemuxContext; /** * Allocate and init the RDT parsing context. * @param ic the containing RTSP demuxer context * @param first_stream_of_set_idx index to the first AVStream in the RTSP * demuxer context's ic->streams array that is part of this * particular stream's set of streams (with identical content) * @param priv_data private data of the payload data handler context * @param handler pointer to the parse_packet() payload parsing function * @return a newly allocated RDTDemuxContext. Free with ff_rdt_parse_close(). */ RDTDemuxContext *ff_rdt_parse_open(AVFormatContext *ic, int first_stream_of_set_idx, void *priv_data, RTPDynamicProtocolHandler *handler); void ff_rdt_parse_close(RDTDemuxContext *s); /** * Calculate the response (RealChallenge2 in the RTSP header) to the * challenge (RealChallenge1 in the RTSP header from the Real/Helix * server), which is used as some sort of client validation. 
* * @param response pointer to response buffer, it should be at least 41 bytes * (40 data + 1 zero) bytes long. * @param chksum pointer to buffer containing a checksum of the response, * it should be at least 9 (8 data + 1 zero) bytes long. * @param challenge pointer to the RealChallenge1 value provided by the * server. */ void ff_rdt_calc_response_and_checksum(char response[41], char chksum[9], const char *challenge); /** * Register RDT-related dynamic payload handlers with our cache. */ void av_register_rdt_dynamic_payload_handlers(void); /** * Add subscription information to Subscribe parameter string. * * @param cmd string to write the subscription information into. * @param size size of cmd. * @param stream_nr stream number. * @param rule_nr rule number to conform to. */ void ff_rdt_subscribe_rule(char *cmd, int size, int stream_nr, int rule_nr); /** * Parse RDT-style packet header. * * @param buf input buffer * @param len length of input buffer * @param set_id will be set to the set ID this packet belongs to * @param seq_no will be set to the sequence number of the packet * @param stream_id will be set to the stream ID this packet belongs to * @param is_keyframe will be whether this packet belongs to a keyframe * @param timestamp will be set to the timestamp of the packet * @return the amount of bytes consumed, or <0 on error */ int ff_rdt_parse_header(const uint8_t *buf, int len, int *set_id, int *seq_no, int *stream_id, int *is_keyframe, uint32_t *timestamp); /** * Parse RDT-style packet data (header + media data). * Usage similar to rtp_parse_packet(). */ int ff_rdt_parse_packet(RDTDemuxContext *s, AVPacket *pkt, const uint8_t *buf, int len); /** * Parse a server-related SDP line. 
* * @param s the RTSP AVFormatContext * @param stream_index the index of the first stream in the set represented * by the SDP m= line (in s->streams) * @param buf the SDP line */ void ff_real_parse_sdp_a_line(AVFormatContext *s, int stream_index, const char *buf); #endif /* AVFORMAT_RDT_H */
123linslouis-android-video-cutter
jni/libavformat/rdt.h
C
asf20
4,354
/* * YUV4MPEG format * Copyright (c) 2001, 2002, 2003 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avformat.h" #define Y4M_MAGIC "YUV4MPEG2" #define Y4M_FRAME_MAGIC "FRAME" #define Y4M_LINE_MAX 256 struct frame_attributes { int interlaced_frame; int top_field_first; }; #if CONFIG_YUV4MPEGPIPE_MUXER static int yuv4_generate_header(AVFormatContext *s, char* buf) { AVStream *st; int width, height; int raten, rated, aspectn, aspectd, n; char inter; const char *colorspace = ""; st = s->streams[0]; width = st->codec->width; height = st->codec->height; av_reduce(&raten, &rated, st->codec->time_base.den, st->codec->time_base.num, (1UL<<31)-1); aspectn = st->sample_aspect_ratio.num; aspectd = st->sample_aspect_ratio.den; if ( aspectn == 0 && aspectd == 1 ) aspectd = 0; // 0:0 means unknown inter = 'p'; /* progressive is the default */ if (st->codec->coded_frame && st->codec->coded_frame->interlaced_frame) { inter = st->codec->coded_frame->top_field_first ? 't' : 'b'; } switch(st->codec->pix_fmt) { case PIX_FMT_GRAY8: colorspace = " Cmono"; break; case PIX_FMT_YUV411P: colorspace = " C411 XYSCSS=411"; break; case PIX_FMT_YUV420P: colorspace = (st->codec->chroma_sample_location == AVCHROMA_LOC_TOPLEFT)?" 
C420paldv XYSCSS=420PALDV": (st->codec->chroma_sample_location == AVCHROMA_LOC_LEFT) ?" C420mpeg2 XYSCSS=420MPEG2": " C420jpeg XYSCSS=420JPEG"; break; case PIX_FMT_YUV422P: colorspace = " C422 XYSCSS=422"; break; case PIX_FMT_YUV444P: colorspace = " C444 XYSCSS=444"; break; } /* construct stream header, if this is the first frame */ n = snprintf(buf, Y4M_LINE_MAX, "%s W%d H%d F%d:%d I%c A%d:%d%s\n", Y4M_MAGIC, width, height, raten, rated, inter, aspectn, aspectd, colorspace); return n; } static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt) { AVStream *st = s->streams[pkt->stream_index]; ByteIOContext *pb = s->pb; AVPicture *picture; int* first_pkt = s->priv_data; int width, height, h_chroma_shift, v_chroma_shift; int i, m; char buf2[Y4M_LINE_MAX+1]; char buf1[20]; uint8_t *ptr, *ptr1, *ptr2; picture = (AVPicture *)pkt->data; /* for the first packet we have to output the header as well */ if (*first_pkt) { *first_pkt = 0; if (yuv4_generate_header(s, buf2) < 0) { av_log(s, AV_LOG_ERROR, "Error. 
YUV4MPEG stream header write failed.\n"); return AVERROR(EIO); } else { put_buffer(pb, buf2, strlen(buf2)); } } /* construct frame header */ m = snprintf(buf1, sizeof(buf1), "%s\n", Y4M_FRAME_MAGIC); put_buffer(pb, buf1, strlen(buf1)); width = st->codec->width; height = st->codec->height; ptr = picture->data[0]; for(i=0;i<height;i++) { put_buffer(pb, ptr, width); ptr += picture->linesize[0]; } if (st->codec->pix_fmt != PIX_FMT_GRAY8){ // Adjust for smaller Cb and Cr planes avcodec_get_chroma_sub_sample(st->codec->pix_fmt, &h_chroma_shift, &v_chroma_shift); width >>= h_chroma_shift; height >>= v_chroma_shift; ptr1 = picture->data[1]; ptr2 = picture->data[2]; for(i=0;i<height;i++) { /* Cb */ put_buffer(pb, ptr1, width); ptr1 += picture->linesize[1]; } for(i=0;i<height;i++) { /* Cr */ put_buffer(pb, ptr2, width); ptr2 += picture->linesize[2]; } } put_flush_packet(pb); return 0; } static int yuv4_write_header(AVFormatContext *s) { int* first_pkt = s->priv_data; if (s->nb_streams != 1) return AVERROR(EIO); if (s->streams[0]->codec->pix_fmt == PIX_FMT_YUV411P) { av_log(s, AV_LOG_ERROR, "Warning: generating rarely used 4:1:1 YUV stream, some mjpegtools might not work.\n"); } else if ((s->streams[0]->codec->pix_fmt != PIX_FMT_YUV420P) && (s->streams[0]->codec->pix_fmt != PIX_FMT_YUV422P) && (s->streams[0]->codec->pix_fmt != PIX_FMT_GRAY8) && (s->streams[0]->codec->pix_fmt != PIX_FMT_YUV444P)) { av_log(s, AV_LOG_ERROR, "ERROR: yuv4mpeg only handles yuv444p, yuv422p, yuv420p, yuv411p and gray pixel formats. 
Use -pix_fmt to select one.\n"); return AVERROR(EIO); } *first_pkt = 1; return 0; } AVOutputFormat yuv4mpegpipe_muxer = { "yuv4mpegpipe", NULL_IF_CONFIG_SMALL("YUV4MPEG pipe format"), "", "y4m", sizeof(int), CODEC_ID_NONE, CODEC_ID_RAWVIDEO, yuv4_write_header, yuv4_write_packet, .flags = AVFMT_RAWPICTURE, }; #endif /* Header size increased to allow room for optional flags */ #define MAX_YUV4_HEADER 80 #define MAX_FRAME_HEADER 80 static int yuv4_read_header(AVFormatContext *s, AVFormatParameters *ap) { char header[MAX_YUV4_HEADER+10]; // Include headroom for the longest option char *tokstart,*tokend,*header_end; int i; ByteIOContext *pb = s->pb; int width=-1, height=-1, raten=0, rated=0, aspectn=0, aspectd=0; enum PixelFormat pix_fmt=PIX_FMT_NONE,alt_pix_fmt=PIX_FMT_NONE; enum AVChromaLocation chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED; AVStream *st; struct frame_attributes *s1 = s->priv_data; for (i=0; i<MAX_YUV4_HEADER; i++) { header[i] = get_byte(pb); if (header[i] == '\n') { header[i+1] = 0x20; // Add a space after last option. Makes parsing "444" vs "444alpha" easier. header[i+2] = 0; break; } } if (i == MAX_YUV4_HEADER) return -1; if (strncmp(header, Y4M_MAGIC, strlen(Y4M_MAGIC))) return -1; s1->interlaced_frame = 0; s1->top_field_first = 0; header_end = &header[i+1]; // Include space for(tokstart = &header[strlen(Y4M_MAGIC) + 1]; tokstart < header_end; tokstart++) { if (*tokstart==0x20) continue; switch (*tokstart++) { case 'W': // Width. Required. width = strtol(tokstart, &tokend, 10); tokstart=tokend; break; case 'H': // Height. Required. 
height = strtol(tokstart, &tokend, 10); tokstart=tokend; break; case 'C': // Color space if (strncmp("420jpeg",tokstart,7)==0) { pix_fmt = PIX_FMT_YUV420P; chroma_sample_location = AVCHROMA_LOC_CENTER; } else if (strncmp("420mpeg2",tokstart,8)==0) { pix_fmt = PIX_FMT_YUV420P; chroma_sample_location = AVCHROMA_LOC_LEFT; } else if (strncmp("420paldv", tokstart, 8)==0) { pix_fmt = PIX_FMT_YUV420P; chroma_sample_location = AVCHROMA_LOC_TOPLEFT; } else if (strncmp("411", tokstart, 3)==0) pix_fmt = PIX_FMT_YUV411P; else if (strncmp("422", tokstart, 3)==0) pix_fmt = PIX_FMT_YUV422P; else if (strncmp("444alpha", tokstart, 8)==0) { av_log(s, AV_LOG_ERROR, "Cannot handle 4:4:4:4 YUV4MPEG stream.\n"); return -1; } else if (strncmp("444", tokstart, 3)==0) pix_fmt = PIX_FMT_YUV444P; else if (strncmp("mono",tokstart, 4)==0) { pix_fmt = PIX_FMT_GRAY8; } else { av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains an unknown pixel format.\n"); return -1; } while(tokstart<header_end&&*tokstart!=0x20) tokstart++; break; case 'I': // Interlace type switch (*tokstart++){ case '?': break; case 'p': s1->interlaced_frame=0; break; case 't': s1->interlaced_frame=1; s1->top_field_first=1; break; case 'b': s1->interlaced_frame=1; s1->top_field_first=0; break; case 'm': av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains mixed interlaced and non-interlaced frames.\n"); return -1; default: av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n"); return -1; } break; case 'F': // Frame rate sscanf(tokstart,"%d:%d",&raten,&rated); // 0:0 if unknown while(tokstart<header_end&&*tokstart!=0x20) tokstart++; break; case 'A': // Pixel aspect sscanf(tokstart,"%d:%d",&aspectn,&aspectd); // 0:0 if unknown while(tokstart<header_end&&*tokstart!=0x20) tokstart++; break; case 'X': // Vendor extensions if (strncmp("YSCSS=",tokstart,6)==0) { // Older nonstandard pixel format representation tokstart+=6; if (strncmp("420JPEG",tokstart,7)==0) alt_pix_fmt=PIX_FMT_YUV420P; else if (strncmp("420MPEG2",tokstart,8)==0) 
alt_pix_fmt=PIX_FMT_YUV420P; else if (strncmp("420PALDV",tokstart,8)==0) alt_pix_fmt=PIX_FMT_YUV420P; else if (strncmp("411",tokstart,3)==0) alt_pix_fmt=PIX_FMT_YUV411P; else if (strncmp("422",tokstart,3)==0) alt_pix_fmt=PIX_FMT_YUV422P; else if (strncmp("444",tokstart,3)==0) alt_pix_fmt=PIX_FMT_YUV444P; } while(tokstart<header_end&&*tokstart!=0x20) tokstart++; break; } } if ((width == -1) || (height == -1)) { av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n"); return -1; } if (pix_fmt == PIX_FMT_NONE) { if (alt_pix_fmt == PIX_FMT_NONE) pix_fmt = PIX_FMT_YUV420P; else pix_fmt = alt_pix_fmt; } if (raten == 0 && rated == 0) { // Frame rate unknown raten = 25; rated = 1; } if (aspectn == 0 && aspectd == 0) { // Pixel aspect unknown aspectd = 1; } st = av_new_stream(s, 0); if(!st) return AVERROR(ENOMEM); st->codec->width = width; st->codec->height = height; av_reduce(&raten, &rated, raten, rated, (1UL<<31)-1); av_set_pts_info(st, 64, rated, raten); st->codec->pix_fmt = pix_fmt; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_RAWVIDEO; st->sample_aspect_ratio= (AVRational){aspectn, aspectd}; st->codec->chroma_sample_location = chroma_sample_location; return 0; } static int yuv4_read_packet(AVFormatContext *s, AVPacket *pkt) { int i; char header[MAX_FRAME_HEADER+1]; int packet_size, width, height; AVStream *st = s->streams[0]; struct frame_attributes *s1 = s->priv_data; for (i=0; i<MAX_FRAME_HEADER; i++) { header[i] = get_byte(s->pb); if (header[i] == '\n') { header[i+1] = 0; break; } } if (i == MAX_FRAME_HEADER) return -1; if (strncmp(header, Y4M_FRAME_MAGIC, strlen(Y4M_FRAME_MAGIC))) return -1; width = st->codec->width; height = st->codec->height; packet_size = avpicture_get_size(st->codec->pix_fmt, width, height); if (packet_size < 0) return -1; if (av_get_packet(s->pb, pkt, packet_size) != packet_size) return AVERROR(EIO); if (s->streams[0]->codec->coded_frame) { s->streams[0]->codec->coded_frame->interlaced_frame = 
s1->interlaced_frame; s->streams[0]->codec->coded_frame->top_field_first = s1->top_field_first; } pkt->stream_index = 0; return 0; } static int yuv4_probe(AVProbeData *pd) { /* check file header */ if (strncmp(pd->buf, Y4M_MAGIC, sizeof(Y4M_MAGIC)-1)==0) return AVPROBE_SCORE_MAX; else return 0; } #if CONFIG_YUV4MPEGPIPE_DEMUXER AVInputFormat yuv4mpegpipe_demuxer = { "yuv4mpegpipe", NULL_IF_CONFIG_SMALL("YUV4MPEG pipe format"), sizeof(struct frame_attributes), yuv4_probe, yuv4_read_header, yuv4_read_packet, .extensions = "y4m" }; #endif
123linslouis-android-video-cutter
jni/libavformat/yuv4mpeg.c
C
asf20
13,147
/* * RIFF codec tags * Copyright (c) 2000 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavcodec/avcodec.h" #include "avformat.h" #include "riff.h" #include "libavcodec/bytestream.h" /* Note: when encoding, the first matching tag is used, so order is important if multiple tags possible for a given codec. 
*/ const AVCodecTag ff_codec_bmp_tags[] = { { CODEC_ID_H264, MKTAG('H', '2', '6', '4') }, { CODEC_ID_H264, MKTAG('h', '2', '6', '4') }, { CODEC_ID_H264, MKTAG('X', '2', '6', '4') }, { CODEC_ID_H264, MKTAG('x', '2', '6', '4') }, { CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') }, { CODEC_ID_H264, MKTAG('V', 'S', 'S', 'H') }, { CODEC_ID_H263, MKTAG('H', '2', '6', '3') }, { CODEC_ID_H263, MKTAG('X', '2', '6', '3') }, { CODEC_ID_H263, MKTAG('T', '2', '6', '3') }, { CODEC_ID_H263, MKTAG('L', '2', '6', '3') }, { CODEC_ID_H263, MKTAG('V', 'X', '1', 'K') }, { CODEC_ID_H263, MKTAG('Z', 'y', 'G', 'o') }, { CODEC_ID_H263P, MKTAG('H', '2', '6', '3') }, { CODEC_ID_H263I, MKTAG('I', '2', '6', '3') }, /* intel h263 */ { CODEC_ID_H261, MKTAG('H', '2', '6', '1') }, { CODEC_ID_H263P, MKTAG('U', '2', '6', '3') }, { CODEC_ID_H263P, MKTAG('v', 'i', 'v', '1') }, { CODEC_ID_MPEG4, MKTAG('F', 'M', 'P', '4') }, { CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', 'X') }, { CODEC_ID_MPEG4, MKTAG('D', 'X', '5', '0') }, { CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D') }, { CODEC_ID_MPEG4, MKTAG('M', 'P', '4', 'S') }, { CODEC_ID_MPEG4, MKTAG('M', '4', 'S', '2') }, { CODEC_ID_MPEG4, MKTAG( 4 , 0 , 0 , 0 ) }, /* some broken avi use this */ { CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', '1') }, { CODEC_ID_MPEG4, MKTAG('B', 'L', 'Z', '0') }, { CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') }, { CODEC_ID_MPEG4, MKTAG('U', 'M', 'P', '4') }, { CODEC_ID_MPEG4, MKTAG('W', 'V', '1', 'F') }, { CODEC_ID_MPEG4, MKTAG('S', 'E', 'D', 'G') }, { CODEC_ID_MPEG4, MKTAG('R', 'M', 'P', '4') }, { CODEC_ID_MPEG4, MKTAG('3', 'I', 'V', '2') }, { CODEC_ID_MPEG4, MKTAG('F', 'F', 'D', 'S') }, { CODEC_ID_MPEG4, MKTAG('F', 'V', 'F', 'W') }, { CODEC_ID_MPEG4, MKTAG('D', 'C', 'O', 'D') }, { CODEC_ID_MPEG4, MKTAG('M', 'V', 'X', 'M') }, { CODEC_ID_MPEG4, MKTAG('P', 'M', '4', 'V') }, { CODEC_ID_MPEG4, MKTAG('S', 'M', 'P', '4') }, { CODEC_ID_MPEG4, MKTAG('D', 'X', 'G', 'M') }, { CODEC_ID_MPEG4, MKTAG('V', 'I', 'D', 'M') }, { CODEC_ID_MPEG4, MKTAG('M', '4', 'T', '3') 
}, { CODEC_ID_MPEG4, MKTAG('G', 'E', 'O', 'X') }, { CODEC_ID_MPEG4, MKTAG('H', 'D', 'X', '4') }, /* flipped video */ { CODEC_ID_MPEG4, MKTAG('D', 'M', 'K', '2') }, { CODEC_ID_MPEG4, MKTAG('D', 'I', 'G', 'I') }, { CODEC_ID_MPEG4, MKTAG('I', 'N', 'M', 'C') }, { CODEC_ID_MPEG4, MKTAG('E', 'P', 'H', 'V') }, /* Ephv MPEG-4 */ { CODEC_ID_MPEG4, MKTAG('E', 'M', '4', 'A') }, { CODEC_ID_MPEG4, MKTAG('M', '4', 'C', 'C') }, /* Divio MPEG-4 */ { CODEC_ID_MPEG4, MKTAG('S', 'N', '4', '0') }, { CODEC_ID_MPEG4, MKTAG('V', 'S', 'P', 'X') }, { CODEC_ID_MPEG4, MKTAG('U', 'L', 'D', 'X') }, { CODEC_ID_MPEG4, MKTAG('G', 'E', 'O', 'V') }, { CODEC_ID_MPEG4, MKTAG('S', 'I', 'P', 'P') }, /* Samsung SHR-6040 */ { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '3') }, /* default signature when using MSMPEG4 */ { CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', '4', '3') }, { CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', 'G', '3') }, { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '5') }, { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '6') }, { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '4') }, { CODEC_ID_MSMPEG4V3, MKTAG('D', 'V', 'X', '3') }, { CODEC_ID_MSMPEG4V3, MKTAG('A', 'P', '4', '1') }, { CODEC_ID_MSMPEG4V3, MKTAG('C', 'O', 'L', '1') }, { CODEC_ID_MSMPEG4V3, MKTAG('C', 'O', 'L', '0') }, { CODEC_ID_MSMPEG4V2, MKTAG('M', 'P', '4', '2') }, { CODEC_ID_MSMPEG4V2, MKTAG('D', 'I', 'V', '2') }, { CODEC_ID_MSMPEG4V1, MKTAG('M', 'P', 'G', '4') }, { CODEC_ID_MSMPEG4V1, MKTAG('M', 'P', '4', '1') }, { CODEC_ID_WMV1, MKTAG('W', 'M', 'V', '1') }, { CODEC_ID_WMV2, MKTAG('W', 'M', 'V', '2') }, { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 's', 'd') }, { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', 'd') }, { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '1') }, { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 's', 'l') }, { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '2', '5') }, { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', '0') }, { CODEC_ID_DVVIDEO, MKTAG('c', 'd', 'v', 'c') }, /* Canopus DV */ { CODEC_ID_DVVIDEO, MKTAG('C', 'D', 'V', 'H') }, /* Canopus DV */ { CODEC_ID_DVVIDEO, MKTAG('d', 
'v', 'c', ' ') }, { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', 's') }, { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '1') }, { CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'g', '1') }, { CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'g', '2') }, { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'p', 'g', '2') }, { CODEC_ID_MPEG2VIDEO, MKTAG('M', 'P', 'E', 'G') }, { CODEC_ID_MPEG1VIDEO, MKTAG('P', 'I', 'M', '1') }, { CODEC_ID_MPEG2VIDEO, MKTAG('P', 'I', 'M', '2') }, { CODEC_ID_MPEG1VIDEO, MKTAG('V', 'C', 'R', '2') }, { CODEC_ID_MPEG1VIDEO, MKTAG( 1 , 0 , 0 , 16) }, { CODEC_ID_MPEG2VIDEO, MKTAG( 2 , 0 , 0 , 16) }, { CODEC_ID_MPEG4, MKTAG( 4 , 0 , 0 , 16) }, { CODEC_ID_MPEG2VIDEO, MKTAG('D', 'V', 'R', ' ') }, { CODEC_ID_MPEG2VIDEO, MKTAG('M', 'M', 'E', 'S') }, { CODEC_ID_MPEG2VIDEO, MKTAG('L', 'M', 'P', '2') }, /* Lead MPEG2 in avi */ { CODEC_ID_MPEG2VIDEO, MKTAG('s', 'l', 'i', 'f') }, { CODEC_ID_MPEG2VIDEO, MKTAG('E', 'M', '2', 'V') }, { CODEC_ID_MJPEG, MKTAG('M', 'J', 'P', 'G') }, { CODEC_ID_MJPEG, MKTAG('L', 'J', 'P', 'G') }, { CODEC_ID_MJPEG, MKTAG('d', 'm', 'b', '1') }, { CODEC_ID_MJPEG, MKTAG('m', 'j', 'p', 'a') }, { CODEC_ID_LJPEG, MKTAG('L', 'J', 'P', 'G') }, { CODEC_ID_MJPEG, MKTAG('J', 'P', 'G', 'L') }, /* Pegasus lossless JPEG */ { CODEC_ID_JPEGLS, MKTAG('M', 'J', 'L', 'S') }, /* JPEG-LS custom FOURCC for avi - encoder */ { CODEC_ID_MJPEG, MKTAG('M', 'J', 'L', 'S') }, /* JPEG-LS custom FOURCC for avi - decoder */ { CODEC_ID_MJPEG, MKTAG('j', 'p', 'e', 'g') }, { CODEC_ID_MJPEG, MKTAG('I', 'J', 'P', 'G') }, { CODEC_ID_MJPEG, MKTAG('A', 'V', 'R', 'n') }, { CODEC_ID_MJPEG, MKTAG('A', 'C', 'D', 'V') }, { CODEC_ID_MJPEG, MKTAG('Q', 'I', 'V', 'G') }, { CODEC_ID_MJPEG, MKTAG('S', 'L', 'M', 'J') }, /* SL M-JPEG */ { CODEC_ID_MJPEG, MKTAG('C', 'J', 'P', 'G') }, /* Creative Webcam JPEG */ { CODEC_ID_MJPEG, MKTAG('I', 'J', 'L', 'V') }, /* Intel JPEG Library Video Codec */ { CODEC_ID_MJPEG, MKTAG('M', 'V', 'J', 'P') }, /* Midvid JPEG Video Codec */ { CODEC_ID_MJPEG, MKTAG('A', 'V', 'I', '1') }, { 
CODEC_ID_MJPEG, MKTAG('A', 'V', 'I', '2') }, { CODEC_ID_MJPEG, MKTAG('M', 'T', 'S', 'J') }, { CODEC_ID_MJPEG, MKTAG('Z', 'J', 'P', 'G') }, /* Paradigm Matrix M-JPEG Codec */ { CODEC_ID_HUFFYUV, MKTAG('H', 'F', 'Y', 'U') }, { CODEC_ID_FFVHUFF, MKTAG('F', 'F', 'V', 'H') }, { CODEC_ID_CYUV, MKTAG('C', 'Y', 'U', 'V') }, { CODEC_ID_RAWVIDEO, MKTAG( 0 , 0 , 0 , 0 ) }, { CODEC_ID_RAWVIDEO, MKTAG( 3 , 0 , 0 , 0 ) }, { CODEC_ID_RAWVIDEO, MKTAG('I', '4', '2', '0') }, { CODEC_ID_RAWVIDEO, MKTAG('Y', 'U', 'Y', '2') }, { CODEC_ID_RAWVIDEO, MKTAG('Y', '4', '2', '2') }, { CODEC_ID_RAWVIDEO, MKTAG('V', '4', '2', '2') }, { CODEC_ID_RAWVIDEO, MKTAG('Y', 'U', 'N', 'V') }, { CODEC_ID_RAWVIDEO, MKTAG('U', 'Y', 'N', 'V') }, { CODEC_ID_RAWVIDEO, MKTAG('U', 'Y', 'N', 'Y') }, { CODEC_ID_RAWVIDEO, MKTAG('u', 'y', 'v', '1') }, { CODEC_ID_RAWVIDEO, MKTAG('2', 'V', 'u', '1') }, { CODEC_ID_RAWVIDEO, MKTAG('2', 'v', 'u', 'y') }, { CODEC_ID_RAWVIDEO, MKTAG('P', '4', '2', '2') }, { CODEC_ID_RAWVIDEO, MKTAG('Y', 'V', '1', '2') }, { CODEC_ID_RAWVIDEO, MKTAG('U', 'Y', 'V', 'Y') }, { CODEC_ID_RAWVIDEO, MKTAG('V', 'Y', 'U', 'Y') }, { CODEC_ID_RAWVIDEO, MKTAG('I', 'Y', 'U', 'V') }, { CODEC_ID_RAWVIDEO, MKTAG('Y', '8', '0', '0') }, { CODEC_ID_RAWVIDEO, MKTAG('H', 'D', 'Y', 'C') }, { CODEC_ID_RAWVIDEO, MKTAG('Y', 'V', 'U', '9') }, { CODEC_ID_RAWVIDEO, MKTAG('V', 'D', 'T', 'Z') }, /* SoftLab-NSK VideoTizer */ { CODEC_ID_FRWU, MKTAG('F', 'R', 'W', 'U') }, { CODEC_ID_R210, MKTAG('r', '2', '1', '0') }, { CODEC_ID_V210, MKTAG('v', '2', '1', '0') }, { CODEC_ID_INDEO3, MKTAG('I', 'V', '3', '1') }, { CODEC_ID_INDEO3, MKTAG('I', 'V', '3', '2') }, { CODEC_ID_INDEO4, MKTAG('I', 'V', '4', '1') }, { CODEC_ID_INDEO5, MKTAG('I', 'V', '5', '0') }, { CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') }, { CODEC_ID_VP3, MKTAG('V', 'P', '3', '0') }, { CODEC_ID_VP5, MKTAG('V', 'P', '5', '0') }, { CODEC_ID_VP6, MKTAG('V', 'P', '6', '0') }, { CODEC_ID_VP6, MKTAG('V', 'P', '6', '1') }, { CODEC_ID_VP6, MKTAG('V', 'P', '6', '2') }, { 
CODEC_ID_VP6F, MKTAG('V', 'P', '6', 'F') }, { CODEC_ID_VP6F, MKTAG('F', 'L', 'V', '4') }, { CODEC_ID_VP8, MKTAG('V', 'P', '8', '0') }, { CODEC_ID_ASV1, MKTAG('A', 'S', 'V', '1') }, { CODEC_ID_ASV2, MKTAG('A', 'S', 'V', '2') }, { CODEC_ID_VCR1, MKTAG('V', 'C', 'R', '1') }, { CODEC_ID_FFV1, MKTAG('F', 'F', 'V', '1') }, { CODEC_ID_XAN_WC4, MKTAG('X', 'x', 'a', 'n') }, { CODEC_ID_MIMIC, MKTAG('L', 'M', '2', '0') }, { CODEC_ID_MSRLE, MKTAG('m', 'r', 'l', 'e') }, { CODEC_ID_MSRLE, MKTAG( 1 , 0 , 0 , 0 ) }, { CODEC_ID_MSRLE, MKTAG( 2 , 0 , 0 , 0 ) }, { CODEC_ID_MSVIDEO1, MKTAG('M', 'S', 'V', 'C') }, { CODEC_ID_MSVIDEO1, MKTAG('m', 's', 'v', 'c') }, { CODEC_ID_MSVIDEO1, MKTAG('C', 'R', 'A', 'M') }, { CODEC_ID_MSVIDEO1, MKTAG('c', 'r', 'a', 'm') }, { CODEC_ID_MSVIDEO1, MKTAG('W', 'H', 'A', 'M') }, { CODEC_ID_MSVIDEO1, MKTAG('w', 'h', 'a', 'm') }, { CODEC_ID_CINEPAK, MKTAG('c', 'v', 'i', 'd') }, { CODEC_ID_TRUEMOTION1, MKTAG('D', 'U', 'C', 'K') }, { CODEC_ID_TRUEMOTION1, MKTAG('P', 'V', 'E', 'Z') }, { CODEC_ID_MSZH, MKTAG('M', 'S', 'Z', 'H') }, { CODEC_ID_ZLIB, MKTAG('Z', 'L', 'I', 'B') }, { CODEC_ID_SNOW, MKTAG('S', 'N', 'O', 'W') }, { CODEC_ID_4XM, MKTAG('4', 'X', 'M', 'V') }, { CODEC_ID_FLV1, MKTAG('F', 'L', 'V', '1') }, { CODEC_ID_FLASHSV, MKTAG('F', 'S', 'V', '1') }, { CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', '1') }, { CODEC_ID_TSCC, MKTAG('t', 's', 'c', 'c') }, { CODEC_ID_ULTI, MKTAG('U', 'L', 'T', 'I') }, { CODEC_ID_VIXL, MKTAG('V', 'I', 'X', 'L') }, { CODEC_ID_QPEG, MKTAG('Q', 'P', 'E', 'G') }, { CODEC_ID_QPEG, MKTAG('Q', '1', '.', '0') }, { CODEC_ID_QPEG, MKTAG('Q', '1', '.', '1') }, { CODEC_ID_WMV3, MKTAG('W', 'M', 'V', '3') }, { CODEC_ID_VC1, MKTAG('W', 'V', 'C', '1') }, { CODEC_ID_VC1, MKTAG('W', 'M', 'V', 'A') }, { CODEC_ID_LOCO, MKTAG('L', 'O', 'C', 'O') }, { CODEC_ID_WNV1, MKTAG('W', 'N', 'V', '1') }, { CODEC_ID_AASC, MKTAG('A', 'A', 'S', 'C') }, { CODEC_ID_INDEO2, MKTAG('R', 'T', '2', '1') }, { CODEC_ID_FRAPS, MKTAG('F', 'P', 'S', '1') }, { CODEC_ID_THEORA, 
MKTAG('t', 'h', 'e', 'o') }, { CODEC_ID_TRUEMOTION2, MKTAG('T', 'M', '2', '0') }, { CODEC_ID_CSCD, MKTAG('C', 'S', 'C', 'D') }, { CODEC_ID_ZMBV, MKTAG('Z', 'M', 'B', 'V') }, { CODEC_ID_KMVC, MKTAG('K', 'M', 'V', 'C') }, { CODEC_ID_CAVS, MKTAG('C', 'A', 'V', 'S') }, { CODEC_ID_JPEG2000, MKTAG('M', 'J', '2', 'C') }, { CODEC_ID_VMNC, MKTAG('V', 'M', 'n', 'c') }, { CODEC_ID_TARGA, MKTAG('t', 'g', 'a', ' ') }, { CODEC_ID_PNG, MKTAG('M', 'P', 'N', 'G') }, { CODEC_ID_PNG, MKTAG('P', 'N', 'G', '1') }, { CODEC_ID_CLJR, MKTAG('c', 'l', 'j', 'r') }, { CODEC_ID_DIRAC, MKTAG('d', 'r', 'a', 'c') }, { CODEC_ID_RPZA, MKTAG('a', 'z', 'p', 'r') }, { CODEC_ID_RPZA, MKTAG('R', 'P', 'Z', 'A') }, { CODEC_ID_RPZA, MKTAG('r', 'p', 'z', 'a') }, { CODEC_ID_SP5X, MKTAG('S', 'P', '5', '4') }, { CODEC_ID_AURA, MKTAG('A', 'U', 'R', 'A') }, { CODEC_ID_AURA2, MKTAG('A', 'U', 'R', '2') }, { CODEC_ID_DPX, MKTAG('d', 'p', 'x', ' ') }, { CODEC_ID_KGV1, MKTAG('K', 'G', 'V', '1') }, { CODEC_ID_NONE, 0 } }; const AVCodecTag ff_codec_wav_tags[] = { { CODEC_ID_PCM_S16LE, 0x0001 }, { CODEC_ID_PCM_U8, 0x0001 }, /* must come after s16le in this list */ { CODEC_ID_PCM_S24LE, 0x0001 }, { CODEC_ID_PCM_S32LE, 0x0001 }, { CODEC_ID_ADPCM_MS, 0x0002 }, { CODEC_ID_PCM_F32LE, 0x0003 }, { CODEC_ID_PCM_F64LE, 0x0003 }, /* must come after f32le in this list */ { CODEC_ID_PCM_ALAW, 0x0006 }, { CODEC_ID_PCM_MULAW, 0x0007 }, { CODEC_ID_WMAVOICE, 0x000A }, { CODEC_ID_ADPCM_IMA_WAV, 0x0011 }, { CODEC_ID_PCM_ZORK, 0x0011 }, /* must come after adpcm_ima_wav in this list */ { CODEC_ID_ADPCM_YAMAHA, 0x0020 }, { CODEC_ID_TRUESPEECH, 0x0022 }, { CODEC_ID_GSM_MS, 0x0031 }, { CODEC_ID_ADPCM_G726, 0x0045 }, { CODEC_ID_MP2, 0x0050 }, { CODEC_ID_MP3, 0x0055 }, { CODEC_ID_AMR_NB, 0x0057 }, { CODEC_ID_AMR_WB, 0x0058 }, { CODEC_ID_ADPCM_IMA_DK4, 0x0061 }, /* rogue format number */ { CODEC_ID_ADPCM_IMA_DK3, 0x0062 }, /* rogue format number */ { CODEC_ID_ADPCM_IMA_WAV, 0x0069 }, { CODEC_ID_VOXWARE, 0x0075 }, { CODEC_ID_AAC, 0x00ff }, { 
CODEC_ID_SIPR, 0x0130 }, { CODEC_ID_WMAV1, 0x0160 }, { CODEC_ID_WMAV2, 0x0161 }, { CODEC_ID_WMAPRO, 0x0162 }, { CODEC_ID_WMALOSSLESS, 0x0163 }, { CODEC_ID_ADPCM_CT, 0x0200 }, { CODEC_ID_ATRAC3, 0x0270 }, { CODEC_ID_IMC, 0x0401 }, { CODEC_ID_GSM_MS, 0x1500 }, { CODEC_ID_TRUESPEECH, 0x1501 }, { CODEC_ID_AC3, 0x2000 }, { CODEC_ID_DTS, 0x2001 }, { CODEC_ID_SONIC, 0x2048 }, { CODEC_ID_SONIC_LS, 0x2048 }, { CODEC_ID_PCM_MULAW, 0x6c75 }, { CODEC_ID_AAC, 0x706d }, { CODEC_ID_AAC, 0x4143 }, { CODEC_ID_FLAC, 0xF1AC }, { CODEC_ID_ADPCM_SWF, ('S'<<8)+'F' }, { CODEC_ID_VORBIS, ('V'<<8)+'o' }, //HACK/FIXME, does vorbis in WAV/AVI have an (in)official id? /* FIXME: All of the IDs below are not 16 bit and thus illegal. */ // for NuppelVideo (nuv.c) { CODEC_ID_PCM_S16LE, MKTAG('R', 'A', 'W', 'A') }, { CODEC_ID_MP3, MKTAG('L', 'A', 'M', 'E') }, { CODEC_ID_MP3, MKTAG('M', 'P', '3', ' ') }, { CODEC_ID_NONE, 0 }, }; #if CONFIG_MUXERS int64_t ff_start_tag(ByteIOContext *pb, const char *tag) { put_tag(pb, tag); put_le32(pb, 0); return url_ftell(pb); } void ff_end_tag(ByteIOContext *pb, int64_t start) { int64_t pos; pos = url_ftell(pb); url_fseek(pb, start - 4, SEEK_SET); put_le32(pb, (uint32_t)(pos - start)); url_fseek(pb, pos, SEEK_SET); } /* WAVEFORMATEX header */ /* returns the size or -1 on error */ int ff_put_wav_header(ByteIOContext *pb, AVCodecContext *enc) { int bps, blkalign, bytespersec; int hdrsize = 18; int waveformatextensible; uint8_t temp[256]; uint8_t *riff_extradata= temp; uint8_t *riff_extradata_start= temp; if(!enc->codec_tag || enc->codec_tag > 0xffff) return -1; waveformatextensible = (enc->channels > 2 && enc->channel_layout) || enc->sample_rate > 48000 || av_get_bits_per_sample(enc->codec_id) > 16; if (waveformatextensible) { put_le16(pb, 0xfffe); } else { put_le16(pb, enc->codec_tag); } put_le16(pb, enc->channels); put_le32(pb, enc->sample_rate); if (enc->codec_id == CODEC_ID_MP2 || enc->codec_id == CODEC_ID_MP3 || enc->codec_id == CODEC_ID_GSM_MS) { bps = 0; } 
else if (enc->codec_id == CODEC_ID_ADPCM_G726) { bps = 4; } else { if (!(bps = av_get_bits_per_sample(enc->codec_id))) bps = 16; // default to 16 } if(bps != enc->bits_per_coded_sample && enc->bits_per_coded_sample){ av_log(enc, AV_LOG_WARNING, "requested bits_per_coded_sample (%d) and actually stored (%d) differ\n", enc->bits_per_coded_sample, bps); } if (enc->codec_id == CODEC_ID_MP2 || enc->codec_id == CODEC_ID_MP3 || enc->codec_id == CODEC_ID_AC3) { blkalign = enc->frame_size; //this is wrong, but it seems many demuxers do not work if this is set correctly //blkalign = 144 * enc->bit_rate/enc->sample_rate; } else if (enc->codec_id == CODEC_ID_ADPCM_G726) { // blkalign = 1; } else if (enc->block_align != 0) { /* specified by the codec */ blkalign = enc->block_align; } else blkalign = enc->channels*bps >> 3; if (enc->codec_id == CODEC_ID_PCM_U8 || enc->codec_id == CODEC_ID_PCM_S24LE || enc->codec_id == CODEC_ID_PCM_S32LE || enc->codec_id == CODEC_ID_PCM_F32LE || enc->codec_id == CODEC_ID_PCM_F64LE || enc->codec_id == CODEC_ID_PCM_S16LE) { bytespersec = enc->sample_rate * blkalign; } else { bytespersec = enc->bit_rate / 8; } put_le32(pb, bytespersec); /* bytes per second */ put_le16(pb, blkalign); /* block align */ put_le16(pb, bps); /* bits per sample */ if (enc->codec_id == CODEC_ID_MP3) { hdrsize += 12; bytestream_put_le16(&riff_extradata, 1); /* wID */ bytestream_put_le32(&riff_extradata, 2); /* fdwFlags */ bytestream_put_le16(&riff_extradata, 1152); /* nBlockSize */ bytestream_put_le16(&riff_extradata, 1); /* nFramesPerBlock */ bytestream_put_le16(&riff_extradata, 1393); /* nCodecDelay */ } else if (enc->codec_id == CODEC_ID_MP2) { hdrsize += 22; bytestream_put_le16(&riff_extradata, 2); /* fwHeadLayer */ bytestream_put_le32(&riff_extradata, enc->bit_rate); /* dwHeadBitrate */ bytestream_put_le16(&riff_extradata, enc->channels == 2 ? 
1 : 8); /* fwHeadMode */ bytestream_put_le16(&riff_extradata, 0); /* fwHeadModeExt */ bytestream_put_le16(&riff_extradata, 1); /* wHeadEmphasis */ bytestream_put_le16(&riff_extradata, 16); /* fwHeadFlags */ bytestream_put_le32(&riff_extradata, 0); /* dwPTSLow */ bytestream_put_le32(&riff_extradata, 0); /* dwPTSHigh */ } else if (enc->codec_id == CODEC_ID_GSM_MS || enc->codec_id == CODEC_ID_ADPCM_IMA_WAV) { hdrsize += 2; bytestream_put_le16(&riff_extradata, enc->frame_size); /* wSamplesPerBlock */ } else if(enc->extradata_size){ riff_extradata_start= enc->extradata; riff_extradata= enc->extradata + enc->extradata_size; hdrsize += enc->extradata_size; } else if (!waveformatextensible){ hdrsize -= 2; } if(waveformatextensible) { /* write WAVEFORMATEXTENSIBLE extensions */ hdrsize += 22; put_le16(pb, riff_extradata - riff_extradata_start + 22); /* 22 is WAVEFORMATEXTENSIBLE size */ put_le16(pb, enc->bits_per_coded_sample); /* ValidBitsPerSample || SamplesPerBlock || Reserved */ put_le32(pb, enc->channel_layout); /* dwChannelMask */ put_le32(pb, enc->codec_tag); /* GUID + next 3 */ put_le32(pb, 0x00100000); put_le32(pb, 0xAA000080); put_le32(pb, 0x719B3800); } else if(riff_extradata - riff_extradata_start) { put_le16(pb, riff_extradata - riff_extradata_start); } put_buffer(pb, riff_extradata_start, riff_extradata - riff_extradata_start); if(hdrsize&1){ hdrsize++; put_byte(pb, 0); } return hdrsize; } /* BITMAPINFOHEADER header */ void ff_put_bmp_header(ByteIOContext *pb, AVCodecContext *enc, const AVCodecTag *tags, int for_asf) { put_le32(pb, 40 + enc->extradata_size); /* size */ put_le32(pb, enc->width); //We always store RGB TopDown put_le32(pb, enc->codec_tag ? enc->height : -enc->height); put_le16(pb, 1); /* planes */ put_le16(pb, enc->bits_per_coded_sample ? 
enc->bits_per_coded_sample : 24); /* depth */ /* compression type */ put_le32(pb, enc->codec_tag); put_le32(pb, enc->width * enc->height * 3); put_le32(pb, 0); put_le32(pb, 0); put_le32(pb, 0); put_le32(pb, 0); put_buffer(pb, enc->extradata, enc->extradata_size); if (!for_asf && enc->extradata_size & 1) put_byte(pb, 0); } #endif //CONFIG_MUXERS #if CONFIG_DEMUXERS /* We could be given one of the three possible structures here: * WAVEFORMAT, PCMWAVEFORMAT or WAVEFORMATEX. Each structure * is an expansion of the previous one with the fields added * at the bottom. PCMWAVEFORMAT adds 'WORD wBitsPerSample' and * WAVEFORMATEX adds 'WORD cbSize' and basically makes itself * an openended structure. */ void ff_get_wav_header(ByteIOContext *pb, AVCodecContext *codec, int size) { int id; id = get_le16(pb); codec->codec_type = AVMEDIA_TYPE_AUDIO; codec->codec_tag = id; codec->channels = get_le16(pb); codec->sample_rate = get_le32(pb); codec->bit_rate = get_le32(pb) * 8; codec->block_align = get_le16(pb); if (size == 14) { /* We're dealing with plain vanilla WAVEFORMAT */ codec->bits_per_coded_sample = 8; }else codec->bits_per_coded_sample = get_le16(pb); if (size >= 18) { /* We're obviously dealing with WAVEFORMATEX */ int cbSize = get_le16(pb); /* cbSize */ size -= 18; cbSize = FFMIN(size, cbSize); if (cbSize >= 22 && id == 0xfffe) { /* WAVEFORMATEXTENSIBLE */ codec->bits_per_coded_sample = get_le16(pb); codec->channel_layout = get_le32(pb); /* dwChannelMask */ id = get_le32(pb); /* 4 first bytes of GUID */ url_fskip(pb, 12); /* skip end of GUID */ cbSize -= 22; size -= 22; } codec->extradata_size = cbSize; if (cbSize > 0) { codec->extradata = av_mallocz(codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); get_buffer(pb, codec->extradata, codec->extradata_size); size -= cbSize; } /* It is possible for the chunk to contain garbage at the end */ if (size > 0) url_fskip(pb, size); } codec->codec_id = ff_wav_codec_get_id(id, codec->bits_per_coded_sample); } enum CodecID 
ff_wav_codec_get_id(unsigned int tag, int bps) { enum CodecID id; id = ff_codec_get_id(ff_codec_wav_tags, tag); if (id <= 0) return id; /* handle specific u8 codec */ if (id == CODEC_ID_PCM_S16LE && bps == 8) id = CODEC_ID_PCM_U8; if (id == CODEC_ID_PCM_S16LE && bps == 24) id = CODEC_ID_PCM_S24LE; if (id == CODEC_ID_PCM_S16LE && bps == 32) id = CODEC_ID_PCM_S32LE; if (id == CODEC_ID_PCM_F32LE && bps == 64) id = CODEC_ID_PCM_F64LE; if (id == CODEC_ID_ADPCM_IMA_WAV && bps == 8) id = CODEC_ID_PCM_ZORK; return id; } #endif // CONFIG_DEMUXERS void ff_parse_specific_params(AVCodecContext *stream, int *au_rate, int *au_ssize, int *au_scale) { int gcd; *au_ssize= stream->block_align; if(stream->frame_size && stream->sample_rate){ *au_scale=stream->frame_size; *au_rate= stream->sample_rate; }else if(stream->codec_type == AVMEDIA_TYPE_VIDEO || stream->codec_type == AVMEDIA_TYPE_SUBTITLE){ *au_scale= stream->time_base.num; *au_rate = stream->time_base.den; }else{ *au_scale= stream->block_align ? stream->block_align*8 : 8; *au_rate = stream->bit_rate ? stream->bit_rate : 8*stream->sample_rate; } gcd= av_gcd(*au_scale, *au_rate); *au_scale /= gcd; *au_rate /= gcd; }
123linslouis-android-video-cutter
jni/libavformat/riff.c
C
asf20
26,147
/*
 * Yamaha SMAF format
 * Copyright (c) 2005 Vidar Madsen
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avformat.h"
#include "raw.h"
#include "riff.h"

/* Muxer/demuxer state: chunk-start offsets remembered while writing so the
 * length fields can be patched in at trailer time, and the number of bytes
 * of wave data still to be delivered while reading. */
typedef struct {
    int64_t atrpos, atsqpos, awapos;    /* offsets of the ATR, Atsq and Awa chunk payloads */
    int64_t data_size;                  /* demux: remaining bytes in the Awa chunk */
} MMFContext;

/* Sample rates selectable by the 4-bit rate code in the ATR chunk. */
static const int mmf_rates[] = { 4000, 8000, 11025, 22050, 44100 };

/* Map a 0..4 rate code to a sample rate in Hz; -1 for an invalid code. */
static int mmf_rate(int code)
{
    if((code < 0) || (code > 4))
        return -1;
    return mmf_rates[code];
}

#if CONFIG_MMF_MUXER
/* Inverse of mmf_rate(): map a sample rate to its code, or -1 if unsupported. */
static int mmf_rate_code(int rate)
{
    int i;
    for(i = 0; i < 5; i++)
        if(mmf_rates[i] == rate)
            return i;
    return -1;
}

/* Copy of end_tag() from avienc.c, but for big-endian chunk size */
static void end_tag_be(ByteIOContext *pb, int64_t start)
{
    int64_t pos;

    pos = url_ftell(pb);
    url_fseek(pb, start - 4, SEEK_SET);
    put_be32(pb, (uint32_t)(pos - start));
    url_fseek(pb, pos, SEEK_SET);
}

/* Write the SMAF file header: MMMD container, CNTI info chunk and the
 * beginning of the ATR (audio track) chunk.  Length fields are written as
 * placeholders and patched in mmf_write_trailer(). */
static int mmf_write_header(AVFormatContext *s)
{
    MMFContext *mmf = s->priv_data;
    ByteIOContext *pb = s->pb;
    int64_t pos;
    int rate;

    rate = mmf_rate_code(s->streams[0]->codec->sample_rate);
    if(rate < 0) {
        av_log(s, AV_LOG_ERROR, "Unsupported sample rate %d\n", s->streams[0]->codec->sample_rate);
        return -1;
    }

    put_tag(pb, "MMMD");
    put_be32(pb, 0);                    /* file size, patched in trailer */
    pos = ff_start_tag(pb, "CNTI");
    put_byte(pb, 0); /* class */
    put_byte(pb, 0); /* type */
    put_byte(pb, 0); /* code type */
    put_byte(pb, 0); /* status */
    put_byte(pb, 0); /* counts */
    put_tag(pb, "VN:libavcodec,"); /* metadata ("ST:songtitle,VN:version,...") */
    end_tag_be(pb, pos);

    put_buffer(pb, "ATR\x00", 4);       /* audio track 0 */
    put_be32(pb, 0);                    /* chunk size, patched in trailer */
    mmf->atrpos = url_ftell(pb);
    put_byte(pb, 0); /* format type */
    put_byte(pb, 0); /* sequence type */
    put_byte(pb, (0 << 7) | (1 << 4) | rate); /* (channel << 7) | (format << 4) | rate */
    put_byte(pb, 0); /* wave base bit */
    put_byte(pb, 2); /* time base d */
    put_byte(pb, 2); /* time base g */

    put_tag(pb, "Atsq");
    put_be32(pb, 16);
    mmf->atsqpos = url_ftell(pb);
    /* Will be filled on close */
    put_buffer(pb, "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 16);

    mmf->awapos = ff_start_tag(pb, "Awa\x01");

    av_set_pts_info(s->streams[0], 64, 1, s->streams[0]->codec->sample_rate);

    put_flush_packet(pb);

    return 0;
}

/* Append the raw ADPCM payload to the open Awa chunk. */
static int mmf_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *pb = s->pb;
    put_buffer(pb, pkt->data, pkt->size);
    return 0;
}

/* Write a variable-length symbol */
static void put_varlength(ByteIOContext *pb, int val)
{
    if(val < 128)
        put_byte(pb, val);
    else {
        /* 2-byte form: high bit set on the first byte */
        val -= 128;
        put_byte(pb, 0x80 | val >> 7);
        put_byte(pb, 0x7f & val);
    }
}

/* Patch all deferred chunk sizes and fill in the Atsq sequence chunk that
 * tells the player to play wave number 1. */
static int mmf_write_trailer(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    MMFContext *mmf = s->priv_data;
    int64_t pos, size;
    int gatetime;

    if (!url_is_streamed(s->pb)) {
        /* Fill in length fields */
        end_tag_be(pb, mmf->awapos);
        end_tag_be(pb, mmf->atrpos);
        end_tag_be(pb, 8);

        pos = url_ftell(pb);
        size = pos - mmf->awapos;

        /* Fill Atsq chunk */
        url_fseek(pb, mmf->atsqpos, SEEK_SET);

        /* "play wav" */
        put_byte(pb, 0); /* start time */
        put_byte(pb, 1); /* (channel << 6) | wavenum */
        gatetime = size * 500 / s->streams[0]->codec->sample_rate;
        put_varlength(pb, gatetime); /* duration */

        /* "nop" */
        put_varlength(pb, gatetime); /* start time */
        put_buffer(pb, "\xff\x00", 2); /* nop */

        /* "end of sequence" */
        put_buffer(pb, "\x00\x00\x00\x00", 4);

        url_fseek(pb, pos, SEEK_SET);

        put_flush_packet(pb);
    }
    return 0;
}
#endif /* CONFIG_MMF_MUXER */

/* Probe: look for the "MMMD" magic followed by a CNTI chunk at offset 8. */
static int mmf_probe(AVProbeData *p)
{
    /* check file header */
    if (p->buf[0] == 'M' && p->buf[1] == 'M' &&
        p->buf[2] == 'M' && p->buf[3] == 'D' &&
        p->buf[8] == 'C' && p->buf[9] == 'N' &&
        p->buf[10] == 'T' && p->buf[11] == 'I')
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}

/* mmf input */
/* Parse the SMAF header chain up to the Awa wave-data chunk and create the
 * single mono ADPCM audio stream.  Only wave ("ATRx") files are supported;
 * MIDI-like "MTRx" files are rejected. */
static int mmf_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    MMFContext *mmf = s->priv_data;
    unsigned int tag;
    ByteIOContext *pb = s->pb;
    AVStream *st;
    int64_t file_size, size;
    int rate, params;

    tag = get_le32(pb);
    if (tag != MKTAG('M', 'M', 'M', 'D'))
        return -1;
    file_size = get_be32(pb);

    /* Skip some unused chunks that may or may not be present */
    for(;; url_fseek(pb, size, SEEK_CUR)) {
        tag = get_le32(pb);
        size = get_be32(pb);
        if(tag == MKTAG('C','N','T','I')) continue;
        if(tag == MKTAG('O','P','D','A')) continue;
        break;
    }

    /* Tag = "ATRx", where "x" = track number */
    if ((tag & 0xffffff) == MKTAG('M', 'T', 'R', 0)) {
        av_log(s, AV_LOG_ERROR, "MIDI like format found, unsupported\n");
        return -1;
    }
    if ((tag & 0xffffff) != MKTAG('A', 'T', 'R', 0)) {
        av_log(s, AV_LOG_ERROR, "Unsupported SMAF chunk %08x\n", tag);
        return -1;
    }

    get_byte(pb); /* format type */
    get_byte(pb); /* sequence type */
    params = get_byte(pb); /* (channel << 7) | (format << 4) | rate */
    rate = mmf_rate(params & 0x0f);
    if(rate  < 0) {
        av_log(s, AV_LOG_ERROR, "Invalid sample rate\n");
        return -1;
    }
    get_byte(pb); /* wave base bit */
    get_byte(pb); /* time base d */
    get_byte(pb); /* time base g */

    /* Skip some unused chunks that may or may not be present */
    for(;; url_fseek(pb, size, SEEK_CUR)) {
        tag = get_le32(pb);
        size = get_be32(pb);
        if(tag == MKTAG('A','t','s','q')) continue;
        if(tag == MKTAG('A','s','p','I')) continue;
        break;
    }

    /* Make sure it's followed by an Awa chunk, aka wave data */
    if ((tag & 0xffffff) != MKTAG('A', 'w', 'a', 0)) {
        av_log(s, AV_LOG_ERROR, "Unexpected SMAF chunk %08x\n", tag);
        return -1;
    }
    mmf->data_size = size;

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = CODEC_ID_ADPCM_YAMAHA;
    st->codec->sample_rate = rate;
    st->codec->channels = 1;
    st->codec->bits_per_coded_sample = 4;
    st->codec->bit_rate = st->codec->sample_rate * st->codec->bits_per_coded_sample;

    av_set_pts_info(st, 64, 1, st->codec->sample_rate);

    return 0;
}

#define MAX_SIZE 4096

/* Deliver up to MAX_SIZE bytes of the remaining Awa payload as one packet. */
static int mmf_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    MMFContext *mmf = s->priv_data;
    AVStream *st;
    int ret, size;

    if (url_feof(s->pb))
        return AVERROR(EIO);
    st = s->streams[0];    /* NOTE(review): assigned but not used below */

    size = MAX_SIZE;
    if(size > mmf->data_size)
        size = mmf->data_size;

    if(!size)
        return AVERROR(EIO);

    if (av_new_packet(pkt, size))
        return AVERROR(EIO);
    pkt->stream_index = 0;

    ret = get_buffer(s->pb, pkt->data, pkt->size);
    if (ret < 0)
        av_free_packet(pkt);

    mmf->data_size -= ret;

    pkt->size = ret;
    return ret;
}

#if CONFIG_MMF_DEMUXER
AVInputFormat mmf_demuxer = {
    "mmf",
    NULL_IF_CONFIG_SMALL("Yamaha SMAF"),
    sizeof(MMFContext),
    mmf_probe,
    mmf_read_header,
    mmf_read_packet,
    NULL,
    pcm_read_seek,
};
#endif
#if CONFIG_MMF_MUXER
AVOutputFormat mmf_muxer = {
    "mmf",
    NULL_IF_CONFIG_SMALL("Yamaha SMAF"),
    "application/vnd.smaf",
    "mmf",
    sizeof(MMFContext),
    CODEC_ID_ADPCM_YAMAHA,
    CODEC_ID_NONE,
    mmf_write_header,
    mmf_write_packet,
    mmf_write_trailer,
};
#endif
123linslouis-android-video-cutter
jni/libavformat/mmf.c
C
asf20
8,433
/*
 * MOV, 3GP, MP4 muxer
 * Copyright (c) 2003 Thomas Raivio
 * Copyright (c) 2004 Gildas Bazin <gbazin at videolan dot org>
 * Copyright (c) 2009 Baptiste Coudurier <baptiste dot coudurier at gmail dot com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVFORMAT_MOVENC_H
#define AVFORMAT_MOVENC_H

#include "avformat.h"

#define MOV_INDEX_CLUSTER_SIZE 16384
#define MOV_TIMESCALE 1000

#define RTP_MAX_PACKET_SIZE 1450

/* Container variants; stored as a bitmask in MOVMuxContext.mode. */
#define MODE_MP4  0x01
#define MODE_MOV  0x02
#define MODE_3GP  0x04
#define MODE_PSP  0x08 // example working PSP command line:
// ffmpeg -i testinput.avi  -f psp -r 14.985 -s 320x240 -b 768 -ar 24000 -ab 32 M4V00001.MP4
#define MODE_3G2  0x10
#define MODE_IPOD 0x20

/* One sample (frame) in the per-track index used to build stbl atoms. */
typedef struct MOVIentry {
    unsigned int size;              ///< sample size in bytes
    uint64_t     pos;               ///< file offset of the sample
    unsigned int samplesInChunk;
    unsigned int entries;
    int          cts;               ///< composition time offset (for ctts)
    int64_t      dts;
#define MOV_SYNC_SAMPLE         0x0001
#define MOV_PARTIAL_SYNC_SAMPLE 0x0002
    uint32_t     flags;
} MOVIentry;

/* One queued RTP hint sample (for hint tracks). */
typedef struct HintSample {
    uint8_t *data;
    int size;
    int sample_number;
    int offset;
    int own_data;                   ///< nonzero if data must be freed with the sample
} HintSample;

typedef struct {
    int size;                       ///< allocated capacity of samples
    int len;                        ///< number of queued samples
    HintSample *samples;
} HintSampleQueue;

/* Per-track muxer state. */
typedef struct MOVIndex {
    int         mode;               ///< MODE_* of the container
    int         entry;              ///< number of entries used in cluster
    unsigned    timescale;
    uint64_t    time;
    int64_t     trackDuration;
    long        sampleCount;
    long        sampleSize;
    int         hasKeyframes;
#define MOV_TRACK_CTTS 0x0001
#define MOV_TRACK_STPS 0x0002
    uint32_t    flags;
    int         language;
    int         trackID;
    int         tag; ///< stsd fourcc
    AVCodecContext *enc;

    int         vosLen;             ///< length of codec global header (extradata)
    uint8_t     *vosData;
    MOVIentry   *cluster;           ///< sample index, grown in MOV_INDEX_CLUSTER_SIZE steps
    int         audio_vbr;
    int         height; ///< active picture (w/o VBI) height for D-10/IMX
    uint32_t    tref_tag;
    int         tref_id; ///< trackID of the referenced track

    int         hint_track;   ///< the track that hints this track, -1 if no hint track is set
    int         src_track;    ///< the track that this hint track describes
    AVFormatContext *rtp_ctx; ///< the format context for the hinting rtp muxer
    uint32_t    prev_rtp_ts;
    int64_t     cur_rtp_ts_unwrapped;
    uint32_t    max_packet_size;

    HintSampleQueue sample_queue;
} MOVTrack;

/* Global muxer state. */
typedef struct MOVMuxContext {
    int     mode;                   ///< MODE_* container variant
    int64_t time;
    int     nb_streams;
    int     chapter_track; ///< qt chapter track number
    int64_t mdat_pos;               ///< file offset of the mdat atom (size patched at trailer)
    uint64_t mdat_size;
    MOVTrack *tracks;
} MOVMuxContext;

int ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt);

int ff_mov_init_hinting(AVFormatContext *s, int index, int src_index);
int ff_mov_add_hinted_packet(AVFormatContext *s, AVPacket *pkt,
                             int track_index, int sample);
void ff_mov_close_hinting(MOVTrack *track);

#endif /* AVFORMAT_MOVENC_H */
123linslouis-android-video-cutter
jni/libavformat/movenc.h
C
asf20
3,611
/*
 * RTMP input format
 * Copyright (c) 2009 Kostya Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/bytestream.h"
#include "libavutil/avstring.h"
#include "avformat.h"

#include "rtmppkt.h"
#include "flv.h"

/* AMF serialization helpers: each writes one AMF0 value at *dst and
 * advances the pointer.  No bounds checking — callers size the buffer. */

void ff_amf_write_bool(uint8_t **dst, int val)
{
    bytestream_put_byte(dst, AMF_DATA_TYPE_BOOL);
    bytestream_put_byte(dst, val);
}

void ff_amf_write_number(uint8_t **dst, double val)
{
    bytestream_put_byte(dst, AMF_DATA_TYPE_NUMBER);
    bytestream_put_be64(dst, av_dbl2int(val));
}

void ff_amf_write_string(uint8_t **dst, const char *str)
{
    bytestream_put_byte(dst, AMF_DATA_TYPE_STRING);
    bytestream_put_be16(dst, strlen(str));
    bytestream_put_buffer(dst, str, strlen(str));
}

void ff_amf_write_null(uint8_t **dst)
{
    bytestream_put_byte(dst, AMF_DATA_TYPE_NULL);
}

void ff_amf_write_object_start(uint8_t **dst)
{
    bytestream_put_byte(dst, AMF_DATA_TYPE_OBJECT);
}

void ff_amf_write_field_name(uint8_t **dst, const char *str)
{
    bytestream_put_be16(dst, strlen(str));
    bytestream_put_buffer(dst, str, strlen(str));
}

void ff_amf_write_object_end(uint8_t **dst)
{
    /* first two bytes are field name length = 0,
     * AMF object should end with it and end marker
     */
    bytestream_put_be24(dst, AMF_DATA_TYPE_OBJECT_END);
}

/**
 * Read one RTMP packet from the connection.
 *
 * Parses the chunk basic header (1-3 bytes), the message header (0, 3, 7 or
 * 11 bytes depending on the 2-bit format), optionally the extended timestamp,
 * then reassembles the payload from chunk_size-sized chunks separated by
 * one-byte continuation markers (0xC0 | channel_id).  Missing header fields
 * are inherited from prev_pkt[channel_id] (per-channel history).
 *
 * @return number of bytes consumed, or a negative value on error.
 *
 * NOTE(review): in the reassembly loop the accounting subtracts/advances by
 * chunk_size rather than by toread, so for a final partial chunk the returned
 * size over-counts and offset overshoots (harmless for the data itself since
 * the loop exits, but the byte count reported to the caller is inflated).
 */
int ff_rtmp_packet_read(URLContext *h, RTMPPacket *p,
                        int chunk_size, RTMPPacket *prev_pkt)
{
    uint8_t hdr, t, buf[16];
    int channel_id, timestamp, data_size, offset = 0;
    uint32_t extra = 0;
    enum RTMPPacketType type;
    int size = 0;

    if (url_read(h, &hdr, 1) != 1)
        return AVERROR(EIO);
    size++;
    channel_id = hdr & 0x3F;

    if (channel_id < 2) { //special case for channel number >= 64
        buf[1] = 0;
        if (url_read_complete(h, buf, channel_id + 1) != channel_id + 1)
            return AVERROR(EIO);
        size += channel_id + 1;
        channel_id = AV_RL16(buf) + 64;
    }
    /* start from the previous packet's values; a shorter header format
     * means the omitted fields repeat */
    data_size = prev_pkt[channel_id].data_size;
    type      = prev_pkt[channel_id].type;
    extra     = prev_pkt[channel_id].extra;

    hdr >>= 6;
    if (hdr == RTMP_PS_ONEBYTE) {
        timestamp = prev_pkt[channel_id].ts_delta;
    } else {
        if (url_read_complete(h, buf, 3) != 3)
            return AVERROR(EIO);
        size += 3;
        timestamp = AV_RB24(buf);
        if (hdr != RTMP_PS_FOURBYTES) {
            if (url_read_complete(h, buf, 3) != 3)
                return AVERROR(EIO);
            size += 3;
            data_size = AV_RB24(buf);
            if (url_read_complete(h, buf, 1) != 1)
                return AVERROR(EIO);
            size++;
            type = buf[0];
            if (hdr == RTMP_PS_TWELVEBYTES) {
                if (url_read_complete(h, buf, 4) != 4)
                    return AVERROR(EIO);
                size += 4;
                extra = AV_RL32(buf);
            }
        }
        if (timestamp == 0xFFFFFF) {   /* extended timestamp follows */
            if (url_read_complete(h, buf, 4) != 4)
                return AVERROR(EIO);
            timestamp = AV_RB32(buf);
        }
    }
    /* all header formats except the full 12-byte one carry a delta */
    if (hdr != RTMP_PS_TWELVEBYTES)
        timestamp += prev_pkt[channel_id].timestamp;

    if (ff_rtmp_packet_create(p, channel_id, type, timestamp, data_size))
        return -1;
    p->extra = extra;
    // save history
    prev_pkt[channel_id].channel_id = channel_id;
    prev_pkt[channel_id].type       = type;
    prev_pkt[channel_id].data_size  = data_size;
    prev_pkt[channel_id].ts_delta   = timestamp - prev_pkt[channel_id].timestamp;
    prev_pkt[channel_id].timestamp  = timestamp;
    prev_pkt[channel_id].extra      = extra;
    while (data_size > 0) {
        int toread = FFMIN(data_size, chunk_size);
        if (url_read_complete(h, p->data + offset, toread) != toread) {
            ff_rtmp_packet_destroy(p);
            return AVERROR(EIO);
        }
        data_size -= chunk_size;    /* see NOTE(review) above */
        offset    += chunk_size;
        size      += chunk_size;
        if (data_size > 0) {
            url_read_complete(h, &t, 1); //marker
            size++;
            if (t != (0xC0 + channel_id))
                return -1;
        }
    }
    return size;
}

/**
 * Write one RTMP packet to the connection, choosing the shortest header
 * format that the per-channel history in prev_pkt allows, and splitting the
 * payload into chunk_size-sized chunks with continuation markers.
 *
 * @return number of bytes written (header + payload + markers).
 */
int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
                         int chunk_size, RTMPPacket *prev_pkt)
{
    uint8_t pkt_hdr[16], *p = pkt_hdr;
    int mode = RTMP_PS_TWELVEBYTES;
    int off = 0;
    int size = 0;

    pkt->ts_delta = pkt->timestamp - prev_pkt[pkt->channel_id].timestamp;

    //if channel_id = 0, this is first presentation of prev_pkt, send full hdr.
    if (prev_pkt[pkt->channel_id].channel_id &&
        pkt->extra == prev_pkt[pkt->channel_id].extra) {
        if (pkt->type == prev_pkt[pkt->channel_id].type &&
            pkt->data_size == prev_pkt[pkt->channel_id].data_size) {
            mode = RTMP_PS_FOURBYTES;
            if (pkt->ts_delta == prev_pkt[pkt->channel_id].ts_delta)
                mode = RTMP_PS_ONEBYTE;
        } else {
            mode = RTMP_PS_EIGHTBYTES;
        }
    }

    /* basic header: 1, 2 or 3 bytes depending on channel id range */
    if (pkt->channel_id < 64) {
        bytestream_put_byte(&p, pkt->channel_id | (mode << 6));
    } else if (pkt->channel_id < 64 + 256) {
        bytestream_put_byte(&p, 0               | (mode << 6));
        bytestream_put_byte(&p, pkt->channel_id - 64);
    } else {
        bytestream_put_byte(&p, 1               | (mode << 6));
        bytestream_put_le16(&p, pkt->channel_id - 64);
    }
    if (mode != RTMP_PS_ONEBYTE) {
        uint32_t timestamp = pkt->timestamp;
        if (mode != RTMP_PS_TWELVEBYTES)
            timestamp = pkt->ts_delta;
        bytestream_put_be24(&p, timestamp >= 0xFFFFFF ? 0xFFFFFF : timestamp);
        if (mode != RTMP_PS_FOURBYTES) {
            bytestream_put_be24(&p, pkt->data_size);
            bytestream_put_byte(&p, pkt->type);
            if (mode == RTMP_PS_TWELVEBYTES)
                bytestream_put_le32(&p, pkt->extra);
        }
        if (timestamp >= 0xFFFFFF)     /* extended timestamp */
            bytestream_put_be32(&p, timestamp);
    }
    // save history
    prev_pkt[pkt->channel_id].channel_id = pkt->channel_id;
    prev_pkt[pkt->channel_id].type       = pkt->type;
    prev_pkt[pkt->channel_id].data_size  = pkt->data_size;
    prev_pkt[pkt->channel_id].timestamp  = pkt->timestamp;
    if (mode != RTMP_PS_TWELVEBYTES) {
        prev_pkt[pkt->channel_id].ts_delta   = pkt->ts_delta;
    } else {
        prev_pkt[pkt->channel_id].ts_delta   = pkt->timestamp;
    }
    prev_pkt[pkt->channel_id].extra      = pkt->extra;

    url_write(h, pkt_hdr, p-pkt_hdr);
    size = p - pkt_hdr + pkt->data_size;

    while (off < pkt->data_size) {
        int towrite = FFMIN(chunk_size, pkt->data_size - off);
        url_write(h, pkt->data + off, towrite);
        off += towrite;
        if (off < pkt->data_size) {
            uint8_t marker = 0xC0 | pkt->channel_id;
            url_write(h, &marker, 1);
            size++;
        }
    }
    return size;
}

/* Allocate a packet's payload buffer and initialize its header fields. */
int ff_rtmp_packet_create(RTMPPacket *pkt, int channel_id, RTMPPacketType type,
                          int timestamp, int size)
{
    pkt->data = av_malloc(size);
    if (!pkt->data)
        return AVERROR(ENOMEM);
    pkt->data_size  = size;
    pkt->channel_id = channel_id;
    pkt->type       = type;
    pkt->timestamp  = timestamp;
    pkt->extra      = 0;
    pkt->ts_delta   = 0;

    return 0;
}

/* Free the payload buffer; safe to call with NULL. */
void ff_rtmp_packet_destroy(RTMPPacket *pkt)
{
    if (!pkt)
        return;
    av_freep(&pkt->data);
    pkt->data_size = 0;
}

/**
 * Return the total encoded size in bytes of the AMF value starting at data,
 * or -1 on truncated/invalid input.  Recurses into objects and arrays.
 *
 * NOTE(review): the size fields for STRING/LONG_STRING are added without
 * checking that data_end covers them — callers must treat the result as
 * untrusted for bounds purposes.
 */
int ff_amf_tag_size(const uint8_t *data, const uint8_t *data_end)
{
    const uint8_t *base = data;

    if (data >= data_end)
        return -1;
    switch (*data++) {
    case AMF_DATA_TYPE_NUMBER:      return 9;
    case AMF_DATA_TYPE_BOOL:        return 2;
    case AMF_DATA_TYPE_STRING:      return 3 + AV_RB16(data);
    case AMF_DATA_TYPE_LONG_STRING: return 5 + AV_RB32(data);
    case AMF_DATA_TYPE_NULL:        return 1;
    case AMF_DATA_TYPE_ARRAY:
        data += 4;                  /* skip element count, then parse as object */
        /* fallthrough */
    case AMF_DATA_TYPE_OBJECT:
        for (;;) {
            int size = bytestream_get_be16(&data);
            int t;
            if (!size) {
                data++;             /* consume the OBJECT_END marker */
                break;
            }
            if (data + size >= data_end || data + size < data)
                return -1;
            data += size;
            t = ff_amf_tag_size(data, data_end);
            if (t < 0 || data + t >= data_end)
                return -1;
            data += t;
        }
        return data - base;
    case AMF_DATA_TYPE_OBJECT_END:  return 1;
    default:                        return -1;
    }
}

/**
 * Scan an AMF object for a named field and render its value as a string
 * into dst (numbers via %g, booleans as "true"/"false").
 *
 * @return 0 if the field was found and converted, -1 otherwise.
 *
 * NOTE(review): the leading skip loop dereferences *data before the
 * data < data_end bounds check — reads one byte past the end on
 * pathological input.
 */
int ff_amf_get_field_value(const uint8_t *data, const uint8_t *data_end,
                           const uint8_t *name, uint8_t *dst, int dst_size)
{
    int namelen = strlen(name);
    int len;

    /* skip leading values until the first object */
    while (*data != AMF_DATA_TYPE_OBJECT && data < data_end) {
        len = ff_amf_tag_size(data, data_end);
        if (len < 0)
            len = data_end - data;
        data += len;
    }
    if (data_end - data < 3)
        return -1;
    data++;
    for (;;) {
        int size = bytestream_get_be16(&data);
        if (!size)
            break;
        if (data + size >= data_end || data + size < data)
            return -1;
        data += size;
        if (size == namelen && !memcmp(data-size, name, namelen)) {
            switch (*data++) {
            case AMF_DATA_TYPE_NUMBER:
                snprintf(dst, dst_size, "%g", av_int2dbl(AV_RB64(data)));
                break;
            case AMF_DATA_TYPE_BOOL:
                snprintf(dst, dst_size, "%s", *data ? "true" : "false");
                break;
            case AMF_DATA_TYPE_STRING:
                len = bytestream_get_be16(&data);
                av_strlcpy(dst, data, FFMIN(len+1, dst_size));
                break;
            default:
                return -1;
            }
            return 0;
        }
        len = ff_amf_tag_size(data, data_end);
        if (len < 0 || data + len >= data_end || data + len < data)
            return -1;
        data += len;
    }
    return -1;
}

/* Human-readable name for an RTMP packet type (debug dumps only). */
static const char* rtmp_packet_type(int type)
{
    switch (type) {
    case RTMP_PT_CHUNK_SIZE:   return "chunk size";
    case RTMP_PT_BYTES_READ:   return "bytes read";
    case RTMP_PT_PING:         return "ping";
    case RTMP_PT_SERVER_BW:    return "server bandwidth";
    case RTMP_PT_CLIENT_BW:    return "client bandwidth";
    case RTMP_PT_AUDIO:        return "audio packet";
    case RTMP_PT_VIDEO:        return "video packet";
    case RTMP_PT_FLEX_STREAM:  return "Flex shared stream";
    case RTMP_PT_FLEX_OBJECT:  return "Flex shared object";
    case RTMP_PT_FLEX_MESSAGE: return "Flex shared message";
    case RTMP_PT_NOTIFY:       return "notification";
    case RTMP_PT_SHARED_OBJ:   return "shared object";
    case RTMP_PT_INVOKE:       return "invoke";
    case RTMP_PT_METADATA:     return "metadata";
    default:                   return "unknown";
    }
}

/* Pretty-print one AMF value (recursively for objects/arrays) at debug
 * log level.  Best-effort: returns silently on truncated input. */
static void ff_amf_tag_contents(void *ctx, const uint8_t *data, const uint8_t *data_end)
{
    int size;
    char buf[1024];

    if (data >= data_end)
        return;
    switch (*data++) {
    case AMF_DATA_TYPE_NUMBER:
        av_log(ctx, AV_LOG_DEBUG, " number %g\n", av_int2dbl(AV_RB64(data)));
        return;
    case AMF_DATA_TYPE_BOOL:
        av_log(ctx, AV_LOG_DEBUG, " bool %d\n", *data);
        return;
    case AMF_DATA_TYPE_STRING:
    case AMF_DATA_TYPE_LONG_STRING:
        if (data[-1] == AMF_DATA_TYPE_STRING) {
            size = bytestream_get_be16(&data);
        } else {
            size = bytestream_get_be32(&data);
        }
        size = FFMIN(size, 1023);
        memcpy(buf, data, size);
        buf[size] = 0;
        av_log(ctx, AV_LOG_DEBUG, " string '%s'\n", buf);
        return;
    case AMF_DATA_TYPE_NULL:
        av_log(ctx, AV_LOG_DEBUG, " NULL\n");
        return;
    case AMF_DATA_TYPE_ARRAY:
        data += 4;                  /* skip element count */
        /* fallthrough */
    case AMF_DATA_TYPE_OBJECT:
        av_log(ctx, AV_LOG_DEBUG, " {\n");
        for (;;) {
            int size = bytestream_get_be16(&data);
            int t;
            /* NOTE(review): buf is filled from data before the bounds
             * check below — size is attacker-controlled */
            memcpy(buf, data, size);
            buf[size] = 0;
            if (!size) {
                av_log(ctx, AV_LOG_DEBUG, " }\n");
                data++;
                break;
            }
            if (data + size >= data_end || data + size < data)
                return;
            data += size;
            av_log(ctx, AV_LOG_DEBUG, "  %s: ", buf);
            ff_amf_tag_contents(ctx, data, data_end);
            t = ff_amf_tag_size(data, data_end);
            if (t < 0 || data + t >= data_end)
                return;
            data += t;
        }
        return;
    case AMF_DATA_TYPE_OBJECT_END:
        av_log(ctx, AV_LOG_DEBUG, " }\n");
        return;
    default:
        return;
    }
}

/* Debug dump of a packet: header summary plus a type-specific payload
 * dump (AMF contents for invoke/notify, bandwidth values, or raw hex). */
void ff_rtmp_packet_dump(void *ctx, RTMPPacket *p)
{
    av_log(ctx, AV_LOG_DEBUG, "RTMP packet type '%s'(%d) for channel %d, timestamp %d, extra field %d size %d\n",
           rtmp_packet_type(p->type), p->type, p->channel_id, p->timestamp, p->extra, p->data_size);
    if (p->type == RTMP_PT_INVOKE || p->type == RTMP_PT_NOTIFY) {
        uint8_t *src = p->data, *src_end = p->data + p->data_size;
        while (src < src_end) {
            int sz;
            ff_amf_tag_contents(ctx, src, src_end);
            sz = ff_amf_tag_size(src, src_end);
            if (sz < 0)
                break;
            src += sz;
        }
    } else if (p->type == RTMP_PT_SERVER_BW){
        av_log(ctx, AV_LOG_DEBUG, "Server BW = %d\n", AV_RB32(p->data));
    } else if (p->type == RTMP_PT_CLIENT_BW){
        av_log(ctx, AV_LOG_DEBUG, "Client BW = %d\n", AV_RB32(p->data));
    } else if (p->type != RTMP_PT_AUDIO && p->type != RTMP_PT_VIDEO && p->type != RTMP_PT_METADATA) {
        int i;
        for (i = 0; i < p->data_size; i++)
            av_log(ctx, AV_LOG_DEBUG, " %02X", p->data[i]);
        av_log(ctx, AV_LOG_DEBUG, "\n");
    }
}
123linslouis-android-video-cutter
jni/libavformat/rtmppkt.c
C
asf20
14,613
/* * CRYO APC audio format demuxer * Copyright (c) 2007 Anssi Hannula <anssi.hannula@gmail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <string.h> #include "avformat.h" static int apc_probe(AVProbeData *p) { if (!strncmp(p->buf, "CRYO_APC", 8)) return AVPROBE_SCORE_MAX; return 0; } static int apc_read_header(AVFormatContext *s, AVFormatParameters *ap) { ByteIOContext *pb = s->pb; AVStream *st; get_le32(pb); /* CRYO */ get_le32(pb); /* _APC */ get_le32(pb); /* 1.20 */ st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = CODEC_ID_ADPCM_IMA_WS; get_le32(pb); /* number of samples */ st->codec->sample_rate = get_le32(pb); st->codec->extradata_size = 2 * 4; st->codec->extradata = av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); /* initial predictor values for adpcm decoder */ get_buffer(pb, st->codec->extradata, 2 * 4); st->codec->channels = 1; if (get_le32(pb)) st->codec->channels = 2; st->codec->bits_per_coded_sample = 4; st->codec->bit_rate = st->codec->bits_per_coded_sample * st->codec->channels * st->codec->sample_rate; st->codec->block_align = 1; return 0; } #define MAX_READ_SIZE 4096 static int 
apc_read_packet(AVFormatContext *s, AVPacket *pkt) { if (av_get_packet(s->pb, pkt, MAX_READ_SIZE) <= 0) return AVERROR(EIO); pkt->stream_index = 0; return 0; } AVInputFormat apc_demuxer = { "apc", NULL_IF_CONFIG_SMALL("CRYO APC format"), 0, apc_probe, apc_read_header, apc_read_packet, };
123linslouis-android-video-cutter
jni/libavformat/apc.c
C
asf20
2,530
/*
 * ASF decryption
 * Copyright (c) 2007 Reimar Doeffinger
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVFORMAT_ASFCRYPT_H
#define AVFORMAT_ASFCRYPT_H

#include <inttypes.h>

/**
 * Decrypt len bytes of ASF ("DRM") payload data in place.
 *
 * @param key  20-byte decryption key
 * @param data buffer to decrypt, modified in place
 * @param len  number of bytes in data
 */
void ff_asfcrypt_dec(const uint8_t key[20], uint8_t *data, int len);

#endif /* AVFORMAT_ASFCRYPT_H */
123linslouis-android-video-cutter
jni/libavformat/asfcrypt.h
C
asf20
1,008
/*
 * Multipart JPEG format
 * Copyright (c) 2000, 2001, 2002, 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "avformat.h"

/* Multipart JPEG */

#define BOUNDARY_TAG "ffserver"

/* The multipart strings are compile-time constants; emit them directly
 * instead of formatting them into a temporary buffer. */

/* Emit the opening "--boundary" line of the multipart stream. */
static int mpjpeg_write_header(AVFormatContext *s)
{
    static const char open_boundary[] = "--" BOUNDARY_TAG "\n";

    put_buffer(s->pb, open_boundary, sizeof(open_boundary) - 1);
    put_flush_packet(s->pb);
    return 0;
}

/* Emit one JPEG image as a multipart body part: part header,
 * image data, then the next boundary line. */
static int mpjpeg_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    static const char part_header[]   = "Content-type: image/jpeg\n\n";
    static const char next_boundary[] = "\n--" BOUNDARY_TAG "\n";

    put_buffer(s->pb, part_header, sizeof(part_header) - 1);
    put_buffer(s->pb, pkt->data, pkt->size);
    put_buffer(s->pb, next_boundary, sizeof(next_boundary) - 1);
    put_flush_packet(s->pb);
    return 0;
}

/* Nothing terminates an MJPEG multipart stream. */
static int mpjpeg_write_trailer(AVFormatContext *s)
{
    return 0;
}

AVOutputFormat mpjpeg_muxer = {
    "mpjpeg",
    NULL_IF_CONFIG_SMALL("MIME multipart JPEG format"),
    "multipart/x-mixed-replace;boundary=" BOUNDARY_TAG,
    "mjpg",
    0,
    CODEC_ID_NONE,
    CODEC_ID_MJPEG,
    mpjpeg_write_header,
    mpjpeg_write_packet,
    mpjpeg_write_trailer,
};
123linslouis-android-video-cutter
jni/libavformat/mpjpeg.c
C
asf20
1,911
/* * FFM (ffserver live feed) demuxer * Copyright (c) 2001 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/intreadwrite.h" #include "avformat.h" #include "ffm.h" #if CONFIG_FFSERVER #include <unistd.h> int64_t ffm_read_write_index(int fd) { uint8_t buf[8]; lseek(fd, 8, SEEK_SET); if (read(fd, buf, 8) != 8) return AVERROR(EIO); return AV_RB64(buf); } int ffm_write_write_index(int fd, int64_t pos) { uint8_t buf[8]; int i; for(i=0;i<8;i++) buf[i] = (pos >> (56 - i * 8)) & 0xff; lseek(fd, 8, SEEK_SET); if (write(fd, buf, 8) != 8) return AVERROR(EIO); return 8; } void ffm_set_write_index(AVFormatContext *s, int64_t pos, int64_t file_size) { FFMContext *ffm = s->priv_data; ffm->write_index = pos; ffm->file_size = file_size; } #endif // CONFIG_FFSERVER static int ffm_is_avail_data(AVFormatContext *s, int size) { FFMContext *ffm = s->priv_data; int64_t pos, avail_size; int len; len = ffm->packet_end - ffm->packet_ptr; if (size <= len) return 1; pos = url_ftell(s->pb); if (!ffm->write_index) { if (pos == ffm->file_size) return AVERROR_EOF; avail_size = ffm->file_size - pos; } else { if (pos == ffm->write_index) { /* exactly at the end of stream */ return AVERROR(EAGAIN); } else if (pos < ffm->write_index) { avail_size = ffm->write_index - pos; } else { 
avail_size = (ffm->file_size - pos) + (ffm->write_index - FFM_PACKET_SIZE); } } avail_size = (avail_size / ffm->packet_size) * (ffm->packet_size - FFM_HEADER_SIZE) + len; if (size <= avail_size) return 1; else return AVERROR(EAGAIN); } static int ffm_resync(AVFormatContext *s, int state) { av_log(s, AV_LOG_ERROR, "resyncing\n"); while (state != PACKET_ID) { if (url_feof(s->pb)) { av_log(s, AV_LOG_ERROR, "cannot find FFM syncword\n"); return -1; } state = (state << 8) | get_byte(s->pb); } return 0; } /* first is true if we read the frame header */ static int ffm_read_data(AVFormatContext *s, uint8_t *buf, int size, int header) { FFMContext *ffm = s->priv_data; ByteIOContext *pb = s->pb; int len, fill_size, size1, frame_offset, id; size1 = size; while (size > 0) { redo: len = ffm->packet_end - ffm->packet_ptr; if (len < 0) return -1; if (len > size) len = size; if (len == 0) { if (url_ftell(pb) == ffm->file_size) url_fseek(pb, ffm->packet_size, SEEK_SET); retry_read: id = get_be16(pb); /* PACKET_ID */ if (id != PACKET_ID) if (ffm_resync(s, id) < 0) return -1; fill_size = get_be16(pb); ffm->dts = get_be64(pb); frame_offset = get_be16(pb); get_buffer(pb, ffm->packet, ffm->packet_size - FFM_HEADER_SIZE); ffm->packet_end = ffm->packet + (ffm->packet_size - FFM_HEADER_SIZE - fill_size); if (ffm->packet_end < ffm->packet || frame_offset < 0) return -1; /* if first packet or resynchronization packet, we must handle it specifically */ if (ffm->first_packet || (frame_offset & 0x8000)) { if (!frame_offset) { /* This packet has no frame headers in it */ if (url_ftell(pb) >= ffm->packet_size * 3) { url_fseek(pb, -ffm->packet_size * 2, SEEK_CUR); goto retry_read; } /* This is bad, we cannot find a valid frame header */ return 0; } ffm->first_packet = 0; if ((frame_offset & 0x7fff) < FFM_HEADER_SIZE) return -1; ffm->packet_ptr = ffm->packet + (frame_offset & 0x7fff) - FFM_HEADER_SIZE; if (!header) break; } else { ffm->packet_ptr = ffm->packet; } goto redo; } memcpy(buf, 
ffm->packet_ptr, len); buf += len; ffm->packet_ptr += len; size -= len; header = 0; } return size1 - size; } //#define DEBUG_SEEK /* ensure that acutal seeking happens between FFM_PACKET_SIZE and file_size - FFM_PACKET_SIZE */ static void ffm_seek1(AVFormatContext *s, int64_t pos1) { FFMContext *ffm = s->priv_data; ByteIOContext *pb = s->pb; int64_t pos; pos = FFMIN(pos1, ffm->file_size - FFM_PACKET_SIZE); pos = FFMAX(pos, FFM_PACKET_SIZE); #ifdef DEBUG_SEEK av_log(s, AV_LOG_DEBUG, "seek to %"PRIx64" -> %"PRIx64"\n", pos1, pos); #endif url_fseek(pb, pos, SEEK_SET); } static int64_t get_dts(AVFormatContext *s, int64_t pos) { ByteIOContext *pb = s->pb; int64_t dts; ffm_seek1(s, pos); url_fskip(pb, 4); dts = get_be64(pb); #ifdef DEBUG_SEEK av_log(s, AV_LOG_DEBUG, "dts=%0.6f\n", dts / 1000000.0); #endif return dts; } static void adjust_write_index(AVFormatContext *s) { FFMContext *ffm = s->priv_data; ByteIOContext *pb = s->pb; int64_t pts; //int64_t orig_write_index = ffm->write_index; int64_t pos_min, pos_max; int64_t pts_start; int64_t ptr = url_ftell(pb); pos_min = 0; pos_max = ffm->file_size - 2 * FFM_PACKET_SIZE; pts_start = get_dts(s, pos_min); pts = get_dts(s, pos_max); if (pts - 100000 > pts_start) goto end; ffm->write_index = FFM_PACKET_SIZE; pts_start = get_dts(s, pos_min); pts = get_dts(s, pos_max); if (pts - 100000 <= pts_start) { while (1) { int64_t newpos; int64_t newpts; newpos = ((pos_max + pos_min) / (2 * FFM_PACKET_SIZE)) * FFM_PACKET_SIZE; if (newpos == pos_min) break; newpts = get_dts(s, newpos); if (newpts - 100000 <= pts) { pos_max = newpos; pts = newpts; } else { pos_min = newpos; } } ffm->write_index += pos_max; } //printf("Adjusted write index from %"PRId64" to %"PRId64": pts=%0.6f\n", orig_write_index, ffm->write_index, pts / 1000000.); //printf("pts range %0.6f - %0.6f\n", get_dts(s, 0) / 1000000. , get_dts(s, ffm->file_size - 2 * FFM_PACKET_SIZE) / 1000000. 
); end: url_fseek(pb, ptr, SEEK_SET); } static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap) { FFMContext *ffm = s->priv_data; AVStream *st; ByteIOContext *pb = s->pb; AVCodecContext *codec; int i, nb_streams; uint32_t tag; /* header */ tag = get_le32(pb); if (tag != MKTAG('F', 'F', 'M', '1')) goto fail; ffm->packet_size = get_be32(pb); if (ffm->packet_size != FFM_PACKET_SIZE) goto fail; ffm->write_index = get_be64(pb); /* get also filesize */ if (!url_is_streamed(pb)) { ffm->file_size = url_fsize(pb); if (ffm->write_index) adjust_write_index(s); } else { ffm->file_size = (UINT64_C(1) << 63) - 1; } nb_streams = get_be32(pb); get_be32(pb); /* total bitrate */ /* read each stream */ for(i=0;i<nb_streams;i++) { char rc_eq_buf[128]; st = av_new_stream(s, 0); if (!st) goto fail; av_set_pts_info(st, 64, 1, 1000000); codec = st->codec; /* generic info */ codec->codec_id = get_be32(pb); codec->codec_type = get_byte(pb); /* codec_type */ codec->bit_rate = get_be32(pb); st->quality = get_be32(pb); codec->flags = get_be32(pb); codec->flags2 = get_be32(pb); codec->debug = get_be32(pb); /* specific info */ switch(codec->codec_type) { case AVMEDIA_TYPE_VIDEO: codec->time_base.num = get_be32(pb); codec->time_base.den = get_be32(pb); codec->width = get_be16(pb); codec->height = get_be16(pb); codec->gop_size = get_be16(pb); codec->pix_fmt = get_be32(pb); codec->qmin = get_byte(pb); codec->qmax = get_byte(pb); codec->max_qdiff = get_byte(pb); codec->qcompress = get_be16(pb) / 10000.0; codec->qblur = get_be16(pb) / 10000.0; codec->bit_rate_tolerance = get_be32(pb); codec->rc_eq = av_strdup(get_strz(pb, rc_eq_buf, sizeof(rc_eq_buf))); codec->rc_max_rate = get_be32(pb); codec->rc_min_rate = get_be32(pb); codec->rc_buffer_size = get_be32(pb); codec->i_quant_factor = av_int2dbl(get_be64(pb)); codec->b_quant_factor = av_int2dbl(get_be64(pb)); codec->i_quant_offset = av_int2dbl(get_be64(pb)); codec->b_quant_offset = av_int2dbl(get_be64(pb)); codec->dct_algo = 
get_be32(pb); codec->strict_std_compliance = get_be32(pb); codec->max_b_frames = get_be32(pb); codec->luma_elim_threshold = get_be32(pb); codec->chroma_elim_threshold = get_be32(pb); codec->mpeg_quant = get_be32(pb); codec->intra_dc_precision = get_be32(pb); codec->me_method = get_be32(pb); codec->mb_decision = get_be32(pb); codec->nsse_weight = get_be32(pb); codec->frame_skip_cmp = get_be32(pb); codec->rc_buffer_aggressivity = av_int2dbl(get_be64(pb)); codec->codec_tag = get_be32(pb); codec->thread_count = get_byte(pb); codec->coder_type = get_be32(pb); codec->me_cmp = get_be32(pb); codec->partitions = get_be32(pb); codec->me_subpel_quality = get_be32(pb); codec->me_range = get_be32(pb); codec->keyint_min = get_be32(pb); codec->scenechange_threshold = get_be32(pb); codec->b_frame_strategy = get_be32(pb); codec->qcompress = av_int2dbl(get_be64(pb)); codec->qblur = av_int2dbl(get_be64(pb)); codec->max_qdiff = get_be32(pb); codec->refs = get_be32(pb); codec->directpred = get_be32(pb); break; case AVMEDIA_TYPE_AUDIO: codec->sample_rate = get_be32(pb); codec->channels = get_le16(pb); codec->frame_size = get_le16(pb); codec->sample_fmt = (int16_t) get_le16(pb); break; default: goto fail; } if (codec->flags & CODEC_FLAG_GLOBAL_HEADER) { codec->extradata_size = get_be32(pb); codec->extradata = av_malloc(codec->extradata_size); if (!codec->extradata) return AVERROR(ENOMEM); get_buffer(pb, codec->extradata, codec->extradata_size); } } /* get until end of block reached */ while ((url_ftell(pb) % ffm->packet_size) != 0) get_byte(pb); /* init packet demux */ ffm->packet_ptr = ffm->packet; ffm->packet_end = ffm->packet; ffm->frame_offset = 0; ffm->dts = 0; ffm->read_state = READ_HEADER; ffm->first_packet = 1; return 0; fail: for(i=0;i<s->nb_streams;i++) { st = s->streams[i]; if (st) { av_free(st); } } return -1; } /* return < 0 if eof */ static int ffm_read_packet(AVFormatContext *s, AVPacket *pkt) { int size; FFMContext *ffm = s->priv_data; int duration, ret; 
switch(ffm->read_state) { case READ_HEADER: if ((ret = ffm_is_avail_data(s, FRAME_HEADER_SIZE+4)) < 0) return ret; dprintf(s, "pos=%08"PRIx64" spos=%"PRIx64", write_index=%"PRIx64" size=%"PRIx64"\n", url_ftell(s->pb), s->pb->pos, ffm->write_index, ffm->file_size); if (ffm_read_data(s, ffm->header, FRAME_HEADER_SIZE, 1) != FRAME_HEADER_SIZE) return -1; if (ffm->header[1] & FLAG_DTS) if (ffm_read_data(s, ffm->header+16, 4, 1) != 4) return -1; #if 0 av_hexdump_log(s, AV_LOG_DEBUG, ffm->header, FRAME_HEADER_SIZE); #endif ffm->read_state = READ_DATA; /* fall thru */ case READ_DATA: size = AV_RB24(ffm->header + 2); if ((ret = ffm_is_avail_data(s, size)) < 0) return ret; duration = AV_RB24(ffm->header + 5); av_new_packet(pkt, size); pkt->stream_index = ffm->header[0]; if ((unsigned)pkt->stream_index >= s->nb_streams) { av_log(s, AV_LOG_ERROR, "invalid stream index %d\n", pkt->stream_index); av_free_packet(pkt); ffm->read_state = READ_HEADER; return -1; } pkt->pos = url_ftell(s->pb); if (ffm->header[1] & FLAG_KEY_FRAME) pkt->flags |= AV_PKT_FLAG_KEY; ffm->read_state = READ_HEADER; if (ffm_read_data(s, pkt->data, size, 0) != size) { /* bad case: desynchronized packet. we cancel all the packet loading */ av_free_packet(pkt); return -1; } pkt->pts = AV_RB64(ffm->header+8); if (ffm->header[1] & FLAG_DTS) pkt->dts = pkt->pts - AV_RB32(ffm->header+16); else pkt->dts = pkt->pts; pkt->duration = duration; break; } return 0; } /* seek to a given time in the file. The file read pointer is positioned at or before pts. 
XXX: the following code is quite approximative */ static int ffm_seek(AVFormatContext *s, int stream_index, int64_t wanted_pts, int flags) { FFMContext *ffm = s->priv_data; int64_t pos_min, pos_max, pos; int64_t pts_min, pts_max, pts; double pos1; #ifdef DEBUG_SEEK av_log(s, AV_LOG_DEBUG, "wanted_pts=%0.6f\n", wanted_pts / 1000000.0); #endif /* find the position using linear interpolation (better than dichotomy in typical cases) */ pos_min = FFM_PACKET_SIZE; pos_max = ffm->file_size - FFM_PACKET_SIZE; while (pos_min <= pos_max) { pts_min = get_dts(s, pos_min); pts_max = get_dts(s, pos_max); /* linear interpolation */ pos1 = (double)(pos_max - pos_min) * (double)(wanted_pts - pts_min) / (double)(pts_max - pts_min); pos = (((int64_t)pos1) / FFM_PACKET_SIZE) * FFM_PACKET_SIZE; if (pos <= pos_min) pos = pos_min; else if (pos >= pos_max) pos = pos_max; pts = get_dts(s, pos); /* check if we are lucky */ if (pts == wanted_pts) { goto found; } else if (pts > wanted_pts) { pos_max = pos - FFM_PACKET_SIZE; } else { pos_min = pos + FFM_PACKET_SIZE; } } pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max; found: ffm_seek1(s, pos); /* reset read state */ ffm->read_state = READ_HEADER; ffm->packet_ptr = ffm->packet; ffm->packet_end = ffm->packet; ffm->first_packet = 1; return 0; } static int ffm_probe(AVProbeData *p) { if ( p->buf[0] == 'F' && p->buf[1] == 'F' && p->buf[2] == 'M' && p->buf[3] == '1') return AVPROBE_SCORE_MAX + 1; return 0; } static int ffm_close(AVFormatContext *s) { int i; for (i = 0; i < s->nb_streams; i++) av_freep(&s->streams[i]->codec->rc_eq); return 0; } AVInputFormat ffm_demuxer = { "ffm", NULL_IF_CONFIG_SMALL("FFM (FFserver live feed) format"), sizeof(FFMContext), ffm_probe, ffm_read_header, ffm_read_packet, ffm_close, ffm_seek, };
123linslouis-android-video-cutter
jni/libavformat/ffmdec.c
C
asf20
16,491
/* * Musepack demuxer * Copyright (c) 2006 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavcodec/get_bits.h" #include "avformat.h" #include "id3v2.h" #include "apetag.h" #define MPC_FRAMESIZE 1152 #define DELAY_FRAMES 32 static const int mpc_rate[4] = { 44100, 48000, 37800, 32000 }; typedef struct { int64_t pos; int size, skip; }MPCFrame; typedef struct { int ver; uint32_t curframe, lastframe; uint32_t fcount; MPCFrame *frames; int curbits; int frames_noted; } MPCContext; static int mpc_probe(AVProbeData *p) { const uint8_t *d = p->buf; if (ff_id3v2_match(d)) { d += ff_id3v2_tag_len(d); } if (d+3 < p->buf+p->buf_size) if (d[0] == 'M' && d[1] == 'P' && d[2] == '+' && (d[3] == 0x17 || d[3] == 0x7)) return AVPROBE_SCORE_MAX; return 0; } static int mpc_read_header(AVFormatContext *s, AVFormatParameters *ap) { MPCContext *c = s->priv_data; AVStream *st; int t, ret; int64_t pos = url_ftell(s->pb); t = get_le24(s->pb); if(t != MKTAG('M', 'P', '+', 0)){ uint8_t buf[ID3v2_HEADER_SIZE]; if (url_fseek(s->pb, pos, SEEK_SET) < 0) return -1; ret = get_buffer(s->pb, buf, ID3v2_HEADER_SIZE); if (ret != ID3v2_HEADER_SIZE || !ff_id3v2_match(buf)) { av_log(s, AV_LOG_ERROR, "Not a Musepack file\n"); return -1; } /* skip ID3 tags and try again */ t = 
ff_id3v2_tag_len(buf) - ID3v2_HEADER_SIZE; av_log(s, AV_LOG_DEBUG, "Skipping %d(%X) bytes of ID3 data\n", t, t); url_fskip(s->pb, t); if(get_le24(s->pb) != MKTAG('M', 'P', '+', 0)){ av_log(s, AV_LOG_ERROR, "Not a Musepack file\n"); return -1; } /* read ID3 tags */ if (url_fseek(s->pb, pos, SEEK_SET) < 0) return -1; ff_id3v2_read(s); get_le24(s->pb); } c->ver = get_byte(s->pb); if(c->ver != 0x07 && c->ver != 0x17){ av_log(s, AV_LOG_ERROR, "Can demux Musepack SV7, got version %02X\n", c->ver); return -1; } c->fcount = get_le32(s->pb); if((int64_t)c->fcount * sizeof(MPCFrame) >= UINT_MAX){ av_log(s, AV_LOG_ERROR, "Too many frames, seeking is not possible\n"); return -1; } c->frames = av_malloc(c->fcount * sizeof(MPCFrame)); c->curframe = 0; c->lastframe = -1; c->curbits = 8; c->frames_noted = 0; st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = CODEC_ID_MUSEPACK7; st->codec->channels = 2; st->codec->bits_per_coded_sample = 16; st->codec->extradata_size = 16; st->codec->extradata = av_mallocz(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE); get_buffer(s->pb, st->codec->extradata, 16); st->codec->sample_rate = mpc_rate[st->codec->extradata[2] & 3]; av_set_pts_info(st, 32, MPC_FRAMESIZE, st->codec->sample_rate); /* scan for seekpoints */ st->start_time = 0; st->duration = c->fcount; /* try to read APE tags */ if (!url_is_streamed(s->pb)) { int64_t pos = url_ftell(s->pb); ff_ape_parse_tag(s); url_fseek(s->pb, pos, SEEK_SET); } return 0; } static int mpc_read_packet(AVFormatContext *s, AVPacket *pkt) { MPCContext *c = s->priv_data; int ret, size, size2, curbits, cur = c->curframe; int64_t tmp, pos; if (c->curframe >= c->fcount) return -1; if(c->curframe != c->lastframe + 1){ url_fseek(s->pb, c->frames[c->curframe].pos, SEEK_SET); c->curbits = c->frames[c->curframe].skip; } c->lastframe = c->curframe; c->curframe++; curbits = c->curbits; pos = url_ftell(s->pb); tmp = get_le32(s->pb); 
if(curbits <= 12){ size2 = (tmp >> (12 - curbits)) & 0xFFFFF; }else{ tmp = (tmp << 32) | get_le32(s->pb); size2 = (tmp >> (44 - curbits)) & 0xFFFFF; } curbits += 20; url_fseek(s->pb, pos, SEEK_SET); size = ((size2 + curbits + 31) & ~31) >> 3; if(cur == c->frames_noted){ c->frames[cur].pos = pos; c->frames[cur].size = size; c->frames[cur].skip = curbits - 20; av_add_index_entry(s->streams[0], cur, cur, size, 0, AVINDEX_KEYFRAME); c->frames_noted++; } c->curbits = (curbits + size2) & 0x1F; if (av_new_packet(pkt, size) < 0) return AVERROR(EIO); pkt->data[0] = curbits; pkt->data[1] = (c->curframe > c->fcount); pkt->data[2] = 0; pkt->data[3] = 0; pkt->stream_index = 0; pkt->pts = cur; ret = get_buffer(s->pb, pkt->data + 4, size); if(c->curbits) url_fseek(s->pb, -4, SEEK_CUR); if(ret < size){ av_free_packet(pkt); return AVERROR(EIO); } pkt->size = ret + 4; return 0; } static int mpc_read_close(AVFormatContext *s) { MPCContext *c = s->priv_data; av_freep(&c->frames); return 0; } /** * Seek to the given position * If position is unknown but is within the limits of file * then packets are skipped unless desired position is reached * * Also this function makes use of the fact that timestamp == frameno */ static int mpc_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { AVStream *st = s->streams[stream_index]; MPCContext *c = s->priv_data; AVPacket pkt1, *pkt = &pkt1; int ret; int index = av_index_search_timestamp(st, timestamp - DELAY_FRAMES, flags); uint32_t lastframe; /* if found, seek there */ if (index >= 0){ c->curframe = st->index_entries[index].pos; return 0; } /* if timestamp is out of bounds, return error */ if(timestamp < 0 || timestamp >= c->fcount) return -1; timestamp -= DELAY_FRAMES; /* seek to the furthest known position and read packets until we reach desired position */ lastframe = c->curframe; if(c->frames_noted) c->curframe = c->frames_noted - 1; while(c->curframe < timestamp){ ret = av_read_frame(s, pkt); if (ret < 0){ 
c->curframe = lastframe; return -1; } av_free_packet(pkt); } return 0; } AVInputFormat mpc_demuxer = { "mpc", NULL_IF_CONFIG_SMALL("Musepack"), sizeof(MPCContext), mpc_probe, mpc_read_header, mpc_read_packet, mpc_read_close, mpc_read_seek, .extensions = "mpc", };
123linslouis-android-video-cutter
jni/libavformat/mpc.c
C
asf20
7,214
/* * R3D REDCODE demuxer * Copyright (c) 2008 Baptiste Coudurier <baptiste dot coudurier at gmail dot com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ //#define DEBUG #include "libavutil/intreadwrite.h" #include "avformat.h" typedef struct { unsigned video_offsets_count; unsigned *video_offsets; unsigned rdvo_offset; } R3DContext; typedef struct { unsigned size; uint32_t tag; uint64_t offset; } Atom; static int read_atom(AVFormatContext *s, Atom *atom) { atom->offset = url_ftell(s->pb); atom->size = get_be32(s->pb); if (atom->size < 8) return -1; atom->tag = get_le32(s->pb); dprintf(s, "atom %d %.4s offset %#llx\n", atom->size, (char*)&atom->tag, atom->offset); return atom->size; } static int r3d_read_red1(AVFormatContext *s) { AVStream *st = av_new_stream(s, 0); char filename[258]; int tmp, tmp2; if (!st) return AVERROR(ENOMEM); st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_JPEG2000; tmp = get_byte(s->pb); // major version tmp2 = get_byte(s->pb); // minor version dprintf(s, "version %d.%d\n", tmp, tmp2); tmp = get_be16(s->pb); // unknown dprintf(s, "unknown1 %d\n", tmp); tmp = get_be32(s->pb); av_set_pts_info(st, 32, 1, tmp); tmp = get_be32(s->pb); // filenum dprintf(s, "filenum %d\n", tmp); url_fskip(s->pb, 32); // unknown st->codec->width = 
get_be32(s->pb); st->codec->height = get_be32(s->pb); tmp = get_be16(s->pb); // unknown dprintf(s, "unknown2 %d\n", tmp); st->codec->time_base.den = get_be16(s->pb); st->codec->time_base.num = get_be16(s->pb); tmp = get_byte(s->pb); // audio channels dprintf(s, "audio channels %d\n", tmp); if (tmp > 0) { AVStream *ast = av_new_stream(s, 1); if (!ast) return AVERROR(ENOMEM); ast->codec->codec_type = AVMEDIA_TYPE_AUDIO; ast->codec->codec_id = CODEC_ID_PCM_S32BE; ast->codec->channels = tmp; av_set_pts_info(ast, 32, 1, st->time_base.den); } get_buffer(s->pb, filename, 257); filename[sizeof(filename)-1] = 0; av_metadata_set2(&st->metadata, "filename", filename, 0); dprintf(s, "filename %s\n", filename); dprintf(s, "resolution %dx%d\n", st->codec->width, st->codec->height); dprintf(s, "timescale %d\n", st->time_base.den); dprintf(s, "frame rate %d/%d\n", st->codec->time_base.num, st->codec->time_base.den); return 0; } static int r3d_read_rdvo(AVFormatContext *s, Atom *atom) { R3DContext *r3d = s->priv_data; AVStream *st = s->streams[0]; int i; r3d->video_offsets_count = (atom->size - 8) / 4; r3d->video_offsets = av_malloc(atom->size); if (!r3d->video_offsets) return AVERROR(ENOMEM); for (i = 0; i < r3d->video_offsets_count; i++) { r3d->video_offsets[i] = get_be32(s->pb); if (!r3d->video_offsets[i]) { r3d->video_offsets_count = i; break; } dprintf(s, "video offset %d: %#x\n", i, r3d->video_offsets[i]); } if (st->codec->time_base.den) st->duration = (uint64_t)r3d->video_offsets_count* st->time_base.den*st->codec->time_base.num/st->codec->time_base.den; dprintf(s, "duration %lld\n", st->duration); return 0; } static void r3d_read_reos(AVFormatContext *s) { R3DContext *r3d = s->priv_data; int tmp; r3d->rdvo_offset = get_be32(s->pb); get_be32(s->pb); // rdvs offset get_be32(s->pb); // rdao offset get_be32(s->pb); // rdas offset tmp = get_be32(s->pb); dprintf(s, "num video chunks %d\n", tmp); tmp = get_be32(s->pb); dprintf(s, "num audio chunks %d\n", tmp); url_fskip(s->pb, 
6*4); } static int r3d_read_header(AVFormatContext *s, AVFormatParameters *ap) { R3DContext *r3d = s->priv_data; Atom atom; int ret; if (read_atom(s, &atom) < 0) { av_log(s, AV_LOG_ERROR, "error reading atom\n"); return -1; } if (atom.tag == MKTAG('R','E','D','1')) { if ((ret = r3d_read_red1(s)) < 0) { av_log(s, AV_LOG_ERROR, "error parsing 'red1' atom\n"); return ret; } } else { av_log(s, AV_LOG_ERROR, "could not find 'red1' atom\n"); return -1; } s->data_offset = url_ftell(s->pb); dprintf(s, "data offset %#llx\n", s->data_offset); if (url_is_streamed(s->pb)) return 0; // find REOB/REOF/REOS to load index url_fseek(s->pb, url_fsize(s->pb)-48-8, SEEK_SET); if (read_atom(s, &atom) < 0) av_log(s, AV_LOG_ERROR, "error reading end atom\n"); if (atom.tag != MKTAG('R','E','O','B') && atom.tag != MKTAG('R','E','O','F') && atom.tag != MKTAG('R','E','O','S')) goto out; r3d_read_reos(s); if (r3d->rdvo_offset) { url_fseek(s->pb, r3d->rdvo_offset, SEEK_SET); if (read_atom(s, &atom) < 0) av_log(s, AV_LOG_ERROR, "error reading 'rdvo' atom\n"); if (atom.tag == MKTAG('R','D','V','O')) { if (r3d_read_rdvo(s, &atom) < 0) av_log(s, AV_LOG_ERROR, "error parsing 'rdvo' atom\n"); } } out: url_fseek(s->pb, s->data_offset, SEEK_SET); return 0; } static int r3d_read_redv(AVFormatContext *s, AVPacket *pkt, Atom *atom) { AVStream *st = s->streams[0]; int tmp, tmp2; uint64_t pos = url_ftell(s->pb); unsigned dts; int ret; dts = get_be32(s->pb); tmp = get_be32(s->pb); dprintf(s, "frame num %d\n", tmp); tmp = get_byte(s->pb); // major version tmp2 = get_byte(s->pb); // minor version dprintf(s, "version %d.%d\n", tmp, tmp2); tmp = get_be16(s->pb); // unknown dprintf(s, "unknown %d\n", tmp); if (tmp > 4) { tmp = get_be16(s->pb); // unknown dprintf(s, "unknown %d\n", tmp); tmp = get_be16(s->pb); // unknown dprintf(s, "unknown %d\n", tmp); tmp = get_be32(s->pb); dprintf(s, "width %d\n", tmp); tmp = get_be32(s->pb); dprintf(s, "height %d\n", tmp); tmp = get_be32(s->pb); dprintf(s, "metadata len 
%d\n", tmp); } tmp = atom->size - 8 - (url_ftell(s->pb) - pos); if (tmp < 0) return -1; ret = av_get_packet(s->pb, pkt, tmp); if (ret < 0) { av_log(s, AV_LOG_ERROR, "error reading video packet\n"); return -1; } pkt->stream_index = 0; pkt->dts = dts; if (st->codec->time_base.den) pkt->duration = (uint64_t)st->time_base.den* st->codec->time_base.num/st->codec->time_base.den; dprintf(s, "pkt dts %lld duration %d\n", pkt->dts, pkt->duration); return 0; } static int r3d_read_reda(AVFormatContext *s, AVPacket *pkt, Atom *atom) { AVStream *st = s->streams[1]; int tmp, tmp2, samples, size; uint64_t pos = url_ftell(s->pb); unsigned dts; int ret; dts = get_be32(s->pb); st->codec->sample_rate = get_be32(s->pb); samples = get_be32(s->pb); tmp = get_be32(s->pb); dprintf(s, "packet num %d\n", tmp); tmp = get_be16(s->pb); // unkown dprintf(s, "unknown %d\n", tmp); tmp = get_byte(s->pb); // major version tmp2 = get_byte(s->pb); // minor version dprintf(s, "version %d.%d\n", tmp, tmp2); tmp = get_be32(s->pb); // unknown dprintf(s, "unknown %d\n", tmp); size = atom->size - 8 - (url_ftell(s->pb) - pos); if (size < 0) return -1; ret = av_get_packet(s->pb, pkt, size); if (ret < 0) { av_log(s, AV_LOG_ERROR, "error reading audio packet\n"); return ret; } pkt->stream_index = 1; pkt->dts = dts; pkt->duration = av_rescale(samples, st->time_base.den, st->codec->sample_rate); dprintf(s, "pkt dts %lld duration %d samples %d sample rate %d\n", pkt->dts, pkt->duration, samples, st->codec->sample_rate); return 0; } static int r3d_read_packet(AVFormatContext *s, AVPacket *pkt) { Atom atom; int err = 0; while (!err) { if (read_atom(s, &atom) < 0) { err = -1; break; } switch (atom.tag) { case MKTAG('R','E','D','V'): if (s->streams[0]->discard == AVDISCARD_ALL) goto skip; if (!(err = r3d_read_redv(s, pkt, &atom))) return 0; break; case MKTAG('R','E','D','A'): if (s->nb_streams < 2) return -1; if (s->streams[1]->discard == AVDISCARD_ALL) goto skip; if (!(err = r3d_read_reda(s, pkt, &atom))) return 0; 
break; default: skip: url_fskip(s->pb, atom.size-8); } } return err; } static int r3d_probe(AVProbeData *p) { if (AV_RL32(p->buf + 4) == MKTAG('R','E','D','1')) return AVPROBE_SCORE_MAX; return 0; } static int r3d_seek(AVFormatContext *s, int stream_index, int64_t sample_time, int flags) { AVStream *st = s->streams[0]; // video stream R3DContext *r3d = s->priv_data; int frame_num; if (!st->codec->time_base.num || !st->time_base.den) return -1; frame_num = sample_time*st->codec->time_base.den/ ((int64_t)st->codec->time_base.num*st->time_base.den); dprintf(s, "seek frame num %d timestamp %lld\n", frame_num, sample_time); if (frame_num < r3d->video_offsets_count) { url_fseek(s->pb, r3d->video_offsets_count, SEEK_SET); } else { av_log(s, AV_LOG_ERROR, "could not seek to frame %d\n", frame_num); return -1; } return 0; } static int r3d_close(AVFormatContext *s) { R3DContext *r3d = s->priv_data; av_freep(&r3d->video_offsets); return 0; } AVInputFormat r3d_demuxer = { "r3d", NULL_IF_CONFIG_SMALL("REDCODE R3D format"), sizeof(R3DContext), r3d_probe, r3d_read_header, r3d_read_packet, r3d_close, r3d_seek, };
123linslouis-android-video-cutter
jni/libavformat/r3d.c
C
asf20
10,705
NAME = avformat FFLIBS = avcodec avutil HEADERS = avformat.h avio.h OBJS = allformats.o \ cutils.o \ metadata.o \ metadata_compat.o \ options.o \ os_support.o \ sdp.o \ seek.o \ utils.o \ # muxers/demuxers OBJS-$(CONFIG_AAC_DEMUXER) += raw.o id3v1.o id3v2.o OBJS-$(CONFIG_AC3_DEMUXER) += raw.o OBJS-$(CONFIG_AC3_MUXER) += raw.o OBJS-$(CONFIG_ADTS_MUXER) += adtsenc.o OBJS-$(CONFIG_AEA_DEMUXER) += aea.o raw.o OBJS-$(CONFIG_AIFF_DEMUXER) += aiffdec.o riff.o raw.o OBJS-$(CONFIG_AIFF_MUXER) += aiffenc.o riff.o OBJS-$(CONFIG_AMR_DEMUXER) += amr.o OBJS-$(CONFIG_AMR_MUXER) += amr.o OBJS-$(CONFIG_ANM_DEMUXER) += anm.o OBJS-$(CONFIG_APC_DEMUXER) += apc.o OBJS-$(CONFIG_APE_DEMUXER) += ape.o apetag.o OBJS-$(CONFIG_ASF_DEMUXER) += asfdec.o asf.o asfcrypt.o \ riff.o avlanguage.o OBJS-$(CONFIG_ASF_MUXER) += asfenc.o asf.o riff.o OBJS-$(CONFIG_ASS_DEMUXER) += assdec.o OBJS-$(CONFIG_ASS_MUXER) += assenc.o OBJS-$(CONFIG_AU_DEMUXER) += au.o raw.o OBJS-$(CONFIG_AU_MUXER) += au.o OBJS-$(CONFIG_AVI_DEMUXER) += avidec.o riff.o avi.o OBJS-$(CONFIG_AVI_MUXER) += avienc.o riff.o avi.o OBJS-$(CONFIG_AVISYNTH) += avisynth.o OBJS-$(CONFIG_AVM2_MUXER) += swfenc.o OBJS-$(CONFIG_AVS_DEMUXER) += avs.o vocdec.o voc.o OBJS-$(CONFIG_BETHSOFTVID_DEMUXER) += bethsoftvid.o OBJS-$(CONFIG_BFI_DEMUXER) += bfi.o OBJS-$(CONFIG_BINK_DEMUXER) += bink.o OBJS-$(CONFIG_C93_DEMUXER) += c93.o vocdec.o voc.o OBJS-$(CONFIG_CAF_DEMUXER) += cafdec.o caf.o mov.o riff.o isom.o OBJS-$(CONFIG_CAVSVIDEO_DEMUXER) += raw.o OBJS-$(CONFIG_CDG_DEMUXER) += cdg.o OBJS-$(CONFIG_CRC_MUXER) += crcenc.o OBJS-$(CONFIG_DAUD_DEMUXER) += daud.o OBJS-$(CONFIG_DAUD_MUXER) += daud.o OBJS-$(CONFIG_DIRAC_DEMUXER) += raw.o OBJS-$(CONFIG_DIRAC_MUXER) += raw.o OBJS-$(CONFIG_DNXHD_DEMUXER) += raw.o OBJS-$(CONFIG_DNXHD_MUXER) += raw.o OBJS-$(CONFIG_DSICIN_DEMUXER) += dsicin.o OBJS-$(CONFIG_DTS_DEMUXER) += raw.o id3v2.o OBJS-$(CONFIG_DTS_MUXER) += raw.o OBJS-$(CONFIG_DV_DEMUXER) += dv.o OBJS-$(CONFIG_DV_MUXER) += dvenc.o OBJS-$(CONFIG_DXA_DEMUXER) += 
dxa.o riff.o OBJS-$(CONFIG_EA_CDATA_DEMUXER) += eacdata.o OBJS-$(CONFIG_EA_DEMUXER) += electronicarts.o OBJS-$(CONFIG_EAC3_DEMUXER) += raw.o id3v2.o OBJS-$(CONFIG_EAC3_MUXER) += raw.o OBJS-$(CONFIG_FFM_DEMUXER) += ffmdec.o OBJS-$(CONFIG_FFM_MUXER) += ffmenc.o OBJS-$(CONFIG_FILMSTRIP_DEMUXER) += filmstripdec.o OBJS-$(CONFIG_FILMSTRIP_MUXER) += filmstripenc.o OBJS-$(CONFIG_FLAC_DEMUXER) += flacdec.o raw.o id3v1.o \ id3v2.o oggparsevorbis.o \ vorbiscomment.o OBJS-$(CONFIG_FLAC_MUXER) += flacenc.o flacenc_header.o \ vorbiscomment.o OBJS-$(CONFIG_FLIC_DEMUXER) += flic.o OBJS-$(CONFIG_FLV_DEMUXER) += flvdec.o OBJS-$(CONFIG_FLV_MUXER) += flvenc.o avc.o OBJS-$(CONFIG_FOURXM_DEMUXER) += 4xm.o OBJS-$(CONFIG_FRAMECRC_MUXER) += framecrcenc.o OBJS-$(CONFIG_GIF_MUXER) += gif.o OBJS-$(CONFIG_GSM_DEMUXER) += raw.o id3v2.o OBJS-$(CONFIG_GXF_DEMUXER) += gxf.o OBJS-$(CONFIG_GXF_MUXER) += gxfenc.o audiointerleave.o OBJS-$(CONFIG_H261_DEMUXER) += raw.o OBJS-$(CONFIG_H261_MUXER) += raw.o OBJS-$(CONFIG_H263_DEMUXER) += raw.o OBJS-$(CONFIG_H263_MUXER) += raw.o OBJS-$(CONFIG_H264_DEMUXER) += raw.o OBJS-$(CONFIG_H264_MUXER) += raw.o OBJS-$(CONFIG_IDCIN_DEMUXER) += idcin.o OBJS-$(CONFIG_IFF_DEMUXER) += iff.o OBJS-$(CONFIG_IMAGE2_DEMUXER) += img2.o OBJS-$(CONFIG_IMAGE2_MUXER) += img2.o OBJS-$(CONFIG_IMAGE2PIPE_DEMUXER) += img2.o OBJS-$(CONFIG_IMAGE2PIPE_MUXER) += img2.o OBJS-$(CONFIG_INGENIENT_DEMUXER) += raw.o OBJS-$(CONFIG_IPMOVIE_DEMUXER) += ipmovie.o OBJS-$(CONFIG_ISS_DEMUXER) += iss.o OBJS-$(CONFIG_IV8_DEMUXER) += iv8.o OBJS-$(CONFIG_LMLM4_DEMUXER) += lmlm4.o OBJS-$(CONFIG_M4V_DEMUXER) += raw.o OBJS-$(CONFIG_M4V_MUXER) += raw.o OBJS-$(CONFIG_MATROSKA_DEMUXER) += matroskadec.o matroska.o \ riff.o isom.o rmdec.o rm.o OBJS-$(CONFIG_MATROSKA_MUXER) += matroskaenc.o matroska.o \ riff.o isom.o avc.o \ flacenc_header.o OBJS-$(CONFIG_MJPEG_DEMUXER) += raw.o OBJS-$(CONFIG_MJPEG_MUXER) += raw.o OBJS-$(CONFIG_MLP_DEMUXER) += raw.o id3v2.o OBJS-$(CONFIG_MLP_MUXER) += raw.o OBJS-$(CONFIG_MM_DEMUXER) 
+= mm.o OBJS-$(CONFIG_MMF_DEMUXER) += mmf.o raw.o OBJS-$(CONFIG_MMF_MUXER) += mmf.o riff.o OBJS-$(CONFIG_MOV_DEMUXER) += mov.o riff.o isom.o OBJS-$(CONFIG_MOV_MUXER) += movenc.o riff.o isom.o avc.o movenchint.o OBJS-$(CONFIG_MP2_MUXER) += mp3.o id3v1.o OBJS-$(CONFIG_MP3_DEMUXER) += mp3.o id3v1.o id3v2.o OBJS-$(CONFIG_MP3_MUXER) += mp3.o id3v1.o id3v2.o OBJS-$(CONFIG_MPC_DEMUXER) += mpc.o id3v1.o id3v2.o apetag.o OBJS-$(CONFIG_MPC8_DEMUXER) += mpc8.o OBJS-$(CONFIG_MPEG1SYSTEM_MUXER) += mpegenc.o OBJS-$(CONFIG_MPEG1VCD_MUXER) += mpegenc.o OBJS-$(CONFIG_MPEG2DVD_MUXER) += mpegenc.o OBJS-$(CONFIG_MPEG2VOB_MUXER) += mpegenc.o OBJS-$(CONFIG_MPEG2SVCD_MUXER) += mpegenc.o OBJS-$(CONFIG_MPEG1VIDEO_MUXER) += raw.o OBJS-$(CONFIG_MPEG2VIDEO_MUXER) += raw.o OBJS-$(CONFIG_MPEGPS_DEMUXER) += mpeg.o OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpegts.o OBJS-$(CONFIG_MPEGTS_MUXER) += mpegtsenc.o adtsenc.o OBJS-$(CONFIG_MPEGVIDEO_DEMUXER) += raw.o OBJS-$(CONFIG_MPJPEG_MUXER) += mpjpeg.o OBJS-$(CONFIG_MSNWC_TCP_DEMUXER) += msnwc_tcp.o OBJS-$(CONFIG_MTV_DEMUXER) += mtv.o OBJS-$(CONFIG_MVI_DEMUXER) += mvi.o OBJS-$(CONFIG_MXF_DEMUXER) += mxfdec.o mxf.o OBJS-$(CONFIG_MXF_MUXER) += mxfenc.o mxf.o audiointerleave.o OBJS-$(CONFIG_NC_DEMUXER) += ncdec.o OBJS-$(CONFIG_NSV_DEMUXER) += nsvdec.o OBJS-$(CONFIG_NULL_MUXER) += raw.o OBJS-$(CONFIG_NUT_DEMUXER) += nutdec.o nut.o riff.o OBJS-$(CONFIG_NUT_MUXER) += nutenc.o nut.o riff.o OBJS-$(CONFIG_NUV_DEMUXER) += nuv.o riff.o OBJS-$(CONFIG_OGG_DEMUXER) += oggdec.o \ oggparsedirac.o \ oggparseflac.o \ oggparseogm.o \ oggparseskeleton.o \ oggparsespeex.o \ oggparsetheora.o \ oggparsevorbis.o \ riff.o \ vorbiscomment.o OBJS-$(CONFIG_OGG_MUXER) += oggenc.o \ vorbiscomment.o OBJS-$(CONFIG_OMA_DEMUXER) += oma.o raw.o OBJS-$(CONFIG_PCM_ALAW_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_ALAW_MUXER) += raw.o OBJS-$(CONFIG_PCM_F32BE_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_F32BE_MUXER) += raw.o OBJS-$(CONFIG_PCM_F32LE_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_F32LE_MUXER) += raw.o 
OBJS-$(CONFIG_PCM_F64BE_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_F64BE_MUXER) += raw.o OBJS-$(CONFIG_PCM_F64LE_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_F64LE_MUXER) += raw.o OBJS-$(CONFIG_PCM_MULAW_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_MULAW_MUXER) += raw.o OBJS-$(CONFIG_PCM_S16BE_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_S16BE_MUXER) += raw.o OBJS-$(CONFIG_PCM_S16LE_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_S16LE_MUXER) += raw.o OBJS-$(CONFIG_PCM_S24BE_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_S24BE_MUXER) += raw.o OBJS-$(CONFIG_PCM_S24LE_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_S24LE_MUXER) += raw.o OBJS-$(CONFIG_PCM_S32BE_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_S32BE_MUXER) += raw.o OBJS-$(CONFIG_PCM_S32LE_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_S32LE_MUXER) += raw.o OBJS-$(CONFIG_PCM_S8_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_S8_MUXER) += raw.o OBJS-$(CONFIG_PCM_U16BE_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_U16BE_MUXER) += raw.o OBJS-$(CONFIG_PCM_U16LE_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_U16LE_MUXER) += raw.o OBJS-$(CONFIG_PCM_U24BE_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_U24BE_MUXER) += raw.o OBJS-$(CONFIG_PCM_U24LE_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_U24LE_MUXER) += raw.o OBJS-$(CONFIG_PCM_U32BE_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_U32BE_MUXER) += raw.o OBJS-$(CONFIG_PCM_U32LE_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_U32LE_MUXER) += raw.o OBJS-$(CONFIG_PCM_U8_DEMUXER) += raw.o OBJS-$(CONFIG_PCM_U8_MUXER) += raw.o OBJS-$(CONFIG_PVA_DEMUXER) += pva.o OBJS-$(CONFIG_QCP_DEMUXER) += qcp.o OBJS-$(CONFIG_R3D_DEMUXER) += r3d.o OBJS-$(CONFIG_RAWVIDEO_DEMUXER) += raw.o OBJS-$(CONFIG_RAWVIDEO_MUXER) += raw.o OBJS-$(CONFIG_RL2_DEMUXER) += rl2.o OBJS-$(CONFIG_RM_DEMUXER) += rmdec.o rm.o OBJS-$(CONFIG_RM_MUXER) += rmenc.o rm.o OBJS-$(CONFIG_ROQ_DEMUXER) += idroq.o OBJS-$(CONFIG_ROQ_MUXER) += raw.o OBJS-$(CONFIG_RPL_DEMUXER) += rpl.o OBJS-$(CONFIG_RTP_MUXER) += rtp.o \ rtpenc_aac.o \ rtpenc_amr.o \ rtpenc_h263.o \ rtpenc_mpv.o \ rtpenc.o \ rtpenc_h264.o \ avc.o OBJS-$(CONFIG_RTSP_DEMUXER) += rtsp.o httpauth.o 
OBJS-$(CONFIG_RTSP_MUXER) += rtsp.o rtspenc.o httpauth.o OBJS-$(CONFIG_SDP_DEMUXER) += rtsp.o \ rdt.o \ rtp.o \ rtpdec.o \ rtpdec_amr.o \ rtpdec_asf.o \ rtpdec_h263.o \ rtpdec_h264.o \ rtpdec_xiph.o OBJS-$(CONFIG_SEGAFILM_DEMUXER) += segafilm.o OBJS-$(CONFIG_SHORTEN_DEMUXER) += raw.o id3v2.o OBJS-$(CONFIG_SIFF_DEMUXER) += siff.o OBJS-$(CONFIG_SMACKER_DEMUXER) += smacker.o OBJS-$(CONFIG_SOL_DEMUXER) += sol.o raw.o OBJS-$(CONFIG_SOX_DEMUXER) += soxdec.o raw.o OBJS-$(CONFIG_SOX_MUXER) += soxenc.o OBJS-$(CONFIG_SPDIF_MUXER) += spdif.o OBJS-$(CONFIG_STR_DEMUXER) += psxstr.o OBJS-$(CONFIG_SWF_DEMUXER) += swfdec.o OBJS-$(CONFIG_SWF_MUXER) += swfenc.o OBJS-$(CONFIG_THP_DEMUXER) += thp.o OBJS-$(CONFIG_TIERTEXSEQ_DEMUXER) += tiertexseq.o OBJS-$(CONFIG_TMV_DEMUXER) += tmv.o OBJS-$(CONFIG_TRUEHD_DEMUXER) += raw.o id3v2.o OBJS-$(CONFIG_TRUEHD_MUXER) += raw.o OBJS-$(CONFIG_TTA_DEMUXER) += tta.o id3v1.o id3v2.o OBJS-$(CONFIG_TXD_DEMUXER) += txd.o OBJS-$(CONFIG_VC1_DEMUXER) += raw.o OBJS-$(CONFIG_VC1T_DEMUXER) += vc1test.o OBJS-$(CONFIG_VC1T_MUXER) += vc1testenc.o OBJS-$(CONFIG_VMD_DEMUXER) += sierravmd.o OBJS-$(CONFIG_VOC_DEMUXER) += vocdec.o voc.o OBJS-$(CONFIG_VOC_MUXER) += vocenc.o voc.o OBJS-$(CONFIG_VQF_DEMUXER) += vqf.o OBJS-$(CONFIG_W64_DEMUXER) += wav.o riff.o raw.o OBJS-$(CONFIG_WAV_DEMUXER) += wav.o riff.o raw.o OBJS-$(CONFIG_WAV_MUXER) += wav.o riff.o OBJS-$(CONFIG_WC3_DEMUXER) += wc3movie.o OBJS-$(CONFIG_WEBM_MUXER) += matroskaenc.o matroska.o \ riff.o isom.o avc.o \ flacenc_header.o OBJS-$(CONFIG_WSAUD_DEMUXER) += westwood.o OBJS-$(CONFIG_WSVQA_DEMUXER) += westwood.o OBJS-$(CONFIG_WV_DEMUXER) += wv.o apetag.o id3v1.o OBJS-$(CONFIG_XA_DEMUXER) += xa.o OBJS-$(CONFIG_YOP_DEMUXER) += yop.o OBJS-$(CONFIG_YUV4MPEGPIPE_MUXER) += yuv4mpeg.o OBJS-$(CONFIG_YUV4MPEGPIPE_DEMUXER) += yuv4mpeg.o # external libraries OBJS-$(CONFIG_LIBNUT_DEMUXER) += libnut.o riff.o OBJS-$(CONFIG_LIBNUT_MUXER) += libnut.o riff.o # protocols I/O OBJS+= avio.o aviobuf.o OBJS-$(CONFIG_FILE_PROTOCOL) += 
file.o OBJS-$(CONFIG_GOPHER_PROTOCOL) += gopher.o OBJS-$(CONFIG_HTTP_PROTOCOL) += http.o httpauth.o OBJS-$(CONFIG_PIPE_PROTOCOL) += file.o # external or internal rtmp RTMP-OBJS-$(CONFIG_LIBRTMP) = librtmp.o RTMP-OBJS-$(!CONFIG_LIBRTMP) = rtmpproto.o rtmppkt.o OBJS-$(CONFIG_RTMP_PROTOCOL) += $(RTMP-OBJS-yes) OBJS-$(CONFIG_RTP_PROTOCOL) += rtpproto.o OBJS-$(CONFIG_TCP_PROTOCOL) += tcp.o OBJS-$(CONFIG_UDP_PROTOCOL) += udp.o OBJS-$(CONFIG_CONCAT_PROTOCOL) += concat.o # libavdevice dependencies OBJS-$(CONFIG_JACK_INDEV) += timefilter.o EXAMPLES = output TESTPROGS = timefilter $(SUBDIR)output-example$(EXESUF): ELIBS = -lswscale
123linslouis-android-video-cutter
jni/libavformat/Makefile
Makefile
asf20
15,213
/* * MXF * Copyright (c) 2006 SmartJog S.A., Baptiste Coudurier <baptiste dot coudurier at smartjog dot com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "mxf.h" /** * SMPTE RP224 http://www.smpte-ra.org/mdd/index.html */ const MXFCodecUL ff_mxf_data_definition_uls[] = { { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x01,0x03,0x02,0x02,0x01,0x00,0x00,0x00 }, 13, AVMEDIA_TYPE_VIDEO }, { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x01,0x03,0x02,0x02,0x02,0x00,0x00,0x00 }, 13, AVMEDIA_TYPE_AUDIO }, { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, 0, AVMEDIA_TYPE_DATA }, }; const MXFCodecUL ff_mxf_codec_uls[] = { /* PictureEssenceCoding */ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x01,0x11,0x00 }, 14, CODEC_ID_MPEG2VIDEO }, /* MP@ML Long GoP */ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x01 }, 14, CODEC_ID_MPEG2VIDEO }, /* D-10 50Mbps PAL */ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x03,0x03,0x00 }, 14, CODEC_ID_MPEG2VIDEO }, /* MP@HL Long GoP */ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x04,0x02,0x00 }, 14, CODEC_ID_MPEG2VIDEO }, /* 422P@HL I-Frame */ { { 
0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x20,0x02,0x03 }, 14, CODEC_ID_MPEG4 }, /* XDCAM proxy_pal030926.mxf */ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x01,0x02,0x00 }, 13, CODEC_ID_DVVIDEO }, /* DV25 IEC PAL */ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x07,0x04,0x01,0x02,0x02,0x03,0x01,0x01,0x00 }, 14, CODEC_ID_JPEG2000 }, /* JPEG2000 Codestream */ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x01,0x7F,0x00,0x00,0x00 }, 13, CODEC_ID_RAWVIDEO }, /* Uncompressed */ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x03,0x02,0x00,0x00 }, 14, CODEC_ID_DNXHD }, /* SMPTE VC-3/DNxHD */ /* SoundEssenceCompression */ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 }, 13, CODEC_ID_PCM_S16LE }, /* Uncompressed */ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x7F,0x00,0x00,0x00 }, 13, CODEC_ID_PCM_S16LE }, { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x07,0x04,0x02,0x02,0x01,0x7E,0x00,0x00,0x00 }, 13, CODEC_ID_PCM_S16BE }, /* From Omneon MXF file */ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x04,0x04,0x02,0x02,0x02,0x03,0x01,0x01,0x00 }, 15, CODEC_ID_PCM_ALAW }, /* XDCAM Proxy C0023S01.mxf */ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x02,0x03,0x02,0x01,0x00 }, 15, CODEC_ID_AC3 }, { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x02,0x03,0x02,0x05,0x00 }, 15, CODEC_ID_MP2 }, /* MP2 or MP3 */ //{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x02,0x03,0x02,0x1C,0x00 }, 15, CODEC_ID_DOLBY_E }, /* Dolby-E */ { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, 0, CODEC_ID_NONE }, };
123linslouis-android-video-cutter
jni/libavformat/mxf.c
C
asf20
3,787
/* * audio interleaving prototypes and declarations * * Copyright (c) 2009 Baptiste Coudurier <baptiste dot coudurier at gmail dot com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVFORMAT_AUDIOINTERLEAVE_H #define AVFORMAT_AUDIOINTERLEAVE_H #include "libavutil/fifo.h" #include "avformat.h" typedef struct { AVFifoBuffer *fifo; unsigned fifo_size; ///< size of currently allocated FIFO uint64_t dts; ///< current dts int sample_size; ///< size of one sample all channels included const int *samples_per_frame; ///< must be 0-terminated const int *samples; ///< current samples per frame, pointer to samples_per_frame AVRational time_base; ///< time base of output audio packets } AudioInterleaveContext; int ff_audio_interleave_init(AVFormatContext *s, const int *samples_per_frame, AVRational time_base); void ff_audio_interleave_close(AVFormatContext *s); int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt); /** * Rechunk audio PCM packets per AudioInterleaveContext->samples_per_frame * and interleave them correctly. * The first element of AVStream->priv_data must be AudioInterleaveContext * when using this function. * * @param get_packet function will output a packet when streams are correctly interleaved. 
* @param compare_ts function will compare AVPackets and decide interleaving order. */ int ff_audio_rechunk_interleave(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush, int (*get_packet)(AVFormatContext *, AVPacket *, AVPacket *, int), int (*compare_ts)(AVFormatContext *, AVPacket *, AVPacket *)); #endif /* AVFORMAT_AUDIOINTERLEAVE_H */
123linslouis-android-video-cutter
jni/libavformat/audiointerleave.h
C
asf20
2,472
/* * Buffered file io for ffmpeg system * Copyright (c) 2001 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/avstring.h" #include "avformat.h" #include <fcntl.h> #if HAVE_SETMODE #include <io.h> #endif #include <unistd.h> #include <sys/stat.h> #include <sys/time.h> #include <stdlib.h> #include "os_support.h" /* standard file protocol */ static int file_open(URLContext *h, const char *filename, int flags) { int access; int fd; av_strstart(filename, "file:", &filename); if (flags & URL_RDWR) { access = O_CREAT | O_TRUNC | O_RDWR; } else if (flags & URL_WRONLY) { access = O_CREAT | O_TRUNC | O_WRONLY; } else { access = O_RDONLY; } #ifdef O_BINARY access |= O_BINARY; #endif fd = open(filename, access, 0666); if (fd == -1) return AVERROR(errno); h->priv_data = (void *) (intptr_t) fd; return 0; } static int file_read(URLContext *h, unsigned char *buf, int size) { int fd = (intptr_t) h->priv_data; return read(fd, buf, size); } static int file_write(URLContext *h, unsigned char *buf, int size) { int fd = (intptr_t) h->priv_data; return write(fd, buf, size); } /* XXX: use llseek */ static int64_t file_seek(URLContext *h, int64_t pos, int whence) { int fd = (intptr_t) h->priv_data; if (whence == AVSEEK_SIZE) { struct stat st; int ret = fstat(fd, &st); 
return ret < 0 ? AVERROR(errno) : st.st_size; } return lseek(fd, pos, whence); } static int file_close(URLContext *h) { int fd = (intptr_t) h->priv_data; return close(fd); } static int file_get_handle(URLContext *h) { return (intptr_t) h->priv_data; } URLProtocol file_protocol = { "file", file_open, file_read, file_write, file_seek, file_close, .url_get_file_handle = file_get_handle, }; /* pipe protocol */ static int pipe_open(URLContext *h, const char *filename, int flags) { int fd; char *final; av_strstart(filename, "pipe:", &filename); fd = strtol(filename, &final, 10); if((filename == final) || *final ) {/* No digits found, or something like 10ab */ if (flags & URL_WRONLY) { fd = 1; } else { fd = 0; } } #if HAVE_SETMODE setmode(fd, O_BINARY); #endif h->priv_data = (void *) (intptr_t) fd; h->is_streamed = 1; return 0; } URLProtocol pipe_protocol = { "pipe", pipe_open, file_read, file_write, .url_get_file_handle = file_get_handle, };
123linslouis-android-video-cutter
jni/libavformat/file.c
C
asf20
3,265
/* * NUT (de)muxing via libnut * copyright (c) 2006 Oded Shimon <ods15@ods15.dyndns.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * NUT demuxing and muxing via libnut. * @author Oded Shimon <ods15@ods15.dyndns.org> */ #include "avformat.h" #include "riff.h" #include <libnut.h> #define ID_STRING "nut/multimedia container" #define ID_LENGTH (strlen(ID_STRING) + 1) typedef struct { nut_context_tt * nut; nut_stream_header_tt * s; } NUTContext; static const AVCodecTag nut_tags[] = { { CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') }, { CODEC_ID_MP3, MKTAG('m', 'p', '3', ' ') }, { CODEC_ID_VORBIS, MKTAG('v', 'r', 'b', 's') }, { 0, 0 }, }; #if CONFIG_LIBNUT_MUXER static int av_write(void * h, size_t len, const uint8_t * buf) { ByteIOContext * bc = h; put_buffer(bc, buf, len); //put_flush_packet(bc); return len; } static int nut_write_header(AVFormatContext * avf) { NUTContext * priv = avf->priv_data; ByteIOContext * bc = avf->pb; nut_muxer_opts_tt mopts = { .output = { .priv = bc, .write = av_write, }, .alloc = { av_malloc, av_realloc, av_free }, .write_index = 1, .realtime_stream = 0, .max_distance = 32768, .fti = NULL, }; nut_stream_header_tt * s; int i; priv->s = s = av_mallocz((avf->nb_streams + 1) * sizeof*s); for (i = 0; i < avf->nb_streams; i++) { AVCodecContext * 
codec = avf->streams[i]->codec; int j; int fourcc = 0; int num, denom, ssize; s[i].type = codec->codec_type == AVMEDIA_TYPE_VIDEO ? NUT_VIDEO_CLASS : NUT_AUDIO_CLASS; if (codec->codec_tag) fourcc = codec->codec_tag; else fourcc = ff_codec_get_tag(nut_tags, codec->codec_id); if (!fourcc) { if (codec->codec_type == AVMEDIA_TYPE_VIDEO) fourcc = ff_codec_get_tag(ff_codec_bmp_tags, codec->codec_id); if (codec->codec_type == AVMEDIA_TYPE_AUDIO) fourcc = ff_codec_get_tag(ff_codec_wav_tags, codec->codec_id); } s[i].fourcc_len = 4; s[i].fourcc = av_malloc(s[i].fourcc_len); for (j = 0; j < s[i].fourcc_len; j++) s[i].fourcc[j] = (fourcc >> (j*8)) & 0xFF; ff_parse_specific_params(codec, &num, &ssize, &denom); av_set_pts_info(avf->streams[i], 60, denom, num); s[i].time_base.num = denom; s[i].time_base.den = num; s[i].fixed_fps = 0; s[i].decode_delay = codec->has_b_frames; s[i].codec_specific_len = codec->extradata_size; s[i].codec_specific = codec->extradata; if (codec->codec_type == AVMEDIA_TYPE_VIDEO) { s[i].width = codec->width; s[i].height = codec->height; s[i].sample_width = 0; s[i].sample_height = 0; s[i].colorspace_type = 0; } else { s[i].samplerate_num = codec->sample_rate; s[i].samplerate_denom = 1; s[i].channel_count = codec->channels; } } s[avf->nb_streams].type = -1; priv->nut = nut_muxer_init(&mopts, s, NULL); return 0; } static int nut_write_packet(AVFormatContext * avf, AVPacket * pkt) { NUTContext * priv = avf->priv_data; nut_packet_tt p; p.len = pkt->size; p.stream = pkt->stream_index; p.pts = pkt->pts; p.flags = pkt->flags & AV_PKT_FLAG_KEY ? 
NUT_FLAG_KEY : 0; p.next_pts = 0; nut_write_frame_reorder(priv->nut, &p, pkt->data); return 0; } static int nut_write_trailer(AVFormatContext * avf) { ByteIOContext * bc = avf->pb; NUTContext * priv = avf->priv_data; int i; nut_muxer_uninit_reorder(priv->nut); put_flush_packet(bc); for(i = 0; priv->s[i].type != -1; i++ ) av_freep(&priv->s[i].fourcc); av_freep(&priv->s); return 0; } AVOutputFormat libnut_muxer = { "libnut", "nut format", "video/x-nut", "nut", sizeof(NUTContext), CODEC_ID_VORBIS, CODEC_ID_MPEG4, nut_write_header, nut_write_packet, nut_write_trailer, .flags = AVFMT_GLOBALHEADER, }; #endif /* CONFIG_LIBNUT_MUXER */ static int nut_probe(AVProbeData *p) { if (!memcmp(p->buf, ID_STRING, ID_LENGTH)) return AVPROBE_SCORE_MAX; return 0; } static size_t av_read(void * h, size_t len, uint8_t * buf) { ByteIOContext * bc = h; return get_buffer(bc, buf, len); } static off_t av_seek(void * h, long long pos, int whence) { ByteIOContext * bc = h; if (whence == SEEK_END) { pos = url_fsize(bc) + pos; whence = SEEK_SET; } return url_fseek(bc, pos, whence); } static int nut_read_header(AVFormatContext * avf, AVFormatParameters * ap) { NUTContext * priv = avf->priv_data; ByteIOContext * bc = avf->pb; nut_demuxer_opts_tt dopts = { .input = { .priv = bc, .seek = av_seek, .read = av_read, .eof = NULL, .file_pos = 0, }, .alloc = { av_malloc, av_realloc, av_free }, .read_index = 1, .cache_syncpoints = 1, }; nut_context_tt * nut = priv->nut = nut_demuxer_init(&dopts); nut_stream_header_tt * s; int ret, i; if ((ret = nut_read_headers(nut, &s, NULL))) { av_log(avf, AV_LOG_ERROR, " NUT error: %s\n", nut_error(ret)); nut_demuxer_uninit(nut); return -1; } priv->s = s; for (i = 0; s[i].type != -1 && i < 2; i++) { AVStream * st = av_new_stream(avf, i); int j; for (j = 0; j < s[i].fourcc_len && j < 8; j++) st->codec->codec_tag |= s[i].fourcc[j]<<(j*8); st->codec->has_b_frames = s[i].decode_delay; st->codec->extradata_size = s[i].codec_specific_len; if (st->codec->extradata_size) { 
st->codec->extradata = av_mallocz(st->codec->extradata_size); memcpy(st->codec->extradata, s[i].codec_specific, st->codec->extradata_size); } av_set_pts_info(avf->streams[i], 60, s[i].time_base.num, s[i].time_base.den); st->start_time = 0; st->duration = s[i].max_pts; st->codec->codec_id = ff_codec_get_id(nut_tags, st->codec->codec_tag); switch(s[i].type) { case NUT_AUDIO_CLASS: st->codec->codec_type = AVMEDIA_TYPE_AUDIO; if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = ff_codec_get_id(ff_codec_wav_tags, st->codec->codec_tag); st->codec->channels = s[i].channel_count; st->codec->sample_rate = s[i].samplerate_num / s[i].samplerate_denom; break; case NUT_VIDEO_CLASS: st->codec->codec_type = AVMEDIA_TYPE_VIDEO; if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, st->codec->codec_tag); st->codec->width = s[i].width; st->codec->height = s[i].height; st->sample_aspect_ratio.num = s[i].sample_width; st->sample_aspect_ratio.den = s[i].sample_height; break; } if (st->codec->codec_id == CODEC_ID_NONE) av_log(avf, AV_LOG_ERROR, "Unknown codec?!\n"); } return 0; } static int nut_read_packet(AVFormatContext * avf, AVPacket * pkt) { NUTContext * priv = avf->priv_data; nut_packet_tt pd; int ret; ret = nut_read_next_packet(priv->nut, &pd); if (ret || av_new_packet(pkt, pd.len) < 0) { if (ret != NUT_ERR_EOF) av_log(avf, AV_LOG_ERROR, " NUT error: %s\n", nut_error(ret)); return -1; } if (pd.flags & NUT_FLAG_KEY) pkt->flags |= AV_PKT_FLAG_KEY; pkt->pts = pd.pts; pkt->stream_index = pd.stream; pkt->pos = url_ftell(avf->pb); ret = nut_read_frame(priv->nut, &pd.len, pkt->data); return ret; } static int nut_read_seek(AVFormatContext * avf, int stream_index, int64_t target_ts, int flags) { NUTContext * priv = avf->priv_data; int active_streams[] = { stream_index, -1 }; double time_pos = target_ts * priv->s[stream_index].time_base.num / (double)priv->s[stream_index].time_base.den; if (nut_seek(priv->nut, time_pos, 2*!(flags & 
AVSEEK_FLAG_BACKWARD), active_streams)) return -1; return 0; } static int nut_read_close(AVFormatContext *s) { NUTContext * priv = s->priv_data; nut_demuxer_uninit(priv->nut); return 0; } AVInputFormat libnut_demuxer = { "libnut", NULL_IF_CONFIG_SMALL("NUT format"), sizeof(NUTContext), nut_probe, nut_read_header, nut_read_packet, nut_read_close, nut_read_seek, .extensions = "nut", };
123linslouis-android-video-cutter
jni/libavformat/libnut.c
C
asf20
9,284
/* * Copyright (c) 2009 Michael Niedermayer * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avformat.h" static int probe(AVProbeData *p) { // the single file i have starts with that, i dont know if others do too if( p->buf[0] == 1 && p->buf[1] == 1 && p->buf[2] == 3 && p->buf[3] == 0xB8 && p->buf[4] == 0x80 && p->buf[5] == 0x60 ) return AVPROBE_SCORE_MAX-2; return 0; } static int read_header(AVFormatContext *s, AVFormatParameters *ap) { AVStream *st; st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_MPEG4; st->need_parsing = AVSTREAM_PARSE_FULL; av_set_pts_info(st, 64, 1, 90000); return 0; } static int read_packet(AVFormatContext *s, AVPacket *pkt) { int ret, size, pts, type; retry: type= get_be16(s->pb); // 257 or 258 size= get_be16(s->pb); get_be16(s->pb); //some flags, 0x80 indicates end of frame get_be16(s->pb); //packet number pts=get_be32(s->pb); get_be32(s->pb); //6A 13 E3 88 size -= 12; if(size<1) return -1; if(type==258){ url_fskip(s->pb, size); goto retry; } ret= av_get_packet(s->pb, pkt, size); pkt->pts= pts; pkt->pos-=16; pkt->stream_index = 0; return ret; } AVInputFormat iv8_demuxer = { "iv8", NULL_IF_CONFIG_SMALL("A format generated by IndigoVision 8000 video server"), 0, 
probe, read_header, read_packet, .flags= AVFMT_GENERIC_INDEX, .value = CODEC_ID_MPEG4, };
123linslouis-android-video-cutter
jni/libavformat/iv8.c
C
asf20
2,346
/* * RL2 Format Demuxer * Copyright (c) 2008 Sascha Sommer (saschasommer@freenet.de) * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * RL2 file demuxer * @file * @author Sascha Sommer (saschasommer@freenet.de) * For more information regarding the RL2 file format, visit: * http://wiki.multimedia.cx/index.php?title=RL2 * * extradata: * 2 byte le initial drawing offset within 320x200 viewport * 4 byte le number of used colors * 256 * 3 bytes rgb palette * optional background_frame */ #include "libavutil/intreadwrite.h" #include "avformat.h" #define EXTRADATA1_SIZE (6 + 256 * 3) ///< video base, clr, palette #define FORM_TAG MKBETAG('F', 'O', 'R', 'M') #define RLV2_TAG MKBETAG('R', 'L', 'V', '2') #define RLV3_TAG MKBETAG('R', 'L', 'V', '3') typedef struct Rl2DemuxContext { unsigned int index_pos[2]; ///< indexes in the sample tables } Rl2DemuxContext; /** * check if the file is in rl2 format * @param p probe buffer * @return 0 when the probe buffer does not contain rl2 data, > 0 otherwise */ static int rl2_probe(AVProbeData *p) { if(AV_RB32(&p->buf[0]) != FORM_TAG) return 0; if(AV_RB32(&p->buf[8]) != RLV2_TAG && AV_RB32(&p->buf[8]) != RLV3_TAG) return 0; return AVPROBE_SCORE_MAX; } /** * read rl2 header data and setup the avstreams * @param s demuxer context * @param ap format 
parameters * @return 0 on success, AVERROR otherwise */ static av_cold int rl2_read_header(AVFormatContext *s, AVFormatParameters *ap) { ByteIOContext *pb = s->pb; AVStream *st; unsigned int frame_count; unsigned int audio_frame_counter = 0; unsigned int video_frame_counter = 0; unsigned int back_size; int data_size; unsigned short encoding_method; unsigned short sound_rate; unsigned short rate; unsigned short channels; unsigned short def_sound_size; unsigned int signature; unsigned int pts_den = 11025; /* video only case */ unsigned int pts_num = 1103; unsigned int* chunk_offset = NULL; int* chunk_size = NULL; int* audio_size = NULL; int i; int ret = 0; url_fskip(pb,4); /* skip FORM tag */ back_size = get_le32(pb); /** get size of the background frame */ signature = get_be32(pb); data_size = get_be32(pb); frame_count = get_le32(pb); /* disallow back_sizes and frame_counts that may lead to overflows later */ if(back_size > INT_MAX/2 || frame_count > INT_MAX / sizeof(uint32_t)) return AVERROR_INVALIDDATA; encoding_method = get_le16(pb); sound_rate = get_le16(pb); rate = get_le16(pb); channels = get_le16(pb); def_sound_size = get_le16(pb); /** setup video stream */ st = av_new_stream(s, 0); if(!st) return AVERROR(ENOMEM); st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_RL2; st->codec->codec_tag = 0; /* no fourcc */ st->codec->width = 320; st->codec->height = 200; /** allocate and fill extradata */ st->codec->extradata_size = EXTRADATA1_SIZE; if(signature == RLV3_TAG && back_size > 0) st->codec->extradata_size += back_size; st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); if(!st->codec->extradata) return AVERROR(ENOMEM); if(get_buffer(pb,st->codec->extradata,st->codec->extradata_size) != st->codec->extradata_size) return AVERROR(EIO); /** setup audio stream if present */ if(sound_rate){ pts_num = def_sound_size; pts_den = rate; st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); 
st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = CODEC_ID_PCM_U8; st->codec->codec_tag = 1; st->codec->channels = channels; st->codec->bits_per_coded_sample = 8; st->codec->sample_rate = rate; st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample; st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample / 8; av_set_pts_info(st,32,1,rate); } av_set_pts_info(s->streams[0], 32, pts_num, pts_den); chunk_size = av_malloc(frame_count * sizeof(uint32_t)); audio_size = av_malloc(frame_count * sizeof(uint32_t)); chunk_offset = av_malloc(frame_count * sizeof(uint32_t)); if(!chunk_size || !audio_size || !chunk_offset){ av_free(chunk_size); av_free(audio_size); av_free(chunk_offset); return AVERROR(ENOMEM); } /** read offset and size tables */ for(i=0; i < frame_count;i++) chunk_size[i] = get_le32(pb); for(i=0; i < frame_count;i++) chunk_offset[i] = get_le32(pb); for(i=0; i < frame_count;i++) audio_size[i] = get_le32(pb) & 0xFFFF; /** build the sample index */ for(i=0;i<frame_count;i++){ if(chunk_size[i] < 0 || audio_size[i] > chunk_size[i]){ ret = AVERROR_INVALIDDATA; break; } if(sound_rate && audio_size[i]){ av_add_index_entry(s->streams[1], chunk_offset[i], audio_frame_counter,audio_size[i], 0, AVINDEX_KEYFRAME); audio_frame_counter += audio_size[i] / channels; } av_add_index_entry(s->streams[0], chunk_offset[i] + audio_size[i], video_frame_counter,chunk_size[i]-audio_size[i],0,AVINDEX_KEYFRAME); ++video_frame_counter; } av_free(chunk_size); av_free(audio_size); av_free(chunk_offset); return ret; } /** * read a single audio or video packet * @param s demuxer context * @param pkt the packet to be filled * @return 0 on success, AVERROR otherwise */ static int rl2_read_packet(AVFormatContext *s, AVPacket *pkt) { Rl2DemuxContext *rl2 = s->priv_data; ByteIOContext *pb = s->pb; AVIndexEntry *sample = NULL; int i; int ret = 0; int stream_id = -1; int64_t pos = INT64_MAX; /** check if there is a 
valid video or audio entry that can be used */ for(i=0; i<s->nb_streams; i++){ if(rl2->index_pos[i] < s->streams[i]->nb_index_entries && s->streams[i]->index_entries[ rl2->index_pos[i] ].pos < pos){ sample = &s->streams[i]->index_entries[ rl2->index_pos[i] ]; pos= sample->pos; stream_id= i; } } if(stream_id == -1) return AVERROR(EIO); ++rl2->index_pos[stream_id]; /** position the stream (will probably be there anyway) */ url_fseek(pb, sample->pos, SEEK_SET); /** fill the packet */ ret = av_get_packet(pb, pkt, sample->size); if(ret != sample->size){ av_free_packet(pkt); return AVERROR(EIO); } pkt->stream_index = stream_id; pkt->pts = sample->timestamp; return ret; } /** * seek to a new timestamp * @param s demuxer context * @param stream_index index of the stream that should be seeked * @param timestamp wanted timestamp * @param flags direction and seeking mode * @return 0 on success, -1 otherwise */ static int rl2_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { AVStream *st = s->streams[stream_index]; Rl2DemuxContext *rl2 = s->priv_data; int i; int index = av_index_search_timestamp(st, timestamp, flags); if(index < 0) return -1; rl2->index_pos[stream_index] = index; timestamp = st->index_entries[index].timestamp; for(i=0; i < s->nb_streams; i++){ AVStream *st2 = s->streams[i]; index = av_index_search_timestamp(st2, av_rescale_q(timestamp, st->time_base, st2->time_base), flags | AVSEEK_FLAG_BACKWARD); if(index < 0) index = 0; rl2->index_pos[i] = index; } return 0; } AVInputFormat rl2_demuxer = { "rl2", NULL_IF_CONFIG_SMALL("RL2 format"), sizeof(Rl2DemuxContext), rl2_probe, rl2_read_header, rl2_read_packet, NULL, rl2_read_seek, };
123linslouis-android-video-cutter
jni/libavformat/rl2.c
C
asf20
8,913
/* * SoX native format common data * Copyright (c) 2009 Daniel Verkamp <daniel@drv.nu> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVFORMAT_SOX_H #define AVFORMAT_SOX_H #define SOX_FIXED_HDR (4 + 8 + 8 + 4 + 4) /**< Size of fixed header without magic */ #define SOX_TAG MKTAG('.', 'S', 'o', 'X') #endif /* AVFORMAT_SOX_H */
123linslouis-android-video-cutter
jni/libavformat/sox.h
C
asf20
1,057
/* * Animated GIF muxer * Copyright (c) 2000 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* * First version by Francois Revol revol@free.fr * * Features and limitations: * - currently no compression is performed, * in fact the size of the data is 9/8 the size of the image in 8bpp * - uses only a global standard palette * - tested with IE 5.0, Opera for BeOS, NetPositive (BeOS), and Mozilla (BeOS). * * Reference documents: * http://www.goice.co.jp/member/mo/formats/gif.html * http://astronomy.swin.edu.au/pbourke/dataformats/gif/ * http://www.dcs.ed.ac.uk/home/mxr/gfx/2d/GIF89a.txt * * this url claims to have an LZW algorithm not covered by Unisys patent: * http://www.msg.net/utility/whirlgif/gifencod.html * could help reduce the size of the files _a lot_... * some sites mentions an RLE type compression also. */ #include "avformat.h" /* The GIF format uses reversed order for bitstreams... 
*/ /* at least they don't use PDP_ENDIAN :) */ #define BITSTREAM_WRITER_LE #include "libavcodec/put_bits.h" /* bitstream minipacket size */ #define GIF_CHUNKS 100 /* slows down the decoding (and some browsers don't like it) */ /* update on the 'some browsers don't like it issue from above: this was probably due to missing 'Data Sub-block Terminator' (byte 19) in the app_header */ #define GIF_ADD_APP_HEADER // required to enable looping of animated gif typedef struct { unsigned char r; unsigned char g; unsigned char b; } rgb_triplet; /* we use the standard 216 color palette */ /* this script was used to create the palette: * for r in 00 33 66 99 cc ff; do for g in 00 33 66 99 cc ff; do echo -n " "; for b in 00 33 66 99 cc ff; do * echo -n "{ 0x$r, 0x$g, 0x$b }, "; done; echo ""; done; done */ static const rgb_triplet gif_clut[216] = { { 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x33 }, { 0x00, 0x00, 0x66 }, { 0x00, 0x00, 0x99 }, { 0x00, 0x00, 0xcc }, { 0x00, 0x00, 0xff }, { 0x00, 0x33, 0x00 }, { 0x00, 0x33, 0x33 }, { 0x00, 0x33, 0x66 }, { 0x00, 0x33, 0x99 }, { 0x00, 0x33, 0xcc }, { 0x00, 0x33, 0xff }, { 0x00, 0x66, 0x00 }, { 0x00, 0x66, 0x33 }, { 0x00, 0x66, 0x66 }, { 0x00, 0x66, 0x99 }, { 0x00, 0x66, 0xcc }, { 0x00, 0x66, 0xff }, { 0x00, 0x99, 0x00 }, { 0x00, 0x99, 0x33 }, { 0x00, 0x99, 0x66 }, { 0x00, 0x99, 0x99 }, { 0x00, 0x99, 0xcc }, { 0x00, 0x99, 0xff }, { 0x00, 0xcc, 0x00 }, { 0x00, 0xcc, 0x33 }, { 0x00, 0xcc, 0x66 }, { 0x00, 0xcc, 0x99 }, { 0x00, 0xcc, 0xcc }, { 0x00, 0xcc, 0xff }, { 0x00, 0xff, 0x00 }, { 0x00, 0xff, 0x33 }, { 0x00, 0xff, 0x66 }, { 0x00, 0xff, 0x99 }, { 0x00, 0xff, 0xcc }, { 0x00, 0xff, 0xff }, { 0x33, 0x00, 0x00 }, { 0x33, 0x00, 0x33 }, { 0x33, 0x00, 0x66 }, { 0x33, 0x00, 0x99 }, { 0x33, 0x00, 0xcc }, { 0x33, 0x00, 0xff }, { 0x33, 0x33, 0x00 }, { 0x33, 0x33, 0x33 }, { 0x33, 0x33, 0x66 }, { 0x33, 0x33, 0x99 }, { 0x33, 0x33, 0xcc }, { 0x33, 0x33, 0xff }, { 0x33, 0x66, 0x00 }, { 0x33, 0x66, 0x33 }, { 0x33, 0x66, 0x66 }, { 0x33, 0x66, 0x99 }, { 0x33, 
0x66, 0xcc }, { 0x33, 0x66, 0xff }, { 0x33, 0x99, 0x00 }, { 0x33, 0x99, 0x33 }, { 0x33, 0x99, 0x66 }, { 0x33, 0x99, 0x99 }, { 0x33, 0x99, 0xcc }, { 0x33, 0x99, 0xff }, { 0x33, 0xcc, 0x00 }, { 0x33, 0xcc, 0x33 }, { 0x33, 0xcc, 0x66 }, { 0x33, 0xcc, 0x99 }, { 0x33, 0xcc, 0xcc }, { 0x33, 0xcc, 0xff }, { 0x33, 0xff, 0x00 }, { 0x33, 0xff, 0x33 }, { 0x33, 0xff, 0x66 }, { 0x33, 0xff, 0x99 }, { 0x33, 0xff, 0xcc }, { 0x33, 0xff, 0xff }, { 0x66, 0x00, 0x00 }, { 0x66, 0x00, 0x33 }, { 0x66, 0x00, 0x66 }, { 0x66, 0x00, 0x99 }, { 0x66, 0x00, 0xcc }, { 0x66, 0x00, 0xff }, { 0x66, 0x33, 0x00 }, { 0x66, 0x33, 0x33 }, { 0x66, 0x33, 0x66 }, { 0x66, 0x33, 0x99 }, { 0x66, 0x33, 0xcc }, { 0x66, 0x33, 0xff }, { 0x66, 0x66, 0x00 }, { 0x66, 0x66, 0x33 }, { 0x66, 0x66, 0x66 }, { 0x66, 0x66, 0x99 }, { 0x66, 0x66, 0xcc }, { 0x66, 0x66, 0xff }, { 0x66, 0x99, 0x00 }, { 0x66, 0x99, 0x33 }, { 0x66, 0x99, 0x66 }, { 0x66, 0x99, 0x99 }, { 0x66, 0x99, 0xcc }, { 0x66, 0x99, 0xff }, { 0x66, 0xcc, 0x00 }, { 0x66, 0xcc, 0x33 }, { 0x66, 0xcc, 0x66 }, { 0x66, 0xcc, 0x99 }, { 0x66, 0xcc, 0xcc }, { 0x66, 0xcc, 0xff }, { 0x66, 0xff, 0x00 }, { 0x66, 0xff, 0x33 }, { 0x66, 0xff, 0x66 }, { 0x66, 0xff, 0x99 }, { 0x66, 0xff, 0xcc }, { 0x66, 0xff, 0xff }, { 0x99, 0x00, 0x00 }, { 0x99, 0x00, 0x33 }, { 0x99, 0x00, 0x66 }, { 0x99, 0x00, 0x99 }, { 0x99, 0x00, 0xcc }, { 0x99, 0x00, 0xff }, { 0x99, 0x33, 0x00 }, { 0x99, 0x33, 0x33 }, { 0x99, 0x33, 0x66 }, { 0x99, 0x33, 0x99 }, { 0x99, 0x33, 0xcc }, { 0x99, 0x33, 0xff }, { 0x99, 0x66, 0x00 }, { 0x99, 0x66, 0x33 }, { 0x99, 0x66, 0x66 }, { 0x99, 0x66, 0x99 }, { 0x99, 0x66, 0xcc }, { 0x99, 0x66, 0xff }, { 0x99, 0x99, 0x00 }, { 0x99, 0x99, 0x33 }, { 0x99, 0x99, 0x66 }, { 0x99, 0x99, 0x99 }, { 0x99, 0x99, 0xcc }, { 0x99, 0x99, 0xff }, { 0x99, 0xcc, 0x00 }, { 0x99, 0xcc, 0x33 }, { 0x99, 0xcc, 0x66 }, { 0x99, 0xcc, 0x99 }, { 0x99, 0xcc, 0xcc }, { 0x99, 0xcc, 0xff }, { 0x99, 0xff, 0x00 }, { 0x99, 0xff, 0x33 }, { 0x99, 0xff, 0x66 }, { 0x99, 0xff, 0x99 }, { 0x99, 0xff, 0xcc }, { 
0x99, 0xff, 0xff }, { 0xcc, 0x00, 0x00 }, { 0xcc, 0x00, 0x33 }, { 0xcc, 0x00, 0x66 }, { 0xcc, 0x00, 0x99 }, { 0xcc, 0x00, 0xcc }, { 0xcc, 0x00, 0xff }, { 0xcc, 0x33, 0x00 }, { 0xcc, 0x33, 0x33 }, { 0xcc, 0x33, 0x66 }, { 0xcc, 0x33, 0x99 }, { 0xcc, 0x33, 0xcc }, { 0xcc, 0x33, 0xff }, { 0xcc, 0x66, 0x00 }, { 0xcc, 0x66, 0x33 }, { 0xcc, 0x66, 0x66 }, { 0xcc, 0x66, 0x99 }, { 0xcc, 0x66, 0xcc }, { 0xcc, 0x66, 0xff }, { 0xcc, 0x99, 0x00 }, { 0xcc, 0x99, 0x33 }, { 0xcc, 0x99, 0x66 }, { 0xcc, 0x99, 0x99 }, { 0xcc, 0x99, 0xcc }, { 0xcc, 0x99, 0xff }, { 0xcc, 0xcc, 0x00 }, { 0xcc, 0xcc, 0x33 }, { 0xcc, 0xcc, 0x66 }, { 0xcc, 0xcc, 0x99 }, { 0xcc, 0xcc, 0xcc }, { 0xcc, 0xcc, 0xff }, { 0xcc, 0xff, 0x00 }, { 0xcc, 0xff, 0x33 }, { 0xcc, 0xff, 0x66 }, { 0xcc, 0xff, 0x99 }, { 0xcc, 0xff, 0xcc }, { 0xcc, 0xff, 0xff }, { 0xff, 0x00, 0x00 }, { 0xff, 0x00, 0x33 }, { 0xff, 0x00, 0x66 }, { 0xff, 0x00, 0x99 }, { 0xff, 0x00, 0xcc }, { 0xff, 0x00, 0xff }, { 0xff, 0x33, 0x00 }, { 0xff, 0x33, 0x33 }, { 0xff, 0x33, 0x66 }, { 0xff, 0x33, 0x99 }, { 0xff, 0x33, 0xcc }, { 0xff, 0x33, 0xff }, { 0xff, 0x66, 0x00 }, { 0xff, 0x66, 0x33 }, { 0xff, 0x66, 0x66 }, { 0xff, 0x66, 0x99 }, { 0xff, 0x66, 0xcc }, { 0xff, 0x66, 0xff }, { 0xff, 0x99, 0x00 }, { 0xff, 0x99, 0x33 }, { 0xff, 0x99, 0x66 }, { 0xff, 0x99, 0x99 }, { 0xff, 0x99, 0xcc }, { 0xff, 0x99, 0xff }, { 0xff, 0xcc, 0x00 }, { 0xff, 0xcc, 0x33 }, { 0xff, 0xcc, 0x66 }, { 0xff, 0xcc, 0x99 }, { 0xff, 0xcc, 0xcc }, { 0xff, 0xcc, 0xff }, { 0xff, 0xff, 0x00 }, { 0xff, 0xff, 0x33 }, { 0xff, 0xff, 0x66 }, { 0xff, 0xff, 0x99 }, { 0xff, 0xff, 0xcc }, { 0xff, 0xff, 0xff }, }; /* GIF header */ static int gif_image_write_header(ByteIOContext *pb, int width, int height, int loop_count, uint32_t *palette) { int i; unsigned int v; put_tag(pb, "GIF"); put_tag(pb, "89a"); put_le16(pb, width); put_le16(pb, height); put_byte(pb, 0xf7); /* flags: global clut, 256 entries */ put_byte(pb, 0x1f); /* background color index */ put_byte(pb, 0); /* aspect ratio */ /* the global 
palette */ if (!palette) { put_buffer(pb, (const unsigned char *)gif_clut, 216*3); for(i=0;i<((256-216)*3);i++) put_byte(pb, 0); } else { for(i=0;i<256;i++) { v = palette[i]; put_byte(pb, (v >> 16) & 0xff); put_byte(pb, (v >> 8) & 0xff); put_byte(pb, (v) & 0xff); } } /* update: this is the 'NETSCAPE EXTENSION' that allows for looped animated gif see http://members.aol.com/royalef/gifabout.htm#net-extension byte 1 : 33 (hex 0x21) GIF Extension code byte 2 : 255 (hex 0xFF) Application Extension Label byte 3 : 11 (hex (0x0B) Length of Application Block (eleven bytes of data to follow) bytes 4 to 11 : "NETSCAPE" bytes 12 to 14 : "2.0" byte 15 : 3 (hex 0x03) Length of Data Sub-Block (three bytes of data to follow) byte 16 : 1 (hex 0x01) bytes 17 to 18 : 0 to 65535, an unsigned integer in lo-hi byte format. This indicate the number of iterations the loop should be executed. bytes 19 : 0 (hex 0x00) a Data Sub-block Terminator */ /* application extension header */ #ifdef GIF_ADD_APP_HEADER if (loop_count >= 0 && loop_count <= 65535) { put_byte(pb, 0x21); put_byte(pb, 0xff); put_byte(pb, 0x0b); put_tag(pb, "NETSCAPE2.0"); // bytes 4 to 14 put_byte(pb, 0x03); // byte 15 put_byte(pb, 0x01); // byte 16 put_le16(pb, (uint16_t)loop_count); put_byte(pb, 0x00); // byte 19 } #endif return 0; } /* this is maybe slow, but allows for extensions */ static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b) { return (((r) / 47) % 6) * 6 * 6 + (((g) / 47) % 6) * 6 + (((b) / 47) % 6); } static int gif_image_write_image(ByteIOContext *pb, int x1, int y1, int width, int height, const uint8_t *buf, int linesize, int pix_fmt) { PutBitContext p; uint8_t buffer[200]; /* 100 * 9 / 8 = 113 */ int i, left, w, v; const uint8_t *ptr; /* image block */ put_byte(pb, 0x2c); put_le16(pb, x1); put_le16(pb, y1); put_le16(pb, width); put_le16(pb, height); put_byte(pb, 0x00); /* flags */ /* no local clut */ put_byte(pb, 0x08); left= width * height; init_put_bits(&p, buffer, 130); /* * the 
thing here is the bitstream is written as little packets, with a size byte before * but it's still the same bitstream between packets (no flush !) */ ptr = buf; w = width; while(left>0) { put_bits(&p, 9, 0x0100); /* clear code */ for(i=(left<GIF_CHUNKS)?left:GIF_CHUNKS;i;i--) { if (pix_fmt == PIX_FMT_RGB24) { v = gif_clut_index(ptr[0], ptr[1], ptr[2]); ptr+=3; } else { v = *ptr++; } put_bits(&p, 9, v); if (--w == 0) { w = width; buf += linesize; ptr = buf; } } if(left<=GIF_CHUNKS) { put_bits(&p, 9, 0x101); /* end of stream */ flush_put_bits(&p); } if(put_bits_ptr(&p) - p.buf > 0) { put_byte(pb, put_bits_ptr(&p) - p.buf); /* byte count of the packet */ put_buffer(pb, p.buf, put_bits_ptr(&p) - p.buf); /* the actual buffer */ p.buf_ptr = p.buf; /* dequeue the bytes off the bitstream */ } left-=GIF_CHUNKS; } put_byte(pb, 0x00); /* end of image block */ return 0; } typedef struct { int64_t time, file_time; uint8_t buffer[100]; /* data chunks */ } GIFContext; static int gif_write_header(AVFormatContext *s) { GIFContext *gif = s->priv_data; ByteIOContext *pb = s->pb; AVCodecContext *enc, *video_enc; int i, width, height, loop_count /*, rate*/; /* XXX: do we reject audio streams or just ignore them ? if(s->nb_streams > 1) return -1; */ gif->time = 0; gif->file_time = 0; video_enc = NULL; for(i=0;i<s->nb_streams;i++) { enc = s->streams[i]->codec; if (enc->codec_type != AVMEDIA_TYPE_AUDIO) video_enc = enc; } if (!video_enc) { av_free(gif); return -1; } else { width = video_enc->width; height = video_enc->height; loop_count = s->loop_output; // rate = video_enc->time_base.den; } if (video_enc->pix_fmt != PIX_FMT_RGB24) { av_log(s, AV_LOG_ERROR, "ERROR: gif only handles the rgb24 pixel format. 
Use -pix_fmt rgb24.\n"); return AVERROR(EIO); } gif_image_write_header(pb, width, height, loop_count, NULL); put_flush_packet(s->pb); return 0; } static int gif_write_video(AVFormatContext *s, AVCodecContext *enc, const uint8_t *buf, int size) { ByteIOContext *pb = s->pb; GIFContext *gif = s->priv_data; int jiffies; int64_t delay; /* graphic control extension block */ put_byte(pb, 0x21); put_byte(pb, 0xf9); put_byte(pb, 0x04); /* block size */ put_byte(pb, 0x04); /* flags */ /* 1 jiffy is 1/70 s */ /* the delay_time field indicates the number of jiffies - 1 */ delay = gif->file_time - gif->time; /* XXX: should use delay, in order to be more accurate */ /* instead of using the same rounded value each time */ /* XXX: don't even remember if I really use it for now */ jiffies = (70*enc->time_base.num/enc->time_base.den) - 1; put_le16(pb, jiffies); put_byte(pb, 0x1f); /* transparent color index */ put_byte(pb, 0x00); gif_image_write_image(pb, 0, 0, enc->width, enc->height, buf, enc->width * 3, PIX_FMT_RGB24); put_flush_packet(s->pb); return 0; } static int gif_write_packet(AVFormatContext *s, AVPacket *pkt) { AVCodecContext *codec = s->streams[pkt->stream_index]->codec; if (codec->codec_type == AVMEDIA_TYPE_AUDIO) return 0; /* just ignore audio */ else return gif_write_video(s, codec, pkt->data, pkt->size); } static int gif_write_trailer(AVFormatContext *s) { ByteIOContext *pb = s->pb; put_byte(pb, 0x3b); put_flush_packet(s->pb); return 0; } AVOutputFormat gif_muxer = { "gif", NULL_IF_CONFIG_SMALL("GIF Animation"), "image/gif", "gif", sizeof(GIFContext), CODEC_ID_NONE, CODEC_ID_RAWVIDEO, gif_write_header, gif_write_packet, gif_write_trailer, };
123linslouis-android-video-cutter
jni/libavformat/gif.c
C
asf20
14,808
/** Copyright (C) 2005 Michael Ahlberg, Måns Rullgård Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
**/ #include <stdlib.h> #include "libavutil/intreadwrite.h" #include "libavcodec/get_bits.h" #include "libavcodec/bytestream.h" #include "avformat.h" #include "oggdec.h" #include "riff.h" static int ogm_header(AVFormatContext *s, int idx) { struct ogg *ogg = s->priv_data; struct ogg_stream *os = ogg->streams + idx; AVStream *st = s->streams[idx]; const uint8_t *p = os->buf + os->pstart; uint64_t time_unit; uint64_t spu; uint32_t default_len; if(!(*p & 1)) return 0; if(*p == 1) { p++; if(*p == 'v'){ int tag; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; p += 8; tag = bytestream_get_le32(&p); st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, tag); st->codec->codec_tag = tag; } else if (*p == 't') { st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE; st->codec->codec_id = CODEC_ID_TEXT; p += 12; } else { uint8_t acid[5]; int cid; st->codec->codec_type = AVMEDIA_TYPE_AUDIO; p += 8; bytestream_get_buffer(&p, acid, 4); acid[4] = 0; cid = strtol(acid, NULL, 16); st->codec->codec_id = ff_codec_get_id(ff_codec_wav_tags, cid); st->need_parsing = AVSTREAM_PARSE_FULL; } p += 4; /* useless size field */ time_unit = bytestream_get_le64(&p); spu = bytestream_get_le64(&p); default_len = bytestream_get_le32(&p); p += 8; /* buffersize + bits_per_sample */ if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){ st->codec->width = bytestream_get_le32(&p); st->codec->height = bytestream_get_le32(&p); st->codec->time_base.den = spu * 10000000; st->codec->time_base.num = time_unit; st->time_base = st->codec->time_base; } else { st->codec->channels = bytestream_get_le16(&p); p += 2; /* block_align */ st->codec->bit_rate = bytestream_get_le32(&p) * 8; st->codec->sample_rate = spu * 10000000 / time_unit; st->time_base.num = 1; st->time_base.den = st->codec->sample_rate; } } else if (*p == 3) { if (os->psize > 8) ff_vorbis_comment(s, &st->metadata, p+7, os->psize-8); } return 1; } static int ogm_dshow_header(AVFormatContext *s, int idx) { struct ogg *ogg = s->priv_data; struct ogg_stream *os = 
ogg->streams + idx; AVStream *st = s->streams[idx]; uint8_t *p = os->buf + os->pstart; uint32_t t; if(!(*p & 1)) return 0; if(*p != 1) return 1; t = AV_RL32(p + 96); if(t == 0x05589f80){ st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, AV_RL32(p + 68)); st->codec->time_base.den = 10000000; st->codec->time_base.num = AV_RL64(p + 164); st->codec->width = AV_RL32(p + 176); st->codec->height = AV_RL32(p + 180); } else if(t == 0x05589f81){ st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = ff_codec_get_id(ff_codec_wav_tags, AV_RL16(p + 124)); st->codec->channels = AV_RL16(p + 126); st->codec->sample_rate = AV_RL32(p + 128); st->codec->bit_rate = AV_RL32(p + 132) * 8; } return 1; } static int ogm_packet(AVFormatContext *s, int idx) { struct ogg *ogg = s->priv_data; struct ogg_stream *os = ogg->streams + idx; uint8_t *p = os->buf + os->pstart; int lb; if(*p & 8) os->pflags |= AV_PKT_FLAG_KEY; lb = ((*p & 2) << 1) | ((*p >> 6) & 3); os->pstart += lb + 1; os->psize -= lb + 1; while (lb--) os->pduration += p[lb+1] << (lb*8); return 0; } const struct ogg_codec ff_ogm_video_codec = { .magic = "\001video", .magicsize = 6, .header = ogm_header, .packet = ogm_packet, .granule_is_start = 1, }; const struct ogg_codec ff_ogm_audio_codec = { .magic = "\001audio", .magicsize = 6, .header = ogm_header, .packet = ogm_packet, .granule_is_start = 1, }; const struct ogg_codec ff_ogm_text_codec = { .magic = "\001text", .magicsize = 5, .header = ogm_header, .packet = ogm_packet, .granule_is_start = 1, }; const struct ogg_codec ff_ogm_old_codec = { .magic = "\001Direct Show Samples embedded in Ogg", .magicsize = 35, .header = ogm_dshow_header, .packet = ogm_packet, .granule_is_start = 1, };
123linslouis-android-video-cutter
jni/libavformat/oggparseogm.c
C
asf20
5,813
/* * VQF demuxer * Copyright (c) 2009 Vitor Sessak * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avformat.h" #include "libavutil/intreadwrite.h" typedef struct VqfContext { int frame_bit_len; uint8_t last_frame_bits; int remaining_bits; } VqfContext; static int vqf_probe(AVProbeData *probe_packet) { if (AV_RL32(probe_packet->buf) != MKTAG('T','W','I','N')) return 0; if (!memcmp(probe_packet->buf + 4, "97012000", 8)) return AVPROBE_SCORE_MAX; if (!memcmp(probe_packet->buf + 4, "00052200", 8)) return AVPROBE_SCORE_MAX; return AVPROBE_SCORE_MAX/2; } static void add_metadata(AVFormatContext *s, const char *tag, unsigned int tag_len, unsigned int remaining) { int len = FFMIN(tag_len, remaining); char *buf; if (len == UINT_MAX) return; buf = av_malloc(len+1); if (!buf) return; get_buffer(s->pb, buf, len); buf[len] = 0; av_metadata_set2(&s->metadata, tag, buf, AV_METADATA_DONT_STRDUP_VAL); } static int vqf_read_header(AVFormatContext *s, AVFormatParameters *ap) { VqfContext *c = s->priv_data; AVStream *st = av_new_stream(s, 0); int chunk_tag; int rate_flag = -1; int header_size; int read_bitrate = 0; int size; if (!st) return AVERROR(ENOMEM); url_fskip(s->pb, 12); header_size = get_be32(s->pb); st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = CODEC_ID_TWINVQ; 
st->start_time = 0; do { int len; chunk_tag = get_le32(s->pb); if (chunk_tag == MKTAG('D','A','T','A')) break; len = get_be32(s->pb); if ((unsigned) len > INT_MAX/2) { av_log(s, AV_LOG_ERROR, "Malformed header\n"); return -1; } header_size -= 8; switch(chunk_tag){ case MKTAG('C','O','M','M'): st->codec->channels = get_be32(s->pb) + 1; read_bitrate = get_be32(s->pb); rate_flag = get_be32(s->pb); url_fskip(s->pb, len-12); st->codec->bit_rate = read_bitrate*1000; st->codec->bits_per_coded_sample = 16; break; case MKTAG('N','A','M','E'): add_metadata(s, "title" , len, header_size); break; case MKTAG('(','c',')',' '): add_metadata(s, "copyright", len, header_size); break; case MKTAG('A','U','T','H'): add_metadata(s, "author" , len, header_size); break; case MKTAG('A','L','B','M'): add_metadata(s, "album" , len, header_size); break; case MKTAG('T','R','C','K'): add_metadata(s, "track" , len, header_size); break; case MKTAG('C','O','M','T'): add_metadata(s, "comment" , len, header_size); break; case MKTAG('F','I','L','E'): add_metadata(s, "filename" , len, header_size); break; case MKTAG('D','S','I','Z'): add_metadata(s, "size" , len, header_size); break; case MKTAG('D','A','T','E'): add_metadata(s, "date" , len, header_size); break; case MKTAG('G','E','N','R'): add_metadata(s, "genre" , len, header_size); break; default: av_log(s, AV_LOG_ERROR, "Unknown chunk: %c%c%c%c\n", ((char*)&chunk_tag)[0], ((char*)&chunk_tag)[1], ((char*)&chunk_tag)[2], ((char*)&chunk_tag)[3]); url_fskip(s->pb, FFMIN(len, header_size)); break; } header_size -= len; } while (header_size >= 0); switch (rate_flag) { case -1: av_log(s, AV_LOG_ERROR, "COMM tag not found!\n"); return -1; case 44: st->codec->sample_rate = 44100; break; case 22: st->codec->sample_rate = 22050; break; case 11: st->codec->sample_rate = 11025; break; default: st->codec->sample_rate = rate_flag*1000; break; } switch (((st->codec->sample_rate/1000) << 8) + read_bitrate/st->codec->channels) { case (11<<8) + 8 : case (8 <<8) + 8 
: case (11<<8) + 10: case (22<<8) + 32: size = 512; break; case (16<<8) + 16: case (22<<8) + 20: case (22<<8) + 24: size = 1024; break; case (44<<8) + 40: case (44<<8) + 48: size = 2048; break; default: av_log(s, AV_LOG_ERROR, "Mode not suported: %d Hz, %d kb/s.\n", st->codec->sample_rate, st->codec->bit_rate); return -1; } c->frame_bit_len = st->codec->bit_rate*size/st->codec->sample_rate; av_set_pts_info(st, 64, 1, st->codec->sample_rate); return 0; } static int vqf_read_packet(AVFormatContext *s, AVPacket *pkt) { VqfContext *c = s->priv_data; int ret; int size = (c->frame_bit_len - c->remaining_bits + 7)>>3; pkt->pos = url_ftell(s->pb); pkt->stream_index = 0; if (av_new_packet(pkt, size+2) < 0) return AVERROR(EIO); pkt->data[0] = 8 - c->remaining_bits; // Number of bits to skip pkt->data[1] = c->last_frame_bits; ret = get_buffer(s->pb, pkt->data+2, size); if (ret<=0) { av_free_packet(pkt); return AVERROR(EIO); } c->last_frame_bits = pkt->data[size+1]; c->remaining_bits = (size << 3) - c->frame_bit_len + c->remaining_bits; return size+2; } static int vqf_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { VqfContext *c = s->priv_data; AVStream *st; int ret; int64_t pos; st = s->streams[stream_index]; pos = av_rescale_rnd(timestamp * st->codec->bit_rate, st->time_base.num, st->time_base.den * (int64_t)c->frame_bit_len, (flags & AVSEEK_FLAG_BACKWARD) ? AV_ROUND_DOWN : AV_ROUND_UP); pos *= c->frame_bit_len; st->cur_dts = av_rescale(pos, st->time_base.den, st->codec->bit_rate * (int64_t)st->time_base.num); if ((ret = url_fseek(s->pb, ((pos-7) >> 3) + s->data_offset, SEEK_SET)) < 0) return ret; c->remaining_bits = -7 - ((pos-7)&7); return 0; } AVInputFormat vqf_demuxer = { "vqf", NULL_IF_CONFIG_SMALL("Nippon Telegraph and Telephone Corporation (NTT) TwinVQ"), sizeof(VqfContext), vqf_probe, vqf_read_header, vqf_read_packet, NULL, vqf_read_seek, .extensions = "vqf", };
123linslouis-android-video-cutter
jni/libavformat/vqf.c
C
asf20
7,459
/* * RTMP network protocol * Copyright (c) 2010 Howard Chu * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * RTMP protocol based on http://rtmpdump.mplayerhq.hu/ librtmp */ #include "avformat.h" #include <librtmp/rtmp.h> #include <librtmp/log.h> static void rtmp_log(int level, const char *fmt, va_list args) { switch (level) { default: case RTMP_LOGCRIT: level = AV_LOG_FATAL; break; case RTMP_LOGERROR: level = AV_LOG_ERROR; break; case RTMP_LOGWARNING: level = AV_LOG_WARNING; break; case RTMP_LOGINFO: level = AV_LOG_INFO; break; case RTMP_LOGDEBUG: level = AV_LOG_VERBOSE; break; case RTMP_LOGDEBUG2: level = AV_LOG_DEBUG; break; } av_vlog(NULL, level, fmt, args); av_log(NULL, level, "\n"); } static int rtmp_close(URLContext *s) { RTMP *r = s->priv_data; RTMP_Close(r); av_free(r); return 0; } /** * Opens RTMP connection and verifies that the stream can be played. * * URL syntax: rtmp://server[:port][/app][/playpath][ keyword=value]... * where 'app' is first one or two directories in the path * (e.g. /ondemand/, /flash/live/, etc.) * and 'playpath' is a file name (the rest of the path, * may be prefixed with "mp4:") * * Additional RTMP library options may be appended as * space-separated key-value pairs. 
*/ static int rtmp_open(URLContext *s, const char *uri, int flags) { RTMP *r; int rc; r = av_mallocz(sizeof(RTMP)); if (!r) return AVERROR(ENOMEM); switch (av_log_get_level()) { default: case AV_LOG_FATAL: rc = RTMP_LOGCRIT; break; case AV_LOG_ERROR: rc = RTMP_LOGERROR; break; case AV_LOG_WARNING: rc = RTMP_LOGWARNING; break; case AV_LOG_INFO: rc = RTMP_LOGINFO; break; case AV_LOG_VERBOSE: rc = RTMP_LOGDEBUG; break; case AV_LOG_DEBUG: rc = RTMP_LOGDEBUG2; break; } RTMP_LogSetLevel(rc); RTMP_LogSetCallback(rtmp_log); RTMP_Init(r); if (!RTMP_SetupURL(r, s->filename)) { rc = -1; goto fail; } if (flags & URL_WRONLY) r->Link.protocol |= RTMP_FEATURE_WRITE; if (!RTMP_Connect(r, NULL) || !RTMP_ConnectStream(r, 0)) { rc = -1; goto fail; } s->priv_data = r; s->is_streamed = 1; return 0; fail: av_free(r); return rc; } static int rtmp_write(URLContext *s, uint8_t *buf, int size) { RTMP *r = s->priv_data; return RTMP_Write(r, buf, size); } static int rtmp_read(URLContext *s, uint8_t *buf, int size) { RTMP *r = s->priv_data; return RTMP_Read(r, buf, size); } static int rtmp_read_pause(URLContext *s, int pause) { RTMP *r = s->priv_data; if (pause) r->m_pauseStamp = r->m_channelTimestamp[r->m_mediaChannel]; if (!RTMP_SendPause(r, pause, r->m_pauseStamp)) return -1; return 0; } static int64_t rtmp_read_seek(URLContext *s, int stream_index, int64_t timestamp, int flags) { RTMP *r = s->priv_data; if (flags & AVSEEK_FLAG_BYTE) return AVERROR(ENOSYS); /* seeks are in milliseconds */ if (stream_index < 0) timestamp = av_rescale_rnd(timestamp, 1000, AV_TIME_BASE, flags & AVSEEK_FLAG_BACKWARD ? 
AV_ROUND_DOWN : AV_ROUND_UP); if (!RTMP_SendSeek(r, timestamp)) return -1; return timestamp; } static int rtmp_get_file_handle(URLContext *s) { RTMP *r = s->priv_data; return r->m_sb.sb_socket; } URLProtocol rtmp_protocol = { "rtmp", rtmp_open, rtmp_read, rtmp_write, NULL, /* seek */ rtmp_close, NULL, /* next */ rtmp_read_pause, rtmp_read_seek, rtmp_get_file_handle }; URLProtocol rtmpt_protocol = { "rtmpt", rtmp_open, rtmp_read, rtmp_write, NULL, /* seek */ rtmp_close, NULL, /* next */ rtmp_read_pause, rtmp_read_seek, rtmp_get_file_handle }; URLProtocol rtmpe_protocol = { "rtmpe", rtmp_open, rtmp_read, rtmp_write, NULL, /* seek */ rtmp_close, NULL, /* next */ rtmp_read_pause, rtmp_read_seek, rtmp_get_file_handle }; URLProtocol rtmpte_protocol = { "rtmpte", rtmp_open, rtmp_read, rtmp_write, NULL, /* seek */ rtmp_close, NULL, /* next */ rtmp_read_pause, rtmp_read_seek, rtmp_get_file_handle }; URLProtocol rtmps_protocol = { "rtmps", rtmp_open, rtmp_read, rtmp_write, NULL, /* seek */ rtmp_close, NULL, /* next */ rtmp_read_pause, rtmp_read_seek, rtmp_get_file_handle };
123linslouis-android-video-cutter
jni/libavformat/librtmp.c
C
asf20
5,538
/** Copyright (C) 2005 Matthieu CASTET, Alex Beregszaszi Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
**/ #include <stdlib.h> #include "libavutil/bswap.h" #include "libavcodec/get_bits.h" #include "avformat.h" #include "oggdec.h" struct theora_params { int gpshift; int gpmask; unsigned version; }; static int theora_header (AVFormatContext * s, int idx) { struct ogg *ogg = s->priv_data; struct ogg_stream *os = ogg->streams + idx; AVStream *st = s->streams[idx]; struct theora_params *thp = os->private; int cds = st->codec->extradata_size + os->psize + 2; uint8_t *cdp; if(!(os->buf[os->pstart] & 0x80)) return 0; if(!thp){ thp = av_mallocz(sizeof(*thp)); os->private = thp; } if (os->buf[os->pstart] == 0x80) { GetBitContext gb; int width, height; init_get_bits(&gb, os->buf + os->pstart, os->psize*8); skip_bits_long(&gb, 7*8); /* 0x80"theora" */ thp->version = get_bits_long(&gb, 24); if (thp->version < 0x030100) { av_log(s, AV_LOG_ERROR, "Too old or unsupported Theora (%x)\n", thp->version); return -1; } width = get_bits(&gb, 16) << 4; height = get_bits(&gb, 16) << 4; avcodec_set_dimensions(st->codec, width, height); if (thp->version >= 0x030400) skip_bits(&gb, 100); if (thp->version >= 0x030200) { width = get_bits_long(&gb, 24); height = get_bits_long(&gb, 24); if ( width <= st->codec->width && width > st->codec->width-16 && height <= st->codec->height && height > st->codec->height-16) avcodec_set_dimensions(st->codec, width, height); skip_bits(&gb, 16); } st->codec->time_base.den = get_bits_long(&gb, 32); st->codec->time_base.num = get_bits_long(&gb, 32); if (!(st->codec->time_base.num > 0 && st->codec->time_base.den > 0)) { av_log(s, AV_LOG_WARNING, "Invalid time base in theora stream, assuming 25 FPS\n"); st->codec->time_base.num = 1; st->codec->time_base.den = 25; } st->time_base = st->codec->time_base; st->sample_aspect_ratio.num = get_bits_long(&gb, 24); st->sample_aspect_ratio.den = get_bits_long(&gb, 24); if (thp->version >= 0x030200) skip_bits_long(&gb, 38); if (thp->version >= 0x304000) skip_bits(&gb, 2); thp->gpshift = get_bits(&gb, 5); thp->gpmask = (1 << 
thp->gpshift) - 1; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_THEORA; st->need_parsing = AVSTREAM_PARSE_HEADERS; } else if (os->buf[os->pstart] == 0x83) { ff_vorbis_comment (s, &st->metadata, os->buf + os->pstart + 7, os->psize - 8); } st->codec->extradata = av_realloc (st->codec->extradata, cds + FF_INPUT_BUFFER_PADDING_SIZE); cdp = st->codec->extradata + st->codec->extradata_size; *cdp++ = os->psize >> 8; *cdp++ = os->psize & 0xff; memcpy (cdp, os->buf + os->pstart, os->psize); st->codec->extradata_size = cds; return 1; } static uint64_t theora_gptopts(AVFormatContext *ctx, int idx, uint64_t gp, int64_t *dts) { struct ogg *ogg = ctx->priv_data; struct ogg_stream *os = ogg->streams + idx; struct theora_params *thp = os->private; uint64_t iframe = gp >> thp->gpshift; uint64_t pframe = gp & thp->gpmask; if (thp->version < 0x030201) iframe++; if(!pframe) os->pflags |= AV_PKT_FLAG_KEY; if (dts) *dts = iframe + pframe; return iframe + pframe; } const struct ogg_codec ff_theora_codec = { .magic = "\200theora", .magicsize = 7, .header = theora_header, .gptopts = theora_gptopts };
123linslouis-android-video-cutter
jni/libavformat/oggparsetheora.c
C
asf20
4,994
/*
 * "Real" compatible muxer and demuxer.
 * Copyright (c) 2000, 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVFORMAT_RM_H
#define AVFORMAT_RM_H

#include "avformat.h"

/* Shared tables; contents are defined in the implementation files. */
extern const char * const ff_rm_metadata[4];
extern const unsigned char ff_sipr_subpk_size[4];

/* Opaque per-stream state shared between the RM demuxer and RDT code. */
typedef struct RMStream RMStream;

/** Allocate an opaque RMStream; release it with ff_rm_free_rmstream(). */
RMStream *ff_rm_alloc_rmstream (void);
void      ff_rm_free_rmstream  (RMStream *rms);

/** Input format for Realmedia-style RTSP streams. */
extern AVInputFormat rdt_demuxer;

/**
 * Read the MDPR chunk, which contains stream-specific codec initialization
 * parameters.
 *
 * @param s    context containing RMContext and ByteIOContext for stream reading
 * @param pb   context to read the data from
 * @param st   the stream that the MDPR chunk belongs to and where to store the
 *             parameters read from the chunk into
 * @param rst  real-specific stream information
 * @param codec_data_size size of the MDPR chunk
 * @return 0 on success, errno codes on error
 */
int ff_rm_read_mdpr_codecdata (AVFormatContext *s, ByteIOContext *pb,
                               AVStream *st, RMStream *rst,
                               int codec_data_size);

/**
 * Parse one rm-stream packet from the input bytestream.
 *
 * @param s     context containing RMContext and ByteIOContext for stream reading
 * @param pb    context to read the data from
 * @param st    stream to which the packet to be read belongs
 * @param rst   Real-specific stream information
 * @param len   packet length to read from the input
 * @param pkt   packet location to store the parsed packet data
 * @param seq   pointer to an integer containing the sequence number, may be
 *              updated
 * @param flags the packet flags
 * @param ts    timestamp of the current packet
 * @return <0 on error, 0 if a packet was placed in the pkt pointer. A
 *         value >0 means that no data was placed in pkt, but that cached
 *         data is available by calling ff_rm_retrieve_cache().
 */
int ff_rm_parse_packet (AVFormatContext *s, ByteIOContext *pb,
                        AVStream *st, RMStream *rst, int len,
                        AVPacket *pkt, int *seq, int flags, int64_t ts);

/**
 * Retrieve one cached packet from the rm-context. The real container can
 * store several packets (as interpreted by the codec) in a single container
 * packet, which means the demuxer holds some back when the first container
 * packet is parsed and returned. The result is that rm->audio_pkt_cnt is
 * a positive number, the amount of cached packets. Using this function, each
 * of those packets can be retrieved sequentially.
 *
 * @param s   context containing RMContext and ByteIOContext for stream reading
 * @param pb  context to read the data from
 * @param st  stream that this packet belongs to
 * @param rst Real-specific stream information
 * @param pkt location to store the packet data
 * @return the number of samples left for subsequent calls to this same
 *         function, or 0 if all samples have been retrieved.
 */
int ff_rm_retrieve_cache (AVFormatContext *s, ByteIOContext *pb,
                          AVStream *st, RMStream *rst, AVPacket *pkt);

/**
 * Perform 4-bit block reordering for SIPR data.
 *
 * @param buf SIPR data
 */
void ff_rm_reorder_sipr_data(uint8_t *buf, int sub_packet_h, int framesize);

#endif /* AVFORMAT_RM_H */
123linslouis-android-video-cutter
jni/libavformat/rm.h
C
asf20
4,059
/*
 * Renderware TeXture Dictionary (.txd) demuxer
 * Copyright (c) 2007 Ivo van Poorten
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "avformat.h"

/* RenderWare section (chunk) ids and version markers */
#define TXD_FILE            0x16
#define TXD_INFO            0x01
#define TXD_EXTRA           0x03
#define TXD_TEXTURE         0x15
#define TXD_TEXTURE_DATA    0x01
#define TXD_MARKER          0x1803ffff
#define TXD_MARKER2         0x1003ffff

/* Probe: a TXD file starts with a TXD_FILE chunk whose version field
 * (bytes 8..11) is one of the two known RenderWare markers. */
static int txd_probe(AVProbeData * pd) {
    if (AV_RL32(pd->buf  ) == TXD_FILE &&
       (AV_RL32(pd->buf+8) == TXD_MARKER || AV_RL32(pd->buf+8) == TXD_MARKER2))
        return AVPROBE_SCORE_MAX;
    return 0;
}

/* Create the single video stream with a fixed 5 fps time base; frame
 * parameters are extracted later from the compressed bitstream. */
static int txd_read_header(AVFormatContext *s, AVFormatParameters *ap) {
    AVStream *st;

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = CODEC_ID_TXD;
    st->codec->time_base.den = 5;
    st->codec->time_base.num = 1;
    /* the parameters will be extracted from the compressed bitstream */
    return 0;
}

/* Walk the chunk tree until a payload-carrying chunk is found and return
 * its contents as one packet.  The switch below uses deliberate
 * fallthroughs — do not reorder the cases. */
static int txd_read_packet(AVFormatContext *s, AVPacket *pkt) {
    ByteIOContext *pb = s->pb;
    unsigned int id, chunk_size, marker;
    int ret;

next_chunk:
    id         = get_le32(pb);
    chunk_size = get_le32(pb);
    marker     = get_le32(pb);

    if (url_feof(s->pb))
        return AVERROR_EOF;
    if (marker != TXD_MARKER && marker != TXD_MARKER2) {
        av_log(s, AV_LOG_ERROR, "marker does not match\n");
        return AVERROR_INVALIDDATA;
    }

    switch (id) {
    case TXD_INFO:
        if (chunk_size > 100)
            break;              /* large info chunks are returned as packets */
        /* fall through: small info chunks are merely skipped */
    case TXD_EXTRA:
        url_fskip(s->pb, chunk_size);
        /* fall through */
    case TXD_FILE:
    case TXD_TEXTURE:
        goto next_chunk;        /* nothing skipped: descend into child chunks */
    default:
        av_log(s, AV_LOG_ERROR, "unknown chunk id %i\n", id);
        return AVERROR_INVALIDDATA;
    }

    ret = av_get_packet(s->pb, pkt, chunk_size);
    if (ret < 0)
        return ret;
    pkt->stream_index = 0;

    return 0;
}

AVInputFormat txd_demuxer =
{
    "txd",
    NULL_IF_CONFIG_SMALL("Renderware TeXture Dictionary"),
    0,
    txd_probe,
    txd_read_header,
    txd_read_packet,
};
123linslouis-android-video-cutter
jni/libavformat/txd.c
C
asf20
2,922
/*
 * Matroska common data
 * Copyright (c) 2003-2004 The ffmpeg Project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "matroska.h"

/* Mapping between Matroska CodecID strings and FFmpeg codec ids.
 * Several entries intentionally share a name (e.g. the PCM variants) or
 * an id (e.g. the MPEG-4 profiles).  The list is terminated by an entry
 * with an empty name and CODEC_ID_NONE. */
const CodecTags ff_mkv_codec_tags[]={
    {"A_AAC"            , CODEC_ID_AAC},
    {"A_AC3"            , CODEC_ID_AC3},
    {"A_DTS"            , CODEC_ID_DTS},
    {"A_EAC3"           , CODEC_ID_EAC3},
    {"A_FLAC"           , CODEC_ID_FLAC},
    {"A_MLP"            , CODEC_ID_MLP},
    {"A_MPEG/L2"        , CODEC_ID_MP2},
    {"A_MPEG/L1"        , CODEC_ID_MP2},
    {"A_MPEG/L3"        , CODEC_ID_MP3},
    {"A_PCM/FLOAT/IEEE" , CODEC_ID_PCM_F32LE},
    {"A_PCM/FLOAT/IEEE" , CODEC_ID_PCM_F64LE},
    {"A_PCM/INT/BIG"    , CODEC_ID_PCM_S16BE},
    {"A_PCM/INT/BIG"    , CODEC_ID_PCM_S24BE},
    {"A_PCM/INT/BIG"    , CODEC_ID_PCM_S32BE},
    {"A_PCM/INT/LIT"    , CODEC_ID_PCM_S16LE},
    {"A_PCM/INT/LIT"    , CODEC_ID_PCM_S24LE},
    {"A_PCM/INT/LIT"    , CODEC_ID_PCM_S32LE},
    {"A_PCM/INT/LIT"    , CODEC_ID_PCM_U8},
    {"A_QUICKTIME/QDM2" , CODEC_ID_QDM2},
    {"A_REAL/14_4"      , CODEC_ID_RA_144},
    {"A_REAL/28_8"      , CODEC_ID_RA_288},
    {"A_REAL/ATRC"      , CODEC_ID_ATRAC3},
    {"A_REAL/COOK"      , CODEC_ID_COOK},
    {"A_REAL/SIPR"      , CODEC_ID_SIPR},
    {"A_TRUEHD"         , CODEC_ID_TRUEHD},
    {"A_TTA1"           , CODEC_ID_TTA},
    {"A_VORBIS"         , CODEC_ID_VORBIS},
    {"A_WAVPACK4"       , CODEC_ID_WAVPACK},

    {"S_TEXT/UTF8"      , CODEC_ID_TEXT},
    {"S_TEXT/ASCII"     , CODEC_ID_TEXT},
    {"S_TEXT/ASS"       , CODEC_ID_SSA},
    {"S_TEXT/SSA"       , CODEC_ID_SSA},
    {"S_ASS"            , CODEC_ID_SSA},
    {"S_SSA"            , CODEC_ID_SSA},
    {"S_VOBSUB"         , CODEC_ID_DVD_SUBTITLE},
    {"S_HDMV/PGS"       , CODEC_ID_HDMV_PGS_SUBTITLE},

    {"V_DIRAC"          , CODEC_ID_DIRAC},
    {"V_MJPEG"          , CODEC_ID_MJPEG},
    {"V_MPEG1"          , CODEC_ID_MPEG1VIDEO},
    {"V_MPEG2"          , CODEC_ID_MPEG2VIDEO},
    {"V_MPEG4/ISO/ASP"  , CODEC_ID_MPEG4},
    {"V_MPEG4/ISO/AP"   , CODEC_ID_MPEG4},
    {"V_MPEG4/ISO/SP"   , CODEC_ID_MPEG4},
    {"V_MPEG4/ISO/AVC"  , CODEC_ID_H264},
    {"V_MPEG4/MS/V3"    , CODEC_ID_MSMPEG4V3},
    {"V_REAL/RV10"      , CODEC_ID_RV10},
    {"V_REAL/RV20"      , CODEC_ID_RV20},
    {"V_REAL/RV30"      , CODEC_ID_RV30},
    {"V_REAL/RV40"      , CODEC_ID_RV40},
    {"V_SNOW"           , CODEC_ID_SNOW},
    {"V_THEORA"         , CODEC_ID_THEORA},
    {"V_UNCOMPRESSED"   , CODEC_ID_RAWVIDEO},
    {"V_VP8"            , CODEC_ID_VP8},

    {""                 , CODEC_ID_NONE}  /* terminator */
};

/* MIME types used for Matroska attachments; terminated by an empty name. */
const CodecMime ff_mkv_mime_tags[] = {
    {"text/plain"                 , CODEC_ID_TEXT},
    {"image/gif"                  , CODEC_ID_GIF},
    {"image/jpeg"                 , CODEC_ID_MJPEG},
    {"image/png"                  , CODEC_ID_PNG},
    {"image/tiff"                 , CODEC_ID_TIFF},
    {"application/x-truetype-font", CODEC_ID_TTF},
    {"application/x-font"         , CODEC_ID_TTF},

    {""                           , CODEC_ID_NONE}  /* terminator */
};

/* Matroska tag name -> generic FFmpeg metadata key conversion table. */
const AVMetadataConv ff_mkv_metadata_conv[] = {
    { "LEAD_PERFORMER", "performer" },
    { "PART_NUMBER"   , "track"     },
    { 0 }
};
123linslouis-android-video-cutter
jni/libavformat/matroska.c
C
asf20
3,855
/*
 * RTP input/output format
 * Copyright (c) 2002 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avformat.h"
#include "rtp.h"

//#define DEBUG

/* from http://www.iana.org/assignments/rtp-parameters
 * last updated 05 January 2005
 *
 * payload types >= 96 are dynamic;
 * payload types between 72 and 76 are reserved for RTCP conflict avoidance;
 * all the other payload types not present in the table are unassigned or
 * reserved.  The table is terminated by an entry with pt == -1. */
static const struct {
    int pt;
    const char enc_name[6];
    enum AVMediaType codec_type;
    enum CodecID codec_id;
    int clock_rate;
    int audio_channels;
} AVRtpPayloadTypes[] = {
    {0,  "PCMU",  AVMEDIA_TYPE_AUDIO, CODEC_ID_PCM_MULAW,  8000,  1},
    {3,  "GSM",   AVMEDIA_TYPE_AUDIO, CODEC_ID_NONE,       8000,  1},
    {4,  "G723",  AVMEDIA_TYPE_AUDIO, CODEC_ID_NONE,       8000,  1},
    {5,  "DVI4",  AVMEDIA_TYPE_AUDIO, CODEC_ID_NONE,       8000,  1},
    {6,  "DVI4",  AVMEDIA_TYPE_AUDIO, CODEC_ID_NONE,       16000, 1},
    {7,  "LPC",   AVMEDIA_TYPE_AUDIO, CODEC_ID_NONE,       8000,  1},
    {8,  "PCMA",  AVMEDIA_TYPE_AUDIO, CODEC_ID_PCM_ALAW,   8000,  1},
    {9,  "G722",  AVMEDIA_TYPE_AUDIO, CODEC_ID_NONE,       8000,  1},
    {10, "L16",   AVMEDIA_TYPE_AUDIO, CODEC_ID_PCM_S16BE,  44100, 2},
    {11, "L16",   AVMEDIA_TYPE_AUDIO, CODEC_ID_PCM_S16BE,  44100, 1},
    {12, "QCELP", AVMEDIA_TYPE_AUDIO, CODEC_ID_QCELP,      8000,  1},
    {13, "CN",    AVMEDIA_TYPE_AUDIO, CODEC_ID_NONE,       8000,  1},
    {14, "MPA",   AVMEDIA_TYPE_AUDIO, CODEC_ID_MP2,        -1,    -1},
    {14, "MPA",   AVMEDIA_TYPE_AUDIO, CODEC_ID_MP3,        -1,    -1},
    {15, "G728",  AVMEDIA_TYPE_AUDIO, CODEC_ID_NONE,       8000,  1},
    {16, "DVI4",  AVMEDIA_TYPE_AUDIO, CODEC_ID_NONE,       11025, 1},
    {17, "DVI4",  AVMEDIA_TYPE_AUDIO, CODEC_ID_NONE,       22050, 1},
    {18, "G729",  AVMEDIA_TYPE_AUDIO, CODEC_ID_NONE,       8000,  1},
    {25, "CelB",  AVMEDIA_TYPE_VIDEO, CODEC_ID_NONE,       90000, -1},
    {26, "JPEG",  AVMEDIA_TYPE_VIDEO, CODEC_ID_MJPEG,      90000, -1},
    {28, "nv",    AVMEDIA_TYPE_VIDEO, CODEC_ID_NONE,       90000, -1},
    {31, "H261",  AVMEDIA_TYPE_VIDEO, CODEC_ID_H261,       90000, -1},
    {32, "MPV",   AVMEDIA_TYPE_VIDEO, CODEC_ID_MPEG1VIDEO, 90000, -1},
    {32, "MPV",   AVMEDIA_TYPE_VIDEO, CODEC_ID_MPEG2VIDEO, 90000, -1},
    {33, "MP2T",  AVMEDIA_TYPE_DATA,  CODEC_ID_MPEG2TS,    90000, -1},
    {34, "H263",  AVMEDIA_TYPE_VIDEO, CODEC_ID_H263,       90000, -1},
    {-1, "",      AVMEDIA_TYPE_UNKNOWN, CODEC_ID_NONE,     -1,    -1}
};

/* Fill codec type/id (and, when known, channels and sample rate) for a
 * static payload type.  Returns 0 on success, -1 if the payload type is
 * unknown or maps to no FFmpeg codec. */
int ff_rtp_get_codec_info(AVCodecContext *codec, int payload_type)
{
    int idx;

    for (idx = 0; AVRtpPayloadTypes[idx].pt >= 0; idx++) {
        if (AVRtpPayloadTypes[idx].pt != payload_type)
            continue;
        if (AVRtpPayloadTypes[idx].codec_id == CODEC_ID_NONE)
            continue;
        codec->codec_type = AVRtpPayloadTypes[idx].codec_type;
        codec->codec_id   = AVRtpPayloadTypes[idx].codec_id;
        if (AVRtpPayloadTypes[idx].audio_channels > 0)
            codec->channels = AVRtpPayloadTypes[idx].audio_channels;
        if (AVRtpPayloadTypes[idx].clock_rate > 0)
            codec->sample_rate = AVRtpPayloadTypes[idx].clock_rate;
        return 0;
    }
    return -1;
}

/* Compute the static payload type for a codec, or -1 if it has none.
 * The whole table is scanned so the LAST matching entry wins. */
int ff_rtp_get_payload_type(AVCodecContext *codec)
{
    int idx, payload_type = -1;

    for (idx = 0; AVRtpPayloadTypes[idx].pt >= 0; ++idx) {
        if (AVRtpPayloadTypes[idx].codec_id != codec->codec_id)
            continue;
        /* H.263 deliberately never uses its static payload type here */
        if (codec->codec_id == CODEC_ID_H263)
            continue;
        /* the two L16 entries are distinguished by channel count */
        if (codec->codec_id == CODEC_ID_PCM_S16BE &&
            codec->channels != AVRtpPayloadTypes[idx].audio_channels)
            continue;
        payload_type = AVRtpPayloadTypes[idx].pt;
    }
    return payload_type;
}

/* Return the IANA encoding name for a static payload type, or "" if the
 * payload type is not in the table. */
const char *ff_rtp_enc_name(int payload_type)
{
    int idx;

    for (idx = 0; AVRtpPayloadTypes[idx].pt >= 0; idx++)
        if (payload_type == AVRtpPayloadTypes[idx].pt)
            return AVRtpPayloadTypes[idx].enc_name;

    return "";
}

/* Look up the codec id for an IANA encoding name of the given media type;
 * CODEC_ID_NONE when no entry matches. */
enum CodecID ff_rtp_codec_id(const char *buf, enum AVMediaType codec_type)
{
    int idx;

    for (idx = 0; AVRtpPayloadTypes[idx].pt >= 0; idx++)
        if (codec_type == AVRtpPayloadTypes[idx].codec_type &&
            !strcmp(buf, AVRtpPayloadTypes[idx].enc_name))
            return AVRtpPayloadTypes[idx].codec_id;

    return CODEC_ID_NONE;
}
123linslouis-android-video-cutter
jni/libavformat/rtp.c
C
asf20
5,146
LIBAVFORMAT_$MAJOR { global: *; };
123linslouis-android-video-cutter
jni/libavformat/libavformat.v
Verilog
asf20
43
/*
 * FLV demuxer
 * Copyright (c) 2003 The FFmpeg Project
 *
 * This demuxer will generate a 1 byte extradata for VP6F content.
 * It is composed of:
 *  - upper 4bits: difference between encoded width and visible width
 *  - lower 4bits: difference between encoded height and visible height
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avstring.h"
#include "libavcodec/bytestream.h"
#include "libavcodec/mpeg4audio.h"
#include "avformat.h"
#include "flv.h"

typedef struct {
    int wrong_dts; ///< wrong dts due to negative cts
} FLVContext;

/* Probe on the "FLV" signature, a sane version byte and a plausible
 * header size (big-endian 32 bits at offset 5, must be > 8). */
static int flv_probe(AVProbeData *p)
{
    const uint8_t *d;

    d = p->buf;
    if (d[0] == 'F' && d[1] == 'L' && d[2] == 'V' && d[3] < 5 &&
        d[5] == 0 && AV_RB32(d+5) > 8) {
        return AVPROBE_SCORE_MAX;
    }
    return 0;
}

/* Translate the codec id nibble of an audio tag into an FFmpeg codec id,
 * plus the few codec-specific fixups (Speex/Nellymoser sample rates,
 * MP3 needing a parser). */
static void flv_set_audio_codec(AVFormatContext *s, AVStream *astream, int flv_codecid)
{
    AVCodecContext *acodec = astream->codec;

    switch (flv_codecid) {
    //no distinction between S16 and S8 PCM codec flags
    case FLV_CODECID_PCM:
        acodec->codec_id = acodec->bits_per_coded_sample == 8 ? CODEC_ID_PCM_U8 :
#if HAVE_BIGENDIAN
                           CODEC_ID_PCM_S16BE;
#else
                           CODEC_ID_PCM_S16LE;
#endif
        break;
    case FLV_CODECID_PCM_LE:
        acodec->codec_id = acodec->bits_per_coded_sample == 8 ? CODEC_ID_PCM_U8 : CODEC_ID_PCM_S16LE;
        break;
    case FLV_CODECID_AAC:
        acodec->codec_id = CODEC_ID_AAC;
        break;
    case FLV_CODECID_ADPCM:
        acodec->codec_id = CODEC_ID_ADPCM_SWF;
        break;
    case FLV_CODECID_SPEEX:
        acodec->codec_id = CODEC_ID_SPEEX;
        acodec->sample_rate = 16000;
        break;
    case FLV_CODECID_MP3:
        acodec->codec_id = CODEC_ID_MP3;
        astream->need_parsing = AVSTREAM_PARSE_FULL;
        break;
    case FLV_CODECID_NELLYMOSER_8KHZ_MONO:
        acodec->sample_rate = 8000; //in case metadata does not otherwise declare samplerate
        /* fall through */
    case FLV_CODECID_NELLYMOSER:
        acodec->codec_id = CODEC_ID_NELLYMOSER;
        break;
    default:
        av_log(s, AV_LOG_INFO, "Unsupported audio codec (%x)\n",
               flv_codecid >> FLV_AUDIO_CODECID_OFFSET);
        acodec->codec_tag = flv_codecid >> FLV_AUDIO_CODECID_OFFSET;
    }
}

/* Translate the codec id nibble of a video tag into an FFmpeg codec id.
 * Returns the number of body bytes consumed here, so flv_read_packet()
 * can shrink the packet size accordingly. */
static int flv_set_video_codec(AVFormatContext *s, AVStream *vstream, int flv_codecid)
{
    AVCodecContext *vcodec = vstream->codec;

    switch (flv_codecid) {
    case FLV_CODECID_H263:
        vcodec->codec_id = CODEC_ID_FLV1;
        break;
    case FLV_CODECID_SCREEN:
        vcodec->codec_id = CODEC_ID_FLASHSV;
        break;
    case FLV_CODECID_SCREEN2:
        vcodec->codec_id = CODEC_ID_FLASHSV2;
        break;
    case FLV_CODECID_VP6:
        vcodec->codec_id = CODEC_ID_VP6F;
        /* fall through: VP6 and VP6A share the 1-byte crop extradata */
    case FLV_CODECID_VP6A: {
        /* read the crop byte first so the stream position stays
         * consistent even if allocation fails below */
        int crop = get_byte(s->pb);

        if (flv_codecid == FLV_CODECID_VP6A)
            vcodec->codec_id = CODEC_ID_VP6A;
        if (vcodec->extradata_size != 1) {
            /* fix: previous code leaked any old extradata and used the
             * av_malloc() result without a NULL check */
            av_free(vcodec->extradata);
            vcodec->extradata = av_malloc(1);
            vcodec->extradata_size = vcodec->extradata ? 1 : 0;
        }
        if (vcodec->extradata)
            vcodec->extradata[0] = crop;
        return 1; // 1 byte body size adjustment for flv_read_packet()
    }
    case FLV_CODECID_H264:
        vcodec->codec_id = CODEC_ID_H264;
        return 3; // not 4, reading packet type will consume one byte
    default:
        av_log(s, AV_LOG_INFO, "Unsupported video codec (%x)\n", flv_codecid);
        vcodec->codec_tag = flv_codecid;
    }
    return 0;
}

/* Read a 16-bit-length-prefixed AMF string into buffer (NUL terminated).
 * Strings that do not fit are skipped and -1 is returned. */
static int amf_get_string(ByteIOContext *ioc, char *buffer, int buffsize)
{
    int length = get_be16(ioc);

    if (length >= buffsize) {
        url_fskip(ioc, length);
        return -1;
    }

    get_buffer(ioc, buffer, length);
    buffer[length] = '\0';

    return length;
}

/* Recursively parse (or skip) one AMF value.  At depth 1 with a non-NULL
 * key, scalar values are additionally stored as metadata and a few known
 * keys (duration, data rates) are applied to the context/codecs. */
static int amf_parse_object(AVFormatContext *s, AVStream *astream, AVStream *vstream,
                            const char *key, int64_t max_pos, int depth)
{
    AVCodecContext *acodec, *vcodec;
    ByteIOContext *ioc;
    AMFDataType amf_type;
    char str_val[256];
    double num_val;

    num_val = 0;
    ioc = s->pb;

    amf_type = get_byte(ioc);

    switch (amf_type) {
    case AMF_DATA_TYPE_NUMBER:
        num_val = av_int2dbl(get_be64(ioc));
        break;
    case AMF_DATA_TYPE_BOOL:
        num_val = get_byte(ioc);
        break;
    case AMF_DATA_TYPE_STRING:
        if (amf_get_string(ioc, str_val, sizeof(str_val)) < 0)
            return -1;
        break;
    case AMF_DATA_TYPE_OBJECT: {
        unsigned int keylen;

        while (url_ftell(ioc) < max_pos - 2 && (keylen = get_be16(ioc))) {
            url_fskip(ioc, keylen); //skip key string
            if (amf_parse_object(s, NULL, NULL, NULL, max_pos, depth + 1) < 0)
                return -1; //if we couldn't skip, bomb out.
        }
        if (get_byte(ioc) != AMF_END_OF_OBJECT)
            return -1;
        }
        break;
    case AMF_DATA_TYPE_NULL:
    case AMF_DATA_TYPE_UNDEFINED:
    case AMF_DATA_TYPE_UNSUPPORTED:
        break; //these take up no additional space
    case AMF_DATA_TYPE_MIXEDARRAY:
        url_fskip(ioc, 4); //skip 32-bit max array index
        while (url_ftell(ioc) < max_pos - 2 &&
               amf_get_string(ioc, str_val, sizeof(str_val)) > 0) {
            //this is the only case in which we would want a nested parse to not skip over the object
            if (amf_parse_object(s, astream, vstream, str_val, max_pos, depth + 1) < 0)
                return -1;
        }
        if (get_byte(ioc) != AMF_END_OF_OBJECT)
            return -1;
        break;
    case AMF_DATA_TYPE_ARRAY: {
        unsigned int arraylen, i;

        arraylen = get_be32(ioc);
        for (i = 0; i < arraylen && url_ftell(ioc) < max_pos - 1; i++) {
            if (amf_parse_object(s, NULL, NULL, NULL, max_pos, depth + 1) < 0)
                return -1; //if we couldn't skip, bomb out.
        }
        }
        break;
    case AMF_DATA_TYPE_DATE:
        url_fskip(ioc, 8 + 2); //timestamp (double) and UTC offset (int16)
        break;
    default: //unsupported type, we couldn't skip
        return -1;
    }

    if (depth == 1 && key) { //only look for metadata values when we are not nested and key != NULL
        acodec = astream ? astream->codec : NULL;
        vcodec = vstream ? vstream->codec : NULL;

        if (amf_type == AMF_DATA_TYPE_BOOL) {
            av_strlcpy(str_val, num_val > 0 ? "true" : "false", sizeof(str_val));
            av_metadata_set2(&s->metadata, key, str_val, 0);
        } else if (amf_type == AMF_DATA_TYPE_NUMBER) {
            snprintf(str_val, sizeof(str_val), "%.f", num_val);
            av_metadata_set2(&s->metadata, key, str_val, 0);
            if (!strcmp(key, "duration"))
                s->duration = num_val * AV_TIME_BASE;
            else if (!strcmp(key, "videodatarate") && vcodec && 0 <= (int)(num_val * 1024.0))
                vcodec->bit_rate = num_val * 1024.0;
            else if (!strcmp(key, "audiodatarate") && acodec && 0 <= (int)(num_val * 1024.0))
                acodec->bit_rate = num_val * 1024.0;
        } else if (amf_type == AMF_DATA_TYPE_STRING)
            av_metadata_set2(&s->metadata, key, str_val, 0);
    }

    return 0;
}

/* Parse an onMetaData script tag ending at next_pos. */
static int flv_read_metabody(AVFormatContext *s, int64_t next_pos)
{
    AMFDataType type;
    AVStream *stream, *astream, *vstream;
    ByteIOContext *ioc;
    int i;
    char buffer[11]; //only needs to hold the string "onMetaData". Anything longer is something we don't want.

    astream = NULL;
    vstream = NULL;
    ioc = s->pb;

    //first object needs to be "onMetaData" string
    type = get_byte(ioc);
    if (type != AMF_DATA_TYPE_STRING ||
        amf_get_string(ioc, buffer, sizeof(buffer)) < 0 ||
        strcmp(buffer, "onMetaData"))
        return -1;

    //find the streams now so that amf_parse_object doesn't need to do the lookup every time it is called.
    for (i = 0; i < s->nb_streams; i++) {
        stream = s->streams[i];
        if      (stream->codec->codec_type == AVMEDIA_TYPE_AUDIO) astream = stream;
        else if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO) vstream = stream;
    }

    //parse the second object (we want a mixed array)
    if (amf_parse_object(s, astream, vstream, buffer, next_pos, 0) < 0)
        return -1;

    return 0;
}

/* Create an audio (is_audio=1) or video (is_audio=0) stream; the stream
 * id doubles as the audio/video flag when matching packets later. */
static AVStream *create_stream(AVFormatContext *s, int is_audio)
{
    AVStream *st = av_new_stream(s, is_audio);
    if (!st)
        return NULL;
    st->codec->codec_type = is_audio ? AVMEDIA_TYPE_AUDIO : AVMEDIA_TYPE_VIDEO;
    av_set_pts_info(st, 32, 1, 1000); /* 32 bit pts in ms */
    return st;
}

static int flv_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    int offset, flags;

    url_fskip(s->pb, 4);
    flags = get_byte(s->pb);
    /* old flvtool cleared this field */
    /* FIXME: better fix needed */
    if (!flags) {
        flags = FLV_HEADER_FLAG_HASVIDEO | FLV_HEADER_FLAG_HASAUDIO;
        av_log(s, AV_LOG_WARNING, "Broken FLV file, which says no streams present, this might fail\n");
    }

    /* streams may still appear later if the header lies about its contents */
    if ((flags & (FLV_HEADER_FLAG_HASVIDEO|FLV_HEADER_FLAG_HASAUDIO))
             != (FLV_HEADER_FLAG_HASVIDEO|FLV_HEADER_FLAG_HASAUDIO))
        s->ctx_flags |= AVFMTCTX_NOHEADER;

    if (flags & FLV_HEADER_FLAG_HASVIDEO) {
        if (!create_stream(s, 0))
            return AVERROR(ENOMEM);
    }
    if (flags & FLV_HEADER_FLAG_HASAUDIO) {
        if (!create_stream(s, 1))
            return AVERROR(ENOMEM);
    }

    offset = get_be32(s->pb);
    url_fseek(s->pb, offset, SEEK_SET);
    url_fskip(s->pb, 4);

    s->start_time = 0;

    return 0;
}

/* Replace the stream's extradata with the next 'size' bytes of input. */
static int flv_get_extradata(AVFormatContext *s, AVStream *st, int size)
{
    av_free(st->codec->extradata);
    st->codec->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!st->codec->extradata)
        return AVERROR(ENOMEM);
    st->codec->extradata_size = size;
    get_buffer(s->pb, st->codec->extradata, st->codec->extradata_size);
    return 0;
}

static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    FLVContext *flv = s->priv_data;
    int ret, i, type, size, flags, is_audio;
    int64_t next, pos;
    int64_t dts, pts = AV_NOPTS_VALUE;
    AVStream *st = NULL;

    for (;; url_fskip(s->pb, 4)) { /* pkt size is repeated at end. skip it */
        pos  = url_ftell(s->pb);
        type = get_byte(s->pb);
        size = get_be24(s->pb);
        dts  = get_be24(s->pb);
        dts |= get_byte(s->pb) << 24;
//        av_log(s, AV_LOG_DEBUG, "type:%d, size:%d, dts:%d\n", type, size, dts);
        if (url_feof(s->pb))
            return AVERROR_EOF;
        url_fskip(s->pb, 3); /* stream id, always 0 */
        flags = 0;

        if (size == 0)
            continue;

        next = size + url_ftell(s->pb);

        if (type == FLV_TAG_TYPE_AUDIO) {
            is_audio = 1;
            flags = get_byte(s->pb);
            size--;
        } else if (type == FLV_TAG_TYPE_VIDEO) {
            is_audio = 0;
            flags = get_byte(s->pb);
            size--;
            if ((flags & 0xf0) == 0x50) /* video info / command frame */
                goto skip;
        } else {
            if (type == FLV_TAG_TYPE_META && size > 13+1+4)
                flv_read_metabody(s, next);
            else /* skip packet */
                av_log(s, AV_LOG_DEBUG, "skipping flv packet: type %d, size %d, flags %d\n", type, size, flags);
        skip:
            url_fseek(s->pb, next, SEEK_SET);
            continue;
        }

        /* skip empty data packets */
        if (!size)
            continue;

        /* now find stream: st->id encodes audio (1) vs video (0) */
        for (i = 0; i < s->nb_streams; i++) {
            st = s->streams[i];
            if (st->id == is_audio)
                break;
        }
        if (i == s->nb_streams) {
            av_log(s, AV_LOG_ERROR, "invalid stream\n");
            st = create_stream(s, is_audio);
            if (!st) /* fix: result was dereferenced unchecked */
                return AVERROR(ENOMEM);
            s->ctx_flags &= ~AVFMTCTX_NOHEADER;
        }
//        av_log(s, AV_LOG_DEBUG, "%d %X %d \n", is_audio, flags, st->discard);
        if ((st->discard >= AVDISCARD_NONKEY && !((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY || is_audio)) ||
            (st->discard >= AVDISCARD_BIDIR  &&  ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_DISP_INTER && !is_audio)) ||
             st->discard >= AVDISCARD_ALL) {
            url_fseek(s->pb, next, SEEK_SET);
            continue;
        }
        if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY)
            av_add_index_entry(st, pos, dts, size, 0, AVINDEX_KEYFRAME);
        break;
    }

    // if not streamed and no duration from metadata then seek to end to find the duration from the timestamps
    if (!url_is_streamed(s->pb) && (!s->duration || s->duration == AV_NOPTS_VALUE)) {
        int last_size;                                /* renamed: no longer shadows 'size' */
        const int64_t cur_pos = url_ftell(s->pb);     /* renamed: no longer shadows 'pos' */
        const int64_t fsize   = url_fsize(s->pb);

        url_fseek(s->pb, fsize - 4, SEEK_SET);
        last_size = get_be32(s->pb);
        url_fseek(s->pb, fsize - 3 - last_size, SEEK_SET);
        if (last_size == get_be24(s->pb) + 11) {
            uint32_t ts = get_be24(s->pb);
            ts |= get_byte(s->pb) << 24;
            s->duration = ts * (int64_t)AV_TIME_BASE / 1000;
        }
        url_fseek(s->pb, cur_pos, SEEK_SET);
    }

    if (is_audio) {
        if (!st->codec->channels || !st->codec->sample_rate || !st->codec->bits_per_coded_sample) {
            st->codec->channels    = (flags & FLV_AUDIO_CHANNEL_MASK) == FLV_STEREO ? 2 : 1;
            st->codec->sample_rate = (44100 << ((flags & FLV_AUDIO_SAMPLERATE_MASK) >> FLV_AUDIO_SAMPLERATE_OFFSET) >> 3);
            st->codec->bits_per_coded_sample = (flags & FLV_AUDIO_SAMPLESIZE_MASK) ? 16 : 8;
        }
        if (!st->codec->codec_id) {
            flv_set_audio_codec(s, st, flags & FLV_AUDIO_CODECID_MASK);
        }
    } else {
        size -= flv_set_video_codec(s, st, flags & FLV_VIDEO_CODECID_MASK);
    }

    if (st->codec->codec_id == CODEC_ID_AAC || st->codec->codec_id == CODEC_ID_H264) {
        int type = get_byte(s->pb);
        size--;
        if (st->codec->codec_id == CODEC_ID_H264) {
            int32_t cts = (get_be24(s->pb)+0xff800000)^0xff800000; // sign extension
            pts = dts + cts;
            if (cts < 0) { // dts are wrong
                flv->wrong_dts = 1;
                av_log(s, AV_LOG_WARNING, "negative cts, previous timestamps might be wrong\n");
            }
            if (flv->wrong_dts)
                dts = AV_NOPTS_VALUE;
        }
        if (type == 0) { /* sequence header: becomes extradata, not a packet */
            if ((ret = flv_get_extradata(s, st, size)) < 0)
                return ret;
            if (st->codec->codec_id == CODEC_ID_AAC) {
                MPEG4AudioConfig cfg;
                ff_mpeg4audio_get_config(&cfg, st->codec->extradata,
                                         st->codec->extradata_size);
                st->codec->channels = cfg.channels;
                st->codec->sample_rate = cfg.sample_rate;
                dprintf(s, "mp4a config channels %d sample rate %d\n",
                        st->codec->channels, st->codec->sample_rate);
            }
            ret = AVERROR(EAGAIN);
            goto leave;
        }
    }

    /* skip empty data packets */
    if (!size) {
        ret = AVERROR(EAGAIN);
        goto leave;
    }

    ret = av_get_packet(s->pb, pkt, size);
    if (ret < 0) {
        return AVERROR(EIO);
    }
    /* note: we need to modify the packet size here to handle the last packet */
    pkt->size = ret;
    pkt->dts  = dts;
    pkt->pts  = pts == AV_NOPTS_VALUE ? dts : pts;
    pkt->stream_index = st->index;

    if (is_audio || ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY))
        pkt->flags |= AV_PKT_FLAG_KEY;

leave:
    url_fskip(s->pb, 4);
    return ret;
}

static int flv_read_seek(AVFormatContext *s, int stream_index,
                         int64_t ts, int flags)
{
    return av_url_read_fseek(s->pb, stream_index, ts, flags);
}

#if 0 /* don't know enough to implement this */
static int flv_read_seek2(AVFormatContext *s, int stream_index,
                          int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    int ret = AVERROR(ENOSYS);

    if (ts - min_ts > (uint64_t)(max_ts - ts))
        flags |= AVSEEK_FLAG_BACKWARD;

    if (url_is_streamed(s->pb)) {
        if (stream_index < 0) {
            stream_index = av_find_default_stream_index(s);
            if (stream_index < 0)
                return -1;

            /* timestamp for default must be expressed in AV_TIME_BASE units */
            ts = av_rescale_rnd(ts, 1000, AV_TIME_BASE,
                                flags & AVSEEK_FLAG_BACKWARD ? AV_ROUND_DOWN : AV_ROUND_UP);
        }
        ret = av_url_read_fseek(s->pb, stream_index, ts, flags);
    }

    if (ret == AVERROR(ENOSYS))
        ret = av_seek_frame(s, stream_index, ts, flags);
    return ret;
}
#endif

AVInputFormat flv_demuxer = {
    "flv",
    NULL_IF_CONFIG_SMALL("FLV format"),
    sizeof(FLVContext),
    flv_probe,
    flv_read_header,
    flv_read_packet,
    .read_seek = flv_read_seek,
#if 0
    .read_seek2 = flv_read_seek2,
#endif
    .extensions = "flv",
    .value = CODEC_ID_FLV1,
};
123linslouis-android-video-cutter
jni/libavformat/flvdec.c
C
asf20
17,677
/*
 * RIFF codec tags
 * copyright (c) 2000 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * internal header for RIFF based (de)muxers
 * do NOT include this in end user applications
 */

#ifndef AVFORMAT_RIFF_H
#define AVFORMAT_RIFF_H

#include "libavcodec/avcodec.h"
#include "avio.h"

/* Open a RIFF chunk with the given tag; the returned offset is passed to
 * ff_end_tag() to close the chunk (implementation in riff.c). */
int64_t ff_start_tag(ByteIOContext *pb, const char *tag);
void ff_end_tag(ByteIOContext *pb, int64_t start);

/**
 * Pairing of an FFmpeg codec id with a RIFF tag (fourcc or wav format id),
 * used by the lookup helpers and the tag tables below.
 */
typedef struct AVCodecTag {
    enum CodecID id;
    unsigned int tag;
} AVCodecTag;

/* BITMAPINFOHEADER / WAVEFORMATEX serialization helpers. */
void ff_put_bmp_header(ByteIOContext *pb, AVCodecContext *enc, const AVCodecTag *tags, int for_asf);
int ff_put_wav_header(ByteIOContext *pb, AVCodecContext *enc);

/* Map a wav format tag (plus bits-per-sample for PCM) to a codec id. */
enum CodecID ff_wav_codec_get_id(unsigned int tag, int bps);
void ff_get_wav_header(ByteIOContext *pb, AVCodecContext *codec, int size);

/* Standard video (bmp/fourcc) and audio (wav) tag tables; see riff.c. */
extern const AVCodecTag ff_codec_bmp_tags[];
extern const AVCodecTag ff_codec_wav_tags[];

/* Generic lookups over an AVCodecTag table (either direction). */
unsigned int ff_codec_get_tag(const AVCodecTag *tags, int id);
enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag);

void ff_parse_specific_params(AVCodecContext *stream, int *au_rate, int *au_ssize, int *au_scale);

#endif /* AVFORMAT_RIFF_H */
123linslouis-android-video-cutter
jni/libavformat/riff.h
C
asf20
1,888