| author    | Tim Redfern <tim@eclectronics.org> | 2013-12-29 12:19:38 +0000 |
|-----------|------------------------------------|---------------------------|
| committer | Tim Redfern <tim@eclectronics.org> | 2013-12-29 12:19:38 +0000 |
| commit    | f7813a5324be39d13ab536c245d15dfc602a7849 | |
| tree      | fad99148b88823d34a5df2f0a25881a002eb291b /ffmpeg/libavcodec/utils.c | |
| parent    | b7a5a477b8ff4d4e3028b9dfb9a9df0a41463f92 | |
basic type mechanism working
Diffstat (limited to 'ffmpeg/libavcodec/utils.c')
| mode       | file                      | lines changed |
|------------|---------------------------|---------------|
| -rw-r--r-- | ffmpeg/libavcodec/utils.c | 871           |

1 file changed, 619 insertions(+), 252 deletions(-)
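Note: the first hunks below install a pthread/w32threads/os2threads-based default lock manager (default_lockmgr_cb), creating its mutex lazily with avpriv_atomic_ptr_cas, so calling av_lockmgr_register() is no longer strictly required before using avcodec_open2()/avcodec_close() from multiple threads. For reference, a minimal sketch of the lock-manager callback an application could still register through the public API; the callback name and error handling are illustrative, only av_lockmgr_register() and the AVLockOp values come from the library.

```c
/* Hedged sketch, not part of the commit: a pthread-based lock manager an
 * application could register itself.  With this commit a similar default
 * is installed automatically whenever a threading backend is available. */
#include <pthread.h>
#include <stdlib.h>
#include <libavcodec/avcodec.h>

static int my_lockmgr_cb(void **mutex, enum AVLockOp op)
{
    pthread_mutex_t *m = *mutex;

    switch (op) {
    case AV_LOCK_CREATE:                  /* allocate and initialize */
        m = malloc(sizeof(*m));
        if (!m)
            return 1;
        if (pthread_mutex_init(m, NULL)) {
            free(m);
            return 1;
        }
        *mutex = m;
        return 0;
    case AV_LOCK_OBTAIN:                  /* lock */
        return !!pthread_mutex_lock(m);
    case AV_LOCK_RELEASE:                 /* unlock */
        return !!pthread_mutex_unlock(m);
    case AV_LOCK_DESTROY:                 /* tear down */
        if (m) {
            pthread_mutex_destroy(m);
            free(m);
        }
        *mutex = NULL;
        return 0;
    }
    return 1;
}

/* before any threaded avcodec_open2()/avcodec_close() calls: */
/* av_lockmgr_register(my_lockmgr_cb);                         */
```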
diff --git a/ffmpeg/libavcodec/utils.c b/ffmpeg/libavcodec/utils.c index c15b772..c000d27 100644 --- a/ffmpeg/libavcodec/utils.c +++ b/ffmpeg/libavcodec/utils.c @@ -26,18 +26,20 @@ */ #include "config.h" +#include "libavutil/atomic.h" +#include "libavutil/attributes.h" #include "libavutil/avassert.h" #include "libavutil/avstring.h" #include "libavutil/bprint.h" #include "libavutil/channel_layout.h" #include "libavutil/crc.h" #include "libavutil/frame.h" +#include "libavutil/internal.h" #include "libavutil/mathematics.h" #include "libavutil/pixdesc.h" #include "libavutil/imgutils.h" #include "libavutil/samplefmt.h" #include "libavutil/dict.h" -#include "libavutil/avassert.h" #include "avcodec.h" #include "dsputil.h" #include "libavutil/opt.h" @@ -54,30 +56,84 @@ # include <iconv.h> #endif +#if HAVE_PTHREADS +#include <pthread.h> +#elif HAVE_W32THREADS +#include "compat/w32pthreads.h" +#elif HAVE_OS2THREADS +#include "compat/os2threads.h" +#endif + +#if HAVE_PTHREADS || HAVE_W32THREADS || HAVE_OS2THREADS +static int default_lockmgr_cb(void **arg, enum AVLockOp op) +{ + void * volatile * mutex = arg; + int err; + + switch (op) { + case AV_LOCK_CREATE: + return 0; + case AV_LOCK_OBTAIN: + if (!*mutex) { + pthread_mutex_t *tmp = av_malloc(sizeof(pthread_mutex_t)); + if (!tmp) + return AVERROR(ENOMEM); + if ((err = pthread_mutex_init(tmp, NULL))) { + av_free(tmp); + return AVERROR(err); + } + if (avpriv_atomic_ptr_cas(mutex, NULL, tmp)) { + pthread_mutex_destroy(tmp); + av_free(tmp); + } + } + + if ((err = pthread_mutex_lock(*mutex))) + return AVERROR(err); + + return 0; + case AV_LOCK_RELEASE: + if ((err = pthread_mutex_unlock(*mutex))) + return AVERROR(err); + + return 0; + case AV_LOCK_DESTROY: + if (*mutex) + pthread_mutex_destroy(*mutex); + av_free(*mutex); + avpriv_atomic_ptr_cas(mutex, *mutex, NULL); + return 0; + } + return 1; +} +static int (*lockmgr_cb)(void **mutex, enum AVLockOp op) = default_lockmgr_cb; +#else +static int (*lockmgr_cb)(void **mutex, enum AVLockOp op) = NULL; +#endif + + volatile int ff_avcodec_locked; static int volatile entangled_thread_counter = 0; -static int (*ff_lockmgr_cb)(void **mutex, enum AVLockOp op); static void *codec_mutex; static void *avformat_mutex; -void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size) -{ - if (min_size < *size) - return ptr; - - min_size = FFMAX(17 * min_size / 16 + 32, min_size); - - ptr = av_realloc(ptr, min_size); - /* we could set this to the unmodified min_size but this is safer - * if the user lost the ptr and uses NULL now - */ - if (!ptr) - min_size = 0; +#if CONFIG_RAISE_MAJOR +# define LIBNAME "LIBAVCODEC_155" +#else +# define LIBNAME "LIBAVCODEC_55" +#endif - *size = min_size; +#if FF_API_FAST_MALLOC && CONFIG_SHARED && HAVE_SYMVER +FF_SYMVER(void*, av_fast_realloc, (void *ptr, unsigned int *size, size_t min_size), LIBNAME) +{ + return av_fast_realloc(ptr, size, min_size); +} - return ptr; +FF_SYMVER(void, av_fast_malloc, (void *ptr, unsigned int *size, size_t min_size), LIBNAME) +{ + av_fast_malloc(ptr, size, min_size); } +#endif static inline int ff_fast_malloc(void *ptr, unsigned int *size, size_t min_size, int zero_realloc) { @@ -93,11 +149,6 @@ static inline int ff_fast_malloc(void *ptr, unsigned int *size, size_t min_size, return 1; } -void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size) -{ - ff_fast_malloc(ptr, size, min_size, 0); -} - void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size) { uint8_t **p = ptr; @@ -124,6 +175,7 @@ void av_fast_padded_mallocz(void 
*ptr, unsigned int *size, size_t min_size) /* encoder management */ static AVCodec *first_avcodec = NULL; +static AVCodec **last_avcodec = &first_avcodec; AVCodec *av_codec_next(const AVCodec *c) { @@ -133,7 +185,7 @@ AVCodec *av_codec_next(const AVCodec *c) return first_avcodec; } -static void avcodec_init(void) +static av_cold void avcodec_init(void) { static int initialized = 0; @@ -155,16 +207,17 @@ int av_codec_is_decoder(const AVCodec *codec) return codec && codec->decode; } -void avcodec_register(AVCodec *codec) +av_cold void avcodec_register(AVCodec *codec) { AVCodec **p; avcodec_init(); - p = &first_avcodec; - while (*p != NULL) - p = &(*p)->next; - *p = codec; + p = last_avcodec; codec->next = NULL; + while(*p || avpriv_atomic_ptr_cas((void * volatile *)p, NULL, codec)) + p = &(*p)->next; + last_avcodec = &codec->next; + if (codec->init_static_data) codec->init_static_data(codec); } @@ -174,15 +227,32 @@ unsigned avcodec_get_edge_width(void) return EDGE_WIDTH; } +#if FF_API_SET_DIMENSIONS void avcodec_set_dimensions(AVCodecContext *s, int width, int height) { + int ret = ff_set_dimensions(s, width, height); + if (ret < 0) { + av_log(s, AV_LOG_WARNING, "Failed to set dimensions %d %d\n", width, height); + } +} +#endif + +int ff_set_dimensions(AVCodecContext *s, int width, int height) +{ + int ret = av_image_check_size(width, height, 0, s); + + if (ret < 0) + width = height = 0; + s->coded_width = width; s->coded_height = height; - s->width = -((-width ) >> s->lowres); - s->height = -((-height) >> s->lowres); + s->width = FF_CEIL_RSHIFT(width, s->lowres); + s->height = FF_CEIL_RSHIFT(height, s->lowres); + + return ret; } -#if (ARCH_ARM && HAVE_NEON) || ARCH_PPC || HAVE_MMX +#if HAVE_NEON || ARCH_PPC || HAVE_MMX # define STRIDE_ALIGN 16 #else # define STRIDE_ALIGN 8 @@ -202,6 +272,7 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, case AV_PIX_FMT_YUV422P: case AV_PIX_FMT_YUV440P: case AV_PIX_FMT_YUV444P: + case AV_PIX_FMT_GBRAP: case AV_PIX_FMT_GBRP: case AV_PIX_FMT_GRAY8: case AV_PIX_FMT_GRAY16BE: @@ -221,6 +292,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, case AV_PIX_FMT_YUV420P12BE: case AV_PIX_FMT_YUV420P14LE: case AV_PIX_FMT_YUV420P14BE: + case AV_PIX_FMT_YUV420P16LE: + case AV_PIX_FMT_YUV420P16BE: case AV_PIX_FMT_YUV422P9LE: case AV_PIX_FMT_YUV422P9BE: case AV_PIX_FMT_YUV422P10LE: @@ -229,6 +302,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, case AV_PIX_FMT_YUV422P12BE: case AV_PIX_FMT_YUV422P14LE: case AV_PIX_FMT_YUV422P14BE: + case AV_PIX_FMT_YUV422P16LE: + case AV_PIX_FMT_YUV422P16BE: case AV_PIX_FMT_YUV444P9LE: case AV_PIX_FMT_YUV444P9BE: case AV_PIX_FMT_YUV444P10LE: @@ -237,6 +312,26 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, case AV_PIX_FMT_YUV444P12BE: case AV_PIX_FMT_YUV444P14LE: case AV_PIX_FMT_YUV444P14BE: + case AV_PIX_FMT_YUV444P16LE: + case AV_PIX_FMT_YUV444P16BE: + case AV_PIX_FMT_YUVA420P9LE: + case AV_PIX_FMT_YUVA420P9BE: + case AV_PIX_FMT_YUVA420P10LE: + case AV_PIX_FMT_YUVA420P10BE: + case AV_PIX_FMT_YUVA420P16LE: + case AV_PIX_FMT_YUVA420P16BE: + case AV_PIX_FMT_YUVA422P9LE: + case AV_PIX_FMT_YUVA422P9BE: + case AV_PIX_FMT_YUVA422P10LE: + case AV_PIX_FMT_YUVA422P10BE: + case AV_PIX_FMT_YUVA422P16LE: + case AV_PIX_FMT_YUVA422P16BE: + case AV_PIX_FMT_YUVA444P9LE: + case AV_PIX_FMT_YUVA444P9BE: + case AV_PIX_FMT_YUVA444P10LE: + case AV_PIX_FMT_YUVA444P10BE: + case AV_PIX_FMT_YUVA444P16LE: + case AV_PIX_FMT_YUVA444P16BE: case 
AV_PIX_FMT_GBRP9LE: case AV_PIX_FMT_GBRP9BE: case AV_PIX_FMT_GBRP10LE: @@ -249,6 +344,7 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, h_align = 16 * 2; // interlaced needs 2 macroblocks height break; case AV_PIX_FMT_YUV411P: + case AV_PIX_FMT_YUVJ411P: case AV_PIX_FMT_UYYVYY411: w_align = 32; h_align = 8; @@ -323,6 +419,29 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height) *width = FFALIGN(*width, align); } +int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos) +{ + if (pos <= AVCHROMA_LOC_UNSPECIFIED || pos >= AVCHROMA_LOC_NB) + return AVERROR(EINVAL); + pos--; + + *xpos = (pos&1) * 128; + *ypos = ((pos>>1)^(pos<4)) * 128; + + return 0; +} + +enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos) +{ + int pos, xout, yout; + + for (pos = AVCHROMA_LOC_UNSPECIFIED + 1; pos < AVCHROMA_LOC_NB; pos++) { + if (avcodec_enum_to_chroma_pos(&xout, &yout, pos) == 0 && xout == xpos && yout == ypos) + return pos; + } + return AVCHROMA_LOC_UNSPECIFIED; +} + int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, enum AVSampleFormat sample_fmt, const uint8_t *buf, int buf_size, int align) @@ -408,7 +527,10 @@ static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame) av_buffer_pool_uninit(&pool->pools[i]); pool->linesize[i] = picture.linesize[i]; if (size[i]) { - pool->pools[i] = av_buffer_pool_init(size[i] + 16, NULL); + pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1, + CONFIG_MEMORY_POISONING ? + NULL : + av_buffer_allocz); if (!pool->pools[i]) { ret = AVERROR(ENOMEM); goto fail; @@ -526,6 +648,7 @@ static int video_get_buffer(AVCodecContext *s, AVFrame *pic) for (i = 0; i < 4 && pool->pools[i]; i++) { const int h_shift = i == 0 ? 0 : h_chroma_shift; const int v_shift = i == 0 ? 0 : v_chroma_shift; + int is_planar = pool->pools[2] || (i==0 && s->pix_fmt == AV_PIX_FMT_GRAY8); pic->linesize[i] = pool->linesize[i]; @@ -534,7 +657,7 @@ static int video_get_buffer(AVCodecContext *s, AVFrame *pic) goto fail; // no edge if EDGE EMU or not planar YUV - if ((s->flags & CODEC_FLAG_EMU_EDGE) || !pool->pools[2]) + if ((s->flags & CODEC_FLAG_EMU_EDGE) || !is_planar) pic->data[i] = pic->buf[i]->data; else { pic->data[i] = pic->buf[i]->data + @@ -563,13 +686,14 @@ void avpriv_color_frame(AVFrame *frame, const int c[4]) const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); int p, y, x; - av_assert0(desc->flags & PIX_FMT_PLANAR); + av_assert0(desc->flags & AV_PIX_FMT_FLAG_PLANAR); for (p = 0; p<desc->nb_components; p++) { uint8_t *dst = frame->data[p]; int is_chroma = p == 1 || p == 2; - int bytes = -((-frame->width) >> (is_chroma ? desc->log2_chroma_w : 0)); - for (y = 0; y<-((-frame->height) >> (is_chroma ? desc->log2_chroma_h : 0)); y++){ + int bytes = is_chroma ? FF_CEIL_RSHIFT(frame->width, desc->log2_chroma_w) : frame->width; + int height = is_chroma ? 
FF_CEIL_RSHIFT(frame->height, desc->log2_chroma_h) : frame->height; + for (y = 0; y < height; y++) { if (desc->comp[0].depth_minus1 >= 8) { for (x = 0; x<bytes; x++) ((uint16_t*)dst)[x] = c[p]; @@ -588,7 +712,9 @@ int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags return ret; #if FF_API_GET_BUFFER +FF_DISABLE_DEPRECATION_WARNINGS frame->type = FF_BUFFER_TYPE_INTERNAL; +FF_ENABLE_DEPRECATION_WARNINGS #endif switch (avctx->codec_type) { @@ -603,11 +729,11 @@ int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags int ff_init_buffer_info(AVCodecContext *avctx, AVFrame *frame) { - if (avctx->pkt) { - frame->pkt_pts = avctx->pkt->pts; - av_frame_set_pkt_pos (frame, avctx->pkt->pos); - av_frame_set_pkt_duration(frame, avctx->pkt->duration); - av_frame_set_pkt_size (frame, avctx->pkt->size); + if (avctx->internal->pkt) { + frame->pkt_pts = avctx->internal->pkt->pts; + av_frame_set_pkt_pos (frame, avctx->internal->pkt->pos); + av_frame_set_pkt_duration(frame, avctx->internal->pkt->duration); + av_frame_set_pkt_size (frame, avctx->internal->pkt->size); } else { frame->pkt_pts = AV_NOPTS_VALUE; av_frame_set_pkt_pos (frame, -1); @@ -618,14 +744,16 @@ int ff_init_buffer_info(AVCodecContext *avctx, AVFrame *frame) switch (avctx->codec->type) { case AVMEDIA_TYPE_VIDEO: - if (!frame->width) - frame->width = avctx->width; - if (!frame->height) - frame->height = avctx->height; + frame->width = FFMAX(avctx->width, FF_CEIL_RSHIFT(avctx->coded_width, avctx->lowres)); + frame->height = FFMAX(avctx->height, FF_CEIL_RSHIFT(avctx->coded_height, avctx->lowres)); if (frame->format < 0) frame->format = avctx->pix_fmt; if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio; + if (av_frame_get_colorspace(frame) == AVCOL_SPC_UNSPECIFIED) + av_frame_set_colorspace(frame, avctx->colorspace); + if (av_frame_get_color_range(frame) == AVCOL_RANGE_UNSPECIFIED) + av_frame_set_color_range(frame, avctx->color_range); break; case AVMEDIA_TYPE_AUDIO: if (!frame->sample_rate) @@ -648,8 +776,6 @@ int ff_init_buffer_info(AVCodecContext *avctx, AVFrame *frame) avctx->channels); return AVERROR(ENOSYS); } - - frame->channel_layout = av_get_default_channel_layout(avctx->channels); } } av_frame_set_channels(frame, avctx->channels); @@ -659,6 +785,7 @@ int ff_init_buffer_info(AVCodecContext *avctx, AVFrame *frame) } #if FF_API_GET_BUFFER +FF_DISABLE_DEPRECATION_WARNINGS int avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame) { return avcodec_default_get_buffer2(avctx, frame, 0); @@ -682,6 +809,7 @@ static void compat_release_buffer(void *opaque, uint8_t *data) AVBufferRef *buf = opaque; av_buffer_unref(&buf); } +FF_ENABLE_DEPRECATION_WARNINGS #endif static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags) @@ -698,8 +826,9 @@ static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags) return ret; #if FF_API_GET_BUFFER +FF_DISABLE_DEPRECATION_WARNINGS /* - * Wrap an old get_buffer()-allocated buffer in an bunch of AVBuffers. + * Wrap an old get_buffer()-allocated buffer in a bunch of AVBuffers. * We wrap each plane in its own AVBuffer. Each of those has a reference to * a dummy AVBuffer as its private data, unreffing it on free. 
* When all the planes are freed, the dummy buffer's free callback calls @@ -722,7 +851,7 @@ static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags) * avcodec_default_get_buffer */ if (frame->buf[0]) - return 0; + goto end; priv = av_mallocz(sizeof(*priv)); if (!priv) { @@ -758,6 +887,10 @@ do { \ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); planes = av_pix_fmt_count_planes(frame->format); + /* workaround for AVHWAccel plane count of 0, buf[0] is used as + check for allocated buffers: make libavcodec happy */ + if (desc && desc->flags & AV_PIX_FMT_FLAG_HWACCEL) + planes = 1; if (!desc || planes <= 0) { ret = AVERROR(EINVAL); goto fail; @@ -794,6 +927,10 @@ do { \ av_buffer_unref(&dummy_buf); +end: + frame->width = avctx->width; + frame->height = avctx->height; + return 0; fail: @@ -802,9 +939,17 @@ fail: av_buffer_unref(&dummy_buf); return ret; } +FF_ENABLE_DEPRECATION_WARNINGS #endif - return avctx->get_buffer2(avctx, frame, flags); + ret = avctx->get_buffer2(avctx, frame, flags); + + if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { + frame->width = avctx->width; + frame->height = avctx->height; + } + + return ret; } int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags) @@ -871,6 +1016,7 @@ void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic) int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic) { av_assert0(0); + return AVERROR_BUG; } #endif @@ -901,7 +1047,7 @@ int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, static int is_hwaccel_pix_fmt(enum AVPixelFormat pix_fmt) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt); - return desc->flags & PIX_FMT_HWACCEL; + return desc->flags & AV_PIX_FMT_FLAG_HWACCEL; } enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat *fmt) @@ -911,6 +1057,7 @@ enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const en return fmt[0]; } +#if FF_API_AVFRAME_LAVC void avcodec_get_frame_defaults(AVFrame *frame) { #if LIBAVCODEC_VERSION_MAJOR >= 55 @@ -921,54 +1068,29 @@ void avcodec_get_frame_defaults(AVFrame *frame) #endif memset(frame, 0, sizeof(AVFrame)); - - frame->pts = - frame->pkt_dts = - frame->pkt_pts = AV_NOPTS_VALUE; - av_frame_set_best_effort_timestamp(frame, AV_NOPTS_VALUE); - av_frame_set_pkt_duration (frame, 0); - av_frame_set_pkt_pos (frame, -1); - av_frame_set_pkt_size (frame, -1); - frame->key_frame = 1; - frame->sample_aspect_ratio = (AVRational) {0, 1 }; - frame->format = -1; /* unknown */ - frame->extended_data = frame->data; + av_frame_unref(frame); } AVFrame *avcodec_alloc_frame(void) { - AVFrame *frame = av_malloc(sizeof(AVFrame)); - - if (frame == NULL) - return NULL; - - frame->extended_data = NULL; - avcodec_get_frame_defaults(frame); - - return frame; + return av_frame_alloc(); } void avcodec_free_frame(AVFrame **frame) { - AVFrame *f; - - if (!frame || !*frame) - return; - - f = *frame; - - if (f->extended_data != f->data) - av_freep(&f->extended_data); - - av_freep(frame); + av_frame_free(frame); } - -#define MAKE_ACCESSORS(str, name, type, field) \ - type av_##name##_get_##field(const str *s) { return s->field; } \ - void av_##name##_set_##field(str *s, type v) { s->field = v; } +#endif MAKE_ACCESSORS(AVCodecContext, codec, AVRational, pkt_timebase) MAKE_ACCESSORS(AVCodecContext, codec, const AVCodecDescriptor *, codec_descriptor) +MAKE_ACCESSORS(AVCodecContext, codec, int, lowres) +MAKE_ACCESSORS(AVCodecContext, codec, int, seek_preroll) + 
+int av_codec_get_max_lowres(const AVCodec *codec) +{ + return codec->max_lowres; +} static void avcodec_get_subtitle_defaults(AVSubtitle *sub) { @@ -999,13 +1121,6 @@ static int get_bit_rate(AVCodecContext *ctx) return bit_rate; } -#if FF_API_AVCODEC_OPEN -int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec) -{ - return avcodec_open2(avctx, codec, NULL); -} -#endif - int attribute_align_arg ff_codec_open2_recursive(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options) { int ret = 0; @@ -1060,6 +1175,12 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code goto free_and_end; } + avctx->internal->to_free = av_frame_alloc(); + if (!avctx->internal->to_free) { + ret = AVERROR(ENOMEM); + goto free_and_end; + } + if (codec->priv_data_size > 0) { if (!avctx->priv_data) { avctx->priv_data = av_mallocz(codec->priv_data_size); @@ -1080,20 +1201,22 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code if ((ret = av_opt_set_dict(avctx, &tmp)) < 0) goto free_and_end; - // only call avcodec_set_dimensions() for non H.264/VP6F codecs so as not to overwrite previously setup dimensions + // only call ff_set_dimensions() for non H.264/VP6F codecs so as not to overwrite previously setup dimensions if (!(avctx->coded_width && avctx->coded_height && avctx->width && avctx->height && (avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_VP6F))) { if (avctx->coded_width && avctx->coded_height) - avcodec_set_dimensions(avctx, avctx->coded_width, avctx->coded_height); + ret = ff_set_dimensions(avctx, avctx->coded_width, avctx->coded_height); else if (avctx->width && avctx->height) - avcodec_set_dimensions(avctx, avctx->width, avctx->height); + ret = ff_set_dimensions(avctx, avctx->width, avctx->height); + if (ret < 0) + goto free_and_end; } if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height) && ( av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx) < 0 || av_image_check_size(avctx->width, avctx->height, 0, avctx) < 0)) { av_log(avctx, AV_LOG_WARNING, "Ignoring invalid width/height values\n"); - avcodec_set_dimensions(avctx, 0, 0); + ff_set_dimensions(avctx, 0, 0); } /* if the decoder init function was already called previously, @@ -1125,13 +1248,13 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { const char *codec_string = av_codec_is_encoder(codec) ? "encoder" : "decoder"; AVCodec *codec2; - av_log(NULL, AV_LOG_ERROR, + av_log(avctx, AV_LOG_ERROR, "The %s '%s' is experimental but experimental codecs are not enabled, " "add '-strict %d' if you want to use it.\n", codec_string, codec->name, FF_COMPLIANCE_EXPERIMENTAL); codec2 = av_codec_is_encoder(codec) ? 
avcodec_find_encoder(codec->id) : avcodec_find_decoder(codec->id); if (!(codec2->capabilities & CODEC_CAP_EXPERIMENTAL)) - av_log(NULL, AV_LOG_ERROR, "Alternatively use the non experimental %s '%s'.\n", + av_log(avctx, AV_LOG_ERROR, "Alternatively use the non experimental %s '%s'.\n", codec_string, codec2->name); ret = AVERROR_EXPERIMENTAL; goto free_and_end; @@ -1154,7 +1277,7 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code goto free_and_end; } - if (HAVE_THREADS && !avctx->thread_opaque + if (HAVE_THREADS && !(avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))) { ret = ff_thread_init(avctx); if (ret < 0) { @@ -1354,17 +1477,24 @@ end: free_and_end: av_dict_free(&tmp); av_freep(&avctx->priv_data); - if (avctx->internal) + if (avctx->internal) { av_freep(&avctx->internal->pool); + av_frame_free(&avctx->internal->to_free); + } av_freep(&avctx->internal); avctx->codec = NULL; goto end; } -int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int size) +int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size) { - if (size < 0 || avpkt->size < 0 || size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE) { - av_log(avctx, AV_LOG_ERROR, "Size %d invalid\n", size); + if (avpkt->size < 0) { + av_log(avctx, AV_LOG_ERROR, "Invalid negative user packet size %d\n", avpkt->size); + return AVERROR(EINVAL); + } + if (size < 0 || size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE) { + av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n", + size, INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE); return AVERROR(EINVAL); } @@ -1381,17 +1511,21 @@ int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int size) if (avpkt->data) { AVBufferRef *buf = avpkt->buf; #if FF_API_DESTRUCT_PACKET +FF_DISABLE_DEPRECATION_WARNINGS void *destruct = avpkt->destruct; +FF_ENABLE_DEPRECATION_WARNINGS #endif if (avpkt->size < size) { - av_log(avctx, AV_LOG_ERROR, "User packet is too small (%d < %d)\n", avpkt->size, size); + av_log(avctx, AV_LOG_ERROR, "User packet is too small (%d < %"PRId64")\n", avpkt->size, size); return AVERROR(EINVAL); } av_init_packet(avpkt); #if FF_API_DESTRUCT_PACKET +FF_DISABLE_DEPRECATION_WARNINGS avpkt->destruct = destruct; +FF_ENABLE_DEPRECATION_WARNINGS #endif avpkt->buf = buf; avpkt->size = size; @@ -1399,7 +1533,7 @@ int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int size) } else { int ret = av_new_packet(avpkt, size); if (ret < 0) - av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %d\n", size); + av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size); return ret; } } @@ -1415,26 +1549,23 @@ int ff_alloc_packet(AVPacket *avpkt, int size) static int pad_last_frame(AVCodecContext *s, AVFrame **dst, const AVFrame *src) { AVFrame *frame = NULL; - uint8_t *buf = NULL; int ret; - if (!(frame = avcodec_alloc_frame())) + if (!(frame = av_frame_alloc())) return AVERROR(ENOMEM); - *frame = *src; - if ((ret = av_samples_get_buffer_size(&frame->linesize[0], s->channels, - s->frame_size, s->sample_fmt, 0)) < 0) + frame->format = src->format; + frame->channel_layout = src->channel_layout; + av_frame_set_channels(frame, av_frame_get_channels(src)); + frame->nb_samples = s->frame_size; + ret = av_frame_get_buffer(frame, 32); + if (ret < 0) goto fail; - if (!(buf = av_malloc(ret))) { - ret = AVERROR(ENOMEM); + ret = av_frame_copy_props(frame, src); + if (ret < 0) goto fail; - } - frame->nb_samples = s->frame_size; - if ((ret = 
avcodec_fill_audio_frame(frame, s->channels, s->sample_fmt, - buf, ret, 0)) < 0) - goto fail; if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0, src->nb_samples, s->channels, s->sample_fmt)) < 0) goto fail; @@ -1448,10 +1579,7 @@ static int pad_last_frame(AVCodecContext *s, AVFrame **dst, const AVFrame *src) return 0; fail: - if (frame->extended_data != frame->data) - av_freep(&frame->extended_data); - av_freep(&buf); - av_freep(&frame); + av_frame_free(&frame); return ret; } @@ -1573,12 +1701,7 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx, avpkt->flags |= AV_PKT_FLAG_KEY; end: - if (padded_frame) { - av_freep(&padded_frame->data[0]); - if (padded_frame->extended_data != padded_frame->data) - av_freep(&padded_frame->extended_data); - av_freep(&padded_frame); - } + av_frame_free(&padded_frame); return ret; } @@ -1589,7 +1712,6 @@ int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx, const short *samples) { AVPacket pkt; - AVFrame frame0 = { { 0 } }; AVFrame *frame; int ret, samples_size, got_packet; @@ -1598,8 +1720,7 @@ int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx, pkt.size = buf_size; if (samples) { - frame = &frame0; - avcodec_get_frame_defaults(frame); + frame = av_frame_alloc(); if (avctx->frame_size) { frame->nb_samples = avctx->frame_size; @@ -1610,13 +1731,16 @@ int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx, if (!av_get_bits_per_sample(avctx->codec_id)) { av_log(avctx, AV_LOG_ERROR, "avcodec_encode_audio() does not " "support this codec\n"); + av_frame_free(&frame); return AVERROR(EINVAL); } nb_samples = (int64_t)buf_size * 8 / (av_get_bits_per_sample(avctx->codec_id) * avctx->channels); - if (nb_samples >= INT_MAX) + if (nb_samples >= INT_MAX) { + av_frame_free(&frame); return AVERROR(EINVAL); + } frame->nb_samples = nb_samples; } @@ -1628,8 +1752,10 @@ int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx, if ((ret = avcodec_fill_audio_frame(frame, avctx->channels, avctx->sample_fmt, (const uint8_t *)samples, - samples_size, 1)) < 0) + samples_size, 1)) < 0) { + av_frame_free(&frame); return ret; + } /* fabricate frame pts from sample count. * this is needed because the avcodec_encode_audio() API does not have @@ -1651,11 +1777,12 @@ int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx, avctx->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY); } /* free any side data since we cannot return it */ - ff_packet_free_side_data(&pkt); + av_packet_free_side_data(&pkt); if (frame && frame->extended_data != frame->data) av_freep(&frame->extended_data); + av_frame_free(&frame); return ret ? 
ret : pkt.size; } @@ -1767,6 +1894,8 @@ int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx, if (ret < 0 || !*got_packet_ptr) av_free_packet(avpkt); + else + av_packet_merge_side_data(avpkt); emms_c(); return ret; @@ -1818,69 +1947,119 @@ static int64_t guess_correct_pts(AVCodecContext *ctx, return pts; } -static void apply_param_change(AVCodecContext *avctx, AVPacket *avpkt) +static int apply_param_change(AVCodecContext *avctx, AVPacket *avpkt) { - int size = 0; + int size = 0, ret; const uint8_t *data; uint32_t flags; - if (!(avctx->codec->capabilities & CODEC_CAP_PARAM_CHANGE)) - return; - data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size); - if (!data || size < 4) - return; + if (!data) + return 0; + + if (!(avctx->codec->capabilities & CODEC_CAP_PARAM_CHANGE)) { + av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter " + "changes, but PARAM_CHANGE side data was sent to it.\n"); + return AVERROR(EINVAL); + } + + if (size < 4) + goto fail; + flags = bytestream_get_le32(&data); size -= 4; - if (size < 4) /* Required for any of the changes */ - return; + if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) { + if (size < 4) + goto fail; avctx->channels = bytestream_get_le32(&data); size -= 4; } if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) { if (size < 8) - return; + goto fail; avctx->channel_layout = bytestream_get_le64(&data); size -= 8; } - if (size < 4) - return; if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) { + if (size < 4) + goto fail; avctx->sample_rate = bytestream_get_le32(&data); size -= 4; } if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) { if (size < 8) - return; + goto fail; avctx->width = bytestream_get_le32(&data); avctx->height = bytestream_get_le32(&data); - avcodec_set_dimensions(avctx, avctx->width, avctx->height); size -= 8; + ret = ff_set_dimensions(avctx, avctx->width, avctx->height); + if (ret < 0) + return ret; } + + return 0; +fail: + av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n"); + return AVERROR_INVALIDDATA; } static int add_metadata_from_side_data(AVCodecContext *avctx, AVFrame *frame) { - int size, ret = 0; + int size; const uint8_t *side_metadata; - const uint8_t *end; - side_metadata = av_packet_get_side_data(avctx->pkt, + AVDictionary **frame_md = avpriv_frame_get_metadatap(frame); + + side_metadata = av_packet_get_side_data(avctx->internal->pkt, AV_PKT_DATA_STRINGS_METADATA, &size); - if (!side_metadata) - goto end; - end = side_metadata + size; - while (side_metadata < end) { - const uint8_t *key = side_metadata; - const uint8_t *val = side_metadata + strlen(key) + 1; - int ret = av_dict_set(avpriv_frame_get_metadatap(frame), key, val, 0); - if (ret < 0) - break; - side_metadata = val + strlen(val) + 1; - } -end: - return ret; + return av_packet_unpack_dictionary(side_metadata, size, frame_md); +} + +static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame) +{ + int ret; + + /* move the original frame to our backup */ + av_frame_unref(avci->to_free); + av_frame_move_ref(avci->to_free, frame); + + /* now copy everything except the AVBufferRefs back + * note that we make a COPY of the side data, so calling av_frame_free() on + * the caller's frame will work properly */ + ret = av_frame_copy_props(frame, avci->to_free); + if (ret < 0) + return ret; + + memcpy(frame->data, avci->to_free->data, sizeof(frame->data)); + memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize)); + if (avci->to_free->extended_data != avci->to_free->data) { + int planes 
= av_frame_get_channels(avci->to_free); + int size = planes * sizeof(*frame->extended_data); + + if (!size) { + av_frame_unref(frame); + return AVERROR_BUG; + } + + frame->extended_data = av_malloc(size); + if (!frame->extended_data) { + av_frame_unref(frame); + return AVERROR(ENOMEM); + } + memcpy(frame->extended_data, avci->to_free->extended_data, + size); + } else + frame->extended_data = frame->data; + + frame->format = avci->to_free->format; + frame->width = avci->to_free->width; + frame->height = avci->to_free->height; + frame->channel_layout = avci->to_free->channel_layout; + frame->nb_samples = avci->to_free->nb_samples; + av_frame_set_channels(frame, av_frame_get_channels(avci->to_free)); + + return 0; } int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, @@ -1892,6 +2071,8 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi // copy to ensure we do not change avpkt AVPacket tmp = *avpkt; + if (!avctx->codec) + return AVERROR(EINVAL); if (avctx->codec->type != AVMEDIA_TYPE_VIDEO) { av_log(avctx, AV_LOG_ERROR, "Invalid media type for video\n"); return AVERROR(EINVAL); @@ -1901,15 +2082,18 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi if ((avctx->coded_width || avctx->coded_height) && av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx)) return AVERROR(EINVAL); - avcodec_get_frame_defaults(picture); - - if (!avctx->refcounted_frames) - av_frame_unref(&avci->to_free); + av_frame_unref(picture); if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) { int did_split = av_packet_split_side_data(&tmp); - apply_param_change(avctx, &tmp); - avctx->pkt = &tmp; + ret = apply_param_change(avctx, &tmp); + if (ret < 0) { + av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n"); + if (avctx->err_recognition & AV_EF_EXPLODE) + goto fail; + } + + avctx->internal->pkt = &tmp; if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr, &tmp); @@ -1932,22 +2116,21 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi } add_metadata_from_side_data(avctx, picture); +fail: emms_c(); //needed to avoid an emms_c() call before every return; - avctx->pkt = NULL; + avctx->internal->pkt = NULL; if (did_split) { - ff_packet_free_side_data(&tmp); + av_packet_free_side_data(&tmp); if(ret == tmp.size) ret = avpkt->size; } - if (ret < 0 && picture->data[0]) - av_frame_unref(picture); - if (*got_picture_ptr) { if (!avctx->refcounted_frames) { - avci->to_free = *picture; - avci->to_free.extended_data = avci->to_free.data; + int err = unrefcount_frame(avci, picture); + if (err < 0) + return err; } avctx->frame_number++; @@ -1955,13 +2138,14 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi guess_correct_pts(avctx, picture->pkt_pts, picture->pkt_dts)); - } + } else + av_frame_unref(picture); } else ret = 0; /* many decoders assign whole AVFrames, thus overwriting extended_data; * make sure it's set correctly */ - picture->extended_data = picture->data; + av_assert0(!picture->extended_data || picture->extended_data == picture->data); return ret; } @@ -1971,7 +2155,7 @@ int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *sa int *frame_size_ptr, AVPacket *avpkt) { - AVFrame frame = { { 0 } }; + AVFrame *frame = av_frame_alloc(); int ret, got_frame = 0; if (avctx->get_buffer 
!= avcodec_default_get_buffer) { @@ -1983,26 +2167,27 @@ int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *sa avctx->release_buffer = avcodec_default_release_buffer; } - ret = avcodec_decode_audio4(avctx, &frame, &got_frame, avpkt); + ret = avcodec_decode_audio4(avctx, frame, &got_frame, avpkt); if (ret >= 0 && got_frame) { int ch, plane_size; int planar = av_sample_fmt_is_planar(avctx->sample_fmt); int data_size = av_samples_get_buffer_size(&plane_size, avctx->channels, - frame.nb_samples, + frame->nb_samples, avctx->sample_fmt, 1); if (*frame_size_ptr < data_size) { av_log(avctx, AV_LOG_ERROR, "output buffer size is too small for " "the current frame (%d < %d)\n", *frame_size_ptr, data_size); + av_frame_free(&frame); return AVERROR(EINVAL); } - memcpy(samples, frame.extended_data[0], plane_size); + memcpy(samples, frame->extended_data[0], plane_size); if (planar && avctx->channels > 1) { uint8_t *out = ((uint8_t *)samples) + plane_size; for (ch = 1; ch < avctx->channels; ch++) { - memcpy(out, frame.extended_data[ch], plane_size); + memcpy(out, frame->extended_data[ch], plane_size); out += plane_size; } } @@ -2010,6 +2195,7 @@ int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *sa } else { *frame_size_ptr = 0; } + av_frame_free(&frame); return ret; } @@ -2021,7 +2207,6 @@ int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, const AVPacket *avpkt) { AVCodecInternal *avci = avctx->internal; - int planar, channels; int ret = 0; *got_frame_ptr = 0; @@ -2030,30 +2215,39 @@ int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n"); return AVERROR(EINVAL); } + if (!avctx->codec) + return AVERROR(EINVAL); if (avctx->codec->type != AVMEDIA_TYPE_AUDIO) { av_log(avctx, AV_LOG_ERROR, "Invalid media type for audio\n"); return AVERROR(EINVAL); } - avcodec_get_frame_defaults(frame); - - if (!avctx->refcounted_frames) - av_frame_unref(&avci->to_free); + av_frame_unref(frame); - if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size) { + if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) { uint8_t *side; int side_size; + uint32_t discard_padding = 0; // copy to ensure we do not change avpkt AVPacket tmp = *avpkt; int did_split = av_packet_split_side_data(&tmp); - apply_param_change(avctx, &tmp); + ret = apply_param_change(avctx, &tmp); + if (ret < 0) { + av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n"); + if (avctx->err_recognition & AV_EF_EXPLODE) + goto fail; + } - avctx->pkt = &tmp; - ret = avctx->codec->decode(avctx, frame, got_frame_ptr, &tmp); + avctx->internal->pkt = &tmp; + if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) + ret = ff_thread_decode_frame(avctx, frame, got_frame_ptr, &tmp); + else { + ret = avctx->codec->decode(avctx, frame, got_frame_ptr, &tmp); + frame->pkt_dts = avpkt->dts; + } if (ret >= 0 && *got_frame_ptr) { add_metadata_from_side_data(avctx, frame); avctx->frame_number++; - frame->pkt_dts = avpkt->dts; av_frame_set_best_effort_timestamp(frame, guess_correct_pts(avctx, frame->pkt_pts, @@ -2066,17 +2260,14 @@ int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, av_frame_set_channels(frame, avctx->channels); if (!frame->sample_rate) frame->sample_rate = avctx->sample_rate; - if (!avctx->refcounted_frames) { - avci->to_free = *frame; - avci->to_free.extended_data = avci->to_free.data; - } } - side= 
av_packet_get_side_data(avctx->pkt, AV_PKT_DATA_SKIP_SAMPLES, &side_size); + side= av_packet_get_side_data(avctx->internal->pkt, AV_PKT_DATA_SKIP_SAMPLES, &side_size); if(side && side_size>=10) { avctx->internal->skip_samples = AV_RL32(side); av_log(avctx, AV_LOG_DEBUG, "skip %d samples due to side data\n", avctx->internal->skip_samples); + discard_padding = AV_RL32(side + 4); } if (avctx->internal->skip_samples && *got_frame_ptr) { if(frame->nb_samples <= avctx->internal->skip_samples){ @@ -2107,29 +2298,42 @@ int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, } } - avctx->pkt = NULL; + if (discard_padding > 0 && discard_padding <= frame->nb_samples && *got_frame_ptr) { + if (discard_padding == frame->nb_samples) { + *got_frame_ptr = 0; + } else { + if(avctx->pkt_timebase.num && avctx->sample_rate) { + int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding, + (AVRational){1, avctx->sample_rate}, + avctx->pkt_timebase); + if (av_frame_get_pkt_duration(frame) >= diff_ts) + av_frame_set_pkt_duration(frame, av_frame_get_pkt_duration(frame) - diff_ts); + } else { + av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n"); + } + av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n", + discard_padding, frame->nb_samples); + frame->nb_samples -= discard_padding; + } + } +fail: + avctx->internal->pkt = NULL; if (did_split) { - ff_packet_free_side_data(&tmp); + av_packet_free_side_data(&tmp); if(ret == tmp.size) ret = avpkt->size; } - if (ret < 0 && frame->data[0]) + if (ret >= 0 && *got_frame_ptr) { + if (!avctx->refcounted_frames) { + int err = unrefcount_frame(avci, frame); + if (err < 0) + return err; + } + } else av_frame_unref(frame); } - /* many decoders assign whole AVFrames, thus overwriting extended_data; - * make sure it's set correctly; assume decoders that actually use - * extended_data are doing it correctly */ - if (*got_frame_ptr) { - planar = av_sample_fmt_is_planar(frame->format); - channels = av_frame_get_channels(frame); - if (!(planar && channels > AV_NUM_DATA_POINTERS)) - frame->extended_data = frame->data; - } else { - frame->extended_data = NULL; - } - return ret; } @@ -2145,7 +2349,7 @@ static int recode_subtitle(AVCodecContext *avctx, AVPacket tmp; #endif - if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER) + if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0) return 0; #if CONFIG_ICONV @@ -2180,7 +2384,7 @@ static int recode_subtitle(AVCodecContext *avctx, goto end; } outpkt->size -= outl; - outpkt->data[outpkt->size - 1] = '\0'; + memset(outpkt->data + outpkt->size, 0, outl); end: if (cd != (iconv_t)-1) @@ -2191,12 +2395,37 @@ end: #endif } +static int utf8_check(const uint8_t *str) +{ + const uint8_t *byte; + uint32_t codepoint, min; + + while (*str) { + byte = str; + GET_UTF8(codepoint, *(byte++), return 0;); + min = byte - str == 1 ? 0 : byte - str == 2 ? 
0x80 : + 1 << (5 * (byte - str) - 4); + if (codepoint < min || codepoint >= 0x110000 || + codepoint == 0xFFFE /* BOM */ || + codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */) + return 0; + str = byte; + } + return 1; +} + int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt) { - int ret = 0; + int i, ret = 0; + if (!avpkt->data && avpkt->size) { + av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n"); + return AVERROR(EINVAL); + } + if (!avctx->codec) + return AVERROR(EINVAL); if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) { av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n"); return AVERROR(EINVAL); @@ -2205,18 +2434,28 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, *got_sub_ptr = 0; avcodec_get_subtitle_defaults(sub); - if (avpkt->size) { + if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size) { AVPacket pkt_recoded; AVPacket tmp = *avpkt; int did_split = av_packet_split_side_data(&tmp); //apply_param_change(avctx, &tmp); + if (did_split) { + /* FFMIN() prevents overflow in case the packet wasn't allocated with + * proper padding. + * If the side data is smaller than the buffer padding size, the + * remaining bytes should have already been filled with zeros by the + * original packet allocation anyway. */ + memset(tmp.data + tmp.size, 0, + FFMIN(avpkt->size - tmp.size, FF_INPUT_BUFFER_PADDING_SIZE)); + } + pkt_recoded = tmp; ret = recode_subtitle(avctx, &pkt_recoded, &tmp); if (ret < 0) { *got_sub_ptr = 0; } else { - avctx->pkt = &pkt_recoded; + avctx->internal->pkt = &pkt_recoded; if (avctx->pkt_timebase.den && avpkt->pts != AV_NOPTS_VALUE) sub->pts = av_rescale_q(avpkt->pts, @@ -2224,6 +2463,24 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded); av_assert1((ret >= 0) >= !!*got_sub_ptr && !!*got_sub_ptr >= !!sub->num_rects); + + if (sub->num_rects && !sub->end_display_time && avpkt->duration && + avctx->pkt_timebase.num) { + AVRational ms = { 1, 1000 }; + sub->end_display_time = av_rescale_q(avpkt->duration, + avctx->pkt_timebase, ms); + } + + for (i = 0; i < sub->num_rects; i++) { + if (sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) { + av_log(avctx, AV_LOG_ERROR, + "Invalid UTF-8 in decoded subtitles text; " + "maybe missing -sub_charenc option\n"); + avsubtitle_free(sub); + return AVERROR_INVALIDDATA; + } + } + if (tmp.data != pkt_recoded.data) { // did we recode? 
/* prevent from destroying side data from original packet */ pkt_recoded.side_data = NULL; @@ -2231,12 +2488,15 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, av_free_packet(&pkt_recoded); } - sub->format = !(avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB); - avctx->pkt = NULL; + if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB) + sub->format = 0; + else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB) + sub->format = 1; + avctx->internal->pkt = NULL; } if (did_split) { - ff_packet_free_side_data(&tmp); + av_packet_free_side_data(&tmp); if(ret == tmp.size) ret = avpkt->size; } @@ -2281,7 +2541,12 @@ av_cold int ff_codec_close_recursive(AVCodecContext *avctx) av_cold int avcodec_close(AVCodecContext *avctx) { - int ret = ff_lock_avcodec(avctx); + int ret; + + if (!avctx) + return 0; + + ret = ff_lock_avcodec(avctx); if (ret < 0) return ret; @@ -2294,15 +2559,14 @@ av_cold int avcodec_close(AVCodecContext *avctx) ff_frame_thread_encoder_free(avctx); ff_lock_avcodec(avctx); } - if (HAVE_THREADS && avctx->thread_opaque) + if (HAVE_THREADS && avctx->internal->thread_ctx) ff_thread_free(avctx); if (avctx->codec && avctx->codec->close) avctx->codec->close(avctx); avctx->coded_frame = NULL; avctx->internal->byte_buffer_size = 0; av_freep(&avctx->internal->byte_buffer); - if (!avctx->refcounted_frames) - av_frame_unref(&avctx->internal->to_free); + av_frame_free(&avctx->internal->to_free); for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++) av_buffer_pool_uninit(&pool->pools[i]); av_freep(&avctx->internal->pool); @@ -2330,6 +2594,12 @@ static enum AVCodecID remap_deprecated_codec_id(enum AVCodecID id) // case AV_CODEC_ID_UTVIDEO_DEPRECATED: return AV_CODEC_ID_UTVIDEO; case AV_CODEC_ID_OPUS_DEPRECATED: return AV_CODEC_ID_OPUS; case AV_CODEC_ID_TAK_DEPRECATED : return AV_CODEC_ID_TAK; + case AV_CODEC_ID_PCM_S24LE_PLANAR_DEPRECATED : return AV_CODEC_ID_PCM_S24LE_PLANAR; + case AV_CODEC_ID_PCM_S32LE_PLANAR_DEPRECATED : return AV_CODEC_ID_PCM_S32LE_PLANAR; + case AV_CODEC_ID_ESCAPE130_DEPRECATED : return AV_CODEC_ID_ESCAPE130; + case AV_CODEC_ID_G2M_DEPRECATED : return AV_CODEC_ID_G2M; + case AV_CODEC_ID_WEBP_DEPRECATED: return AV_CODEC_ID_WEBP; + case AV_CODEC_ID_HEVC_DEPRECATED: return AV_CODEC_ID_HEVC; default : return id; } } @@ -2453,9 +2723,13 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) profile = av_get_profile_name(p, enc->profile); } - snprintf(buf, buf_size, "%s: %s%s", codec_type ? codec_type : "unknown", - codec_name, enc->mb_decision ? " (hq)" : ""); + snprintf(buf, buf_size, "%s: %s", codec_type ? 
codec_type : "unknown", + codec_name); buf[0] ^= 'a' ^ 'A'; /* first letter in uppercase */ + + if (enc->codec && strcmp(enc->codec->name, codec_name)) + snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s)", enc->codec->name); + if (profile) snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s)", profile); if (enc->codec_tag) { @@ -2468,13 +2742,26 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) switch (enc->codec_type) { case AVMEDIA_TYPE_VIDEO: if (enc->pix_fmt != AV_PIX_FMT_NONE) { + char detail[256] = "("; + const char *colorspace_name; snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %s", av_get_pix_fmt_name(enc->pix_fmt)); if (enc->bits_per_raw_sample && enc->bits_per_raw_sample <= av_pix_fmt_desc_get(enc->pix_fmt)->comp[0].depth_minus1) - snprintf(buf + strlen(buf), buf_size - strlen(buf), - " (%d bpc)", enc->bits_per_raw_sample); + av_strlcatf(detail, sizeof(detail), "%d bpc, ", enc->bits_per_raw_sample); + if (enc->color_range != AVCOL_RANGE_UNSPECIFIED) + av_strlcatf(detail, sizeof(detail), + enc->color_range == AVCOL_RANGE_MPEG ? "tv, ": "pc, "); + + colorspace_name = av_get_colorspace_name(enc->colorspace); + if (colorspace_name) + av_strlcatf(detail, sizeof(detail), "%s, ", colorspace_name); + + if (strlen(detail) > 1) { + detail[strlen(detail) - 2] = 0; + av_strlcatf(buf, buf_size, "%s)", detail); + } } if (enc->width) { snprintf(buf + strlen(buf), buf_size - strlen(buf), @@ -2523,6 +2810,11 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) enc->time_base.num / g, enc->time_base.den / g); } break; + case AVMEDIA_TYPE_SUBTITLE: + if (enc->width) + snprintf(buf + strlen(buf), buf_size - strlen(buf), + ", %dx%d", enc->width, enc->height); + break; default: return; } @@ -2538,6 +2830,9 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) if (bitrate != 0) { snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %d kb/s", bitrate / 1000); + } else if (enc->rc_max_rate > 0) { + snprintf(buf + strlen(buf), buf_size - strlen(buf), + ", max. 
%d kb/s", enc->rc_max_rate / 1000); } } @@ -2591,6 +2886,9 @@ void avcodec_flush_buffers(AVCodecContext *avctx) avctx->pts_correction_last_pts = avctx->pts_correction_last_dts = INT64_MIN; + + if (!avctx->refcounted_frames) + av_frame_unref(avctx->internal->to_free); } int av_get_exact_bits_per_sample(enum AVCodecID codec_id) @@ -2769,6 +3067,8 @@ int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes) switch (id) { case AV_CODEC_ID_ADPCM_AFC: return frame_bytes / (9 * ch) * 16; + case AV_CODEC_ID_ADPCM_DTK: + return frame_bytes / (16 * ch) * 28; case AV_CODEC_ID_ADPCM_4XM: case AV_CODEC_ID_ADPCM_IMA_ISS: return (frame_bytes - 4 * ch) * 2 / ch; @@ -2810,11 +3110,15 @@ int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes) int blocks = frame_bytes / ba; switch (avctx->codec_id) { case AV_CODEC_ID_ADPCM_IMA_WAV: - return blocks * (1 + (ba - 4 * ch) / (4 * ch) * 8); + if (bps < 2 || bps > 5) + return 0; + return blocks * (1 + (ba - 4 * ch) / (bps * ch) * 8); case AV_CODEC_ID_ADPCM_IMA_DK3: return blocks * (((ba - 16) * 2 / 3 * 4) / ch); case AV_CODEC_ID_ADPCM_IMA_DK4: return blocks * (1 + (ba - 4 * ch) * 2 / ch); + case AV_CODEC_ID_ADPCM_IMA_RAD: + return blocks * ((ba - 4 * ch) * 2 / ch); case AV_CODEC_ID_ADPCM_MS: return blocks * (2 + (ba - 7 * ch) * 2 / ch); } @@ -2871,6 +3175,7 @@ int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b) } #if FF_API_MISSING_SAMPLE +FF_DISABLE_DEPRECATION_WARNINGS void av_log_missing_feature(void *avc, const char *feature, int want_sample) { av_log(avc, AV_LOG_WARNING, "%s is not implemented. Update your FFmpeg " @@ -2895,17 +3200,19 @@ void av_log_ask_for_sample(void *avc, const char *msg, ...) va_end(argument_list); } +FF_ENABLE_DEPRECATION_WARNINGS #endif /* FF_API_MISSING_SAMPLE */ static AVHWAccel *first_hwaccel = NULL; +static AVHWAccel **last_hwaccel = &first_hwaccel; void av_register_hwaccel(AVHWAccel *hwaccel) { - AVHWAccel **p = &first_hwaccel; - while (*p) - p = &(*p)->next; - *p = hwaccel; + AVHWAccel **p = last_hwaccel; hwaccel->next = NULL; + while(*p || avpriv_atomic_ptr_cas((void * volatile *)p, NULL, hwaccel)) + p = &(*p)->next; + last_hwaccel = &hwaccel->next; } AVHWAccel *av_hwaccel_next(AVHWAccel *hwaccel) @@ -2913,8 +3220,11 @@ AVHWAccel *av_hwaccel_next(AVHWAccel *hwaccel) return hwaccel ? 
hwaccel->next : first_hwaccel; } -AVHWAccel *ff_find_hwaccel(enum AVCodecID codec_id, enum AVPixelFormat pix_fmt) +AVHWAccel *ff_find_hwaccel(AVCodecContext *avctx) { + enum AVCodecID codec_id = avctx->codec->id; + enum AVPixelFormat pix_fmt = avctx->pix_fmt; + AVHWAccel *hwaccel = NULL; while ((hwaccel = av_hwaccel_next(hwaccel))) @@ -2926,19 +3236,19 @@ AVHWAccel *ff_find_hwaccel(enum AVCodecID codec_id, enum AVPixelFormat pix_fmt) int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)) { - if (ff_lockmgr_cb) { - if (ff_lockmgr_cb(&codec_mutex, AV_LOCK_DESTROY)) + if (lockmgr_cb) { + if (lockmgr_cb(&codec_mutex, AV_LOCK_DESTROY)) return -1; - if (ff_lockmgr_cb(&avformat_mutex, AV_LOCK_DESTROY)) + if (lockmgr_cb(&avformat_mutex, AV_LOCK_DESTROY)) return -1; } - ff_lockmgr_cb = cb; + lockmgr_cb = cb; - if (ff_lockmgr_cb) { - if (ff_lockmgr_cb(&codec_mutex, AV_LOCK_CREATE)) + if (lockmgr_cb) { + if (lockmgr_cb(&codec_mutex, AV_LOCK_CREATE)) return -1; - if (ff_lockmgr_cb(&avformat_mutex, AV_LOCK_CREATE)) + if (lockmgr_cb(&avformat_mutex, AV_LOCK_CREATE)) return -1; } return 0; @@ -2946,13 +3256,15 @@ int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)) int ff_lock_avcodec(AVCodecContext *log_ctx) { - if (ff_lockmgr_cb) { - if ((*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN)) + if (lockmgr_cb) { + if ((*lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN)) return -1; } entangled_thread_counter++; if (entangled_thread_counter != 1) { av_log(log_ctx, AV_LOG_ERROR, "Insufficient thread locking around avcodec_open/close()\n"); + if (!lockmgr_cb) + av_log(log_ctx, AV_LOG_ERROR, "No lock manager is set, please see av_lockmgr_register()\n"); ff_avcodec_locked = 1; ff_unlock_avcodec(); return AVERROR(EINVAL); @@ -2967,8 +3279,8 @@ int ff_unlock_avcodec(void) av_assert0(ff_avcodec_locked); ff_avcodec_locked = 0; entangled_thread_counter--; - if (ff_lockmgr_cb) { - if ((*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE)) + if (lockmgr_cb) { + if ((*lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE)) return -1; } return 0; @@ -2976,8 +3288,8 @@ int ff_unlock_avcodec(void) int avpriv_lock_avformat(void) { - if (ff_lockmgr_cb) { - if ((*ff_lockmgr_cb)(&avformat_mutex, AV_LOCK_OBTAIN)) + if (lockmgr_cb) { + if ((*lockmgr_cb)(&avformat_mutex, AV_LOCK_OBTAIN)) return -1; } return 0; @@ -2985,8 +3297,8 @@ int avpriv_lock_avformat(void) int avpriv_unlock_avformat(void) { - if (ff_lockmgr_cb) { - if ((*ff_lockmgr_cb)(&avformat_mutex, AV_LOCK_RELEASE)) + if (lockmgr_cb) { + if ((*lockmgr_cb)(&avformat_mutex, AV_LOCK_RELEASE)) return -1; } return 0; @@ -3021,6 +3333,11 @@ int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src) #if !HAVE_THREADS +enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt) +{ + return avctx->get_format(avctx, fmt); +} + int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags) { f->owner = avctx; @@ -3049,6 +3366,23 @@ int ff_thread_can_start_frame(AVCodecContext *avctx) return 1; } +int ff_alloc_entries(AVCodecContext *avctx, int count) +{ + return 0; +} + +void ff_reset_entries(AVCodecContext *avctx) +{ +} + +void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift) +{ +} + +void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n) +{ +} + #endif enum AVMediaType avcodec_get_type(enum AVCodecID codec_id) @@ -3093,3 +3427,36 @@ int avpriv_bprint_to_extradata(AVCodecContext *avctx, struct AVBPrint *buf) avctx->extradata_size = buf->len; return 0; } + +const uint8_t 
*avpriv_find_start_code(const uint8_t *av_restrict p, + const uint8_t *end, + uint32_t *av_restrict state) +{ + int i; + + av_assert0(p <= end); + if (p >= end) + return end; + + for (i = 0; i < 3; i++) { + uint32_t tmp = *state << 8; + *state = tmp + *(p++); + if (tmp == 0x100 || p == end) + return p; + } + + while (p < end) { + if (p[-1] > 1 ) p += 3; + else if (p[-2] ) p += 2; + else if (p[-3]|(p[-1]-1)) p++; + else { + p++; + break; + } + } + + p = FFMIN(p, end) - 4; + *state = AV_RB32(p); + + return p + 4; +} |
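avcodec_register() and av_register_hwaccel() above switch from a plain end-of-list append to an atomic compare-and-swap on the tail's next pointer, plus a cached last_avcodec/last_hwaccel tail so repeated registrations do not rescan the list. Below is a self-contained sketch of the same append-with-CAS idea, written with C11 stdatomic rather than avpriv_atomic_ptr_cas; the Node type and function name are made up for illustration.

```c
/* Hedged sketch of the lock-free append used by avcodec_register() above,
 * using C11 atomics instead of avpriv_atomic_ptr_cas(). */
#include <stdatomic.h>
#include <stddef.h>

typedef struct Node {
    struct Node *_Atomic next;
    const char *name;
} Node;

static Node *_Atomic first_node;

static void register_node(Node *n)
{
    Node *_Atomic *p = &first_node;

    atomic_store(&n->next, NULL);
    for (;;) {
        Node *expected = NULL;
        /* claim the NULL tail slot; if another thread got there first,
         * 'expected' now holds its node, so advance and retry */
        if (atomic_compare_exchange_strong(p, &expected, n))
            break;
        p = &expected->next;
    }
}
```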

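Several hunks above also replace the hand-rolled -((-x) >> n) ceiling shift with FF_CEIL_RSHIFT (in ff_set_dimensions(), avpriv_color_frame() and ff_init_buffer_info()). The small demo below shows what that rounding means for lowres-scaled dimensions; ceil_rshift() is a local stand-in for the macro and, like FFmpeg, assumes arithmetic right shift of negative ints.

```c
/* Hedged demo: divide by 2^b rounding up, the rounding FF_CEIL_RSHIFT and
 * the old -((-x) >> n) expression both implement for non-negative x. */
#include <assert.h>

static int ceil_rshift(int a, int b)
{
    return -((-a) >> b);   /* >> on a negative value rounds toward -inf,
                              so the double negation rounds a/2^b up */
}

int main(void)
{
    /* lowres = 1 halves coded dimensions, rounding odd sizes up */
    assert(ceil_rshift(1920, 1) == 960);
    assert(ceil_rshift(175,  1) == 88);   /* 87.5  -> 88 */
    assert(ceil_rshift(175,  2) == 44);   /* 43.75 -> 44 */
    return 0;
}
```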