summaryrefslogtreecommitdiff
path: root/ffmpeg/libavfilter
diff options
context:
space:
mode:
Diffstat (limited to 'ffmpeg/libavfilter')
-rw-r--r--ffmpeg/libavfilter/Makefile93
-rw-r--r--ffmpeg/libavfilter/af_aconvert.c43
-rw-r--r--ffmpeg/libavfilter/af_afade.c109
-rw-r--r--ffmpeg/libavfilter/af_aformat.c42
-rw-r--r--ffmpeg/libavfilter/af_amerge.c23
-rw-r--r--ffmpeg/libavfilter/af_amix.c41
-rw-r--r--ffmpeg/libavfilter/af_anull.c20
-rw-r--r--ffmpeg/libavfilter/af_apad.c21
-rw-r--r--ffmpeg/libavfilter/af_aresample.c95
-rw-r--r--ffmpeg/libavfilter/af_asetnsamples.c69
-rw-r--r--ffmpeg/libavfilter/af_ashowinfo.c28
-rw-r--r--ffmpeg/libavfilter/af_astreamsync.c32
-rw-r--r--ffmpeg/libavfilter/af_asyncts.c82
-rw-r--r--ffmpeg/libavfilter/af_atempo.c90
-rw-r--r--ffmpeg/libavfilter/af_biquads.c85
-rw-r--r--ffmpeg/libavfilter/af_channelmap.c125
-rw-r--r--ffmpeg/libavfilter/af_channelsplit.c38
-rw-r--r--ffmpeg/libavfilter/af_earwax.c27
-rw-r--r--ffmpeg/libavfilter/af_join.c68
-rw-r--r--ffmpeg/libavfilter/af_pan.c71
-rw-r--r--ffmpeg/libavfilter/af_resample.c84
-rw-r--r--ffmpeg/libavfilter/af_silencedetect.c154
-rw-r--r--ffmpeg/libavfilter/af_volume.c187
-rw-r--r--ffmpeg/libavfilter/af_volume.h28
-rw-r--r--ffmpeg/libavfilter/af_volumedetect.c12
-rw-r--r--ffmpeg/libavfilter/allfilters.c57
-rw-r--r--ffmpeg/libavfilter/asink_anullsink.c2
-rw-r--r--ffmpeg/libavfilter/asrc_aevalsrc.c263
-rw-r--r--ffmpeg/libavfilter/asrc_anullsrc.c57
-rw-r--r--ffmpeg/libavfilter/asrc_flite.c28
-rw-r--r--ffmpeg/libavfilter/asrc_sine.c13
-rw-r--r--ffmpeg/libavfilter/audio.c34
-rw-r--r--ffmpeg/libavfilter/avcodec.h6
-rw-r--r--ffmpeg/libavfilter/avf_concat.c17
-rw-r--r--ffmpeg/libavfilter/avf_showspectrum.c276
-rw-r--r--ffmpeg/libavfilter/avf_showwaves.c53
-rw-r--r--ffmpeg/libavfilter/avfilter.c603
-rw-r--r--ffmpeg/libavfilter/avfilter.h710
-rw-r--r--ffmpeg/libavfilter/avfiltergraph.c405
-rw-r--r--ffmpeg/libavfilter/avfiltergraph.h252
-rw-r--r--ffmpeg/libavfilter/buffer.c6
-rw-r--r--ffmpeg/libavfilter/buffersink.c257
-rw-r--r--ffmpeg/libavfilter/buffersrc.c151
-rw-r--r--ffmpeg/libavfilter/buffersrc.h8
-rw-r--r--ffmpeg/libavfilter/drawutils.c47
-rw-r--r--ffmpeg/libavfilter/f_ebur128.c51
-rw-r--r--ffmpeg/libavfilter/f_perms.c45
-rw-r--r--ffmpeg/libavfilter/f_select.c188
-rw-r--r--ffmpeg/libavfilter/f_sendcmd.c82
-rw-r--r--ffmpeg/libavfilter/f_setpts.c269
-rw-r--r--ffmpeg/libavfilter/f_settb.c80
-rw-r--r--ffmpeg/libavfilter/fifo.c56
-rw-r--r--ffmpeg/libavfilter/filtfmts.c22
-rw-r--r--ffmpeg/libavfilter/formats.c57
-rw-r--r--ffmpeg/libavfilter/formats.h2
-rw-r--r--ffmpeg/libavfilter/gradfun.h4
-rw-r--r--ffmpeg/libavfilter/graphdump.c14
-rw-r--r--ffmpeg/libavfilter/graphparser.c82
-rw-r--r--ffmpeg/libavfilter/internal.h87
-rw-r--r--ffmpeg/libavfilter/lavfutils.c5
-rw-r--r--ffmpeg/libavfilter/lavfutils.h2
-rw-r--r--ffmpeg/libavfilter/libavfilter.pc8
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/help_mp.h2126
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/mpbswap.h34
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/pullup.c823
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/pullup.h102
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_detc.c453
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_dint.c214
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_divtc.c722
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_down3dright.c166
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_eq.c2
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_fil.c116
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_filmdint.c1461
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_ivtc.c550
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_mcdeint.c340
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_noise.c475
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_ow.c322
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_perspective.c345
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_phase.c303
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_pullup.c316
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_qp.c178
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_sab.c324
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_scale.h34
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_spp.c621
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_telecine.c158
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_tinterlace.c235
-rw-r--r--ffmpeg/libavfilter/libmpcodecs/vf_uspp.c4
-rw-r--r--ffmpeg/libavfilter/lswsutils.c2
-rw-r--r--ffmpeg/libavfilter/split.c87
-rw-r--r--ffmpeg/libavfilter/src_movie.c78
-rw-r--r--ffmpeg/libavfilter/transform.c1
-rw-r--r--ffmpeg/libavfilter/version.h43
-rw-r--r--ffmpeg/libavfilter/vf_alphaextract.c131
-rw-r--r--ffmpeg/libavfilter/vf_alphamerge.c3
-rw-r--r--ffmpeg/libavfilter/vf_aspect.c289
-rw-r--r--ffmpeg/libavfilter/vf_bbox.c51
-rw-r--r--ffmpeg/libavfilter/vf_blackdetect.c43
-rw-r--r--ffmpeg/libavfilter/vf_blackframe.c100
-rw-r--r--ffmpeg/libavfilter/vf_blend.c236
-rw-r--r--ffmpeg/libavfilter/vf_boxblur.c168
-rw-r--r--ffmpeg/libavfilter/vf_colormatrix.c85
-rw-r--r--ffmpeg/libavfilter/vf_copy.c18
-rw-r--r--ffmpeg/libavfilter/vf_crop.c300
-rw-r--r--ffmpeg/libavfilter/vf_cropdetect.c168
-rw-r--r--ffmpeg/libavfilter/vf_curves.c263
-rw-r--r--ffmpeg/libavfilter/vf_decimate.c498
-rw-r--r--ffmpeg/libavfilter/vf_delogo.c137
-rw-r--r--ffmpeg/libavfilter/vf_deshake.c157
-rw-r--r--ffmpeg/libavfilter/vf_drawbox.c353
-rw-r--r--ffmpeg/libavfilter/vf_drawtext.c527
-rw-r--r--ffmpeg/libavfilter/vf_edgedetect.c33
-rw-r--r--ffmpeg/libavfilter/vf_fade.c375
-rw-r--r--ffmpeg/libavfilter/vf_field.c48
-rw-r--r--ffmpeg/libavfilter/vf_fieldorder.c152
-rw-r--r--ffmpeg/libavfilter/vf_format.c81
-rw-r--r--ffmpeg/libavfilter/vf_fps.c63
-rw-r--r--ffmpeg/libavfilter/vf_framestep.c73
-rw-r--r--ffmpeg/libavfilter/vf_frei0r.c244
-rw-r--r--ffmpeg/libavfilter/vf_geq.c103
-rw-r--r--ffmpeg/libavfilter/vf_gradfun.c93
-rw-r--r--ffmpeg/libavfilter/vf_hflip.c109
-rw-r--r--ffmpeg/libavfilter/vf_histeq.c36
-rw-r--r--ffmpeg/libavfilter/vf_histogram.c191
-rw-r--r--ffmpeg/libavfilter/vf_hqdn3d.c185
-rw-r--r--ffmpeg/libavfilter/vf_hqdn3d.h8
-rw-r--r--ffmpeg/libavfilter/vf_hue.c378
-rw-r--r--ffmpeg/libavfilter/vf_idet.c58
-rw-r--r--ffmpeg/libavfilter/vf_il.c50
-rw-r--r--ffmpeg/libavfilter/vf_kerndeint.c26
-rw-r--r--ffmpeg/libavfilter/vf_libopencv.c123
-rw-r--r--ffmpeg/libavfilter/vf_lut.c256
-rw-r--r--ffmpeg/libavfilter/vf_mp.c170
-rw-r--r--ffmpeg/libavfilter/vf_noise.c308
-rw-r--r--ffmpeg/libavfilter/vf_null.c17
-rw-r--r--ffmpeg/libavfilter/vf_overlay.c456
-rw-r--r--ffmpeg/libavfilter/vf_pad.c216
-rw-r--r--ffmpeg/libavfilter/vf_pixdesctest.c31
-rw-r--r--ffmpeg/libavfilter/vf_pp.c24
-rw-r--r--ffmpeg/libavfilter/vf_removelogo.c118
-rw-r--r--ffmpeg/libavfilter/vf_scale.c283
-rw-r--r--ffmpeg/libavfilter/vf_setfield.c39
-rw-r--r--ffmpeg/libavfilter/vf_showinfo.c33
-rw-r--r--ffmpeg/libavfilter/vf_smartblur.c35
-rw-r--r--ffmpeg/libavfilter/vf_stereo3d.c348
-rw-r--r--ffmpeg/libavfilter/vf_subtitles.c27
-rw-r--r--ffmpeg/libavfilter/vf_super2xsai.c8
-rw-r--r--ffmpeg/libavfilter/vf_swapuv.c9
-rw-r--r--ffmpeg/libavfilter/vf_thumbnail.c37
-rw-r--r--ffmpeg/libavfilter/vf_tile.c38
-rw-r--r--ffmpeg/libavfilter/vf_tinterlace.c57
-rw-r--r--ffmpeg/libavfilter/vf_transpose.c198
-rw-r--r--ffmpeg/libavfilter/vf_unsharp.c182
-rw-r--r--ffmpeg/libavfilter/vf_vflip.c18
-rw-r--r--ffmpeg/libavfilter/vf_yadif.c357
-rw-r--r--ffmpeg/libavfilter/video.c34
-rw-r--r--ffmpeg/libavfilter/vsink_nullsink.c2
-rw-r--r--ffmpeg/libavfilter/vsrc_cellauto.c54
-rw-r--r--ffmpeg/libavfilter/vsrc_life.c77
-rw-r--r--ffmpeg/libavfilter/vsrc_mandelbrot.c95
-rw-r--r--ffmpeg/libavfilter/vsrc_mptestsrc.c111
-rw-r--r--ffmpeg/libavfilter/vsrc_testsrc.c682
-rw-r--r--ffmpeg/libavfilter/x86/Makefile7
-rw-r--r--ffmpeg/libavfilter/x86/af_volume_init.c13
-rw-r--r--ffmpeg/libavfilter/x86/vf_gradfun.c217
-rw-r--r--ffmpeg/libavfilter/x86/vf_hqdn3d_init.c22
-rw-r--r--ffmpeg/libavfilter/x86/vf_yadif_init.c100
-rw-r--r--ffmpeg/libavfilter/yadif.h59
167 files changed, 9357 insertions, 17938 deletions
diff --git a/ffmpeg/libavfilter/Makefile b/ffmpeg/libavfilter/Makefile
index 690b1cb..3d587fe 100644
--- a/ffmpeg/libavfilter/Makefile
+++ b/ffmpeg/libavfilter/Makefile
@@ -9,12 +9,15 @@ FFLIBS-$(CONFIG_ASYNCTS_FILTER) += avresample
FFLIBS-$(CONFIG_ATEMPO_FILTER) += avcodec
FFLIBS-$(CONFIG_DECIMATE_FILTER) += avcodec
FFLIBS-$(CONFIG_DESHAKE_FILTER) += avcodec
+FFLIBS-$(CONFIG_ELBG_FILTER) += avcodec
+FFLIBS-$(CONFIG_MCDEINT_FILTER) += avcodec
FFLIBS-$(CONFIG_MOVIE_FILTER) += avformat avcodec
FFLIBS-$(CONFIG_MP_FILTER) += avcodec
FFLIBS-$(CONFIG_PAN_FILTER) += swresample
FFLIBS-$(CONFIG_PP_FILTER) += postproc
FFLIBS-$(CONFIG_REMOVELOGO_FILTER) += avformat avcodec swscale
FFLIBS-$(CONFIG_RESAMPLE_FILTER) += avresample
+FFLIBS-$(CONFIG_SAB_FILTER) += swscale
FFLIBS-$(CONFIG_SCALE_FILTER) += swscale
FFLIBS-$(CONFIG_SHOWSPECTRUM_FILTER) += avcodec
FFLIBS-$(CONFIG_SMARTBLUR_FILTER) += swscale
@@ -40,54 +43,65 @@ OBJS = allfilters.o \
formats.o \
graphdump.o \
graphparser.o \
+ opencl_allkernels.o \
transform.o \
video.o \
OBJS-$(CONFIG_AVCODEC) += avcodec.o
-OBJS-$(CONFIG_AVFORMAT) += lavfutils.o
-OBJS-$(CONFIG_SWSCALE) += lswsutils.o
OBJS-$(CONFIG_ACONVERT_FILTER) += af_aconvert.o
+OBJS-$(CONFIG_ADELAY_FILTER) += af_adelay.o
+OBJS-$(CONFIG_AECHO_FILTER) += af_aecho.o
+OBJS-$(CONFIG_AEVAL_FILTER) += aeval.o
OBJS-$(CONFIG_AFADE_FILTER) += af_afade.o
OBJS-$(CONFIG_AFORMAT_FILTER) += af_aformat.o
+OBJS-$(CONFIG_AINTERLEAVE_FILTER) += f_interleave.o
OBJS-$(CONFIG_ALLPASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_AMERGE_FILTER) += af_amerge.o
OBJS-$(CONFIG_AMIX_FILTER) += af_amix.o
OBJS-$(CONFIG_ANULL_FILTER) += af_anull.o
OBJS-$(CONFIG_APAD_FILTER) += af_apad.o
OBJS-$(CONFIG_APERMS_FILTER) += f_perms.o
+OBJS-$(CONFIG_APHASER_FILTER) += af_aphaser.o
OBJS-$(CONFIG_ARESAMPLE_FILTER) += af_aresample.o
OBJS-$(CONFIG_ASELECT_FILTER) += f_select.o
OBJS-$(CONFIG_ASENDCMD_FILTER) += f_sendcmd.o
OBJS-$(CONFIG_ASETNSAMPLES_FILTER) += af_asetnsamples.o
-OBJS-$(CONFIG_ASETPTS_FILTER) += f_setpts.o
+OBJS-$(CONFIG_ASETPTS_FILTER) += setpts.o
+OBJS-$(CONFIG_ASETRATE_FILTER) += af_asetrate.o
OBJS-$(CONFIG_ASETTB_FILTER) += f_settb.o
OBJS-$(CONFIG_ASHOWINFO_FILTER) += af_ashowinfo.o
OBJS-$(CONFIG_ASPLIT_FILTER) += split.o
+OBJS-$(CONFIG_ASTATS_FILTER) += af_astats.o
OBJS-$(CONFIG_ASTREAMSYNC_FILTER) += af_astreamsync.o
OBJS-$(CONFIG_ASYNCTS_FILTER) += af_asyncts.o
OBJS-$(CONFIG_ATEMPO_FILTER) += af_atempo.o
+OBJS-$(CONFIG_ATRIM_FILTER) += trim.o
+OBJS-$(CONFIG_AZMQ_FILTER) += f_zmq.o
OBJS-$(CONFIG_BANDPASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_BANDREJECT_FILTER) += af_biquads.o
OBJS-$(CONFIG_BASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_BIQUAD_FILTER) += af_biquads.o
OBJS-$(CONFIG_CHANNELMAP_FILTER) += af_channelmap.o
OBJS-$(CONFIG_CHANNELSPLIT_FILTER) += af_channelsplit.o
+OBJS-$(CONFIG_COMPAND_FILTER) += af_compand.o
OBJS-$(CONFIG_EARWAX_FILTER) += af_earwax.o
OBJS-$(CONFIG_EBUR128_FILTER) += f_ebur128.o
OBJS-$(CONFIG_EQUALIZER_FILTER) += af_biquads.o
OBJS-$(CONFIG_HIGHPASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_JOIN_FILTER) += af_join.o
+OBJS-$(CONFIG_LADSPA_FILTER) += af_ladspa.o
OBJS-$(CONFIG_LOWPASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_PAN_FILTER) += af_pan.o
+OBJS-$(CONFIG_REPLAYGAIN_FILTER) += af_replaygain.o
OBJS-$(CONFIG_RESAMPLE_FILTER) += af_resample.o
OBJS-$(CONFIG_SILENCEDETECT_FILTER) += af_silencedetect.o
OBJS-$(CONFIG_TREBLE_FILTER) += af_biquads.o
OBJS-$(CONFIG_VOLUME_FILTER) += af_volume.o
OBJS-$(CONFIG_VOLUMEDETECT_FILTER) += af_volumedetect.o
-OBJS-$(CONFIG_AEVALSRC_FILTER) += asrc_aevalsrc.o
+OBJS-$(CONFIG_AEVALSRC_FILTER) += aeval.o
OBJS-$(CONFIG_ANULLSRC_FILTER) += asrc_anullsrc.o
OBJS-$(CONFIG_FLITE_FILTER) += asrc_flite.o
OBJS-$(CONFIG_SINE_FILTER) += asrc_sine.o
@@ -95,26 +109,33 @@ OBJS-$(CONFIG_SINE_FILTER) += asrc_sine.o
OBJS-$(CONFIG_ANULLSINK_FILTER) += asink_anullsink.o
OBJS-$(CONFIG_ASS_FILTER) += vf_subtitles.o
-OBJS-$(CONFIG_ALPHAEXTRACT_FILTER) += vf_alphaextract.o
+OBJS-$(CONFIG_ALPHAEXTRACT_FILTER) += vf_extractplanes.o
OBJS-$(CONFIG_ALPHAMERGE_FILTER) += vf_alphamerge.o
OBJS-$(CONFIG_BBOX_FILTER) += bbox.o vf_bbox.o
OBJS-$(CONFIG_BLACKDETECT_FILTER) += vf_blackdetect.o
OBJS-$(CONFIG_BLACKFRAME_FILTER) += vf_blackframe.o
-OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o
+OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o dualinput.o framesync.o
OBJS-$(CONFIG_BOXBLUR_FILTER) += vf_boxblur.o
+OBJS-$(CONFIG_COLORBALANCE_FILTER) += vf_colorbalance.o
+OBJS-$(CONFIG_COLORCHANNELMIXER_FILTER) += vf_colorchannelmixer.o
OBJS-$(CONFIG_COLORMATRIX_FILTER) += vf_colormatrix.o
OBJS-$(CONFIG_COPY_FILTER) += vf_copy.o
OBJS-$(CONFIG_CROP_FILTER) += vf_crop.o
OBJS-$(CONFIG_CROPDETECT_FILTER) += vf_cropdetect.o
OBJS-$(CONFIG_CURVES_FILTER) += vf_curves.o
+OBJS-$(CONFIG_DCTDNOIZ_FILTER) += vf_dctdnoiz.o
OBJS-$(CONFIG_DECIMATE_FILTER) += vf_decimate.o
OBJS-$(CONFIG_DELOGO_FILTER) += vf_delogo.o
OBJS-$(CONFIG_DESHAKE_FILTER) += vf_deshake.o
OBJS-$(CONFIG_DRAWBOX_FILTER) += vf_drawbox.o
+OBJS-$(CONFIG_DRAWGRID_FILTER) += vf_drawbox.o
OBJS-$(CONFIG_DRAWTEXT_FILTER) += vf_drawtext.o
+OBJS-$(CONFIG_ELBG_FILTER) += vf_elbg.o
OBJS-$(CONFIG_EDGEDETECT_FILTER) += vf_edgedetect.o
+OBJS-$(CONFIG_EXTRACTPLANES_FILTER) += vf_extractplanes.o
OBJS-$(CONFIG_FADE_FILTER) += vf_fade.o
OBJS-$(CONFIG_FIELD_FILTER) += vf_field.o
+OBJS-$(CONFIG_FIELDMATCH_FILTER) += vf_fieldmatch.o
OBJS-$(CONFIG_FIELDORDER_FILTER) += vf_fieldorder.o
OBJS-$(CONFIG_FORMAT_FILTER) += vf_format.o
OBJS-$(CONFIG_FRAMESTEP_FILTER) += vf_framestep.o
@@ -122,6 +143,7 @@ OBJS-$(CONFIG_FPS_FILTER) += vf_fps.o
OBJS-$(CONFIG_FREI0R_FILTER) += vf_frei0r.o
OBJS-$(CONFIG_GEQ_FILTER) += vf_geq.o
OBJS-$(CONFIG_GRADFUN_FILTER) += vf_gradfun.o
+OBJS-$(CONFIG_HALDCLUT_FILTER) += vf_lut3d.o dualinput.o framesync.o
OBJS-$(CONFIG_HFLIP_FILTER) += vf_hflip.o
OBJS-$(CONFIG_HISTEQ_FILTER) += vf_histeq.o
OBJS-$(CONFIG_HISTOGRAM_FILTER) += vf_histogram.o
@@ -129,88 +151,95 @@ OBJS-$(CONFIG_HQDN3D_FILTER) += vf_hqdn3d.o
OBJS-$(CONFIG_HUE_FILTER) += vf_hue.o
OBJS-$(CONFIG_IDET_FILTER) += vf_idet.o
OBJS-$(CONFIG_IL_FILTER) += vf_il.o
+OBJS-$(CONFIG_INTERLACE_FILTER) += vf_interlace.o
+OBJS-$(CONFIG_INTERLEAVE_FILTER) += f_interleave.o
OBJS-$(CONFIG_KERNDEINT_FILTER) += vf_kerndeint.o
+OBJS-$(CONFIG_LUT3D_FILTER) += vf_lut3d.o
OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTYUV_FILTER) += vf_lut.o
+OBJS-$(CONFIG_MCDEINT_FILTER) += vf_mcdeint.o
+OBJS-$(CONFIG_MERGEPLANES_FILTER) += vf_mergeplanes.o framesync.o
OBJS-$(CONFIG_MP_FILTER) += vf_mp.o
+OBJS-$(CONFIG_MPDECIMATE_FILTER) += vf_mpdecimate.o
OBJS-$(CONFIG_NEGATE_FILTER) += vf_lut.o
OBJS-$(CONFIG_NOFORMAT_FILTER) += vf_format.o
OBJS-$(CONFIG_NOISE_FILTER) += vf_noise.o
OBJS-$(CONFIG_NULL_FILTER) += vf_null.o
OBJS-$(CONFIG_OCV_FILTER) += vf_libopencv.o
-OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o
+OBJS-$(CONFIG_OPENCL) += deshake_opencl.o unsharp_opencl.o
+OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o dualinput.o framesync.o
+OBJS-$(CONFIG_OWDENOISE_FILTER) += vf_owdenoise.o
OBJS-$(CONFIG_PAD_FILTER) += vf_pad.o
OBJS-$(CONFIG_PERMS_FILTER) += f_perms.o
+OBJS-$(CONFIG_PERSPECTIVE_FILTER) += vf_perspective.o
+OBJS-$(CONFIG_PHASE_FILTER) += vf_phase.o
OBJS-$(CONFIG_PIXDESCTEST_FILTER) += vf_pixdesctest.o
OBJS-$(CONFIG_PP_FILTER) += vf_pp.o
+OBJS-$(CONFIG_PSNR_FILTER) += vf_psnr.o dualinput.o framesync.o
+OBJS-$(CONFIG_PULLUP_FILTER) += vf_pullup.o
OBJS-$(CONFIG_REMOVELOGO_FILTER) += bbox.o lswsutils.o lavfutils.o vf_removelogo.o
+OBJS-$(CONFIG_ROTATE_FILTER) += vf_rotate.o
+OBJS-$(CONFIG_SEPARATEFIELDS_FILTER) += vf_separatefields.o
+OBJS-$(CONFIG_SAB_FILTER) += vf_sab.o
OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o
OBJS-$(CONFIG_SELECT_FILTER) += f_select.o
OBJS-$(CONFIG_SENDCMD_FILTER) += f_sendcmd.o
OBJS-$(CONFIG_SETDAR_FILTER) += vf_aspect.o
OBJS-$(CONFIG_SETFIELD_FILTER) += vf_setfield.o
-OBJS-$(CONFIG_SETPTS_FILTER) += f_setpts.o
+OBJS-$(CONFIG_SETPTS_FILTER) += setpts.o
OBJS-$(CONFIG_SETSAR_FILTER) += vf_aspect.o
OBJS-$(CONFIG_SETTB_FILTER) += f_settb.o
OBJS-$(CONFIG_SHOWINFO_FILTER) += vf_showinfo.o
OBJS-$(CONFIG_SMARTBLUR_FILTER) += vf_smartblur.o
OBJS-$(CONFIG_SPLIT_FILTER) += split.o
+OBJS-$(CONFIG_SPP_FILTER) += vf_spp.o
OBJS-$(CONFIG_STEREO3D_FILTER) += vf_stereo3d.o
OBJS-$(CONFIG_SUBTITLES_FILTER) += vf_subtitles.o
OBJS-$(CONFIG_SUPER2XSAI_FILTER) += vf_super2xsai.o
OBJS-$(CONFIG_SWAPUV_FILTER) += vf_swapuv.o
+OBJS-$(CONFIG_TELECINE_FILTER) += vf_telecine.o
OBJS-$(CONFIG_THUMBNAIL_FILTER) += vf_thumbnail.o
OBJS-$(CONFIG_TILE_FILTER) += vf_tile.o
OBJS-$(CONFIG_TINTERLACE_FILTER) += vf_tinterlace.o
OBJS-$(CONFIG_TRANSPOSE_FILTER) += vf_transpose.o
+OBJS-$(CONFIG_TRIM_FILTER) += trim.o
OBJS-$(CONFIG_UNSHARP_FILTER) += vf_unsharp.o
OBJS-$(CONFIG_VFLIP_FILTER) += vf_vflip.o
+OBJS-$(CONFIG_VIDSTABDETECT_FILTER) += vidstabutils.o vf_vidstabdetect.o
+OBJS-$(CONFIG_VIDSTABTRANSFORM_FILTER) += vidstabutils.o vf_vidstabtransform.o
+OBJS-$(CONFIG_VIGNETTE_FILTER) += vf_vignette.o
+OBJS-$(CONFIG_W3FDIF_FILTER) += vf_w3fdif.o
OBJS-$(CONFIG_YADIF_FILTER) += vf_yadif.o
+OBJS-$(CONFIG_ZMQ_FILTER) += f_zmq.o
OBJS-$(CONFIG_CELLAUTO_FILTER) += vsrc_cellauto.o
OBJS-$(CONFIG_COLOR_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_FREI0R_SRC_FILTER) += vf_frei0r.o
+OBJS-$(CONFIG_HALDCLUTSRC_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_LIFE_FILTER) += vsrc_life.o
OBJS-$(CONFIG_MANDELBROT_FILTER) += vsrc_mandelbrot.o
OBJS-$(CONFIG_MPTESTSRC_FILTER) += vsrc_mptestsrc.o
OBJS-$(CONFIG_NULLSRC_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_RGBTESTSRC_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_SMPTEBARS_FILTER) += vsrc_testsrc.o
+OBJS-$(CONFIG_SMPTEHDBARS_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_TESTSRC_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_NULLSINK_FILTER) += vsink_nullsink.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/mp_image.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/img_format.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_detc.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_dint.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_divtc.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_down3dright.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_eq2.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_eq.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_fil.o
-#OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_filmdint.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_fspp.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_ilpack.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_ivtc.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_mcdeint.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_noise.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_ow.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_perspective.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_phase.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_pp7.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_pullup.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_qp.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_sab.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_softpulldown.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_spp.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_telecine.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_tinterlace.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_uspp.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/pullup.o
# multimedia filters
+OBJS-$(CONFIG_AVECTORSCOPE_FILTER) += avf_avectorscope.o
OBJS-$(CONFIG_CONCAT_FILTER) += avf_concat.o
OBJS-$(CONFIG_SHOWSPECTRUM_FILTER) += avf_showspectrum.o
OBJS-$(CONFIG_SHOWWAVES_FILTER) += avf_showwaves.o
@@ -219,8 +248,18 @@ OBJS-$(CONFIG_SHOWWAVES_FILTER) += avf_showwaves.o
OBJS-$(CONFIG_AMOVIE_FILTER) += src_movie.o
OBJS-$(CONFIG_MOVIE_FILTER) += src_movie.o
+# Windows resource file
+SLIBOBJS-$(HAVE_GNU_WINDRES) += avfilterres.o
+
+SKIPHEADERS-$(CONFIG_LIBVIDSTAB) += vidstabutils.h
+SKIPHEADERS-$(CONFIG_OPENCL) += opencl_internal.h deshake_opencl_kernel.h unsharp_opencl_kernel.h
+
+OBJS-$(HAVE_THREADS) += pthread.o
+
TOOLS = graph2dot
TESTPROGS = drawutils filtfmts formats
+TOOLS-$(CONFIG_LIBZMQ) += zmqsend
+
clean::
$(RM) $(CLEANSUFFIXES:%=libavfilter/libmpcodecs/%)
diff --git a/ffmpeg/libavfilter/af_aconvert.c b/ffmpeg/libavfilter/af_aconvert.c
index c05e571..19095cb 100644
--- a/ffmpeg/libavfilter/af_aconvert.c
+++ b/ffmpeg/libavfilter/af_aconvert.c
@@ -25,40 +25,48 @@
* sample format and channel layout conversion audio filter
*/
-#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
#include "libswresample/swresample.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
typedef struct {
+ const AVClass *class;
enum AVSampleFormat out_sample_fmt;
int64_t out_chlayout;
struct SwrContext *swr;
+ char *format_str;
+ char *channel_layout_str;
} AConvertContext;
-static av_cold int init(AVFilterContext *ctx, const char *args0)
+#define OFFSET(x) offsetof(AConvertContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM
+#define F AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption aconvert_options[] = {
+ { "sample_fmt", "", OFFSET(format_str), AV_OPT_TYPE_STRING, .flags = A|F },
+ { "channel_layout", "", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A|F },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(aconvert);
+
+static av_cold int init(AVFilterContext *ctx)
{
AConvertContext *aconvert = ctx->priv;
- char *arg, *ptr = NULL;
int ret = 0;
- char *args = av_strdup(args0);
+
+ av_log(ctx, AV_LOG_WARNING, "This filter is deprecated, use aformat instead\n");
aconvert->out_sample_fmt = AV_SAMPLE_FMT_NONE;
aconvert->out_chlayout = 0;
- if ((arg = av_strtok(args, ":", &ptr)) && strcmp(arg, "auto")) {
- if ((ret = ff_parse_sample_format(&aconvert->out_sample_fmt, arg, ctx)) < 0)
- goto end;
- }
- if ((arg = av_strtok(NULL, ":", &ptr)) && strcmp(arg, "auto")) {
- if ((ret = ff_parse_channel_layout(&aconvert->out_chlayout, arg, ctx)) < 0)
- goto end;
- }
-
-end:
- av_freep(&args);
+ if (aconvert->format_str && strcmp(aconvert->format_str, "auto") &&
+ (ret = ff_parse_sample_format(&aconvert->out_sample_fmt, aconvert->format_str, ctx)) < 0)
+ return ret;
+ if (aconvert->channel_layout_str && strcmp(aconvert->channel_layout_str, "auto"))
+ return ff_parse_channel_layout(&aconvert->out_chlayout, NULL, aconvert->channel_layout_str, ctx);
return ret;
}
@@ -143,6 +151,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref)
AVFrame *outsamplesref = ff_get_audio_buffer(outlink, n);
int ret;
+ if (!outsamplesref)
+ return AVERROR(ENOMEM);
swr_convert(aconvert->swr, outsamplesref->extended_data, n,
(void *)insamplesref->extended_data, n);
@@ -173,10 +183,11 @@ static const AVFilterPad aconvert_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_aconvert = {
+AVFilter ff_af_aconvert = {
.name = "aconvert",
.description = NULL_IF_CONFIG_SMALL("Convert the input audio to sample_fmt:channel_layout."),
.priv_size = sizeof(AConvertContext),
+ .priv_class = &aconvert_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
diff --git a/ffmpeg/libavfilter/af_afade.c b/ffmpeg/libavfilter/af_afade.c
index 1134849..fbf9802 100644
--- a/ffmpeg/libavfilter/af_afade.c
+++ b/ffmpeg/libavfilter/af_afade.c
@@ -34,8 +34,8 @@ typedef struct {
int curve;
int nb_samples;
int64_t start_sample;
- double duration;
- double start_time;
+ int64_t duration;
+ int64_t start_time;
void (*fade_samples)(uint8_t **dst, uint8_t * const *src,
int nb_samples, int channels, int direction,
@@ -50,18 +50,18 @@ enum CurveType { TRI, QSIN, ESIN, HSIN, LOG, PAR, QUA, CUB, SQU, CBR };
static const AVOption afade_options[] = {
{ "type", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS, "type" },
{ "t", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS, "type" },
- { "in", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "type" },
- { "out", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "type" },
- { "start_sample", "set expression of sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, FLAGS },
- { "ss", "set expression of sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, FLAGS },
- { "nb_samples", "set expression for fade duration in samples", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX, FLAGS },
- { "ns", "set expression for fade duration in samples", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX, FLAGS },
- { "start_time", "set expression of second to start fading", OFFSET(start_time), AV_OPT_TYPE_DOUBLE, {.dbl = 0. }, 0, 7*24*60*60,FLAGS },
- { "st", "set expression of second to start fading", OFFSET(start_time), AV_OPT_TYPE_DOUBLE, {.dbl = 0. }, 0, 7*24*60*60,FLAGS },
- { "duration", "set expression for fade duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl = 0. }, 0, 24*60*60, FLAGS },
- { "d", "set expression for fade duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl = 0. }, 0, 24*60*60, FLAGS },
- { "curve", "set expression for fade curve", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, TRI, CBR, FLAGS, "curve" },
- { "c", "set expression for fade curve", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, TRI, CBR, FLAGS, "curve" },
+ { "in", "fade-in", 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "type" },
+ { "out", "fade-out", 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "type" },
+ { "start_sample", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, FLAGS },
+ { "ss", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, FLAGS },
+ { "nb_samples", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX, FLAGS },
+ { "ns", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX, FLAGS },
+ { "start_time", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "st", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "duration", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "d", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "curve", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, TRI, CBR, FLAGS, "curve" },
+ { "c", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, TRI, CBR, FLAGS, "curve" },
{ "tri", "linear slope", 0, AV_OPT_TYPE_CONST, {.i64 = TRI }, 0, 0, FLAGS, "curve" },
{ "qsin", "quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN }, 0, 0, FLAGS, "curve" },
{ "esin", "exponential sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = ESIN }, 0, 0, FLAGS, "curve" },
@@ -72,23 +72,16 @@ static const AVOption afade_options[] = {
{ "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, FLAGS, "curve" },
{ "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, FLAGS, "curve" },
{ "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR }, 0, 0, FLAGS, "curve" },
- {NULL},
+ { NULL }
};
AVFILTER_DEFINE_CLASS(afade);
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
- AudioFadeContext *afade = ctx->priv;
- int ret;
-
- afade->class = &afade_class;
- av_opt_set_defaults(afade);
-
- if ((ret = av_set_options_string(afade, args, "=", ":")) < 0)
- return ret;
+ AudioFadeContext *s = ctx->priv;
- if (INT64_MAX - afade->nb_samples < afade->start_sample)
+ if (INT64_MAX - s->nb_samples < s->start_sample)
return AVERROR(EINVAL);
return 0;
@@ -207,41 +200,40 @@ FADE(flt, float)
FADE(s16, int16_t)
FADE(s32, int32_t)
-static int config_output(AVFilterLink *outlink)
+static int config_input(AVFilterLink *inlink)
{
- AVFilterContext *ctx = outlink->src;
- AudioFadeContext *afade = ctx->priv;
- AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterContext *ctx = inlink->dst;
+ AudioFadeContext *s = ctx->priv;
switch (inlink->format) {
- case AV_SAMPLE_FMT_DBL: afade->fade_samples = fade_samples_dbl; break;
- case AV_SAMPLE_FMT_DBLP: afade->fade_samples = fade_samples_dblp; break;
- case AV_SAMPLE_FMT_FLT: afade->fade_samples = fade_samples_flt; break;
- case AV_SAMPLE_FMT_FLTP: afade->fade_samples = fade_samples_fltp; break;
- case AV_SAMPLE_FMT_S16: afade->fade_samples = fade_samples_s16; break;
- case AV_SAMPLE_FMT_S16P: afade->fade_samples = fade_samples_s16p; break;
- case AV_SAMPLE_FMT_S32: afade->fade_samples = fade_samples_s32; break;
- case AV_SAMPLE_FMT_S32P: afade->fade_samples = fade_samples_s32p; break;
+ case AV_SAMPLE_FMT_DBL: s->fade_samples = fade_samples_dbl; break;
+ case AV_SAMPLE_FMT_DBLP: s->fade_samples = fade_samples_dblp; break;
+ case AV_SAMPLE_FMT_FLT: s->fade_samples = fade_samples_flt; break;
+ case AV_SAMPLE_FMT_FLTP: s->fade_samples = fade_samples_fltp; break;
+ case AV_SAMPLE_FMT_S16: s->fade_samples = fade_samples_s16; break;
+ case AV_SAMPLE_FMT_S16P: s->fade_samples = fade_samples_s16p; break;
+ case AV_SAMPLE_FMT_S32: s->fade_samples = fade_samples_s32; break;
+ case AV_SAMPLE_FMT_S32P: s->fade_samples = fade_samples_s32p; break;
}
- if (afade->duration)
- afade->nb_samples = afade->duration * inlink->sample_rate;
- if (afade->start_time)
- afade->start_sample = afade->start_time * inlink->sample_rate;
+ if (s->duration)
+ s->nb_samples = av_rescale(s->duration, inlink->sample_rate, AV_TIME_BASE);
+ if (s->start_time)
+ s->start_sample = av_rescale(s->start_time, inlink->sample_rate, AV_TIME_BASE);
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
- AudioFadeContext *afade = inlink->dst->priv;
+ AudioFadeContext *s = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
int nb_samples = buf->nb_samples;
AVFrame *out_buf;
int64_t cur_sample = av_rescale_q(buf->pts, (AVRational){1, outlink->sample_rate}, outlink->time_base);
- if ((!afade->type && (afade->start_sample + afade->nb_samples < cur_sample)) ||
- ( afade->type && (cur_sample + afade->nb_samples < afade->start_sample)))
+ if ((!s->type && (s->start_sample + s->nb_samples < cur_sample)) ||
+ ( s->type && (cur_sample + s->nb_samples < s->start_sample)))
return ff_filter_frame(outlink, buf);
if (av_frame_is_writable(buf)) {
@@ -250,25 +242,25 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
out_buf = ff_get_audio_buffer(inlink, nb_samples);
if (!out_buf)
return AVERROR(ENOMEM);
- out_buf->pts = buf->pts;
+ av_frame_copy_props(out_buf, buf);
}
- if ((!afade->type && (cur_sample + nb_samples < afade->start_sample)) ||
- ( afade->type && (afade->start_sample + afade->nb_samples < cur_sample))) {
+ if ((!s->type && (cur_sample + nb_samples < s->start_sample)) ||
+ ( s->type && (s->start_sample + s->nb_samples < cur_sample))) {
av_samples_set_silence(out_buf->extended_data, 0, nb_samples,
av_frame_get_channels(out_buf), out_buf->format);
} else {
int64_t start;
- if (!afade->type)
- start = cur_sample - afade->start_sample;
+ if (!s->type)
+ start = cur_sample - s->start_sample;
else
- start = afade->start_sample + afade->nb_samples - cur_sample;
+ start = s->start_sample + s->nb_samples - cur_sample;
- afade->fade_samples(out_buf->extended_data, buf->extended_data,
- nb_samples, av_frame_get_channels(buf),
- afade->type ? -1 : 1, start,
- afade->nb_samples, afade->curve);
+ s->fade_samples(out_buf->extended_data, buf->extended_data,
+ nb_samples, av_frame_get_channels(buf),
+ s->type ? -1 : 1, start,
+ s->nb_samples, s->curve);
}
if (buf != out_buf)
@@ -282,20 +274,20 @@ static const AVFilterPad avfilter_af_afade_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
+ .config_props = config_input,
},
{ NULL }
};
static const AVFilterPad avfilter_af_afade_outputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .config_props = config_output,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
-AVFilter avfilter_af_afade = {
+AVFilter ff_af_afade = {
.name = "afade",
.description = NULL_IF_CONFIG_SMALL("Fade in/out input audio."),
.query_formats = query_formats,
@@ -304,4 +296,5 @@ AVFilter avfilter_af_afade = {
.inputs = avfilter_af_afade_inputs,
.outputs = avfilter_af_afade_outputs,
.priv_class = &afade_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/af_aformat.c b/ffmpeg/libavfilter/af_aformat.c
index 9ac381f..5fd0308 100644
--- a/ffmpeg/libavfilter/af_aformat.c
+++ b/ffmpeg/libavfilter/af_aformat.c
@@ -52,24 +52,31 @@ static const AVOption aformat_options[] = {
{ "sample_fmts", "A comma-separated list of sample formats.", OFFSET(formats_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ "sample_rates", "A comma-separated list of sample rates.", OFFSET(sample_rates_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ "channel_layouts", "A comma-separated list of channel layouts.", OFFSET(channel_layouts_str), AV_OPT_TYPE_STRING, .flags = A|F },
- { NULL },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(aformat);
#define PARSE_FORMATS(str, type, list, add_to_list, get_fmt, none, desc) \
do { \
- char *next, *cur = str; \
+ char *next, *cur = str, sep; \
+ \
+ if (str && strchr(str, ',')) { \
+ av_log(ctx, AV_LOG_WARNING, "This syntax is deprecated, use '|' to "\
+ "separate %s.\n", desc); \
+ sep = ','; \
+ } else \
+ sep = '|'; \
+ \
while (cur) { \
type fmt; \
- next = strchr(cur, ','); \
+ next = strchr(cur, sep); \
if (next) \
*next++ = 0; \
\
if ((fmt = get_fmt(cur)) == none) { \
av_log(ctx, AV_LOG_ERROR, "Error parsing " desc ": %s.\n", cur);\
- ret = AVERROR(EINVAL); \
- goto fail; \
+ return AVERROR(EINVAL); \
} \
add_to_list(&list, fmt); \
\
@@ -83,21 +90,9 @@ static int get_sample_rate(const char *samplerate)
return FFMAX(ret, 0);
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
AFormatContext *s = ctx->priv;
- int ret;
-
- if (!args) {
- av_log(ctx, AV_LOG_ERROR, "No parameters supplied.\n");
- return AVERROR(EINVAL);
- }
-
- s->class = &aformat_class;
- av_opt_set_defaults(s);
-
- if ((ret = av_set_options_string(s, args, "=", ":")) < 0)
- return ret;
PARSE_FORMATS(s->formats_str, enum AVSampleFormat, s->formats,
ff_add_format, av_get_sample_fmt, AV_SAMPLE_FMT_NONE, "sample format");
@@ -107,9 +102,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
ff_add_channel_layout, av_get_channel_layout, 0,
"channel layout");
-fail:
- av_opt_free(s);
- return ret;
+ return 0;
}
static int query_formats(AVFilterContext *ctx)
@@ -117,7 +110,7 @@ static int query_formats(AVFilterContext *ctx)
AFormatContext *s = ctx->priv;
ff_set_common_formats(ctx, s->formats ? s->formats :
- ff_all_formats(AVMEDIA_TYPE_AUDIO));
+ ff_all_formats(AVMEDIA_TYPE_AUDIO));
ff_set_common_samplerates(ctx, s->sample_rates ? s->sample_rates :
ff_all_samplerates());
ff_set_common_channel_layouts(ctx, s->channel_layouts ? s->channel_layouts :
@@ -142,14 +135,13 @@ static const AVFilterPad avfilter_af_aformat_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_aformat = {
+AVFilter ff_af_aformat = {
.name = "aformat",
.description = NULL_IF_CONFIG_SMALL("Convert the input audio to one of the specified formats."),
.init = init,
.query_formats = query_formats,
.priv_size = sizeof(AFormatContext),
-
+ .priv_class = &aformat_class,
.inputs = avfilter_af_aformat_inputs,
.outputs = avfilter_af_aformat_outputs,
- .priv_class = &aformat_class,
};
diff --git a/ffmpeg/libavfilter/af_amerge.c b/ffmpeg/libavfilter/af_amerge.c
index 28c3682..82b694b 100644
--- a/ffmpeg/libavfilter/af_amerge.c
+++ b/ffmpeg/libavfilter/af_amerge.c
@@ -52,7 +52,7 @@ typedef struct {
static const AVOption amerge_options[] = {
{ "inputs", "specify the number of inputs", OFFSET(nb_inputs),
AV_OPT_TYPE_INT, { .i64 = 2 }, 2, SWR_CH_MAX, FLAGS },
- {0}
+ { NULL }
};
AVFILTER_DEFINE_CLASS(amerge);
@@ -82,9 +82,9 @@ static int query_formats(AVFilterContext *ctx)
for (i = 0; i < am->nb_inputs; i++) {
if (!ctx->inputs[i]->in_channel_layouts ||
!ctx->inputs[i]->in_channel_layouts->nb_channel_layouts) {
- av_log(ctx, AV_LOG_ERROR,
+ av_log(ctx, AV_LOG_WARNING,
"No channel layout for input %d\n", i + 1);
- return AVERROR(EINVAL);
+ return AVERROR(EAGAIN);
}
inlayout[i] = ctx->inputs[i]->in_channel_layouts->channel_layouts[0];
if (ctx->inputs[i]->in_channel_layouts->nb_channel_layouts > 1) {
@@ -234,7 +234,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
break;
av_assert1(input_number < am->nb_inputs);
if (ff_bufqueue_is_full(&am->in[input_number].queue)) {
- av_log(ctx, AV_LOG_ERROR, "Buffer queue overflow\n");
av_frame_free(&insamples);
return AVERROR(ENOMEM);
}
@@ -248,6 +247,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
return 0;
outbuf = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
+ if (!outbuf)
+ return AVERROR(ENOMEM);
outs = outbuf->data[0];
for (i = 0; i < am->nb_inputs; i++) {
inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0);
@@ -302,18 +303,11 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
return ff_filter_frame(ctx->outputs[0], outbuf);
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
AMergeContext *am = ctx->priv;
- int ret, i;
+ int i;
- am->class = &amerge_class;
- av_opt_set_defaults(am);
- ret = av_set_options_string(am, args, "=", ":");
- if (ret < 0) {
- av_log(ctx, AV_LOG_ERROR, "Error parsing options: '%s'\n", args);
- return ret;
- }
am->in = av_calloc(am->nb_inputs, sizeof(*am->in));
if (!am->in)
return AVERROR(ENOMEM);
@@ -341,7 +335,7 @@ static const AVFilterPad amerge_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_amerge = {
+AVFilter ff_af_amerge = {
.name = "amerge",
.description = NULL_IF_CONFIG_SMALL("Merge two or more audio streams into "
"a single multi-channel stream."),
@@ -352,4 +346,5 @@ AVFilter avfilter_af_amerge = {
.inputs = NULL,
.outputs = amerge_outputs,
.priv_class = &amerge_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
diff --git a/ffmpeg/libavfilter/af_amix.c b/ffmpeg/libavfilter/af_amix.c
index dcb24b0..7140b6c 100644
--- a/ffmpeg/libavfilter/af_amix.c
+++ b/ffmpeg/libavfilter/af_amix.c
@@ -2,20 +2,20 @@
* Audio Mix Filter
* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -28,6 +28,7 @@
* output.
*/
+#include "libavutil/attributes.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
@@ -186,7 +187,7 @@ static const AVOption amix_options[] = {
{ "dropout_transition", "Transition time, in seconds, for volume "
"renormalization when an input stream ends.",
OFFSET(dropout_transition), AV_OPT_TYPE_FLOAT, { .dbl = 2.0 }, 0, INT_MAX, A|F },
- { NULL },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(amix);
@@ -483,17 +484,10 @@ fail:
return ret;
}
-static int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
MixContext *s = ctx->priv;
- int i, ret;
-
- s->class = &amix_class;
- av_opt_set_defaults(s);
-
- if ((ret = av_set_options_string(s, args, "=", ":")) < 0)
- return ret;
- av_opt_free(s);
+ int i;
for (i = 0; i < s->nb_inputs; i++) {
char name[32];
@@ -512,7 +506,7 @@ static int init(AVFilterContext *ctx, const char *args)
return 0;
}
-static void uninit(AVFilterContext *ctx)
+static av_cold void uninit(AVFilterContext *ctx)
{
int i;
MixContext *s = ctx->priv;
@@ -552,16 +546,15 @@ static const AVFilterPad avfilter_af_amix_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_amix = {
- .name = "amix",
- .description = NULL_IF_CONFIG_SMALL("Audio mixing."),
- .priv_size = sizeof(MixContext),
-
+AVFilter ff_af_amix = {
+ .name = "amix",
+ .description = NULL_IF_CONFIG_SMALL("Audio mixing."),
+ .priv_size = sizeof(MixContext),
+ .priv_class = &amix_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
-
- .inputs = NULL,
- .outputs = avfilter_af_amix_outputs,
- .priv_class = &amix_class,
+ .inputs = NULL,
+ .outputs = avfilter_af_amix_outputs,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
diff --git a/ffmpeg/libavfilter/af_anull.c b/ffmpeg/libavfilter/af_anull.c
index c61da3b..fff456e 100644
--- a/ffmpeg/libavfilter/af_anull.c
+++ b/ffmpeg/libavfilter/af_anull.c
@@ -29,9 +29,8 @@
static const AVFilterPad avfilter_af_anull_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
@@ -44,15 +43,10 @@ static const AVFilterPad avfilter_af_anull_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_anull = {
- .name = "anull",
- .description = NULL_IF_CONFIG_SMALL("Pass the source unchanged to the output."),
-
- .priv_size = 0,
-
+AVFilter ff_af_anull = {
+ .name = "anull",
+ .description = NULL_IF_CONFIG_SMALL("Pass the source unchanged to the output."),
.query_formats = ff_query_formats_all,
-
- .inputs = avfilter_af_anull_inputs,
-
- .outputs = avfilter_af_anull_outputs,
+ .inputs = avfilter_af_anull_inputs,
+ .outputs = avfilter_af_anull_outputs,
};
diff --git a/ffmpeg/libavfilter/af_apad.c b/ffmpeg/libavfilter/af_apad.c
index b4a0fc8..88a3a77 100644
--- a/ffmpeg/libavfilter/af_apad.c
+++ b/ffmpeg/libavfilter/af_apad.c
@@ -51,24 +51,16 @@ static const AVOption apad_options[] = {
{ "packet_size", "set silence packet size", OFFSET(packet_size), AV_OPT_TYPE_INT, { .i64 = 4096 }, 0, INT_MAX, A },
{ "pad_len", "number of samples of silence to add", OFFSET(pad_len), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, A },
{ "whole_len", "target number of samples in the audio stream", OFFSET(whole_len), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, A },
- { NULL },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(apad);
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
- int ret;
APadContext *apad = ctx->priv;
- apad->class = &apad_class;
apad->next_pts = AV_NOPTS_VALUE;
-
- av_opt_set_defaults(apad);
-
- if ((ret = av_opt_set_from_string(apad, args, NULL, "=", ":")) < 0)
- return ret;
-
if (apad->whole_len && apad->pad_len) {
av_log(ctx, AV_LOG_ERROR, "Both whole and pad length are set, this is not possible\n");
return AVERROR(EINVAL);
@@ -97,7 +89,7 @@ static int request_frame(AVFilterLink *outlink)
ret = ff_request_frame(ctx->inputs[0]);
- if (ret == AVERROR_EOF) {
+ if (ret == AVERROR_EOF && !ctx->is_disabled) {
int n_out = apad->packet_size;
AVFrame *outsamplesref;
@@ -140,7 +132,7 @@ static const AVFilterPad apad_inputs[] = {
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
- { NULL },
+ { NULL }
};
static const AVFilterPad apad_outputs[] = {
@@ -149,10 +141,10 @@ static const AVFilterPad apad_outputs[] = {
.request_frame = request_frame,
.type = AVMEDIA_TYPE_AUDIO,
},
- { NULL },
+ { NULL }
};
-AVFilter avfilter_af_apad = {
+AVFilter ff_af_apad = {
.name = "apad",
.description = NULL_IF_CONFIG_SMALL("Pad audio with silence."),
.init = init,
@@ -160,4 +152,5 @@ AVFilter avfilter_af_apad = {
.inputs = apad_inputs,
.outputs = apad_outputs,
.priv_class = &apad_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
diff --git a/ffmpeg/libavfilter/af_aresample.c b/ffmpeg/libavfilter/af_aresample.c
index 80351c3..e05c0a1 100644
--- a/ffmpeg/libavfilter/af_aresample.c
+++ b/ffmpeg/libavfilter/af_aresample.c
@@ -35,17 +35,18 @@
#include "internal.h"
typedef struct {
+ const AVClass *class;
+ int sample_rate_arg;
double ratio;
struct SwrContext *swr;
int64_t next_pts;
int req_fullfilled;
} AResampleContext;
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
{
AResampleContext *aresample = ctx->priv;
int ret = 0;
- char *argd = av_strdup(args);
aresample->next_pts = AV_NOPTS_VALUE;
aresample->swr = swr_alloc();
@@ -54,27 +55,18 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
goto end;
}
- if (args) {
- char *ptr = argd, *token;
-
- while (token = av_strtok(ptr, ":", &ptr)) {
- char *value;
- av_strtok(token, "=", &value);
-
- if (value) {
- if ((ret = av_opt_set(aresample->swr, token, value, 0)) < 0)
- goto end;
- } else {
- int out_rate;
- if ((ret = ff_parse_sample_rate(&out_rate, token, ctx)) < 0)
- goto end;
- if ((ret = av_opt_set_int(aresample->swr, "osr", out_rate, 0)) < 0)
- goto end;
- }
+ if (opts) {
+ AVDictionaryEntry *e = NULL;
+
+ while ((e = av_dict_get(*opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
+ if ((ret = av_opt_set(aresample->swr, e->key, e->value, 0)) < 0)
+ goto end;
}
+ av_dict_free(opts);
}
+ if (aresample->sample_rate_arg > 0)
+ av_opt_set_int(aresample->swr, "osr", aresample->sample_rate_arg, 0);
end:
- av_free(argd);
return ret;
}
@@ -178,11 +170,18 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref)
{
AResampleContext *aresample = inlink->dst->priv;
const int n_in = insamplesref->nb_samples;
- int n_out = n_in * aresample->ratio * 2 + 256;
+ int64_t delay;
+ int n_out = n_in * aresample->ratio + 32;
AVFilterLink *const outlink = inlink->dst->outputs[0];
- AVFrame *outsamplesref = ff_get_audio_buffer(outlink, n_out);
+ AVFrame *outsamplesref;
int ret;
+ delay = swr_get_delay(aresample->swr, outlink->sample_rate);
+ if (delay > 0)
+ n_out += delay;
+
+ outsamplesref = ff_get_audio_buffer(outlink, n_out);
+
if(!outsamplesref)
return AVERROR(ENOMEM);
@@ -231,10 +230,15 @@ static int request_frame(AVFilterLink *outlink)
if (ret == AVERROR_EOF) {
AVFrame *outsamplesref;
int n_out = 4096;
+ int64_t pts;
outsamplesref = ff_get_audio_buffer(outlink, n_out);
if (!outsamplesref)
return AVERROR(ENOMEM);
+
+ pts = swr_next_pts(aresample->swr, INT64_MIN);
+ pts = ROUNDED_DIV(pts, inlink->sample_rate);
+
n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out, 0, 0);
if (n_out <= 0) {
av_frame_free(&outsamplesref);
@@ -243,27 +247,49 @@ static int request_frame(AVFilterLink *outlink)
outsamplesref->sample_rate = outlink->sample_rate;
outsamplesref->nb_samples = n_out;
-#if 0
- outsamplesref->pts = aresample->next_pts;
- if(aresample->next_pts != AV_NOPTS_VALUE)
- aresample->next_pts += av_rescale_q(n_out, (AVRational){1 ,outlink->sample_rate}, outlink->time_base);
-#else
- outsamplesref->pts = swr_next_pts(aresample->swr, INT64_MIN);
- outsamplesref->pts = ROUNDED_DIV(outsamplesref->pts, inlink->sample_rate);
-#endif
+
+ outsamplesref->pts = pts;
return ff_filter_frame(outlink, outsamplesref);
}
return ret;
}
+static const AVClass *resample_child_class_next(const AVClass *prev)
+{
+ return prev ? NULL : swr_get_class();
+}
+
+static void *resample_child_next(void *obj, void *prev)
+{
+ AResampleContext *s = obj;
+ return prev ? NULL : s->swr;
+}
+
+#define OFFSET(x) offsetof(AResampleContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption options[] = {
+ {"sample_rate", NULL, OFFSET(sample_rate_arg), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
+ {NULL}
+};
+
+static const AVClass aresample_class = {
+ .class_name = "aresample",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .child_class_next = resample_child_class_next,
+ .child_next = resample_child_next,
+};
+
static const AVFilterPad aresample_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
- { NULL },
+ { NULL }
};
static const AVFilterPad aresample_outputs[] = {
@@ -273,16 +299,17 @@ static const AVFilterPad aresample_outputs[] = {
.request_frame = request_frame,
.type = AVMEDIA_TYPE_AUDIO,
},
- { NULL },
+ { NULL }
};
-AVFilter avfilter_af_aresample = {
+AVFilter ff_af_aresample = {
.name = "aresample",
.description = NULL_IF_CONFIG_SMALL("Resample audio data."),
- .init = init,
+ .init_dict = init_dict,
.uninit = uninit,
.query_formats = query_formats,
.priv_size = sizeof(AResampleContext),
+ .priv_class = &aresample_class,
.inputs = aresample_inputs,
.outputs = aresample_outputs,
};
diff --git a/ffmpeg/libavfilter/af_asetnsamples.c b/ffmpeg/libavfilter/af_asetnsamples.c
index 08e5279..fbcf275 100644
--- a/ffmpeg/libavfilter/af_asetnsamples.c
+++ b/ffmpeg/libavfilter/af_asetnsamples.c
@@ -38,7 +38,6 @@ typedef struct {
int nb_out_samples; ///< how many samples to output
AVAudioFifo *fifo; ///< samples are queued here
int64_t next_out_pts;
- int req_fullfilled;
int pad;
} ASNSContext;
@@ -46,25 +45,18 @@ typedef struct {
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption asetnsamples_options[] = {
-{ "pad", "pad last frame with zeros", OFFSET(pad), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
-{ "p", "pad last frame with zeros", OFFSET(pad), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
-{ "nb_out_samples", "set the number of per-frame output samples", OFFSET(nb_out_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
-{ "n", "set the number of per-frame output samples", OFFSET(nb_out_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
-{ NULL }
+ { "nb_out_samples", "set the number of per-frame output samples", OFFSET(nb_out_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
+ { "n", "set the number of per-frame output samples", OFFSET(nb_out_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
+ { "pad", "pad last frame with zeros", OFFSET(pad), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
+ { "p", "pad last frame with zeros", OFFSET(pad), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(asetnsamples);
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
ASNSContext *asns = ctx->priv;
- int err;
-
- asns->class = &asetnsamples_class;
- av_opt_set_defaults(asns);
-
- if ((err = av_set_options_string(asns, args, "=", ":")) < 0)
- return err;
asns->next_out_pts = AV_NOPTS_VALUE;
av_log(ctx, AV_LOG_VERBOSE, "nb_out_samples:%d pad:%d\n", asns->nb_out_samples, asns->pad);
@@ -81,11 +73,11 @@ static av_cold void uninit(AVFilterContext *ctx)
static int config_props_output(AVFilterLink *outlink)
{
ASNSContext *asns = outlink->src->priv;
- int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);
- asns->fifo = av_audio_fifo_alloc(outlink->format, nb_channels, asns->nb_out_samples);
+ asns->fifo = av_audio_fifo_alloc(outlink->format, outlink->channels, asns->nb_out_samples);
if (!asns->fifo)
return AVERROR(ENOMEM);
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
return 0;
}
@@ -108,14 +100,15 @@ static int push_samples(AVFilterLink *outlink)
return 0;
outsamples = ff_get_audio_buffer(outlink, nb_out_samples);
- av_assert0(outsamples);
+ if (!outsamples)
+ return AVERROR(ENOMEM);
av_audio_fifo_read(asns->fifo,
(void **)outsamples->extended_data, nb_out_samples);
if (nb_pad_samples)
av_samples_set_silence(outsamples->extended_data, nb_out_samples - nb_pad_samples,
- nb_pad_samples, av_get_channel_layout_nb_channels(outlink->channel_layout),
+ nb_pad_samples, outlink->channels,
outlink->format);
outsamples->nb_samples = nb_out_samples;
outsamples->channel_layout = outlink->channel_layout;
@@ -128,7 +121,6 @@ static int push_samples(AVFilterLink *outlink)
ret = ff_filter_frame(outlink, outsamples);
if (ret < 0)
return ret;
- asns->req_fullfilled = 1;
return nb_out_samples;
}
@@ -161,19 +153,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
static int request_frame(AVFilterLink *outlink)
{
- ASNSContext *asns = outlink->src->priv;
AVFilterLink *inlink = outlink->src->inputs[0];
int ret;
- asns->req_fullfilled = 0;
- do {
- ret = ff_request_frame(inlink);
- } while (!asns->req_fullfilled && ret >= 0);
-
+ ret = ff_request_frame(inlink);
if (ret == AVERROR_EOF) {
- do {
- ret = push_samples(outlink);
- } while (ret > 0);
+ ret = push_samples(outlink);
+ return ret < 0 ? ret : ret > 0 ? 0 : AVERROR_EOF;
}
return ret;
@@ -181,12 +167,11 @@ static int request_frame(AVFilterLink *outlink)
static const AVFilterPad asetnsamples_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .filter_frame = filter_frame,
- .needs_writable = 1,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
},
- { NULL }
+ { NULL }
};
static const AVFilterPad asetnsamples_outputs[] = {
@@ -199,13 +184,13 @@ static const AVFilterPad asetnsamples_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_asetnsamples = {
- .name = "asetnsamples",
- .description = NULL_IF_CONFIG_SMALL("Set the number of samples for each output audio frames."),
- .priv_size = sizeof(ASNSContext),
- .init = init,
- .uninit = uninit,
- .inputs = asetnsamples_inputs,
- .outputs = asetnsamples_outputs,
- .priv_class = &asetnsamples_class,
+AVFilter ff_af_asetnsamples = {
+ .name = "asetnsamples",
+ .description = NULL_IF_CONFIG_SMALL("Set the number of samples for each output audio frames."),
+ .priv_size = sizeof(ASNSContext),
+ .priv_class = &asetnsamples_class,
+ .init = init,
+ .uninit = uninit,
+ .inputs = asetnsamples_inputs,
+ .outputs = asetnsamples_outputs,
};
diff --git a/ffmpeg/libavfilter/af_ashowinfo.c b/ffmpeg/libavfilter/af_ashowinfo.c
index f53584e..783f9a6 100644
--- a/ffmpeg/libavfilter/af_ashowinfo.c
+++ b/ffmpeg/libavfilter/af_ashowinfo.c
@@ -27,6 +27,7 @@
#include <stddef.h>
#include "libavutil/adler32.h"
+#include "libavutil/attributes.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/mem.h"
@@ -42,14 +43,9 @@ typedef struct AShowInfoContext {
* Scratch space for individual plane checksums for planar audio
*/
uint32_t *plane_checksums;
-
- /**
- * Frame counter
- */
- uint64_t frame;
} AShowInfoContext;
-static void uninit(AVFilterContext *ctx)
+static av_cold void uninit(AVFilterContext *ctx)
{
AShowInfoContext *s = ctx->priv;
av_freep(&s->plane_checksums);
@@ -61,7 +57,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
AShowInfoContext *s = ctx->priv;
char chlayout_str[128];
uint32_t checksum = 0;
- int channels = av_get_channel_layout_nb_channels(buf->channel_layout);
+ int channels = inlink->channels;
int planar = av_sample_fmt_is_planar(buf->format);
int block_align = av_get_bytes_per_sample(buf->format) * (planar ? 1 : channels);
int data_size = buf->nb_samples * block_align;
@@ -85,10 +81,10 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
buf->channel_layout);
av_log(ctx, AV_LOG_INFO,
- "n:%"PRIu64" pts:%s pts_time:%s pos:%"PRId64" "
+ "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
"fmt:%s channels:%d chlayout:%s rate:%d nb_samples:%d "
"checksum:%08X ",
- s->frame,
+ inlink->frame_count,
av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base),
av_frame_get_pkt_pos(buf),
av_get_sample_fmt_name(buf->format), av_frame_get_channels(buf), chlayout_str,
@@ -100,18 +96,16 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
av_log(ctx, AV_LOG_INFO, "%08X ", s->plane_checksums[i]);
av_log(ctx, AV_LOG_INFO, "]\n");
- s->frame++;
return ff_filter_frame(inlink->dst->outputs[0], buf);
}
static const AVFilterPad inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
},
- { NULL },
+ { NULL }
};
static const AVFilterPad outputs[] = {
@@ -119,10 +113,10 @@ static const AVFilterPad outputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
- { NULL },
+ { NULL }
};
-AVFilter avfilter_af_ashowinfo = {
+AVFilter ff_af_ashowinfo = {
.name = "ashowinfo",
.description = NULL_IF_CONFIG_SMALL("Show textual information for each audio frame."),
.priv_size = sizeof(AShowInfoContext),
diff --git a/ffmpeg/libavfilter/af_astreamsync.c b/ffmpeg/libavfilter/af_astreamsync.c
index 79f703a..becfe34 100644
--- a/ffmpeg/libavfilter/af_astreamsync.c
+++ b/ffmpeg/libavfilter/af_astreamsync.c
@@ -24,6 +24,7 @@
*/
#include "libavutil/eval.h"
+#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
@@ -45,7 +46,9 @@ enum var_name {
};
typedef struct {
+ const AVClass *class;
AVExpr *expr;
+ char *expr_str;
double var_values[VAR_NB];
struct buf_queue {
AVFrame *buf[QUEUE_SIZE];
@@ -58,18 +61,25 @@ typedef struct {
int eof; /* bitmask, one bit for each stream */
} AStreamSyncContext;
-static const char *default_expr = "t1-t2";
+#define OFFSET(x) offsetof(AStreamSyncContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption astreamsync_options[] = {
+ { "expr", "set stream selection expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "t1-t2" }, .flags = FLAGS },
+ { "e", "set stream selection expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "t1-t2" }, .flags = FLAGS },
+ { NULL }
+};
-static av_cold int init(AVFilterContext *ctx, const char *args0)
+AVFILTER_DEFINE_CLASS(astreamsync);
+
+static av_cold int init(AVFilterContext *ctx)
{
AStreamSyncContext *as = ctx->priv;
- const char *expr = args0 ? args0 : default_expr;
int r, i;
- r = av_expr_parse(&as->expr, expr, var_names,
+ r = av_expr_parse(&as->expr, as->expr_str, var_names,
NULL, NULL, NULL, NULL, 0, ctx);
if (r < 0) {
- av_log(ctx, AV_LOG_ERROR, "Error in expression \"%s\"\n", expr);
+ av_log(ctx, AV_LOG_ERROR, "Error in expression \"%s\"\n", as->expr_str);
return r;
}
for (i = 0; i < 42; i++)
@@ -180,6 +190,14 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
return 0;
}
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AStreamSyncContext *as = ctx->priv;
+
+ av_expr_free(as->expr);
+ as->expr = NULL;
+}
+
static const AVFilterPad astreamsync_inputs[] = {
{
.name = "in1",
@@ -208,13 +226,15 @@ static const AVFilterPad astreamsync_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_astreamsync = {
+AVFilter ff_af_astreamsync = {
.name = "astreamsync",
.description = NULL_IF_CONFIG_SMALL("Copy two streams of audio data "
"in a configurable order."),
.priv_size = sizeof(AStreamSyncContext),
.init = init,
+ .uninit = uninit,
.query_formats = query_formats,
.inputs = astreamsync_inputs,
.outputs = astreamsync_outputs,
+ .priv_class = &astreamsync_class,
};
diff --git a/ffmpeg/libavfilter/af_asyncts.c b/ffmpeg/libavfilter/af_asyncts.c
index c2441a4..5f8e1f6 100644
--- a/ffmpeg/libavfilter/af_asyncts.c
+++ b/ffmpeg/libavfilter/af_asyncts.c
@@ -1,22 +1,25 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <stdint.h>
+
#include "libavresample/avresample.h"
+#include "libavutil/attributes.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/common.h"
#include "libavutil/mathematics.h"
@@ -35,6 +38,7 @@ typedef struct ASyncContext {
int min_delta; ///< pad/trim min threshold in samples
int first_frame; ///< 1 until filter_frame() has processed at least 1 frame with a pts != AV_NOPTS_VALUE
int64_t first_pts; ///< user-specified first expected pts, in samples
+ int comp; ///< current resample compensation
/* options */
int resample;
@@ -54,22 +58,14 @@ static const AVOption asyncts_options[] = {
"(in seconds) to trigger padding/trimmin the data.", OFFSET(min_delta_sec), AV_OPT_TYPE_FLOAT, { .dbl = 0.1 }, 0, INT_MAX, A|F },
{ "max_comp", "Maximum compensation in samples per second.", OFFSET(max_comp), AV_OPT_TYPE_INT, { .i64 = 500 }, 0, INT_MAX, A|F },
{ "first_pts", "Assume the first pts should be this value.", OFFSET(first_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, A|F },
- { NULL },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(asyncts);
-static int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
ASyncContext *s = ctx->priv;
- int ret;
-
- s->class = &asyncts_class;
- av_opt_set_defaults(s);
-
- if ((ret = av_set_options_string(s, args, "=", ":")) < 0)
- return ret;
- av_opt_free(s);
s->pts = AV_NOPTS_VALUE;
s->first_frame = 1;
@@ -77,7 +73,7 @@ static int init(AVFilterContext *ctx, const char *args)
return 0;
}
-static void uninit(AVFilterContext *ctx)
+static av_cold void uninit(AVFilterContext *ctx)
{
ASyncContext *s = ctx->priv;
@@ -188,6 +184,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
int out_size, ret;
int64_t delta;
+ int64_t new_pts;
/* buffer data until we get the next timestamp */
if (s->pts == AV_NOPTS_VALUE || pts == AV_NOPTS_VALUE) {
@@ -214,10 +211,19 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
out_size = av_clipl_int32((int64_t)out_size + delta);
} else {
if (s->resample) {
- int comp = av_clip(delta, -s->max_comp, s->max_comp);
- av_log(ctx, AV_LOG_VERBOSE, "Compensating %d samples per second.\n", comp);
- avresample_set_compensation(s->avr, comp, inlink->sample_rate);
+ // adjust the compensation if delta is non-zero
+ int delay = get_delay(s);
+ int comp = s->comp + av_clip(delta * inlink->sample_rate / delay,
+ -s->max_comp, s->max_comp);
+ if (comp != s->comp) {
+ av_log(ctx, AV_LOG_VERBOSE, "Compensating %d samples per second.\n", comp);
+ if (avresample_set_compensation(s->avr, comp, inlink->sample_rate) == 0) {
+ s->comp = comp;
+ }
+ }
}
+ // adjust PTS to avoid monotonicity errors with input PTS jitter
+ pts -= delta;
delta = 0;
}
@@ -229,18 +235,23 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
}
if (s->first_frame && delta > 0) {
+ int planar = av_sample_fmt_is_planar(buf_out->format);
+ int planes = planar ? nb_channels : 1;
+ int block_size = av_get_bytes_per_sample(buf_out->format) *
+ (planar ? 1 : nb_channels);
+
int ch;
av_samples_set_silence(buf_out->extended_data, 0, delta,
nb_channels, buf->format);
- for (ch = 0; ch < nb_channels; ch++)
- buf_out->extended_data[ch] += delta;
+ for (ch = 0; ch < planes; ch++)
+ buf_out->extended_data[ch] += delta * block_size;
avresample_read(s->avr, buf_out->extended_data, out_size);
- for (ch = 0; ch < nb_channels; ch++)
- buf_out->extended_data[ch] -= delta;
+ for (ch = 0; ch < planes; ch++)
+ buf_out->extended_data[ch] -= delta * block_size;
} else {
avresample_read(s->avr, buf_out->extended_data, out_size);
@@ -262,9 +273,17 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
/* drain any remaining buffered data */
avresample_read(s->avr, NULL, avresample_available(s->avr));
- s->pts = pts - avresample_get_delay(s->avr);
- ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data,
- buf->linesize[0], buf->nb_samples);
+ new_pts = pts - avresample_get_delay(s->avr);
+ /* check for s->pts monotonicity */
+ if (new_pts > s->pts) {
+ s->pts = new_pts;
+ ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data,
+ buf->linesize[0], buf->nb_samples);
+ } else {
+ av_log(ctx, AV_LOG_WARNING, "Non-monotonous timestamps, dropping "
+ "whole buffer.\n");
+ ret = 0;
+ }
s->first_frame = 0;
fail:
@@ -275,9 +294,9 @@ fail:
static const AVFilterPad avfilter_af_asyncts_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .filter_frame = filter_frame
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame
},
{ NULL }
};
@@ -292,16 +311,13 @@ static const AVFilterPad avfilter_af_asyncts_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_asyncts = {
+AVFilter ff_af_asyncts = {
.name = "asyncts",
.description = NULL_IF_CONFIG_SMALL("Sync audio data to timestamps"),
-
.init = init,
.uninit = uninit,
-
.priv_size = sizeof(ASyncContext),
-
+ .priv_class = &asyncts_class,
.inputs = avfilter_af_asyncts_inputs,
.outputs = avfilter_af_asyncts_outputs,
- .priv_class = &asyncts_class,
};
diff --git a/ffmpeg/libavfilter/af_atempo.c b/ffmpeg/libavfilter/af_atempo.c
index 9547969..c474d6a 100644
--- a/ffmpeg/libavfilter/af_atempo.c
+++ b/ffmpeg/libavfilter/af_atempo.c
@@ -85,6 +85,8 @@ typedef enum {
* Filter state machine
*/
typedef struct {
+ const AVClass *class;
+
// ring-buffer of input samples, necessary because some times
// input fragment position may be adjusted backwards:
uint8_t *buffer;
@@ -121,8 +123,9 @@ typedef struct {
// tempo scaling factor:
double tempo;
- // cumulative alignment drift:
- int drift;
+ // a snapshot of previous fragment input and output position values
+ // captured when the tempo scale factor was set most recently:
+ int64_t origin[2];
// current/previous fragment ring-buffer:
AudioFragment frag[2];
@@ -139,7 +142,6 @@ typedef struct {
FFTSample *correlation;
// for managing AVFilterPad.request_frame and AVFilterPad.filter_frame
- int request_fulfilled;
AVFrame *dst_buffer;
uint8_t *dst;
uint8_t *dst_end;
@@ -147,6 +149,27 @@ typedef struct {
uint64_t nsamples_out;
} ATempoContext;
+#define OFFSET(x) offsetof(ATempoContext, x)
+
+static const AVOption atempo_options[] = {
+ { "tempo", "set tempo scale factor",
+ OFFSET(tempo), AV_OPT_TYPE_DOUBLE, { .dbl = 1.0 }, 0.5, 2.0,
+ AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(atempo);
+
+inline static AudioFragment *yae_curr_frag(ATempoContext *atempo)
+{
+ return &atempo->frag[atempo->nfrag % 2];
+}
+
+inline static AudioFragment *yae_prev_frag(ATempoContext *atempo)
+{
+ return &atempo->frag[(atempo->nfrag + 1) % 2];
+}
+
/**
* Reset filter to initial state, do not deallocate existing local buffers.
*/
@@ -156,13 +179,15 @@ static void yae_clear(ATempoContext *atempo)
atempo->head = 0;
atempo->tail = 0;
- atempo->drift = 0;
atempo->nfrag = 0;
atempo->state = YAE_LOAD_FRAGMENT;
atempo->position[0] = 0;
atempo->position[1] = 0;
+ atempo->origin[0] = 0;
+ atempo->origin[1] = 0;
+
atempo->frag[0].position[0] = 0;
atempo->frag[0].position[1] = 0;
atempo->frag[0].nsamples = 0;
@@ -181,7 +206,6 @@ static void yae_clear(ATempoContext *atempo)
atempo->dst = NULL;
atempo->dst_end = NULL;
- atempo->request_fulfilled = 0;
atempo->nsamples_in = 0;
atempo->nsamples_out = 0;
}
@@ -297,6 +321,7 @@ static int yae_reset(ATempoContext *atempo,
static int yae_set_tempo(AVFilterContext *ctx, const char *arg_tempo)
{
+ const AudioFragment *prev;
ATempoContext *atempo = ctx->priv;
char *tail = NULL;
double tempo = av_strtod(arg_tempo, &tail);
@@ -312,20 +337,13 @@ static int yae_set_tempo(AVFilterContext *ctx, const char *arg_tempo)
return AVERROR(EINVAL);
}
+ prev = yae_prev_frag(atempo);
+ atempo->origin[0] = prev->position[0] + atempo->window / 2;
+ atempo->origin[1] = prev->position[1] + atempo->window / 2;
atempo->tempo = tempo;
return 0;
}
-inline static AudioFragment *yae_curr_frag(ATempoContext *atempo)
-{
- return &atempo->frag[atempo->nfrag % 2];
-}
-
-inline static AudioFragment *yae_prev_frag(ATempoContext *atempo)
-{
- return &atempo->frag[(atempo->nfrag + 1) % 2];
-}
-
/**
* A helper macro for initializing complex data buffer with scalar data
* of a given type.
@@ -678,12 +696,21 @@ static int yae_adjust_position(ATempoContext *atempo)
const AudioFragment *prev = yae_prev_frag(atempo);
AudioFragment *frag = yae_curr_frag(atempo);
+ const double prev_output_position =
+ (double)(prev->position[1] - atempo->origin[1] + atempo->window / 2);
+
+ const double ideal_output_position =
+ (double)(prev->position[0] - atempo->origin[0] + atempo->window / 2) /
+ atempo->tempo;
+
+ const int drift = (int)(prev_output_position - ideal_output_position);
+
const int delta_max = atempo->window / 2;
const int correction = yae_align(frag,
prev,
atempo->window,
delta_max,
- atempo->drift,
+ drift,
atempo->correlation,
atempo->complex_to_real);
@@ -693,9 +720,6 @@ static int yae_adjust_position(ATempoContext *atempo)
// clear so that the fragment can be reloaded:
frag->nsamples = 0;
-
- // update cumulative correction drift counter:
- atempo->drift += correction;
}
return correction;
@@ -949,16 +973,12 @@ static int yae_flush(ATempoContext *atempo,
return atempo->position[1] == stop_here ? 0 : AVERROR(EAGAIN);
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
ATempoContext *atempo = ctx->priv;
-
- // NOTE: this assumes that the caller has memset ctx->priv to 0:
atempo->format = AV_SAMPLE_FMT_NONE;
- atempo->tempo = 1.0;
atempo->state = YAE_LOAD_FRAGMENT;
-
- return args ? yae_set_tempo(ctx, args) : 0;
+ return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
@@ -1017,6 +1037,8 @@ static int config_props(AVFilterLink *inlink)
int sample_rate = (int)inlink->sample_rate;
int channels = av_get_channel_layout_nb_channels(inlink->channel_layout);
+ ctx->outputs[0]->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+
return yae_reset(atempo, format, sample_rate, channels);
}
@@ -1062,6 +1084,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *src_buffer)
while (src < src_end) {
if (!atempo->dst_buffer) {
atempo->dst_buffer = ff_get_audio_buffer(outlink, n_out);
+ if (!atempo->dst_buffer)
+ return AVERROR(ENOMEM);
av_frame_copy_props(atempo->dst_buffer, src_buffer);
atempo->dst = atempo->dst_buffer->data[0];
@@ -1071,10 +1095,11 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *src_buffer)
yae_apply(atempo, &src, src_end, &atempo->dst, atempo->dst_end);
if (atempo->dst == atempo->dst_end) {
- ret = push_samples(atempo, outlink, n_out);
+ int n_samples = ((atempo->dst - atempo->dst_buffer->data[0]) /
+ atempo->stride);
+ ret = push_samples(atempo, outlink, n_samples);
if (ret < 0)
goto end;
- atempo->request_fulfilled = 1;
}
}
@@ -1090,11 +1115,7 @@ static int request_frame(AVFilterLink *outlink)
ATempoContext *atempo = ctx->priv;
int ret;
- atempo->request_fulfilled = 0;
- do {
- ret = ff_request_frame(ctx->inputs[0]);
- }
- while (!atempo->request_fulfilled && ret >= 0);
+ ret = ff_request_frame(ctx->inputs[0]);
if (ret == AVERROR_EOF) {
// flush the filter:
@@ -1105,6 +1126,8 @@ static int request_frame(AVFilterLink *outlink)
while (err == AVERROR(EAGAIN)) {
if (!atempo->dst_buffer) {
atempo->dst_buffer = ff_get_audio_buffer(outlink, n_max);
+ if (!atempo->dst_buffer)
+ return AVERROR(ENOMEM);
atempo->dst = atempo->dst_buffer->data[0];
atempo->dst_end = atempo->dst + n_max * atempo->stride;
@@ -1159,7 +1182,7 @@ static const AVFilterPad atempo_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_atempo = {
+AVFilter ff_af_atempo = {
.name = "atempo",
.description = NULL_IF_CONFIG_SMALL("Adjust audio tempo."),
.init = init,
@@ -1167,6 +1190,7 @@ AVFilter avfilter_af_atempo = {
.query_formats = query_formats,
.process_command = process_command,
.priv_size = sizeof(ATempoContext),
+ .priv_class = &atempo_class,
.inputs = atempo_inputs,
.outputs = atempo_outputs,
};
diff --git a/ffmpeg/libavfilter/af_biquads.c b/ffmpeg/libavfilter/af_biquads.c
index 0bd61fd..5bafad1 100644
--- a/ffmpeg/libavfilter/af_biquads.c
+++ b/ffmpeg/libavfilter/af_biquads.c
@@ -62,8 +62,8 @@
* V
*/
-#include "libavutil/opt.h"
#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
@@ -83,7 +83,7 @@ enum FilterType {
enum WidthType {
NONE,
- HZ,
+ HERTZ,
OCTAVE,
QFACTOR,
SLOPE,
@@ -116,15 +116,9 @@ typedef struct {
double b0, double b1, double b2, double a1, double a2);
} BiquadsContext;
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
BiquadsContext *p = ctx->priv;
- int ret;
-
- av_opt_set_defaults(p);
-
- if ((ret = av_set_options_string(p, args, "=", ":")) < 0)
- return ret;
if (p->filter_type != biquad) {
if (p->frequency <= 0 || p->width <= 0) {
@@ -256,7 +250,7 @@ static int config_output(AVFilterLink *outlink)
case NONE:
alpha = 0.0;
break;
- case HZ:
+ case HERTZ:
alpha = sin(w0) / (2 * p->frequency / p->width);
break;
case OCTAVE:
@@ -406,7 +400,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
out_buf = ff_get_audio_buffer(inlink, nb_samples);
if (!out_buf)
return AVERROR(ENOMEM);
- out_buf->pts = buf->pts;
+ av_frame_copy_props(out_buf, buf);
}
for (ch = 0; ch < av_frame_get_channels(buf); ch++)
@@ -427,7 +421,6 @@ static av_cold void uninit(AVFilterContext *ctx)
BiquadsContext *p = ctx->priv;
av_freep(&p->cache);
- av_opt_free(p);
}
static const AVFilterPad inputs[] = {
@@ -453,15 +446,15 @@ static const AVFilterPad outputs[] = {
#define DEFINE_BIQUAD_FILTER(name_, description_) \
AVFILTER_DEFINE_CLASS(name_); \
-static av_cold int name_##_init(AVFilterContext *ctx, const char *args) \
+static av_cold int name_##_init(AVFilterContext *ctx) \
{ \
BiquadsContext *p = ctx->priv; \
p->class = &name_##_class; \
p->filter_type = name_; \
- return init(ctx, args); \
+ return init(ctx); \
} \
\
-AVFilter avfilter_af_##name_ = { \
+AVFilter ff_af_##name_ = { \
.name = #name_, \
.description = NULL_IF_CONFIG_SMALL(description_), \
.priv_size = sizeof(BiquadsContext), \
@@ -477,8 +470,8 @@ AVFilter avfilter_af_##name_ = { \
static const AVOption equalizer_options[] = {
{"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 999999, FLAGS},
{"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 999999, FLAGS},
- {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"},
- {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
@@ -486,7 +479,7 @@ static const AVOption equalizer_options[] = {
{"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 999, FLAGS},
{"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
- {NULL},
+ {NULL}
};
DEFINE_BIQUAD_FILTER(equalizer, "Apply two-pole peaking equalization (EQ) filter.");
@@ -495,8 +488,8 @@ DEFINE_BIQUAD_FILTER(equalizer, "Apply two-pole peaking equalization (EQ) filter
static const AVOption bass_options[] = {
{"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=100}, 0, 999999, FLAGS},
{"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=100}, 0, 999999, FLAGS},
- {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"},
- {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
@@ -504,7 +497,7 @@ static const AVOption bass_options[] = {
{"w", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
{"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
- {NULL},
+ {NULL}
};
DEFINE_BIQUAD_FILTER(bass, "Boost or cut lower frequencies.");
@@ -513,8 +506,8 @@ DEFINE_BIQUAD_FILTER(bass, "Boost or cut lower frequencies.");
static const AVOption treble_options[] = {
{"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
- {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"},
- {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
@@ -522,7 +515,7 @@ static const AVOption treble_options[] = {
{"w", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
{"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
- {NULL},
+ {NULL}
};
DEFINE_BIQUAD_FILTER(treble, "Boost or cut upper frequencies.");
@@ -531,15 +524,15 @@ DEFINE_BIQUAD_FILTER(treble, "Boost or cut upper frequencies.");
static const AVOption bandpass_options[] = {
{"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
- {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"},
- {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
{"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
{"csg", "use constant skirt gain", OFFSET(csg), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
- {NULL},
+ {NULL}
};
DEFINE_BIQUAD_FILTER(bandpass, "Apply a two-pole Butterworth band-pass filter.");
@@ -548,14 +541,14 @@ DEFINE_BIQUAD_FILTER(bandpass, "Apply a two-pole Butterworth band-pass filter.")
static const AVOption bandreject_options[] = {
{"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
- {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"},
- {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
{"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
- {NULL},
+ {NULL}
};
DEFINE_BIQUAD_FILTER(bandreject, "Apply a two-pole Butterworth band-reject filter.");
@@ -564,8 +557,8 @@ DEFINE_BIQUAD_FILTER(bandreject, "Apply a two-pole Butterworth band-reject filte
static const AVOption lowpass_options[] = {
{"frequency", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=500}, 0, 999999, FLAGS},
{"f", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=500}, 0, 999999, FLAGS},
- {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"},
- {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
@@ -573,7 +566,7 @@ static const AVOption lowpass_options[] = {
{"w", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
{"poles", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
{"p", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
- {NULL},
+ {NULL}
};
DEFINE_BIQUAD_FILTER(lowpass, "Apply a low-pass filter with 3dB point frequency.");
@@ -582,8 +575,8 @@ DEFINE_BIQUAD_FILTER(lowpass, "Apply a low-pass filter with 3dB point frequency.
static const AVOption highpass_options[] = {
{"frequency", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"f", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
- {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"},
- {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
@@ -591,7 +584,7 @@ static const AVOption highpass_options[] = {
{"w", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
{"poles", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
{"p", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
- {NULL},
+ {NULL}
};
DEFINE_BIQUAD_FILTER(highpass, "Apply a high-pass filter with 3dB point frequency.");
@@ -600,27 +593,27 @@ DEFINE_BIQUAD_FILTER(highpass, "Apply a high-pass filter with 3dB point frequenc
static const AVOption allpass_options[] = {
{"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
- {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=HZ}, HZ, SLOPE, FLAGS, "width_type"},
- {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=HERTZ}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"width", "set filter-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=707.1}, 0, 99999, FLAGS},
{"w", "set filter-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=707.1}, 0, 99999, FLAGS},
- {NULL},
+ {NULL}
};
DEFINE_BIQUAD_FILTER(allpass, "Apply a two-pole all-pass filter.");
#endif /* CONFIG_ALLPASS_FILTER */
#if CONFIG_BIQUAD_FILTER
static const AVOption biquad_options[] = {
- {"a0", NULL, OFFSET(a0), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS},
- {"a1", NULL, OFFSET(a1), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS},
- {"a2", NULL, OFFSET(a2), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS},
- {"b0", NULL, OFFSET(b0), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS},
- {"b1", NULL, OFFSET(b1), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS},
- {"b2", NULL, OFFSET(b2), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS},
- {NULL},
+ {"a0", NULL, OFFSET(a0), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {"a1", NULL, OFFSET(a1), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {"a2", NULL, OFFSET(a2), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {"b0", NULL, OFFSET(b0), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {"b1", NULL, OFFSET(b1), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {"b2", NULL, OFFSET(b2), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {NULL}
};
DEFINE_BIQUAD_FILTER(biquad, "Apply a biquad IIR filter with the given coefficients.");
diff --git a/ffmpeg/libavfilter/af_channelmap.c b/ffmpeg/libavfilter/af_channelmap.c
index e73c4bc..e5e8987 100644
--- a/ffmpeg/libavfilter/af_channelmap.c
+++ b/ffmpeg/libavfilter/af_channelmap.c
@@ -69,20 +69,15 @@ typedef struct ChannelMapContext {
#define OFFSET(x) offsetof(ChannelMapContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
-static const AVOption options[] = {
+static const AVOption channelmap_options[] = {
{ "map", "A comma-separated list of input channel numbers in output order.",
OFFSET(mapping_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ "channel_layout", "Output channel layout.",
OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A|F },
- { NULL },
+ { NULL }
};
-static const AVClass channelmap_class = {
- .class_name = "channel map filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(channelmap);
static char* split(char *message, char delim) {
char *next = strchr(message, delim);
@@ -120,28 +115,16 @@ static int get_channel(char **map, uint64_t *ch, char delim)
return 0;
}
-static av_cold int channelmap_init(AVFilterContext *ctx, const char *args)
+static av_cold int channelmap_init(AVFilterContext *ctx)
{
ChannelMapContext *s = ctx->priv;
- int ret;
- char *mapping;
+ char *mapping, separator = '|';
int map_entries = 0;
char buf[256];
enum MappingMode mode;
uint64_t out_ch_mask = 0;
int i;
- if (!args) {
- av_log(ctx, AV_LOG_ERROR, "No parameters supplied.\n");
- return AVERROR(EINVAL);
- }
-
- s->class = &channelmap_class;
- av_opt_set_defaults(s);
-
- if ((ret = av_set_options_string(s, args, "=", ":")) < 0)
- return ret;
-
mapping = s->mapping_str;
if (!mapping) {
@@ -164,21 +147,27 @@ static av_cold int channelmap_init(AVFilterContext *ctx, const char *args)
else
mode = MAP_PAIR_STR_STR;
}
+#if FF_API_OLD_FILTER_OPTS
+ if (strchr(mapping, ',')) {
+ av_log(ctx, AV_LOG_WARNING, "This syntax is deprecated, use "
+ "'|' to separate the mappings.\n");
+ separator = ',';
+ }
+#endif
}
if (mode != MAP_NONE) {
- char *comma = mapping;
+ char *sep = mapping;
map_entries = 1;
- while ((comma = strchr(comma, ','))) {
- if (*++comma) // Allow trailing comma
+ while ((sep = strchr(sep, separator))) {
+ if (*++sep) // Allow a trailing separator ('|', or ',' for the deprecated syntax)
map_entries++;
}
}
if (map_entries > MAX_CH) {
av_log(ctx, AV_LOG_ERROR, "Too many channels mapped: '%d'.\n", map_entries);
- ret = AVERROR(EINVAL);
- goto fail;
+ return AVERROR(EINVAL);
}
for (i = 0; i < map_entries; i++) {
@@ -187,40 +176,36 @@ static av_cold int channelmap_init(AVFilterContext *ctx, const char *args)
static const char err[] = "Failed to parse channel map\n";
switch (mode) {
case MAP_ONE_INT:
- if (get_channel_idx(&mapping, &in_ch_idx, ',', MAX_CH) < 0) {
- ret = AVERROR(EINVAL);
+ if (get_channel_idx(&mapping, &in_ch_idx, separator, MAX_CH) < 0) {
av_log(ctx, AV_LOG_ERROR, err);
- goto fail;
+ return AVERROR(EINVAL);
}
s->map[i].in_channel_idx = in_ch_idx;
s->map[i].out_channel_idx = i;
break;
case MAP_ONE_STR:
- if (!get_channel(&mapping, &in_ch, ',')) {
+ if (!get_channel(&mapping, &in_ch, separator)) {
av_log(ctx, AV_LOG_ERROR, err);
- ret = AVERROR(EINVAL);
- goto fail;
+ return AVERROR(EINVAL);
}
s->map[i].in_channel = in_ch;
s->map[i].out_channel_idx = i;
break;
case MAP_PAIR_INT_INT:
if (get_channel_idx(&mapping, &in_ch_idx, '-', MAX_CH) < 0 ||
- get_channel_idx(&mapping, &out_ch_idx, ',', MAX_CH) < 0) {
+ get_channel_idx(&mapping, &out_ch_idx, separator, MAX_CH) < 0) {
av_log(ctx, AV_LOG_ERROR, err);
- ret = AVERROR(EINVAL);
- goto fail;
+ return AVERROR(EINVAL);
}
s->map[i].in_channel_idx = in_ch_idx;
s->map[i].out_channel_idx = out_ch_idx;
break;
case MAP_PAIR_INT_STR:
if (get_channel_idx(&mapping, &in_ch_idx, '-', MAX_CH) < 0 ||
- get_channel(&mapping, &out_ch, ',') < 0 ||
+ get_channel(&mapping, &out_ch, separator) < 0 ||
out_ch & out_ch_mask) {
av_log(ctx, AV_LOG_ERROR, err);
- ret = AVERROR(EINVAL);
- goto fail;
+ return AVERROR(EINVAL);
}
s->map[i].in_channel_idx = in_ch_idx;
s->map[i].out_channel = out_ch;
@@ -228,21 +213,19 @@ static av_cold int channelmap_init(AVFilterContext *ctx, const char *args)
break;
case MAP_PAIR_STR_INT:
if (get_channel(&mapping, &in_ch, '-') < 0 ||
- get_channel_idx(&mapping, &out_ch_idx, ',', MAX_CH) < 0) {
+ get_channel_idx(&mapping, &out_ch_idx, separator, MAX_CH) < 0) {
av_log(ctx, AV_LOG_ERROR, err);
- ret = AVERROR(EINVAL);
- goto fail;
+ return AVERROR(EINVAL);
}
s->map[i].in_channel = in_ch;
s->map[i].out_channel_idx = out_ch_idx;
break;
case MAP_PAIR_STR_STR:
if (get_channel(&mapping, &in_ch, '-') < 0 ||
- get_channel(&mapping, &out_ch, ',') < 0 ||
+ get_channel(&mapping, &out_ch, separator) < 0 ||
out_ch & out_ch_mask) {
av_log(ctx, AV_LOG_ERROR, err);
- ret = AVERROR(EINVAL);
- goto fail;
+ return AVERROR(EINVAL);
}
s->map[i].in_channel = in_ch;
s->map[i].out_channel = out_ch;
@@ -260,8 +243,7 @@ static av_cold int channelmap_init(AVFilterContext *ctx, const char *args)
if ((fmt = av_get_channel_layout(s->channel_layout_str)) == 0) {
av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout: '%s'.\n",
s->channel_layout_str);
- ret = AVERROR(EINVAL);
- goto fail;
+ return AVERROR(EINVAL);
}
if (mode == MAP_NONE) {
int i;
@@ -275,17 +257,21 @@ static av_cold int channelmap_init(AVFilterContext *ctx, const char *args)
av_log(ctx, AV_LOG_ERROR,
"Output channel layout '%s' does not match the list of channel mapped: '%s'.\n",
s->channel_layout_str, buf);
- ret = AVERROR(EINVAL);
- goto fail;
+ return AVERROR(EINVAL);
} else if (s->nch != av_get_channel_layout_nb_channels(fmt)) {
av_log(ctx, AV_LOG_ERROR,
"Output channel layout %s does not match the number of channels mapped %d.\n",
s->channel_layout_str, s->nch);
- ret = AVERROR(EINVAL);
- goto fail;
+ return AVERROR(EINVAL);
}
s->output_layout = fmt;
}
+ if (!s->output_layout) {
+ av_log(ctx, AV_LOG_ERROR, "Output channel layout is not set and "
+ "cannot be guessed from the maps.\n");
+ return AVERROR(EINVAL);
+ }
+
ff_add_channel_layout(&s->channel_layouts, s->output_layout);
if (mode == MAP_PAIR_INT_STR || mode == MAP_PAIR_STR_STR) {
@@ -295,9 +281,7 @@ static av_cold int channelmap_init(AVFilterContext *ctx, const char *args)
}
}
-fail:
- av_opt_free(s);
- return ret;
+ return 0;
}
static int channelmap_query_formats(AVFilterContext *ctx)
@@ -361,23 +345,33 @@ static int channelmap_config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
ChannelMapContext *s = ctx->priv;
+ int nb_channels = av_get_channel_layout_nb_channels(inlink->channel_layout);
int i, err = 0;
const char *channel_name;
char layout_name[256];
- if (s->mode == MAP_PAIR_STR_INT || s->mode == MAP_PAIR_STR_STR) {
- for (i = 0; i < s->nch; i++) {
- s->map[i].in_channel_idx = av_get_channel_layout_channel_index(
- inlink->channel_layout, s->map[i].in_channel);
- if (s->map[i].in_channel_idx < 0) {
- channel_name = av_get_channel_name(s->map[i].in_channel);
- av_get_channel_layout_string(layout_name, sizeof(layout_name),
- 0, inlink->channel_layout);
+ for (i = 0; i < s->nch; i++) {
+ struct ChannelMap *m = &s->map[i];
+
+ if (s->mode == MAP_PAIR_STR_INT || s->mode == MAP_PAIR_STR_STR) {
+ m->in_channel_idx = av_get_channel_layout_channel_index(
+ inlink->channel_layout, m->in_channel);
+ }
+
+ if (m->in_channel_idx < 0 || m->in_channel_idx >= nb_channels) {
+ av_get_channel_layout_string(layout_name, sizeof(layout_name),
+ 0, inlink->channel_layout);
+ if (m->in_channel) {
+ channel_name = av_get_channel_name(m->in_channel);
av_log(ctx, AV_LOG_ERROR,
"input channel '%s' not available from input layout '%s'\n",
channel_name, layout_name);
- err = AVERROR(EINVAL);
+ } else {
+ av_log(ctx, AV_LOG_ERROR,
+ "input channel #%d not available from input layout '%s'\n",
+ m->in_channel_idx, layout_name);
}
+ err = AVERROR(EINVAL);
}
}
@@ -403,14 +397,13 @@ static const AVFilterPad avfilter_af_channelmap_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_channelmap = {
+AVFilter ff_af_channelmap = {
.name = "channelmap",
.description = NULL_IF_CONFIG_SMALL("Remap audio channels."),
.init = channelmap_init,
.query_formats = channelmap_query_formats,
.priv_size = sizeof(ChannelMapContext),
-
+ .priv_class = &channelmap_class,
.inputs = avfilter_af_channelmap_inputs,
.outputs = avfilter_af_channelmap_outputs,
- .priv_class = &channelmap_class,
};
diff --git a/ffmpeg/libavfilter/af_channelsplit.c b/ffmpeg/libavfilter/af_channelsplit.c
index 9bcdc54..b3756e2 100644
--- a/ffmpeg/libavfilter/af_channelsplit.c
+++ b/ffmpeg/libavfilter/af_channelsplit.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -23,6 +23,7 @@
* Split an audio stream into per-channel streams.
*/
+#include "libavutil/attributes.h"
#include "libavutil/channel_layout.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
@@ -44,21 +45,17 @@ typedef struct ChannelSplitContext {
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption channelsplit_options[] = {
{ "channel_layout", "Input channel layout.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, { .str = "stereo" }, .flags = A|F },
- { NULL },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(channelsplit);
-static int init(AVFilterContext *ctx, const char *arg)
+static av_cold int init(AVFilterContext *ctx)
{
ChannelSplitContext *s = ctx->priv;
int nb_channels;
int ret = 0, i;
- s->class = &channelsplit_class;
- av_opt_set_defaults(s);
- if ((ret = av_set_options_string(s, arg, "=", ":")) < 0)
- return ret;
if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) {
av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n",
s->channel_layout_str);
@@ -78,7 +75,6 @@ static int init(AVFilterContext *ctx, const char *arg)
}
fail:
- av_opt_free(s);
return ret;
}
@@ -121,6 +117,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i];
buf_out->channel_layout =
av_channel_layout_extract_channel(buf->channel_layout, i);
+ av_frame_set_channels(buf_out, 1);
ret = ff_filter_frame(ctx->outputs[i], buf_out);
if (ret < 0)
@@ -132,22 +129,21 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
static const AVFilterPad avfilter_af_channelsplit_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
-AVFilter avfilter_af_channelsplit = {
+AVFilter ff_af_channelsplit = {
.name = "channelsplit",
- .description = NULL_IF_CONFIG_SMALL("Split audio into per-channel streams"),
+ .description = NULL_IF_CONFIG_SMALL("Split audio into per-channel streams."),
.priv_size = sizeof(ChannelSplitContext),
-
+ .priv_class = &channelsplit_class,
.init = init,
.query_formats = query_formats,
-
- .inputs = avfilter_af_channelsplit_inputs,
- .outputs = NULL,
- .priv_class = &channelsplit_class,
+ .inputs = avfilter_af_channelsplit_inputs,
+ .outputs = NULL,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
diff --git a/ffmpeg/libavfilter/af_earwax.c b/ffmpeg/libavfilter/af_earwax.c
index b1d3d6f..c310997 100644
--- a/ffmpeg/libavfilter/af_earwax.c
+++ b/ffmpeg/libavfilter/af_earwax.c
@@ -98,10 +98,10 @@ static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin, in
int16_t j;
while (in < endin) {
- sample = 32;
+ sample = 0;
for (j = 0; j < NUMTAPS; j++)
sample += in[j] * filt[j];
- *out = sample >> 6;
+ *out = av_clip_int16(sample >> 6);
out++;
in++;
}
@@ -114,7 +114,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
AVFilterLink *outlink = inlink->dst->outputs[0];
int16_t *taps, *endin, *in, *out;
AVFrame *outsamples = ff_get_audio_buffer(inlink, insamples->nb_samples);
- int ret;
+ int len;
if (!outsamples) {
av_frame_free(&insamples);
@@ -126,20 +126,23 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
out = (int16_t *)outsamples->data[0];
in = (int16_t *)insamples ->data[0];
+ len = FFMIN(NUMTAPS, 2*insamples->nb_samples);
// copy part of new input and process with saved input
- memcpy(taps+NUMTAPS, in, NUMTAPS * sizeof(*taps));
- out = scalarproduct(taps, taps + NUMTAPS, out);
+ memcpy(taps+NUMTAPS, in, len * sizeof(*taps));
+ out = scalarproduct(taps, taps + len, out);
// process current input
- endin = in + insamples->nb_samples * 2 - NUMTAPS;
- scalarproduct(in, endin, out);
+ if (2*insamples->nb_samples >= NUMTAPS ){
+ endin = in + insamples->nb_samples * 2 - NUMTAPS;
+ scalarproduct(in, endin, out);
- // save part of input for next round
- memcpy(taps, endin, NUMTAPS * sizeof(*taps));
+ // save part of input for next round
+ memcpy(taps, endin, NUMTAPS * sizeof(*taps));
+ } else
+ memmove(taps, taps + 2*insamples->nb_samples, NUMTAPS * sizeof(*taps));
- ret = ff_filter_frame(outlink, outsamples);
av_frame_free(&insamples);
- return ret;
+ return ff_filter_frame(outlink, outsamples);
}
static const AVFilterPad earwax_inputs[] = {
@@ -159,7 +162,7 @@ static const AVFilterPad earwax_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_earwax = {
+AVFilter ff_af_earwax = {
.name = "earwax",
.description = NULL_IF_CONFIG_SMALL("Widen the stereo image."),
.query_formats = query_formats,
diff --git a/ffmpeg/libavfilter/af_join.c b/ffmpeg/libavfilter/af_join.c
index 8dffda0..3e9ccc8 100644
--- a/ffmpeg/libavfilter/af_join.c
+++ b/ffmpeg/libavfilter/af_join.c
@@ -1,19 +1,18 @@
/*
+ * This file is part of FFmpeg.
*
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -74,15 +73,10 @@ static const AVOption join_options[] = {
{ "map", "A comma-separated list of channels maps in the format "
"'input_stream.input_channel-output_channel.",
OFFSET(map), AV_OPT_TYPE_STRING, .flags = A|F },
- { NULL },
+ { NULL }
};
-static const AVClass join_class = {
- .class_name = "join filter",
- .item_name = av_default_item_name,
- .option = join_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(join);
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
@@ -103,14 +97,23 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
static int parse_maps(AVFilterContext *ctx)
{
JoinContext *s = ctx->priv;
+ char separator = '|';
char *cur = s->map;
+#if FF_API_OLD_FILTER_OPTS
+ if (cur && strchr(cur, ',')) {
+ av_log(ctx, AV_LOG_WARNING, "This syntax is deprecated, use '|' to "
+ "separate the mappings.\n");
+ separator = ',';
+ }
+#endif
+
while (cur && *cur) {
char *sep, *next, *p;
uint64_t in_channel = 0, out_channel = 0;
int input_idx, out_ch_idx, in_ch_idx;
- next = strchr(cur, ',');
+ next = strchr(cur, separator);
if (next)
*next++ = 0;
@@ -178,31 +181,23 @@ static int parse_maps(AVFilterContext *ctx)
return 0;
}
-static int join_init(AVFilterContext *ctx, const char *args)
+static av_cold int join_init(AVFilterContext *ctx)
{
JoinContext *s = ctx->priv;
int ret, i;
- s->class = &join_class;
- av_opt_set_defaults(s);
- if ((ret = av_set_options_string(s, args, "=", ":")) < 0)
- return ret;
-
if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) {
av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n",
s->channel_layout_str);
- ret = AVERROR(EINVAL);
- goto fail;
+ return AVERROR(EINVAL);
}
s->nb_channels = av_get_channel_layout_nb_channels(s->channel_layout);
s->channels = av_mallocz(sizeof(*s->channels) * s->nb_channels);
s->buffers = av_mallocz(sizeof(*s->buffers) * s->nb_channels);
s->input_frames = av_mallocz(sizeof(*s->input_frames) * s->inputs);
- if (!s->channels || !s->buffers|| !s->input_frames) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
+ if (!s->channels || !s->buffers|| !s->input_frames)
+ return AVERROR(ENOMEM);
for (i = 0; i < s->nb_channels; i++) {
s->channels[i].out_channel = av_channel_layout_extract_channel(s->channel_layout, i);
@@ -210,7 +205,7 @@ static int join_init(AVFilterContext *ctx, const char *args)
}
if ((ret = parse_maps(ctx)) < 0)
- goto fail;
+ return ret;
for (i = 0; i < s->inputs; i++) {
char name[32];
@@ -226,12 +221,10 @@ static int join_init(AVFilterContext *ctx, const char *args)
ff_insert_inpad(ctx, i, &pad);
}
-fail:
- av_opt_free(s);
- return ret;
+ return 0;
}
-static void join_uninit(AVFilterContext *ctx)
+static av_cold void join_uninit(AVFilterContext *ctx)
{
JoinContext *s = ctx->priv;
int i;
@@ -476,6 +469,8 @@ static int join_request_frame(AVFilterLink *outlink)
frame->nb_samples = nb_samples;
frame->channel_layout = outlink->channel_layout;
+ av_frame_set_channels(frame, outlink->channels);
+ frame->format = outlink->format;
frame->sample_rate = outlink->sample_rate;
frame->pts = s->input_frames[0]->pts;
frame->linesize[0] = linesize;
@@ -506,17 +501,16 @@ static const AVFilterPad avfilter_af_join_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_join = {
+AVFilter ff_af_join = {
.name = "join",
.description = NULL_IF_CONFIG_SMALL("Join multiple audio streams into "
- "multi-channel output"),
+ "multi-channel output."),
.priv_size = sizeof(JoinContext),
-
+ .priv_class = &join_class,
.init = join_init,
.uninit = join_uninit,
.query_formats = join_query_formats,
-
- .inputs = NULL,
- .outputs = avfilter_af_join_outputs,
- .priv_class = &join_class,
+ .inputs = NULL,
+ .outputs = avfilter_af_join_outputs,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
diff --git a/ffmpeg/libavfilter/af_pan.c b/ffmpeg/libavfilter/af_pan.c
index ae2e0aa..d28f382 100644
--- a/ffmpeg/libavfilter/af_pan.c
+++ b/ffmpeg/libavfilter/af_pan.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2002 Anders Johansson <ajh@atri.curtin.edu.au>
- * Copyright (c) 2011 Clément Bœsch <ubitux@gmail.com>
+ * Copyright (c) 2011 Clément Bœsch <u pkh me>
* Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
*
* This file is part of FFmpeg.
@@ -40,11 +40,12 @@
#define MAX_CHANNELS 63
typedef struct PanContext {
+ const AVClass *class;
+ char *args;
int64_t out_channel_layout;
double gain[MAX_CHANNELS][MAX_CHANNELS];
int64_t need_renorm;
int need_renumber;
- int nb_input_channels;
int nb_output_channels;
int pure_gains;
@@ -53,12 +54,21 @@ typedef struct PanContext {
struct SwrContext *swr;
} PanContext;
+static void skip_spaces(char **arg)
+{
+ int len = 0;
+
+ sscanf(*arg, " %n", &len);
+ *arg += len;
+}
+
static int parse_channel_name(char **arg, int *rchannel, int *rnamed)
{
char buf[8];
int len, i, channel_id = 0;
int64_t layout, layout0;
+ skip_spaces(arg);
/* try to parse a channel name, e.g. "FL" */
if (sscanf(*arg, "%7[A-Z]%n", buf, &len)) {
layout0 = layout = av_get_channel_layout(buf);
@@ -88,23 +98,15 @@ static int parse_channel_name(char **arg, int *rchannel, int *rnamed)
return AVERROR(EINVAL);
}
-static void skip_spaces(char **arg)
-{
- int len = 0;
-
- sscanf(*arg, " %n", &len);
- *arg += len;
-}
-
-static av_cold int init(AVFilterContext *ctx, const char *args0)
+static av_cold int init(AVFilterContext *ctx)
{
PanContext *const pan = ctx->priv;
- char *arg, *arg0, *tokenizer, *args = av_strdup(args0);
+ char *arg, *arg0, *tokenizer, *args = av_strdup(pan->args);
int out_ch_id, in_ch_id, len, named, ret;
int nb_in_channels[2] = { 0, 0 }; // number of unnamed and named input channels
double gain;
- if (!args0) {
+ if (!pan->args) {
av_log(ctx, AV_LOG_ERROR,
"pan filter needs a channel layout and a set "
"of channels definitions as parameter\n");
@@ -112,14 +114,14 @@ static av_cold int init(AVFilterContext *ctx, const char *args0)
}
if (!args)
return AVERROR(ENOMEM);
- arg = av_strtok(args, ":", &tokenizer);
- ret = ff_parse_channel_layout(&pan->out_channel_layout, arg, ctx);
+ arg = av_strtok(args, "|", &tokenizer);
+ ret = ff_parse_channel_layout(&pan->out_channel_layout,
+ &pan->nb_output_channels, arg, ctx);
if (ret < 0)
goto fail;
- pan->nb_output_channels = av_get_channel_layout_nb_channels(pan->out_channel_layout);
/* parse channel specifications */
- while ((arg = arg0 = av_strtok(NULL, ":", &tokenizer))) {
+ while ((arg = arg0 = av_strtok(NULL, "|", &tokenizer))) {
/* channel name */
if (parse_channel_name(&arg, &out_ch_id, &named)) {
av_log(ctx, AV_LOG_ERROR,
@@ -236,12 +238,14 @@ static int query_formats(AVFilterContext *ctx)
ff_set_common_samplerates(ctx, formats);
// inlink supports any channel layout
- layouts = ff_all_channel_layouts();
+ layouts = ff_all_channel_counts();
ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
// outlink supports only requested output channel layout
layouts = NULL;
- ff_add_channel_layout(&layouts, pan->out_channel_layout);
+ ff_add_channel_layout(&layouts,
+ pan->out_channel_layout ? pan->out_channel_layout :
+ FF_COUNT2LAYOUT(pan->nb_output_channels));
ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
return 0;
}
@@ -254,7 +258,6 @@ static int config_props(AVFilterLink *link)
int i, j, k, r;
double t;
- pan->nb_input_channels = av_get_channel_layout_nb_channels(link->channel_layout);
if (pan->need_renumber) {
// input channels were given by their name: renumber them
for (i = j = 0; i < MAX_CHANNELS; i++) {
@@ -268,7 +271,7 @@ static int config_props(AVFilterLink *link)
// sanity check; can't be done in query_formats since the inlink
// channel layout is unknown at that time
- if (pan->nb_input_channels > SWR_CH_MAX ||
+ if (link->channels > SWR_CH_MAX ||
pan->nb_output_channels > SWR_CH_MAX) {
av_log(ctx, AV_LOG_ERROR,
"libswresample support a maximum of %d channels. "
@@ -283,6 +286,10 @@ static int config_props(AVFilterLink *link)
0, ctx);
if (!pan->swr)
return AVERROR(ENOMEM);
+ if (!link->channel_layout)
+ av_opt_set_int(pan->swr, "ich", link->channels, 0);
+ if (!pan->out_channel_layout)
+ av_opt_set_int(pan->swr, "och", pan->nb_output_channels, 0);
// gains are pure, init the channel mapping
if (pan->pure_gains) {
@@ -290,7 +297,7 @@ static int config_props(AVFilterLink *link)
// get channel map from the pure gains
for (i = 0; i < pan->nb_output_channels; i++) {
int ch_id = -1;
- for (j = 0; j < pan->nb_input_channels; j++) {
+ for (j = 0; j < link->channels; j++) {
if (pan->gain[i][j]) {
ch_id = j;
break;
@@ -308,7 +315,7 @@ static int config_props(AVFilterLink *link)
if (!((pan->need_renorm >> i) & 1))
continue;
t = 0;
- for (j = 0; j < pan->nb_input_channels; j++)
+ for (j = 0; j < link->channels; j++)
t += pan->gain[i][j];
if (t > -1E-5 && t < 1E-5) {
// t is almost 0 but not exactly, this is probably a mistake
@@ -317,7 +324,7 @@ static int config_props(AVFilterLink *link)
"Degenerate coefficients while renormalizing\n");
continue;
}
- for (j = 0; j < pan->nb_input_channels; j++)
+ for (j = 0; j < link->channels; j++)
pan->gain[i][j] /= t;
}
av_opt_set_int(pan->swr, "icl", link->channel_layout, 0);
@@ -332,7 +339,7 @@ static int config_props(AVFilterLink *link)
// summary
for (i = 0; i < pan->nb_output_channels; i++) {
cur = buf;
- for (j = 0; j < pan->nb_input_channels; j++) {
+ for (j = 0; j < link->channels; j++) {
r = snprintf(cur, buf + sizeof(buf) - cur, "%s%.3g i%d",
j ? " + " : "", pan->gain[i][j], j);
cur += FFMIN(buf + sizeof(buf) - cur, r);
@@ -361,6 +368,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
AVFrame *outsamples = ff_get_audio_buffer(outlink, n);
PanContext *pan = inlink->dst->priv;
+ if (!outsamples)
+ return AVERROR(ENOMEM);
swr_convert(pan->swr, outsamples->data, n, (void *)insamples->data, n);
av_frame_copy_props(outsamples, insamples);
outsamples->channel_layout = outlink->channel_layout;
@@ -377,6 +386,15 @@ static av_cold void uninit(AVFilterContext *ctx)
swr_free(&pan->swr);
}
+#define OFFSET(x) offsetof(PanContext, x)
+
+static const AVOption pan_options[] = {
+ { "args", NULL, OFFSET(args), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(pan);
+
static const AVFilterPad pan_inputs[] = {
{
.name = "default",
@@ -395,10 +413,11 @@ static const AVFilterPad pan_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_pan = {
+AVFilter ff_af_pan = {
.name = "pan",
.description = NULL_IF_CONFIG_SMALL("Remix channels with coefficients (panning)."),
.priv_size = sizeof(PanContext),
+ .priv_class = &pan_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
diff --git a/ffmpeg/libavfilter/af_resample.c b/ffmpeg/libavfilter/af_resample.c
index f82a970..bf32aa7 100644
--- a/ffmpeg/libavfilter/af_resample.c
+++ b/ffmpeg/libavfilter/af_resample.c
@@ -1,19 +1,18 @@
/*
+ * This file is part of FFmpeg.
*
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -37,6 +36,7 @@
#include "internal.h"
typedef struct ResampleContext {
+ const AVClass *class;
AVAudioResampleContext *avr;
AVDictionary *options;
@@ -46,26 +46,30 @@ typedef struct ResampleContext {
int got_output;
} ResampleContext;
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx, AVDictionary **opts)
{
ResampleContext *s = ctx->priv;
+ const AVClass *avr_class = avresample_get_class();
+ AVDictionaryEntry *e = NULL;
- if (args) {
- int ret = av_dict_parse_string(&s->options, args, "=", ":", 0);
- if (ret < 0) {
- av_log(ctx, AV_LOG_ERROR, "error setting option string: %s\n", args);
- return ret;
- }
-
- /* do not allow the user to override basic format options */
- av_dict_set(&s->options, "in_channel_layout", NULL, 0);
- av_dict_set(&s->options, "out_channel_layout", NULL, 0);
- av_dict_set(&s->options, "in_sample_fmt", NULL, 0);
- av_dict_set(&s->options, "out_sample_fmt", NULL, 0);
- av_dict_set(&s->options, "in_sample_rate", NULL, 0);
- av_dict_set(&s->options, "out_sample_rate", NULL, 0);
+ while ((e = av_dict_get(*opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
+ if (av_opt_find(&avr_class, e->key, NULL, 0,
+ AV_OPT_SEARCH_FAKE_OBJ | AV_OPT_SEARCH_CHILDREN))
+ av_dict_set(&s->options, e->key, e->value, 0);
}
+ e = NULL;
+ while ((e = av_dict_get(s->options, "", e, AV_DICT_IGNORE_SUFFIX)))
+ av_dict_set(opts, e->key, NULL, 0);
+
+ /* do not allow the user to override basic format options */
+ av_dict_set(&s->options, "in_channel_layout", NULL, 0);
+ av_dict_set(&s->options, "out_channel_layout", NULL, 0);
+ av_dict_set(&s->options, "in_sample_fmt", NULL, 0);
+ av_dict_set(&s->options, "out_sample_fmt", NULL, 0);
+ av_dict_set(&s->options, "in_sample_rate", NULL, 0);
+ av_dict_set(&s->options, "out_sample_rate", NULL, 0);
+
return 0;
}
@@ -272,11 +276,30 @@ fail:
return ret;
}
+static const AVClass *resample_child_class_next(const AVClass *prev)
+{
+ return prev ? NULL : avresample_get_class();
+}
+
+static void *resample_child_next(void *obj, void *prev)
+{
+ ResampleContext *s = obj;
+ return prev ? NULL : s->avr;
+}
+
+static const AVClass resample_class = {
+ .class_name = "resample",
+ .item_name = av_default_item_name,
+ .version = LIBAVUTIL_VERSION_INT,
+ .child_class_next = resample_child_class_next,
+ .child_next = resample_child_next,
+};
+
static const AVFilterPad avfilter_af_resample_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -291,15 +314,14 @@ static const AVFilterPad avfilter_af_resample_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_resample = {
+AVFilter ff_af_resample = {
.name = "resample",
.description = NULL_IF_CONFIG_SMALL("Audio resampling and conversion."),
.priv_size = sizeof(ResampleContext),
-
- .init = init,
- .uninit = uninit,
- .query_formats = query_formats,
-
- .inputs = avfilter_af_resample_inputs,
- .outputs = avfilter_af_resample_outputs,
+ .priv_class = &resample_class,
+ .init_dict = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = avfilter_af_resample_inputs,
+ .outputs = avfilter_af_resample_outputs,
};
diff --git a/ffmpeg/libavfilter/af_silencedetect.c b/ffmpeg/libavfilter/af_silencedetect.c
index dbd9f5f..687d2e7 100644
--- a/ffmpeg/libavfilter/af_silencedetect.c
+++ b/ffmpeg/libavfilter/af_silencedetect.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 Clément Bœsch <ubitux@gmail.com>
+ * Copyright (c) 2012 Clément Bœsch <u pkh me>
*
* This file is part of FFmpeg.
*
@@ -25,7 +25,6 @@
#include <float.h> /* DBL_MAX */
-#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/timestamp.h"
#include "audio.h"
@@ -33,13 +32,17 @@
#include "avfilter.h"
#include "internal.h"
-typedef struct {
+typedef struct SilenceDetectContext {
const AVClass *class;
double noise; ///< noise amplitude ratio
double duration; ///< minimum duration of silence until notification
int64_t nb_null_samples; ///< current number of continuous zero samples
int64_t start; ///< if silence is detected, this value contains the time of the first zero sample
int last_sample_rate; ///< last sample rate to check for sample rate changes
+
+ void (*silencedetect)(struct SilenceDetectContext *s, AVFrame *insamples,
+ int nb_samples, int64_t nb_samples_notify,
+ AVRational time_base);
} SilenceDetectContext;
#define OFFSET(x) offsetof(SilenceDetectContext, x)
@@ -49,80 +52,103 @@ static const AVOption silencedetect_options[] = {
{ "noise", "set noise tolerance", OFFSET(noise), AV_OPT_TYPE_DOUBLE, {.dbl=0.001}, 0, DBL_MAX, FLAGS },
{ "d", "set minimum duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl=2.}, 0, 24*60*60, FLAGS },
{ "duration", "set minimum duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl=2.}, 0, 24*60*60, FLAGS },
- { NULL },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(silencedetect);
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static char *get_metadata_val(AVFrame *insamples, const char *key)
{
- int ret;
- SilenceDetectContext *silence = ctx->priv;
-
- silence->class = &silencedetect_class;
- av_opt_set_defaults(silence);
-
- if ((ret = av_set_options_string(silence, args, "=", ":")) < 0)
- return ret;
+ AVDictionaryEntry *e = av_dict_get(insamples->metadata, key, NULL, 0);
+ return e && e->value ? e->value : NULL;
+}
- av_opt_free(silence);
+static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples,
+ int is_silence, int64_t nb_samples_notify,
+ AVRational time_base)
+{
+ if (is_silence) {
+ if (!s->start) {
+ s->nb_null_samples++;
+ if (s->nb_null_samples >= nb_samples_notify) {
+ s->start = insamples->pts - (int64_t)(s->duration / av_q2d(time_base) + .5);
+ av_dict_set(&insamples->metadata, "lavfi.silence_start",
+ av_ts2timestr(s->start, &time_base), 0);
+ av_log(s, AV_LOG_INFO, "silence_start: %s\n",
+ get_metadata_val(insamples, "lavfi.silence_start"));
+ }
+ }
+ } else {
+ if (s->start) {
+ av_dict_set(&insamples->metadata, "lavfi.silence_end",
+ av_ts2timestr(insamples->pts, &time_base), 0);
+ av_dict_set(&insamples->metadata, "lavfi.silence_duration",
+ av_ts2timestr(insamples->pts - s->start, &time_base), 0);
+ av_log(s, AV_LOG_INFO,
+ "silence_end: %s | silence_duration: %s\n",
+ get_metadata_val(insamples, "lavfi.silence_end"),
+ get_metadata_val(insamples, "lavfi.silence_duration"));
+ }
+ s->nb_null_samples = s->start = 0;
+ }
+}
- return 0;
+#define SILENCE_DETECT(name, type) \
+static void silencedetect_##name(SilenceDetectContext *s, AVFrame *insamples, \
+ int nb_samples, int64_t nb_samples_notify, \
+ AVRational time_base) \
+{ \
+ const type *p = (const type *)insamples->data[0]; \
+ const type noise = s->noise; \
+ int i; \
+ \
+ for (i = 0; i < nb_samples; i++, p++) \
+ update(s, insamples, *p < noise && *p > -noise, \
+ nb_samples_notify, time_base); \
}
-static char *get_metadata_val(AVFrame *insamples, const char *key)
+SILENCE_DETECT(dbl, double)
+SILENCE_DETECT(flt, float)
+SILENCE_DETECT(s32, int32_t)
+SILENCE_DETECT(s16, int16_t)
+
+static int config_input(AVFilterLink *inlink)
{
- AVDictionaryEntry *e = av_dict_get(insamples->metadata, key, NULL, 0);
- return e && e->value ? e->value : NULL;
+ AVFilterContext *ctx = inlink->dst;
+ SilenceDetectContext *s = ctx->priv;
+
+ switch (inlink->format) {
+ case AV_SAMPLE_FMT_DBL: s->silencedetect = silencedetect_dbl; break;
+ case AV_SAMPLE_FMT_FLT: s->silencedetect = silencedetect_flt; break;
+ case AV_SAMPLE_FMT_S32:
+ s->noise *= INT32_MAX;
+ s->silencedetect = silencedetect_s32;
+ break;
+ case AV_SAMPLE_FMT_S16:
+ s->noise *= INT16_MAX;
+ s->silencedetect = silencedetect_s16;
+ break;
+ }
+
+ return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
- int i;
- SilenceDetectContext *silence = inlink->dst->priv;
- const int nb_channels = av_get_channel_layout_nb_channels(inlink->channel_layout);
+ SilenceDetectContext *s = inlink->dst->priv;
+ const int nb_channels = inlink->channels;
const int srate = inlink->sample_rate;
const int nb_samples = insamples->nb_samples * nb_channels;
- const int64_t nb_samples_notify = srate * silence->duration * nb_channels;
+ const int64_t nb_samples_notify = srate * s->duration * nb_channels;
// scale number of null samples to the new sample rate
- if (silence->last_sample_rate && silence->last_sample_rate != srate)
- silence->nb_null_samples =
- srate * silence->nb_null_samples / silence->last_sample_rate;
- silence->last_sample_rate = srate;
+ if (s->last_sample_rate && s->last_sample_rate != srate)
+ s->nb_null_samples = srate * s->nb_null_samples / s->last_sample_rate;
+ s->last_sample_rate = srate;
- // TODO: support more sample formats
// TODO: document metadata
- if (insamples->format == AV_SAMPLE_FMT_DBL) {
- double *p = (double *)insamples->data[0];
-
- for (i = 0; i < nb_samples; i++, p++) {
- if (*p < silence->noise && *p > -silence->noise) {
- if (!silence->start) {
- silence->nb_null_samples++;
- if (silence->nb_null_samples >= nb_samples_notify) {
- silence->start = insamples->pts - (int64_t)(silence->duration / av_q2d(inlink->time_base) + .5);
- av_dict_set(&insamples->metadata, "lavfi.silence_start",
- av_ts2timestr(silence->start, &inlink->time_base), 0);
- av_log(silence, AV_LOG_INFO, "silence_start: %s\n",
- get_metadata_val(insamples, "lavfi.silence_start"));
- }
- }
- } else {
- if (silence->start) {
- av_dict_set(&insamples->metadata, "lavfi.silence_end",
- av_ts2timestr(insamples->pts, &inlink->time_base), 0);
- av_dict_set(&insamples->metadata, "lavfi.silence_duration",
- av_ts2timestr(insamples->pts - silence->start, &inlink->time_base), 0);
- av_log(silence, AV_LOG_INFO,
- "silence_end: %s | silence_duration: %s\n",
- get_metadata_val(insamples, "lavfi.silence_end"),
- get_metadata_val(insamples, "lavfi.silence_duration"));
- }
- silence->nb_null_samples = silence->start = 0;
- }
- }
- }
+ s->silencedetect(s, insamples, nb_samples, nb_samples_notify,
+ inlink->time_base);
return ff_filter_frame(inlink->dst->outputs[0], insamples);
}
@@ -133,6 +159,9 @@ static int query_formats(AVFilterContext *ctx)
AVFilterChannelLayouts *layouts = NULL;
static const enum AVSampleFormat sample_fmts[] = {
AV_SAMPLE_FMT_DBL,
+ AV_SAMPLE_FMT_FLT,
+ AV_SAMPLE_FMT_S32,
+ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE
};
@@ -156,10 +185,10 @@ static int query_formats(AVFilterContext *ctx)
static const AVFilterPad silencedetect_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -172,11 +201,10 @@ static const AVFilterPad silencedetect_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_silencedetect = {
+AVFilter ff_af_silencedetect = {
.name = "silencedetect",
.description = NULL_IF_CONFIG_SMALL("Detect silence."),
.priv_size = sizeof(SilenceDetectContext),
- .init = init,
.query_formats = query_formats,
.inputs = silencedetect_inputs,
.outputs = silencedetect_outputs,
diff --git a/ffmpeg/libavfilter/af_volume.c b/ffmpeg/libavfilter/af_volume.c
index 447e8d5..269a2a5 100644
--- a/ffmpeg/libavfilter/af_volume.c
+++ b/ffmpeg/libavfilter/af_volume.c
@@ -39,49 +39,82 @@ static const char *precision_str[] = {
"fixed", "float", "double"
};
+static const char *const var_names[] = {
+ "n", ///< frame number (starting at zero)
+ "nb_channels", ///< number of channels
+ "nb_consumed_samples", ///< number of samples consumed by the filter
+ "nb_samples", ///< number of samples in the current frame
+ "pos", ///< position in the file of the frame
+ "pts", ///< frame presentation timestamp
+ "sample_rate", ///< sample rate
+ "startpts", ///< PTS at start of stream
+ "startt", ///< time at start of stream
+ "t", ///< time in the file of the frame
+ "tb", ///< timebase
+ "volume", ///< last set value
+ NULL
+};
+
#define OFFSET(x) offsetof(VolumeContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption volume_options[] = {
- { "volume", "set volume adjustment",
- OFFSET(volume), AV_OPT_TYPE_DOUBLE, { .dbl = 1.0 }, 0, 0x7fffff, A|F },
+ { "volume", "set volume adjustment expression",
+ OFFSET(volume_expr), AV_OPT_TYPE_STRING, { .str = "1.0" }, .flags = A|F },
{ "precision", "select mathematical precision",
OFFSET(precision), AV_OPT_TYPE_INT, { .i64 = PRECISION_FLOAT }, PRECISION_FIXED, PRECISION_DOUBLE, A|F, "precision" },
{ "fixed", "select 8-bit fixed-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FIXED }, INT_MIN, INT_MAX, A|F, "precision" },
{ "float", "select 32-bit floating-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FLOAT }, INT_MIN, INT_MAX, A|F, "precision" },
{ "double", "select 64-bit floating-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_DOUBLE }, INT_MIN, INT_MAX, A|F, "precision" },
- { NULL },
+ { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_ONCE}, 0, EVAL_MODE_NB-1, .flags = A|F, "eval" },
+ { "once", "eval volume expression once", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_ONCE}, .flags = A|F, .unit = "eval" },
+ { "frame", "eval volume expression per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = A|F, .unit = "eval" },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(volume);
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static int set_expr(AVExpr **pexpr, const char *expr, void *log_ctx)
{
- VolumeContext *vol = ctx->priv;
-
- if (vol->precision == PRECISION_FIXED) {
- vol->volume_i = (int)(vol->volume * 256 + 0.5);
- vol->volume = vol->volume_i / 256.0;
- av_log(ctx, AV_LOG_VERBOSE, "volume:(%d/256)(%f)(%1.2fdB) precision:fixed\n",
- vol->volume_i, vol->volume, 20.0*log(vol->volume)/M_LN10);
- } else {
- av_log(ctx, AV_LOG_VERBOSE, "volume:(%f)(%1.2fdB) precision:%s\n",
- vol->volume, 20.0*log(vol->volume)/M_LN10,
- precision_str[vol->precision]);
+ int ret;
+ AVExpr *old = NULL;
+
+ if (*pexpr)
+ old = *pexpr;
+ ret = av_expr_parse(pexpr, expr, var_names,
+ NULL, NULL, NULL, NULL, 0, log_ctx);
+ if (ret < 0) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Error when evaluating the volume expression '%s'\n", expr);
+ *pexpr = old;
+ return ret;
}
+ av_expr_free(old);
return 0;
}
+static av_cold int init(AVFilterContext *ctx)
+{
+ VolumeContext *vol = ctx->priv;
+ return set_expr(&vol->volume_pexpr, vol->volume_expr, ctx);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ VolumeContext *vol = ctx->priv;
+ av_expr_free(vol->volume_pexpr);
+ av_opt_free(vol);
+}
+
static int query_formats(AVFilterContext *ctx)
{
VolumeContext *vol = ctx->priv;
AVFilterFormats *formats = NULL;
AVFilterChannelLayouts *layouts;
static const enum AVSampleFormat sample_fmts[][7] = {
- /* PRECISION_FIXED */
- {
+ [PRECISION_FIXED] = {
AV_SAMPLE_FMT_U8,
AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_S16,
@@ -90,21 +123,19 @@ static int query_formats(AVFilterContext *ctx)
AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_NONE
},
- /* PRECISION_FLOAT */
- {
+ [PRECISION_FLOAT] = {
AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE
},
- /* PRECISION_DOUBLE */
- {
+ [PRECISION_DOUBLE] = {
AV_SAMPLE_FMT_DBL,
AV_SAMPLE_FMT_DBLP,
AV_SAMPLE_FMT_NONE
}
};
- layouts = ff_all_channel_layouts();
+ layouts = ff_all_channel_counts();
if (!layouts)
return AVERROR(ENOMEM);
ff_set_common_channel_layouts(ctx, layouts);
@@ -168,7 +199,7 @@ static inline void scale_samples_s32(uint8_t *dst, const uint8_t *src,
smp_dst[i] = av_clipl_int32((((int64_t)smp_src[i] * volume + 128) >> 8));
}
-static void volume_init(VolumeContext *vol)
+static av_cold void volume_init(VolumeContext *vol)
{
vol->samples_align = 1;
@@ -202,6 +233,38 @@ static void volume_init(VolumeContext *vol)
ff_volume_init_x86(vol);
}
+static int set_volume(AVFilterContext *ctx)
+{
+ VolumeContext *vol = ctx->priv;
+
+ vol->volume = av_expr_eval(vol->volume_pexpr, vol->var_values, NULL);
+ if (isnan(vol->volume)) {
+ if (vol->eval_mode == EVAL_MODE_ONCE) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid value NaN for volume\n");
+ return AVERROR(EINVAL);
+ } else {
+ av_log(ctx, AV_LOG_WARNING, "Invalid value NaN for volume, setting to 0\n");
+ vol->volume = 0;
+ }
+ }
+ vol->var_values[VAR_VOLUME] = vol->volume;
+
+ av_log(ctx, AV_LOG_VERBOSE, "n:%f t:%f pts:%f precision:%s ",
+ vol->var_values[VAR_N], vol->var_values[VAR_T], vol->var_values[VAR_PTS],
+ precision_str[vol->precision]);
+
+ if (vol->precision == PRECISION_FIXED) {
+ vol->volume_i = (int)(vol->volume * 256 + 0.5);
+ vol->volume = vol->volume_i / 256.0;
+ av_log(ctx, AV_LOG_VERBOSE, "volume_i:%d/255 ", vol->volume_i);
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "volume:%f volume_dB:%f\n",
+ vol->volume, 20.0*log(vol->volume)/M_LN10);
+
+ volume_init(vol);
+ return 0;
+}
+
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
@@ -209,23 +272,77 @@ static int config_output(AVFilterLink *outlink)
AVFilterLink *inlink = ctx->inputs[0];
vol->sample_fmt = inlink->format;
- vol->channels = av_get_channel_layout_nb_channels(inlink->channel_layout);
+ vol->channels = inlink->channels;
vol->planes = av_sample_fmt_is_planar(inlink->format) ? vol->channels : 1;
- volume_init(vol);
+ vol->var_values[VAR_N] =
+ vol->var_values[VAR_NB_CONSUMED_SAMPLES] =
+ vol->var_values[VAR_NB_SAMPLES] =
+ vol->var_values[VAR_POS] =
+ vol->var_values[VAR_PTS] =
+ vol->var_values[VAR_STARTPTS] =
+ vol->var_values[VAR_STARTT] =
+ vol->var_values[VAR_T] =
+ vol->var_values[VAR_VOLUME] = NAN;
+
+ vol->var_values[VAR_NB_CHANNELS] = inlink->channels;
+ vol->var_values[VAR_TB] = av_q2d(inlink->time_base);
+ vol->var_values[VAR_SAMPLE_RATE] = inlink->sample_rate;
+
+ av_log(inlink->src, AV_LOG_VERBOSE, "tb:%f sample_rate:%f nb_channels:%f\n",
+ vol->var_values[VAR_TB],
+ vol->var_values[VAR_SAMPLE_RATE],
+ vol->var_values[VAR_NB_CHANNELS]);
+
+ return set_volume(ctx);
+}
- return 0;
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ VolumeContext *vol = ctx->priv;
+ int ret = AVERROR(ENOSYS);
+
+ if (!strcmp(cmd, "volume")) {
+ if ((ret = set_expr(&vol->volume_pexpr, args, ctx)) < 0)
+ return ret;
+ if (vol->eval_mode == EVAL_MODE_ONCE)
+ set_volume(ctx);
+ }
+
+ return ret;
}
+#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
+#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
+#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
+
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
+ AVFilterContext *ctx = inlink->dst;
VolumeContext *vol = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
int nb_samples = buf->nb_samples;
AVFrame *out_buf;
+ int64_t pos;
+
+ if (isnan(vol->var_values[VAR_STARTPTS])) {
+ vol->var_values[VAR_STARTPTS] = TS2D(buf->pts);
+ vol->var_values[VAR_STARTT ] = TS2T(buf->pts, inlink->time_base);
+ }
+ vol->var_values[VAR_PTS] = TS2D(buf->pts);
+ vol->var_values[VAR_T ] = TS2T(buf->pts, inlink->time_base);
+ vol->var_values[VAR_N ] = inlink->frame_count;
- if (vol->volume == 1.0 || vol->volume_i == 256)
- return ff_filter_frame(outlink, buf);
+ pos = av_frame_get_pkt_pos(buf);
+ vol->var_values[VAR_POS] = pos == -1 ? NAN : pos;
+ if (vol->eval_mode == EVAL_MODE_FRAME)
+ set_volume(ctx);
+
+ if (vol->volume == 1.0 || vol->volume_i == 256) {
+ out_buf = buf;
+ goto end;
+ }
/* do volume scaling in-place if input buffer is writable */
if (av_frame_is_writable(buf)) {
@@ -234,7 +351,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
out_buf = ff_get_audio_buffer(inlink, nb_samples);
if (!out_buf)
return AVERROR(ENOMEM);
- out_buf->pts = buf->pts;
+ av_frame_copy_props(out_buf, buf);
}
if (vol->precision != PRECISION_FIXED || vol->volume_i > 0) {
@@ -269,6 +386,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
if (buf != out_buf)
av_frame_free(&buf);
+end:
+ vol->var_values[VAR_NB_CONSUMED_SAMPLES] += out_buf->nb_samples;
return ff_filter_frame(outlink, out_buf);
}
@@ -290,16 +409,16 @@ static const AVFilterPad avfilter_af_volume_outputs[] = {
{ NULL }
};
-static const char *const shorthand[] = { "volume", "precision", NULL };
-
-AVFilter avfilter_af_volume = {
+AVFilter ff_af_volume = {
.name = "volume",
.description = NULL_IF_CONFIG_SMALL("Change input volume."),
.query_formats = query_formats,
.priv_size = sizeof(VolumeContext),
+ .priv_class = &volume_class,
.init = init,
+ .uninit = uninit,
.inputs = avfilter_af_volume_inputs,
.outputs = avfilter_af_volume_outputs,
- .priv_class = &volume_class,
- .shorthand = shorthand,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+ .process_command = process_command,
};
diff --git a/ffmpeg/libavfilter/af_volume.h b/ffmpeg/libavfilter/af_volume.h
index bd7932e..10ef6fb 100644
--- a/ffmpeg/libavfilter/af_volume.h
+++ b/ffmpeg/libavfilter/af_volume.h
@@ -25,6 +25,7 @@
#define AVFILTER_AF_VOLUME_H
#include "libavutil/common.h"
+#include "libavutil/eval.h"
#include "libavutil/float_dsp.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
@@ -35,10 +36,37 @@ enum PrecisionType {
PRECISION_DOUBLE,
};
+enum EvalMode {
+ EVAL_MODE_ONCE,
+ EVAL_MODE_FRAME,
+ EVAL_MODE_NB
+};
+
+enum VolumeVarName {
+ VAR_N,
+ VAR_NB_CHANNELS,
+ VAR_NB_CONSUMED_SAMPLES,
+ VAR_NB_SAMPLES,
+ VAR_POS,
+ VAR_PTS,
+ VAR_SAMPLE_RATE,
+ VAR_STARTPTS,
+ VAR_STARTT,
+ VAR_T,
+ VAR_TB,
+ VAR_VOLUME,
+ VAR_VARS_NB
+};
+
typedef struct VolumeContext {
const AVClass *class;
AVFloatDSPContext fdsp;
enum PrecisionType precision;
+ enum EvalMode eval_mode;
+ const char *volume_expr;
+ AVExpr *volume_pexpr;
+ double var_values[VAR_VARS_NB];
+
double volume;
int volume_i;
int channels;
diff --git a/ffmpeg/libavfilter/af_volumedetect.c b/ffmpeg/libavfilter/af_volumedetect.c
index 79d992e..5de115e 100644
--- a/ffmpeg/libavfilter/af_volumedetect.c
+++ b/ffmpeg/libavfilter/af_volumedetect.c
@@ -126,17 +126,16 @@ static void print_stats(AVFilterContext *ctx)
}
}
-static void uninit(AVFilterContext *ctx)
+static av_cold void uninit(AVFilterContext *ctx)
{
print_stats(ctx);
}
static const AVFilterPad volumedetect_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -149,10 +148,9 @@ static const AVFilterPad volumedetect_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_volumedetect = {
+AVFilter ff_af_volumedetect = {
.name = "volumedetect",
.description = NULL_IF_CONFIG_SMALL("Detect audio volume."),
-
.priv_size = sizeof(VolDetectContext),
.query_formats = query_formats,
.uninit = uninit,
diff --git a/ffmpeg/libavfilter/allfilters.c b/ffmpeg/libavfilter/allfilters.c
index 45a67e5..d58e8cc 100644
--- a/ffmpeg/libavfilter/allfilters.c
+++ b/ffmpeg/libavfilter/allfilters.c
@@ -21,19 +21,20 @@
#include "avfilter.h"
#include "config.h"
+#include "opencl_allkernels.h"
#define REGISTER_FILTER(X, x, y) \
{ \
- extern AVFilter avfilter_##y##_##x; \
+ extern AVFilter ff_##y##_##x; \
if (CONFIG_##X##_FILTER) \
- avfilter_register(&avfilter_##y##_##x); \
+ avfilter_register(&ff_##y##_##x); \
}
#define REGISTER_FILTER_UNCONDITIONAL(x) \
{ \
- extern AVFilter avfilter_##x; \
- avfilter_register(&avfilter_##x); \
+ extern AVFilter ff_##x; \
+ avfilter_register(&ff_##x); \
}
void avfilter_register_all(void)
@@ -44,39 +45,53 @@ void avfilter_register_all(void)
return;
initialized = 1;
+#if FF_API_ACONVERT_FILTER
REGISTER_FILTER(ACONVERT, aconvert, af);
+#endif
+ REGISTER_FILTER(ADELAY, adelay, af);
+ REGISTER_FILTER(AECHO, aecho, af);
+ REGISTER_FILTER(AEVAL, aeval, af);
REGISTER_FILTER(AFADE, afade, af);
REGISTER_FILTER(AFORMAT, aformat, af);
+ REGISTER_FILTER(AINTERLEAVE, ainterleave, af);
REGISTER_FILTER(ALLPASS, allpass, af);
REGISTER_FILTER(AMERGE, amerge, af);
REGISTER_FILTER(AMIX, amix, af);
REGISTER_FILTER(ANULL, anull, af);
REGISTER_FILTER(APAD, apad, af);
REGISTER_FILTER(APERMS, aperms, af);
+ REGISTER_FILTER(APHASER, aphaser, af);
REGISTER_FILTER(ARESAMPLE, aresample, af);
REGISTER_FILTER(ASELECT, aselect, af);
REGISTER_FILTER(ASENDCMD, asendcmd, af);
REGISTER_FILTER(ASETNSAMPLES, asetnsamples, af);
REGISTER_FILTER(ASETPTS, asetpts, af);
+ REGISTER_FILTER(ASETRATE, asetrate, af);
REGISTER_FILTER(ASETTB, asettb, af);
REGISTER_FILTER(ASHOWINFO, ashowinfo, af);
REGISTER_FILTER(ASPLIT, asplit, af);
+ REGISTER_FILTER(ASTATS, astats, af);
REGISTER_FILTER(ASTREAMSYNC, astreamsync, af);
REGISTER_FILTER(ASYNCTS, asyncts, af);
REGISTER_FILTER(ATEMPO, atempo, af);
+ REGISTER_FILTER(ATRIM, atrim, af);
+ REGISTER_FILTER(AZMQ, azmq, af);
REGISTER_FILTER(BANDPASS, bandpass, af);
REGISTER_FILTER(BANDREJECT, bandreject, af);
REGISTER_FILTER(BASS, bass, af);
REGISTER_FILTER(BIQUAD, biquad, af);
REGISTER_FILTER(CHANNELMAP, channelmap, af);
REGISTER_FILTER(CHANNELSPLIT, channelsplit, af);
+ REGISTER_FILTER(COMPAND, compand, af);
REGISTER_FILTER(EARWAX, earwax, af);
REGISTER_FILTER(EBUR128, ebur128, af);
REGISTER_FILTER(EQUALIZER, equalizer, af);
REGISTER_FILTER(HIGHPASS, highpass, af);
REGISTER_FILTER(JOIN, join, af);
+ REGISTER_FILTER(LADSPA, ladspa, af);
REGISTER_FILTER(LOWPASS, lowpass, af);
REGISTER_FILTER(PAN, pan, af);
+ REGISTER_FILTER(REPLAYGAIN, replaygain, af);
REGISTER_FILTER(RESAMPLE, resample, af);
REGISTER_FILTER(SILENCEDETECT, silencedetect, af);
REGISTER_FILTER(TREBLE, treble, af);
@@ -98,19 +113,26 @@ void avfilter_register_all(void)
REGISTER_FILTER(BLACKFRAME, blackframe, vf);
REGISTER_FILTER(BLEND, blend, vf);
REGISTER_FILTER(BOXBLUR, boxblur, vf);
+ REGISTER_FILTER(COLORBALANCE, colorbalance, vf);
+ REGISTER_FILTER(COLORCHANNELMIXER, colorchannelmixer, vf);
REGISTER_FILTER(COLORMATRIX, colormatrix, vf);
REGISTER_FILTER(COPY, copy, vf);
REGISTER_FILTER(CROP, crop, vf);
REGISTER_FILTER(CROPDETECT, cropdetect, vf);
REGISTER_FILTER(CURVES, curves, vf);
+ REGISTER_FILTER(DCTDNOIZ, dctdnoiz, vf);
REGISTER_FILTER(DECIMATE, decimate, vf);
REGISTER_FILTER(DELOGO, delogo, vf);
REGISTER_FILTER(DESHAKE, deshake, vf);
REGISTER_FILTER(DRAWBOX, drawbox, vf);
+ REGISTER_FILTER(DRAWGRID, drawgrid, vf);
REGISTER_FILTER(DRAWTEXT, drawtext, vf);
REGISTER_FILTER(EDGEDETECT, edgedetect, vf);
+ REGISTER_FILTER(ELBG, elbg, vf);
+ REGISTER_FILTER(EXTRACTPLANES, extractplanes, vf);
REGISTER_FILTER(FADE, fade, vf);
REGISTER_FILTER(FIELD, field, vf);
+ REGISTER_FILTER(FIELDMATCH, fieldmatch, vf);
REGISTER_FILTER(FIELDORDER, fieldorder, vf);
REGISTER_FILTER(FORMAT, format, vf);
REGISTER_FILTER(FPS, fps, vf);
@@ -118,6 +140,7 @@ void avfilter_register_all(void)
REGISTER_FILTER(FREI0R, frei0r, vf);
REGISTER_FILTER(GEQ, geq, vf);
REGISTER_FILTER(GRADFUN, gradfun, vf);
+ REGISTER_FILTER(HALDCLUT, haldclut, vf);
REGISTER_FILTER(HFLIP, hflip, vf);
REGISTER_FILTER(HISTEQ, histeq, vf);
REGISTER_FILTER(HISTOGRAM, histogram, vf);
@@ -125,25 +148,39 @@ void avfilter_register_all(void)
REGISTER_FILTER(HUE, hue, vf);
REGISTER_FILTER(IDET, idet, vf);
REGISTER_FILTER(IL, il, vf);
+ REGISTER_FILTER(INTERLACE, interlace, vf);
+ REGISTER_FILTER(INTERLEAVE, interleave, vf);
REGISTER_FILTER(KERNDEINT, kerndeint, vf);
+ REGISTER_FILTER(LUT3D, lut3d, vf);
REGISTER_FILTER(LUT, lut, vf);
REGISTER_FILTER(LUTRGB, lutrgb, vf);
REGISTER_FILTER(LUTYUV, lutyuv, vf);
+ REGISTER_FILTER(MCDEINT, mcdeint, vf);
+ REGISTER_FILTER(MERGEPLANES, mergeplanes, vf);
REGISTER_FILTER(MP, mp, vf);
+ REGISTER_FILTER(MPDECIMATE, mpdecimate, vf);
REGISTER_FILTER(NEGATE, negate, vf);
REGISTER_FILTER(NOFORMAT, noformat, vf);
REGISTER_FILTER(NOISE, noise, vf);
REGISTER_FILTER(NULL, null, vf);
REGISTER_FILTER(OCV, ocv, vf);
REGISTER_FILTER(OVERLAY, overlay, vf);
+ REGISTER_FILTER(OWDENOISE, owdenoise, vf);
REGISTER_FILTER(PAD, pad, vf);
REGISTER_FILTER(PERMS, perms, vf);
+ REGISTER_FILTER(PERSPECTIVE, perspective, vf);
+ REGISTER_FILTER(PHASE, phase, vf);
REGISTER_FILTER(PIXDESCTEST, pixdesctest, vf);
REGISTER_FILTER(PP, pp, vf);
+ REGISTER_FILTER(PSNR, psnr, vf);
+ REGISTER_FILTER(PULLUP, pullup, vf);
REGISTER_FILTER(REMOVELOGO, removelogo, vf);
+ REGISTER_FILTER(ROTATE, rotate, vf);
+ REGISTER_FILTER(SAB, sab, vf);
REGISTER_FILTER(SCALE, scale, vf);
REGISTER_FILTER(SELECT, select, vf);
REGISTER_FILTER(SENDCMD, sendcmd, vf);
+ REGISTER_FILTER(SEPARATEFIELDS, separatefields, vf);
REGISTER_FILTER(SETDAR, setdar, vf);
REGISTER_FILTER(SETFIELD, setfield, vf);
REGISTER_FILTER(SETPTS, setpts, vf);
@@ -152,32 +189,43 @@ void avfilter_register_all(void)
REGISTER_FILTER(SHOWINFO, showinfo, vf);
REGISTER_FILTER(SMARTBLUR, smartblur, vf);
REGISTER_FILTER(SPLIT, split, vf);
+ REGISTER_FILTER(SPP, spp, vf);
REGISTER_FILTER(STEREO3D, stereo3d, vf);
REGISTER_FILTER(SUBTITLES, subtitles, vf);
REGISTER_FILTER(SUPER2XSAI, super2xsai, vf);
REGISTER_FILTER(SWAPUV, swapuv, vf);
+ REGISTER_FILTER(TELECINE, telecine, vf);
REGISTER_FILTER(THUMBNAIL, thumbnail, vf);
REGISTER_FILTER(TILE, tile, vf);
REGISTER_FILTER(TINTERLACE, tinterlace, vf);
REGISTER_FILTER(TRANSPOSE, transpose, vf);
+ REGISTER_FILTER(TRIM, trim, vf);
REGISTER_FILTER(UNSHARP, unsharp, vf);
REGISTER_FILTER(VFLIP, vflip, vf);
+ REGISTER_FILTER(VIDSTABDETECT, vidstabdetect, vf);
+ REGISTER_FILTER(VIDSTABTRANSFORM, vidstabtransform, vf);
+ REGISTER_FILTER(VIGNETTE, vignette, vf);
+ REGISTER_FILTER(W3FDIF, w3fdif, vf);
REGISTER_FILTER(YADIF, yadif, vf);
+ REGISTER_FILTER(ZMQ, zmq, vf);
REGISTER_FILTER(CELLAUTO, cellauto, vsrc);
REGISTER_FILTER(COLOR, color, vsrc);
REGISTER_FILTER(FREI0R, frei0r_src, vsrc);
+ REGISTER_FILTER(HALDCLUTSRC, haldclutsrc, vsrc);
REGISTER_FILTER(LIFE, life, vsrc);
REGISTER_FILTER(MANDELBROT, mandelbrot, vsrc);
REGISTER_FILTER(MPTESTSRC, mptestsrc, vsrc);
REGISTER_FILTER(NULLSRC, nullsrc, vsrc);
REGISTER_FILTER(RGBTESTSRC, rgbtestsrc, vsrc);
REGISTER_FILTER(SMPTEBARS, smptebars, vsrc);
+ REGISTER_FILTER(SMPTEHDBARS, smptehdbars, vsrc);
REGISTER_FILTER(TESTSRC, testsrc, vsrc);
REGISTER_FILTER(NULLSINK, nullsink, vsink);
/* multimedia filters */
+ REGISTER_FILTER(AVECTORSCOPE, avectorscope, avf);
REGISTER_FILTER(CONCAT, concat, avf);
REGISTER_FILTER(SHOWSPECTRUM, showspectrum, avf);
REGISTER_FILTER(SHOWWAVES, showwaves, avf);
@@ -199,4 +247,5 @@ void avfilter_register_all(void)
REGISTER_FILTER_UNCONDITIONAL(vsink_buffer);
REGISTER_FILTER_UNCONDITIONAL(af_afifo);
REGISTER_FILTER_UNCONDITIONAL(vf_fifo);
+ ff_opencl_register_filter_kernel_code_all();
}
diff --git a/ffmpeg/libavfilter/asink_anullsink.c b/ffmpeg/libavfilter/asink_anullsink.c
index 8015da2..9b53d3f 100644
--- a/ffmpeg/libavfilter/asink_anullsink.c
+++ b/ffmpeg/libavfilter/asink_anullsink.c
@@ -37,7 +37,7 @@ static const AVFilterPad avfilter_asink_anullsink_inputs[] = {
{ NULL },
};
-AVFilter avfilter_asink_anullsink = {
+AVFilter ff_asink_anullsink = {
.name = "anullsink",
.description = NULL_IF_CONFIG_SMALL("Do absolutely nothing with the input audio."),
diff --git a/ffmpeg/libavfilter/asrc_aevalsrc.c b/ffmpeg/libavfilter/asrc_aevalsrc.c
deleted file mode 100644
index 409399f..0000000
--- a/ffmpeg/libavfilter/asrc_aevalsrc.c
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * Copyright (c) 2011 Stefano Sabatini
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * eval audio source
- */
-
-#include "libavutil/avassert.h"
-#include "libavutil/avstring.h"
-#include "libavutil/channel_layout.h"
-#include "libavutil/eval.h"
-#include "libavutil/opt.h"
-#include "libavutil/parseutils.h"
-#include "avfilter.h"
-#include "audio.h"
-#include "internal.h"
-
-static const char * const var_names[] = {
- "n", ///< number of frame
- "t", ///< timestamp expressed in seconds
- "s", ///< sample rate
- NULL
-};
-
-enum var_name {
- VAR_N,
- VAR_T,
- VAR_S,
- VAR_VARS_NB
-};
-
-typedef struct {
- const AVClass *class;
- char *sample_rate_str;
- int sample_rate;
- int64_t chlayout;
- char *chlayout_str;
- int nb_channels;
- int64_t pts;
- AVExpr *expr[8];
- char *expr_str[8];
- int nb_samples; ///< number of samples per requested frame
- char *duration_str; ///< total duration of the generated audio
- double duration;
- uint64_t n;
- double var_values[VAR_VARS_NB];
-} EvalContext;
-
-#define OFFSET(x) offsetof(EvalContext, x)
-#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
-
-static const AVOption aevalsrc_options[]= {
- { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
- { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
- { "sample_rate", "set the sample rate", OFFSET(sample_rate_str), AV_OPT_TYPE_STRING, {.str = "44100"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "s", "set the sample rate", OFFSET(sample_rate_str), AV_OPT_TYPE_STRING, {.str = "44100"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "duration", "set audio duration", OFFSET(duration_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
- { "d", "set audio duration", OFFSET(duration_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
- { "channel_layout", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
- { "c", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
-{NULL},
-};
-
-AVFILTER_DEFINE_CLASS(aevalsrc);
-
-static int init(AVFilterContext *ctx, const char *args)
-{
- EvalContext *eval = ctx->priv;
- char *args1 = av_strdup(args);
- char *expr, *buf, *bufptr;
- int ret, i;
-
- eval->class = &aevalsrc_class;
- av_opt_set_defaults(eval);
-
- if (!args1) {
- av_log(ctx, AV_LOG_ERROR, "Argument is empty\n");
- ret = args ? AVERROR(ENOMEM) : AVERROR(EINVAL);
- goto end;
- }
-
- /* parse expressions */
- buf = args1;
- i = 0;
- while (expr = av_strtok(buf, ":", &bufptr)) {
- ret = av_expr_parse(&eval->expr[i], expr, var_names,
- NULL, NULL, NULL, NULL, 0, ctx);
- if (ret < 0)
- goto end;
- i++;
- if (bufptr && *bufptr == ':') { /* found last expression */
- bufptr++;
- break;
- }
- buf = NULL;
- }
- eval->nb_channels = i;
-
- if (bufptr && (ret = av_set_options_string(eval, bufptr, "=", ":")) < 0)
- goto end;
-
- if (eval->chlayout_str) {
- int n;
- ret = ff_parse_channel_layout(&eval->chlayout, eval->chlayout_str, ctx);
- if (ret < 0)
- goto end;
-
- n = av_get_channel_layout_nb_channels(eval->chlayout);
- if (n != eval->nb_channels) {
- av_log(ctx, AV_LOG_ERROR,
- "Mismatch between the specified number of channels '%d' "
- "and the number of channels '%d' in the specified channel layout '%s'\n",
- eval->nb_channels, n, eval->chlayout_str);
- ret = AVERROR(EINVAL);
- goto end;
- }
- } else {
- /* guess channel layout from nb expressions/channels */
- eval->chlayout = av_get_default_channel_layout(eval->nb_channels);
- if (!eval->chlayout) {
- av_log(ctx, AV_LOG_ERROR, "Invalid number of channels '%d' provided\n",
- eval->nb_channels);
- ret = AVERROR(EINVAL);
- goto end;
- }
- }
-
- if ((ret = ff_parse_sample_rate(&eval->sample_rate, eval->sample_rate_str, ctx)))
- goto end;
-
- eval->duration = -1;
- if (eval->duration_str) {
- int64_t us = -1;
- if ((ret = av_parse_time(&us, eval->duration_str, 1)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid duration: '%s'\n", eval->duration_str);
- goto end;
- }
- eval->duration = (double)us / 1000000;
- }
- eval->n = 0;
-
-end:
- av_free(args1);
- return ret;
-}
-
-static void uninit(AVFilterContext *ctx)
-{
- EvalContext *eval = ctx->priv;
- int i;
-
- for (i = 0; i < 8; i++) {
- av_expr_free(eval->expr[i]);
- eval->expr[i] = NULL;
- }
- av_freep(&eval->chlayout_str);
- av_freep(&eval->duration_str);
- av_freep(&eval->sample_rate_str);
-}
-
-static int config_props(AVFilterLink *outlink)
-{
- EvalContext *eval = outlink->src->priv;
- char buf[128];
-
- outlink->time_base = (AVRational){1, eval->sample_rate};
- outlink->sample_rate = eval->sample_rate;
-
- eval->var_values[VAR_S] = eval->sample_rate;
-
- av_get_channel_layout_string(buf, sizeof(buf), 0, eval->chlayout);
-
- av_log(outlink->src, AV_LOG_VERBOSE,
- "sample_rate:%d chlayout:%s duration:%f\n",
- eval->sample_rate, buf, eval->duration);
-
- return 0;
-}
-
-static int query_formats(AVFilterContext *ctx)
-{
- EvalContext *eval = ctx->priv;
- static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE };
- int64_t chlayouts[] = { eval->chlayout, -1 };
- int sample_rates[] = { eval->sample_rate, -1 };
-
- ff_set_common_formats (ctx, ff_make_format_list(sample_fmts));
- ff_set_common_channel_layouts(ctx, avfilter_make_format64_list(chlayouts));
- ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates));
-
- return 0;
-}
-
-static int request_frame(AVFilterLink *outlink)
-{
- EvalContext *eval = outlink->src->priv;
- AVFrame *samplesref;
- int i, j;
- double t = eval->n * (double)1/eval->sample_rate;
-
- if (eval->duration >= 0 && t >= eval->duration)
- return AVERROR_EOF;
-
- samplesref = ff_get_audio_buffer(outlink, eval->nb_samples);
-
- /* evaluate expression for each single sample and for each channel */
- for (i = 0; i < eval->nb_samples; i++, eval->n++) {
- eval->var_values[VAR_N] = eval->n;
- eval->var_values[VAR_T] = eval->var_values[VAR_N] * (double)1/eval->sample_rate;
-
- for (j = 0; j < eval->nb_channels; j++) {
- *((double *) samplesref->extended_data[j] + i) =
- av_expr_eval(eval->expr[j], eval->var_values, NULL);
- }
- }
-
- samplesref->pts = eval->pts;
- samplesref->sample_rate = eval->sample_rate;
- eval->pts += eval->nb_samples;
-
- return ff_filter_frame(outlink, samplesref);
-}
-
-static const AVFilterPad aevalsrc_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .config_props = config_props,
- .request_frame = request_frame,
- },
- { NULL }
-};
-
-AVFilter avfilter_asrc_aevalsrc = {
- .name = "aevalsrc",
- .description = NULL_IF_CONFIG_SMALL("Generate an audio signal generated by an expression."),
-
- .query_formats = query_formats,
- .init = init,
- .uninit = uninit,
- .priv_size = sizeof(EvalContext),
- .inputs = NULL,
- .outputs = aevalsrc_outputs,
- .priv_class = &aevalsrc_class,
-};
diff --git a/ffmpeg/libavfilter/asrc_anullsrc.c b/ffmpeg/libavfilter/asrc_anullsrc.c
index f8e6ac5..28d4500 100644
--- a/ffmpeg/libavfilter/asrc_anullsrc.c
+++ b/ffmpeg/libavfilter/asrc_anullsrc.c
@@ -54,44 +54,46 @@ static const AVOption anullsrc_options[]= {
{ "r", "set sample rate", OFFSET(sample_rate_str) , AV_OPT_TYPE_STRING, {.str = "44100"}, 0, 0, FLAGS },
{ "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
{ "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
- { NULL },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(anullsrc);
-static int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
ANullContext *null = ctx->priv;
int ret;
- null->class = &anullsrc_class;
- av_opt_set_defaults(null);
-
- if ((ret = (av_set_options_string(null, args, "=", ":"))) < 0)
- return ret;
-
if ((ret = ff_parse_sample_rate(&null->sample_rate,
null->sample_rate_str, ctx)) < 0)
return ret;
- if ((ret = ff_parse_channel_layout(&null->channel_layout,
+ if ((ret = ff_parse_channel_layout(&null->channel_layout, NULL,
null->channel_layout_str, ctx)) < 0)
return ret;
return 0;
}
+static int query_formats(AVFilterContext *ctx)
+{
+ ANullContext *null = ctx->priv;
+ int64_t chlayouts[] = { null->channel_layout, -1 };
+ int sample_rates[] = { null->sample_rate, -1 };
+
+ ff_set_common_formats (ctx, ff_all_formats(AVMEDIA_TYPE_AUDIO));
+ ff_set_common_channel_layouts(ctx, avfilter_make_format64_list(chlayouts));
+ ff_set_common_samplerates (ctx, ff_make_format_list(sample_rates));
+
+ return 0;
+}
+
static int config_props(AVFilterLink *outlink)
{
ANullContext *null = outlink->src->priv;
char buf[128];
- int chans_nb;
- outlink->sample_rate = null->sample_rate;
- outlink->channel_layout = null->channel_layout;
-
- chans_nb = av_get_channel_layout_nb_channels(null->channel_layout);
- av_get_channel_layout_string(buf, sizeof(buf), chans_nb, null->channel_layout);
+ av_get_channel_layout_string(buf, sizeof(buf), 0, null->channel_layout);
av_log(outlink->src, AV_LOG_VERBOSE,
"sample_rate:%d channel_layout:'%s' nb_samples:%d\n",
null->sample_rate, buf, null->nb_samples);
@@ -106,12 +108,17 @@ static int request_frame(AVFilterLink *outlink)
AVFrame *samplesref;
samplesref = ff_get_audio_buffer(outlink, null->nb_samples);
+ if (!samplesref)
+ return AVERROR(ENOMEM);
+
samplesref->pts = null->pts;
samplesref->channel_layout = null->channel_layout;
samplesref->sample_rate = outlink->sample_rate;
ret = ff_filter_frame(outlink, av_frame_clone(samplesref));
av_frame_free(&samplesref);
+ if (ret < 0)
+ return ret;
null->pts += null->nb_samples;
return ret;
@@ -127,15 +134,13 @@ static const AVFilterPad avfilter_asrc_anullsrc_outputs[] = {
{ NULL }
};
-AVFilter avfilter_asrc_anullsrc = {
- .name = "anullsrc",
- .description = NULL_IF_CONFIG_SMALL("Null audio source, return empty audio frames."),
-
- .init = init,
- .priv_size = sizeof(ANullContext),
-
- .inputs = NULL,
-
- .outputs = avfilter_asrc_anullsrc_outputs,
- .priv_class = &anullsrc_class,
+AVFilter ff_asrc_anullsrc = {
+ .name = "anullsrc",
+ .description = NULL_IF_CONFIG_SMALL("Null audio source, return empty audio frames."),
+ .init = init,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ANullContext),
+ .inputs = NULL,
+ .outputs = avfilter_asrc_anullsrc_outputs,
+ .priv_class = &anullsrc_class,
};
diff --git a/ffmpeg/libavfilter/asrc_flite.c b/ffmpeg/libavfilter/asrc_flite.c
index c13eb8b..098a1dd 100644
--- a/ffmpeg/libavfilter/asrc_flite.c
+++ b/ffmpeg/libavfilter/asrc_flite.c
@@ -131,17 +131,11 @@ static int select_voice(struct voice_entry **entry_ret, const char *voice_name,
return AVERROR(EINVAL);
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
FliteContext *flite = ctx->priv;
int ret = 0;
- flite->class = &flite_class;
- av_opt_set_defaults(flite);
-
- if ((ret = av_set_options_string(flite, args, "=", ":")) < 0)
- return ret;
-
if (flite->list_voices) {
list_voices(ctx, "\n");
return AVERROR_EXIT;
@@ -200,8 +194,6 @@ static av_cold void uninit(AVFilterContext *ctx)
{
FliteContext *flite = ctx->priv;
- av_opt_free(flite);
-
if (!--flite->voice_entry->usage_count)
flite->voice_entry->unregister_fn(flite->voice);
flite->voice = NULL;
@@ -278,14 +270,14 @@ static const AVFilterPad flite_outputs[] = {
{ NULL }
};
-AVFilter avfilter_asrc_flite = {
- .name = "flite",
- .description = NULL_IF_CONFIG_SMALL("Synthesize voice from text using libflite."),
+AVFilter ff_asrc_flite = {
+ .name = "flite",
+ .description = NULL_IF_CONFIG_SMALL("Synthesize voice from text using libflite."),
.query_formats = query_formats,
- .init = init,
- .uninit = uninit,
- .priv_size = sizeof(FliteContext),
- .inputs = NULL,
- .outputs = flite_outputs,
- .priv_class = &flite_class,
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(FliteContext),
+ .inputs = NULL,
+ .outputs = flite_outputs,
+ .priv_class = &flite_class,
};
diff --git a/ffmpeg/libavfilter/asrc_sine.c b/ffmpeg/libavfilter/asrc_sine.c
index 82a2bef..68e1398 100644
--- a/ffmpeg/libavfilter/asrc_sine.c
+++ b/ffmpeg/libavfilter/asrc_sine.c
@@ -71,7 +71,7 @@ static const AVOption sine_options[] = {
OPT_DUR("duration", duration, 0, 0, INT64_MAX, "set the audio duration"),
OPT_DUR("d", duration, 0, 0, INT64_MAX, "set the audio duration"),
OPT_INT("samples_per_frame", samples_per_frame, 1024, 0, INT_MAX, "set the number of samples per frame"),
- {NULL},
+ {NULL}
};
AVFILTER_DEFINE_CLASS(sine);
@@ -120,17 +120,10 @@ static void make_sin_table(int16_t *sin)
sin[i + 2 * half_pi] = -sin[i];
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
SineContext *sine = ctx->priv;
- static const char *shorthand[] = { "frequency", "beep_factor", NULL };
- int ret;
- sine->class = &sine_class;
- av_opt_set_defaults(sine);
-
- if ((ret = av_opt_set_from_string(sine, args, shorthand, "=", ":")) < 0)
- return ret;
if (!(sine->sin = av_malloc(sizeof(*sine->sin) << LOG_PERIOD)))
return AVERROR(ENOMEM);
sine->dphi = ldexp(sine->frequency, 32) / sine->sample_rate + 0.5;
@@ -217,7 +210,7 @@ static const AVFilterPad sine_outputs[] = {
{ NULL }
};
-AVFilter avfilter_asrc_sine = {
+AVFilter ff_asrc_sine = {
.name = "sine",
.description = NULL_IF_CONFIG_SMALL("Generate sine wave audio signal."),
.query_formats = query_formats,
diff --git a/ffmpeg/libavfilter/audio.c b/ffmpeg/libavfilter/audio.c
index 1075217..315c273 100644
--- a/ffmpeg/libavfilter/audio.c
+++ b/ffmpeg/libavfilter/audio.c
@@ -42,43 +42,29 @@ AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples)
{
AVFrame *frame = av_frame_alloc();
int channels = link->channels;
- int buf_size, ret;
+ int ret;
av_assert0(channels == av_get_channel_layout_nb_channels(link->channel_layout) || !av_get_channel_layout_nb_channels(link->channel_layout));
if (!frame)
return NULL;
- buf_size = av_samples_get_buffer_size(NULL, channels, nb_samples,
- link->format, 0);
- if (buf_size < 0)
- goto fail;
-
- frame->buf[0] = av_buffer_alloc(buf_size);
- if (!frame->buf[0])
- goto fail;
-
- frame->nb_samples = nb_samples;
- ret = avcodec_fill_audio_frame(frame, channels, link->format,
- frame->buf[0]->data, buf_size, 0);
- if (ret < 0)
- goto fail;
-
- av_samples_set_silence(frame->extended_data, 0, nb_samples, channels,
- link->format);
-
frame->nb_samples = nb_samples;
frame->format = link->format;
av_frame_set_channels(frame, link->channels);
frame->channel_layout = link->channel_layout;
frame->sample_rate = link->sample_rate;
+ ret = av_frame_get_buffer(frame, 0);
+ if (ret < 0) {
+ av_frame_free(&frame);
+ return NULL;
+ }
- return frame;
+ av_samples_set_silence(frame->extended_data, 0, nb_samples, channels,
+ link->format);
-fail:
- av_buffer_unref(&frame->buf[0]);
- av_frame_free(&frame);
- return NULL;
+
+ return frame;
}
AVFrame *ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
diff --git a/ffmpeg/libavfilter/avcodec.h b/ffmpeg/libavfilter/avcodec.h
index ae55df7..8bbdad2 100644
--- a/ffmpeg/libavfilter/avcodec.h
+++ b/ffmpeg/libavfilter/avcodec.h
@@ -72,7 +72,7 @@ AVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type,
*
* @param frame an already allocated AVFrame
* @param samplesref an audio buffer reference
- * @return 0 in case of success, a negative AVERROR code in case of
+ * @return >= 0 in case of success, a negative AVERROR code in case of
* failure
* @deprecated Use avfilter_copy_buf_props() instead.
*/
@@ -85,7 +85,7 @@ int avfilter_fill_frame_from_audio_buffer_ref(AVFrame *frame,
*
* @param frame an already allocated AVFrame
* @param picref a video buffer reference
- * @return 0 in case of success, a negative AVERROR code in case of
+ * @return >= 0 in case of success, a negative AVERROR code in case of
* failure
* @deprecated Use avfilter_copy_buf_props() instead.
*/
@@ -98,7 +98,7 @@ int avfilter_fill_frame_from_video_buffer_ref(AVFrame *frame,
*
* @param frame an already allocated AVFrame
* @param ref a video or audio buffer reference
- * @return 0 in case of success, a negative AVERROR code in case of
+ * @return >= 0 in case of success, a negative AVERROR code in case of
* failure
* @deprecated Use avfilter_copy_buf_props() instead.
*/
diff --git a/ffmpeg/libavfilter/avf_concat.c b/ffmpeg/libavfilter/avf_concat.c
index 2b3640b..c211dc4 100644
--- a/ffmpeg/libavfilter/avf_concat.c
+++ b/ffmpeg/libavfilter/avf_concat.c
@@ -68,8 +68,8 @@ static const AVOption concat_options[] = {
AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A|F},
{ "unsafe", "enable unsafe mode",
OFFSET(unsafe),
- AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A|A|F},
- { 0 }
+ AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V|A|F},
+ { NULL }
};
AVFILTER_DEFINE_CLASS(concat);
@@ -134,10 +134,13 @@ static int config_output(AVFilterLink *outlink)
outlink->format = inlink->format;
for (seg = 1; seg < cat->nb_segments; seg++) {
inlink = ctx->inputs[in_no += ctx->nb_outputs];
+ if (!outlink->sample_aspect_ratio.num)
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
/* possible enhancement: unsafe mode, do not check */
if (outlink->w != inlink->w ||
outlink->h != inlink->h ||
- outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num ||
+ outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num &&
+ inlink->sample_aspect_ratio.num ||
outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
av_log(ctx, AV_LOG_ERROR, "Input link %s parameters "
"(size %dx%d, SAR %d:%d) do not match the corresponding "
@@ -355,7 +358,7 @@ static int request_frame(AVFilterLink *outlink)
}
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
ConcatContext *cat = ctx->priv;
unsigned seg, type, str;
@@ -409,9 +412,7 @@ static av_cold void uninit(AVFilterContext *ctx)
av_free(cat->in);
}
-static const char *const shorthand[] = { NULL };
-
-AVFilter avfilter_avf_concat = {
+AVFilter ff_avf_concat = {
.name = "concat",
.description = NULL_IF_CONFIG_SMALL("Concatenate audio and video streams."),
.init = init,
@@ -421,5 +422,5 @@ AVFilter avfilter_avf_concat = {
.inputs = NULL,
.outputs = NULL,
.priv_class = &concat_class,
- .shorthand = shorthand,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS | AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
diff --git a/ffmpeg/libavfilter/avf_showspectrum.c b/ffmpeg/libavfilter/avf_showspectrum.c
index 364ee6c..fc32834 100644
--- a/ffmpeg/libavfilter/avf_showspectrum.c
+++ b/ffmpeg/libavfilter/avf_showspectrum.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 Clément Bœsch
+ * Copyright (c) 2012-2013 Clément Bœsch
* Copyright (c) 2013 Rudolf Polzer <divverent@xonotic.org>
*
* This file is part of FFmpeg.
@@ -37,6 +37,7 @@
enum DisplayMode { COMBINED, SEPARATE, NB_MODES };
enum DisplayScale { LINEAR, SQRT, CBRT, LOG, NB_SCALES };
enum ColorMode { CHANNEL, INTENSITY, NB_CLMODES };
+enum WindowFunc { WFUNC_NONE, WFUNC_HANN, WFUNC_HAMMING, WFUNC_BLACKMAN, NB_WFUNC };
typedef struct {
const AVClass *class;
@@ -57,6 +58,7 @@ typedef struct {
int filled; ///< number of samples (per channel) filled in current rdft_buffer
int consumed; ///< number of samples (per channel) consumed from the input frame
float *window_func_lut; ///< Window function LUT
+ enum WindowFunc win_func;
float *combine_buffer; ///< color combining buffer (3 * h items)
} ShowSpectrumContext;
@@ -68,18 +70,22 @@ static const AVOption showspectrum_options[] = {
{ "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
{ "slide", "set sliding mode", OFFSET(sliding), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
{ "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, COMBINED, NB_MODES-1, FLAGS, "mode" },
- { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
- { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
+ { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
+ { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
{ "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=CHANNEL}, CHANNEL, NB_CLMODES-1, FLAGS, "color" },
- { "channel", "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL}, 0, 0, FLAGS, "color" },
- { "intensity", "intensity based coloring", 0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
+ { "channel", "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL}, 0, 0, FLAGS, "color" },
+ { "intensity", "intensity based coloring", 0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
{ "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=SQRT}, LINEAR, NB_SCALES-1, FLAGS, "scale" },
- { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
- { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
- { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
- { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
+ { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
+ { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
+ { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
+ { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
{ "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
- { NULL },
+ { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = WFUNC_HANN}, 0, NB_WFUNC-1, FLAGS, "win_func" },
+ { "hann", "Hann window", 0, AV_OPT_TYPE_CONST, {.i64 = WFUNC_HANN}, 0, 0, FLAGS, "win_func" },
+ { "hamming", "Hamming window", 0, AV_OPT_TYPE_CONST, {.i64 = WFUNC_HAMMING}, 0, 0, FLAGS, "win_func" },
+ { "blackman", "Blackman window", 0, AV_OPT_TYPE_CONST, {.i64 = WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(showspectrum);
@@ -97,32 +103,18 @@ static const struct {
{ 1, 1, 0, 0 }
};
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- ShowSpectrumContext *showspectrum = ctx->priv;
- int err;
-
- showspectrum->class = &showspectrum_class;
- av_opt_set_defaults(showspectrum);
-
- if ((err = av_set_options_string(showspectrum, args, "=", ":")) < 0)
- return err;
-
- return 0;
-}
-
static av_cold void uninit(AVFilterContext *ctx)
{
- ShowSpectrumContext *showspectrum = ctx->priv;
+ ShowSpectrumContext *s = ctx->priv;
int i;
- av_freep(&showspectrum->combine_buffer);
- av_rdft_end(showspectrum->rdft);
- for (i = 0; i < showspectrum->nb_display_channels; i++)
- av_freep(&showspectrum->rdft_data[i]);
- av_freep(&showspectrum->rdft_data);
- av_freep(&showspectrum->window_func_lut);
- av_frame_free(&showspectrum->outpicref);
+ av_freep(&s->combine_buffer);
+ av_rdft_end(s->rdft);
+ for (i = 0; i < s->nb_display_channels; i++)
+ av_freep(&s->rdft_data[i]);
+ av_freep(&s->rdft_data);
+ av_freep(&s->window_func_lut);
+ av_frame_free(&s->outpicref);
}
static int query_formats(AVFilterContext *ctx)
@@ -163,110 +155,131 @@ static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = ctx->inputs[0];
- ShowSpectrumContext *showspectrum = ctx->priv;
+ ShowSpectrumContext *s = ctx->priv;
int i, rdft_bits, win_size, h;
- outlink->w = showspectrum->w;
- outlink->h = showspectrum->h;
+ outlink->w = s->w;
+ outlink->h = s->h;
- h = (showspectrum->mode == COMBINED) ? outlink->h : outlink->h / inlink->channels;
- showspectrum->channel_height = h;
+ h = (s->mode == COMBINED) ? outlink->h : outlink->h / inlink->channels;
+ s->channel_height = h;
/* RDFT window size (precision) according to the requested output frame height */
for (rdft_bits = 1; 1 << rdft_bits < 2 * h; rdft_bits++);
win_size = 1 << rdft_bits;
/* (re-)configuration if the video output changed (or first init) */
- if (rdft_bits != showspectrum->rdft_bits) {
+ if (rdft_bits != s->rdft_bits) {
size_t rdft_size, rdft_listsize;
AVFrame *outpicref;
- av_rdft_end(showspectrum->rdft);
- showspectrum->rdft = av_rdft_init(rdft_bits, DFT_R2C);
- showspectrum->rdft_bits = rdft_bits;
+ av_rdft_end(s->rdft);
+ s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
+ s->rdft_bits = rdft_bits;
/* RDFT buffers: x2 for each (display) channel buffer.
* Note: we use free and malloc instead of a realloc-like function to
* make sure the buffer is aligned in memory for the FFT functions. */
- for (i = 0; i < showspectrum->nb_display_channels; i++)
- av_freep(&showspectrum->rdft_data[i]);
- av_freep(&showspectrum->rdft_data);
- showspectrum->nb_display_channels = inlink->channels;
+ for (i = 0; i < s->nb_display_channels; i++)
+ av_freep(&s->rdft_data[i]);
+ av_freep(&s->rdft_data);
+ s->nb_display_channels = inlink->channels;
- if (av_size_mult(sizeof(*showspectrum->rdft_data),
- showspectrum->nb_display_channels, &rdft_listsize) < 0)
+ if (av_size_mult(sizeof(*s->rdft_data),
+ s->nb_display_channels, &rdft_listsize) < 0)
return AVERROR(EINVAL);
- if (av_size_mult(sizeof(**showspectrum->rdft_data),
+ if (av_size_mult(sizeof(**s->rdft_data),
win_size, &rdft_size) < 0)
return AVERROR(EINVAL);
- showspectrum->rdft_data = av_malloc(rdft_listsize);
- if (!showspectrum->rdft_data)
+ s->rdft_data = av_malloc(rdft_listsize);
+ if (!s->rdft_data)
return AVERROR(ENOMEM);
- for (i = 0; i < showspectrum->nb_display_channels; i++) {
- showspectrum->rdft_data[i] = av_malloc(rdft_size);
- if (!showspectrum->rdft_data[i])
+ for (i = 0; i < s->nb_display_channels; i++) {
+ s->rdft_data[i] = av_malloc(rdft_size);
+ if (!s->rdft_data[i])
return AVERROR(ENOMEM);
}
- showspectrum->filled = 0;
+ s->filled = 0;
- /* pre-calc windowing function (hann here) */
- showspectrum->window_func_lut =
- av_realloc_f(showspectrum->window_func_lut, win_size,
- sizeof(*showspectrum->window_func_lut));
- if (!showspectrum->window_func_lut)
+ /* pre-calc windowing function */
+ s->window_func_lut =
+ av_realloc_f(s->window_func_lut, win_size,
+ sizeof(*s->window_func_lut));
+ if (!s->window_func_lut)
return AVERROR(ENOMEM);
- for (i = 0; i < win_size; i++)
- showspectrum->window_func_lut[i] = .5f * (1 - cos(2*M_PI*i / (win_size-1)));
+ switch (s->win_func) {
+ case WFUNC_NONE:
+ for (i = 0; i < win_size; i++)
+ s->window_func_lut[i] = 1.;
+ break;
+ case WFUNC_HANN:
+ for (i = 0; i < win_size; i++)
+ s->window_func_lut[i] = .5f * (1 - cos(2*M_PI*i / (win_size-1)));
+ break;
+ case WFUNC_HAMMING:
+ for (i = 0; i < win_size; i++)
+ s->window_func_lut[i] = .54f - .46f * cos(2*M_PI*i / (win_size-1));
+ break;
+ case WFUNC_BLACKMAN: {
+ for (i = 0; i < win_size; i++)
+ s->window_func_lut[i] = .42f - .5f*cos(2*M_PI*i / (win_size-1)) + .08f*cos(4*M_PI*i / (win_size-1));
+ break;
+ }
+ default:
+ av_assert0(0);
+ }
/* prepare the initial picref buffer (black frame) */
- av_frame_free(&showspectrum->outpicref);
- showspectrum->outpicref = outpicref =
+ av_frame_free(&s->outpicref);
+ s->outpicref = outpicref =
ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!outpicref)
return AVERROR(ENOMEM);
outlink->sample_aspect_ratio = (AVRational){1,1};
- memset(outpicref->data[0], 0, outlink->h * outpicref->linesize[0]);
- memset(outpicref->data[1], 128, outlink->h * outpicref->linesize[1]);
- memset(outpicref->data[2], 128, outlink->h * outpicref->linesize[2]);
+ for (i = 0; i < outlink->h; i++) {
+ memset(outpicref->data[0] + i * outpicref->linesize[0], 0, outlink->w);
+ memset(outpicref->data[1] + i * outpicref->linesize[1], 128, outlink->w);
+ memset(outpicref->data[2] + i * outpicref->linesize[2], 128, outlink->w);
+ }
}
- if (showspectrum->xpos >= outlink->w)
- showspectrum->xpos = 0;
+ if (s->xpos >= outlink->w)
+ s->xpos = 0;
- showspectrum->combine_buffer =
- av_realloc_f(showspectrum->combine_buffer, outlink->h * 3,
- sizeof(*showspectrum->combine_buffer));
+ s->combine_buffer =
+ av_realloc_f(s->combine_buffer, outlink->h * 3,
+ sizeof(*s->combine_buffer));
av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d RDFT window size:%d\n",
- showspectrum->w, showspectrum->h, win_size);
+ s->w, s->h, win_size);
return 0;
}
inline static int push_frame(AVFilterLink *outlink)
{
- ShowSpectrumContext *showspectrum = outlink->src->priv;
+ ShowSpectrumContext *s = outlink->src->priv;
- showspectrum->xpos++;
- if (showspectrum->xpos >= outlink->w)
- showspectrum->xpos = 0;
- showspectrum->filled = 0;
- showspectrum->req_fullfilled = 1;
+ s->xpos++;
+ if (s->xpos >= outlink->w)
+ s->xpos = 0;
+ s->filled = 0;
+ s->req_fullfilled = 1;
- return ff_filter_frame(outlink, av_frame_clone(showspectrum->outpicref));
+ return ff_filter_frame(outlink, av_frame_clone(s->outpicref));
}
static int request_frame(AVFilterLink *outlink)
{
- ShowSpectrumContext *showspectrum = outlink->src->priv;
+ ShowSpectrumContext *s = outlink->src->priv;
AVFilterLink *inlink = outlink->src->inputs[0];
int ret;
- showspectrum->req_fullfilled = 0;
+ s->req_fullfilled = 0;
do {
ret = ff_request_frame(inlink);
- } while (!showspectrum->req_fullfilled && ret >= 0);
+ } while (!s->req_fullfilled && ret >= 0);
- if (ret == AVERROR_EOF && showspectrum->outpicref)
+ if (ret == AVERROR_EOF && s->outpicref)
push_frame(outlink);
return ret;
}
@@ -276,60 +289,60 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb
int ret;
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
- ShowSpectrumContext *showspectrum = ctx->priv;
- AVFrame *outpicref = showspectrum->outpicref;
+ ShowSpectrumContext *s = ctx->priv;
+ AVFrame *outpicref = s->outpicref;
/* nb_freq contains the power of two superior or equal to the output image
* height (or half the RDFT window size) */
- const int nb_freq = 1 << (showspectrum->rdft_bits - 1);
+ const int nb_freq = 1 << (s->rdft_bits - 1);
const int win_size = nb_freq << 1;
const double w = 1. / (sqrt(nb_freq) * 32768.);
int ch, plane, n, y;
- const int start = showspectrum->filled;
+ const int start = s->filled;
const int add_samples = FFMIN(win_size - start, nb_samples);
/* fill RDFT input with the number of samples available */
- for (ch = 0; ch < showspectrum->nb_display_channels; ch++) {
+ for (ch = 0; ch < s->nb_display_channels; ch++) {
const int16_t *p = (int16_t *)insamples->extended_data[ch];
- p += showspectrum->consumed;
+ p += s->consumed;
for (n = 0; n < add_samples; n++)
- showspectrum->rdft_data[ch][start + n] = p[n] * showspectrum->window_func_lut[start + n];
+ s->rdft_data[ch][start + n] = p[n] * s->window_func_lut[start + n];
}
- showspectrum->filled += add_samples;
+ s->filled += add_samples;
/* complete RDFT window size? */
- if (showspectrum->filled == win_size) {
+ if (s->filled == win_size) {
/* channel height */
- int h = showspectrum->channel_height;
+ int h = s->channel_height;
/* run RDFT on each samples set */
- for (ch = 0; ch < showspectrum->nb_display_channels; ch++)
- av_rdft_calc(showspectrum->rdft, showspectrum->rdft_data[ch]);
+ for (ch = 0; ch < s->nb_display_channels; ch++)
+ av_rdft_calc(s->rdft, s->rdft_data[ch]);
/* fill a new spectrum column */
-#define RE(y, ch) showspectrum->rdft_data[ch][2 * y + 0]
-#define IM(y, ch) showspectrum->rdft_data[ch][2 * y + 1]
+#define RE(y, ch) s->rdft_data[ch][2 * y + 0]
+#define IM(y, ch) s->rdft_data[ch][2 * y + 1]
#define MAGNITUDE(y, ch) hypot(RE(y, ch), IM(y, ch))
/* initialize buffer for combining to black */
for (y = 0; y < outlink->h; y++) {
- showspectrum->combine_buffer[3 * y ] = 0;
- showspectrum->combine_buffer[3 * y + 1] = 127.5;
- showspectrum->combine_buffer[3 * y + 2] = 127.5;
+ s->combine_buffer[3 * y ] = 0;
+ s->combine_buffer[3 * y + 1] = 127.5;
+ s->combine_buffer[3 * y + 2] = 127.5;
}
- for (ch = 0; ch < showspectrum->nb_display_channels; ch++) {
+ for (ch = 0; ch < s->nb_display_channels; ch++) {
float yf, uf, vf;
/* decide color range */
- switch (showspectrum->mode) {
+ switch (s->mode) {
case COMBINED:
// reduce range by channel count
- yf = 256.0f / showspectrum->nb_display_channels;
- switch (showspectrum->color_mode) {
+ yf = 256.0f / s->nb_display_channels;
+ switch (s->color_mode) {
case INTENSITY:
uf = yf;
vf = yf;
@@ -354,28 +367,28 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb
av_assert0(0);
}
- if (showspectrum->color_mode == CHANNEL) {
- if (showspectrum->nb_display_channels > 1) {
- uf *= 0.5 * sin((2 * M_PI * ch) / showspectrum->nb_display_channels);
- vf *= 0.5 * cos((2 * M_PI * ch) / showspectrum->nb_display_channels);
+ if (s->color_mode == CHANNEL) {
+ if (s->nb_display_channels > 1) {
+ uf *= 0.5 * sin((2 * M_PI * ch) / s->nb_display_channels);
+ vf *= 0.5 * cos((2 * M_PI * ch) / s->nb_display_channels);
} else {
uf = 0.0f;
vf = 0.0f;
}
}
- uf *= showspectrum->saturation;
- vf *= showspectrum->saturation;
+ uf *= s->saturation;
+ vf *= s->saturation;
/* draw the channel */
for (y = 0; y < h; y++) {
- int row = (showspectrum->mode == COMBINED) ? y : ch * h + y;
- float *out = &showspectrum->combine_buffer[3 * row];
+ int row = (s->mode == COMBINED) ? y : ch * h + y;
+ float *out = &s->combine_buffer[3 * row];
/* get magnitude */
float a = w * MAGNITUDE(y, ch);
/* apply scale */
- switch (showspectrum->scale) {
+ switch (s->scale) {
case LINEAR:
break;
case SQRT:
@@ -391,7 +404,7 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb
av_assert0(0);
}
- if (showspectrum->color_mode == INTENSITY) {
+ if (s->color_mode == INTENSITY) {
float y, u, v;
int i;
@@ -432,7 +445,7 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb
}
/* copy to output */
- if (showspectrum->sliding) {
+ if (s->sliding) {
for (plane = 0; plane < 3; plane++) {
for (y = 0; y < outlink->h; y++) {
uint8_t *p = outpicref->data[plane] +
@@ -440,20 +453,20 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb
memmove(p, p + 1, outlink->w - 1);
}
}
- showspectrum->xpos = outlink->w - 1;
+ s->xpos = outlink->w - 1;
}
for (plane = 0; plane < 3; plane++) {
uint8_t *p = outpicref->data[plane] +
(outlink->h - 1) * outpicref->linesize[plane] +
- showspectrum->xpos;
+ s->xpos;
for (y = 0; y < outlink->h; y++) {
- *p = rint(FFMAX(0, FFMIN(showspectrum->combine_buffer[3 * y + plane], 255)));
+ *p = rint(FFMAX(0, FFMIN(s->combine_buffer[3 * y + plane], 255)));
p -= outpicref->linesize[plane];
}
}
outpicref->pts = insamples->pts +
- av_rescale_q(showspectrum->consumed,
+ av_rescale_q(s->consumed,
(AVRational){ 1, inlink->sample_rate },
outlink->time_base);
ret = push_frame(outlink);
@@ -467,15 +480,15 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
AVFilterContext *ctx = inlink->dst;
- ShowSpectrumContext *showspectrum = ctx->priv;
+ ShowSpectrumContext *s = ctx->priv;
int ret = 0, left_samples = insamples->nb_samples;
- showspectrum->consumed = 0;
+ s->consumed = 0;
while (left_samples) {
int ret = plot_spectrum_column(inlink, insamples, left_samples);
if (ret < 0)
break;
- showspectrum->consumed += ret;
+ s->consumed += ret;
left_samples -= ret;
}
@@ -502,14 +515,13 @@ static const AVFilterPad showspectrum_outputs[] = {
{ NULL }
};
-AVFilter avfilter_avf_showspectrum = {
- .name = "showspectrum",
- .description = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output."),
- .init = init,
- .uninit = uninit,
- .query_formats = query_formats,
- .priv_size = sizeof(ShowSpectrumContext),
- .inputs = showspectrum_inputs,
- .outputs = showspectrum_outputs,
- .priv_class = &showspectrum_class,
+AVFilter ff_avf_showspectrum = {
+ .name = "showspectrum",
+ .description = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output."),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ShowSpectrumContext),
+ .inputs = showspectrum_inputs,
+ .outputs = showspectrum_outputs,
+ .priv_class = &showspectrum_class,
};
diff --git a/ffmpeg/libavfilter/avf_showwaves.c b/ffmpeg/libavfilter/avf_showwaves.c
index 095fc57..0b45bd0 100644
--- a/ffmpeg/libavfilter/avf_showwaves.c
+++ b/ffmpeg/libavfilter/avf_showwaves.c
@@ -54,35 +54,19 @@ typedef struct {
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption showwaves_options[] = {
- { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
- { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
{ "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
{ "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
+ { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
+ { "point", "draw a point for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT}, .flags=FLAGS, .unit="mode"},
+ { "line", "draw a line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE}, .flags=FLAGS, .unit="mode"},
{ "n", "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
-
- {"mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
- {"point", "draw a point for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT}, .flags=FLAGS, .unit="mode"},
- {"line", "draw a line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE}, .flags=FLAGS, .unit="mode"},
- { NULL },
+ { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
+ { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(showwaves);
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- ShowWavesContext *showwaves = ctx->priv;
- int err;
-
- showwaves->class = &showwaves_class;
- av_opt_set_defaults(showwaves);
- showwaves->buf_idx = 0;
-
- if ((err = av_set_options_string(showwaves, args, "=", ":")) < 0)
- return err;
-
- return 0;
-}
-
static av_cold void uninit(AVFilterContext *ctx)
{
ShowWavesContext *showwaves = ctx->priv;
@@ -133,6 +117,7 @@ static int config_output(AVFilterLink *outlink)
if (!showwaves->n)
showwaves->n = FFMAX(1, ((double)inlink->sample_rate / (showwaves->w * av_q2d(showwaves->rate))) + 0.5);
+ showwaves->buf_idx = 0;
outlink->w = showwaves->w;
outlink->h = showwaves->h;
outlink->sample_aspect_ratio = (AVRational){1,1};
@@ -184,7 +169,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
AVFrame *outpicref = showwaves->outpicref;
int linesize = outpicref ? outpicref->linesize[0] : 0;
int16_t *p = (int16_t *)insamples->data[0];
- int nb_channels = av_get_channel_layout_nb_channels(insamples->channel_layout);
+ int nb_channels = inlink->channels;
int i, j, k, h, ret = 0;
const int n = showwaves->n;
const int x = 255 / (nb_channels * n); /* multiplication factor, pre-computed to avoid in-loop divisions */
@@ -203,7 +188,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
(AVRational){ 1, inlink->sample_rate },
outlink->time_base);
linesize = outpicref->linesize[0];
- memset(outpicref->data[0], 0, showwaves->h*linesize);
+ for (j = 0; j < outlink->h; j++)
+ memset(outpicref->data[0] + j * linesize, 0, outlink->w);
}
for (j = 0; j < nb_channels; j++) {
h = showwaves->h/2 - av_rescale(*p++, showwaves->h/2, MAX_INT16);
@@ -258,14 +244,13 @@ static const AVFilterPad showwaves_outputs[] = {
{ NULL }
};
-AVFilter avfilter_avf_showwaves = {
- .name = "showwaves",
- .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
- .init = init,
- .uninit = uninit,
- .query_formats = query_formats,
- .priv_size = sizeof(ShowWavesContext),
- .inputs = showwaves_inputs,
- .outputs = showwaves_outputs,
- .priv_class = &showwaves_class,
+AVFilter ff_avf_showwaves = {
+ .name = "showwaves",
+ .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ShowWavesContext),
+ .inputs = showwaves_inputs,
+ .outputs = showwaves_outputs,
+ .priv_class = &showwaves_class,
};
diff --git a/ffmpeg/libavfilter/avfilter.c b/ffmpeg/libavfilter/avfilter.c
index 8a907dc..2567ce9 100644
--- a/ffmpeg/libavfilter/avfilter.c
+++ b/ffmpeg/libavfilter/avfilter.c
@@ -19,11 +19,14 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/atomic.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
+#include "libavutil/eval.h"
#include "libavutil/imgutils.h"
+#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/rational.h"
@@ -33,7 +36,6 @@
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
-#include "audio.h"
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame);
@@ -65,7 +67,8 @@ void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
ff_tlog(ctx, "]%s", end ? "\n" : "");
}
-unsigned avfilter_version(void) {
+unsigned avfilter_version(void)
+{
av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
return LIBAVFILTER_VERSION_INT;
}
@@ -90,25 +93,36 @@ void ff_command_queue_pop(AVFilterContext *filter)
av_free(c);
}
-void ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
+int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
AVFilterPad **pads, AVFilterLink ***links,
AVFilterPad *newpad)
{
+ AVFilterLink **newlinks;
+ AVFilterPad *newpads;
unsigned i;
idx = FFMIN(idx, *count);
- *pads = av_realloc(*pads, sizeof(AVFilterPad) * (*count + 1));
- *links = av_realloc(*links, sizeof(AVFilterLink*) * (*count + 1));
- memmove(*pads +idx+1, *pads +idx, sizeof(AVFilterPad) * (*count-idx));
- memmove(*links+idx+1, *links+idx, sizeof(AVFilterLink*) * (*count-idx));
- memcpy(*pads+idx, newpad, sizeof(AVFilterPad));
+ newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
+ newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
+ if (newpads)
+ *pads = newpads;
+ if (newlinks)
+ *links = newlinks;
+ if (!newpads || !newlinks)
+ return AVERROR(ENOMEM);
+
+ memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
+ memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
+ memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
(*links)[idx] = NULL;
(*count)++;
- for (i = idx+1; i < *count; i++)
- if (*links[i])
- (*(unsigned *)((uint8_t *) *links[i] + padidx_off))++;
+ for (i = idx + 1; i < *count; i++)
+ if ((*links)[i])
+ (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
+
+ return 0;
}
int avfilter_link(AVFilterContext *src, unsigned srcpad,
@@ -128,8 +142,11 @@ int avfilter_link(AVFilterContext *src, unsigned srcpad,
return AVERROR(EINVAL);
}
- src->outputs[srcpad] =
- dst-> inputs[dstpad] = link = av_mallocz(sizeof(AVFilterLink));
+ link = av_mallocz(sizeof(*link));
+ if (!link)
+ return AVERROR(ENOMEM);
+
+ src->outputs[srcpad] = dst->inputs[dstpad] = link;
link->src = src;
link->dst = dst;
@@ -180,19 +197,18 @@ int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
}
/* re-hookup the link to the new destination filter we inserted */
- link->dst = filt;
- link->dstpad = &filt->input_pads[filt_srcpad_idx];
+ link->dst = filt;
+ link->dstpad = &filt->input_pads[filt_srcpad_idx];
filt->inputs[filt_srcpad_idx] = link;
/* if any information on supported media formats already exists on the
* link, we need to preserve that */
if (link->out_formats)
ff_formats_changeref(&link->out_formats,
- &filt->outputs[filt_dstpad_idx]->out_formats);
-
+ &filt->outputs[filt_dstpad_idx]->out_formats);
if (link->out_samplerates)
ff_formats_changeref(&link->out_samplerates,
- &filt->outputs[filt_dstpad_idx]->out_samplerates);
+ &filt->outputs[filt_dstpad_idx]->out_samplerates);
if (link->out_channel_layouts)
ff_channel_layouts_changeref(&link->out_channel_layouts,
&filt->outputs[filt_dstpad_idx]->out_channel_layouts);
@@ -323,17 +339,27 @@ int ff_request_frame(AVFilterLink *link)
if (link->closed)
return AVERROR_EOF;
- if (link->srcpad->request_frame)
- ret = link->srcpad->request_frame(link);
- else if (link->src->inputs[0])
- ret = ff_request_frame(link->src->inputs[0]);
- if (ret == AVERROR_EOF && link->partial_buf) {
- AVFrame *pbuf = link->partial_buf;
- link->partial_buf = NULL;
- ret = ff_filter_frame_framed(link, pbuf);
- }
- if (ret == AVERROR_EOF)
- link->closed = 1;
+ av_assert0(!link->frame_requested);
+ link->frame_requested = 1;
+ while (link->frame_requested) {
+ if (link->srcpad->request_frame)
+ ret = link->srcpad->request_frame(link);
+ else if (link->src->inputs[0])
+ ret = ff_request_frame(link->src->inputs[0]);
+ if (ret == AVERROR_EOF && link->partial_buf) {
+ AVFrame *pbuf = link->partial_buf;
+ link->partial_buf = NULL;
+ ret = ff_filter_frame_framed(link, pbuf);
+ }
+ if (ret < 0) {
+ link->frame_requested = 0;
+ if (ret == AVERROR_EOF)
+ link->closed = 1;
+ } else {
+ av_assert0(!link->frame_requested ||
+ link->flags & FF_LINK_FLAG_REQUEST_LOOP);
+ }
+ }
return ret;
}
@@ -355,6 +381,49 @@ int ff_poll_frame(AVFilterLink *link)
return min;
}
+static const char *const var_names[] = { "t", "n", "pos", NULL };
+enum { VAR_T, VAR_N, VAR_POS, VAR_VARS_NB };
+
+static int set_enable_expr(AVFilterContext *ctx, const char *expr)
+{
+ int ret;
+ char *expr_dup;
+ AVExpr *old = ctx->enable;
+
+ if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
+ av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
+ "with filter '%s'\n", ctx->filter->name);
+ return AVERROR_PATCHWELCOME;
+ }
+
+ expr_dup = av_strdup(expr);
+ if (!expr_dup)
+ return AVERROR(ENOMEM);
+
+ if (!ctx->var_values) {
+ ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
+ if (!ctx->var_values) {
+ av_free(expr_dup);
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
+ NULL, NULL, NULL, NULL, 0, ctx->priv);
+ if (ret < 0) {
+ av_log(ctx->priv, AV_LOG_ERROR,
+ "Error when evaluating the expression '%s' for enable\n",
+ expr_dup);
+ av_free(expr_dup);
+ return ret;
+ }
+
+ av_expr_free(old);
+ av_free(ctx->enable_str);
+ ctx->enable_str = expr_dup;
+ return 0;
+}
+
void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
{
if (pts == AV_NOPTS_VALUE)
@@ -368,42 +437,50 @@ void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
{
if(!strcmp(cmd, "ping")){
+ char local_res[256] = {0};
+
+ if (!res) {
+ res = local_res;
+ res_len = sizeof(local_res);
+ }
av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
+ if (res == local_res)
+ av_log(filter, AV_LOG_INFO, "%s", res);
return 0;
+ }else if(!strcmp(cmd, "enable")) {
+ return set_enable_expr(filter, arg);
}else if(filter->filter->process_command) {
return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
}
return AVERROR(ENOSYS);
}
-#define MAX_REGISTERED_AVFILTERS_NB 256
-
-static AVFilter *registered_avfilters[MAX_REGISTERED_AVFILTERS_NB + 1];
-
-static int next_registered_avfilter_idx = 0;
+static AVFilter *first_filter;
+#if !FF_API_NOCONST_GET_NAME
+const
+#endif
AVFilter *avfilter_get_by_name(const char *name)
{
- int i;
+ const AVFilter *f = NULL;
- for (i = 0; registered_avfilters[i]; i++)
- if (!strcmp(registered_avfilters[i]->name, name))
- return registered_avfilters[i];
+ if (!name)
+ return NULL;
+
+ while ((f = avfilter_next(f)))
+ if (!strcmp(f->name, name))
+ return (AVFilter *)f;
return NULL;
}
int avfilter_register(AVFilter *filter)
{
+ AVFilter **f = &first_filter;
int i;
- if (next_registered_avfilter_idx == MAX_REGISTERED_AVFILTERS_NB) {
- av_log(NULL, AV_LOG_ERROR,
- "Maximum number of registered filters %d reached, "
- "impossible to register filter with name '%s'\n",
- MAX_REGISTERED_AVFILTERS_NB, filter->name);
- return AVERROR(ENOMEM);
- }
+ /* the filter must select generic or internal exclusively */
+ av_assert0((filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE) != AVFILTER_FLAG_SUPPORT_TIMELINE);
for(i=0; filter->inputs && filter->inputs[i].name; i++) {
const AVFilterPad *input = &filter->inputs[i];
@@ -411,29 +488,39 @@ int avfilter_register(AVFilter *filter)
|| (!input->start_frame && !input->end_frame));
}
- registered_avfilters[next_registered_avfilter_idx++] = filter;
+ filter->next = NULL;
+
+ while(*f || avpriv_atomic_ptr_cas((void * volatile *)f, NULL, filter))
+ f = &(*f)->next;
+
return 0;
}
+const AVFilter *avfilter_next(const AVFilter *prev)
+{
+ return prev ? prev->next : first_filter;
+}
+
+#if FF_API_OLD_FILTER_REGISTER
AVFilter **av_filter_next(AVFilter **filter)
{
- return filter ? ++filter : &registered_avfilters[0];
+ return filter ? &(*filter)->next : &first_filter;
}
void avfilter_uninit(void)
{
- memset(registered_avfilters, 0, sizeof(registered_avfilters));
- next_registered_avfilter_idx = 0;
}
+#endif
-static int pad_count(const AVFilterPad *pads)
+int avfilter_pad_count(const AVFilterPad *pads)
{
int count;
if (!pads)
return 0;
- for(count = 0; pads->name; count ++) pads ++;
+ for (count = 0; pads->name; count++)
+ pads++;
return count;
}
@@ -446,31 +533,42 @@ static const char *default_filter_name(void *filter_ctx)
static void *filter_child_next(void *obj, void *prev)
{
AVFilterContext *ctx = obj;
- if (!prev && ctx->filter && ctx->filter->priv_class)
+ if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
return ctx->priv;
return NULL;
}
static const AVClass *filter_child_class_next(const AVClass *prev)
{
- AVFilter **filter_ptr = NULL;
+ const AVFilter *f = NULL;
/* find the filter that corresponds to prev */
- while (prev && *(filter_ptr = av_filter_next(filter_ptr)))
- if ((*filter_ptr)->priv_class == prev)
+ while (prev && (f = avfilter_next(f)))
+ if (f->priv_class == prev)
break;
/* could not find filter corresponding to prev */
- if (prev && !(*filter_ptr))
+ if (prev && !f)
return NULL;
/* find next filter with specific options */
- while (*(filter_ptr = av_filter_next(filter_ptr)))
- if ((*filter_ptr)->priv_class)
- return (*filter_ptr)->priv_class;
+ while ((f = avfilter_next(f)))
+ if (f->priv_class)
+ return f->priv_class;
+
return NULL;
}
+#define OFFSET(x) offsetof(AVFilterContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption avfilter_options[] = {
+ { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
+ { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
+ { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .unit = "thread_type" },
+ { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { NULL },
+};
+
static const AVClass avfilter_class = {
.class_name = "AVFilter",
.item_name = default_filter_name,
@@ -478,24 +576,32 @@ static const AVClass avfilter_class = {
.category = AV_CLASS_CATEGORY_FILTER,
.child_next = filter_child_next,
.child_class_next = filter_child_class_next,
+ .option = avfilter_options,
};
-const AVClass *avfilter_get_class(void)
+static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
+ int *ret, int nb_jobs)
{
- return &avfilter_class;
+ int i;
+
+ for (i = 0; i < nb_jobs; i++) {
+ int r = func(ctx, arg, i, nb_jobs);
+ if (ret)
+ ret[i] = r;
+ }
+ return 0;
}
-int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name)
+AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
{
AVFilterContext *ret;
- *filter_ctx = NULL;
if (!filter)
- return AVERROR(EINVAL);
+ return NULL;
ret = av_mallocz(sizeof(AVFilterContext));
if (!ret)
- return AVERROR(ENOMEM);
+ return NULL;
ret->av_class = &avfilter_class;
ret->filter = filter;
@@ -506,7 +612,18 @@ int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *in
goto err;
}
- ret->nb_inputs = pad_count(filter->inputs);
+ av_opt_set_defaults(ret);
+ if (filter->priv_class) {
+ *(const AVClass**)ret->priv = filter->priv_class;
+ av_opt_set_defaults(ret->priv);
+ }
+
+ ret->internal = av_mallocz(sizeof(*ret->internal));
+ if (!ret->internal)
+ goto err;
+ ret->internal->execute = default_execute;
+
+ ret->nb_inputs = avfilter_pad_count(filter->inputs);
if (ret->nb_inputs ) {
ret->input_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_inputs);
if (!ret->input_pads)
@@ -517,7 +634,7 @@ int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *in
goto err;
}
- ret->nb_outputs = pad_count(filter->outputs);
+ ret->nb_outputs = avfilter_pad_count(filter->outputs);
if (ret->nb_outputs) {
ret->output_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_outputs);
if (!ret->output_pads)
@@ -528,12 +645,13 @@ int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *in
goto err;
}
#if FF_API_FOO_COUNT
+FF_DISABLE_DEPRECATION_WARNINGS
ret->output_count = ret->nb_outputs;
ret->input_count = ret->nb_inputs;
+FF_ENABLE_DEPRECATION_WARNINGS
#endif
- *filter_ctx = ret;
- return 0;
+ return ret;
err:
av_freep(&ret->inputs);
@@ -543,50 +661,61 @@ err:
av_freep(&ret->output_pads);
ret->nb_outputs = 0;
av_freep(&ret->priv);
+ av_freep(&ret->internal);
av_free(ret);
- return AVERROR(ENOMEM);
+ return NULL;
+}
+
+#if FF_API_AVFILTER_OPEN
+int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name)
+{
+ *filter_ctx = ff_filter_alloc(filter, inst_name);
+ return *filter_ctx ? 0 : AVERROR(ENOMEM);
+}
+#endif
+
+static void free_link(AVFilterLink *link)
+{
+ if (!link)
+ return;
+
+ if (link->src)
+ link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
+ if (link->dst)
+ link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
+
+ ff_formats_unref(&link->in_formats);
+ ff_formats_unref(&link->out_formats);
+ ff_formats_unref(&link->in_samplerates);
+ ff_formats_unref(&link->out_samplerates);
+ ff_channel_layouts_unref(&link->in_channel_layouts);
+ ff_channel_layouts_unref(&link->out_channel_layouts);
+ avfilter_link_free(&link);
}
void avfilter_free(AVFilterContext *filter)
{
int i;
- AVFilterLink *link;
if (!filter)
return;
+ if (filter->graph)
+ ff_filter_graph_remove_filter(filter->graph, filter);
+
if (filter->filter->uninit)
filter->filter->uninit(filter);
- if (filter->filter->shorthand)
- av_opt_free(filter->priv);
for (i = 0; i < filter->nb_inputs; i++) {
- if ((link = filter->inputs[i])) {
- if (link->src)
- link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
- ff_formats_unref(&link->in_formats);
- ff_formats_unref(&link->out_formats);
- ff_formats_unref(&link->in_samplerates);
- ff_formats_unref(&link->out_samplerates);
- ff_channel_layouts_unref(&link->in_channel_layouts);
- ff_channel_layouts_unref(&link->out_channel_layouts);
- }
- avfilter_link_free(&link);
+ free_link(filter->inputs[i]);
}
for (i = 0; i < filter->nb_outputs; i++) {
- if ((link = filter->outputs[i])) {
- if (link->dst)
- link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
- ff_formats_unref(&link->in_formats);
- ff_formats_unref(&link->out_formats);
- ff_formats_unref(&link->in_samplerates);
- ff_formats_unref(&link->out_samplerates);
- ff_channel_layouts_unref(&link->in_channel_layouts);
- ff_channel_layouts_unref(&link->out_channel_layouts);
- }
- avfilter_link_free(&link);
+ free_link(filter->outputs[i]);
}
+ if (filter->filter->priv_class)
+ av_opt_free(filter->priv);
+
av_freep(&filter->name);
av_freep(&filter->input_pads);
av_freep(&filter->output_pads);
@@ -596,37 +725,264 @@ void avfilter_free(AVFilterContext *filter)
while(filter->command_queue){
ff_command_queue_pop(filter);
}
+ av_opt_free(filter);
+ av_expr_free(filter->enable);
+ filter->enable = NULL;
+ av_freep(&filter->var_values);
+ av_freep(&filter->internal);
av_free(filter);
}
-int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque)
+static int process_options(AVFilterContext *ctx, AVDictionary **options,
+ const char *args)
{
- int ret=0;
+ const AVOption *o = NULL;
+ int ret, count = 0;
+ char *av_uninit(parsed_key), *av_uninit(value);
+ const char *key;
+ int offset= -1;
+
+ if (!args)
+ return 0;
+
+ while (*args) {
+ const char *shorthand = NULL;
+
+ o = av_opt_next(ctx->priv, o);
+ if (o) {
+ if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
+ continue;
+ offset = o->offset;
+ shorthand = o->name;
+ }
+
+ ret = av_opt_get_key_value(&args, "=", ":",
+ shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
+ &parsed_key, &value);
+ if (ret < 0) {
+ if (ret == AVERROR(EINVAL))
+ av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
+ else
+ av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
+ av_err2str(ret));
+ return ret;
+ }
+ if (*args)
+ args++;
+ if (parsed_key) {
+ key = parsed_key;
+ while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
+ } else {
+ key = shorthand;
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
+
+ if (av_opt_find(ctx, key, NULL, 0, 0)) {
+ ret = av_opt_set(ctx, key, value, 0);
+ if (ret < 0) {
+ av_free(value);
+ av_free(parsed_key);
+ return ret;
+ }
+ } else {
+ av_dict_set(options, key, value, 0);
+ if ((ret = av_opt_set(ctx->priv, key, value, 0)) < 0) {
+ if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
+ if (ret == AVERROR_OPTION_NOT_FOUND)
+ av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
+ av_free(value);
+ av_free(parsed_key);
+ return ret;
+ }
+ }
+ }
- if (filter->filter->shorthand) {
- av_assert0(filter->priv);
- av_assert0(filter->filter->priv_class);
- *(const AVClass **)filter->priv = filter->filter->priv_class;
- av_opt_set_defaults(filter->priv);
- ret = av_opt_set_from_string(filter->priv, args,
- filter->filter->shorthand, "=", ":");
+ av_free(value);
+ av_free(parsed_key);
+ count++;
+ }
+
+ if (ctx->enable_str) {
+ ret = set_enable_expr(ctx, ctx->enable_str);
if (ret < 0)
return ret;
- args = NULL;
}
- if (filter->filter->init_opaque)
- ret = filter->filter->init_opaque(filter, args, opaque);
- else if (filter->filter->init)
- ret = filter->filter->init(filter, args);
+ return count;
+}
+
+#if FF_API_AVFILTER_INIT_FILTER
+int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque)
+{
+ return avfilter_init_str(filter, args);
+}
+#endif
+
+int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
+{
+ int ret = 0;
+
+ ret = av_opt_set_dict(ctx, options);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
+ return ret;
+ }
+
+ if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
+ ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
+ ctx->graph->internal->thread_execute) {
+ ctx->thread_type = AVFILTER_THREAD_SLICE;
+ ctx->internal->execute = ctx->graph->internal->thread_execute;
+ } else {
+ ctx->thread_type = 0;
+ }
+
+ if (ctx->filter->priv_class) {
+ ret = av_opt_set_dict(ctx->priv, options);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
+ return ret;
+ }
+ }
+
+ if (ctx->filter->init_opaque)
+ ret = ctx->filter->init_opaque(ctx, NULL);
+ else if (ctx->filter->init)
+ ret = ctx->filter->init(ctx);
+ else if (ctx->filter->init_dict)
+ ret = ctx->filter->init_dict(ctx, options);
+
+ return ret;
+}
+
+int avfilter_init_str(AVFilterContext *filter, const char *args)
+{
+ AVDictionary *options = NULL;
+ AVDictionaryEntry *e;
+ int ret = 0;
+
+ if (args && *args) {
+ if (!filter->filter->priv_class) {
+ av_log(filter, AV_LOG_ERROR, "This filter does not take any "
+ "options, but options were provided: %s.\n", args);
+ return AVERROR(EINVAL);
+ }
+
+#if FF_API_OLD_FILTER_OPTS
+ if ( !strcmp(filter->filter->name, "format") ||
+ !strcmp(filter->filter->name, "noformat") ||
+ !strcmp(filter->filter->name, "frei0r") ||
+ !strcmp(filter->filter->name, "frei0r_src") ||
+ !strcmp(filter->filter->name, "ocv") ||
+ !strcmp(filter->filter->name, "pan") ||
+ !strcmp(filter->filter->name, "pp") ||
+ !strcmp(filter->filter->name, "aevalsrc")) {
+ /* a hack for compatibility with the old syntax
+ * replace colons with |s */
+ char *copy = av_strdup(args);
+ char *p = copy;
+ int nb_leading = 0; // number of leading colons to skip
+ int deprecated = 0;
+
+ if (!copy) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ if (!strcmp(filter->filter->name, "frei0r") ||
+ !strcmp(filter->filter->name, "ocv"))
+ nb_leading = 1;
+ else if (!strcmp(filter->filter->name, "frei0r_src"))
+ nb_leading = 3;
+
+ while (nb_leading--) {
+ p = strchr(p, ':');
+ if (!p) {
+ p = copy + strlen(copy);
+ break;
+ }
+ p++;
+ }
+
+ deprecated = strchr(p, ':') != NULL;
+
+ if (!strcmp(filter->filter->name, "aevalsrc")) {
+ deprecated = 0;
+ while ((p = strchr(p, ':')) && p[1] != ':') {
+ const char *epos = strchr(p + 1, '=');
+ const char *spos = strchr(p + 1, ':');
+ const int next_token_is_opt = epos && (!spos || epos < spos);
+ if (next_token_is_opt) {
+ p++;
+ break;
+ }
+ /* next token does not contain a '=', assume a channel expression */
+ deprecated = 1;
+ *p++ = '|';
+ }
+ if (p && *p == ':') { // double sep '::' found
+ deprecated = 1;
+ memmove(p, p + 1, strlen(p));
+ }
+ } else
+ while ((p = strchr(p, ':')))
+ *p++ = '|';
+
+ if (deprecated)
+ av_log(filter, AV_LOG_WARNING, "This syntax is deprecated. Use "
+ "'|' to separate the list items.\n");
+
+ av_log(filter, AV_LOG_DEBUG, "compat: called with args=[%s]\n", copy);
+ ret = process_options(filter, &options, copy);
+ av_freep(&copy);
+
+ if (ret < 0)
+ goto fail;
+#endif
+ } else {
+#if CONFIG_MP_FILTER
+ if (!strcmp(filter->filter->name, "mp")) {
+ char *escaped;
+
+ if (!strncmp(args, "filter=", 7))
+ args += 7;
+ ret = av_escape(&escaped, args, ":=", AV_ESCAPE_MODE_BACKSLASH, 0);
+ if (ret < 0) {
+ av_log(filter, AV_LOG_ERROR, "Unable to escape MPlayer filters arg '%s'\n", args);
+ goto fail;
+ }
+ ret = process_options(filter, &options, escaped);
+ av_free(escaped);
+ } else
+#endif
+ ret = process_options(filter, &options, args);
+ if (ret < 0)
+ goto fail;
+ }
+ }
+
+ ret = avfilter_init_dict(filter, &options);
+ if (ret < 0)
+ goto fail;
+
+ if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
+ av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
+ ret = AVERROR_OPTION_NOT_FOUND;
+ goto fail;
+ }
+
+fail:
+ av_dict_free(&options);
+
return ret;
}
-const char *avfilter_pad_get_name(AVFilterPad *pads, int pad_idx)
+const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
{
return pads[pad_idx].name;
}
-enum AVMediaType avfilter_pad_get_type(AVFilterPad *pads, int pad_idx)
+enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
{
return pads[pad_idx].type;
}
@@ -639,6 +995,7 @@ static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
{
int (*filter_frame)(AVFilterLink *, AVFrame *);
+ AVFilterContext *dstctx = link->dst;
AVFilterPad *dst = link->dstpad;
AVFrame *out;
int ret;
@@ -701,7 +1058,20 @@ static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
}
pts = out->pts;
+ if (dstctx->enable_str) {
+ int64_t pos = av_frame_get_pkt_pos(out);
+ dstctx->var_values[VAR_N] = link->frame_count;
+ dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
+ dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
+
+ dstctx->is_disabled = fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) < 0.5;
+ if (dstctx->is_disabled &&
+ (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
+ filter_frame = default_filter_frame;
+ }
ret = filter_frame(link, out);
+ link->frame_count++;
+ link->frame_requested = 0;
ff_update_link_current_pts(link, pts);
return ret;
}
@@ -713,6 +1083,7 @@ static int ff_filter_frame_needs_framing(AVFilterLink *link, AVFrame *frame)
int nb_channels = av_frame_get_channels(frame);
int ret = 0;
+ link->flags |= FF_LINK_FLAG_REQUEST_LOOP;
/* Handle framing (min_samples, max_samples) */
while (insamples) {
if (!pbuf) {
@@ -724,8 +1095,9 @@ static int ff_filter_frame_needs_framing(AVFilterLink *link, AVFrame *frame)
return 0;
}
av_frame_copy_props(pbuf, frame);
- pbuf->pts = frame->pts +
- av_rescale_q(inpos, samples_tb, link->time_base);
+ pbuf->pts = frame->pts;
+ if (pbuf->pts != AV_NOPTS_VALUE)
+ pbuf->pts += av_rescale_q(inpos, samples_tb, link->time_base);
pbuf->nb_samples = 0;
}
nb_samples = FFMIN(insamples,
@@ -775,3 +1147,8 @@ int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
return ff_filter_frame_framed(link, frame);
}
}
+
+const AVClass *avfilter_get_class(void)
+{
+ return &avfilter_class;
+}
diff --git a/ffmpeg/libavfilter/avfilter.h b/ffmpeg/libavfilter/avfilter.h
index 455161f..3518ad8 100644
--- a/ffmpeg/libavfilter/avfilter.h
+++ b/ffmpeg/libavfilter/avfilter.h
@@ -25,16 +25,17 @@
/**
* @file
* @ingroup lavfi
- * external API header
+ * Main libavfilter public API header
*/
/**
- * @defgroup lavfi Libavfilter
+ * @defgroup lavfi Libavfilter - graph-based frame editing library
* @{
*/
#include <stddef.h>
+#include "libavutil/attributes.h"
#include "libavutil/avutil.h"
#include "libavutil/dict.h"
#include "libavutil/frame.h"
@@ -60,11 +61,6 @@ const char *avfilter_configuration(void);
*/
const char *avfilter_license(void);
-/**
- * Get the class for the AVFilterContext struct.
- */
-const AVClass *avfilter_get_class(void);
-
typedef struct AVFilterContext AVFilterContext;
typedef struct AVFilterLink AVFilterLink;
typedef struct AVFilterPad AVFilterPad;
@@ -394,6 +390,12 @@ struct AVFilterPad {
#endif
/**
+ * Get the number of elements in a NULL-terminated array of AVFilterPads (e.g.
+ * AVFilter.inputs/outputs).
+ */
+int avfilter_pad_count(const AVFilterPad *pads);
+
+/**
* Get the name of an AVFilterPad.
*
* @param pads an array of AVFilterPads
@@ -402,7 +404,7 @@ struct AVFilterPad {
*
* @return name of the pad_idx'th pad in pads
*/
-const char *avfilter_pad_get_name(AVFilterPad *pads, int pad_idx);
+const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx);
/**
* Get the type of an AVFilterPad.
@@ -413,23 +415,96 @@ const char *avfilter_pad_get_name(AVFilterPad *pads, int pad_idx);
*
* @return type of the pad_idx'th pad in pads
*/
-enum AVMediaType avfilter_pad_get_type(AVFilterPad *pads, int pad_idx);
+enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx);
+
+/**
+ * The number of the filter inputs is not determined just by AVFilter.inputs.
+ * The filter might add additional inputs during initialization depending on the
+ * options supplied to it.
+ */
+#define AVFILTER_FLAG_DYNAMIC_INPUTS (1 << 0)
+/**
+ * The number of the filter outputs is not determined just by AVFilter.outputs.
+ * The filter might add additional outputs during initialization depending on
+ * the options supplied to it.
+ */
+#define AVFILTER_FLAG_DYNAMIC_OUTPUTS (1 << 1)
+/**
+ * The filter supports multithreading by splitting frames into multiple parts
+ * and processing them concurrently.
+ */
+#define AVFILTER_FLAG_SLICE_THREADS (1 << 2)
+/**
+ * Some filters support a generic "enable" expression option that can be used
+ * to enable or disable a filter in the timeline. Filters supporting this
+ * option have this flag set. When the enable expression is false, the default
+ * no-op filter_frame() function is called in place of the filter_frame()
+ * callback defined on each input pad, thus the frame is passed unchanged to
+ * the next filters.
+ */
+#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC (1 << 16)
+/**
+ * Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will
+ * have its filter_frame() callback(s) called as usual even when the enable
+ * expression is false. The filter will disable filtering within the
+ * filter_frame() callback(s) itself, for example executing code depending on
+ * the AVFilterContext->is_disabled value.
+ */
+#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL (1 << 17)
+/**
+ * Handy mask to test whether the filter supports or no the timeline feature
+ * (internally or generically).
+ */
+#define AVFILTER_FLAG_SUPPORT_TIMELINE (AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL)
/**
* Filter definition. This defines the pads a filter contains, and all the
* callback functions used to interact with the filter.
*/
typedef struct AVFilter {
- const char *name; ///< filter name
+ /**
+ * Filter name. Must be non-NULL and unique among filters.
+ */
+ const char *name;
/**
- * A description for the filter. You should use the
- * NULL_IF_CONFIG_SMALL() macro to define it.
+ * A description of the filter. May be NULL.
+ *
+ * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
*/
const char *description;
- const AVFilterPad *inputs; ///< NULL terminated list of inputs. NULL if none
- const AVFilterPad *outputs; ///< NULL terminated list of outputs. NULL if none
+ /**
+ * List of inputs, terminated by a zeroed element.
+ *
+ * NULL if there are no (static) inputs. Instances of filters with
+ * AVFILTER_FLAG_DYNAMIC_INPUTS set may have more inputs than present in
+ * this list.
+ */
+ const AVFilterPad *inputs;
+ /**
+ * List of outputs, terminated by a zeroed element.
+ *
+ * NULL if there are no (static) outputs. Instances of filters with
+ * AVFILTER_FLAG_DYNAMIC_OUTPUTS set may have more outputs than present in
+ * this list.
+ */
+ const AVFilterPad *outputs;
+
+ /**
+ * A class for the private data, used to declare filter private AVOptions.
+ * This field is NULL for filters that do not declare any options.
+ *
+ * If this field is non-NULL, the first member of the filter private data
+ * must be a pointer to AVClass, which will be set by libavfilter generic
+ * code to this class.
+ */
+ const AVClass *priv_class;
+
+ /**
+ * A combination of AVFILTER_FLAG_*
+ */
+ int flags;
/*****************************************************************
* All fields below this line are not part of the public API. They
@@ -440,22 +515,71 @@ typedef struct AVFilter {
*/
/**
- * Filter initialization function. Args contains the user-supplied
- * parameters. FIXME: maybe an AVOption-based system would be better?
+ * Filter initialization function.
+ *
+ * This callback will be called only once during the filter lifetime, after
+ * all the options have been set, but before links between filters are
+ * established and format negotiation is done.
+ *
+ * Basic filter initialization should be done here. Filters with dynamic
+ * inputs and/or outputs should create those inputs/outputs here based on
+ * provided options. No more changes to this filter's inputs/outputs can be
+ * done after this callback.
+ *
+ * This callback must not assume that the filter links exist or frame
+ * parameters are known.
+ *
+ * @ref AVFilter.uninit "uninit" is guaranteed to be called even if
+ * initialization fails, so this callback does not have to clean up on
+ * failure.
+ *
+ * @return 0 on success, a negative AVERROR on failure
*/
- int (*init)(AVFilterContext *ctx, const char *args);
+ int (*init)(AVFilterContext *ctx);
/**
- * Filter uninitialization function. Should deallocate any memory held
- * by the filter, release any buffer references, etc. This does not need
- * to deallocate the AVFilterContext->priv memory itself.
+ * Should be set instead of @ref AVFilter.init "init" by the filters that
+ * want to pass a dictionary of AVOptions to nested contexts that are
+ * allocated during init.
+ *
+ * On return, the options dict should be freed and replaced with one that
+ * contains all the options which could not be processed by this filter (or
+ * with NULL if all the options were processed).
+ *
+ * Otherwise the semantics is the same as for @ref AVFilter.init "init".
+ */
+ int (*init_dict)(AVFilterContext *ctx, AVDictionary **options);
+
+ /**
+ * Filter uninitialization function.
+ *
+ * Called only once right before the filter is freed. Should deallocate any
+ * memory held by the filter, release any buffer references, etc. It does
+ * not need to deallocate the AVFilterContext.priv memory itself.
+ *
+ * This callback may be called even if @ref AVFilter.init "init" was not
+ * called or failed, so it must be prepared to handle such a situation.
*/
void (*uninit)(AVFilterContext *ctx);
/**
- * Queries formats/layouts supported by the filter and its pads, and sets
- * the in_formats/in_chlayouts for links connected to its output pads,
- * and out_formats/out_chlayouts for links connected to its input pads.
+ * Query formats supported by the filter on its inputs and outputs.
+ *
+ * This callback is called after the filter is initialized (so the inputs
+ * and outputs are fixed), shortly before the format negotiation. This
+ * callback may be called more than once.
+ *
+ * This callback must set AVFilterLink.out_formats on every input link and
+ * AVFilterLink.in_formats on every output link to a list of pixel/sample
+ * formats that the filter supports on that link. For audio links, this
+ * filter must also set @ref AVFilterLink.in_samplerates "in_samplerates" /
+ * @ref AVFilterLink.out_samplerates "out_samplerates" and
+ * @ref AVFilterLink.in_channel_layouts "in_channel_layouts" /
+ * @ref AVFilterLink.out_channel_layouts "out_channel_layouts" analogously.
+ *
+ * This callback may be NULL for filters with one input, in which case
+ * libavfilter assumes that it supports all input formats and preserves
+ * them on output.
*
* @return zero on success, a negative value corresponding to an
* AVERROR code otherwise
@@ -465,6 +589,12 @@ typedef struct AVFilter {
int priv_size; ///< size of private data to allocate for the filter
/**
+ * Used by the filter registration system. Must not be touched by any other
+ * code.
+ */
+ struct AVFilter *next;
+
+ /**
* Make the filter instance process a command.
*
* @param cmd the command to process, for handling simplicity all commands must be alphanumeric only
@@ -483,45 +613,71 @@ typedef struct AVFilter {
* callback. Args contains the user-supplied parameters, opaque is
* used for providing binary data.
*/
- int (*init_opaque)(AVFilterContext *ctx, const char *args, void *opaque);
+ int (*init_opaque)(AVFilterContext *ctx, void *opaque);
+} AVFilter;
- const AVClass *priv_class; ///< private class, containing filter specific options
+/**
+ * Process multiple parts of the frame concurrently.
+ */
+#define AVFILTER_THREAD_SLICE (1 << 0)
- /**
- * Shorthand syntax for init arguments.
- * If this field is set (even to an empty list), just before init the
- * private class will be set and the arguments string will be parsed
- * using av_opt_set_from_string() with "=" and ":" delimiters, and
- * av_opt_free() will be called just after uninit.
- */
- const char *const *shorthand;
-} AVFilter;
+typedef struct AVFilterInternal AVFilterInternal;
/** An instance of a filter */
struct AVFilterContext {
- const AVClass *av_class; ///< needed for av_log()
+ const AVClass *av_class; ///< needed for av_log() and filters common options
- AVFilter *filter; ///< the AVFilter of which this is an instance
+ const AVFilter *filter; ///< the AVFilter of which this is an instance
char *name; ///< name of this filter instance
AVFilterPad *input_pads; ///< array of input pads
AVFilterLink **inputs; ///< array of pointers to input links
#if FF_API_FOO_COUNT
- unsigned input_count; ///< @deprecated use nb_inputs
+ attribute_deprecated unsigned input_count; ///< @deprecated use nb_inputs
#endif
unsigned nb_inputs; ///< number of input pads
AVFilterPad *output_pads; ///< array of output pads
AVFilterLink **outputs; ///< array of pointers to output links
#if FF_API_FOO_COUNT
- unsigned output_count; ///< @deprecated use nb_outputs
+ attribute_deprecated unsigned output_count; ///< @deprecated use nb_outputs
#endif
unsigned nb_outputs; ///< number of output pads
void *priv; ///< private data for use by the filter
+ struct AVFilterGraph *graph; ///< filtergraph this filter belongs to
+
+ /**
+ * Type of multithreading being allowed/used. A combination of
+ * AVFILTER_THREAD_* flags.
+ *
+ * May be set by the caller before initializing the filter to forbid some
+ * or all kinds of multithreading for this filter. The default is allowing
+ * everything.
+ *
+ * When the filter is initialized, this field is combined using bit AND with
+ * AVFilterGraph.thread_type to get the final mask used for determining
+ * allowed threading types. I.e. a threading type needs to be set in both
+ * to be allowed.
+ *
+ * After the filter is initialzed, libavfilter sets this field to the
+ * threading type that is actually used (0 for no multithreading).
+ */
+ int thread_type;
+
+ /**
+ * An opaque struct for libavfilter internal use.
+ */
+ AVFilterInternal *internal;
+
struct AVFilterCommand *command_queue;
+
+ char *enable_str; ///< enable expression string
+ void *enable; ///< parsed expression (AVExpr*)
+ double *var_values; ///< variable values for the enable expression
+ int is_disabled; ///< the enabled state from the last expression evaluation
};
/**
@@ -682,6 +838,22 @@ struct AVFilterLink {
* Number of channels.
*/
int channels;
+
+ /**
+ * True if a frame is being requested on the link.
+ * Used internally by the framework.
+ */
+ unsigned frame_requested;
+
+ /**
+ * Link processing flags.
+ */
+ unsigned flags;
+
+ /**
+ * Number of past frames sent through the link.
+ */
+ int64_t frame_count;
};
/**
@@ -794,14 +966,17 @@ int avfilter_process_command(AVFilterContext *filter, const char *cmd, const cha
/** Initialize the filter system. Register all builtin filters. */
void avfilter_register_all(void);
+#if FF_API_OLD_FILTER_REGISTER
/** Uninitialize the filter system. Unregister all filters. */
+attribute_deprecated
void avfilter_uninit(void);
+#endif
/**
* Register a filter. This is only needed if you plan to use
* avfilter_get_by_name later to lookup the AVFilter structure by name. A
- * filter can still by instantiated with avfilter_open even if it is not
- * registered.
+ * filter can still by instantiated with avfilter_graph_alloc_filter even if it
+ * is not registered.
*
* @param filter the filter to register
* @return 0 if the registration was successful, a negative value
@@ -816,16 +991,31 @@ int avfilter_register(AVFilter *filter);
* @return the filter definition, if any matching one is registered.
* NULL if none found.
*/
+#if !FF_API_NOCONST_GET_NAME
+const
+#endif
AVFilter *avfilter_get_by_name(const char *name);
/**
+ * Iterate over all registered filters.
+ * @return If prev is non-NULL, next registered filter after prev or NULL if
+ * prev is the last filter. If prev is NULL, return the first registered filter.
+ */
+const AVFilter *avfilter_next(const AVFilter *prev);
+
+#if FF_API_OLD_FILTER_REGISTER
+/**
* If filter is NULL, returns a pointer to the first registered filter pointer,
* if filter is non-NULL, returns the next pointer after filter.
* If the returned pointer points to NULL, the last registered filter
* was already reached.
+ * @deprecated use avfilter_next()
*/
+attribute_deprecated
AVFilter **av_filter_next(AVFilter **filter);
+#endif
+#if FF_API_AVFILTER_OPEN
/**
* Create a filter instance.
*
@@ -834,9 +1024,14 @@ AVFilter **av_filter_next(AVFilter **filter);
* @param filter the filter to create an instance of
* @param inst_name Name to give to the new instance. Can be NULL for none.
* @return >= 0 in case of success, a negative error code otherwise
+ * @deprecated use avfilter_graph_alloc_filter() instead
*/
+attribute_deprecated
int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name);
+#endif
+
+#if FF_API_AVFILTER_INIT_FILTER
/**
* Initialize a filter.
*
@@ -847,10 +1042,47 @@ int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *in
* of this parameter varies by filter.
* @return zero on success
*/
+attribute_deprecated
int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque);
+#endif
+
+/**
+ * Initialize a filter with the supplied parameters.
+ *
+ * @param ctx uninitialized filter context to initialize
+ * @param args Options to initialize the filter with. This must be a
+ * ':'-separated list of options in the 'key=value' form.
+ * May be NULL if the options have been set directly using the
+ * AVOptions API or there are no options that need to be set.
+ * @return 0 on success, a negative AVERROR on failure
+ */
+int avfilter_init_str(AVFilterContext *ctx, const char *args);
/**
- * Free a filter context.
+ * Initialize a filter with the supplied dictionary of options.
+ *
+ * @param ctx uninitialized filter context to initialize
+ * @param options An AVDictionary filled with options for this filter. On
+ * return this parameter will be destroyed and replaced with
+ * a dict containing options that were not found. This dictionary
+ * must be freed by the caller.
+ * May be NULL, then this function is equivalent to
+ * avfilter_init_str() with the second parameter set to NULL.
+ * @return 0 on success, a negative AVERROR on failure
+ *
+ * @note This function and avfilter_init_str() do essentially the same thing,
+ * the difference is in the manner in which the options are passed. It is up to the
+ * calling code to choose whichever is more preferable. The two functions also
+ * behave differently when some of the provided options are not declared as
+ * supported by the filter. In such a case, avfilter_init_str() will fail, but
+ * this function will leave those extra options in the options AVDictionary and
+ * continue as usual.
+ */
+int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options);
+
+/**
+ * Free a filter context. This will also remove the filter from its
+ * filtergraph's list of filters.
*
* @param filter the filter to free
*/
@@ -889,6 +1121,402 @@ int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src);
#endif
/**
+ * @return AVClass for AVFilterContext.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *avfilter_get_class(void);
+
+typedef struct AVFilterGraphInternal AVFilterGraphInternal;
+
+/**
+ * A function pointer passed to the @ref AVFilterGraph.execute callback to be
+ * executed multiple times, possibly in parallel.
+ *
+ * @param ctx the filter context the job belongs to
+ * @param arg an opaque parameter passed through from @ref
+ * AVFilterGraph.execute
+ * @param jobnr the index of the job being executed
+ * @param nb_jobs the total number of jobs
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+typedef int (avfilter_action_func)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
+
+/**
+ * A function executing multiple jobs, possibly in parallel.
+ *
+ * @param ctx the filter context to which the jobs belong
+ * @param func the function to be called multiple times
+ * @param arg the argument to be passed to func
+ * @param ret a nb_jobs-sized array to be filled with return values from each
+ * invocation of func
+ * @param nb_jobs the number of jobs to execute
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+typedef int (avfilter_execute_func)(AVFilterContext *ctx, avfilter_action_func *func,
+ void *arg, int *ret, int nb_jobs);
+
+typedef struct AVFilterGraph {
+ const AVClass *av_class;
+#if FF_API_FOO_COUNT
+ attribute_deprecated
+ unsigned filter_count_unused;
+#endif
+ AVFilterContext **filters;
+#if !FF_API_FOO_COUNT
+ unsigned nb_filters;
+#endif
+
+ char *scale_sws_opts; ///< sws options to use for the auto-inserted scale filters
+ char *resample_lavr_opts; ///< libavresample options to use for the auto-inserted resample filters
+#if FF_API_FOO_COUNT
+ unsigned nb_filters;
+#endif
+
+ /**
+ * Type of multithreading allowed for filters in this graph. A combination
+ * of AVFILTER_THREAD_* flags.
+ *
+ * May be set by the caller at any point, the setting will apply to all
+ * filters initialized after that. The default is allowing everything.
+ *
+ * When a filter in this graph is initialized, this field is combined using
+ * bit AND with AVFilterContext.thread_type to get the final mask used for
+ * determining allowed threading types. I.e. a threading type needs to be
+ * set in both to be allowed.
+ */
+ int thread_type;
+
+ /**
+ * Maximum number of threads used by filters in this graph. May be set by
+ * the caller before adding any filters to the filtergraph. Zero (the
+ * default) means that the number of threads is determined automatically.
+ */
+ int nb_threads;
+
+ /**
+ * Opaque object for libavfilter internal use.
+ */
+ AVFilterGraphInternal *internal;
+
+ /**
+ * Opaque user data. May be set by the caller to an arbitrary value, e.g. to
+ * be used from callbacks like @ref AVFilterGraph.execute.
+ * Libavfilter will not touch this field in any way.
+ */
+ void *opaque;
+
+ /**
+ * This callback may be set by the caller immediately after allocating the
+ * graph and before adding any filters to it, to provide a custom
+ * multithreading implementation.
+ *
+ * If set, filters with slice threading capability will call this callback
+ * to execute multiple jobs in parallel.
+ *
+ * If this field is left unset, libavfilter will use its internal
+ * implementation, which may or may not be multithreaded depending on the
+ * platform and build options.
+ */
+ avfilter_execute_func *execute;
+
+ char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters, Access ONLY through AVOptions
+
+ /**
+ * Private fields
+ *
+ * The following fields are for internal use only.
+ * Their type, offset, number and semantic can change without notice.
+ */
+
+ AVFilterLink **sink_links;
+ int sink_links_count;
+
+ unsigned disable_auto_convert;
+} AVFilterGraph;
+
+/**
+ * Allocate a filter graph.
+ */
+AVFilterGraph *avfilter_graph_alloc(void);
+
+/**
+ * Create a new filter instance in a filter graph.
+ *
+ * @param graph graph in which the new filter will be used
+ * @param filter the filter to create an instance of
+ * @param name Name to give to the new instance (will be copied to
+ * AVFilterContext.name). This may be used by the caller to identify
+ * different filters, libavfilter itself assigns no semantics to
+ * this parameter. May be NULL.
+ *
+ * @return the context of the newly created filter instance (note that it is
+ * also retrievable directly through AVFilterGraph.filters or with
+ * avfilter_graph_get_filter()) on success or NULL on failure.
+ */
+AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph,
+ const AVFilter *filter,
+ const char *name);
+
+/**
+ * Get a filter instance with name name from graph.
+ *
+ * @return the pointer to the found filter instance or NULL if it
+ * cannot be found.
+ */
+AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name);
+
+#if FF_API_AVFILTER_OPEN
+/**
+ * Add an existing filter instance to a filter graph.
+ *
+ * @param graphctx the filter graph
+ * @param filter the filter to be added
+ *
+ * @deprecated use avfilter_graph_alloc_filter() to allocate a filter in a
+ * filter graph
+ */
+attribute_deprecated
+int avfilter_graph_add_filter(AVFilterGraph *graphctx, AVFilterContext *filter);
+#endif
+
+/**
+ * Create and add a filter instance into an existing graph.
+ * The filter instance is created from the filter filt and inited
+ * with the parameters args and opaque.
+ *
+ * In case of success put in *filt_ctx the pointer to the created
+ * filter instance, otherwise set *filt_ctx to NULL.
+ *
+ * @param name the instance name to give to the created filter instance
+ * @param graph_ctx the filter graph
+ * @return a negative AVERROR error code in case of failure, a non
+ * negative value otherwise
+ */
+int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt,
+ const char *name, const char *args, void *opaque,
+ AVFilterGraph *graph_ctx);
+
+/**
+ * Enable or disable automatic format conversion inside the graph.
+ *
+ * Note that format conversion can still happen inside explicitly inserted
+ * scale and aresample filters.
+ *
+ * @param flags any of the AVFILTER_AUTO_CONVERT_* constants
+ */
+void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags);
+
+enum {
+ AVFILTER_AUTO_CONVERT_ALL = 0, /**< all automatic conversions enabled */
+ AVFILTER_AUTO_CONVERT_NONE = -1, /**< all automatic conversions disabled */
+};
+
+/**
+ * Check validity and configure all the links and formats in the graph.
+ *
+ * @param graphctx the filter graph
+ * @param log_ctx context used for logging
+ * @return >= 0 in case of success, a negative AVERROR code otherwise
+ */
+int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx);
+
+/**
+ * Free a graph, destroy its links, and set *graph to NULL.
+ * If *graph is NULL, do nothing.
+ */
+void avfilter_graph_free(AVFilterGraph **graph);
+
+/**
+ * A linked-list of the inputs/outputs of the filter chain.
+ *
+ * This is mainly useful for avfilter_graph_parse() / avfilter_graph_parse2(),
+ * where it is used to communicate open (unlinked) inputs and outputs from and
+ * to the caller.
+ * This struct specifies, per each not connected pad contained in the graph, the
+ * filter context and the pad index required for establishing a link.
+ */
+typedef struct AVFilterInOut {
+ /** unique name for this input/output in the list */
+ char *name;
+
+ /** filter context associated to this input/output */
+ AVFilterContext *filter_ctx;
+
+ /** index of the filt_ctx pad to use for linking */
+ int pad_idx;
+
+ /** next input/input in the list, NULL if this is the last */
+ struct AVFilterInOut *next;
+} AVFilterInOut;
+
+/**
+ * Allocate a single AVFilterInOut entry.
+ * Must be freed with avfilter_inout_free().
+ * @return allocated AVFilterInOut on success, NULL on failure.
+ */
+AVFilterInOut *avfilter_inout_alloc(void);
+
+/**
+ * Free the supplied list of AVFilterInOut and set *inout to NULL.
+ * If *inout is NULL, do nothing.
+ */
+void avfilter_inout_free(AVFilterInOut **inout);
+
+#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI || !FF_API_OLD_GRAPH_PARSE
+/**
+ * Add a graph described by a string to a graph.
+ *
+ * @note The caller must provide the lists of inputs and outputs,
+ * which therefore must be known before calling the function.
+ *
+ * @note The inputs parameter describes inputs of the already existing
+ * part of the graph; i.e. from the point of view of the newly created
+ * part, they are outputs. Similarly the outputs parameter describes
+ * outputs of the already existing filters, which are provided as
+ * inputs to the parsed filters.
+ *
+ * @param graph the filter graph where to link the parsed graph context
+ * @param filters string to be parsed
+ * @param inputs linked list to the inputs of the graph
+ * @param outputs linked list to the outputs of the graph
+ * @return zero on success, a negative AVERROR code on error
+ */
+int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut *inputs, AVFilterInOut *outputs,
+ void *log_ctx);
+#else
+/**
+ * Add a graph described by a string to a graph.
+ *
+ * @param graph the filter graph where to link the parsed graph context
+ * @param filters string to be parsed
+ * @param inputs pointer to a linked list to the inputs of the graph, may be NULL.
+ * If non-NULL, *inputs is updated to contain the list of open inputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @param outputs pointer to a linked list to the outputs of the graph, may be NULL.
+ * If non-NULL, *outputs is updated to contain the list of open outputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @return non negative on success, a negative AVERROR code on error
+ * @deprecated Use avfilter_graph_parse_ptr() instead.
+ */
+attribute_deprecated
+int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut **inputs, AVFilterInOut **outputs,
+ void *log_ctx);
+#endif
+
+/**
+ * Add a graph described by a string to a graph.
+ *
+ * @param graph the filter graph where to link the parsed graph context
+ * @param filters string to be parsed
+ * @param inputs pointer to a linked list to the inputs of the graph, may be NULL.
+ * If non-NULL, *inputs is updated to contain the list of open inputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @param outputs pointer to a linked list to the outputs of the graph, may be NULL.
+ * If non-NULL, *outputs is updated to contain the list of open outputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @return non negative on success, a negative AVERROR code on error
+ */
+int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut **inputs, AVFilterInOut **outputs,
+ void *log_ctx);
+
+/**
+ * Add a graph described by a string to a graph.
+ *
+ * @param[in] graph the filter graph where to link the parsed graph context
+ * @param[in] filters string to be parsed
+ * @param[out] inputs a linked list of all free (unlinked) inputs of the
+ * parsed graph will be returned here. It is to be freed
+ * by the caller using avfilter_inout_free().
+ * @param[out] outputs a linked list of all free (unlinked) outputs of the
+ * parsed graph will be returned here. It is to be freed by the
+ * caller using avfilter_inout_free().
+ * @return zero on success, a negative AVERROR code on error
+ *
+ * @note This function returns the inputs and outputs that are left
+ * unlinked after parsing the graph and the caller then deals with
+ * them.
+ * @note This function makes no reference whatsoever to already
+ * existing parts of the graph and the inputs parameter will on return
+ * contain inputs of the newly parsed part of the graph. Analogously
+ * the outputs parameter will contain outputs of the newly created
+ * filters.
+ */
+int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut **inputs,
+ AVFilterInOut **outputs);
+
+/**
+ * Send a command to one or more filter instances.
+ *
+ * @param graph the filter graph
+ * @param target the filter(s) to which the command should be sent
+ * "all" sends to all filters
+ * otherwise it can be a filter or filter instance name
+ * which will send the command to all matching filters.
+ * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only
+ * @param arg the argument for the command
+ * @param res a buffer with size res_len where the filter(s) can return a response.
+ *
+ * @returns >=0 on success otherwise an error code.
+ * AVERROR(ENOSYS) on unsupported commands
+ */
+int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags);
+
+/**
+ * Queue a command for one or more filter instances.
+ *
+ * @param graph the filter graph
+ * @param target the filter(s) to which the command should be sent
+ * "all" sends to all filters
+ * otherwise it can be a filter or filter instance name
+ * which will send the command to all matching filters.
+ * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only
+ * @param arg the argument for the command
+ * @param ts time at which the command should be sent to the filter
+ *
+ * @note As this executes commands after this function returns, no return code
+ * from the filter is provided, also AVFILTER_CMD_FLAG_ONE is not supported.
+ */
+int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts);
+
+
+/**
+ * Dump a graph into a human-readable string representation.
+ *
+ * @param graph the graph to dump
+ * @param options formatting options; currently ignored
+ * @return a string, or NULL in case of memory allocation failure;
+ * the string must be freed using av_free
+ */
+char *avfilter_graph_dump(AVFilterGraph *graph, const char *options);
+
+/**
+ * Request a frame on the oldest sink link.
+ *
+ * If the request returns AVERROR_EOF, try the next.
+ *
+ * Note that this function is not meant to be the sole scheduling mechanism
+ * of a filtergraph, only a convenience function to help drain a filtergraph
+ * in a balanced way under normal circumstances.
+ *
+ * Also note that AVERROR_EOF does not mean that frames did not arrive on
+ * some of the sinks during the process.
+ * When there are multiple sink links, in case the requested link
+ * returns an EOF, this may cause a filter to flush pending frames
+ * which are sent to another sink link, although unrequested.
+ *
+ * @return the return value of ff_request_frame(),
+ * or AVERROR_EOF if all links returned AVERROR_EOF
+ */
+int avfilter_graph_request_oldest(AVFilterGraph *graph);
+
+/**
* @}
*/
diff --git a/ffmpeg/libavfilter/avfiltergraph.c b/ffmpeg/libavfilter/avfiltergraph.c
index 89cdda3..1fb83c4 100644
--- a/ffmpeg/libavfilter/avfiltergraph.c
+++ b/ffmpeg/libavfilter/avfiltergraph.c
@@ -20,58 +20,112 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "config.h"
+
#include <string.h>
#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
+#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavcodec/avcodec.h" // avcodec_find_best_pix_fmt_of_2()
+
#include "avfilter.h"
-#include "avfiltergraph.h"
#include "formats.h"
#include "internal.h"
-
-#define OFFSET(x) offsetof(AVFilterGraph,x)
-
-static const AVOption options[]={
-{"scale_sws_opts" , "default scale filter options" , OFFSET(scale_sws_opts) , AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, 0 },
-{"aresample_swr_opts" , "default aresample filter options" , OFFSET(aresample_swr_opts) , AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, 0 },
-{0}
+#include "thread.h"
+
+#define OFFSET(x) offsetof(AVFilterGraph, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption filtergraph_options[] = {
+ { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
+ { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
+ { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .flags = FLAGS, .unit = "thread_type" },
+ { "threads", "Maximum number of threads", OFFSET(nb_threads),
+ AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
+ {"scale_sws_opts" , "default scale filter options" , OFFSET(scale_sws_opts) ,
+ AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ {"aresample_swr_opts" , "default aresample filter options" , OFFSET(aresample_swr_opts) ,
+ AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { NULL },
};
-
static const AVClass filtergraph_class = {
.class_name = "AVFilterGraph",
.item_name = av_default_item_name,
- .option = options,
.version = LIBAVUTIL_VERSION_INT,
+ .option = filtergraph_options,
.category = AV_CLASS_CATEGORY_FILTER,
};
+#if !HAVE_THREADS
+void ff_graph_thread_free(AVFilterGraph *graph)
+{
+}
+
+int ff_graph_thread_init(AVFilterGraph *graph)
+{
+ graph->thread_type = 0;
+ graph->nb_threads = 1;
+ return 0;
+}
+#endif
+
AVFilterGraph *avfilter_graph_alloc(void)
{
AVFilterGraph *ret = av_mallocz(sizeof(*ret));
if (!ret)
return NULL;
+
+ ret->internal = av_mallocz(sizeof(*ret->internal));
+ if (!ret->internal) {
+ av_freep(&ret);
+ return NULL;
+ }
+
ret->av_class = &filtergraph_class;
+ av_opt_set_defaults(ret);
+
return ret;
}
+void ff_filter_graph_remove_filter(AVFilterGraph *graph, AVFilterContext *filter)
+{
+ int i;
+ for (i = 0; i < graph->nb_filters; i++) {
+ if (graph->filters[i] == filter) {
+ FFSWAP(AVFilterContext*, graph->filters[i],
+ graph->filters[graph->nb_filters - 1]);
+ graph->nb_filters--;
+ return;
+ }
+ }
+}
+
void avfilter_graph_free(AVFilterGraph **graph)
{
if (!*graph)
return;
- for (; (*graph)->nb_filters > 0; (*graph)->nb_filters--)
- avfilter_free((*graph)->filters[(*graph)->nb_filters - 1]);
+
+ while ((*graph)->nb_filters)
+ avfilter_free((*graph)->filters[0]);
+
+ ff_graph_thread_free(*graph);
+
av_freep(&(*graph)->sink_links);
+
av_freep(&(*graph)->scale_sws_opts);
av_freep(&(*graph)->aresample_swr_opts);
av_freep(&(*graph)->resample_lavr_opts);
av_freep(&(*graph)->filters);
+ av_freep(&(*graph)->internal);
av_freep(graph);
}
+#if FF_API_AVFILTER_OPEN
int avfilter_graph_add_filter(AVFilterGraph *graph, AVFilterContext *filter)
{
AVFilterContext **filters = av_realloc(graph->filters,
@@ -82,21 +136,32 @@ int avfilter_graph_add_filter(AVFilterGraph *graph, AVFilterContext *filter)
graph->filters = filters;
graph->filters[graph->nb_filters++] = filter;
+#if FF_API_FOO_COUNT
+FF_DISABLE_DEPRECATION_WARNINGS
+ graph->filter_count_unused = graph->nb_filters;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ filter->graph = graph;
+
return 0;
}
+#endif
-int avfilter_graph_create_filter(AVFilterContext **filt_ctx, AVFilter *filt,
+int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt,
const char *name, const char *args, void *opaque,
AVFilterGraph *graph_ctx)
{
int ret;
- if ((ret = avfilter_open(filt_ctx, filt, name)) < 0)
- goto fail;
- if ((ret = avfilter_init_filter(*filt_ctx, args, opaque)) < 0)
- goto fail;
- if ((ret = avfilter_graph_add_filter(graph_ctx, *filt_ctx)) < 0)
+ *filt_ctx = avfilter_graph_alloc_filter(graph_ctx, filt, name);
+ if (!*filt_ctx)
+ return AVERROR(ENOMEM);
+
+ ret = avfilter_init_str(*filt_ctx, args);
+ if (ret < 0)
goto fail;
+
return 0;
fail:
@@ -111,13 +176,55 @@ void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
graph->disable_auto_convert = flags;
}
+AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph,
+ const AVFilter *filter,
+ const char *name)
+{
+ AVFilterContext **filters, *s;
+
+ if (graph->thread_type && !graph->internal->thread_execute) {
+ if (graph->execute) {
+ graph->internal->thread_execute = graph->execute;
+ } else {
+ int ret = ff_graph_thread_init(graph);
+ if (ret < 0) {
+ av_log(graph, AV_LOG_ERROR, "Error initializing threading.\n");
+ return NULL;
+ }
+ }
+ }
+
+ s = ff_filter_alloc(filter, name);
+ if (!s)
+ return NULL;
+
+ filters = av_realloc(graph->filters, sizeof(*filters) * (graph->nb_filters + 1));
+ if (!filters) {
+ avfilter_free(s);
+ return NULL;
+ }
+
+ graph->filters = filters;
+ graph->filters[graph->nb_filters++] = s;
+
+#if FF_API_FOO_COUNT
+FF_DISABLE_DEPRECATION_WARNINGS
+ graph->filter_count_unused = graph->nb_filters;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ s->graph = graph;
+
+ return s;
+}
+
/**
* Check for the validity of graph.
*
* A graph is considered valid if all its input and output pads are
* connected.
*
- * @return 0 in case of success, a negative value otherwise
+ * @return >= 0 in case of success, a negative value otherwise
*/
static int graph_check_validity(AVFilterGraph *graph, AVClass *log_ctx)
{
@@ -155,7 +262,7 @@ static int graph_check_validity(AVFilterGraph *graph, AVClass *log_ctx)
/**
* Configure all the links of graphctx.
*
- * @return 0 in case of success, a negative value otherwise
+ * @return >= 0 in case of success, a negative value otherwise
*/
static int graph_config_links(AVFilterGraph *graph, AVClass *log_ctx)
{
@@ -211,8 +318,9 @@ static int filter_query_formats(AVFilterContext *ctx)
AVMEDIA_TYPE_VIDEO;
if ((ret = ctx->filter->query_formats(ctx)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Query format failed for '%s': %s\n",
- ctx->name, av_err2str(ret));
+ if (ret != AVERROR(EAGAIN))
+ av_log(ctx, AV_LOG_ERROR, "Query format failed for '%s': %s\n",
+ ctx->name, av_err2str(ret));
return ret;
}
@@ -238,26 +346,115 @@ static int filter_query_formats(AVFilterContext *ctx)
return 0;
}
+static int formats_declared(AVFilterContext *f)
+{
+ int i;
+
+ for (i = 0; i < f->nb_inputs; i++) {
+ if (!f->inputs[i]->out_formats)
+ return 0;
+ if (f->inputs[i]->type == AVMEDIA_TYPE_AUDIO &&
+ !(f->inputs[i]->out_samplerates &&
+ f->inputs[i]->out_channel_layouts))
+ return 0;
+ }
+ for (i = 0; i < f->nb_outputs; i++) {
+ if (!f->outputs[i]->in_formats)
+ return 0;
+ if (f->outputs[i]->type == AVMEDIA_TYPE_AUDIO &&
+ !(f->outputs[i]->in_samplerates &&
+ f->outputs[i]->in_channel_layouts))
+ return 0;
+ }
+ return 1;
+}
+
+static AVFilterFormats *clone_filter_formats(AVFilterFormats *arg)
+{
+ AVFilterFormats *a = av_memdup(arg, sizeof(*arg));
+ if (a) {
+ a->refcount = 0;
+ a->refs = NULL;
+ a->formats = av_memdup(a->formats, sizeof(*a->formats) * a->nb_formats);
+ if (!a->formats && arg->formats)
+ av_freep(&a);
+ }
+ return a;
+}
+
+static int can_merge_formats(AVFilterFormats *a_arg,
+ AVFilterFormats *b_arg,
+ enum AVMediaType type,
+ int is_sample_rate)
+{
+ AVFilterFormats *a, *b, *ret;
+ if (a_arg == b_arg)
+ return 1;
+ a = clone_filter_formats(a_arg);
+ b = clone_filter_formats(b_arg);
+
+ if (!a || !b) {
+ if (a)
+ av_freep(&a->formats);
+ if (b)
+ av_freep(&b->formats);
+
+ av_freep(&a);
+ av_freep(&b);
+
+ return 0;
+ }
+
+ if (is_sample_rate) {
+ ret = ff_merge_samplerates(a, b);
+ } else {
+ ret = ff_merge_formats(a, b, type);
+ }
+ if (ret) {
+ av_freep(&ret->formats);
+ av_freep(&ret->refs);
+ av_freep(&ret);
+ return 1;
+ } else {
+ av_freep(&a->formats);
+ av_freep(&b->formats);
+ av_freep(&a);
+ av_freep(&b);
+ return 0;
+ }
+}
+
+/**
+ * Perform one round of query_formats() and merging formats lists on the
+ * filter graph.
+ * @return >=0 if all links formats lists could be queried and merged;
+ * AVERROR(EAGAIN) some progress was made in the queries or merging
+ * and a later call may succeed;
+ * AVERROR(EIO) (may be changed) plus a log message if no progress
+ * was made and the negotiation is stuck;
+ * a negative error code if some other error happened
+ */
static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
{
int i, j, ret;
int scaler_count = 0, resampler_count = 0;
+ int count_queried = 0; /* successful calls to query_formats() */
+ int count_merged = 0; /* successful merge of formats lists */
+ int count_already_merged = 0; /* lists already merged */
+ int count_delayed = 0; /* lists that need to be merged later */
- for (j = 0; j < 2; j++) {
- /* ask all the sub-filters for their supported media formats */
for (i = 0; i < graph->nb_filters; i++) {
- /* Call query_formats on sources first.
- This is a temporary workaround for amerge,
- until format renegociation is implemented. */
- if (!graph->filters[i]->nb_inputs == j)
+ AVFilterContext *f = graph->filters[i];
+ if (formats_declared(f))
continue;
- if (graph->filters[i]->filter->query_formats)
- ret = filter_query_formats(graph->filters[i]);
+ if (f->filter->query_formats)
+ ret = filter_query_formats(f);
else
- ret = ff_default_query_formats(graph->filters[i]);
- if (ret < 0)
+ ret = ff_default_query_formats(f);
+ if (ret < 0 && ret != AVERROR(EAGAIN))
return ret;
- }
+ /* note: EAGAIN could indicate a partial success, not counted yet */
+ count_queried += ret >= 0;
}
/* go through and merge as many format lists as possible */
@@ -271,21 +468,49 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
if (!link)
continue;
- if (link->in_formats != link->out_formats &&
- !ff_merge_formats(link->in_formats, link->out_formats,
- link->type))
- convert_needed = 1;
- if (link->type == AVMEDIA_TYPE_AUDIO) {
- if (link->in_channel_layouts != link->out_channel_layouts &&
- !ff_merge_channel_layouts(link->in_channel_layouts,
- link->out_channel_layouts))
- convert_needed = 1;
- if (link->in_samplerates != link->out_samplerates &&
- !ff_merge_samplerates(link->in_samplerates,
- link->out_samplerates))
+ if (link->in_formats != link->out_formats
+ && link->in_formats && link->out_formats)
+ if (!can_merge_formats(link->in_formats, link->out_formats,
+ link->type, 0))
convert_needed = 1;
+ if (link->type == AVMEDIA_TYPE_AUDIO) {
+ if (link->in_samplerates != link->out_samplerates
+ && link->in_samplerates && link->out_samplerates)
+ if (!can_merge_formats(link->in_samplerates,
+ link->out_samplerates,
+ 0, 1))
+ convert_needed = 1;
+ }
+
+#define MERGE_DISPATCH(field, statement) \
+ if (!(link->in_ ## field && link->out_ ## field)) { \
+ count_delayed++; \
+ } else if (link->in_ ## field == link->out_ ## field) { \
+ count_already_merged++; \
+ } else if (!convert_needed) { \
+ count_merged++; \
+ statement \
}
+ if (link->type == AVMEDIA_TYPE_AUDIO) {
+ MERGE_DISPATCH(channel_layouts,
+ if (!ff_merge_channel_layouts(link->in_channel_layouts,
+ link->out_channel_layouts))
+ convert_needed = 1;
+ )
+ MERGE_DISPATCH(samplerates,
+ if (!ff_merge_samplerates(link->in_samplerates,
+ link->out_samplerates))
+ convert_needed = 1;
+ )
+ }
+ MERGE_DISPATCH(formats,
+ if (!ff_merge_formats(link->in_formats, link->out_formats,
+ link->type))
+ convert_needed = 1;
+ )
+#undef MERGE_DISPATCH
+
if (convert_needed) {
AVFilterContext *convert;
AVFilter *filter;
@@ -304,13 +529,9 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
snprintf(inst_name, sizeof(inst_name), "auto-inserted scaler %d",
scaler_count++);
- if (graph->scale_sws_opts)
- snprintf(scale_args, sizeof(scale_args), "0:0:%s", graph->scale_sws_opts);
- else
- snprintf(scale_args, sizeof(scale_args), "0:0");
if ((ret = avfilter_graph_create_filter(&convert, filter,
- inst_name, scale_args, NULL,
+ inst_name, graph->scale_sws_opts, NULL,
graph)) < 0)
return ret;
break;
@@ -368,6 +589,30 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
}
}
+ av_log(graph, AV_LOG_DEBUG, "query_formats: "
+ "%d queried, %d merged, %d already done, %d delayed\n",
+ count_queried, count_merged, count_already_merged, count_delayed);
+ if (count_delayed) {
+ AVBPrint bp;
+
+ /* if count_queried > 0, one filter at least did set its formats,
+ that will give additional information to its neighbour;
+ if count_merged > 0, one pair of formats lists at least was merged,
+ that will give additional information to all connected filters;
+ in both cases, progress was made and a new round must be done */
+ if (count_queried || count_merged)
+ return AVERROR(EAGAIN);
+ av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
+ for (i = 0; i < graph->nb_filters; i++)
+ if (!formats_declared(graph->filters[i]))
+ av_bprintf(&bp, "%s%s", bp.len ? ", " : "",
+ graph->filters[i]->name);
+ av_log(graph, AV_LOG_ERROR,
+ "The following filters could not choose their formats: %s\n"
+ "Consider inserting the (a)format filter near their input or "
+ "output.\n", bp.str);
+ return AVERROR(EIO);
+ }
return 0;
}
@@ -381,34 +626,38 @@ static int pick_format(AVFilterLink *link, AVFilterLink *ref)
int has_alpha= av_pix_fmt_desc_get(ref->format)->nb_components % 2 == 0;
enum AVPixelFormat best= AV_PIX_FMT_NONE;
int i;
- for (i=0; i<link->in_formats->format_count; i++) {
+ for (i=0; i<link->in_formats->nb_formats; i++) {
enum AVPixelFormat p = link->in_formats->formats[i];
best= avcodec_find_best_pix_fmt_of_2(best, p, ref->format, has_alpha, NULL);
}
av_log(link->src,AV_LOG_DEBUG, "picking %s out of %d ref:%s alpha:%d\n",
- av_get_pix_fmt_name(best), link->in_formats->format_count,
+ av_get_pix_fmt_name(best), link->in_formats->nb_formats,
av_get_pix_fmt_name(ref->format), has_alpha);
link->in_formats->formats[0] = best;
}
}
- link->in_formats->format_count = 1;
+ link->in_formats->nb_formats = 1;
link->format = link->in_formats->formats[0];
if (link->type == AVMEDIA_TYPE_AUDIO) {
- if (!link->in_samplerates->format_count) {
+ if (!link->in_samplerates->nb_formats) {
av_log(link->src, AV_LOG_ERROR, "Cannot select sample rate for"
" the link between filters %s and %s.\n", link->src->name,
link->dst->name);
return AVERROR(EINVAL);
}
- link->in_samplerates->format_count = 1;
+ link->in_samplerates->nb_formats = 1;
link->sample_rate = link->in_samplerates->formats[0];
if (link->in_channel_layouts->all_layouts) {
av_log(link->src, AV_LOG_ERROR, "Cannot select channel layout for"
" the link between filters %s and %s.\n", link->src->name,
link->dst->name);
+ if (!link->in_channel_layouts->all_counts)
+ av_log(link->src, AV_LOG_ERROR, "Unknown channel layouts not "
+ "supported, try specifying a channel layout using "
+ "'aformat=channel_layouts=something'.\n");
return AVERROR(EINVAL);
}
link->in_channel_layouts->nb_channel_layouts = 1;
@@ -450,6 +699,7 @@ do { \
\
if (!out_link->in_ ## list->nb) { \
add_format(&out_link->in_ ##list, fmt); \
+ ret = 1; \
break; \
} \
\
@@ -469,9 +719,9 @@ static int reduce_formats_on_filter(AVFilterContext *filter)
int i, j, k, ret = 0;
REDUCE_FORMATS(int, AVFilterFormats, formats, formats,
- format_count, ff_add_format);
+ nb_formats, ff_add_format);
REDUCE_FORMATS(int, AVFilterFormats, samplerates, formats,
- format_count, ff_add_format);
+ nb_formats, ff_add_format);
/* reduce channel layouts */
for (i = 0; i < filter->nb_inputs; i++) {
@@ -491,7 +741,8 @@ static int reduce_formats_on_filter(AVFilterContext *filter)
if (inlink->type != outlink->type || fmts->nb_channel_layouts == 1)
continue;
- if (fmts->all_layouts) {
+ if (fmts->all_layouts &&
+ (!FF_LAYOUT2COUNT(fmt) || fmts->all_counts)) {
/* Turn the infinite list into a singleton */
fmts->all_layouts = fmts->all_counts = 0;
ff_add_channel_layout(&outlink->in_channel_layouts, fmt);
@@ -534,7 +785,7 @@ static void swap_samplerates_on_filter(AVFilterContext *filter)
link = filter->inputs[i];
if (link->type == AVMEDIA_TYPE_AUDIO &&
- link->out_samplerates->format_count == 1)
+ link->out_samplerates->nb_formats== 1)
break;
}
if (i == filter->nb_inputs)
@@ -547,10 +798,10 @@ static void swap_samplerates_on_filter(AVFilterContext *filter)
int best_idx, best_diff = INT_MAX;
if (outlink->type != AVMEDIA_TYPE_AUDIO ||
- outlink->in_samplerates->format_count < 2)
+ outlink->in_samplerates->nb_formats < 2)
continue;
- for (j = 0; j < outlink->in_samplerates->format_count; j++) {
+ for (j = 0; j < outlink->in_samplerates->nb_formats; j++) {
int diff = abs(sample_rate - outlink->in_samplerates->formats[j]);
if (diff < best_diff) {
@@ -712,7 +963,7 @@ static void swap_sample_fmts_on_filter(AVFilterContext *filter)
link = filter->inputs[i];
if (link->type == AVMEDIA_TYPE_AUDIO &&
- link->out_formats->format_count == 1)
+ link->out_formats->nb_formats == 1)
break;
}
if (i == filter->nb_inputs)
@@ -726,10 +977,10 @@ static void swap_sample_fmts_on_filter(AVFilterContext *filter)
int best_idx = -1, best_score = INT_MIN;
if (outlink->type != AVMEDIA_TYPE_AUDIO ||
- outlink->in_formats->format_count < 2)
+ outlink->in_formats->nb_formats < 2)
continue;
- for (j = 0; j < outlink->in_formats->format_count; j++) {
+ for (j = 0; j < outlink->in_formats->nb_formats; j++) {
int out_format = outlink->in_formats->formats[j];
int out_bps = av_get_bytes_per_sample(out_format);
int score;
@@ -782,7 +1033,7 @@ static int pick_formats(AVFilterGraph *graph)
AVFilterContext *filter = graph->filters[i];
if (filter->nb_inputs){
for (j = 0; j < filter->nb_inputs; j++){
- if(filter->inputs[j]->in_formats && filter->inputs[j]->in_formats->format_count == 1) {
+ if(filter->inputs[j]->in_formats && filter->inputs[j]->in_formats->nb_formats == 1) {
if ((ret = pick_format(filter->inputs[j], NULL)) < 0)
return ret;
change = 1;
@@ -791,7 +1042,7 @@ static int pick_formats(AVFilterGraph *graph)
}
if (filter->nb_outputs){
for (j = 0; j < filter->nb_outputs; j++){
- if(filter->outputs[j]->in_formats && filter->outputs[j]->in_formats->format_count == 1) {
+ if(filter->outputs[j]->in_formats && filter->outputs[j]->in_formats->nb_formats == 1) {
if ((ret = pick_format(filter->outputs[j], NULL)) < 0)
return ret;
change = 1;
@@ -831,7 +1082,9 @@ static int graph_config_formats(AVFilterGraph *graph, AVClass *log_ctx)
int ret;
/* find supported formats from sub-filters, and merge along links */
- if ((ret = query_formats(graph, log_ctx)) < 0)
+ while ((ret = query_formats(graph, log_ctx)) == AVERROR(EAGAIN))
+ av_log(graph, AV_LOG_DEBUG, "query_formats not finished\n");
+ if (ret < 0)
return ret;
/* Once everything is merged, it's possible that we'll still have
@@ -953,24 +1206,24 @@ int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const
{
int i, r = AVERROR(ENOSYS);
- if(!graph)
+ if (!graph)
return r;
- if((flags & AVFILTER_CMD_FLAG_ONE) && !(flags & AVFILTER_CMD_FLAG_FAST)) {
- r=avfilter_graph_send_command(graph, target, cmd, arg, res, res_len, flags | AVFILTER_CMD_FLAG_FAST);
- if(r != AVERROR(ENOSYS))
+ if ((flags & AVFILTER_CMD_FLAG_ONE) && !(flags & AVFILTER_CMD_FLAG_FAST)) {
+ r = avfilter_graph_send_command(graph, target, cmd, arg, res, res_len, flags | AVFILTER_CMD_FLAG_FAST);
+ if (r != AVERROR(ENOSYS))
return r;
}
- if(res_len && res)
- res[0]= 0;
+ if (res_len && res)
+ res[0] = 0;
for (i = 0; i < graph->nb_filters; i++) {
AVFilterContext *filter = graph->filters[i];
- if(!strcmp(target, "all") || (filter->name && !strcmp(target, filter->name)) || !strcmp(target, filter->filter->name)){
+ if (!strcmp(target, "all") || (filter->name && !strcmp(target, filter->name)) || !strcmp(target, filter->filter->name)) {
r = avfilter_process_command(filter, cmd, arg, res, res_len, flags);
- if(r != AVERROR(ENOSYS)) {
- if((flags & AVFILTER_CMD_FLAG_ONE) || r<0)
+ if (r != AVERROR(ENOSYS)) {
+ if ((flags & AVFILTER_CMD_FLAG_ONE) || r < 0)
return r;
}
}
diff --git a/ffmpeg/libavfilter/avfiltergraph.h b/ffmpeg/libavfilter/avfiltergraph.h
index 61110f9..b31d581 100644
--- a/ffmpeg/libavfilter/avfiltergraph.h
+++ b/ffmpeg/libavfilter/avfiltergraph.h
@@ -25,256 +25,4 @@
#include "avfilter.h"
#include "libavutil/log.h"
-typedef struct AVFilterGraph {
- const AVClass *av_class;
-#if FF_API_FOO_COUNT
- attribute_deprecated
- unsigned filter_count_unused;
-#endif
- AVFilterContext **filters;
-#if !FF_API_FOO_COUNT
- unsigned nb_filters;
-#endif
-
- char *scale_sws_opts; ///< sws options to use for the auto-inserted scale filters
- char *resample_lavr_opts; ///< libavresample options to use for the auto-inserted resample filters
-#if FF_API_FOO_COUNT
- unsigned nb_filters;
-#endif
- char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters, Access ONLY through AVOptions
-
- /**
- * Private fields
- *
- * The following fields are for internal use only.
- * Their type, offset, number and semantic can change without notice.
- */
-
- AVFilterLink **sink_links;
- int sink_links_count;
-
- unsigned disable_auto_convert;
-} AVFilterGraph;
-
-/**
- * Allocate a filter graph.
- */
-AVFilterGraph *avfilter_graph_alloc(void);
-
-/**
- * Get a filter instance with name name from graph.
- *
- * @return the pointer to the found filter instance or NULL if it
- * cannot be found.
- */
-AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name);
-
-/**
- * Add an existing filter instance to a filter graph.
- *
- * @param graphctx the filter graph
- * @param filter the filter to be added
- */
-int avfilter_graph_add_filter(AVFilterGraph *graphctx, AVFilterContext *filter);
-
-/**
- * Create and add a filter instance into an existing graph.
- * The filter instance is created from the filter filt and inited
- * with the parameters args and opaque.
- *
- * In case of success put in *filt_ctx the pointer to the created
- * filter instance, otherwise set *filt_ctx to NULL.
- *
- * @param name the instance name to give to the created filter instance
- * @param graph_ctx the filter graph
- * @return a negative AVERROR error code in case of failure, a non
- * negative value otherwise
- */
-int avfilter_graph_create_filter(AVFilterContext **filt_ctx, AVFilter *filt,
- const char *name, const char *args, void *opaque,
- AVFilterGraph *graph_ctx);
-
-/**
- * Enable or disable automatic format conversion inside the graph.
- *
- * Note that format conversion can still happen inside explicitly inserted
- * scale and aconvert filters.
- *
- * @param flags any of the AVFILTER_AUTO_CONVERT_* constants
- */
-void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags);
-
-enum {
- AVFILTER_AUTO_CONVERT_ALL = 0, /**< all automatic conversions enabled */
- AVFILTER_AUTO_CONVERT_NONE = -1, /**< all automatic conversions disabled */
-};
-
-/**
- * Check validity and configure all the links and formats in the graph.
- *
- * @param graphctx the filter graph
- * @param log_ctx context used for logging
- * @return 0 in case of success, a negative AVERROR code otherwise
- */
-int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx);
-
-/**
- * Free a graph, destroy its links, and set *graph to NULL.
- * If *graph is NULL, do nothing.
- */
-void avfilter_graph_free(AVFilterGraph **graph);
-
-/**
- * A linked-list of the inputs/outputs of the filter chain.
- *
- * This is mainly useful for avfilter_graph_parse() / avfilter_graph_parse2(),
- * where it is used to communicate open (unlinked) inputs and outputs from and
- * to the caller.
- * This struct specifies, per each not connected pad contained in the graph, the
- * filter context and the pad index required for establishing a link.
- */
-typedef struct AVFilterInOut {
- /** unique name for this input/output in the list */
- char *name;
-
- /** filter context associated to this input/output */
- AVFilterContext *filter_ctx;
-
- /** index of the filt_ctx pad to use for linking */
- int pad_idx;
-
- /** next input/input in the list, NULL if this is the last */
- struct AVFilterInOut *next;
-} AVFilterInOut;
-
-/**
- * Allocate a single AVFilterInOut entry.
- * Must be freed with avfilter_inout_free().
- * @return allocated AVFilterInOut on success, NULL on failure.
- */
-AVFilterInOut *avfilter_inout_alloc(void);
-
-/**
- * Free the supplied list of AVFilterInOut and set *inout to NULL.
- * If *inout is NULL, do nothing.
- */
-void avfilter_inout_free(AVFilterInOut **inout);
-
-/**
- * Add a graph described by a string to a graph.
- *
- * @param graph the filter graph where to link the parsed graph context
- * @param filters string to be parsed
- * @param inputs pointer to a linked list to the inputs of the graph, may be NULL.
- * If non-NULL, *inputs is updated to contain the list of open inputs
- * after the parsing, should be freed with avfilter_inout_free().
- * @param outputs pointer to a linked list to the outputs of the graph, may be NULL.
- * If non-NULL, *outputs is updated to contain the list of open outputs
- * after the parsing, should be freed with avfilter_inout_free().
- * @return non negative on success, a negative AVERROR code on error
- */
-int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
- AVFilterInOut **inputs, AVFilterInOut **outputs,
- void *log_ctx);
-
-/**
- * Add a graph described by a string to a graph.
- *
- * @param[in] graph the filter graph where to link the parsed graph context
- * @param[in] filters string to be parsed
- * @param[out] inputs a linked list of all free (unlinked) inputs of the
- * parsed graph will be returned here. It is to be freed
- * by the caller using avfilter_inout_free().
- * @param[out] outputs a linked list of all free (unlinked) outputs of the
- * parsed graph will be returned here. It is to be freed by the
- * caller using avfilter_inout_free().
- * @return zero on success, a negative AVERROR code on error
- *
- * @note the difference between avfilter_graph_parse2() and
- * avfilter_graph_parse() is that in avfilter_graph_parse(), the caller provides
- * the lists of inputs and outputs, which therefore must be known before calling
- * the function. On the other hand, avfilter_graph_parse2() \em returns the
- * inputs and outputs that are left unlinked after parsing the graph and the
- * caller then deals with them. Another difference is that in
- * avfilter_graph_parse(), the inputs parameter describes inputs of the
- * <em>already existing</em> part of the graph; i.e. from the point of view of
- * the newly created part, they are outputs. Similarly the outputs parameter
- * describes outputs of the already existing filters, which are provided as
- * inputs to the parsed filters.
- * avfilter_graph_parse2() takes the opposite approach -- it makes no reference
- * whatsoever to already existing parts of the graph and the inputs parameter
- * will on return contain inputs of the newly parsed part of the graph.
- * Analogously the outputs parameter will contain outputs of the newly created
- * filters.
- */
-int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
- AVFilterInOut **inputs,
- AVFilterInOut **outputs);
-
-
-/**
- * Send a command to one or more filter instances.
- *
- * @param graph the filter graph
- * @param target the filter(s) to which the command should be sent
- * "all" sends to all filters
- * otherwise it can be a filter or filter instance name
- * which will send the command to all matching filters.
- * @param cmd the command to sent, for handling simplicity all commands must be alphanumeric only
- * @param arg the argument for the command
- * @param res a buffer with size res_size where the filter(s) can return a response.
- *
- * @returns >=0 on success otherwise an error code.
- * AVERROR(ENOSYS) on unsupported commands
- */
-int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags);
-
-/**
- * Queue a command for one or more filter instances.
- *
- * @param graph the filter graph
- * @param target the filter(s) to which the command should be sent
- * "all" sends to all filters
- * otherwise it can be a filter or filter instance name
- * which will send the command to all matching filters.
- * @param cmd the command to sent, for handling simplicity all commands must be alphanummeric only
- * @param arg the argument for the command
- * @param ts time at which the command should be sent to the filter
- *
- * @note As this executes commands after this function returns, no return code
- * from the filter is provided, also AVFILTER_CMD_FLAG_ONE is not supported.
- */
-int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts);
-
-
-/**
- * Dump a graph into a human-readable string representation.
- *
- * @param graph the graph to dump
- * @param options formatting options; currently ignored
- * @return a string, or NULL in case of memory allocation failure;
- * the string must be freed using av_free
- */
-char *avfilter_graph_dump(AVFilterGraph *graph, const char *options);
-
-/**
- * Request a frame on the oldest sink link.
- *
- * If the request returns AVERROR_EOF, try the next.
- *
- * Note that this function is not meant to be the sole scheduling mechanism
- * of a filtergraph, only a convenience function to help drain a filtergraph
- * in a balanced way under normal circumstances.
- *
- * Also note that AVERROR_EOF does not mean that frames did not arrive on
- * some of the sinks during the process.
- * When there are multiple sink links, in case the requested link
- * returns an EOF, this may cause a filter to flush pending frames
- * which are sent to another sink link, although unrequested.
- *
- * @return the return value of ff_request_frame(),
- * or AVERROR_EOF if all links returned AVERROR_EOF
- */
-int avfilter_graph_request_oldest(AVFilterGraph *graph);
-
#endif /* AVFILTER_AVFILTERGRAPH_H */
diff --git a/ffmpeg/libavfilter/buffer.c b/ffmpeg/libavfilter/buffer.c
index 29fedc4..a626184 100644
--- a/ffmpeg/libavfilter/buffer.c
+++ b/ffmpeg/libavfilter/buffer.c
@@ -24,13 +24,16 @@
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
+#include "libavutil/internal.h"
#include "libavcodec/avcodec.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"
#include "avcodec.h"
+#include "version.h"
+#if FF_API_AVFILTERBUFFER
void ff_avfilter_default_free_buffer(AVFilterBuffer *ptr)
{
if (ptr->extended_data != ptr->data)
@@ -111,7 +114,9 @@ void avfilter_unref_buffer(AVFilterBufferRef *ref)
void avfilter_unref_bufferp(AVFilterBufferRef **ref)
{
+FF_DISABLE_DEPRECATION_WARNINGS
avfilter_unref_buffer(*ref);
+FF_ENABLE_DEPRECATION_WARNINGS
*ref = NULL;
}
@@ -165,3 +170,4 @@ void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *s
av_dict_free(&dst->metadata);
av_dict_copy(&dst->metadata, src->metadata, 0);
}
+#endif /* FF_API_AVFILTERBUFFER */
diff --git a/ffmpeg/libavfilter/buffersink.c b/ffmpeg/libavfilter/buffersink.c
index bcb6525..a6b24ad 100644
--- a/ffmpeg/libavfilter/buffersink.c
+++ b/ffmpeg/libavfilter/buffersink.c
@@ -27,7 +27,9 @@
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
+#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
+#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
@@ -35,23 +37,32 @@
#include "internal.h"
typedef struct {
+ const AVClass *class;
AVFifoBuffer *fifo; ///< FIFO buffer of video frame references
unsigned warning_limit;
/* only used for video */
enum AVPixelFormat *pixel_fmts; ///< list of accepted pixel formats, must be terminated with -1
+ int pixel_fmts_size;
/* only used for audio */
enum AVSampleFormat *sample_fmts; ///< list of accepted sample formats, terminated by AV_SAMPLE_FMT_NONE
+ int sample_fmts_size;
int64_t *channel_layouts; ///< list of accepted channel layouts, terminated by -1
+ int channel_layouts_size;
+ int *channel_counts; ///< list of accepted channel counts, terminated by -1
+ int channel_counts_size;
int all_channel_counts;
int *sample_rates; ///< list of accepted sample rates, terminated by -1
+ int sample_rates_size;
/* only used for compat API */
- AVAudioFifo *audio_fifo; ///< FIFO for audio samples
+ AVAudioFifo *audio_fifo; ///< FIFO for audio samples
int64_t next_pts; ///< interpolating audio pts
} BufferSinkContext;
+#define NB_ITEMS(list) (list ## _size / sizeof(*list))
+
static av_cold void uninit(AVFilterContext *ctx)
{
BufferSinkContext *sink = ctx->priv;
@@ -63,15 +74,11 @@ static av_cold void uninit(AVFilterContext *ctx)
if (sink->fifo) {
while (av_fifo_size(sink->fifo) >= sizeof(AVFilterBufferRef *)) {
av_fifo_generic_read(sink->fifo, &frame, sizeof(frame), NULL);
- av_frame_unref(frame);
+ av_frame_free(&frame);
}
av_fifo_free(sink->fifo);
sink->fifo = NULL;
}
- av_freep(&sink->pixel_fmts);
- av_freep(&sink->sample_fmts);
- av_freep(&sink->sample_rates);
- av_freep(&sink->channel_layouts);
}
static int add_buffer_ref(AVFilterContext *ctx, AVFrame *ref)
@@ -112,7 +119,7 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
return 0;
}
-int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
+int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
{
return av_buffersink_get_frame_flags(ctx, frame, 0);
}
@@ -160,17 +167,18 @@ static int read_from_fifo(AVFilterContext *ctx, AVFrame *frame,
av_audio_fifo_read(s->audio_fifo, (void**)tmp->extended_data, nb_samples);
tmp->pts = s->next_pts;
- s->next_pts += av_rescale_q(nb_samples, (AVRational){1, link->sample_rate},
- link->time_base);
+ if (s->next_pts != AV_NOPTS_VALUE)
+ s->next_pts += av_rescale_q(nb_samples, (AVRational){1, link->sample_rate},
+ link->time_base);
av_frame_move_ref(frame, tmp);
av_frame_free(&tmp);
return 0;
-
}
-int attribute_align_arg av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples)
+int attribute_align_arg av_buffersink_get_samples(AVFilterContext *ctx,
+ AVFrame *frame, int nb_samples)
{
BufferSinkContext *s = ctx->priv;
AVFilterLink *link = ctx->inputs[0];
@@ -211,7 +219,6 @@ int attribute_align_arg av_buffersink_get_samples(AVFilterContext *ctx, AVFrame
}
return ret;
-
}
AVBufferSinkParams *av_buffersink_params_alloc(void)
@@ -246,6 +253,7 @@ static av_cold int common_init(AVFilterContext *ctx)
return AVERROR(ENOMEM);
}
buf->warning_limit = 100;
+ buf->next_pts = AV_NOPTS_VALUE;
return 0;
}
@@ -258,6 +266,7 @@ void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
}
#if FF_API_AVFILTERBUFFER
+FF_DISABLE_DEPRECATION_WARNINGS
static void compat_free_buffer(AVFilterBuffer *buf)
{
AVFrame *frame = buf->priv;
@@ -265,7 +274,8 @@ static void compat_free_buffer(AVFilterBuffer *buf)
av_free(buf);
}
-static int attribute_align_arg compat_read(AVFilterContext *ctx, AVFilterBufferRef **pbuf, int nb_samples, int flags)
+static int compat_read(AVFilterContext *ctx,
+ AVFilterBufferRef **pbuf, int nb_samples, int flags)
{
AVFilterBufferRef *buf;
AVFrame *frame;
@@ -286,6 +296,7 @@ static int attribute_align_arg compat_read(AVFilterContext *ctx, AVFilterBufferR
if (ret < 0)
goto fail;
+ AV_NOWARN_DEPRECATED(
if (ctx->inputs[0]->type == AVMEDIA_TYPE_VIDEO) {
buf = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize,
AV_PERM_READ,
@@ -304,6 +315,7 @@ static int attribute_align_arg compat_read(AVFilterContext *ctx, AVFilterBufferR
}
avfilter_copy_frame_props(buf, frame);
+ )
buf->buf->priv = frame;
buf->buf->free = compat_free_buffer;
@@ -316,19 +328,19 @@ fail:
return ret;
}
-int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
+int attribute_align_arg av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
{
return compat_read(ctx, buf, 0, 0);
}
-int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
- int nb_samples)
+int attribute_align_arg av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
+ int nb_samples)
{
return compat_read(ctx, buf, nb_samples, 0);
}
-int av_buffersink_get_buffer_ref(AVFilterContext *ctx,
- AVFilterBufferRef **bufref, int flags)
+int attribute_align_arg av_buffersink_get_buffer_ref(AVFilterContext *ctx,
+ AVFilterBufferRef **bufref, int flags)
{
*bufref = NULL;
@@ -339,6 +351,7 @@ int av_buffersink_get_buffer_ref(AVFilterContext *ctx,
return compat_read(ctx, bufref, 0, flags);
}
+FF_ENABLE_DEPRECATION_WARNINGS
#endif
AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
@@ -362,83 +375,63 @@ int attribute_align_arg av_buffersink_poll_frame(AVFilterContext *ctx)
return av_fifo_size(buf->fifo)/sizeof(AVFilterBufferRef *) + ff_poll_frame(inlink);
}
-static av_cold int vsink_init(AVFilterContext *ctx, const char *args, void *opaque)
+static av_cold int vsink_init(AVFilterContext *ctx, void *opaque)
{
BufferSinkContext *buf = ctx->priv;
AVBufferSinkParams *params = opaque;
+ int ret;
- if (params && params->pixel_fmts) {
- const int *pixel_fmts = params->pixel_fmts;
-
- buf->pixel_fmts = ff_copy_int_list(pixel_fmts);
- if (!buf->pixel_fmts)
- return AVERROR(ENOMEM);
+ if (params) {
+ if ((ret = av_opt_set_int_list(buf, "pix_fmts", params->pixel_fmts, AV_PIX_FMT_NONE, 0)) < 0)
+ return ret;
}
return common_init(ctx);
}
+#define CHECK_LIST_SIZE(field) \
+ if (buf->field ## _size % sizeof(*buf->field)) { \
+ av_log(ctx, AV_LOG_ERROR, "Invalid size for " #field ": %d, " \
+ "should be multiple of %d\n", \
+ buf->field ## _size, (int)sizeof(*buf->field)); \
+ return AVERROR(EINVAL); \
+ }
static int vsink_query_formats(AVFilterContext *ctx)
{
BufferSinkContext *buf = ctx->priv;
+ AVFilterFormats *formats = NULL;
+ unsigned i;
+ int ret;
- if (buf->pixel_fmts)
- ff_set_common_formats(ctx, ff_make_format_list(buf->pixel_fmts));
- else
+ CHECK_LIST_SIZE(pixel_fmts)
+ if (buf->pixel_fmts_size) {
+ for (i = 0; i < NB_ITEMS(buf->pixel_fmts); i++)
+ if ((ret = ff_add_format(&formats, buf->pixel_fmts[i])) < 0) {
+ ff_formats_unref(&formats);
+ return ret;
+ }
+ ff_set_common_formats(ctx, formats);
+ } else {
ff_default_query_formats(ctx);
+ }
return 0;
}
-static int64_t *concat_channels_lists(const int64_t *layouts, const int *counts)
-{
- int nb_layouts = 0, nb_counts = 0, i;
- int64_t *list;
-
- if (layouts)
- for (; layouts[nb_layouts] != -1; nb_layouts++);
- if (counts)
- for (; counts[nb_counts] != -1; nb_counts++);
- if (nb_counts > INT_MAX - 1 - nb_layouts)
- return NULL;
- if (!(list = av_calloc(nb_layouts + nb_counts + 1, sizeof(*list))))
- return NULL;
- for (i = 0; i < nb_layouts; i++)
- list[i] = layouts[i];
- for (i = 0; i < nb_counts; i++)
- list[nb_layouts + i] = FF_COUNT2LAYOUT(counts[i]);
- list[nb_layouts + nb_counts] = -1;
- return list;
-}
-
-static av_cold int asink_init(AVFilterContext *ctx, const char *args, void *opaque)
+static av_cold int asink_init(AVFilterContext *ctx, void *opaque)
{
BufferSinkContext *buf = ctx->priv;
AVABufferSinkParams *params = opaque;
+ int ret;
- if (params && params->sample_fmts) {
- buf->sample_fmts = ff_copy_int_list(params->sample_fmts);
- if (!buf->sample_fmts)
- return AVERROR(ENOMEM);
- }
- if (params && params->sample_rates) {
- buf->sample_rates = ff_copy_int_list(params->sample_rates);
- if (!buf->sample_rates)
- return AVERROR(ENOMEM);
- }
- if (params && (params->channel_layouts || params->channel_counts)) {
- if (params->all_channel_counts) {
- av_log(ctx, AV_LOG_ERROR,
- "Conflicting all_channel_counts and list in parameters\n");
- return AVERROR(EINVAL);
- }
- buf->channel_layouts = concat_channels_lists(params->channel_layouts,
- params->channel_counts);
- if (!buf->channel_layouts)
- return AVERROR(ENOMEM);
+ if (params) {
+ if ((ret = av_opt_set_int_list(buf, "sample_fmts", params->sample_fmts, AV_SAMPLE_FMT_NONE, 0)) < 0 ||
+ (ret = av_opt_set_int_list(buf, "sample_rates", params->sample_rates, -1, 0)) < 0 ||
+ (ret = av_opt_set_int_list(buf, "channel_layouts", params->channel_layouts, -1, 0)) < 0 ||
+ (ret = av_opt_set_int_list(buf, "channel_counts", params->channel_counts, -1, 0)) < 0 ||
+ (ret = av_opt_set_int(buf, "all_channel_counts", params->all_channel_counts, 0)) < 0)
+ return ret;
}
- if (params)
- buf->all_channel_counts = params->all_channel_counts;
return common_init(ctx);
}
@@ -447,32 +440,86 @@ static int asink_query_formats(AVFilterContext *ctx)
BufferSinkContext *buf = ctx->priv;
AVFilterFormats *formats = NULL;
AVFilterChannelLayouts *layouts = NULL;
+ unsigned i;
+ int ret;
- if (buf->sample_fmts) {
- if (!(formats = ff_make_format_list(buf->sample_fmts)))
- return AVERROR(ENOMEM);
+ CHECK_LIST_SIZE(sample_fmts)
+ CHECK_LIST_SIZE(sample_rates)
+ CHECK_LIST_SIZE(channel_layouts)
+ CHECK_LIST_SIZE(channel_counts)
+
+ if (buf->sample_fmts_size) {
+ for (i = 0; i < NB_ITEMS(buf->sample_fmts); i++)
+ if ((ret = ff_add_format(&formats, buf->sample_fmts[i])) < 0) {
+ ff_formats_unref(&formats);
+ return ret;
+ }
ff_set_common_formats(ctx, formats);
}
- if (buf->channel_layouts || buf->all_channel_counts) {
- layouts = buf->all_channel_counts ? ff_all_channel_counts() :
- avfilter_make_format64_list(buf->channel_layouts);
- if (!layouts)
- return AVERROR(ENOMEM);
+ if (buf->channel_layouts_size || buf->channel_counts_size ||
+ buf->all_channel_counts) {
+ for (i = 0; i < NB_ITEMS(buf->channel_layouts); i++)
+ if ((ret = ff_add_channel_layout(&layouts, buf->channel_layouts[i])) < 0) {
+ ff_channel_layouts_unref(&layouts);
+ return ret;
+ }
+ for (i = 0; i < NB_ITEMS(buf->channel_counts); i++)
+ if ((ret = ff_add_channel_layout(&layouts, FF_COUNT2LAYOUT(buf->channel_counts[i]))) < 0) {
+ ff_channel_layouts_unref(&layouts);
+ return ret;
+ }
+ if (buf->all_channel_counts) {
+ if (layouts)
+ av_log(ctx, AV_LOG_WARNING,
+ "Conflicting all_channel_counts and list in options\n");
+ else if (!(layouts = ff_all_channel_counts()))
+ return AVERROR(ENOMEM);
+ }
ff_set_common_channel_layouts(ctx, layouts);
}
- if (buf->sample_rates) {
- formats = ff_make_format_list(buf->sample_rates);
- if (!formats)
- return AVERROR(ENOMEM);
+ if (buf->sample_rates_size) {
+ formats = NULL;
+ for (i = 0; i < NB_ITEMS(buf->sample_rates); i++)
+ if ((ret = ff_add_format(&formats, buf->sample_rates[i])) < 0) {
+ ff_formats_unref(&formats);
+ return ret;
+ }
ff_set_common_samplerates(ctx, formats);
}
return 0;
}
+#define OFFSET(x) offsetof(BufferSinkContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption buffersink_options[] = {
+ { "pix_fmts", "set the supported pixel formats", OFFSET(pixel_fmts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
+ { NULL },
+};
+#undef FLAGS
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption abuffersink_options[] = {
+ { "sample_fmts", "set the supported sample formats", OFFSET(sample_fmts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
+ { "sample_rates", "set the supported sample rates", OFFSET(sample_rates), AV_OPT_TYPE_BINARY, .flags = FLAGS },
+ { "channel_layouts", "set the supported channel layouts", OFFSET(channel_layouts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
+ { "channel_counts", "set the supported channel counts", OFFSET(channel_counts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
+ { "all_channel_counts", "accept all channel counts", OFFSET(all_channel_counts), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
+ { NULL },
+};
+#undef FLAGS
+
+AVFILTER_DEFINE_CLASS(buffersink);
+AVFILTER_DEFINE_CLASS(abuffersink);
+
#if FF_API_AVFILTERBUFFER
+
+#define ffbuffersink_options buffersink_options
+#define ffabuffersink_options abuffersink_options
+AVFILTER_DEFINE_CLASS(ffbuffersink);
+AVFILTER_DEFINE_CLASS(ffabuffersink);
+
static const AVFilterPad ffbuffersink_inputs[] = {
{
.name = "default",
@@ -482,10 +529,11 @@ static const AVFilterPad ffbuffersink_inputs[] = {
{ NULL },
};
-AVFilter avfilter_vsink_ffbuffersink = {
+AVFilter ff_vsink_ffbuffersink = {
.name = "ffbuffersink",
.description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
.priv_size = sizeof(BufferSinkContext),
+ .priv_class = &ffbuffersink_class,
.init_opaque = vsink_init,
.uninit = uninit,
@@ -503,12 +551,13 @@ static const AVFilterPad ffabuffersink_inputs[] = {
{ NULL },
};
-AVFilter avfilter_asink_ffabuffersink = {
+AVFilter ff_asink_ffabuffersink = {
.name = "ffabuffersink",
.description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
.init_opaque = asink_init,
.uninit = uninit,
.priv_size = sizeof(BufferSinkContext),
+ .priv_class = &ffabuffersink_class,
.query_formats = asink_query_formats,
.inputs = ffabuffersink_inputs,
.outputs = NULL,
@@ -517,42 +566,44 @@ AVFilter avfilter_asink_ffabuffersink = {
static const AVFilterPad avfilter_vsink_buffer_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
-AVFilter avfilter_vsink_buffer = {
- .name = "buffersink",
+AVFilter ff_vsink_buffer = {
+ .name = "buffersink",
.description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
- .priv_size = sizeof(BufferSinkContext),
+ .priv_size = sizeof(BufferSinkContext),
+ .priv_class = &buffersink_class,
.init_opaque = vsink_init,
- .uninit = uninit,
+ .uninit = uninit,
.query_formats = vsink_query_formats,
- .inputs = avfilter_vsink_buffer_inputs,
- .outputs = NULL,
+ .inputs = avfilter_vsink_buffer_inputs,
+ .outputs = NULL,
};
static const AVFilterPad avfilter_asink_abuffer_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
-AVFilter avfilter_asink_abuffer = {
- .name = "abuffersink",
+AVFilter ff_asink_abuffer = {
+ .name = "abuffersink",
.description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
- .priv_size = sizeof(BufferSinkContext),
+ .priv_class = &abuffersink_class,
+ .priv_size = sizeof(BufferSinkContext),
.init_opaque = asink_init,
- .uninit = uninit,
+ .uninit = uninit,
.query_formats = asink_query_formats,
- .inputs = avfilter_asink_abuffer_inputs,
- .outputs = NULL,
+ .inputs = avfilter_asink_abuffer_inputs,
+ .outputs = NULL,
};
diff --git a/ffmpeg/libavfilter/buffersrc.c b/ffmpeg/libavfilter/buffersrc.c
index cc650ff..fb42c8e 100644
--- a/ffmpeg/libavfilter/buffersrc.c
+++ b/ffmpeg/libavfilter/buffersrc.c
@@ -23,11 +23,14 @@
* memory buffer source filter
*/
+#include <float.h>
+
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/fifo.h"
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"
+#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "audio.h"
@@ -55,7 +58,6 @@ typedef struct {
/* audio only */
int sample_rate;
enum AVSampleFormat sample_fmt;
- char *sample_fmt_str;
int channels;
uint64_t channel_layout;
char *channel_layout_str;
@@ -75,13 +77,13 @@ typedef struct {
return AVERROR(EINVAL);\
}
-int av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame)
+int attribute_align_arg av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame)
{
return av_buffersrc_add_frame_flags(ctx, (AVFrame *)frame,
AV_BUFFERSRC_FLAG_KEEP_REF);
}
-int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
+int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
{
return av_buffersrc_add_frame_flags(ctx, frame, 0);
}
@@ -89,7 +91,7 @@ int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
static int av_buffersrc_add_frame_internal(AVFilterContext *ctx,
AVFrame *frame, int flags);
-int av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
+int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
{
AVFrame *copy = NULL;
int ret = 0;
@@ -113,13 +115,15 @@ int av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags
return ret;
}
-static int attribute_align_arg av_buffersrc_add_frame_internal(AVFilterContext *ctx,
- AVFrame *frame, int flags)
+static int av_buffersrc_add_frame_internal(AVFilterContext *ctx,
+ AVFrame *frame, int flags)
{
BufferSourceContext *s = ctx->priv;
AVFrame *copy;
int ret;
+ s->nb_failed_requests = 0;
+
if (!frame) {
s->eof = 1;
return 0;
@@ -169,6 +173,7 @@ static int attribute_align_arg av_buffersrc_add_frame_internal(AVFilterContext *
}
#if FF_API_AVFILTERBUFFER
+FF_DISABLE_DEPRECATION_WARNINGS
static void compat_free_buffer(void *opaque, uint8_t *data)
{
AVFilterBufferRef *buf = opaque;
@@ -279,6 +284,7 @@ fail:
return ret;
}
+FF_ENABLE_DEPRECATION_WARNINGS
int av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf)
{
@@ -286,59 +292,15 @@ int av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf)
}
#endif
-#define OFFSET(x) offsetof(BufferSourceContext, x)
-#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption buffer_options[] = {
- { "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, FLAGS },
- { "frame_rate", NULL, OFFSET(frame_rate), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, FLAGS },
- { "video_size", NULL, OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, .flags = FLAGS },
- { "pix_fmt", NULL, OFFSET(pix_fmt), AV_OPT_TYPE_PIXEL_FMT, .flags = FLAGS },
- { "pixel_aspect", NULL, OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, FLAGS },
- { "sws_param", NULL, OFFSET(sws_param), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { NULL },
-};
-#undef FLAGS
-
-AVFILTER_DEFINE_CLASS(buffer);
-
-static av_cold int init_video(AVFilterContext *ctx, const char *args)
+static av_cold int init_video(AVFilterContext *ctx)
{
BufferSourceContext *c = ctx->priv;
- char pix_fmt_str[128], *colon, *equal;
- int ret, n = 0;
-
- c->class = &buffer_class;
- if (!args) {
- av_log(ctx, AV_LOG_ERROR, "Arguments required\n");
+ if (c->pix_fmt == AV_PIX_FMT_NONE || !c->w || !c->h || av_q2d(c->time_base) <= 0) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid parameters provided.\n");
return AVERROR(EINVAL);
}
- colon = strchr(args, ':');
- equal = strchr(args, '=');
- if (equal && (!colon || equal < colon)) {
- av_opt_set_defaults(c);
- ret = av_set_options_string(c, args, "=", ":");
- if (ret < 0)
- goto fail;
- } else {
- if (!args ||
- (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d", &c->w, &c->h, pix_fmt_str,
- &c->time_base.num, &c->time_base.den,
- &c->pixel_aspect.num, &c->pixel_aspect.den)) != 7) {
- av_log(ctx, AV_LOG_ERROR, "Expected 7 arguments, but %d found in '%s'\n", n, args);
- return AVERROR(EINVAL);
- }
- if ((c->pix_fmt = av_get_pix_fmt(pix_fmt_str)) == AV_PIX_FMT_NONE) {
- char *tail;
- c->pix_fmt = strtol(pix_fmt_str, &tail, 10);
- if (*tail || c->pix_fmt < 0 || c->pix_fmt >= AV_PIX_FMT_NB) {
- av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", pix_fmt_str);
- return AVERROR(EINVAL);
- }
- }
- }
-
if (!(c->fifo = av_fifo_alloc(sizeof(AVFrame*))))
return AVERROR(ENOMEM);
@@ -348,10 +310,6 @@ static av_cold int init_video(AVFilterContext *ctx, const char *args)
c->pixel_aspect.num, c->pixel_aspect.den, (char *)av_x_if_null(c->sws_param, ""));
c->warning_limit = 100;
return 0;
-
-fail:
- av_opt_free(c);
- return ret;
}
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
@@ -359,35 +317,52 @@ unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}
-#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM
+#define OFFSET(x) offsetof(BufferSourceContext, x)
+#define A AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM
+#define V AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption buffer_options[] = {
+ { "width", NULL, OFFSET(w), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
+ { "video_size", NULL, OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, .flags = V },
+ { "height", NULL, OFFSET(h), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
+ { "pix_fmt", NULL, OFFSET(pix_fmt), AV_OPT_TYPE_PIXEL_FMT, { .i64 = AV_PIX_FMT_NONE }, .min = AV_PIX_FMT_NONE, .max = INT_MAX, .flags = V },
+#if FF_API_OLD_FILTER_OPTS
+ /* those 4 are for compatibility with the old option passing system where each filter
+ * did its own parsing */
+ { "time_base_num", "deprecated, do not use", OFFSET(time_base.num), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
+ { "time_base_den", "deprecated, do not use", OFFSET(time_base.den), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
+ { "sar_num", "deprecated, do not use", OFFSET(pixel_aspect.num), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
+ { "sar_den", "deprecated, do not use", OFFSET(pixel_aspect.den), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
+#endif
+ { "sar", "sample aspect ratio", OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL, { .dbl = 1 }, 0, DBL_MAX, V },
+ { "pixel_aspect", "sample aspect ratio", OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL, { .dbl = 1 }, 0, DBL_MAX, V },
+ { "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V },
+ { "frame_rate", NULL, OFFSET(frame_rate), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V },
+ { "sws_param", NULL, OFFSET(sws_param), AV_OPT_TYPE_STRING, .flags = V },
+ { NULL },
+};
+
+AVFILTER_DEFINE_CLASS(buffer);
+
static const AVOption abuffer_options[] = {
- { "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, FLAGS },
- { "sample_rate", NULL, OFFSET(sample_rate), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
- { "sample_fmt", NULL, OFFSET(sample_fmt_str), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "channels", NULL, OFFSET(channels), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
- { "channel_layout", NULL, OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, A },
+ { "sample_rate", NULL, OFFSET(sample_rate), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A },
+ { "sample_fmt", NULL, OFFSET(sample_fmt), AV_OPT_TYPE_SAMPLE_FMT, { .i64 = AV_SAMPLE_FMT_NONE }, .min = AV_SAMPLE_FMT_NONE, .max = INT_MAX, .flags = A },
+ { "channel_layout", NULL, OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A },
+ { "channels", NULL, OFFSET(channels), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A },
{ NULL },
};
AVFILTER_DEFINE_CLASS(abuffer);
-static av_cold int init_audio(AVFilterContext *ctx, const char *args)
+static av_cold int init_audio(AVFilterContext *ctx)
{
BufferSourceContext *s = ctx->priv;
int ret = 0;
- s->class = &abuffer_class;
- av_opt_set_defaults(s);
-
- if ((ret = av_set_options_string(s, args, "=", ":")) < 0)
- goto fail;
-
- s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str);
if (s->sample_fmt == AV_SAMPLE_FMT_NONE) {
- av_log(ctx, AV_LOG_ERROR, "Invalid sample format '%s'\n",
- s->sample_fmt_str);
- ret = AVERROR(EINVAL);
- goto fail;
+ av_log(ctx, AV_LOG_ERROR, "Sample format was not set or was invalid\n");
+ return AVERROR(EINVAL);
}
if (s->channel_layout_str) {
@@ -395,10 +370,9 @@ static av_cold int init_audio(AVFilterContext *ctx, const char *args)
/* TODO reindent */
s->channel_layout = av_get_channel_layout(s->channel_layout_str);
if (!s->channel_layout) {
- av_log(ctx, AV_LOG_ERROR, "Invalid channel layout '%s'\n",
+ av_log(ctx, AV_LOG_ERROR, "Invalid channel layout %s.\n",
s->channel_layout_str);
- ret = AVERROR(EINVAL);
- goto fail;
+ return AVERROR(EINVAL);
}
n = av_get_channel_layout_nb_channels(s->channel_layout);
if (s->channels) {
@@ -407,34 +381,28 @@ static av_cold int init_audio(AVFilterContext *ctx, const char *args)
"Mismatching channel count %d and layout '%s' "
"(%d channels)\n",
s->channels, s->channel_layout_str, n);
- ret = AVERROR(EINVAL);
- goto fail;
+ return AVERROR(EINVAL);
}
}
s->channels = n;
} else if (!s->channels) {
av_log(ctx, AV_LOG_ERROR, "Neither number of channels nor "
"channel layout specified\n");
- ret = AVERROR(EINVAL);
- goto fail;
+ return AVERROR(EINVAL);
}
- if (!(s->fifo = av_fifo_alloc(sizeof(AVFrame*)))) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
+ if (!(s->fifo = av_fifo_alloc(sizeof(AVFrame*))))
+ return AVERROR(ENOMEM);
if (!s->time_base.num)
s->time_base = (AVRational){1, s->sample_rate};
av_log(ctx, AV_LOG_VERBOSE,
"tb:%d/%d samplefmt:%s samplerate:%d chlayout:%s\n",
- s->time_base.num, s->time_base.den, s->sample_fmt_str,
+ s->time_base.num, s->time_base.den, av_get_sample_fmt_name(s->sample_fmt),
s->sample_rate, s->channel_layout_str);
s->warning_limit = 100;
-fail:
- av_opt_free(s);
return ret;
}
@@ -448,7 +416,6 @@ static av_cold void uninit(AVFilterContext *ctx)
}
av_fifo_free(s->fifo);
s->fifo = NULL;
- av_freep(&s->sws_param);
}
static int query_formats(AVFilterContext *ctx)
@@ -541,7 +508,7 @@ static const AVFilterPad avfilter_vsrc_buffer_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vsrc_buffer = {
+AVFilter ff_vsrc_buffer = {
.name = "buffer",
.description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
.priv_size = sizeof(BufferSourceContext),
@@ -566,7 +533,7 @@ static const AVFilterPad avfilter_asrc_abuffer_outputs[] = {
{ NULL }
};
-AVFilter avfilter_asrc_abuffer = {
+AVFilter ff_asrc_abuffer = {
.name = "abuffer",
.description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
.priv_size = sizeof(BufferSourceContext),
diff --git a/ffmpeg/libavfilter/buffersrc.h b/ffmpeg/libavfilter/buffersrc.h
index 66361b3..89613e1 100644
--- a/ffmpeg/libavfilter/buffersrc.h
+++ b/ffmpeg/libavfilter/buffersrc.h
@@ -1,19 +1,19 @@
/*
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/ffmpeg/libavfilter/drawutils.c b/ffmpeg/libavfilter/drawutils.c
index aebc000..a5064f8 100644
--- a/ffmpeg/libavfilter/drawutils.c
+++ b/ffmpeg/libavfilter/drawutils.c
@@ -37,12 +37,22 @@ int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
case AV_PIX_FMT_ARGB: rgba_map[ALPHA] = 0; rgba_map[RED ] = 1; rgba_map[GREEN] = 2; rgba_map[BLUE ] = 3; break;
case AV_PIX_FMT_0BGR:
case AV_PIX_FMT_ABGR: rgba_map[ALPHA] = 0; rgba_map[BLUE ] = 1; rgba_map[GREEN] = 2; rgba_map[RED ] = 3; break;
+ case AV_PIX_FMT_RGB48LE:
+ case AV_PIX_FMT_RGB48BE:
+ case AV_PIX_FMT_RGBA64BE:
+ case AV_PIX_FMT_RGBA64LE:
case AV_PIX_FMT_RGB0:
case AV_PIX_FMT_RGBA:
case AV_PIX_FMT_RGB24: rgba_map[RED ] = 0; rgba_map[GREEN] = 1; rgba_map[BLUE ] = 2; rgba_map[ALPHA] = 3; break;
+ case AV_PIX_FMT_BGR48LE:
+ case AV_PIX_FMT_BGR48BE:
+ case AV_PIX_FMT_BGRA64BE:
+ case AV_PIX_FMT_BGRA64LE:
case AV_PIX_FMT_BGRA:
case AV_PIX_FMT_BGR0:
case AV_PIX_FMT_BGR24: rgba_map[BLUE ] = 0; rgba_map[GREEN] = 1; rgba_map[RED ] = 2; rgba_map[ALPHA] = 3; break;
+ case AV_PIX_FMT_GBRAP:
+ case AV_PIX_FMT_GBRP: rgba_map[GREEN] = 0; rgba_map[BLUE ] = 1; rgba_map[RED ] = 2; rgba_map[ALPHA] = 3; break;
default: /* unsupported */
return AVERROR(EINVAL);
}
@@ -83,7 +93,7 @@ int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, uint8_t
int hsub1 = (plane == 1 || plane == 2) ? hsub : 0;
pixel_step[plane] = 1;
- line_size = (w >> hsub1) * pixel_step[plane];
+ line_size = FF_CEIL_RSHIFT(w, hsub1) * pixel_step[plane];
line[plane] = av_malloc(line_size);
memset(line[plane], dst_color[plane], line_size);
}
@@ -102,11 +112,13 @@ void ff_draw_rectangle(uint8_t *dst[4], int dst_linesize[4],
for (plane = 0; plane < 4 && dst[plane]; plane++) {
int hsub1 = plane == 1 || plane == 2 ? hsub : 0;
int vsub1 = plane == 1 || plane == 2 ? vsub : 0;
+ int width = FF_CEIL_RSHIFT(w, hsub1);
+ int height = FF_CEIL_RSHIFT(h, vsub1);
p = dst[plane] + (y >> vsub1) * dst_linesize[plane];
- for (i = 0; i < (h >> vsub1); i++) {
+ for (i = 0; i < height; i++) {
memcpy(p + (x >> hsub1) * pixelstep[plane],
- src[plane], (w >> hsub1) * pixelstep[plane]);
+ src[plane], width * pixelstep[plane]);
p += dst_linesize[plane];
}
}
@@ -122,11 +134,13 @@ void ff_copy_rectangle(uint8_t *dst[4], int dst_linesize[4],
for (plane = 0; plane < 4 && dst[plane]; plane++) {
int hsub1 = plane == 1 || plane == 2 ? hsub : 0;
int vsub1 = plane == 1 || plane == 2 ? vsub : 0;
+ int width = FF_CEIL_RSHIFT(w, hsub1);
+ int height = FF_CEIL_RSHIFT(h, vsub1);
p = dst[plane] + (y >> vsub1) * dst_linesize[plane];
- for (i = 0; i < (h >> vsub1); i++) {
+ for (i = 0; i < height; i++) {
memcpy(p + (x >> hsub1) * pixelstep[plane],
- src[plane] + src_linesize[plane]*(i+(y2>>vsub1)), (w >> hsub1) * pixelstep[plane]);
+ src[plane] + src_linesize[plane]*(i+(y2>>vsub1)), width * pixelstep[plane]);
p += dst_linesize[plane];
}
}
@@ -141,7 +155,7 @@ int ff_draw_init(FFDrawContext *draw, enum AVPixelFormat format, unsigned flags)
if (!desc->name)
return AVERROR(EINVAL);
- if (desc->flags & ~(PIX_FMT_PLANAR | PIX_FMT_RGB | PIX_FMT_PSEUDOPAL | PIX_FMT_ALPHA))
+ if (desc->flags & ~(AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_PSEUDOPAL | AV_PIX_FMT_FLAG_ALPHA))
return AVERROR(ENOSYS);
for (i = 0; i < desc->nb_components; i++) {
c = &desc->comp[i];
@@ -166,10 +180,8 @@ int ff_draw_init(FFDrawContext *draw, enum AVPixelFormat format, unsigned flags)
draw->format = format;
draw->nb_planes = nb_planes;
memcpy(draw->pixelstep, pixelstep, sizeof(draw->pixelstep));
- if (nb_planes >= 3 && !(desc->flags & PIX_FMT_RGB)) {
- draw->hsub[1] = draw->hsub[2] = draw->hsub_max = desc->log2_chroma_w;
- draw->vsub[1] = draw->vsub[2] = draw->vsub_max = desc->log2_chroma_h;
- }
+ draw->hsub[1] = draw->hsub[2] = draw->hsub_max = desc->log2_chroma_w;
+ draw->vsub[1] = draw->vsub[2] = draw->vsub_max = desc->log2_chroma_h;
for (i = 0; i < ((desc->nb_components - 1) | 1); i++)
draw->comp_mask[desc->comp[i].plane] |=
1 << (desc->comp[i].offset_plus1 - 1);
@@ -183,10 +195,15 @@ void ff_draw_color(FFDrawContext *draw, FFDrawColor *color, const uint8_t rgba[4
if (rgba != color->rgba)
memcpy(color->rgba, rgba, sizeof(color->rgba));
- if ((draw->desc->flags & PIX_FMT_RGB) && draw->nb_planes == 1 &&
+ if ((draw->desc->flags & AV_PIX_FMT_FLAG_RGB) &&
ff_fill_rgba_map(rgba_map, draw->format) >= 0) {
+ if (draw->nb_planes == 1) {
for (i = 0; i < 4; i++)
color->comp[0].u8[rgba_map[i]] = rgba[i];
+ } else {
+ for (i = 0; i < 4; i++)
+ color->comp[rgba_map[i]].u8[0] = rgba[i];
+ }
} else if (draw->nb_planes == 3 || draw->nb_planes == 4) {
/* assume YUV */
color->comp[0].u8[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]);
@@ -223,8 +240,8 @@ void ff_copy_rectangle2(FFDrawContext *draw,
for (plane = 0; plane < draw->nb_planes; plane++) {
p = pointer_at(draw, src, src_linesize, plane, src_x, src_y);
q = pointer_at(draw, dst, dst_linesize, plane, dst_x, dst_y);
- wp = (w >> draw->hsub[plane]) * draw->pixelstep[plane];
- hp = (h >> draw->vsub[plane]);
+ wp = FF_CEIL_RSHIFT(w, draw->hsub[plane]) * draw->pixelstep[plane];
+ hp = FF_CEIL_RSHIFT(h, draw->vsub[plane]);
for (y = 0; y < hp; y++) {
memcpy(q, p, wp);
p += src_linesize[plane];
@@ -242,8 +259,8 @@ void ff_fill_rectangle(FFDrawContext *draw, FFDrawColor *color,
for (plane = 0; plane < draw->nb_planes; plane++) {
p0 = pointer_at(draw, dst, dst_linesize, plane, dst_x, dst_y);
- wp = (w >> draw->hsub[plane]);
- hp = (h >> draw->vsub[plane]);
+ wp = FF_CEIL_RSHIFT(w, draw->hsub[plane]);
+ hp = FF_CEIL_RSHIFT(h, draw->vsub[plane]);
if (!hp)
return;
p = p0;
diff --git a/ffmpeg/libavfilter/f_ebur128.c b/ffmpeg/libavfilter/f_ebur128.c
index 8aaea73..a2c6160 100644
--- a/ffmpeg/libavfilter/f_ebur128.c
+++ b/ffmpeg/libavfilter/f_ebur128.c
@@ -64,7 +64,7 @@
#define HIST_SIZE ((ABS_UP_THRES - ABS_THRES) * HIST_GRAIN + 1)
/**
- * An histogram is an array of HIST_SIZE hist_entry storing all the energies
+ * A histogram is an array of HIST_SIZE hist_entry storing all the energies
* recorded (with an accuracy of 1/HIST_GRAIN) of the loudnesses from ABS_THRES
* (at 0) to ABS_UP_THRES (at HIST_SIZE-1).
* This fixed-size system avoids the need of a list of energies growing
@@ -128,7 +128,6 @@ typedef struct {
/* misc */
int loglevel; ///< log level for frame logging
int metadata; ///< whether or not to inject loudness results in frames
- int request_fulfilled; ///< 1 if some audio just got pushed, 0 otherwise. FIXME: remove me
} EBUR128Context;
#define OFFSET(x) offsetof(EBUR128Context, x)
@@ -143,7 +142,7 @@ static const AVOption ebur128_options[] = {
{ "info", "information logging level", 0, AV_OPT_TYPE_CONST, {.i64 = AV_LOG_INFO}, INT_MIN, INT_MAX, A|V|F, "level" },
{ "verbose", "verbose logging level", 0, AV_OPT_TYPE_CONST, {.i64 = AV_LOG_VERBOSE}, INT_MIN, INT_MAX, A|V|F, "level" },
{ "metadata", "inject metadata in the filtergraph", OFFSET(metadata), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, A|V|F },
- { NULL },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(ebur128);
@@ -317,6 +316,8 @@ static int config_video_output(AVFilterLink *outlink)
DRAW_RECT(ebur128->graph);
DRAW_RECT(ebur128->gauge);
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+
return 0;
}
@@ -380,6 +381,8 @@ static int config_audio_output(AVFilterLink *outlink)
return AVERROR(ENOMEM);
}
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+
return 0;
}
@@ -400,33 +403,11 @@ static struct hist_entry *get_histogram(void)
return h;
}
-/* This is currently necessary for the min/max samples to work properly.
- * FIXME: remove me when possible */
-static int audio_request_frame(AVFilterLink *outlink)
+static av_cold int init(AVFilterContext *ctx)
{
- int ret;
- AVFilterContext *ctx = outlink->src;
- EBUR128Context *ebur128 = ctx->priv;
-
- ebur128->request_fulfilled = 0;
- do {
- ret = ff_request_frame(ctx->inputs[0]);
- } while (!ebur128->request_fulfilled && ret >= 0);
- return ret;
-}
-
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- int ret;
EBUR128Context *ebur128 = ctx->priv;
AVFilterPad pad;
- ebur128->class = &ebur128_class;
- av_opt_set_defaults(ebur128);
-
- if ((ret = av_set_options_string(ebur128, args, "=", ":")) < 0)
- return ret;
-
if (ebur128->loglevel != AV_LOG_INFO &&
ebur128->loglevel != AV_LOG_VERBOSE) {
if (ebur128->do_video || ebur128->metadata)
@@ -463,8 +444,6 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_audio_output,
};
- if (ebur128->metadata)
- pad.request_frame = audio_request_frame;
if (!pad.name)
return AVERROR(ENOMEM);
ff_insert_outpad(ctx, ebur128->do_video, &pad);
@@ -717,7 +696,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
}
}
- ebur128->request_fulfilled = 1;
return ff_filter_frame(ctx->outputs[ebur128->do_video], insamples);
}
@@ -744,7 +722,7 @@ static int query_formats(AVFilterContext *ctx)
/* set input and output audio formats
* Note: ff_set_common_* functions are not used because they affect all the
- * links, and thus break the video format negociation */
+ * links, and thus break the video format negotiation */
formats = ff_make_format_list(sample_fmts);
if (!formats)
return AVERROR(ENOMEM);
@@ -795,21 +773,19 @@ static av_cold void uninit(AVFilterContext *ctx)
for (i = 0; i < ctx->nb_outputs; i++)
av_freep(&ctx->output_pads[i].name);
av_frame_free(&ebur128->outpicref);
- av_opt_free(ebur128);
}
static const AVFilterPad ebur128_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
- .filter_frame = filter_frame,
- .config_props = config_audio_input,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ .config_props = config_audio_input,
},
{ NULL }
};
-AVFilter avfilter_af_ebur128 = {
+AVFilter ff_af_ebur128 = {
.name = "ebur128",
.description = NULL_IF_CONFIG_SMALL("EBU R128 scanner."),
.priv_size = sizeof(EBUR128Context),
@@ -819,4 +795,5 @@ AVFilter avfilter_af_ebur128 = {
.inputs = ebur128_inputs,
.outputs = NULL,
.priv_class = &ebur128_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
diff --git a/ffmpeg/libavfilter/f_perms.c b/ffmpeg/libavfilter/f_perms.c
index ddba03e..2c53e8c 100644
--- a/ffmpeg/libavfilter/f_perms.c
+++ b/ffmpeg/libavfilter/f_perms.c
@@ -1,4 +1,6 @@
/*
+ * Copyright (c) 2013 Clément Bœsch
+ *
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
@@ -34,11 +36,12 @@ enum mode {
typedef struct {
const AVClass *class;
AVLFG lfg;
+ int64_t random_seed;
enum mode mode;
} PermsContext;
#define OFFSET(x) offsetof(PermsContext, x)
-#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
static const AVOption options[] = {
{ "mode", "select permissions mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_NONE}, MODE_NONE, NB_MODES-1, FLAGS, "mode" },
@@ -47,26 +50,24 @@ static const AVOption options[] = {
{ "rw", "set all output frames writable", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_RW}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "toggle", "switch permissions", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_TOGGLE}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "random", "set permissions randomly", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_RANDOM}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "seed", "set the seed for the random mode", OFFSET(random_seed), AV_OPT_TYPE_INT64, {.i64 = -1}, -1, UINT32_MAX, FLAGS },
{ NULL }
};
-static av_cold int init(AVFilterContext *ctx, const char *args, const AVClass *class)
+static av_cold int init(AVFilterContext *ctx)
{
- int ret;
PermsContext *perms = ctx->priv;
- static const char *shorthand[] = { "mode", NULL };
-
- perms->class = class;
- av_opt_set_defaults(perms);
- if ((ret = av_opt_set_from_string(perms, args, shorthand, "=", ":")) < 0)
- return ret;
+ if (perms->mode == MODE_RANDOM) {
+ uint32_t seed;
- // TODO: add a seed option
- if (perms->mode == MODE_RANDOM)
- av_lfg_init(&perms->lfg, av_get_random_seed());
+ if (perms->random_seed == -1)
+ perms->random_seed = av_get_random_seed();
+ seed = perms->random_seed;
+ av_log(ctx, AV_LOG_INFO, "random seed: 0x%08x\n", seed);
+ av_lfg_init(&perms->lfg, seed);
+ }
- av_opt_free(perms);
return 0;
}
@@ -115,11 +116,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
#define aperms_options options
AVFILTER_DEFINE_CLASS(aperms);
-static av_cold int aperms_init(AVFilterContext *ctx, const char *args)
-{
- return init(ctx, args, &aperms_class);
-}
-
static const AVFilterPad aperms_inputs[] = {
{
.name = "default",
@@ -137,10 +133,10 @@ static const AVFilterPad aperms_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_aperms = {
+AVFilter ff_af_aperms = {
.name = "aperms",
.description = NULL_IF_CONFIG_SMALL("Set permissions for the output audio frame."),
- .init = aperms_init,
+ .init = init,
.priv_size = sizeof(PermsContext),
.inputs = aperms_inputs,
.outputs = aperms_outputs,
@@ -153,11 +149,6 @@ AVFilter avfilter_af_aperms = {
#define perms_options options
AVFILTER_DEFINE_CLASS(perms);
-static av_cold int perms_init(AVFilterContext *ctx, const char *args)
-{
- return init(ctx, args, &perms_class);
-}
-
static const AVFilterPad perms_inputs[] = {
{
.name = "default",
@@ -175,10 +166,10 @@ static const AVFilterPad perms_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_perms = {
+AVFilter ff_vf_perms = {
.name = "perms",
.description = NULL_IF_CONFIG_SMALL("Set permissions for the output video frame."),
- .init = perms_init,
+ .init = init,
.priv_size = sizeof(PermsContext),
.inputs = perms_inputs,
.outputs = perms_outputs,
diff --git a/ffmpeg/libavfilter/f_select.c b/ffmpeg/libavfilter/f_select.c
index 603fad3..ec84da8 100644
--- a/ffmpeg/libavfilter/f_select.c
+++ b/ffmpeg/libavfilter/f_select.c
@@ -23,6 +23,7 @@
* filter for selecting which frame passes in the filterchain
*/
+#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/fifo.h"
#include "libavutil/internal.h"
@@ -58,6 +59,13 @@ static const char *const var_names[] = {
"SI",
"SP",
"BI",
+ "PICT_TYPE_I",
+ "PICT_TYPE_P",
+ "PICT_TYPE_B",
+ "PICT_TYPE_S",
+ "PICT_TYPE_SI",
+ "PICT_TYPE_SP",
+ "PICT_TYPE_BI",
"interlace_type", ///< the frame interlace type
"PROGRESSIVE",
@@ -94,6 +102,13 @@ enum var_name {
VAR_PREV_SELECTED_T,
VAR_PICT_TYPE,
+ VAR_I,
+ VAR_P,
+ VAR_B,
+ VAR_S,
+ VAR_SI,
+ VAR_SP,
+ VAR_BI,
VAR_PICT_TYPE_I,
VAR_PICT_TYPE_P,
VAR_PICT_TYPE_B,
@@ -125,8 +140,8 @@ enum var_name {
typedef struct {
const AVClass *class;
- AVExpr *expr;
char *expr_str;
+ AVExpr *expr;
double var_values[VAR_VARS_NB];
int do_scene_detect; ///< 1 if the expression requires scene detection variables, 0 otherwise
#if CONFIG_AVCODEC
@@ -136,35 +151,46 @@ typedef struct {
#endif
AVFrame *prev_picref; ///< previous frame (scene detect only)
double select;
+ int select_out; ///< mark the selected output pad index
+ int nb_outputs;
} SelectContext;
#define OFFSET(x) offsetof(SelectContext, x)
-#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
-static const AVOption options[] = {
- { "expr", "set selection expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str = "1"}, 0, 0, FLAGS },
- { "e", "set selection expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str = "1"}, 0, 0, FLAGS },
- {NULL},
-};
+#define DEFINE_OPTIONS(filt_name, FLAGS) \
+static const AVOption filt_name##_options[] = { \
+ { "expr", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
+ { "e", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
+ { "outputs", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
+ { "n", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
+ { NULL } \
+}
-static av_cold int init(AVFilterContext *ctx, const char *args, const AVClass *class)
+static int request_frame(AVFilterLink *outlink);
+
+static av_cold int init(AVFilterContext *ctx)
{
SelectContext *select = ctx->priv;
- const char *shorthand[] = { "expr", NULL };
- int ret;
-
- select->class = class;
- av_opt_set_defaults(select);
-
- if ((ret = av_opt_set_from_string(select, args, shorthand, "=", ":")) < 0)
- return ret;
+ int i, ret;
if ((ret = av_expr_parse(&select->expr, select->expr_str,
var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", select->expr_str);
+ av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n",
+ select->expr_str);
return ret;
}
select->do_scene_detect = !!strstr(select->expr_str, "scene");
+ for (i = 0; i < select->nb_outputs; i++) {
+ AVFilterPad pad = { 0 };
+
+ pad.name = av_asprintf("output%d", i);
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ pad.type = ctx->filter->inputs[0].type;
+ pad.request_frame = request_frame;
+ ff_insert_outpad(ctx, i, &pad);
+ }
+
return 0;
}
@@ -188,11 +214,18 @@ static int config_input(AVFilterLink *inlink)
select->var_values[VAR_START_PTS] = NAN;
select->var_values[VAR_START_T] = NAN;
+ select->var_values[VAR_I] = AV_PICTURE_TYPE_I;
+ select->var_values[VAR_P] = AV_PICTURE_TYPE_P;
+ select->var_values[VAR_B] = AV_PICTURE_TYPE_B;
+ select->var_values[VAR_SI] = AV_PICTURE_TYPE_SI;
+ select->var_values[VAR_SP] = AV_PICTURE_TYPE_SP;
+ select->var_values[VAR_BI] = AV_PICTURE_TYPE_BI;
select->var_values[VAR_PICT_TYPE_I] = AV_PICTURE_TYPE_I;
select->var_values[VAR_PICT_TYPE_P] = AV_PICTURE_TYPE_P;
select->var_values[VAR_PICT_TYPE_B] = AV_PICTURE_TYPE_B;
select->var_values[VAR_PICT_TYPE_SI] = AV_PICTURE_TYPE_SI;
select->var_values[VAR_PICT_TYPE_SP] = AV_PICTURE_TYPE_SP;
+ select->var_values[VAR_PICT_TYPE_BI] = AV_PICTURE_TYPE_BI;
select->var_values[VAR_INTERLACE_TYPE_P] = INTERLACE_TYPE_P;
select->var_values[VAR_INTERLACE_TYPE_T] = INTERLACE_TYPE_T;
@@ -212,7 +245,7 @@ static int config_input(AVFilterLink *inlink)
select->avctx = avcodec_alloc_context3(NULL);
if (!select->avctx)
return AVERROR(ENOMEM);
- dsputil_init(&select->c, select->avctx);
+ avpriv_dsputil_init(&select->c, select->avctx);
}
#endif
return 0;
@@ -260,7 +293,7 @@ static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
-static int select_frame(AVFilterContext *ctx, AVFrame *frame)
+static void select_frame(AVFilterContext *ctx, AVFrame *frame)
{
SelectContext *select = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
@@ -271,6 +304,7 @@ static int select_frame(AVFilterContext *ctx, AVFrame *frame)
if (isnan(select->var_values[VAR_START_T]))
select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);
+ select->var_values[VAR_N ] = inlink->frame_count;
select->var_values[VAR_PTS] = TS2D(frame->pts);
select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
@@ -291,13 +325,13 @@ static int select_frame(AVFilterContext *ctx, AVFrame *frame)
select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
// TODO: document metadata
snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
- av_dict_set(&frame->metadata, "lavfi.scene_score", buf, 0);
+ av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.scene_score", buf, 0);
}
#endif
break;
}
- res = av_expr_eval(select->expr, select->var_values, NULL);
+ select->select = res = av_expr_eval(select->expr, select->var_values, NULL);
av_log(inlink->dst, AV_LOG_DEBUG,
"n:%f pts:%f t:%f key:%d",
select->var_values[VAR_N],
@@ -321,7 +355,15 @@ static int select_frame(AVFilterContext *ctx, AVFrame *frame)
break;
}
- av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f\n", res);
+ if (res == 0) {
+ select->select_out = -1; /* drop */
+ } else if (isnan(res) || res < 0) {
+ select->select_out = 0; /* first output */
+ } else {
+ select->select_out = FFMIN(ceilf(res)-1, select->nb_outputs-1); /* other outputs */
+ }
+
+ av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f select_out:%d\n", res, select->select_out);
if (res) {
select->var_values[VAR_PREV_SELECTED_N] = select->var_values[VAR_N];
@@ -332,20 +374,18 @@ static int select_frame(AVFilterContext *ctx, AVFrame *frame)
select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
}
- select->var_values[VAR_N] += 1.0;
select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
select->var_values[VAR_PREV_T] = select->var_values[VAR_T];
-
- return res;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
- SelectContext *select = inlink->dst->priv;
+ AVFilterContext *ctx = inlink->dst;
+ SelectContext *select = ctx->priv;
- select->select = select_frame(inlink->dst, frame);
+ select_frame(ctx, frame);
if (select->select)
- return ff_filter_frame(inlink->dst->outputs[0], frame);
+ return ff_filter_frame(ctx->outputs[select->select_out], frame);
av_frame_free(&frame);
return 0;
@@ -356,13 +396,13 @@ static int request_frame(AVFilterLink *outlink)
AVFilterContext *ctx = outlink->src;
SelectContext *select = ctx->priv;
AVFilterLink *inlink = outlink->src->inputs[0];
- select->select = 0;
+ int out_no = FF_OUTLINK_IDX(outlink);
do {
int ret = ff_request_frame(inlink);
if (ret < 0)
return ret;
- } while (!select->select);
+ } while (select->select_out != out_no);
return 0;
}
@@ -370,10 +410,13 @@ static int request_frame(AVFilterLink *outlink)
static av_cold void uninit(AVFilterContext *ctx)
{
SelectContext *select = ctx->priv;
+ int i;
av_expr_free(select->expr);
select->expr = NULL;
- av_opt_free(select);
+
+ for (i = 0; i < ctx->nb_outputs; i++)
+ av_freep(&ctx->output_pads[i].name);
#if CONFIG_AVCODEC
if (select->do_scene_detect) {
@@ -404,15 +447,15 @@ static int query_formats(AVFilterContext *ctx)
#if CONFIG_ASELECT_FILTER
-#define aselect_options options
+DEFINE_OPTIONS(aselect, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(aselect);
-static av_cold int aselect_init(AVFilterContext *ctx, const char *args)
+static av_cold int aselect_init(AVFilterContext *ctx)
{
SelectContext *select = ctx->priv;
int ret;
- if ((ret = init(ctx, args, &aselect_class)) < 0)
+ if ((ret = init(ctx)) < 0)
return ret;
if (select->do_scene_detect) {
@@ -425,46 +468,37 @@ static av_cold int aselect_init(AVFilterContext *ctx, const char *args)
static const AVFilterPad avfilter_af_aselect_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
- .config_props = config_input,
- .filter_frame = filter_frame,
- },
- { NULL }
-};
-
-static const AVFilterPad avfilter_af_aselect_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
},
{ NULL }
};
-AVFilter avfilter_af_aselect = {
- .name = "aselect",
+AVFilter ff_af_aselect = {
+ .name = "aselect",
.description = NULL_IF_CONFIG_SMALL("Select audio frames to pass in output."),
- .init = aselect_init,
- .uninit = uninit,
- .priv_size = sizeof(SelectContext),
- .inputs = avfilter_af_aselect_inputs,
- .outputs = avfilter_af_aselect_outputs,
- .priv_class = &aselect_class,
+ .init = aselect_init,
+ .uninit = uninit,
+ .priv_size = sizeof(SelectContext),
+ .inputs = avfilter_af_aselect_inputs,
+ .priv_class = &aselect_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
#endif /* CONFIG_ASELECT_FILTER */
#if CONFIG_SELECT_FILTER
-#define select_options options
+DEFINE_OPTIONS(select, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(select);
-static av_cold int select_init(AVFilterContext *ctx, const char *args)
+static av_cold int select_init(AVFilterContext *ctx)
{
SelectContext *select = ctx->priv;
int ret;
- if ((ret = init(ctx, args, &select_class)) < 0)
+ if ((ret = init(ctx)) < 0)
return ret;
if (select->do_scene_detect && !CONFIG_AVCODEC) {
@@ -477,35 +511,23 @@ static av_cold int select_init(AVFilterContext *ctx, const char *args)
static const AVFilterPad avfilter_vf_select_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .config_props = config_input,
- .filter_frame = filter_frame,
- },
- { NULL }
-};
-
-static const AVFilterPad avfilter_vf_select_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .request_frame = request_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
},
{ NULL }
};
-AVFilter avfilter_vf_select = {
- .name = "select",
- .description = NULL_IF_CONFIG_SMALL("Select video frames to pass in output."),
- .init = select_init,
- .uninit = uninit,
+AVFilter ff_vf_select = {
+ .name = "select",
+ .description = NULL_IF_CONFIG_SMALL("Select video frames to pass in output."),
+ .init = select_init,
+ .uninit = uninit,
.query_formats = query_formats,
-
- .priv_size = sizeof(SelectContext),
-
- .inputs = avfilter_vf_select_inputs,
- .outputs = avfilter_vf_select_outputs,
- .priv_class = &select_class,
+ .priv_size = sizeof(SelectContext),
+ .priv_class = &select_class,
+ .inputs = avfilter_vf_select_inputs,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
#endif /* CONFIG_SELECT_FILTER */
diff --git a/ffmpeg/libavfilter/f_sendcmd.c b/ffmpeg/libavfilter/f_sendcmd.c
index a5a5f2e..c30f49f 100644
--- a/ffmpeg/libavfilter/f_sendcmd.c
+++ b/ffmpeg/libavfilter/f_sendcmd.c
@@ -39,7 +39,7 @@
static inline char *make_command_flags_str(AVBPrint *pbuf, int flags)
{
- const char *flag_strings[] = { "enter", "leave" };
+ static const char * const flag_strings[] = { "enter", "leave" };
int i, is_first = 1;
av_bprint_init(pbuf, 0, AV_BPRINT_SIZE_AUTOMATIC);
@@ -80,13 +80,13 @@ typedef struct {
} SendCmdContext;
#define OFFSET(x) offsetof(SendCmdContext, x)
-#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
static const AVOption options[] = {
{ "commands", "set commands", OFFSET(commands_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "c", "set commands", OFFSET(commands_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "filename", "set commands file", OFFSET(commands_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "f", "set commands file", OFFSET(commands_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
- {NULL},
+ { NULL }
};
#define SPACES " \f\t\n\r"
@@ -134,7 +134,7 @@ static int parse_command(Command *cmd, int cmd_count, int interval_count,
char flag_buf[64];
av_strlcpy(flag_buf, *buf, sizeof(flag_buf));
av_log(log_ctx, AV_LOG_ERROR,
- "Unknown flag '%s' in in interval #%d, command #%d\n",
+ "Unknown flag '%s' in interval #%d, command #%d\n",
flag_buf, interval_count, cmd_count);
return AVERROR(EINVAL);
}
@@ -166,7 +166,7 @@ static int parse_command(Command *cmd, int cmd_count, int interval_count,
cmd->target = av_get_token(buf, COMMAND_DELIMS);
if (!cmd->target || !cmd->target[0]) {
av_log(log_ctx, AV_LOG_ERROR,
- "No target specified in in interval #%d, command #%d\n",
+ "No target specified in interval #%d, command #%d\n",
interval_count, cmd_count);
ret = AVERROR(EINVAL);
goto fail;
@@ -176,7 +176,7 @@ static int parse_command(Command *cmd, int cmd_count, int interval_count,
cmd->command = av_get_token(buf, COMMAND_DELIMS);
if (!cmd->command || !cmd->command[0]) {
av_log(log_ctx, AV_LOG_ERROR,
- "No command specified in in interval #%d, command #%d\n",
+ "No command specified in interval #%d, command #%d\n",
interval_count, cmd_count);
ret = AVERROR(EINVAL);
goto fail;
@@ -368,17 +368,11 @@ static int cmp_intervals(const void *a, const void *b)
return ret == 0 ? i1->index - i2->index : ret;
}
-static av_cold int init(AVFilterContext *ctx, const char *args, const AVClass *class)
+static av_cold int init(AVFilterContext *ctx)
{
SendCmdContext *sendcmd = ctx->priv;
int ret, i, j;
- sendcmd->class = class;
- av_opt_set_defaults(sendcmd);
-
- if ((ret = av_set_options_string(sendcmd, args, "=", ":")) < 0)
- return ret;
-
if (sendcmd->commands_filename && sendcmd->commands_str) {
av_log(ctx, AV_LOG_ERROR,
"Only one of the filename or commands options must be specified\n");
@@ -428,13 +422,11 @@ static av_cold int init(AVFilterContext *ctx, const char *args, const AVClass *c
return 0;
}
-static void av_cold uninit(AVFilterContext *ctx)
+static av_cold void uninit(AVFilterContext *ctx)
{
SendCmdContext *sendcmd = ctx->priv;
int i, j;
- av_opt_free(sendcmd);
-
for (i = 0; i < sendcmd->nb_intervals; i++) {
Interval *interval = &sendcmd->intervals[i];
for (j = 0; j < interval->nb_commands; j++) {
@@ -518,17 +510,11 @@ end:
#define sendcmd_options options
AVFILTER_DEFINE_CLASS(sendcmd);
-static av_cold int sendcmd_init(AVFilterContext *ctx, const char *args)
-{
- return init(ctx, args, &sendcmd_class);
-}
-
static const AVFilterPad sendcmd_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -541,16 +527,15 @@ static const AVFilterPad sendcmd_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_sendcmd = {
- .name = "sendcmd",
+AVFilter ff_vf_sendcmd = {
+ .name = "sendcmd",
.description = NULL_IF_CONFIG_SMALL("Send commands to filters."),
-
- .init = sendcmd_init,
- .uninit = uninit,
- .priv_size = sizeof(SendCmdContext),
- .inputs = sendcmd_inputs,
- .outputs = sendcmd_outputs,
- .priv_class = &sendcmd_class,
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(SendCmdContext),
+ .inputs = sendcmd_inputs,
+ .outputs = sendcmd_outputs,
+ .priv_class = &sendcmd_class,
};
#endif
@@ -560,17 +545,11 @@ AVFilter avfilter_vf_sendcmd = {
#define asendcmd_options options
AVFILTER_DEFINE_CLASS(asendcmd);
-static av_cold int asendcmd_init(AVFilterContext *ctx, const char *args)
-{
- return init(ctx, args, &asendcmd_class);
-}
-
static const AVFilterPad asendcmd_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -583,16 +562,15 @@ static const AVFilterPad asendcmd_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_asendcmd = {
- .name = "asendcmd",
+AVFilter ff_af_asendcmd = {
+ .name = "asendcmd",
.description = NULL_IF_CONFIG_SMALL("Send commands to filters."),
-
- .init = asendcmd_init,
- .uninit = uninit,
- .priv_size = sizeof(SendCmdContext),
- .inputs = asendcmd_inputs,
- .outputs = asendcmd_outputs,
- .priv_class = &asendcmd_class,
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(SendCmdContext),
+ .inputs = asendcmd_inputs,
+ .outputs = asendcmd_outputs,
+ .priv_class = &asendcmd_class,
};
#endif
diff --git a/ffmpeg/libavfilter/f_setpts.c b/ffmpeg/libavfilter/f_setpts.c
deleted file mode 100644
index d3a2976..0000000
--- a/ffmpeg/libavfilter/f_setpts.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Copyright (c) 2010 Stefano Sabatini
- * Copyright (c) 2008 Victor Paesa
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * video presentation timestamp (PTS) modification filter
- */
-
-#include "libavutil/eval.h"
-#include "libavutil/internal.h"
-#include "libavutil/mathematics.h"
-#include "libavutil/time.h"
-#include "avfilter.h"
-#include "internal.h"
-#include "audio.h"
-#include "video.h"
-
-static const char *const var_names[] = {
- "FRAME_RATE", ///< defined only for constant frame-rate video
- "INTERLACED", ///< tell if the current frame is interlaced
- "N", ///< frame number (starting at zero)
- "NB_CONSUMED_SAMPLES", ///< number of samples consumed by the filter (only audio)
- "NB_SAMPLES", ///< number of samples in the current frame (only audio)
- "POS", ///< original position in the file of the frame
- "PREV_INPTS", ///< previous input PTS
- "PREV_INT", ///< previous input time in seconds
- "PREV_OUTPTS", ///< previous output PTS
- "PREV_OUTT", ///< previous output time in seconds
- "PTS", ///< original pts in the file of the frame
- "SAMPLE_RATE", ///< sample rate (only audio)
- "STARTPTS", ///< PTS at start of movie
- "STARTT", ///< time at start of movie
- "T", ///< original time in the file of the frame
- "TB", ///< timebase
- "RTCTIME", ///< wallclock (RTC) time in micro seconds
- "RTCSTART", ///< wallclock (RTC) time at the start of the movie in micro seconds
- NULL
-};
-
-enum var_name {
- VAR_FRAME_RATE,
- VAR_INTERLACED,
- VAR_N,
- VAR_NB_CONSUMED_SAMPLES,
- VAR_NB_SAMPLES,
- VAR_POS,
- VAR_PREV_INPTS,
- VAR_PREV_INT,
- VAR_PREV_OUTPTS,
- VAR_PREV_OUTT,
- VAR_PTS,
- VAR_SAMPLE_RATE,
- VAR_STARTPTS,
- VAR_STARTT,
- VAR_T,
- VAR_TB,
- VAR_RTCTIME,
- VAR_RTCSTART,
- VAR_VARS_NB
-};
-
-typedef struct {
- AVExpr *expr;
- double var_values[VAR_VARS_NB];
- enum AVMediaType type;
-} SetPTSContext;
-
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- SetPTSContext *setpts = ctx->priv;
- int ret;
-
- if ((ret = av_expr_parse(&setpts->expr, args ? args : "PTS",
- var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", args);
- return ret;
- }
-
- setpts->var_values[VAR_N ] = 0.0;
- setpts->var_values[VAR_PREV_INPTS ] = setpts->var_values[VAR_PREV_INT ] = NAN;
- setpts->var_values[VAR_PREV_OUTPTS] = setpts->var_values[VAR_PREV_OUTT] = NAN;
- setpts->var_values[VAR_STARTPTS ] = setpts->var_values[VAR_STARTT ] = NAN;
- return 0;
-}
-
-static int config_input(AVFilterLink *inlink)
-{
- AVFilterContext *ctx = inlink->dst;
- SetPTSContext *setpts = ctx->priv;
-
- setpts->type = inlink->type;
- setpts->var_values[VAR_TB] = av_q2d(inlink->time_base);
- setpts->var_values[VAR_RTCSTART] = av_gettime();
-
- setpts->var_values[VAR_SAMPLE_RATE] =
- setpts->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;
-
- setpts->var_values[VAR_FRAME_RATE] = inlink->frame_rate.num && inlink->frame_rate.den ?
- av_q2d(inlink->frame_rate) : NAN;
-
- av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f FRAME_RATE:%f SAMPLE_RATE:%f\n",
- setpts->var_values[VAR_TB],
- setpts->var_values[VAR_FRAME_RATE],
- setpts->var_values[VAR_SAMPLE_RATE]);
- return 0;
-}
-
-#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
-#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
-#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
-
-#define BUF_SIZE 64
-
-static inline char *double2int64str(char *buf, double v)
-{
- if (isnan(v)) snprintf(buf, BUF_SIZE, "nan");
- else snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)v);
- return buf;
-}
-
-#define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)
-
-static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
-{
- SetPTSContext *setpts = inlink->dst->priv;
- int64_t in_pts = frame->pts;
- double d;
-
- if (isnan(setpts->var_values[VAR_STARTPTS])) {
- setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts);
- setpts->var_values[VAR_STARTT ] = TS2T(frame->pts, inlink->time_base);
- }
- setpts->var_values[VAR_PTS ] = TS2D(frame->pts);
- setpts->var_values[VAR_T ] = TS2T(frame->pts, inlink->time_base);
- setpts->var_values[VAR_POS ] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
- setpts->var_values[VAR_RTCTIME ] = av_gettime();
-
- switch (inlink->type) {
- case AVMEDIA_TYPE_VIDEO:
- setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
- break;
-
- case AVMEDIA_TYPE_AUDIO:
- setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
- break;
- }
-
- d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
-
- av_log(inlink->dst, AV_LOG_DEBUG,
- "N:%"PRId64" PTS:%s T:%f POS:%s",
- (int64_t)setpts->var_values[VAR_N],
- d2istr(setpts->var_values[VAR_PTS]),
- setpts->var_values[VAR_T],
- d2istr(setpts->var_values[VAR_POS]));
- switch (inlink->type) {
- case AVMEDIA_TYPE_VIDEO:
- av_log(inlink->dst, AV_LOG_DEBUG, " INTERLACED:%"PRId64,
- (int64_t)setpts->var_values[VAR_INTERLACED]);
- break;
- case AVMEDIA_TYPE_AUDIO:
- av_log(inlink->dst, AV_LOG_DEBUG, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
- (int64_t)setpts->var_values[VAR_NB_SAMPLES],
- (int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]);
- break;
- }
- av_log(inlink->dst, AV_LOG_DEBUG, " -> PTS:%s T:%f\n", d2istr(d), TS2T(d, inlink->time_base));
-
- frame->pts = D2TS(d);
-
- setpts->var_values[VAR_PREV_INPTS ] = TS2D(in_pts);
- setpts->var_values[VAR_PREV_INT ] = TS2T(in_pts, inlink->time_base);
- setpts->var_values[VAR_PREV_OUTPTS] = TS2D(frame->pts);
- setpts->var_values[VAR_PREV_OUTT] = TS2T(frame->pts, inlink->time_base);
- setpts->var_values[VAR_N] += 1.0;
- if (setpts->type == AVMEDIA_TYPE_AUDIO) {
- setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->nb_samples;
- }
- return ff_filter_frame(inlink->dst->outputs[0], frame);
-}
-
-static av_cold void uninit(AVFilterContext *ctx)
-{
- SetPTSContext *setpts = ctx->priv;
- av_expr_free(setpts->expr);
- setpts->expr = NULL;
-}
-
-#if CONFIG_ASETPTS_FILTER
-static const AVFilterPad avfilter_af_asetpts_inputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
- .config_props = config_input,
- .filter_frame = filter_frame,
- },
- { NULL }
-};
-
-static const AVFilterPad avfilter_af_asetpts_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- },
- { NULL }
-};
-
-AVFilter avfilter_af_asetpts = {
- .name = "asetpts",
- .description = NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."),
- .init = init,
- .uninit = uninit,
- .priv_size = sizeof(SetPTSContext),
- .inputs = avfilter_af_asetpts_inputs,
- .outputs = avfilter_af_asetpts_outputs,
-};
-#endif /* CONFIG_ASETPTS_FILTER */
-
-#if CONFIG_SETPTS_FILTER
-static const AVFilterPad avfilter_vf_setpts_inputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .config_props = config_input,
- .filter_frame = filter_frame,
- },
- { NULL }
-};
-
-static const AVFilterPad avfilter_vf_setpts_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- },
- { NULL }
-};
-
-AVFilter avfilter_vf_setpts = {
- .name = "setpts",
- .description = NULL_IF_CONFIG_SMALL("Set PTS for the output video frame."),
- .init = init,
- .uninit = uninit,
-
- .priv_size = sizeof(SetPTSContext),
-
- .inputs = avfilter_vf_setpts_inputs,
- .outputs = avfilter_vf_setpts_outputs,
-};
-#endif /* CONFIG_SETPTS_FILTER */
diff --git a/ffmpeg/libavfilter/f_settb.c b/ffmpeg/libavfilter/f_settb.c
index 1fba23a..d511c14 100644
--- a/ffmpeg/libavfilter/f_settb.c
+++ b/ffmpeg/libavfilter/f_settb.c
@@ -60,32 +60,13 @@ typedef struct {
#define OFFSET(x) offsetof(SetTBContext, x)
#define DEFINE_OPTIONS(filt_name, filt_type) \
static const AVOption filt_name##_options[] = { \
- { "tb", "set timebase expression", OFFSET(tb_expr), AV_OPT_TYPE_STRING, {.str="intb"}, \
+ { "expr", "set expression determining the output timebase", OFFSET(tb_expr), AV_OPT_TYPE_STRING, {.str="intb"}, \
+ .flags=AV_OPT_FLAG_##filt_type##_PARAM|AV_OPT_FLAG_FILTERING_PARAM }, \
+ { "tb", "set expression determining the output timebase", OFFSET(tb_expr), AV_OPT_TYPE_STRING, {.str="intb"}, \
.flags=AV_OPT_FLAG_##filt_type##_PARAM|AV_OPT_FLAG_FILTERING_PARAM }, \
{ NULL } \
}
-static av_cold int init(AVFilterContext *ctx, const char *args, const AVClass *class)
-{
- SetTBContext *settb = ctx->priv;
- static const char *shorthand[] = { "tb", NULL };
- int ret;
-
- settb->class = class;
- av_opt_set_defaults(settb);
-
- if ((ret = av_opt_set_from_string(settb, args, shorthand, "=", ":")) < 0)
- return ret;
-
- return 0;
-}
-
-static av_cold void uninit(AVFilterContext *ctx)
-{
- SetTBContext *settb = ctx->priv;
- av_opt_free(settb);
-}
-
static int config_output_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
@@ -144,17 +125,11 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
DEFINE_OPTIONS(settb, VIDEO);
AVFILTER_DEFINE_CLASS(settb);
-static av_cold int settb_init(AVFilterContext *ctx, const char *args)
-{
- return init(ctx, args, &settb_class);
-}
-
static const AVFilterPad avfilter_vf_settb_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -168,17 +143,13 @@ static const AVFilterPad avfilter_vf_settb_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_settb = {
- .name = "settb",
+AVFilter ff_vf_settb = {
+ .name = "settb",
.description = NULL_IF_CONFIG_SMALL("Set timebase for the video output link."),
- .init = settb_init,
- .uninit = uninit,
-
- .priv_size = sizeof(SetTBContext),
-
- .inputs = avfilter_vf_settb_inputs,
- .outputs = avfilter_vf_settb_outputs,
- .priv_class = &settb_class,
+ .priv_size = sizeof(SetTBContext),
+ .priv_class = &settb_class,
+ .inputs = avfilter_vf_settb_inputs,
+ .outputs = avfilter_vf_settb_outputs,
};
#endif
@@ -187,17 +158,11 @@ AVFilter avfilter_vf_settb = {
DEFINE_OPTIONS(asettb, AUDIO);
AVFILTER_DEFINE_CLASS(asettb);
-static av_cold int asettb_init(AVFilterContext *ctx, const char *args)
-{
- return init(ctx, args, &asettb_class);
-}
-
static const AVFilterPad avfilter_af_asettb_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -211,15 +176,12 @@ static const AVFilterPad avfilter_af_asettb_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_asettb = {
- .name = "asettb",
+AVFilter ff_af_asettb = {
+ .name = "asettb",
.description = NULL_IF_CONFIG_SMALL("Set timebase for the audio output link."),
- .init = asettb_init,
- .uninit = uninit,
-
- .priv_size = sizeof(SetTBContext),
- .inputs = avfilter_af_asettb_inputs,
- .outputs = avfilter_af_asettb_outputs,
- .priv_class = &asettb_class,
+ .priv_size = sizeof(SetTBContext),
+ .inputs = avfilter_af_asettb_inputs,
+ .outputs = avfilter_af_asettb_outputs,
+ .priv_class = &asettb_class,
};
#endif
diff --git a/ffmpeg/libavfilter/fifo.c b/ffmpeg/libavfilter/fifo.c
index 5153752..5310af2 100644
--- a/ffmpeg/libavfilter/fifo.c
+++ b/ffmpeg/libavfilter/fifo.c
@@ -51,7 +51,7 @@ typedef struct {
int allocated_samples; ///< number of samples out was allocated for
} FifoContext;
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
FifoContext *fifo = ctx->priv;
fifo->last = &fifo->root;
@@ -147,10 +147,14 @@ static int return_audio_frame(AVFilterContext *ctx)
{
AVFilterLink *link = ctx->outputs[0];
FifoContext *s = ctx->priv;
- AVFrame *head = s->root.next->frame;
+ AVFrame *head = s->root.next ? s->root.next->frame : NULL;
AVFrame *out;
int ret;
+ /* if head is NULL then we're flushing the remaining samples in out */
+ if (!head && !s->out)
+ return AVERROR_EOF;
+
if (!s->out &&
head->nb_samples >= link->request_samples &&
calc_ptr_alignment(head) >= 32) {
@@ -183,8 +187,26 @@ static int return_audio_frame(AVFilterContext *ctx)
}
while (s->out->nb_samples < s->allocated_samples) {
- int len = FFMIN(s->allocated_samples - s->out->nb_samples,
- head->nb_samples);
+ int len;
+
+ if (!s->root.next) {
+ ret = ff_request_frame(ctx->inputs[0]);
+ if (ret == AVERROR_EOF) {
+ av_samples_set_silence(s->out->extended_data,
+ s->out->nb_samples,
+ s->allocated_samples -
+ s->out->nb_samples,
+ nb_channels, link->format);
+ s->out->nb_samples = s->allocated_samples;
+ break;
+ } else if (ret < 0)
+ return ret;
+ av_assert0(s->root.next); // If ff_request_frame() succeeded then we should have a frame
+ }
+ head = s->root.next->frame;
+
+ len = FFMIN(s->allocated_samples - s->out->nb_samples,
+ head->nb_samples);
av_samples_copy(s->out->extended_data, head->extended_data,
s->out->nb_samples, 0, len, nb_channels,
@@ -194,21 +216,6 @@ static int return_audio_frame(AVFilterContext *ctx)
if (len == head->nb_samples) {
av_frame_free(&head);
queue_pop(s);
-
- if (!s->root.next &&
- (ret = ff_request_frame(ctx->inputs[0])) < 0) {
- if (ret == AVERROR_EOF) {
- av_samples_set_silence(s->out->extended_data,
- s->out->nb_samples,
- s->allocated_samples -
- s->out->nb_samples,
- nb_channels, link->format);
- s->out->nb_samples = s->allocated_samples;
- break;
- }
- return ret;
- }
- head = s->root.next->frame;
} else {
buffer_offset(link, head, len);
}
@@ -225,8 +232,11 @@ static int request_frame(AVFilterLink *outlink)
int ret = 0;
if (!fifo->root.next) {
- if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0)
+ if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0) {
+ if (ret == AVERROR_EOF && outlink->request_samples)
+ return return_audio_frame(outlink->src);
return ret;
+ }
av_assert0(fifo->root.next);
}
@@ -244,7 +254,6 @@ static const AVFilterPad avfilter_vf_fifo_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
.filter_frame = add_to_queue,
},
{ NULL }
@@ -259,7 +268,7 @@ static const AVFilterPad avfilter_vf_fifo_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_fifo = {
+AVFilter ff_vf_fifo = {
.name = "fifo",
.description = NULL_IF_CONFIG_SMALL("Buffer input images and send them when they are requested."),
@@ -276,7 +285,6 @@ static const AVFilterPad avfilter_af_afifo_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
.filter_frame = add_to_queue,
},
{ NULL }
@@ -291,7 +299,7 @@ static const AVFilterPad avfilter_af_afifo_outputs[] = {
{ NULL }
};
-AVFilter avfilter_af_afifo = {
+AVFilter ff_af_afifo = {
.name = "afifo",
.description = NULL_IF_CONFIG_SMALL("Buffer input frames and send them when they are requested."),
diff --git a/ffmpeg/libavfilter/filtfmts.c b/ffmpeg/libavfilter/filtfmts.c
index 7286729..e6c9b03 100644
--- a/ffmpeg/libavfilter/filtfmts.c
+++ b/ffmpeg/libavfilter/filtfmts.c
@@ -31,11 +31,11 @@ static void print_formats(AVFilterContext *filter_ctx)
int i, j;
#define PRINT_FMTS(inout, outin, INOUT) \
- for (i = 0; i < filter_ctx->inout##put_count; i++) { \
+ for (i = 0; i < filter_ctx->nb_##inout##puts; i++) { \
if (filter_ctx->inout##puts[i]->type == AVMEDIA_TYPE_VIDEO) { \
AVFilterFormats *fmts = \
filter_ctx->inout##puts[i]->outin##_formats; \
- for (j = 0; j < fmts->format_count; j++) \
+ for (j = 0; j < fmts->nb_formats; j++) \
if(av_get_pix_fmt_name(fmts->formats[j])) \
printf(#INOUT "PUT[%d] %s: fmt:%s\n", \
i, filter_ctx->filter->inout##puts[i].name, \
@@ -45,7 +45,7 @@ static void print_formats(AVFilterContext *filter_ctx)
AVFilterChannelLayouts *layouts; \
\
fmts = filter_ctx->inout##puts[i]->outin##_formats; \
- for (j = 0; j < fmts->format_count; j++) \
+ for (j = 0; j < fmts->nb_formats; j++) \
printf(#INOUT "PUT[%d] %s: fmt:%s\n", \
i, filter_ctx->filter->inout##puts[i].name, \
av_get_sample_fmt_name(fmts->formats[j])); \
@@ -69,6 +69,7 @@ int main(int argc, char **argv)
{
AVFilter *filter;
AVFilterContext *filter_ctx;
+ AVFilterGraph *graph_ctx;
const char *filter_name;
const char *filter_args = NULL;
int i;
@@ -84,6 +85,11 @@ int main(int argc, char **argv)
if (argc > 2)
filter_args = argv[2];
+ /* allocate graph */
+ graph_ctx = avfilter_graph_alloc();
+ if (!graph_ctx)
+ return 1;
+
avfilter_register_all();
/* get a corresponding filter and open it */
@@ -92,24 +98,25 @@ int main(int argc, char **argv)
return 1;
}
- if (avfilter_open(&filter_ctx, filter, NULL) < 0) {
+ /* open filter and add it to the graph */
+ if (!(filter_ctx = avfilter_graph_alloc_filter(graph_ctx, filter, filter_name))) {
fprintf(stderr, "Impossible to open filter with name '%s'\n",
filter_name);
return 1;
}
- if (avfilter_init_filter(filter_ctx, filter_args, NULL) < 0) {
+ if (avfilter_init_str(filter_ctx, filter_args) < 0) {
fprintf(stderr, "Impossible to init filter '%s' with arguments '%s'\n",
filter_name, filter_args);
return 1;
}
/* create a link for each of the input pads */
- for (i = 0; i < filter_ctx->input_count; i++) {
+ for (i = 0; i < filter_ctx->nb_inputs; i++) {
AVFilterLink *link = av_mallocz(sizeof(AVFilterLink));
link->type = filter_ctx->filter->inputs[i].type;
filter_ctx->inputs[i] = link;
}
- for (i = 0; i < filter_ctx->output_count; i++) {
+ for (i = 0; i < filter_ctx->nb_outputs; i++) {
AVFilterLink *link = av_mallocz(sizeof(AVFilterLink));
link->type = filter_ctx->filter->outputs[i].type;
filter_ctx->outputs[i] = link;
@@ -123,6 +130,7 @@ int main(int argc, char **argv)
print_formats(filter_ctx);
avfilter_free(filter_ctx);
+ avfilter_graph_free(&graph_ctx);
fflush(stdout);
return 0;
}
diff --git a/ffmpeg/libavfilter/formats.c b/ffmpeg/libavfilter/formats.c
index 43718e4..5816032 100644
--- a/ffmpeg/libavfilter/formats.c
+++ b/ffmpeg/libavfilter/formats.c
@@ -108,14 +108,14 @@ AVFilterFormats *ff_merge_formats(AVFilterFormats *a, AVFilterFormats *b,
To avoid that, pretend that there are no common formats to force the
insertion of a conversion filter. */
if (type == AVMEDIA_TYPE_VIDEO)
- for (i = 0; i < a->format_count; i++)
- for (j = 0; j < b->format_count; j++) {
+ for (i = 0; i < a->nb_formats; i++)
+ for (j = 0; j < b->nb_formats; j++) {
const AVPixFmtDescriptor *adesc = av_pix_fmt_desc_get(a->formats[i]);
const AVPixFmtDescriptor *bdesc = av_pix_fmt_desc_get(b->formats[j]);
- alpha2 |= adesc->flags & bdesc->flags & PIX_FMT_ALPHA;
+ alpha2 |= adesc->flags & bdesc->flags & AV_PIX_FMT_FLAG_ALPHA;
chroma2|= adesc->nb_components > 1 && bdesc->nb_components > 1;
if (a->formats[i] == b->formats[j]) {
- alpha1 |= adesc->flags & PIX_FMT_ALPHA;
+ alpha1 |= adesc->flags & AV_PIX_FMT_FLAG_ALPHA;
chroma1|= adesc->nb_components > 1;
}
}
@@ -124,7 +124,7 @@ AVFilterFormats *ff_merge_formats(AVFilterFormats *a, AVFilterFormats *b,
if (alpha2 > alpha1 || chroma2 > chroma1)
return NULL;
- MERGE_FORMATS(ret, a, b, formats, format_count, AVFilterFormats, fail);
+ MERGE_FORMATS(ret, a, b, formats, nb_formats, AVFilterFormats, fail);
return ret;
fail:
@@ -143,9 +143,9 @@ AVFilterFormats *ff_merge_samplerates(AVFilterFormats *a,
if (a == b) return a;
- if (a->format_count && b->format_count) {
- MERGE_FORMATS(ret, a, b, formats, format_count, AVFilterFormats, fail);
- } else if (a->format_count) {
+ if (a->nb_formats && b->nb_formats) {
+ MERGE_FORMATS(ret, a, b, formats, nb_formats, AVFilterFormats, fail);
+ } else if (a->nb_formats) {
MERGE_REF(a, b, formats, AVFilterFormats, fail);
ret = a;
} else {
@@ -184,6 +184,10 @@ AVFilterChannelLayouts *ff_merge_channel_layouts(AVFilterChannelLayouts *a,
for (i = j = 0; i < b->nb_channel_layouts; i++)
if (KNOWN(b->channel_layouts[i]))
b->channel_layouts[j++] = b->channel_layouts[i];
+ /* Not optimal: the unknown layouts of b may become known after
+ another merge. */
+ if (!j)
+ return NULL;
b->nb_channel_layouts = j;
}
MERGE_REF(b, a, channel_layouts, AVFilterChannelLayouts, fail);
@@ -270,20 +274,6 @@ int ff_fmt_is_in(int fmt, const int *fmts)
} \
}
-int *ff_copy_int_list(const int * const list)
-{
- int *ret = NULL;
- COPY_INT_LIST(ret, list, int);
- return ret;
-}
-
-int64_t *ff_copy_int64_list(const int64_t * const list)
-{
- int64_t *ret = NULL;
- COPY_INT_LIST(ret, list, int64_t);
- return ret;
-}
-
#define MAKE_FORMAT_LIST(type, field, count_field) \
type *formats; \
int count = 0; \
@@ -303,7 +293,7 @@ int64_t *ff_copy_int64_list(const int64_t * const list)
AVFilterFormats *ff_make_format_list(const int *fmts)
{
- MAKE_FORMAT_LIST(AVFilterFormats, formats, format_count);
+ MAKE_FORMAT_LIST(AVFilterFormats, formats, nb_formats);
while (count--)
formats->formats[count] = fmts[count];
@@ -339,7 +329,7 @@ do { \
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
{
- ADD_FORMAT(avff, fmt, int, formats, format_count);
+ ADD_FORMAT(avff, fmt, int, formats, nb_formats);
return 0;
}
@@ -360,7 +350,7 @@ AVFilterFormats *ff_all_formats(enum AVMediaType type)
for (fmt = 0; fmt < num_formats; fmt++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
if ((type != AVMEDIA_TYPE_VIDEO) ||
- (type == AVMEDIA_TYPE_VIDEO && !(desc->flags & PIX_FMT_HWACCEL)))
+ (type == AVMEDIA_TYPE_VIDEO && !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)))
ff_add_format(&ret, fmt);
}
@@ -625,10 +615,21 @@ int ff_parse_sample_rate(int *ret, const char *arg, void *log_ctx)
return 0;
}
-int ff_parse_channel_layout(int64_t *ret, const char *arg, void *log_ctx)
+int ff_parse_channel_layout(int64_t *ret, int *nret, const char *arg,
+ void *log_ctx)
{
char *tail;
- int64_t chlayout = av_get_channel_layout(arg);
+ int64_t chlayout, count;
+
+ if (nret) {
+ count = strtol(arg, &tail, 10);
+ if (*tail == 'c' && !tail[1] && count > 0 && count < 63) {
+ *nret = count;
+ *ret = 0;
+ return 0;
+ }
+ }
+ chlayout = av_get_channel_layout(arg);
if (chlayout == 0) {
chlayout = strtol(arg, &tail, 10);
if (*tail || chlayout == 0) {
@@ -637,6 +638,8 @@ int ff_parse_channel_layout(int64_t *ret, const char *arg, void *log_ctx)
}
}
*ret = chlayout;
+ if (nret)
+ *nret = av_get_channel_layout_nb_channels(chlayout);
return 0;
}
diff --git a/ffmpeg/libavfilter/formats.h b/ffmpeg/libavfilter/formats.h
index c06f6df..468eac8 100644
--- a/ffmpeg/libavfilter/formats.h
+++ b/ffmpeg/libavfilter/formats.h
@@ -62,7 +62,7 @@
* pointer to each of the pointers to itself.
*/
struct AVFilterFormats {
- unsigned format_count; ///< number of formats
+ unsigned nb_formats; ///< number of formats
int *formats; ///< list of media formats
unsigned refcount; ///< number of references to this list
diff --git a/ffmpeg/libavfilter/gradfun.h b/ffmpeg/libavfilter/gradfun.h
index 801dddd..eb1f1eb 100644
--- a/ffmpeg/libavfilter/gradfun.h
+++ b/ffmpeg/libavfilter/gradfun.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2010 Nolan Lum <nol888@gmail.com>
- * Copyright (c) 2009 Loren Merritt <lorenm@u.washignton.edu>
+ * Copyright (c) 2009 Loren Merritt <lorenm@u.washington.edu>
*
* This file is part of FFmpeg.
*
@@ -27,7 +27,7 @@
/// Holds instance-specific information for gradfun.
typedef struct GradFunContext {
const AVClass *class;
- double strength; ///< user specified strength, used to define thresh
+ float strength;
int thresh; ///< threshold for gradient algorithm
int radius; ///< blur radius
int chroma_w; ///< width of the chroma planes
diff --git a/ffmpeg/libavfilter/graphdump.c b/ffmpeg/libavfilter/graphdump.c
index 756f63d..1b59321 100644
--- a/ffmpeg/libavfilter/graphdump.c
+++ b/ffmpeg/libavfilter/graphdump.c
@@ -71,14 +71,14 @@ static void avfilter_graph_dump_to_buf(AVBPrint *buf, AVFilterGraph *graph)
unsigned lname = strlen(filter->name);
unsigned ltype = strlen(filter->filter->name);
- for (j = 0; j < filter->input_count; j++) {
+ for (j = 0; j < filter->nb_inputs; j++) {
AVFilterLink *l = filter->inputs[j];
unsigned ln = strlen(l->src->name) + 1 + strlen(l->srcpad->name);
max_src_name = FFMAX(max_src_name, ln);
max_in_name = FFMAX(max_in_name, strlen(l->dstpad->name));
max_in_fmt = FFMAX(max_in_fmt, print_link_prop(NULL, l));
}
- for (j = 0; j < filter->output_count; j++) {
+ for (j = 0; j < filter->nb_outputs; j++) {
AVFilterLink *l = filter->outputs[j];
unsigned ln = strlen(l->dst->name) + 1 + strlen(l->dstpad->name);
max_dst_name = FFMAX(max_dst_name, ln);
@@ -88,17 +88,17 @@ static void avfilter_graph_dump_to_buf(AVBPrint *buf, AVFilterGraph *graph)
in_indent = max_src_name + max_in_name + max_in_fmt;
in_indent += in_indent ? 4 : 0;
width = FFMAX(lname + 2, ltype + 4);
- height = FFMAX3(2, filter->input_count, filter->output_count);
+ height = FFMAX3(2, filter->nb_inputs, filter->nb_outputs);
av_bprint_chars(buf, ' ', in_indent);
av_bprintf(buf, "+");
av_bprint_chars(buf, '-', width);
av_bprintf(buf, "+\n");
for (j = 0; j < height; j++) {
- unsigned in_no = j - (height - filter->input_count ) / 2;
- unsigned out_no = j - (height - filter->output_count) / 2;
+ unsigned in_no = j - (height - filter->nb_inputs ) / 2;
+ unsigned out_no = j - (height - filter->nb_outputs) / 2;
/* Input link */
- if (in_no < filter->input_count) {
+ if (in_no < filter->nb_inputs) {
AVFilterLink *l = filter->inputs[in_no];
e = buf->len + max_src_name + 2;
av_bprintf(buf, "%s:%s", l->src->name, l->srcpad->name);
@@ -127,7 +127,7 @@ static void avfilter_graph_dump_to_buf(AVBPrint *buf, AVFilterGraph *graph)
av_bprintf(buf, "|");
/* Output link */
- if (out_no < filter->output_count) {
+ if (out_no < filter->nb_outputs) {
AVFilterLink *l = filter->outputs[out_no];
unsigned ln = strlen(l->dst->name) + 1 +
strlen(l->dstpad->name);
diff --git a/ffmpeg/libavfilter/graphparser.c b/ffmpeg/libavfilter/graphparser.c
index 8d2fffc..7e25282 100644
--- a/ffmpeg/libavfilter/graphparser.c
+++ b/ffmpeg/libavfilter/graphparser.c
@@ -26,7 +26,6 @@
#include "libavutil/avstring.h"
#include "libavutil/mem.h"
#include "avfilter.h"
-#include "avfiltergraph.h"
#define WHITESPACES " \n\t"
@@ -90,14 +89,14 @@ static char *parse_link_name(const char **buf, void *log_ctx)
* @param filt_name the name of the filter to create
* @param args the arguments provided to the filter during its initialization
* @param log_ctx the log context to use
- * @return 0 in case of success, a negative AVERROR code otherwise
+ * @return >= 0 in case of success, a negative AVERROR code otherwise
*/
static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int index,
const char *filt_name, const char *args, void *log_ctx)
{
AVFilter *filt;
char inst_name[30];
- char tmp_args[256];
+ char *tmp_args = NULL;
int ret;
snprintf(inst_name, sizeof(inst_name), "Parsed_%s_%d", filt_name, index);
@@ -110,32 +109,35 @@ static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int ind
return AVERROR(EINVAL);
}
- ret = avfilter_open(filt_ctx, filt, inst_name);
+ *filt_ctx = avfilter_graph_alloc_filter(ctx, filt, inst_name);
if (!*filt_ctx) {
av_log(log_ctx, AV_LOG_ERROR,
"Error creating filter '%s'\n", filt_name);
- return ret;
- }
-
- if ((ret = avfilter_graph_add_filter(ctx, *filt_ctx)) < 0) {
- avfilter_free(*filt_ctx);
- return ret;
+ return AVERROR(ENOMEM);
}
- if (!strcmp(filt_name, "scale") && args && !strstr(args, "flags")
- && ctx->scale_sws_opts) {
- snprintf(tmp_args, sizeof(tmp_args), "%s:%s",
+ if (!strcmp(filt_name, "scale") && args && !strstr(args, "flags") &&
+ ctx->scale_sws_opts) {
+ tmp_args = av_asprintf("%s:%s",
args, ctx->scale_sws_opts);
+ if (!tmp_args)
+ return AVERROR(ENOMEM);
args = tmp_args;
}
- if ((ret = avfilter_init_filter(*filt_ctx, args, NULL)) < 0) {
+ ret = avfilter_init_str(*filt_ctx, args);
+ if (ret < 0) {
av_log(log_ctx, AV_LOG_ERROR,
- "Error initializing filter '%s' with args '%s'\n", filt_name, args);
- return ret;
+ "Error initializing filter '%s'", filt_name);
+ if (args)
+ av_log(log_ctx, AV_LOG_ERROR, " with args '%s'", args);
+ av_log(log_ctx, AV_LOG_ERROR, "\n");
+ avfilter_free(*filt_ctx);
+ *filt_ctx = NULL;
}
- return 0;
+ av_free(tmp_args);
+ return ret;
}
/**
@@ -152,7 +154,7 @@ static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int ind
* @param index an index which is assigned to the created filter
* instance, and which is supposed to be unique for each filter
* instance added to the filtergraph
- * @return 0 in case of success, a negative AVERROR code otherwise
+ * @return >= 0 in case of success, a negative AVERROR code otherwise
*/
static int parse_filter(AVFilterContext **filt_ctx, const char **buf, AVFilterGraph *graph,
int index, void *log_ctx)
@@ -436,8 +438,8 @@ int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
return 0;
fail:
- for (; graph->nb_filters > 0; graph->nb_filters--)
- avfilter_free(graph->filters[graph->nb_filters - 1]);
+ while (graph->nb_filters)
+ avfilter_free(graph->filters[0]);
av_freep(&graph->filters);
avfilter_inout_free(&open_inputs);
avfilter_inout_free(&open_outputs);
@@ -449,14 +451,12 @@ int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
return ret;
}
+#if HAVE_INCOMPATIBLE_LIBAV_ABI || !FF_API_OLD_GRAPH_PARSE
int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
- AVFilterInOut **open_inputs_ptr, AVFilterInOut **open_outputs_ptr,
- void *log_ctx)
+ AVFilterInOut *open_inputs,
+ AVFilterInOut *open_outputs, void *log_ctx)
{
-#if 0
int ret;
- AVFilterInOut *open_inputs = open_inputs_ptr ? *open_inputs_ptr : NULL;
- AVFilterInOut *open_outputs = open_outputs_ptr ? *open_outputs_ptr : NULL;
AVFilterInOut *cur, *match, *inputs = NULL, *outputs = NULL;
if ((ret = avfilter_graph_parse2(graph, filters, &inputs, &outputs)) < 0)
@@ -504,20 +504,28 @@ int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
fail:
if (ret < 0) {
- for (; graph->nb_filters > 0; graph->nb_filters--)
- avfilter_free(graph->filters[graph->nb_filters - 1]);
+ while (graph->nb_filters)
+ avfilter_free(graph->filters[0]);
av_freep(&graph->filters);
}
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
- /* clear open_in/outputs only if not passed as parameters */
- if (open_inputs_ptr) *open_inputs_ptr = open_inputs;
- else avfilter_inout_free(&open_inputs);
- if (open_outputs_ptr) *open_outputs_ptr = open_outputs;
- else avfilter_inout_free(&open_outputs);
+ avfilter_inout_free(&open_inputs);
+ avfilter_inout_free(&open_outputs);
return ret;
-}
#else
+int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut **inputs, AVFilterInOut **outputs,
+ void *log_ctx)
+{
+ return avfilter_graph_parse_ptr(graph, filters, inputs, outputs, log_ctx);
+#endif
+}
+
+int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut **open_inputs_ptr, AVFilterInOut **open_outputs_ptr,
+ void *log_ctx)
+{
int index = 0, ret = 0;
char chr = 0;
@@ -539,7 +547,7 @@ int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
if ((ret = parse_filter(&filter, &filters, graph, index, log_ctx)) < 0)
goto end;
- if (filter->input_count == 1 && !curr_inputs && !index) {
+ if (filter->nb_inputs == 1 && !curr_inputs && !index) {
/* First input pad, assume it is "[in]" if not specified */
const char *tmp = "[in]";
if ((ret = parse_inputs(&tmp, &curr_inputs, &open_outputs, log_ctx)) < 0)
@@ -591,11 +599,9 @@ end:
avfilter_inout_free(&curr_inputs);
if (ret < 0) {
- for (; graph->nb_filters > 0; graph->nb_filters--)
- avfilter_free(graph->filters[graph->nb_filters - 1]);
+ while (graph->nb_filters)
+ avfilter_free(graph->filters[0]);
av_freep(&graph->filters);
}
return ret;
}
-
-#endif
diff --git a/ffmpeg/libavfilter/internal.h b/ffmpeg/libavfilter/internal.h
index 9a42ae0..5e19698 100644
--- a/ffmpeg/libavfilter/internal.h
+++ b/ffmpeg/libavfilter/internal.h
@@ -24,9 +24,12 @@
* internal API functions
*/
+#include "libavutil/internal.h"
#include "avfilter.h"
#include "avfiltergraph.h"
#include "formats.h"
+#include "thread.h"
+#include "version.h"
#include "video.h"
#define POOL_SIZE 32
@@ -141,24 +144,23 @@ struct AVFilterPad {
};
#endif
+struct AVFilterGraphInternal {
+ void *thread;
+ avfilter_execute_func *thread_execute;
+};
+
+struct AVFilterInternal {
+ avfilter_execute_func *execute;
+};
+
+#if FF_API_AVFILTERBUFFER
/** default handler for freeing audio/video buffer when there are no references left */
void ff_avfilter_default_free_buffer(AVFilterBuffer *buf);
+#endif
/** Tell is a format is contained in the provided list terminated by -1. */
int ff_fmt_is_in(int fmt, const int *fmts);
-/**
- * Return a copy of a list of integers terminated by -1, or NULL in
- * case of copy failure.
- */
-int *ff_copy_int_list(const int * const list);
-
-/**
- * Return a copy of a list of 64-bit integers, or NULL in case of
- * copy failure.
- */
-int64_t *ff_copy_int64_list(const int64_t * const list);
-
/* Functions to parse audio format arguments */
/**
@@ -167,7 +169,7 @@ int64_t *ff_copy_int64_list(const int64_t * const list);
* @param ret pixel format pointer to where the value should be written
* @param arg string to parse
* @param log_ctx log context
- * @return 0 in case of success, a negative AVERROR code on error
+ * @return >= 0 in case of success, a negative AVERROR code on error
*/
int ff_parse_pixel_format(enum AVPixelFormat *ret, const char *arg, void *log_ctx);
@@ -177,7 +179,7 @@ int ff_parse_pixel_format(enum AVPixelFormat *ret, const char *arg, void *log_ct
* @param ret unsigned integer pointer to where the value should be written
* @param arg string to parse
* @param log_ctx log context
- * @return 0 in case of success, a negative AVERROR code on error
+ * @return >= 0 in case of success, a negative AVERROR code on error
*/
int ff_parse_sample_rate(int *ret, const char *arg, void *log_ctx);
@@ -187,7 +189,7 @@ int ff_parse_sample_rate(int *ret, const char *arg, void *log_ctx);
* @param ret unsigned AVRational pointer to where the value should be written
* @param arg string to parse
* @param log_ctx log context
- * @return 0 in case of success, a negative AVERROR code on error
+ * @return >= 0 in case of success, a negative AVERROR code on error
*/
int ff_parse_time_base(AVRational *ret, const char *arg, void *log_ctx);
@@ -197,7 +199,7 @@ int ff_parse_time_base(AVRational *ret, const char *arg, void *log_ctx);
* @param ret integer pointer to where the value should be written
* @param arg string to parse
* @param log_ctx log context
- * @return 0 in case of success, a negative AVERROR code on error
+ * @return >= 0 in case of success, a negative AVERROR code on error
*/
int ff_parse_sample_format(int *ret, const char *arg, void *log_ctx);
@@ -205,11 +207,14 @@ int ff_parse_sample_format(int *ret, const char *arg, void *log_ctx);
* Parse a channel layout or a corresponding integer representation.
*
* @param ret 64bit integer pointer to where the value should be written.
+ * @param nret integer pointer to the number of channels;
+ * if not NULL, then unknown channel layouts are accepted
* @param arg string to parse
* @param log_ctx log context
- * @return 0 in case of success, a negative AVERROR code on error
+ * @return >= 0 in case of success, a negative AVERROR code on error
*/
-int ff_parse_channel_layout(int64_t *ret, const char *arg, void *log_ctx);
+int ff_parse_channel_layout(int64_t *ret, int *nret, const char *arg,
+ void *log_ctx);
void ff_update_link_current_pts(AVFilterLink *link, int64_t pts);
@@ -245,31 +250,38 @@ void ff_tlog_link(void *ctx, AVFilterLink *link, int end);
* @param pads Pointer to the pointer to the beginning of the list of pads
* @param links Pointer to the pointer to the beginning of the list of links
* @param newpad The new pad to add. A copy is made when adding.
+ * @return >= 0 in case of success, a negative AVERROR code on error
*/
-void ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
+int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
AVFilterPad **pads, AVFilterLink ***links,
AVFilterPad *newpad);
/** Insert a new input pad for the filter. */
-static inline void ff_insert_inpad(AVFilterContext *f, unsigned index,
+static inline int ff_insert_inpad(AVFilterContext *f, unsigned index,
AVFilterPad *p)
{
- ff_insert_pad(index, &f->nb_inputs, offsetof(AVFilterLink, dstpad),
+ int ret = ff_insert_pad(index, &f->nb_inputs, offsetof(AVFilterLink, dstpad),
&f->input_pads, &f->inputs, p);
#if FF_API_FOO_COUNT
+FF_DISABLE_DEPRECATION_WARNINGS
f->input_count = f->nb_inputs;
+FF_ENABLE_DEPRECATION_WARNINGS
#endif
+ return ret;
}
/** Insert a new output pad for the filter. */
-static inline void ff_insert_outpad(AVFilterContext *f, unsigned index,
+static inline int ff_insert_outpad(AVFilterContext *f, unsigned index,
AVFilterPad *p)
{
- ff_insert_pad(index, &f->nb_outputs, offsetof(AVFilterLink, srcpad),
+ int ret = ff_insert_pad(index, &f->nb_outputs, offsetof(AVFilterLink, srcpad),
&f->output_pads, &f->outputs, p);
#if FF_API_FOO_COUNT
+FF_DISABLE_DEPRECATION_WARNINGS
f->output_count = f->nb_outputs;
+FF_ENABLE_DEPRECATION_WARNINGS
#endif
+ return ret;
}
/**
@@ -325,4 +337,33 @@ int ff_buffersink_read_samples_compat(AVFilterContext *ctx, AVFilterBufferRef **
*/
int ff_filter_frame(AVFilterLink *link, AVFrame *frame);
+/**
+ * Flags for AVFilterLink.flags.
+ */
+enum {
+
+ /**
+ * Frame requests may need to loop in order to be fulfilled.
+ * A filter must set this flag on an output link if it may return 0 in
+ * request_frame() without filtering a frame.
+ */
+ FF_LINK_FLAG_REQUEST_LOOP = 1,
+
+};
+
+/**
+ * Allocate a new filter context and return it.
+ *
+ * @param filter what filter to create an instance of
+ * @param inst_name name to give to the new filter context
+ *
+ * @return newly created filter context or NULL on failure
+ */
+AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name);
+
+/**
+ * Remove a filter from a graph.
+ */
+void ff_filter_graph_remove_filter(AVFilterGraph *graph, AVFilterContext *filter);
+
#endif /* AVFILTER_INTERNAL_H */
diff --git a/ffmpeg/libavfilter/lavfutils.c b/ffmpeg/libavfilter/lavfutils.c
index 8b6b114..58d98cf 100644
--- a/ffmpeg/libavfilter/lavfutils.c
+++ b/ffmpeg/libavfilter/lavfutils.c
@@ -33,6 +33,8 @@ int ff_load_image(uint8_t *data[4], int linesize[4],
int frame_decoded, ret = 0;
AVPacket pkt;
+ av_init_packet(&pkt);
+
av_register_all();
iformat = av_find_input_format("image2");
@@ -55,7 +57,7 @@ int ff_load_image(uint8_t *data[4], int linesize[4],
goto end;
}
- if (!(frame = avcodec_alloc_frame()) ) {
+ if (!(frame = av_frame_alloc()) ) {
av_log(log_ctx, AV_LOG_ERROR, "Failed to alloc frame\n");
ret = AVERROR(ENOMEM);
goto end;
@@ -85,6 +87,7 @@ int ff_load_image(uint8_t *data[4], int linesize[4],
av_image_copy(data, linesize, (const uint8_t **)frame->data, frame->linesize, *pix_fmt, *w, *h);
end:
+ av_free_packet(&pkt);
avcodec_close(codec_ctx);
avformat_close_input(&format_ctx);
av_freep(&frame);
diff --git a/ffmpeg/libavfilter/lavfutils.h b/ffmpeg/libavfilter/lavfutils.h
index a310e83..2d5308f 100644
--- a/ffmpeg/libavfilter/lavfutils.h
+++ b/ffmpeg/libavfilter/lavfutils.h
@@ -34,7 +34,7 @@
* @param pix_fmt pointer to the pixel format of the loaded image
* @param filename the name of the image file to load
* @param log_ctx log context
- * @return 0 in case of success, a negative error code otherwise.
+ * @return >= 0 in case of success, a negative error code otherwise.
*/
int ff_load_image(uint8_t *data[4], int linesize[4],
int *w, int *h, enum AVPixelFormat *pix_fmt,
diff --git a/ffmpeg/libavfilter/libavfilter.pc b/ffmpeg/libavfilter/libavfilter.pc
index 12bf013..2e82266 100644
--- a/ffmpeg/libavfilter/libavfilter.pc
+++ b/ffmpeg/libavfilter/libavfilter.pc
@@ -5,10 +5,10 @@ includedir=${prefix}/include
Name: libavfilter
Description: FFmpeg audio/video filtering library
-Version: 3.48.100
+Version: 4.0.103
Requires:
-Requires.private: libpostproc = 52.2.100, libswresample = 0.17.102, libswscale = 2.2.100, libavformat = 55.0.100, libavcodec = 55.1.100, libavutil = 52.22.100
+Requires.private: libpostproc = 52.3.100, libswresample = 0.17.104, libswscale = 2.5.101, libavformat = 55.22.100, libavcodec = 55.46.100, libavutil = 52.59.100
Conflicts:
-Libs: -L${libdir} -lavfilter
-Libs.private: -ldl -lXfixes -lXext -lX11 -ljack -lasound -lxvidcore -lx264 -lvorbisenc -lvorbis -logg -ltheoraenc -ltheoradec -logg -lschroedinger-1.0 -lmp3lame -lfaac -lm -pthread -lz -lrt
+Libs: -L${libdir} -lavfilter
+Libs.private: -lXfixes -lXext -lX11 -lx264 -lmp3lame -lm -lz -pthread
Cflags: -I${includedir}
diff --git a/ffmpeg/libavfilter/libmpcodecs/help_mp.h b/ffmpeg/libavfilter/libmpcodecs/help_mp.h
deleted file mode 100644
index 6ceb630..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/help_mp.h
+++ /dev/null
@@ -1,2126 +0,0 @@
-/* WARNING! This is a generated file, do NOT edit.
- * See the help/ subdirectory for the editable files. */
-
-#ifndef MPLAYER_HELP_MP_H
-#define MPLAYER_HELP_MP_H
-
-// $Revision: 32397 $
-// MASTER FILE. Use this file as base for translations.
-// Translated files should be sent to the mplayer-DOCS mailing list or
-// to the help messages maintainer, see DOCS/tech/MAINTAINERS.
-// The header of the translated file should contain credits and contact
-// information. Before major releases we will notify all translators to update
-// their files. Please do not simply translate and forget this, outdated
-// translations quickly become worthless. To help us spot outdated files put a
-// note like "sync'ed with help_mp-en.h XXX" in the header of the translation.
-// Do NOT translate the above lines, just follow the instructions.
-
-
-// ========================= MPlayer help ===========================
-
-static const char help_text[]=
-"Usage: mplayer [options] [url|path/]filename\n"
-"\n"
-"Basic options: (complete list in the man page)\n"
-" -vo <drv> select video output driver ('-vo help' for a list)\n"
-" -ao <drv> select audio output driver ('-ao help' for a list)\n"
-#ifdef CONFIG_VCD
-" vcd://<trackno> play (S)VCD (Super Video CD) track (raw device, no mount)\n"
-#endif
-#ifdef CONFIG_DVDREAD
-" dvd://<titleno> play DVD title from device instead of plain file\n"
-#endif
-" -alang/-slang select DVD audio/subtitle language (by 2-char country code)\n"
-" -ss <position> seek to given (seconds or hh:mm:ss) position\n"
-" -nosound do not play sound\n"
-" -fs fullscreen playback (or -vm, -zoom, details in the man page)\n"
-" -x <x> -y <y> set display resolution (for use with -vm or -zoom)\n"
-" -sub <file> specify subtitle file to use (also see -subfps, -subdelay)\n"
-" -playlist <file> specify playlist file\n"
-" -vid x -aid y select video (x) and audio (y) stream to play\n"
-" -fps x -srate y change video (x fps) and audio (y Hz) rate\n"
-" -pp <quality> enable postprocessing filter (details in the man page)\n"
-" -framedrop enable frame dropping (for slow machines)\n"
-"\n"
-"Basic keys: (complete list in the man page, also check input.conf)\n"
-" <- or -> seek backward/forward 10 seconds\n"
-" down or up seek backward/forward 1 minute\n"
-" pgdown or pgup seek backward/forward 10 minutes\n"
-" < or > step backward/forward in playlist\n"
-" p or SPACE pause movie (press any key to continue)\n"
-" q or ESC stop playing and quit program\n"
-" + or - adjust audio delay by +/- 0.1 second\n"
-" o cycle OSD mode: none / seekbar / seekbar + timer\n"
-" * or / increase or decrease PCM volume\n"
-" x or z adjust subtitle delay by +/- 0.1 second\n"
-" r or t adjust subtitle position up/down, also see -vf expand\n"
-"\n"
-" * * * SEE THE MAN PAGE FOR DETAILS, FURTHER (ADVANCED) OPTIONS AND KEYS * * *\n"
-"\n";
-
-// ========================= MPlayer messages ===========================
-
-// mplayer.c
-#define MSGTR_Exiting "\nExiting...\n"
-#define MSGTR_ExitingHow "\nExiting... (%s)\n"
-#define MSGTR_Exit_quit "Quit"
-#define MSGTR_Exit_eof "End of file"
-#define MSGTR_Exit_error "Fatal error"
-#define MSGTR_IntBySignal "\nMPlayer interrupted by signal %d in module: %s\n"
-#define MSGTR_NoHomeDir "Cannot find HOME directory.\n"
-#define MSGTR_GetpathProblem "get_path(\"config\") problem\n"
-#define MSGTR_CreatingCfgFile "Creating config file: %s\n"
-#define MSGTR_CantLoadFont "Cannot load bitmap font '%s'.\n"
-#define MSGTR_CantLoadSub "Cannot load subtitles '%s'.\n"
-#define MSGTR_DumpSelectedStreamMissing "dump: FATAL: Selected stream missing!\n"
-#define MSGTR_CantOpenDumpfile "Cannot open dump file.\n"
-#define MSGTR_CoreDumped "Core dumped ;)\n"
-#define MSGTR_DumpBytesWrittenPercent "dump: %"PRIu64" bytes written (~%.1f%%)\r"
-#define MSGTR_DumpBytesWritten "dump: %"PRIu64" bytes written\r"
-#define MSGTR_DumpBytesWrittenTo "dump: %"PRIu64" bytes written to '%s'.\n"
-#define MSGTR_FPSnotspecified "FPS not specified in the header or invalid, use the -fps option.\n"
-#define MSGTR_TryForceAudioFmtStr "Trying to force audio codec driver family %s...\n"
-#define MSGTR_CantFindAudioCodec "Cannot find codec for audio format 0x%X.\n"
-#define MSGTR_TryForceVideoFmtStr "Trying to force video codec driver family %s...\n"
-#define MSGTR_CantFindVideoCodec "Cannot find codec matching selected -vo and video format 0x%X.\n"
-#define MSGTR_CannotInitVO "FATAL: Cannot initialize video driver.\n"
-#define MSGTR_CannotInitAO "Could not open/initialize audio device -> no sound.\n"
-#define MSGTR_StartPlaying "Starting playback...\n"
-
-#define MSGTR_SystemTooSlow "\n\n"\
-" ************************************************\n"\
-" **** Your system is too SLOW to play this! ****\n"\
-" ************************************************\n\n"\
-"Possible reasons, problems, workarounds:\n"\
-"- Most common: broken/buggy _audio_ driver\n"\
-" - Try -ao sdl or use the OSS emulation of ALSA.\n"\
-" - Experiment with different values for -autosync, 30 is a good start.\n"\
-"- Slow video output\n"\
-" - Try a different -vo driver (-vo help for a list) or try -framedrop!\n"\
-"- Slow CPU\n"\
-" - Don't try to play a big DVD/DivX on a slow CPU! Try some of the lavdopts,\n"\
-" e.g. -vfm ffmpeg -lavdopts lowres=1:fast:skiploopfilter=all.\n"\
-"- Broken file\n"\
-" - Try various combinations of -nobps -ni -forceidx -mc 0.\n"\
-"- Slow media (NFS/SMB mounts, DVD, VCD etc)\n"\
-" - Try -cache 8192.\n"\
-"- Are you using -cache to play a non-interleaved AVI file?\n"\
-" - Try -nocache.\n"\
-"Read DOCS/HTML/en/video.html for tuning/speedup tips.\n"\
-"If none of this helps you, read DOCS/HTML/en/bugreports.html.\n\n"
-
-#define MSGTR_NoGui "MPlayer was compiled WITHOUT GUI support.\n"
-#define MSGTR_GuiNeedsX "MPlayer GUI requires X11.\n"
-#define MSGTR_Playing "\nPlaying %s.\n"
-#define MSGTR_NoSound "Audio: no sound\n"
-#define MSGTR_FPSforced "FPS forced to be %5.3f (ftime: %5.3f).\n"
-#define MSGTR_AvailableVideoOutputDrivers "Available video output drivers:\n"
-#define MSGTR_AvailableAudioOutputDrivers "Available audio output drivers:\n"
-#define MSGTR_AvailableAudioCodecs "Available audio codecs:\n"
-#define MSGTR_AvailableVideoCodecs "Available video codecs:\n"
-#define MSGTR_AvailableAudioFm "Available (compiled-in) audio codec families/drivers:\n"
-#define MSGTR_AvailableVideoFm "Available (compiled-in) video codec families/drivers:\n"
-#define MSGTR_AvailableFsType "Available fullscreen layer change modes:\n"
-#define MSGTR_CannotReadVideoProperties "Video: Cannot read properties.\n"
-#define MSGTR_NoStreamFound "No stream found.\n"
-#define MSGTR_ErrorInitializingVODevice "Error opening/initializing the selected video_out (-vo) device.\n"
-#define MSGTR_ForcedVideoCodec "Forced video codec: %s\n"
-#define MSGTR_ForcedAudioCodec "Forced audio codec: %s\n"
-#define MSGTR_Video_NoVideo "Video: no video\n"
-#define MSGTR_NotInitializeVOPorVO "\nFATAL: Could not initialize video filters (-vf) or video output (-vo).\n"
-#define MSGTR_Paused " ===== PAUSE =====" // no more than 23 characters (status line for audio files)
-#define MSGTR_PlaylistLoadUnable "\nUnable to load playlist %s.\n"
-#define MSGTR_Exit_SIGILL_RTCpuSel \
-"- MPlayer crashed by an 'Illegal Instruction'.\n"\
-" It may be a bug in our new runtime CPU-detection code...\n"\
-" Please read DOCS/HTML/en/bugreports.html.\n"
-#define MSGTR_Exit_SIGILL \
-"- MPlayer crashed by an 'Illegal Instruction'.\n"\
-" It usually happens when you run it on a CPU different than the one it was\n"\
-" compiled/optimized for.\n"\
-" Verify this!\n"
-#define MSGTR_Exit_SIGSEGV_SIGFPE \
-"- MPlayer crashed by bad usage of CPU/FPU/RAM.\n"\
-" Recompile MPlayer with --enable-debug and make a 'gdb' backtrace and\n"\
-" disassembly. Details in DOCS/HTML/en/bugreports_what.html#bugreports_crash.\n"
-#define MSGTR_Exit_SIGCRASH \
-"- MPlayer crashed. This shouldn't happen.\n"\
-" It can be a bug in the MPlayer code _or_ in your drivers _or_ in your\n"\
-" gcc version. If you think it's MPlayer's fault, please read\n"\
-" DOCS/HTML/en/bugreports.html and follow the instructions there. We can't and\n"\
-" won't help unless you provide this information when reporting a possible bug.\n"
-#define MSGTR_LoadingConfig "Loading config '%s'\n"
-#define MSGTR_LoadingProtocolProfile "Loading protocol-related profile '%s'\n"
-#define MSGTR_LoadingExtensionProfile "Loading extension-related profile '%s'\n"
-#define MSGTR_AddedSubtitleFile "SUB: Added subtitle file (%d): %s\n"
-#define MSGTR_RemovedSubtitleFile "SUB: Removed subtitle file (%d): %s\n"
-#define MSGTR_ErrorOpeningOutputFile "Error opening file [%s] for writing!\n"
-#define MSGTR_RTCDeviceNotOpenable "Failed to open %s: %s (it should be readable by the user.)\n"
-#define MSGTR_LinuxRTCInitErrorIrqpSet "Linux RTC init error in ioctl (rtc_irqp_set %lu): %s\n"
-#define MSGTR_IncreaseRTCMaxUserFreq "Try adding \"echo %lu > /proc/sys/dev/rtc/max-user-freq\" to your system startup scripts.\n"
-#define MSGTR_LinuxRTCInitErrorPieOn "Linux RTC init error in ioctl (rtc_pie_on): %s\n"
-#define MSGTR_UsingTimingType "Using %s timing.\n"
-#define MSGTR_Getch2InitializedTwice "WARNING: getch2_init called twice!\n"
-#define MSGTR_DumpstreamFdUnavailable "Cannot dump this stream - no file descriptor available.\n"
-#define MSGTR_CantOpenLibmenuFilterWithThisRootMenu "Can't open libmenu video filter with root menu %s.\n"
-#define MSGTR_AudioFilterChainPreinitError "Error at audio filter chain pre-init!\n"
-#define MSGTR_LinuxRTCReadError "Linux RTC read error: %s\n"
-#define MSGTR_SoftsleepUnderflow "Warning! Softsleep underflow!\n"
-#define MSGTR_DvdnavNullEvent "DVDNAV Event NULL?!\n"
-#define MSGTR_DvdnavHighlightEventBroken "DVDNAV Event: Highlight event broken\n"
-#define MSGTR_DvdnavEvent "DVDNAV Event: %s\n"
-#define MSGTR_DvdnavHighlightHide "DVDNAV Event: Highlight Hide\n"
-#define MSGTR_DvdnavStillFrame "######################################## DVDNAV Event: Still Frame: %d sec(s)\n"
-#define MSGTR_DvdnavNavStop "DVDNAV Event: Nav Stop\n"
-#define MSGTR_DvdnavNavNOP "DVDNAV Event: Nav NOP\n"
-#define MSGTR_DvdnavNavSpuStreamChangeVerbose "DVDNAV Event: Nav SPU Stream Change: phys: %d/%d/%d logical: %d\n"
-#define MSGTR_DvdnavNavSpuStreamChange "DVDNAV Event: Nav SPU Stream Change: phys: %d logical: %d\n"
-#define MSGTR_DvdnavNavAudioStreamChange "DVDNAV Event: Nav Audio Stream Change: phys: %d logical: %d\n"
-#define MSGTR_DvdnavNavVTSChange "DVDNAV Event: Nav VTS Change\n"
-#define MSGTR_DvdnavNavCellChange "DVDNAV Event: Nav Cell Change\n"
-#define MSGTR_DvdnavNavSpuClutChange "DVDNAV Event: Nav SPU CLUT Change\n"
-#define MSGTR_DvdnavNavSeekDone "DVDNAV Event: Nav Seek Done\n"
-#define MSGTR_MenuCall "Menu call\n"
-#define MSGTR_MasterQuit "Option -udp-slave: exiting because master exited\n"
-#define MSGTR_InvalidIP "Option -udp-ip: invalid IP address\n"
-#define MSGTR_Forking "Forking...\n"
-#define MSGTR_Forked "Forked...\n"
-#define MSGTR_CouldntStartGdb "Couldn't start gdb\n"
-#define MSGTR_CouldntFork "Couldn't fork\n"
-#define MSGTR_FilenameTooLong "Filename is too long, can not load file or directory specific config files\n"
-#define MSGTR_AudioDeviceStuck "Audio device got stuck!\n"
-#define MSGTR_AudioOutputTruncated "Audio output truncated at end.\n"
-#define MSGTR_ASSCannotAddVideoFilter "ASS: cannot add video filter\n"
-#define MSGTR_PtsAfterFiltersMissing "pts after filters MISSING\n"
-#define MSGTR_CommandLine "CommandLine:"
-#define MSGTR_MenuInitFailed "Menu init failed.\n"
-
-// --- edit decision lists
-#define MSGTR_EdlOutOfMem "Can't allocate enough memory to hold EDL data.\n"
-#define MSGTR_EdlOutOfMemFile "Can't allocate enough memory to hold EDL file name [%s].\n"
-#define MSGTR_EdlRecordsNo "Read %d EDL actions.\n"
-#define MSGTR_EdlQueueEmpty "There are no EDL actions to take care of.\n"
-#define MSGTR_EdlCantOpenForWrite "Can't open EDL file [%s] for writing.\n"
-#define MSGTR_EdlCantOpenForRead "Can't open EDL file [%s] for reading.\n"
-#define MSGTR_EdlNOsh_video "Cannot use EDL without video, disabling.\n"
-#define MSGTR_EdlNOValidLine "Invalid EDL line: %s\n"
-#define MSGTR_EdlBadlyFormattedLine "Badly formatted EDL line [%d], discarding.\n"
-#define MSGTR_EdlBadLineOverlap "Last stop position was [%f]; next start is [%f].\n"\
-"Entries must be in chronological order, cannot overlap. Discarding.\n"
-#define MSGTR_EdlBadLineBadStop "Stop time has to be after start time.\n"
-#define MSGTR_EdloutBadStop "EDL skip canceled, last start > stop\n"
-#define MSGTR_EdloutStartSkip "EDL skip start, press 'i' again to end block.\n"
-#define MSGTR_EdloutEndSkip "EDL skip end, line written.\n"
-
-// mplayer.c OSD
-#define MSGTR_OSDenabled "enabled"
-#define MSGTR_OSDdisabled "disabled"
-#define MSGTR_OSDAudio "Audio: %s"
-#define MSGTR_OSDVideo "Video: %s"
-#define MSGTR_OSDChannel "Channel: %s"
-#define MSGTR_OSDSubDelay "Sub delay: %d ms"
-#define MSGTR_OSDSpeed "Speed: x %6.2f"
-#define MSGTR_OSDosd "OSD: %s"
-#define MSGTR_OSDChapter "Chapter: (%d) %s"
-#define MSGTR_OSDAngle "Angle: %d/%d"
-#define MSGTR_OSDDeinterlace "Deinterlace: %s"
-#define MSGTR_OSDCapturing "Capturing: %s"
-#define MSGTR_OSDCapturingFailure "Capturing failed"
-
-// property values
-#define MSGTR_Enabled "enabled"
-#define MSGTR_EnabledEdl "enabled (EDL)"
-#define MSGTR_Disabled "disabled"
-#define MSGTR_HardFrameDrop "hard"
-#define MSGTR_Unknown "unknown"
-#define MSGTR_Bottom "bottom"
-#define MSGTR_Center "center"
-#define MSGTR_Top "top"
-#define MSGTR_SubSourceFile "file"
-#define MSGTR_SubSourceVobsub "vobsub"
-#define MSGTR_SubSourceDemux "embedded"
-
-// OSD bar names
-#define MSGTR_Volume "Volume"
-#define MSGTR_Panscan "Panscan"
-#define MSGTR_Gamma "Gamma"
-#define MSGTR_Brightness "Brightness"
-#define MSGTR_Contrast "Contrast"
-#define MSGTR_Saturation "Saturation"
-#define MSGTR_Hue "Hue"
-#define MSGTR_Balance "Balance"
-
-// property state
-#define MSGTR_LoopStatus "Loop: %s"
-#define MSGTR_MuteStatus "Mute: %s"
-#define MSGTR_AVDelayStatus "A-V delay: %s"
-#define MSGTR_OnTopStatus "Stay on top: %s"
-#define MSGTR_RootwinStatus "Rootwin: %s"
-#define MSGTR_BorderStatus "Border: %s"
-#define MSGTR_FramedroppingStatus "Framedropping: %s"
-#define MSGTR_VSyncStatus "VSync: %s"
-#define MSGTR_SubSelectStatus "Subtitles: %s"
-#define MSGTR_SubSourceStatus "Sub source: %s"
-#define MSGTR_SubPosStatus "Sub position: %s/100"
-#define MSGTR_SubAlignStatus "Sub alignment: %s"
-#define MSGTR_SubDelayStatus "Sub delay: %s"
-#define MSGTR_SubScale "Sub Scale: %s"
-#define MSGTR_SubVisibleStatus "Subtitles: %s"
-#define MSGTR_SubForcedOnlyStatus "Forced sub only: %s"
-
-// mencoder.c
-#define MSGTR_UsingPass3ControlFile "Using pass3 control file: %s\n"
-#define MSGTR_MissingFilename "\nFilename missing.\n\n"
-#define MSGTR_CannotOpenFile_Device "Cannot open file/device.\n"
-#define MSGTR_CannotOpenDemuxer "Cannot open demuxer.\n"
-#define MSGTR_NoAudioEncoderSelected "\nNo audio encoder (-oac) selected. Select one (see -oac help) or use -nosound.\n"
-#define MSGTR_NoVideoEncoderSelected "\nNo video encoder (-ovc) selected. Select one (see -ovc help).\n"
-#define MSGTR_CannotOpenOutputFile "Cannot open output file '%s'.\n"
-#define MSGTR_EncoderOpenFailed "Failed to open the encoder.\n"
-#define MSGTR_MencoderWrongFormatAVI "\nWARNING: OUTPUT FILE FORMAT IS _AVI_. See -of help.\n"
-#define MSGTR_MencoderWrongFormatMPG "\nWARNING: OUTPUT FILE FORMAT IS _MPEG_. See -of help.\n"
-#define MSGTR_MissingOutputFilename "No output file specified, please see the -o option."
-#define MSGTR_ForcingOutputFourcc "Forcing output FourCC to %x [%.4s].\n"
-#define MSGTR_ForcingOutputAudiofmtTag "Forcing output audio format tag to 0x%x.\n"
-#define MSGTR_DuplicateFrames "\n%d duplicate frame(s)!\n"
-#define MSGTR_SkipFrame "\nSkipping frame!\n"
-#define MSGTR_ResolutionDoesntMatch "\nNew video file has different resolution or colorspace than the previous one.\n"
-#define MSGTR_FrameCopyFileMismatch "\nAll video files must have identical fps, resolution, and codec for -ovc copy.\n"
-#define MSGTR_AudioCopyFileMismatch "\nAll files must have identical audio codec and format for -oac copy.\n"
-#define MSGTR_NoAudioFileMismatch "\nCannot mix video-only files with audio and video files. Try -nosound.\n"
-#define MSGTR_NoSpeedWithFrameCopy "WARNING: -speed is not guaranteed to work correctly with -oac copy!\n"\
-"Your encode might be broken!\n"
-#define MSGTR_ErrorWritingFile "%s: Error writing file.\n"
-#define MSGTR_FlushingVideoFrames "\nFlushing video frames.\n"
-#define MSGTR_FiltersHaveNotBeenConfiguredEmptyFile "Filters have not been configured! Empty file?\n"
-#define MSGTR_RecommendedVideoBitrate "Recommended video bitrate for %s CD: %d\n"
-#define MSGTR_VideoStreamResult "\nVideo stream: %8.3f kbit/s (%d B/s) size: %"PRIu64" bytes %5.3f secs %d frames\n"
-#define MSGTR_AudioStreamResult "\nAudio stream: %8.3f kbit/s (%d B/s) size: %"PRIu64" bytes %5.3f secs\n"
-#define MSGTR_EdlSkipStartEndCurrent "EDL SKIP: Start: %.2f End: %.2f Current: V: %.2f A: %.2f \r"
-#define MSGTR_OpenedStream "success: format: %d data: 0x%X - 0x%x\n"
-#define MSGTR_VCodecFramecopy "videocodec: framecopy (%dx%d %dbpp fourcc=%x)\n"
-#define MSGTR_ACodecFramecopy "audiocodec: framecopy (format=%x chans=%d rate=%d bits=%d B/s=%d sample-%d)\n"
-#define MSGTR_CBRPCMAudioSelected "CBR PCM audio selected.\n"
-#define MSGTR_MP3AudioSelected "MP3 audio selected.\n"
-#define MSGTR_CannotAllocateBytes "Couldn't allocate %d bytes.\n"
-#define MSGTR_SettingAudioDelay "Setting audio delay to %5.3fs.\n"
-#define MSGTR_SettingVideoDelay "Setting video delay to %5.3fs.\n"
-#define MSGTR_LimitingAudioPreload "Limiting audio preload to 0.4s.\n"
-#define MSGTR_IncreasingAudioDensity "Increasing audio density to 4.\n"
-#define MSGTR_ZeroingAudioPreloadAndMaxPtsCorrection "Forcing audio preload to 0, max pts correction to 0.\n"
-#define MSGTR_LameVersion "LAME version %s (%s)\n\n"
-#define MSGTR_InvalidBitrateForLamePreset "Error: The bitrate specified is out of the valid range for this preset.\n"\
-"\n"\
-"When using this mode you must enter a value between \"8\" and \"320\".\n"\
-"\n"\
-"For further information try: \"-lameopts preset=help\"\n"
-#define MSGTR_InvalidLamePresetOptions "Error: You did not enter a valid profile and/or options with preset.\n"\
-"\n"\
-"Available profiles are:\n"\
-"\n"\
-" <fast> standard\n"\
-" <fast> extreme\n"\
-" insane\n"\
-" <cbr> (ABR Mode) - The ABR Mode is implied. To use it,\n"\
-" simply specify a bitrate. For example:\n"\
-" \"preset=185\" activates this\n"\
-" preset and uses 185 as an average kbps.\n"\
-"\n"\
-" Some examples:\n"\
-"\n"\
-" \"-lameopts fast:preset=standard \"\n"\
-" or \"-lameopts cbr:preset=192 \"\n"\
-" or \"-lameopts preset=172 \"\n"\
-" or \"-lameopts preset=extreme \"\n"\
-"\n"\
-"For further information try: \"-lameopts preset=help\"\n"
-#define MSGTR_LamePresetsLongInfo "\n"\
-"The preset switches are designed to provide the highest possible quality.\n"\
-"\n"\
-"They have for the most part been subjected to and tuned via rigorous double\n"\
-"blind listening tests to verify and achieve this objective.\n"\
-"\n"\
-"These are continually updated to coincide with the latest developments that\n"\
-"occur and as a result should provide you with nearly the best quality\n"\
-"currently possible from LAME.\n"\
-"\n"\
-"To activate these presets:\n"\
-"\n"\
-" For VBR modes (generally highest quality):\n"\
-"\n"\
-" \"preset=standard\" This preset should generally be transparent\n"\
-" to most people on most music and is already\n"\
-" quite high in quality.\n"\
-"\n"\
-" \"preset=extreme\" If you have extremely good hearing and similar\n"\
-" equipment, this preset will generally provide\n"\
-" slightly higher quality than the \"standard\"\n"\
-" mode.\n"\
-"\n"\
-" For CBR 320kbps (highest quality possible from the preset switches):\n"\
-"\n"\
-" \"preset=insane\" This preset will usually be overkill for most\n"\
-" people and most situations, but if you must\n"\
-" have the absolute highest quality with no\n"\
-" regard to filesize, this is the way to go.\n"\
-"\n"\
-" For ABR modes (high quality per given bitrate but not as high as VBR):\n"\
-"\n"\
-" \"preset=<kbps>\" Using this preset will usually give you good\n"\
-" quality at a specified bitrate. Depending on the\n"\
-" bitrate entered, this preset will determine the\n"\
-" optimal settings for that particular situation.\n"\
-" While this approach works, it is not nearly as\n"\
-" flexible as VBR, and usually will not attain the\n"\
-" same level of quality as VBR at higher bitrates.\n"\
-"\n"\
-"The following options are also available for the corresponding profiles:\n"\
-"\n"\
-" <fast> standard\n"\
-" <fast> extreme\n"\
-" insane\n"\
-" <cbr> (ABR Mode) - The ABR Mode is implied. To use it,\n"\
-" simply specify a bitrate. For example:\n"\
-" \"preset=185\" activates this\n"\
-" preset and uses 185 as an average kbps.\n"\
-"\n"\
-" \"fast\" - Enables the new fast VBR for a particular profile. The\n"\
-" disadvantage to the speed switch is that often times the\n"\
-" bitrate will be slightly higher than with the normal mode\n"\
-" and quality may be slightly lower also.\n"\
-" Warning: with the current version fast presets might result in too\n"\
-" high bitrate compared to regular presets.\n"\
-"\n"\
-" \"cbr\" - If you use the ABR mode (read above) with a significant\n"\
-" bitrate such as 80, 96, 112, 128, 160, 192, 224, 256, 320,\n"\
-" you can use the \"cbr\" option to force CBR mode encoding\n"\
-" instead of the standard abr mode. ABR does provide higher\n"\
-" quality but CBR may be useful in situations such as when\n"\
-" streaming an MP3 over the internet may be important.\n"\
-"\n"\
-" For example:\n"\
-"\n"\
-" \"-lameopts fast:preset=standard \"\n"\
-" or \"-lameopts cbr:preset=192 \"\n"\
-" or \"-lameopts preset=172 \"\n"\
-" or \"-lameopts preset=extreme \"\n"\
-"\n"\
-"\n"\
-"A few aliases are available for ABR mode:\n"\
-"phone => 16kbps/mono phon+/lw/mw-eu/sw => 24kbps/mono\n"\
-"mw-us => 40kbps/mono voice => 56kbps/mono\n"\
-"fm/radio/tape => 112kbps hifi => 160kbps\n"\
-"cd => 192kbps studio => 256kbps"
-#define MSGTR_LameCantInit \
-"Cannot set LAME options, check bitrate/samplerate, some very low bitrates\n"\
-"(<32) need lower samplerates (i.e. -srate 8000).\n"\
-"If everything else fails, try a preset."
-#define MSGTR_ConfigFileError "Config file error"
-#define MSGTR_ErrorParsingCommandLine "error parsing command line"
-#define MSGTR_VideoStreamRequired "Video stream is mandatory!\n"
-#define MSGTR_ForcingInputFPS "Input fps will be interpreted as %5.3f instead.\n"
-#define MSGTR_RawvideoDoesNotSupportAudio "Output file format RAWVIDEO does not support audio - disabling audio.\n"
-#define MSGTR_DemuxerDoesntSupportNosound "This demuxer doesn't support -nosound yet.\n"
-#define MSGTR_MemAllocFailed "Memory allocation failed.\n"
-#define MSGTR_NoMatchingFilter "Couldn't find matching filter/ao format!\n"
-#define MSGTR_MP3WaveFormatSizeNot30 "sizeof(MPEGLAYER3WAVEFORMAT)==%d!=30, maybe broken C compiler?\n"
-#define MSGTR_NoLavcAudioCodecName "Audio LAVC, Missing codec name!\n"
-#define MSGTR_LavcAudioCodecNotFound "Audio LAVC, couldn't find encoder for codec %s.\n"
-#define MSGTR_CouldntAllocateLavcContext "Audio LAVC, couldn't allocate context!\n"
-#define MSGTR_CouldntOpenCodec "Couldn't open codec %s, br=%d.\n"
-#define MSGTR_CantCopyAudioFormat "Audio format 0x%x is incompatible with '-oac copy', please try '-oac pcm' instead or use '-fafmttag' to override it.\n"
-
-// cfg-mencoder.h
-#define MSGTR_MEncoderMP3LameHelp "\n\n"\
-" vbr=<0-4> variable bitrate method\n"\
-" 0: cbr (constant bitrate)\n"\
-" 1: mt (Mark Taylor VBR algorithm)\n"\
-" 2: rh (Robert Hegemann VBR algorithm - default)\n"\
-" 3: abr (average bitrate)\n"\
-" 4: mtrh (Mark Taylor Robert Hegemann VBR algorithm)\n"\
-"\n"\
-" abr average bitrate\n"\
-"\n"\
-" cbr constant bitrate\n"\
-" Also forces CBR mode encoding on subsequent ABR presets modes.\n"\
-"\n"\
-" br=<0-1024> specify bitrate in kBit (CBR and ABR only)\n"\
-"\n"\
-" q=<0-9> quality (0-highest, 9-lowest) (only for VBR)\n"\
-"\n"\
-" aq=<0-9> algorithmic quality (0-best/slowest, 9-worst/fastest)\n"\
-"\n"\
-" ratio=<1-100> compression ratio\n"\
-"\n"\
-" vol=<0-10> set audio input gain\n"\
-"\n"\
-" mode=<0-3> (default: auto)\n"\
-" 0: stereo\n"\
-" 1: joint-stereo\n"\
-" 2: dualchannel\n"\
-" 3: mono\n"\
-"\n"\
-" padding=<0-2>\n"\
-" 0: no\n"\
-" 1: all\n"\
-" 2: adjust\n"\
-"\n"\
-" fast Switch on faster encoding on subsequent VBR presets modes,\n"\
-" slightly lower quality and higher bitrates.\n"\
-"\n"\
-" preset=<value> Provide the highest possible quality settings.\n"\
-" medium: VBR encoding, good quality\n"\
-" (150-180 kbps bitrate range)\n"\
-" standard: VBR encoding, high quality\n"\
-" (170-210 kbps bitrate range)\n"\
-" extreme: VBR encoding, very high quality\n"\
-" (200-240 kbps bitrate range)\n"\
-" insane: CBR encoding, highest preset quality\n"\
-" (320 kbps bitrate)\n"\
-" <8-320>: ABR encoding at average given kbps bitrate.\n\n"
-
-// codec-cfg.c
-#define MSGTR_DuplicateFourcc "duplicated FourCC"
-#define MSGTR_TooManyFourccs "too many FourCCs/formats..."
-#define MSGTR_ParseError "parse error"
-#define MSGTR_ParseErrorFIDNotNumber "parse error (format ID not a number?)"
-#define MSGTR_ParseErrorFIDAliasNotNumber "parse error (format ID alias not a number?)"
-#define MSGTR_DuplicateFID "duplicated format ID"
-#define MSGTR_TooManyOut "too many out..."
-#define MSGTR_InvalidCodecName "\ncodec(%s) name is not valid!\n"
-#define MSGTR_CodecLacksFourcc "\ncodec(%s) does not have FourCC/format!\n"
-#define MSGTR_CodecLacksDriver "\ncodec(%s) does not have a driver!\n"
-#define MSGTR_CodecNeedsDLL "\ncodec(%s) needs a 'dll'!\n"
-#define MSGTR_CodecNeedsOutfmt "\ncodec(%s) needs an 'outfmt'!\n"
-#define MSGTR_CantAllocateComment "Can't allocate memory for comment. "
-#define MSGTR_GetTokenMaxNotLessThanMAX_NR_TOKEN "get_token(): max >= MAX_MR_TOKEN!"
-#define MSGTR_CantGetMemoryForLine "Can't get memory for 'line': %s\n"
-#define MSGTR_CantReallocCodecsp "Can't realloc '*codecsp': %s\n"
-#define MSGTR_CodecNameNotUnique "Codec name '%s' isn't unique."
-#define MSGTR_CantStrdupName "Can't strdup -> 'name': %s\n"
-#define MSGTR_CantStrdupInfo "Can't strdup -> 'info': %s\n"
-#define MSGTR_CantStrdupDriver "Can't strdup -> 'driver': %s\n"
-#define MSGTR_CantStrdupDLL "Can't strdup -> 'dll': %s"
-#define MSGTR_AudioVideoCodecTotals "%d audio & %d video codecs\n"
-#define MSGTR_CodecDefinitionIncorrect "Codec is not defined correctly."
-#define MSGTR_OutdatedCodecsConf "This codecs.conf is too old and incompatible with this MPlayer release!"
-
-// fifo.c
-#define MSGTR_CannotMakePipe "Cannot make PIPE!\n"
-
-// parser-mecmd.c, parser-mpcmd.c
-#define MSGTR_NoFileGivenOnCommandLine "'--' indicates no more options, but no filename was given on the command line.\n"
-#define MSGTR_TheLoopOptionMustBeAnInteger "The loop option must be an integer: %s\n"
-#define MSGTR_UnknownOptionOnCommandLine "Unknown option on the command line: -%s\n"
-#define MSGTR_ErrorParsingOptionOnCommandLine "Error parsing option on the command line: -%s\n"
-#define MSGTR_InvalidPlayEntry "Invalid play entry %s\n"
-#define MSGTR_NotAnMEncoderOption "-%s is not an MEncoder option\n"
-#define MSGTR_NoFileGiven "No file given\n"
-
-// m_config.c
-#define MSGTR_SaveSlotTooOld "Save slot found from lvl %d is too old: %d !!!\n"
-#define MSGTR_InvalidCfgfileOption "The %s option can't be used in a config file.\n"
-#define MSGTR_InvalidCmdlineOption "The %s option can't be used on the command line.\n"
-#define MSGTR_InvalidSuboption "Error: option '%s' has no suboption '%s'.\n"
-#define MSGTR_MissingSuboptionParameter "Error: suboption '%s' of '%s' must have a parameter!\n"
-#define MSGTR_MissingOptionParameter "Error: option '%s' must have a parameter!\n"
-#define MSGTR_OptionListHeader "\n Name Type Min Max Global CL Cfg\n\n"
-#define MSGTR_TotalOptions "\nTotal: %d options\n"
-#define MSGTR_ProfileInclusionTooDeep "WARNING: Profile inclusion too deep.\n"
-#define MSGTR_NoProfileDefined "No profiles have been defined.\n"
-#define MSGTR_AvailableProfiles "Available profiles:\n"
-#define MSGTR_UnknownProfile "Unknown profile '%s'.\n"
-#define MSGTR_Profile "Profile %s: %s\n"
-
-// m_property.c
-#define MSGTR_PropertyListHeader "\n Name Type Min Max\n\n"
-#define MSGTR_TotalProperties "\nTotal: %d properties\n"
-
-// loader/ldt_keeper.c
-#define MSGTR_LOADER_DYLD_Warning "WARNING: Attempting to use DLL codecs but environment variable\n DYLD_BIND_AT_LAUNCH not set. This will likely crash.\n"
-
-
-// ====================== GUI messages/buttons ========================
-
-// --- labels ---
-#define MSGTR_About "About"
-#define MSGTR_FileSelect "Select file..."
-#define MSGTR_SubtitleSelect "Select subtitle..."
-#define MSGTR_OtherSelect "Select..."
-#define MSGTR_AudioFileSelect "Select external audio channel..."
-#define MSGTR_FontSelect "Select font..."
-// Note: If you change MSGTR_PlayList please see if it still fits MSGTR_MENU_PlayList
-#define MSGTR_PlayList "Playlist"
-#define MSGTR_Equalizer "Equalizer"
-#define MSGTR_ConfigureEqualizer "Configure Equalizer"
-#define MSGTR_SkinBrowser "Skin Browser"
-#define MSGTR_Network "Network streaming..."
-// Note: If you change MSGTR_Preferences please see if it still fits MSGTR_MENU_Preferences
-#define MSGTR_Preferences "Preferences"
-#define MSGTR_AudioPreferences "Audio driver configuration"
-#define MSGTR_NoMediaOpened "No media opened."
-#define MSGTR_Title "Title %d"
-#define MSGTR_NoChapter "No chapter"
-#define MSGTR_Chapter "Chapter %d"
-#define MSGTR_NoFileLoaded "No file loaded."
-#define MSGTR_Filter_UTF8Subtitles "UTF-8 encoded subtitles (*.utf, *.utf-8, *.utf8)"
-#define MSGTR_Filter_AllSubtitles "All subtitles"
-#define MSGTR_Filter_AllFiles "All files"
-#define MSGTR_Filter_TTF "True Type fonts (*.ttf)"
-#define MSGTR_Filter_Type1 "Type1 fonts (*.pfb)"
-#define MSGTR_Filter_AllFonts "All fonts"
-#define MSGTR_Filter_FontFiles "Font files (*.desc)"
-#define MSGTR_Filter_DDRawAudio "Dolby Digital / PCM (*.ac3, *.pcm)"
-#define MSGTR_Filter_MPEGAudio "MPEG audio (*.mp2, *.mp3, *.mpga, *.m4a, *.aac, *.f4a)"
-#define MSGTR_Filter_MatroskaAudio "Matroska audio (*.mka)"
-#define MSGTR_Filter_OGGAudio "Ogg audio (*.oga, *.ogg, *.spx)"
-#define MSGTR_Filter_WAVAudio "WAV audio (*.wav)"
-#define MSGTR_Filter_WMAAudio "Windows Media audio (*.wma)"
-#define MSGTR_Filter_AllAudioFiles "All audio files"
-#define MSGTR_Filter_AllVideoFiles "All video files"
-#define MSGTR_Filter_AVIFiles "AVI files"
-#define MSGTR_Filter_DivXFiles "DivX files"
-#define MSGTR_Filter_FlashVideo "Flash Video"
-#define MSGTR_Filter_MP3Files "MP3 files"
-#define MSGTR_Filter_MP4Files "MP4 files"
-#define MSGTR_Filter_MPEGFiles "MPEG files"
-#define MSGTR_Filter_MP2TS "MPEG-2 transport streams"
-#define MSGTR_Filter_MatroskaMedia "Matroska media"
-#define MSGTR_Filter_OGGMedia "Ogg media"
-#define MSGTR_Filter_QTMedia "QuickTime media"
-#define MSGTR_Filter_RNMedia "RealNetworks media"
-#define MSGTR_Filter_VideoCDImages "VCD/SVCD images"
-#define MSGTR_Filter_WAVFiles "WAV files"
-#define MSGTR_Filter_WindowsMedia "Windows media"
-#define MSGTR_Filter_Playlists "Playlists"
-
-// --- buttons ---
-#define MSGTR_Ok "OK"
-#define MSGTR_Cancel "Cancel"
-#define MSGTR_Add "Add"
-#define MSGTR_Remove "Remove"
-#define MSGTR_Clear "Clear"
-#define MSGTR_Config "Config"
-#define MSGTR_ConfigDriver "Configure driver"
-#define MSGTR_Browse "Browse"
-
-// --- error messages ---
-#define MSGTR_NEMDB "Sorry, not enough memory to draw buffer.\n"
-#define MSGTR_NEMFMR "Sorry, not enough memory for menu rendering."
-#define MSGTR_IDFGCVD "Sorry, no GUI-compatible video output driver found.\n"
-#define MSGTR_NEEDLAVC "Sorry, you cannot play non-MPEG files with your DXR3/H+ device without reencoding.\nPlease enable lavc in the DXR3/H+ configuration box."
-#define MSGTR_ICONERROR "Icon '%s' (size %d) not found or unsupported format.\n"
-
-// --- skin loader error messages
-#define MSGTR_SKIN_ERRORMESSAGE "Error in skin config file on line %d: %s"
-#define MSGTR_SKIN_ERROR_SECTION "No section specified for '%s'.\n"
-#define MSGTR_SKIN_ERROR_WINDOW "No window specified for '%s'.\n"
-#define MSGTR_SKIN_ERROR_ITEM "This item is not supported by '%s'.\n"
-#define MSGTR_SKIN_UNKNOWN_ITEM "Unknown item '%s'\n"
-#define MSGTR_SKIN_UNKNOWN_NAME "Unknown name '%s'\n"
-#define MSGTR_SKIN_SkinFileNotFound "Skin file %s not found.\n"
-#define MSGTR_SKIN_SkinFileNotReadable "Skin file %s not readable.\n"
-#define MSGTR_SKIN_BITMAP_16bit "Color depth of bitmap %s is 16 bits or less which is not supported.\n"
-#define MSGTR_SKIN_BITMAP_FileNotFound "Bitmap %s not found.\n"
-#define MSGTR_SKIN_BITMAP_PNGReadError "PNG read error in %s\n"
-#define MSGTR_SKIN_BITMAP_ConversionError "24 bit to 32 bit conversion error in %s\n"
-#define MSGTR_SKIN_UnknownMessage "Unknown message '%s'\n"
-#define MSGTR_SKIN_NotEnoughMemory "Not enough memory\n"
-#define MSGTR_SKIN_TooManyItemsDeclared "Too many items declared.\n"
-#define MSGTR_SKIN_FONT_TooManyFontsDeclared "Too many fonts declared.\n"
-#define MSGTR_SKIN_FONT_FontFileNotFound "Font description file not found.\n"
-#define MSGTR_SKIN_FONT_FontImageNotFound "Font image file not found.\n"
-#define MSGTR_SKIN_FONT_NonExistentFont "Font '%s' not found.\n"
-#define MSGTR_SKIN_UnknownParameter "Unknown parameter '%s'\n"
-#define MSGTR_SKIN_SKINCFG_SkinNotFound "Skin '%s' not found.\n"
-#define MSGTR_SKIN_SKINCFG_SelectedSkinNotFound "Selected skin '%s' not found, trying skin 'default'...\n"
-#define MSGTR_SKIN_SKINCFG_SkinCfgError "Config file processing error with skin '%s'\n"
-#define MSGTR_SKIN_LABEL "Skins:"
-
-// --- GTK menus
-#define MSGTR_MENU_AboutMPlayer "About MPlayer"
-#define MSGTR_MENU_Open "Open..."
-#define MSGTR_MENU_PlayFile "Play file..."
-#define MSGTR_MENU_PlayCD "Play CD..."
-#define MSGTR_MENU_PlayVCD "Play VCD..."
-#define MSGTR_MENU_PlayDVD "Play DVD..."
-#define MSGTR_MENU_PlayURL "Play URL..."
-#define MSGTR_MENU_LoadSubtitle "Load subtitle..."
-#define MSGTR_MENU_DropSubtitle "Drop subtitle..."
-#define MSGTR_MENU_LoadExternAudioFile "Load external audio file..."
-#define MSGTR_MENU_Playing "Playing"
-#define MSGTR_MENU_Play "Play"
-#define MSGTR_MENU_Pause "Pause"
-#define MSGTR_MENU_Stop "Stop"
-#define MSGTR_MENU_NextStream "Next stream"
-#define MSGTR_MENU_PrevStream "Prev stream"
-#define MSGTR_MENU_Size "Size"
-#define MSGTR_MENU_HalfSize "Half size"
-#define MSGTR_MENU_NormalSize "Normal size"
-#define MSGTR_MENU_DoubleSize "Double size"
-#define MSGTR_MENU_FullScreen "Fullscreen"
-#define MSGTR_MENU_CD "CD"
-#define MSGTR_MENU_DVD "DVD"
-#define MSGTR_MENU_VCD "VCD"
-#define MSGTR_MENU_PlayDisc "Open disc..."
-#define MSGTR_MENU_ShowDVDMenu "Show DVD menu"
-#define MSGTR_MENU_Titles "Titles"
-#define MSGTR_MENU_Title "Title %2d"
-#define MSGTR_MENU_None "(none)"
-#define MSGTR_MENU_Chapters "Chapters"
-#define MSGTR_MENU_Chapter "Chapter %2d"
-#define MSGTR_MENU_AudioLanguages "Audio languages"
-#define MSGTR_MENU_SubtitleLanguages "Subtitle languages"
-#define MSGTR_MENU_PlayList MSGTR_PlayList
-#define MSGTR_MENU_SkinBrowser "Skin browser"
-#define MSGTR_MENU_Preferences MSGTR_Preferences
-#define MSGTR_MENU_Exit "Exit"
-#define MSGTR_MENU_Mute "Mute"
-#define MSGTR_MENU_Original "Original"
-#define MSGTR_MENU_AspectRatio "Aspect ratio"
-#define MSGTR_MENU_AudioTrack "Audio track"
-#define MSGTR_MENU_Track "Track %d"
-#define MSGTR_MENU_VideoTrack "Video track"
-#define MSGTR_MENU_Subtitles "Subtitles"
-
-// --- equalizer
-// Note: If you change MSGTR_EQU_Audio please see if it still fits MSGTR_PREFERENCES_Audio
-#define MSGTR_EQU_Audio "Audio"
-// Note: If you change MSGTR_EQU_Video please see if it still fits MSGTR_PREFERENCES_Video
-#define MSGTR_EQU_Video "Video"
-#define MSGTR_EQU_Contrast "Contrast: "
-#define MSGTR_EQU_Brightness "Brightness: "
-#define MSGTR_EQU_Hue "Hue: "
-#define MSGTR_EQU_Saturation "Saturation: "
-#define MSGTR_EQU_Front_Left "Front Left"
-#define MSGTR_EQU_Front_Right "Front Right"
-#define MSGTR_EQU_Back_Left "Rear Left"
-#define MSGTR_EQU_Back_Right "Rear Right"
-#define MSGTR_EQU_Center "Center"
-#define MSGTR_EQU_Bass "Bass"
-#define MSGTR_EQU_All "All"
-#define MSGTR_EQU_Channel1 "Channel 1:"
-#define MSGTR_EQU_Channel2 "Channel 2:"
-#define MSGTR_EQU_Channel3 "Channel 3:"
-#define MSGTR_EQU_Channel4 "Channel 4:"
-#define MSGTR_EQU_Channel5 "Channel 5:"
-#define MSGTR_EQU_Channel6 "Channel 6:"
-
-// --- playlist
-#define MSGTR_PLAYLIST_Path "Path"
-#define MSGTR_PLAYLIST_Selected "Selected files"
-#define MSGTR_PLAYLIST_Files "Files"
-#define MSGTR_PLAYLIST_DirectoryTree "Directory tree"
-
-// --- preferences
-#define MSGTR_PREFERENCES_Audio MSGTR_EQU_Audio
-#define MSGTR_PREFERENCES_Video MSGTR_EQU_Video
-#define MSGTR_PREFERENCES_SubtitleOSD "Subtitles & OSD"
-#define MSGTR_PREFERENCES_Codecs "Codecs & demuxer"
-// Note: If you change MSGTR_PREFERENCES_Misc see if it still fits MSGTR_PREFERENCES_FRAME_Misc
-#define MSGTR_PREFERENCES_Misc "Misc"
-#define MSGTR_PREFERENCES_None "None"
-#define MSGTR_PREFERENCES_DriverDefault "driver default"
-#define MSGTR_PREFERENCES_AvailableDrivers "Available drivers:"
-#define MSGTR_PREFERENCES_DoNotPlaySound "Do not play sound"
-#define MSGTR_PREFERENCES_NormalizeSound "Normalize sound"
-#define MSGTR_PREFERENCES_EnableEqualizer "Enable equalizer"
-#define MSGTR_PREFERENCES_SoftwareMixer "Enable Software Mixer"
-#define MSGTR_PREFERENCES_ExtraStereo "Enable extra stereo"
-#define MSGTR_PREFERENCES_Coefficient "Coefficient:"
-#define MSGTR_PREFERENCES_AudioDelay "Audio delay"
-#define MSGTR_PREFERENCES_DoubleBuffer "Enable double buffering"
-#define MSGTR_PREFERENCES_DirectRender "Enable direct rendering"
-#define MSGTR_PREFERENCES_FrameDrop "Enable frame dropping"
-#define MSGTR_PREFERENCES_HFrameDrop "Enable HARD frame dropping (dangerous)"
-#define MSGTR_PREFERENCES_Flip "Flip image upside down"
-#define MSGTR_PREFERENCES_Panscan "Panscan: "
-#define MSGTR_PREFERENCES_OSD_LEVEL0 "Subtitles only"
-#define MSGTR_PREFERENCES_OSD_LEVEL1 "Volume and seek"
-#define MSGTR_PREFERENCES_OSD_LEVEL2 "Volume, seek, timer and percentage"
-#define MSGTR_PREFERENCES_OSD_LEVEL3 "Volume, seek, timer, percentage and total time"
-#define MSGTR_PREFERENCES_Subtitle "Subtitle:"
-#define MSGTR_PREFERENCES_SUB_Delay "Delay: "
-#define MSGTR_PREFERENCES_SUB_FPS "FPS:"
-#define MSGTR_PREFERENCES_SUB_POS "Position: "
-#define MSGTR_PREFERENCES_SUB_AutoLoad "Disable subtitle autoloading"
-#define MSGTR_PREFERENCES_SUB_Unicode "Unicode subtitle"
-#define MSGTR_PREFERENCES_SUB_MPSUB "Convert the given subtitle to MPlayer's subtitle format"
-#define MSGTR_PREFERENCES_SUB_SRT "Convert the given subtitle to the time based SubViewer (SRT) format"
-#define MSGTR_PREFERENCES_SUB_Overlap "Toggle subtitle overlapping"
-#define MSGTR_PREFERENCES_SUB_USE_ASS "SSA/ASS subtitle rendering"
-#define MSGTR_PREFERENCES_SUB_ASS_USE_MARGINS "Use margins"
-#define MSGTR_PREFERENCES_SUB_ASS_TOP_MARGIN "Top: "
-#define MSGTR_PREFERENCES_SUB_ASS_BOTTOM_MARGIN "Bottom: "
-#define MSGTR_PREFERENCES_Font "Font:"
-#define MSGTR_PREFERENCES_FontFactor "Font factor:"
-#define MSGTR_PREFERENCES_PostProcess "Enable postprocessing"
-#define MSGTR_PREFERENCES_AutoQuality "Auto quality: "
-#define MSGTR_PREFERENCES_NI "Use non-interleaved AVI parser"
-#define MSGTR_PREFERENCES_IDX "Rebuild index table, if needed"
-#define MSGTR_PREFERENCES_VideoCodecFamily "Video codec family:"
-#define MSGTR_PREFERENCES_AudioCodecFamily "Audio codec family:"
-#define MSGTR_PREFERENCES_FRAME_OSD_Level "OSD level"
-#define MSGTR_PREFERENCES_FRAME_Subtitle "Subtitle"
-#define MSGTR_PREFERENCES_FRAME_Font "Font"
-#define MSGTR_PREFERENCES_FRAME_PostProcess "Postprocessing"
-#define MSGTR_PREFERENCES_FRAME_CodecDemuxer "Codec & demuxer"
-#define MSGTR_PREFERENCES_FRAME_Cache "Cache"
-#define MSGTR_PREFERENCES_FRAME_Misc MSGTR_PREFERENCES_Misc
-#define MSGTR_PREFERENCES_Audio_Device "Device:"
-#define MSGTR_PREFERENCES_Audio_Mixer "Mixer:"
-#define MSGTR_PREFERENCES_Audio_MixerChannel "Mixer channel:"
-#define MSGTR_PREFERENCES_Message "Please remember that you need to restart playback for some options to take effect!"
-#define MSGTR_PREFERENCES_DXR3_VENC "Video encoder:"
-#define MSGTR_PREFERENCES_DXR3_LAVC "Use LAVC (FFmpeg)"
-#define MSGTR_PREFERENCES_FontEncoding1 "Unicode"
-#define MSGTR_PREFERENCES_FontEncoding2 "Western European Languages (ISO-8859-1)"
-#define MSGTR_PREFERENCES_FontEncoding3 "Western European Languages with Euro (ISO-8859-15)"
-#define MSGTR_PREFERENCES_FontEncoding4 "Slavic/Central European Languages (ISO-8859-2)"
-#define MSGTR_PREFERENCES_FontEncoding5 "Esperanto, Galician, Maltese, Turkish (ISO-8859-3)"
-#define MSGTR_PREFERENCES_FontEncoding6 "Old Baltic charset (ISO-8859-4)"
-#define MSGTR_PREFERENCES_FontEncoding7 "Cyrillic (ISO-8859-5)"
-#define MSGTR_PREFERENCES_FontEncoding8 "Arabic (ISO-8859-6)"
-#define MSGTR_PREFERENCES_FontEncoding9 "Modern Greek (ISO-8859-7)"
-#define MSGTR_PREFERENCES_FontEncoding10 "Turkish (ISO-8859-9)"
-#define MSGTR_PREFERENCES_FontEncoding11 "Baltic (ISO-8859-13)"
-#define MSGTR_PREFERENCES_FontEncoding12 "Celtic (ISO-8859-14)"
-#define MSGTR_PREFERENCES_FontEncoding13 "Hebrew charsets (ISO-8859-8)"
-#define MSGTR_PREFERENCES_FontEncoding14 "Russian (KOI8-R)"
-#define MSGTR_PREFERENCES_FontEncoding15 "Ukrainian, Belarusian (KOI8-U/RU)"
-#define MSGTR_PREFERENCES_FontEncoding16 "Simplified Chinese charset (CP936)"
-#define MSGTR_PREFERENCES_FontEncoding17 "Traditional Chinese charset (BIG5)"
-#define MSGTR_PREFERENCES_FontEncoding18 "Japanese charsets (SHIFT-JIS)"
-#define MSGTR_PREFERENCES_FontEncoding19 "Korean charset (CP949)"
-#define MSGTR_PREFERENCES_FontEncoding20 "Thai charset (CP874)"
-#define MSGTR_PREFERENCES_FontEncoding21 "Cyrillic Windows (CP1251)"
-#define MSGTR_PREFERENCES_FontEncoding22 "Slavic/Central European Windows (CP1250)"
-#define MSGTR_PREFERENCES_FontEncoding23 "Arabic Windows (CP1256)"
-#define MSGTR_PREFERENCES_FontNoAutoScale "No autoscale"
-#define MSGTR_PREFERENCES_FontPropWidth "Proportional to movie width"
-#define MSGTR_PREFERENCES_FontPropHeight "Proportional to movie height"
-#define MSGTR_PREFERENCES_FontPropDiagonal "Proportional to movie diagonal"
-#define MSGTR_PREFERENCES_FontEncoding "Encoding:"
-#define MSGTR_PREFERENCES_FontBlur "Blur:"
-#define MSGTR_PREFERENCES_FontOutLine "Outline:"
-#define MSGTR_PREFERENCES_FontTextScale "Text scale:"
-#define MSGTR_PREFERENCES_FontOSDScale "OSD scale:"
-#define MSGTR_PREFERENCES_Cache "Cache on/off"
-#define MSGTR_PREFERENCES_CacheSize "Cache size: "
-#define MSGTR_PREFERENCES_LoadFullscreen "Start in fullscreen"
-#define MSGTR_PREFERENCES_SaveWinPos "Save window position"
-#define MSGTR_PREFERENCES_XSCREENSAVER "Stop XScreenSaver"
-#define MSGTR_PREFERENCES_PlayBar "Enable playbar"
-#define MSGTR_PREFERENCES_NoIdle "Quit after playing"
-#define MSGTR_PREFERENCES_AutoSync "AutoSync on/off"
-#define MSGTR_PREFERENCES_AutoSyncValue "Autosync: "
-#define MSGTR_PREFERENCES_CDROMDevice "CD-ROM device:"
-#define MSGTR_PREFERENCES_DVDDevice "DVD device:"
-#define MSGTR_PREFERENCES_FPS "Movie FPS:"
-#define MSGTR_PREFERENCES_ShowVideoWindow "Show video window when inactive"
-#define MSGTR_PREFERENCES_ArtsBroken "Newer aRts versions are incompatible "\
- "with GTK 1.x and will crash GMPlayer!"
-
-// -- aboutbox
-#define MSGTR_ABOUT_UHU "GUI development sponsored by UHU Linux\n"
-#define MSGTR_ABOUT_Contributors "Code and documentation contributors\n"
-#define MSGTR_ABOUT_Codecs_libs_contributions "Codecs and third party libraries\n"
-#define MSGTR_ABOUT_Translations "Translations\n"
-#define MSGTR_ABOUT_Skins "Skins\n"
-
-// --- messagebox
-#define MSGTR_MSGBOX_LABEL_FatalError "Fatal error!"
-#define MSGTR_MSGBOX_LABEL_Error "Error!"
-#define MSGTR_MSGBOX_LABEL_Warning "Warning!"
-
-// cfg.c
-#define MSGTR_UnableToSaveOption "Unable to save option '%s'.\n"
-
-// interface.c
-#define MSGTR_DeletingSubtitles "Deleting subtitles.\n"
-#define MSGTR_LoadingSubtitles "Loading subtitles '%s'.\n"
-#define MSGTR_AddingVideoFilter "Adding video filter '%s'.\n"
-
-// mw.c
-#define MSGTR_NotAFile "This does not seem to be a file: %s !\n"
-
-// ws.c
-#define MSGTR_WS_RemoteDisplay "Remote display, disabling XMITSHM.\n"
-#define MSGTR_WS_NoXshm "Sorry, your system does not support the X shared memory extension.\n"
-#define MSGTR_WS_NoXshape "Sorry, your system does not support the XShape extension.\n"
-#define MSGTR_WS_ColorDepthTooLow "Sorry, the color depth is too low.\n"
-#define MSGTR_WS_TooManyOpenWindows "There are too many open windows.\n"
-#define MSGTR_WS_ShmError "shared memory extension error\n"
-#define MSGTR_WS_NotEnoughMemoryDrawBuffer "Sorry, not enough memory to draw buffer.\n"
-#define MSGTR_WS_DpmsUnavailable "DPMS not available?\n"
-#define MSGTR_WS_DpmsNotEnabled "Could not enable DPMS.\n"
-#define MSGTR_WS_XError "An X11 Error has occurred!\n"
-
-// wsxdnd.c
-#define MSGTR_WS_NotAFile "This does not seem to be a file...\n"
-#define MSGTR_WS_DDNothing "D&D: Nothing returned!\n"
-
-// Win32 GUI
-#define MSGTR_Close "Close"
-#define MSGTR_Default "Defaults"
-#define MSGTR_Down "Down"
-#define MSGTR_Load "Load"
-#define MSGTR_Save "Save"
-#define MSGTR_Up "Up"
-#define MSGTR_DirectorySelect "Select directory..."
-#define MSGTR_PlaylistSave "Save playlist..."
-#define MSGTR_PlaylistSelect "Select playlist..."
-#define MSGTR_SelectTitleChapter "Select title/chapter..."
-#define MSGTR_MENU_DebugConsole "Debug Console"
-#define MSGTR_MENU_OnlineHelp "Online Help"
-#define MSGTR_MENU_PlayDirectory "Play directory..."
-#define MSGTR_MENU_SeekBack "Seek Backwards"
-#define MSGTR_MENU_SeekForw "Seek Forwards"
-#define MSGTR_MENU_ShowHide "Show/Hide"
-#define MSGTR_MENU_SubtitlesOnOff "Subtitle Visibility On/Off"
-#define MSGTR_PLAYLIST_AddFile "Add File..."
-#define MSGTR_PLAYLIST_AddURL "Add URL..."
-#define MSGTR_PREFERENCES_Priority "Priority:"
-#define MSGTR_PREFERENCES_PriorityHigh "high"
-#define MSGTR_PREFERENCES_PriorityLow "low"
-#define MSGTR_PREFERENCES_PriorityNormal "normal"
-#define MSGTR_PREFERENCES_PriorityNormalAbove "above normal"
-#define MSGTR_PREFERENCES_PriorityNormalBelow "below normal"
-#define MSGTR_PREFERENCES_ShowInVideoWin "Display in the video window (DirectX only)"
-
-
-// ======================= video output drivers ========================
-
-#define MSGTR_VOincompCodec "The selected video_out device is incompatible with this codec.\n"\
- "Try appending the scale filter to your filter list,\n"\
- "e.g. -vf spp,scale instead of -vf spp.\n"
-#define MSGTR_VO_GenericError "This error has occurred"
-#define MSGTR_VO_UnableToAccess "Unable to access"
-#define MSGTR_VO_ExistsButNoDirectory "already exists, but is not a directory."
-#define MSGTR_VO_DirExistsButNotWritable "Output directory already exists, but is not writable."
-#define MSGTR_VO_DirExistsAndIsWritable "Output directory already exists and is writable."
-#define MSGTR_VO_CantCreateDirectory "Unable to create output directory."
-#define MSGTR_VO_CantCreateFile "Unable to create output file."
-#define MSGTR_VO_DirectoryCreateSuccess "Output directory successfully created."
-#define MSGTR_VO_ValueOutOfRange "value out of range"
-#define MSGTR_VO_NoValueSpecified "No value specified."
-#define MSGTR_VO_UnknownSuboptions "unknown suboption(s)"
-
-// aspect.c
-#define MSGTR_LIBVO_ASPECT_NoSuitableNewResFound "[ASPECT] Warning: No suitable new res found!\n"
-#define MSGTR_LIBVO_ASPECT_NoNewSizeFoundThatFitsIntoRes "[ASPECT] Error: No new size found that fits into res!\n"
-
-// font_load_ft.c
-#define MSGTR_LIBVO_FONT_LOAD_FT_NewFaceFailed "New_Face failed. Maybe the font path is wrong.\nPlease supply the text font file (~/.mplayer/subfont.ttf).\n"
-#define MSGTR_LIBVO_FONT_LOAD_FT_NewMemoryFaceFailed "New_Memory_Face failed..\n"
-#define MSGTR_LIBVO_FONT_LOAD_FT_SubFaceFailed "subtitle font: load_sub_face failed.\n"
-#define MSGTR_LIBVO_FONT_LOAD_FT_SubFontCharsetFailed "subtitle font: prepare_charset failed.\n"
-#define MSGTR_LIBVO_FONT_LOAD_FT_CannotPrepareSubtitleFont "Cannot prepare subtitle font.\n"
-#define MSGTR_LIBVO_FONT_LOAD_FT_CannotPrepareOSDFont "Cannot prepare OSD font.\n"
-#define MSGTR_LIBVO_FONT_LOAD_FT_CannotGenerateTables "Cannot generate tables.\n"
-#define MSGTR_LIBVO_FONT_LOAD_FT_DoneFreeTypeFailed "FT_Done_FreeType failed.\n"
-#define MSGTR_LIBVO_FONT_LOAD_FT_FontconfigNoMatch "Fontconfig failed to select a font. Trying without fontconfig...\n"
-
-// sub.c
-#define MSGTR_VO_SUB_Seekbar "Seekbar"
-#define MSGTR_VO_SUB_Play "Play"
-#define MSGTR_VO_SUB_Pause "Pause"
-#define MSGTR_VO_SUB_Stop "Stop"
-#define MSGTR_VO_SUB_Rewind "Rewind"
-#define MSGTR_VO_SUB_Forward "Forward"
-#define MSGTR_VO_SUB_Clock "Clock"
-#define MSGTR_VO_SUB_Contrast "Contrast"
-#define MSGTR_VO_SUB_Saturation "Saturation"
-#define MSGTR_VO_SUB_Volume "Volume"
-#define MSGTR_VO_SUB_Brightness "Brightness"
-#define MSGTR_VO_SUB_Hue "Hue"
-#define MSGTR_VO_SUB_Balance "Balance"
-
-// vo_3dfx.c
-#define MSGTR_LIBVO_3DFX_Only16BppSupported "[VO_3DFX] Only 16bpp supported!"
-#define MSGTR_LIBVO_3DFX_VisualIdIs "[VO_3DFX] Visual ID is %lx.\n"
-#define MSGTR_LIBVO_3DFX_UnableToOpenDevice "[VO_3DFX] Unable to open /dev/3dfx.\n"
-#define MSGTR_LIBVO_3DFX_Error "[VO_3DFX] Error: %d.\n"
-#define MSGTR_LIBVO_3DFX_CouldntMapMemoryArea "[VO_3DFX] Couldn't map 3dfx memory areas: %p,%p,%d.\n"
-#define MSGTR_LIBVO_3DFX_DisplayInitialized "[VO_3DFX] Initialized: %p.\n"
-#define MSGTR_LIBVO_3DFX_UnknownSubdevice "[VO_3DFX] Unknown subdevice: %s.\n"
-
-// vo_aa.c
-#define MSGTR_VO_AA_HelpHeader "\n\nHere are the aalib vo_aa suboptions:\n"
-#define MSGTR_VO_AA_AdditionalOptions "Additional options vo_aa provides:\n" \
-" help print this help message\n" \
-" osdcolor set OSD color\n subcolor set subtitle color\n" \
-" the color parameters are:\n 0 : normal\n" \
-" 1 : dim\n 2 : bold\n 3 : boldfont\n" \
-" 4 : reverse\n 5 : special\n\n\n"
-
-// vo_dxr3.c
-#define MSGTR_LIBVO_DXR3_UnableToLoadNewSPUPalette "[VO_DXR3] Unable to load new SPU palette!\n"
-#define MSGTR_LIBVO_DXR3_UnableToSetPlaymode "[VO_DXR3] Unable to set playmode!\n"
-#define MSGTR_LIBVO_DXR3_UnableToSetSubpictureMode "[VO_DXR3] Unable to set subpicture mode!\n"
-#define MSGTR_LIBVO_DXR3_UnableToGetTVNorm "[VO_DXR3] Unable to get TV norm!\n"
-#define MSGTR_LIBVO_DXR3_AutoSelectedTVNormByFrameRate "[VO_DXR3] Auto-selected TV norm by framerate: "
-#define MSGTR_LIBVO_DXR3_UnableToSetTVNorm "[VO_DXR3] Unable to set TV norm!\n"
-#define MSGTR_LIBVO_DXR3_SettingUpForNTSC "[VO_DXR3] Setting up for NTSC.\n"
-#define MSGTR_LIBVO_DXR3_SettingUpForPALSECAM "[VO_DXR3] Setting up for PAL/SECAM.\n"
-#define MSGTR_LIBVO_DXR3_SettingAspectRatioTo43 "[VO_DXR3] Setting aspect ratio to 4:3.\n"
-#define MSGTR_LIBVO_DXR3_SettingAspectRatioTo169 "[VO_DXR3] Setting aspect ratio to 16:9.\n"
-#define MSGTR_LIBVO_DXR3_OutOfMemory "[VO_DXR3] out of memory\n"
-#define MSGTR_LIBVO_DXR3_UnableToAllocateKeycolor "[VO_DXR3] Unable to allocate keycolor!\n"
-#define MSGTR_LIBVO_DXR3_UnableToAllocateExactKeycolor "[VO_DXR3] Unable to allocate exact keycolor, using closest match (0x%lx).\n"
-#define MSGTR_LIBVO_DXR3_Uninitializing "[VO_DXR3] Uninitializing.\n"
-#define MSGTR_LIBVO_DXR3_FailedRestoringTVNorm "[VO_DXR3] Failed restoring TV norm!\n"
-#define MSGTR_LIBVO_DXR3_EnablingPrebuffering "[VO_DXR3] Enabling prebuffering.\n"
-#define MSGTR_LIBVO_DXR3_UsingNewSyncEngine "[VO_DXR3] Using new sync engine.\n"
-#define MSGTR_LIBVO_DXR3_UsingOverlay "[VO_DXR3] Using overlay.\n"
-#define MSGTR_LIBVO_DXR3_ErrorYouNeedToCompileMplayerWithX11 "[VO_DXR3] Error: Overlay requires compiling with X11 libs/headers installed.\n"
-#define MSGTR_LIBVO_DXR3_WillSetTVNormTo "[VO_DXR3] Will set TV norm to: "
-#define MSGTR_LIBVO_DXR3_AutoAdjustToMovieFrameRatePALPAL60 "auto-adjust to movie framerate (PAL/PAL-60)"
-#define MSGTR_LIBVO_DXR3_AutoAdjustToMovieFrameRatePALNTSC "auto-adjust to movie framerate (PAL/NTSC)"
-#define MSGTR_LIBVO_DXR3_UseCurrentNorm "Use current norm."
-#define MSGTR_LIBVO_DXR3_UseUnknownNormSuppliedCurrentNorm "Unknown norm supplied. Use current norm."
-#define MSGTR_LIBVO_DXR3_ErrorOpeningForWritingTrying "[VO_DXR3] Error opening %s for writing, trying /dev/em8300 instead.\n"
-#define MSGTR_LIBVO_DXR3_ErrorOpeningForWritingTryingMV "[VO_DXR3] Error opening %s for writing, trying /dev/em8300_mv instead.\n"
-#define MSGTR_LIBVO_DXR3_ErrorOpeningForWritingAsWell "[VO_DXR3] Error opening /dev/em8300 for writing as well!\nBailing out.\n"
-#define MSGTR_LIBVO_DXR3_ErrorOpeningForWritingAsWellMV "[VO_DXR3] Error opening /dev/em8300_mv for writing as well!\nBailing out.\n"
-#define MSGTR_LIBVO_DXR3_Opened "[VO_DXR3] Opened: %s.\n"
-#define MSGTR_LIBVO_DXR3_ErrorOpeningForWritingTryingSP "[VO_DXR3] Error opening %s for writing, trying /dev/em8300_sp instead.\n"
-#define MSGTR_LIBVO_DXR3_ErrorOpeningForWritingAsWellSP "[VO_DXR3] Error opening /dev/em8300_sp for writing as well!\nBailing out.\n"
-#define MSGTR_LIBVO_DXR3_UnableToOpenDisplayDuringHackSetup "[VO_DXR3] Unable to open display during overlay hack setup!\n"
-#define MSGTR_LIBVO_DXR3_UnableToInitX11 "[VO_DXR3] Unable to init X11!\n"
-#define MSGTR_LIBVO_DXR3_FailedSettingOverlayAttribute "[VO_DXR3] Failed setting overlay attribute.\n"
-#define MSGTR_LIBVO_DXR3_FailedSettingOverlayScreen "[VO_DXR3] Failed setting overlay screen!\nExiting.\n"
-#define MSGTR_LIBVO_DXR3_FailedEnablingOverlay "[VO_DXR3] Failed enabling overlay!\nExiting.\n"
-#define MSGTR_LIBVO_DXR3_FailedResizingOverlayWindow "[VO_DXR3] Failed resizing overlay window!\n"
-#define MSGTR_LIBVO_DXR3_FailedSettingOverlayBcs "[VO_DXR3] Failed setting overlay bcs!\n"
-#define MSGTR_LIBVO_DXR3_FailedGettingOverlayYOffsetValues "[VO_DXR3] Failed getting overlay Y-offset values!\nExiting.\n"
-#define MSGTR_LIBVO_DXR3_FailedGettingOverlayXOffsetValues "[VO_DXR3] Failed getting overlay X-offset values!\nExiting.\n"
-#define MSGTR_LIBVO_DXR3_FailedGettingOverlayXScaleCorrection "[VO_DXR3] Failed getting overlay X scale correction!\nExiting.\n"
-#define MSGTR_LIBVO_DXR3_YOffset "[VO_DXR3] Yoffset: %d.\n"
-#define MSGTR_LIBVO_DXR3_XOffset "[VO_DXR3] Xoffset: %d.\n"
-#define MSGTR_LIBVO_DXR3_XCorrection "[VO_DXR3] Xcorrection: %d.\n"
-#define MSGTR_LIBVO_DXR3_FailedSetSignalMix "[VO_DXR3] Failed to set signal mix!\n"
-
-// vo_jpeg.c
-#define MSGTR_VO_JPEG_ProgressiveJPEG "Progressive JPEG enabled."
-#define MSGTR_VO_JPEG_NoProgressiveJPEG "Progressive JPEG disabled."
-#define MSGTR_VO_JPEG_BaselineJPEG "Baseline JPEG enabled."
-#define MSGTR_VO_JPEG_NoBaselineJPEG "Baseline JPEG disabled."
-
-// vo_mga.c
-#define MSGTR_LIBVO_MGA_AspectResized "[VO_MGA] aspect(): resized to %dx%d.\n"
-#define MSGTR_LIBVO_MGA_Uninit "[VO] uninit!\n"
-
-// mga_template.c
-#define MSGTR_LIBVO_MGA_ErrorInConfigIoctl "[MGA] error in mga_vid_config ioctl (wrong mga_vid.o version?)"
-#define MSGTR_LIBVO_MGA_CouldNotGetLumaValuesFromTheKernelModule "[MGA] Could not get luma values from the kernel module!\n"
-#define MSGTR_LIBVO_MGA_CouldNotSetLumaValuesFromTheKernelModule "[MGA] Could not set luma values from the kernel module!\n"
-#define MSGTR_LIBVO_MGA_ScreenWidthHeightUnknown "[MGA] Screen width/height unknown!\n"
-#define MSGTR_LIBVO_MGA_InvalidOutputFormat "[MGA] invalid output format %0X\n"
-#define MSGTR_LIBVO_MGA_IncompatibleDriverVersion "[MGA] Your mga_vid driver version is incompatible with this MPlayer version!\n"
-#define MSGTR_LIBVO_MGA_CouldntOpen "[MGA] Couldn't open: %s\n"
-#define MSGTR_LIBVO_MGA_ResolutionTooHigh "[MGA] Source resolution exceeds 1023x1023 in at least one dimension.\n[MGA] Rescale in software or use -lavdopts lowres=1.\n"
-#define MSGTR_LIBVO_MGA_mgavidVersionMismatch "[MGA] mismatch between kernel (%u) and MPlayer (%u) mga_vid driver versions\n"
-
-// vo_null.c
-#define MSGTR_LIBVO_NULL_UnknownSubdevice "[VO_NULL] Unknown subdevice: %s.\n"
-
-// vo_png.c
-#define MSGTR_LIBVO_PNG_Warning1 "[VO_PNG] Warning: compression level set to 0, compression disabled!\n"
-#define MSGTR_LIBVO_PNG_Warning2 "[VO_PNG] Info: Use -vo png:z=<n> to set compression level from 0 to 9.\n"
-#define MSGTR_LIBVO_PNG_Warning3 "[VO_PNG] Info: (0 = no compression, 1 = fastest, lowest - 9 best, slowest compression)\n"
-#define MSGTR_LIBVO_PNG_ErrorOpeningForWriting "\n[VO_PNG] Error opening '%s' for writing!\n"
-#define MSGTR_LIBVO_PNG_ErrorInCreatePng "[VO_PNG] Error in create_png.\n"
-
-// vo_pnm.c
-#define MSGTR_VO_PNM_ASCIIMode "ASCII mode enabled."
-#define MSGTR_VO_PNM_RawMode "Raw mode enabled."
-#define MSGTR_VO_PNM_PPMType "Will write PPM files."
-#define MSGTR_VO_PNM_PGMType "Will write PGM files."
-#define MSGTR_VO_PNM_PGMYUVType "Will write PGMYUV files."
-
-// vo_sdl.c
-#define MSGTR_LIBVO_SDL_CouldntGetAnyAcceptableSDLModeForOutput "[VO_SDL] Couldn't get any acceptable SDL Mode for output.\n"
-#define MSGTR_LIBVO_SDL_SetVideoModeFailed "[VO_SDL] set_video_mode: SDL_SetVideoMode failed: %s.\n"
-#define MSGTR_LIBVO_SDL_MappingI420ToIYUV "[VO_SDL] Mapping I420 to IYUV.\n"
-#define MSGTR_LIBVO_SDL_UnsupportedImageFormat "[VO_SDL] Unsupported image format (0x%X).\n"
-#define MSGTR_LIBVO_SDL_InfoPleaseUseVmOrZoom "[VO_SDL] Info - please use -vm or -zoom to switch to the best resolution.\n"
-#define MSGTR_LIBVO_SDL_FailedToSetVideoMode "[VO_SDL] Failed to set video mode: %s.\n"
-#define MSGTR_LIBVO_SDL_CouldntCreateAYUVOverlay "[VO_SDL] Couldn't create a YUV overlay: %s.\n"
-#define MSGTR_LIBVO_SDL_CouldntCreateARGBSurface "[VO_SDL] Couldn't create an RGB surface: %s.\n"
-#define MSGTR_LIBVO_SDL_UsingDepthColorspaceConversion "[VO_SDL] Using depth/colorspace conversion, this will slow things down (%ibpp -> %ibpp).\n"
-#define MSGTR_LIBVO_SDL_UnsupportedImageFormatInDrawslice "[VO_SDL] Unsupported image format in draw_slice, contact MPlayer developers!\n"
-#define MSGTR_LIBVO_SDL_BlitFailed "[VO_SDL] Blit failed: %s.\n"
-#define MSGTR_LIBVO_SDL_InitializationFailed "[VO_SDL] SDL initialization failed: %s.\n"
-#define MSGTR_LIBVO_SDL_UsingDriver "[VO_SDL] Using driver: %s.\n"
-
-// vo_svga.c
-#define MSGTR_LIBVO_SVGA_ForcedVidmodeNotAvailable "[VO_SVGA] Forced vid_mode %d (%s) not available.\n"
-#define MSGTR_LIBVO_SVGA_ForcedVidmodeTooSmall "[VO_SVGA] Forced vid_mode %d (%s) too small.\n"
-#define MSGTR_LIBVO_SVGA_Vidmode "[VO_SVGA] Vid_mode: %d, %dx%d %dbpp.\n"
-#define MSGTR_LIBVO_SVGA_VgasetmodeFailed "[VO_SVGA] Vga_setmode(%d) failed.\n"
-#define MSGTR_LIBVO_SVGA_VideoModeIsLinearAndMemcpyCouldBeUsed "[VO_SVGA] Video mode is linear and memcpy could be used for image transfer.\n"
-#define MSGTR_LIBVO_SVGA_VideoModeHasHardwareAcceleration "[VO_SVGA] Video mode has hardware acceleration and put_image could be used.\n"
-#define MSGTR_LIBVO_SVGA_IfItWorksForYouIWouldLikeToKnow "[VO_SVGA] If it works for you I would like to know.\n[VO_SVGA] (send log with `mplayer test.avi -v -v -v -v &> svga.log`). Thx!\n"
-#define MSGTR_LIBVO_SVGA_VideoModeHas "[VO_SVGA] Video mode has %d page(s).\n"
-#define MSGTR_LIBVO_SVGA_CenteringImageStartAt "[VO_SVGA] Centering image. Starting at (%d,%d)\n"
-#define MSGTR_LIBVO_SVGA_UsingVidix "[VO_SVGA] Using VIDIX. w=%i h=%i mw=%i mh=%i\n"
-
-// vo_tdfx_vid.c
-#define MSGTR_LIBVO_TDFXVID_Move "[VO_TDXVID] Move %d(%d) x %d => %d.\n"
-#define MSGTR_LIBVO_TDFXVID_AGPMoveFailedToClearTheScreen "[VO_TDFXVID] AGP move failed to clear the screen.\n"
-#define MSGTR_LIBVO_TDFXVID_BlitFailed "[VO_TDFXVID] Blit failed.\n"
-#define MSGTR_LIBVO_TDFXVID_NonNativeOverlayFormatNeedConversion "[VO_TDFXVID] Non-native overlay format needs conversion.\n"
-#define MSGTR_LIBVO_TDFXVID_UnsupportedInputFormat "[VO_TDFXVID] Unsupported input format 0x%x.\n"
-#define MSGTR_LIBVO_TDFXVID_OverlaySetupFailed "[VO_TDFXVID] Overlay setup failed.\n"
-#define MSGTR_LIBVO_TDFXVID_OverlayOnFailed "[VO_TDFXVID] Overlay on failed.\n"
-#define MSGTR_LIBVO_TDFXVID_OverlayReady "[VO_TDFXVID] Overlay ready: %d(%d) x %d @ %d => %d(%d) x %d @ %d.\n"
-#define MSGTR_LIBVO_TDFXVID_TextureBlitReady "[VO_TDFXVID] Texture blit ready: %d(%d) x %d @ %d => %d(%d) x %d @ %d.\n"
-#define MSGTR_LIBVO_TDFXVID_OverlayOffFailed "[VO_TDFXVID] Overlay off failed\n"
-#define MSGTR_LIBVO_TDFXVID_CantOpen "[VO_TDFXVID] Can't open %s: %s.\n"
-#define MSGTR_LIBVO_TDFXVID_CantGetCurrentCfg "[VO_TDFXVID] Can't get current configuration: %s.\n"
-#define MSGTR_LIBVO_TDFXVID_MemmapFailed "[VO_TDFXVID] Memmap failed !!!!!\n"
-#define MSGTR_LIBVO_TDFXVID_GetImageTodo "Get image todo.\n"
-#define MSGTR_LIBVO_TDFXVID_AgpMoveFailed "[VO_TDFXVID] AGP move failed.\n"
-#define MSGTR_LIBVO_TDFXVID_SetYuvFailed "[VO_TDFXVID] Set YUV failed.\n"
-#define MSGTR_LIBVO_TDFXVID_AgpMoveFailedOnYPlane "[VO_TDFXVID] AGP move failed on Y plane.\n"
-#define MSGTR_LIBVO_TDFXVID_AgpMoveFailedOnUPlane "[VO_TDFXVID] AGP move failed on U plane.\n"
-#define MSGTR_LIBVO_TDFXVID_AgpMoveFailedOnVPlane "[VO_TDFXVID] AGP move failed on V plane.\n"
-#define MSGTR_LIBVO_TDFXVID_UnknownFormat "[VO_TDFXVID] unknown format: 0x%x.\n"
-
-// vo_tdfxfb.c
-#define MSGTR_LIBVO_TDFXFB_CantOpen "[VO_TDFXFB] Can't open %s: %s.\n"
-#define MSGTR_LIBVO_TDFXFB_ProblemWithFbitgetFscreenInfo "[VO_TDFXFB] Problem with FBITGET_FSCREENINFO ioctl: %s.\n"
-#define MSGTR_LIBVO_TDFXFB_ProblemWithFbitgetVscreenInfo "[VO_TDFXFB] Problem with FBITGET_VSCREENINFO ioctl: %s.\n"
-#define MSGTR_LIBVO_TDFXFB_ThisDriverOnlySupports "[VO_TDFXFB] This driver only supports the 3Dfx Banshee, Voodoo3 and Voodoo 5.\n"
-#define MSGTR_LIBVO_TDFXFB_OutputIsNotSupported "[VO_TDFXFB] %d bpp output is not supported.\n"
-#define MSGTR_LIBVO_TDFXFB_CouldntMapMemoryAreas "[VO_TDFXFB] Couldn't map memory areas: %s.\n"
-#define MSGTR_LIBVO_TDFXFB_BppOutputIsNotSupported "[VO_TDFXFB] %d bpp output is not supported (This should never have happened).\n"
-#define MSGTR_LIBVO_TDFXFB_SomethingIsWrongWithControl "[VO_TDFXFB] Eik! Something's wrong with control().\n"
-#define MSGTR_LIBVO_TDFXFB_NotEnoughVideoMemoryToPlay "[VO_TDFXFB] Not enough video memory to play this movie. Try at a lower resolution.\n"
-#define MSGTR_LIBVO_TDFXFB_ScreenIs "[VO_TDFXFB] Screen is %dx%d at %d bpp, in is %dx%d at %d bpp, norm is %dx%d.\n"
-
-// vo_tga.c
-#define MSGTR_LIBVO_TGA_UnknownSubdevice "[VO_TGA] Unknown subdevice: %s.\n"
-
-// vo_vesa.c
-#define MSGTR_LIBVO_VESA_FatalErrorOccurred "[VO_VESA] Fatal error occurred! Can't continue.\n"
-#define MSGTR_LIBVO_VESA_UnknownSubdevice "[VO_VESA] unknown subdevice: '%s'.\n"
-#define MSGTR_LIBVO_VESA_YouHaveTooLittleVideoMemory "[VO_VESA] You have too little video memory for this mode:\n[VO_VESA] Required: %08lX present: %08lX.\n"
-#define MSGTR_LIBVO_VESA_YouHaveToSpecifyTheCapabilitiesOfTheMonitor "[VO_VESA] You have to specify the capabilities of the monitor. Not changing refresh rate.\n"
-#define MSGTR_LIBVO_VESA_UnableToFitTheMode "[VO_VESA] The mode does not fit the monitor limits. Not changing refresh rate.\n"
-#define MSGTR_LIBVO_VESA_DetectedInternalFatalError "[VO_VESA] Detected internal fatal error: init is called before preinit.\n"
-#define MSGTR_LIBVO_VESA_SwitchFlipIsNotSupported "[VO_VESA] The -flip option is not supported.\n"
-#define MSGTR_LIBVO_VESA_PossibleReasonNoVbe2BiosFound "[VO_VESA] Possible reason: No VBE2 BIOS found.\n"
-#define MSGTR_LIBVO_VESA_FoundVesaVbeBiosVersion "[VO_VESA] Found VESA VBE BIOS Version %x.%x Revision: %x.\n"
-#define MSGTR_LIBVO_VESA_VideoMemory "[VO_VESA] Video memory: %u Kb.\n"
-#define MSGTR_LIBVO_VESA_Capabilites "[VO_VESA] VESA Capabilities: %s %s %s %s %s.\n"
-#define MSGTR_LIBVO_VESA_BelowWillBePrintedOemInfo "[VO_VESA] !!! OEM info will be printed below !!!\n"
-#define MSGTR_LIBVO_VESA_YouShouldSee5OemRelatedLines "[VO_VESA] You should see 5 OEM related lines below; If not, you've broken vm86.\n"
-#define MSGTR_LIBVO_VESA_OemInfo "[VO_VESA] OEM info: %s.\n"
-#define MSGTR_LIBVO_VESA_OemRevision "[VO_VESA] OEM Revision: %x.\n"
-#define MSGTR_LIBVO_VESA_OemVendor "[VO_VESA] OEM vendor: %s.\n"
-#define MSGTR_LIBVO_VESA_OemProductName "[VO_VESA] OEM Product Name: %s.\n"
-#define MSGTR_LIBVO_VESA_OemProductRev "[VO_VESA] OEM Product Rev: %s.\n"
-#define MSGTR_LIBVO_VESA_Hint "[VO_VESA] Hint: For working TV-Out you should have plugged in the TV connector\n"\
-"[VO_VESA] before booting since VESA BIOS initializes itself only during POST.\n"
-#define MSGTR_LIBVO_VESA_UsingVesaMode "[VO_VESA] Using VESA mode (%u) = %x [%ux%u@%u]\n"
-#define MSGTR_LIBVO_VESA_CantInitializeSwscaler "[VO_VESA] Can't initialize software scaler.\n"
-#define MSGTR_LIBVO_VESA_CantUseDga "[VO_VESA] Can't use DGA. Force bank switching mode. :(\n"
-#define MSGTR_LIBVO_VESA_UsingDga "[VO_VESA] Using DGA (physical resources: %08lXh, %08lXh)"
-#define MSGTR_LIBVO_VESA_CantUseDoubleBuffering "[VO_VESA] Can't use double buffering: not enough video memory.\n"
-#define MSGTR_LIBVO_VESA_CantFindNeitherDga "[VO_VESA] Can find neither DGA nor relocatable window frame.\n"
-#define MSGTR_LIBVO_VESA_YouveForcedDga "[VO_VESA] You've forced DGA. Exiting\n"
-#define MSGTR_LIBVO_VESA_CantFindValidWindowAddress "[VO_VESA] Can't find valid window address.\n"
-#define MSGTR_LIBVO_VESA_UsingBankSwitchingMode "[VO_VESA] Using bank switching mode (physical resources: %08lXh, %08lXh).\n"
-#define MSGTR_LIBVO_VESA_CantAllocateTemporaryBuffer "[VO_VESA] Can't allocate temporary buffer.\n"
-#define MSGTR_LIBVO_VESA_SorryUnsupportedMode "[VO_VESA] Sorry, unsupported mode -- try -x 640 -zoom.\n"
-#define MSGTR_LIBVO_VESA_OhYouReallyHavePictureOnTv "[VO_VESA] Oh you really have a picture on the TV!\n"
-#define MSGTR_LIBVO_VESA_CantInitialozeLinuxVideoOverlay "[VO_VESA] Can't initialize Linux Video Overlay.\n"
-#define MSGTR_LIBVO_VESA_UsingVideoOverlay "[VO_VESA] Using video overlay: %s.\n"
-#define MSGTR_LIBVO_VESA_CantInitializeVidixDriver "[VO_VESA] Can't initialize VIDIX driver.\n"
-#define MSGTR_LIBVO_VESA_UsingVidix "[VO_VESA] Using VIDIX.\n"
-#define MSGTR_LIBVO_VESA_CantFindModeFor "[VO_VESA] Can't find mode for: %ux%u@%u.\n"
-#define MSGTR_LIBVO_VESA_InitializationComplete "[VO_VESA] VESA initialization complete.\n"
-
-// vesa_lvo.c
-#define MSGTR_LIBVO_VESA_ThisBranchIsNoLongerSupported "[VESA_LVO] This branch is no longer supported.\n[VESA_LVO] Please use -vo vesa:vidix instead.\n"
-#define MSGTR_LIBVO_VESA_CouldntOpen "[VESA_LVO] Couldn't open: '%s'\n"
-#define MSGTR_LIBVO_VESA_InvalidOutputFormat "[VESA_LVI] Invalid output format: %s(%0X)\n"
-#define MSGTR_LIBVO_VESA_IncompatibleDriverVersion "[VESA_LVO] Your fb_vid driver version is incompatible with this MPlayer version!\n"
-
-// vo_x11.c
-#define MSGTR_LIBVO_X11_DrawFrameCalled "[VO_X11] draw_frame() called!!!!!!\n"
-
-// vo_xv.c
-#define MSGTR_LIBVO_XV_DrawFrameCalled "[VO_XV] draw_frame() called!!!!!!\n"
-#define MSGTR_LIBVO_XV_SharedMemoryNotSupported "[VO_XV] Shared memory not supported\nReverting to normal Xv.\n"
-#define MSGTR_LIBVO_XV_XvNotSupportedByX11 "[VO_XV] Sorry, Xv not supported by this X11 version/driver\n[VO_XV] ******** Try with -vo x11 or -vo sdl *********\n"
-#define MSGTR_LIBVO_XV_XvQueryAdaptorsFailed "[VO_XV] XvQueryAdaptors failed.\n"
-#define MSGTR_LIBVO_XV_InvalidPortParameter "[VO_XV] Invalid port parameter, overriding with port 0.\n"
-#define MSGTR_LIBVO_XV_CouldNotGrabPort "[VO_XV] Could not grab port %i.\n"
-#define MSGTR_LIBVO_XV_CouldNotFindFreePort "[VO_XV] Could not find free Xvideo port - maybe another process is already\n"\
-"[VO_XV] using it. Close all video applications, and try again. If that does\n"\
-"[VO_XV] not help, see 'mplayer -vo help' for other (non-xv) video out drivers.\n"
-#define MSGTR_LIBVO_XV_NoXvideoSupport "[VO_XV] It seems there is no Xvideo support for your video card available.\n"\
-"[VO_XV] Run 'xvinfo' to verify its Xv support and read\n"\
-"[VO_XV] DOCS/HTML/en/video.html#xv!\n"\
-"[VO_XV] See 'mplayer -vo help' for other (non-xv) video out drivers.\n"\
-"[VO_XV] Try -vo x11.\n"
-#define MSGTR_VO_XV_ImagedimTooHigh "Source image dimensions are too high: %ux%u (maximum is %ux%u)\n"
-
-// vo_yuv4mpeg.c
-#define MSGTR_VO_YUV4MPEG_InterlacedHeightDivisibleBy4 "Interlaced mode requires image height to be divisible by 4."
-#define MSGTR_VO_YUV4MPEG_InterlacedLineBufAllocFail "Unable to allocate line buffer for interlaced mode."
-#define MSGTR_VO_YUV4MPEG_WidthDivisibleBy2 "Image width must be divisible by 2."
-#define MSGTR_VO_YUV4MPEG_OutFileOpenError "Can't get memory or file handle to write \"%s\"!"
-#define MSGTR_VO_YUV4MPEG_OutFileWriteError "Error writing image to output!"
-#define MSGTR_VO_YUV4MPEG_UnknownSubDev "Unknown subdevice: %s"
-#define MSGTR_VO_YUV4MPEG_InterlacedTFFMode "Using interlaced output mode, top-field first."
-#define MSGTR_VO_YUV4MPEG_InterlacedBFFMode "Using interlaced output mode, bottom-field first."
-#define MSGTR_VO_YUV4MPEG_ProgressiveMode "Using (default) progressive frame mode."
-
-// vosub_vidix.c
-#define MSGTR_LIBVO_SUB_VIDIX_CantStartPlayback "[VO_SUB_VIDIX] Can't start playback: %s\n"
-#define MSGTR_LIBVO_SUB_VIDIX_CantStopPlayback "[VO_SUB_VIDIX] Can't stop playback: %s\n"
-#define MSGTR_LIBVO_SUB_VIDIX_InterleavedUvForYuv410pNotSupported "[VO_SUB_VIDIX] Interleaved UV for YUV410P not supported.\n"
-#define MSGTR_LIBVO_SUB_VIDIX_DummyVidixdrawsliceWasCalled "[VO_SUB_VIDIX] Dummy vidix_draw_slice() was called.\n"
-#define MSGTR_LIBVO_SUB_VIDIX_DummyVidixdrawframeWasCalled "[VO_SUB_VIDIX] Dummy vidix_draw_frame() was called.\n"
-#define MSGTR_LIBVO_SUB_VIDIX_UnsupportedFourccForThisVidixDriver "[VO_SUB_VIDIX] Unsupported FourCC for this VIDIX driver: %x (%s).\n"
-#define MSGTR_LIBVO_SUB_VIDIX_VideoServerHasUnsupportedResolution "[VO_SUB_VIDIX] Video server has unsupported resolution (%dx%d), supported: %dx%d-%dx%d.\n"
-#define MSGTR_LIBVO_SUB_VIDIX_VideoServerHasUnsupportedColorDepth "[VO_SUB_VIDIX] Video server has unsupported color depth by vidix (%d).\n"
-#define MSGTR_LIBVO_SUB_VIDIX_DriverCantUpscaleImage "[VO_SUB_VIDIX] VIDIX driver can't upscale image (%d%d -> %d%d).\n"
-#define MSGTR_LIBVO_SUB_VIDIX_DriverCantDownscaleImage "[VO_SUB_VIDIX] VIDIX driver can't downscale image (%d%d -> %d%d).\n"
-#define MSGTR_LIBVO_SUB_VIDIX_CantConfigurePlayback "[VO_SUB_VIDIX] Can't configure playback: %s.\n"
-#define MSGTR_LIBVO_SUB_VIDIX_YouHaveWrongVersionOfVidixLibrary "[VO_SUB_VIDIX] You have the wrong version of the VIDIX library.\n"
-#define MSGTR_LIBVO_SUB_VIDIX_CouldntFindWorkingVidixDriver "[VO_SUB_VIDIX] Couldn't find working VIDIX driver.\n"
-#define MSGTR_LIBVO_SUB_VIDIX_CouldntGetCapability "[VO_SUB_VIDIX] Couldn't get capability: %s.\n"
-
-// x11_common.c
-#define MSGTR_EwmhFullscreenStateFailed "\nX11: Couldn't send EWMH fullscreen event!\n"
-#define MSGTR_CouldNotFindXScreenSaver "xscreensaver_disable: Could not find XScreenSaver window.\n"
-#define MSGTR_SelectedVideoMode "XF86VM: Selected video mode %dx%d for image size %dx%d.\n"
-
-#define MSGTR_InsertingAfVolume "[Mixer] No hardware mixing, inserting volume filter.\n"
-#define MSGTR_NoVolume "[Mixer] No volume control available.\n"
-#define MSGTR_NoBalance "[Mixer] No balance control available.\n"
-
-// old vo drivers that have been replaced
-#define MSGTR_VO_PGM_HasBeenReplaced "The pgm video output driver has been replaced by -vo pnm:pgmyuv.\n"
-#define MSGTR_VO_MD5_HasBeenReplaced "The md5 video output driver has been replaced by -vo md5sum.\n"
-#define MSGTR_VO_GL2_HasBeenRenamed "The gl2 video output driver has been renamed to -vo gl_tiled, but you really should be using -vo gl instead.\n"
-
-
-// ======================= audio output drivers ========================
-
-// audio_out.c
-#define MSGTR_AO_ALSA9_1x_Removed "audio_out: alsa9 and alsa1x modules were removed, use -ao alsa instead.\n"
-#define MSGTR_AO_NoSuchDriver "No such audio driver '%.*s'\n"
-#define MSGTR_AO_FailedInit "Failed to initialize audio driver '%s'\n"
-
-// ao_oss.c
-#define MSGTR_AO_OSS_CantOpenMixer "[AO OSS] audio_setup: Can't open mixer device %s: %s\n"
-#define MSGTR_AO_OSS_ChanNotFound "[AO OSS] audio_setup: Audio card mixer does not have channel '%s', using default.\n"
-#define MSGTR_AO_OSS_CantOpenDev "[AO OSS] audio_setup: Can't open audio device %s: %s\n"
-#define MSGTR_AO_OSS_CantMakeFd "[AO OSS] audio_setup: Can't make file descriptor blocking: %s\n"
-#define MSGTR_AO_OSS_CantSet "[AO OSS] Can't set audio device %s to %s output, trying %s...\n"
-#define MSGTR_AO_OSS_CantSetChans "[AO OSS] audio_setup: Failed to set audio device to %d channels.\n"
-#define MSGTR_AO_OSS_CantUseGetospace "[AO OSS] audio_setup: driver doesn't support SNDCTL_DSP_GETOSPACE :-(\n"
-#define MSGTR_AO_OSS_CantUseSelect "[AO OSS]\n *** Your audio driver DOES NOT support select() ***\n Recompile MPlayer with #undef HAVE_AUDIO_SELECT in config.h !\n\n"
-#define MSGTR_AO_OSS_CantReopen "[AO OSS]\nFatal error: *** CANNOT RE-OPEN / RESET AUDIO DEVICE *** %s\n"
-#define MSGTR_AO_OSS_UnknownUnsupportedFormat "[AO OSS] Unknown/Unsupported OSS format: %x.\n"
-
-// ao_arts.c
-#define MSGTR_AO_ARTS_CantInit "[AO ARTS] %s\n"
-#define MSGTR_AO_ARTS_ServerConnect "[AO ARTS] Connected to sound server.\n"
-#define MSGTR_AO_ARTS_CantOpenStream "[AO ARTS] Unable to open a stream.\n"
-#define MSGTR_AO_ARTS_StreamOpen "[AO ARTS] Stream opened.\n"
-#define MSGTR_AO_ARTS_BufferSize "[AO ARTS] buffer size: %d\n"
-
-// ao_dxr2.c
-#define MSGTR_AO_DXR2_SetVolFailed "[AO DXR2] Setting volume to %d failed.\n"
-#define MSGTR_AO_DXR2_UnsupSamplerate "[AO DXR2] %d Hz not supported, try to resample.\n"
-
-// ao_esd.c
-#define MSGTR_AO_ESD_CantOpenSound "[AO ESD] esd_open_sound failed: %s\n"
-#define MSGTR_AO_ESD_LatencyInfo "[AO ESD] latency: [server: %0.2fs, net: %0.2fs] (adjust %0.2fs)\n"
-#define MSGTR_AO_ESD_CantOpenPBStream "[AO ESD] failed to open ESD playback stream: %s\n"
-
-// ao_mpegpes.c
-#define MSGTR_AO_MPEGPES_CantSetMixer "[AO MPEGPES] DVB audio set mixer failed: %s.\n"
-#define MSGTR_AO_MPEGPES_UnsupSamplerate "[AO MPEGPES] %d Hz not supported, try to resample.\n"
-
-// ao_pcm.c
-#define MSGTR_AO_PCM_FileInfo "[AO PCM] File: %s (%s)\nPCM: Samplerate: %iHz Channels: %s Format %s\n"
-#define MSGTR_AO_PCM_HintInfo "[AO PCM] Info: Faster dumping is achieved with -benchmark -vc null -vo null -ao pcm:fast\n[AO PCM] Info: To write WAVE files use -ao pcm:waveheader (default).\n"
-#define MSGTR_AO_PCM_CantOpenOutputFile "[AO PCM] Failed to open %s for writing!\n"
-
-// ao_sdl.c
-#define MSGTR_AO_SDL_INFO "[AO SDL] Samplerate: %iHz Channels: %s Format %s\n"
-#define MSGTR_AO_SDL_DriverInfo "[AO SDL] using %s audio driver.\n"
-#define MSGTR_AO_SDL_UnsupportedAudioFmt "[AO SDL] Unsupported audio format: 0x%x.\n"
-#define MSGTR_AO_SDL_CantInit "[AO SDL] SDL Audio initialization failed: %s\n"
-#define MSGTR_AO_SDL_CantOpenAudio "[AO SDL] Unable to open audio: %s\n"
-
-// ao_sgi.c
-#define MSGTR_AO_SGI_INFO "[AO SGI] control.\n"
-#define MSGTR_AO_SGI_InitInfo "[AO SGI] init: Samplerate: %iHz Channels: %s Format %s\n"
-#define MSGTR_AO_SGI_InvalidDevice "[AO SGI] play: invalid device.\n"
-#define MSGTR_AO_SGI_CantSetParms_Samplerate "[AO SGI] init: setparams failed: %s\nCould not set desired samplerate.\n"
-#define MSGTR_AO_SGI_CantSetAlRate "[AO SGI] init: AL_RATE was not accepted on the given resource.\n"
-#define MSGTR_AO_SGI_CantGetParms "[AO SGI] init: getparams failed: %s\n"
-#define MSGTR_AO_SGI_SampleRateInfo "[AO SGI] init: samplerate is now %f (desired rate is %f)\n"
-#define MSGTR_AO_SGI_InitConfigError "[AO SGI] init: %s\n"
-#define MSGTR_AO_SGI_InitOpenAudioFailed "[AO SGI] init: Unable to open audio channel: %s\n"
-#define MSGTR_AO_SGI_Uninit "[AO SGI] uninit: ...\n"
-#define MSGTR_AO_SGI_Reset "[AO SGI] reset: ...\n"
-#define MSGTR_AO_SGI_PauseInfo "[AO SGI] audio_pause: ...\n"
-#define MSGTR_AO_SGI_ResumeInfo "[AO SGI] audio_resume: ...\n"
-
-// ao_sun.c
-#define MSGTR_AO_SUN_RtscSetinfoFailed "[AO SUN] rtsc: SETINFO failed.\n"
-#define MSGTR_AO_SUN_RtscWriteFailed "[AO SUN] rtsc: write failed.\n"
-#define MSGTR_AO_SUN_CantOpenAudioDev "[AO SUN] Can't open audio device %s, %s -> nosound.\n"
-#define MSGTR_AO_SUN_UnsupSampleRate "[AO SUN] audio_setup: your card doesn't support %d channel, %s, %d Hz samplerate.\n"
-#define MSGTR_AO_SUN_CantUseSelect "[AO SUN]\n *** Your audio driver DOES NOT support select() ***\nRecompile MPlayer with #undef HAVE_AUDIO_SELECT in config.h !\n\n"
-#define MSGTR_AO_SUN_CantReopenReset "[AO SUN]\nFatal error: *** CANNOT REOPEN / RESET AUDIO DEVICE (%s) ***\n"
-
-// ao_alsa.c
-#define MSGTR_AO_ALSA_InvalidMixerIndexDefaultingToZero "[AO_ALSA] Invalid mixer index. Defaulting to 0.\n"
-#define MSGTR_AO_ALSA_MixerOpenError "[AO_ALSA] Mixer open error: %s\n"
-#define MSGTR_AO_ALSA_MixerAttachError "[AO_ALSA] Mixer attach %s error: %s\n"
-#define MSGTR_AO_ALSA_MixerRegisterError "[AO_ALSA] Mixer register error: %s\n"
-#define MSGTR_AO_ALSA_MixerLoadError "[AO_ALSA] Mixer load error: %s\n"
-#define MSGTR_AO_ALSA_UnableToFindSimpleControl "[AO_ALSA] Unable to find simple control '%s',%i.\n"
-#define MSGTR_AO_ALSA_ErrorSettingLeftChannel "[AO_ALSA] Error setting left channel, %s\n"
-#define MSGTR_AO_ALSA_ErrorSettingRightChannel "[AO_ALSA] Error setting right channel, %s\n"
-#define MSGTR_AO_ALSA_CommandlineHelp "\n[AO_ALSA] -ao alsa commandline help:\n"\
-"[AO_ALSA] Example: mplayer -ao alsa:device=hw=0.3\n"\
-"[AO_ALSA] Sets first card fourth hardware device.\n\n"\
-"[AO_ALSA] Options:\n"\
-"[AO_ALSA] noblock\n"\
-"[AO_ALSA] Opens device in non-blocking mode.\n"\
-"[AO_ALSA] device=<device-name>\n"\
-"[AO_ALSA] Sets device (change , to . and : to =)\n"
-#define MSGTR_AO_ALSA_ChannelsNotSupported "[AO_ALSA] %d channels are not supported.\n"
-#define MSGTR_AO_ALSA_OpenInNonblockModeFailed "[AO_ALSA] Open in nonblock-mode failed, trying to open in block-mode.\n"
-#define MSGTR_AO_ALSA_PlaybackOpenError "[AO_ALSA] Playback open error: %s\n"
-#define MSGTR_AO_ALSA_ErrorSetBlockMode "[AL_ALSA] Error setting block-mode %s.\n"
-#define MSGTR_AO_ALSA_UnableToGetInitialParameters "[AO_ALSA] Unable to get initial parameters: %s\n"
-#define MSGTR_AO_ALSA_UnableToSetAccessType "[AO_ALSA] Unable to set access type: %s\n"
-#define MSGTR_AO_ALSA_FormatNotSupportedByHardware "[AO_ALSA] Format %s is not supported by hardware, trying default.\n"
-#define MSGTR_AO_ALSA_UnableToSetFormat "[AO_ALSA] Unable to set format: %s\n"
-#define MSGTR_AO_ALSA_UnableToSetChannels "[AO_ALSA] Unable to set channels: %s\n"
-#define MSGTR_AO_ALSA_UnableToDisableResampling "[AO_ALSA] Unable to disable resampling: %s\n"
-#define MSGTR_AO_ALSA_UnableToSetSamplerate2 "[AO_ALSA] Unable to set samplerate-2: %s\n"
-#define MSGTR_AO_ALSA_UnableToSetBufferTimeNear "[AO_ALSA] Unable to set buffer time near: %s\n"
-#define MSGTR_AO_ALSA_UnableToGetPeriodSize "[AO ALSA] Unable to get period size: %s\n"
-#define MSGTR_AO_ALSA_UnableToSetPeriods "[AO_ALSA] Unable to set periods: %s\n"
-#define MSGTR_AO_ALSA_UnableToSetHwParameters "[AO_ALSA] Unable to set hw-parameters: %s\n"
-#define MSGTR_AO_ALSA_UnableToGetBufferSize "[AO_ALSA] Unable to get buffersize: %s\n"
-#define MSGTR_AO_ALSA_UnableToGetSwParameters "[AO_ALSA] Unable to get sw-parameters: %s\n"
-#define MSGTR_AO_ALSA_UnableToSetSwParameters "[AO_ALSA] Unable to set sw-parameters: %s\n"
-#define MSGTR_AO_ALSA_UnableToGetBoundary "[AO_ALSA] Unable to get boundary: %s\n"
-#define MSGTR_AO_ALSA_UnableToSetStartThreshold "[AO_ALSA] Unable to set start threshold: %s\n"
-#define MSGTR_AO_ALSA_UnableToSetStopThreshold "[AO_ALSA] Unable to set stop threshold: %s\n"
-#define MSGTR_AO_ALSA_UnableToSetSilenceSize "[AO_ALSA] Unable to set silence size: %s\n"
-#define MSGTR_AO_ALSA_PcmCloseError "[AO_ALSA] pcm close error: %s\n"
-#define MSGTR_AO_ALSA_NoHandlerDefined "[AO_ALSA] No handler defined!\n"
-#define MSGTR_AO_ALSA_PcmPrepareError "[AO_ALSA] pcm prepare error: %s\n"
-#define MSGTR_AO_ALSA_PcmPauseError "[AO_ALSA] pcm pause error: %s\n"
-#define MSGTR_AO_ALSA_PcmDropError "[AO_ALSA] pcm drop error: %s\n"
-#define MSGTR_AO_ALSA_PcmResumeError "[AO_ALSA] pcm resume error: %s\n"
-#define MSGTR_AO_ALSA_DeviceConfigurationError "[AO_ALSA] Device configuration error."
-#define MSGTR_AO_ALSA_PcmInSuspendModeTryingResume "[AO_ALSA] Pcm in suspend mode, trying to resume.\n"
-#define MSGTR_AO_ALSA_WriteError "[AO_ALSA] Write error: %s\n"
-#define MSGTR_AO_ALSA_TryingToResetSoundcard "[AO_ALSA] Trying to reset soundcard.\n"
-#define MSGTR_AO_ALSA_CannotGetPcmStatus "[AO_ALSA] Cannot get pcm status: %s\n"
-
-// ao_plugin.c
-#define MSGTR_AO_PLUGIN_InvalidPlugin "[AO PLUGIN] invalid plugin: %s\n"
-
-
-// ======================= audio filters ================================
-
-// af_scaletempo.c
-#define MSGTR_AF_ValueOutOfRange MSGTR_VO_ValueOutOfRange
-
-// af_ladspa.c
-#define MSGTR_AF_LADSPA_AvailableLabels "available labels in"
-#define MSGTR_AF_LADSPA_WarnNoInputs "WARNING! This LADSPA plugin has no audio inputs.\n The incoming audio signal will be lost."
-#define MSGTR_AF_LADSPA_ErrMultiChannel "Multi-channel (>2) plugins are not supported (yet).\n Use only mono and stereo plugins."
-#define MSGTR_AF_LADSPA_ErrNoOutputs "This LADSPA plugin has no audio outputs."
-#define MSGTR_AF_LADSPA_ErrInOutDiff "The number of audio inputs and audio outputs of the LADSPA plugin differ."
-#define MSGTR_AF_LADSPA_ErrFailedToLoad "failed to load"
-#define MSGTR_AF_LADSPA_ErrNoDescriptor "Couldn't find ladspa_descriptor() function in the specified library file."
-#define MSGTR_AF_LADSPA_ErrLabelNotFound "Couldn't find label in plugin library."
-#define MSGTR_AF_LADSPA_ErrNoSuboptions "No suboptions specified."
-#define MSGTR_AF_LADSPA_ErrNoLibFile "No library file specified."
-#define MSGTR_AF_LADSPA_ErrNoLabel "No filter label specified."
-#define MSGTR_AF_LADSPA_ErrNotEnoughControls "Not enough controls specified on the command line."
-#define MSGTR_AF_LADSPA_ErrControlBelow "%s: Input control #%d is below lower boundary of %0.4f.\n"
-#define MSGTR_AF_LADSPA_ErrControlAbove "%s: Input control #%d is above upper boundary of %0.4f.\n"
-
-// format.c
-#define MSGTR_AF_FORMAT_UnknownFormat "unknown format "
-
-
-// ========================== INPUT =========================================
-
-// joystick.c
-#define MSGTR_INPUT_JOYSTICK_CantOpen "Can't open joystick device %s: %s\n"
-#define MSGTR_INPUT_JOYSTICK_ErrReading "Error while reading joystick device: %s\n"
-#define MSGTR_INPUT_JOYSTICK_LoosingBytes "Joystick: We lose %d bytes of data\n"
-#define MSGTR_INPUT_JOYSTICK_WarnLostSync "Joystick: warning init event, we have lost sync with driver.\n"
-#define MSGTR_INPUT_JOYSTICK_WarnUnknownEvent "Joystick warning unknown event type %d\n"
-
-// appleir.c
-#define MSGTR_INPUT_APPLE_IR_CantOpen "Can't open Apple IR device: %s\n"
-
-// input.c
-#define MSGTR_INPUT_INPUT_ErrCantRegister2ManyCmdFds "Too many command file descriptors, cannot register file descriptor %d.\n"
-#define MSGTR_INPUT_INPUT_ErrCantRegister2ManyKeyFds "Too many key file descriptors, cannot register file descriptor %d.\n"
-#define MSGTR_INPUT_INPUT_ErrArgMustBeInt "Command %s: argument %d isn't an integer.\n"
-#define MSGTR_INPUT_INPUT_ErrArgMustBeFloat "Command %s: argument %d isn't a float.\n"
-#define MSGTR_INPUT_INPUT_ErrUnterminatedArg "Command %s: argument %d is unterminated.\n"
-#define MSGTR_INPUT_INPUT_ErrUnknownArg "Unknown argument %d\n"
-#define MSGTR_INPUT_INPUT_Err2FewArgs "Command %s requires at least %d arguments, we found only %d so far.\n"
-#define MSGTR_INPUT_INPUT_ErrReadingCmdFd "Error while reading command file descriptor %d: %s\n"
-#define MSGTR_INPUT_INPUT_ErrCmdBufferFullDroppingContent "Command buffer of file descriptor %d is full: dropping content.\n"
-#define MSGTR_INPUT_INPUT_ErrInvalidCommandForKey "Invalid command for bound key %s"
-#define MSGTR_INPUT_INPUT_ErrSelect "Select error: %s\n"
-#define MSGTR_INPUT_INPUT_ErrOnKeyInFd "Error on key input file descriptor %d\n"
-#define MSGTR_INPUT_INPUT_ErrDeadKeyOnFd "Dead key input on file descriptor %d\n"
-#define MSGTR_INPUT_INPUT_Err2ManyKeyDowns "Too many key down events at the same time\n"
-#define MSGTR_INPUT_INPUT_ErrOnCmdFd "Error on command file descriptor %d\n"
-#define MSGTR_INPUT_INPUT_ErrReadingInputConfig "Error while reading input config file %s: %s\n"
-#define MSGTR_INPUT_INPUT_ErrUnknownKey "Unknown key '%s'\n"
-#define MSGTR_INPUT_INPUT_ErrUnfinishedBinding "Unfinished binding %s\n"
-#define MSGTR_INPUT_INPUT_ErrBuffer2SmallForKeyName "Buffer is too small for this key name: %s\n"
-#define MSGTR_INPUT_INPUT_ErrNoCmdForKey "No command found for key %s"
-#define MSGTR_INPUT_INPUT_ErrBuffer2SmallForCmd "Buffer is too small for command %s\n"
-#define MSGTR_INPUT_INPUT_ErrWhyHere "What are we doing here?\n"
-#define MSGTR_INPUT_INPUT_ErrCantInitJoystick "Can't init input joystick\n"
-#define MSGTR_INPUT_INPUT_ErrCantOpenFile "Can't open %s: %s\n"
-#define MSGTR_INPUT_INPUT_ErrCantInitAppleRemote "Can't init Apple Remote.\n"
-
-// lirc.c
-#define MSGTR_LIRCopenfailed "Failed to open LIRC support. You will not be able to use your remote control.\n"
-#define MSGTR_LIRCcfgerr "Failed to read LIRC config file %s.\n"
-
-
-// ========================== LIBMPDEMUX ===================================
-
-// muxer.c, muxer_*.c
-#define MSGTR_TooManyStreams "Too many streams!"
-#define MSGTR_RawMuxerOnlyOneStream "Rawaudio muxer supports only one audio stream!\n"
-#define MSGTR_IgnoringVideoStream "Ignoring video stream!\n"
-#define MSGTR_UnknownStreamType "Warning, unknown stream type: %d\n"
-#define MSGTR_WarningLenIsntDivisible "Warning, len isn't divisible by samplesize!\n"
-#define MSGTR_MuxbufMallocErr "Muxer frame buffer cannot allocate memory!\n"
-#define MSGTR_MuxbufReallocErr "Muxer frame buffer cannot reallocate memory!\n"
-#define MSGTR_WritingHeader "Writing header...\n"
-#define MSGTR_WritingTrailer "Writing index...\n"
-
-// demuxer.c, demux_*.c
-#define MSGTR_AudioStreamRedefined "WARNING: Audio stream header %d redefined.\n"
-#define MSGTR_VideoStreamRedefined "WARNING: Video stream header %d redefined.\n"
-#define MSGTR_TooManyAudioInBuffer "\nToo many audio packets in the buffer: (%d in %d bytes).\n"
-#define MSGTR_TooManyVideoInBuffer "\nToo many video packets in the buffer: (%d in %d bytes).\n"
-#define MSGTR_MaybeNI "Maybe you are playing a non-interleaved stream/file or the codec failed?\n" \
- "For AVI files, try to force non-interleaved mode with the -ni option.\n"
-#define MSGTR_WorkAroundBlockAlignHeaderBug "AVI: Working around CBR-MP3 nBlockAlign header bug!\n"
-#define MSGTR_SwitchToNi "\nBadly interleaved AVI file detected - switching to -ni mode...\n"
-#define MSGTR_InvalidAudioStreamNosound "AVI: invalid audio stream ID: %d - ignoring (nosound)\n"
-#define MSGTR_InvalidAudioStreamUsingDefault "AVI: invalid video stream ID: %d - ignoring (using default)\n"
-#define MSGTR_ON2AviFormat "ON2 AVI format"
-#define MSGTR_Detected_XXX_FileFormat "%s file format detected.\n"
-#define MSGTR_DetectedAudiofile "Audio file detected.\n"
-#define MSGTR_InvalidMPEGES "Invalid MPEG-ES stream??? Contact the author, it may be a bug :(\n"
-#define MSGTR_FormatNotRecognized "============ Sorry, this file format is not recognized/supported =============\n"\
- "=== If this file is an AVI, ASF or MPEG stream, please contact the author! ===\n"
-#define MSGTR_SettingProcessPriority "Setting process priority: %s\n"
-#define MSGTR_FilefmtFourccSizeFpsFtime "[V] filefmt:%d fourcc:0x%X size:%dx%d fps:%5.3f ftime:=%6.4f\n"
-#define MSGTR_CannotInitializeMuxer "Cannot initialize muxer."
-#define MSGTR_MissingVideoStream "No video stream found.\n"
-#define MSGTR_MissingAudioStream "No audio stream found -> no sound.\n"
-#define MSGTR_MissingVideoStreamBug "Missing video stream!? Contact the author, it may be a bug :(\n"
-
-#define MSGTR_DoesntContainSelectedStream "demux: File doesn't contain the selected audio or video stream.\n"
-
-#define MSGTR_NI_Forced "Forced"
-#define MSGTR_NI_Detected "Detected"
-#define MSGTR_NI_Message "%s NON-INTERLEAVED AVI file format.\n"
-
-#define MSGTR_UsingNINI "Using NON-INTERLEAVED broken AVI file format.\n"
-#define MSGTR_CouldntDetFNo "Could not determine number of frames (for absolute seek).\n"
-#define MSGTR_CantSeekRawAVI "Cannot seek in raw AVI streams. (Index required, try with the -idx switch.)\n"
-#define MSGTR_CantSeekFile "Cannot seek in this file.\n"
-
-#define MSGTR_MOVcomprhdr "MOV: Compressed headers support requires ZLIB!\n"
-#define MSGTR_MOVvariableFourCC "MOV: WARNING: Variable FourCC detected!?\n"
-#define MSGTR_MOVtooManyTrk "MOV: WARNING: too many tracks"
-#define MSGTR_DetectedTV "TV detected! ;-)\n"
-#define MSGTR_ErrorOpeningOGGDemuxer "Unable to open the Ogg demuxer.\n"
-#define MSGTR_CannotOpenAudioStream "Cannot open audio stream: %s\n"
-#define MSGTR_CannotOpenSubtitlesStream "Cannot open subtitle stream: %s\n"
-#define MSGTR_OpeningAudioDemuxerFailed "Failed to open audio demuxer: %s\n"
-#define MSGTR_OpeningSubtitlesDemuxerFailed "Failed to open subtitle demuxer: %s\n"
-#define MSGTR_TVInputNotSeekable "TV input is not seekable! (Seeking will probably be for changing channels ;)\n"
-#define MSGTR_DemuxerInfoChanged "Demuxer info %s changed to %s\n"
-#define MSGTR_ClipInfo "Clip info:\n"
-
-#define MSGTR_LeaveTelecineMode "\ndemux_mpg: 30000/1001fps NTSC content detected, switching framerate.\n"
-#define MSGTR_EnterTelecineMode "\ndemux_mpg: 24000/1001fps progressive NTSC content detected, switching framerate.\n"
-
-#define MSGTR_CacheFill "\rCache fill: %5.2f%% (%"PRId64" bytes) "
-#define MSGTR_NoBindFound "No bind found for key '%s'.\n"
-#define MSGTR_FailedToOpen "Failed to open %s.\n"
-
-#define MSGTR_VideoID "[%s] Video stream found, -vid %d\n"
-#define MSGTR_AudioID "[%s] Audio stream found, -aid %d\n"
-#define MSGTR_SubtitleID "[%s] Subtitle stream found, -sid %d\n"
-
-// asfheader.c
-#define MSGTR_MPDEMUX_ASFHDR_HeaderSizeOver1MB "FATAL: header size bigger than 1 MB (%d)!\nPlease contact MPlayer authors, and upload/send this file.\n"
-#define MSGTR_MPDEMUX_ASFHDR_HeaderMallocFailed "Could not allocate %d bytes for header.\n"
-#define MSGTR_MPDEMUX_ASFHDR_EOFWhileReadingHeader "EOF while reading ASF header, broken/incomplete file?\n"
-#define MSGTR_MPDEMUX_ASFHDR_DVRWantsLibavformat "DVR will probably only work with libavformat, try -demuxer 35 if you have problems\n"
-#define MSGTR_MPDEMUX_ASFHDR_NoDataChunkAfterHeader "No data chunk following header!\n"
-#define MSGTR_MPDEMUX_ASFHDR_AudioVideoHeaderNotFound "ASF: no audio or video headers found - broken file?\n"
-#define MSGTR_MPDEMUX_ASFHDR_InvalidLengthInASFHeader "Invalid length in ASF header!\n"
-#define MSGTR_MPDEMUX_ASFHDR_DRMLicenseURL "DRM License URL: %s\n"
-#define MSGTR_MPDEMUX_ASFHDR_DRMProtected "This file has been encumbered with DRM encryption, it will not play in MPlayer!\n"
-
-// aviheader.c
-#define MSGTR_MPDEMUX_AVIHDR_EmptyList "** empty list?!\n"
-#define MSGTR_MPDEMUX_AVIHDR_WarnNotExtendedAVIHdr "** Warning: this is no extended AVI header..\n"
-#define MSGTR_MPDEMUX_AVIHDR_BuildingODMLidx "AVI: ODML: Building ODML index (%d superindexchunks).\n"
-#define MSGTR_MPDEMUX_AVIHDR_BrokenODMLfile "AVI: ODML: Broken (incomplete?) file detected. Will use traditional index.\n"
-#define MSGTR_MPDEMUX_AVIHDR_CantReadIdxFile "Can't read index file %s: %s\n"
-#define MSGTR_MPDEMUX_AVIHDR_NotValidMPidxFile "%s is not a valid MPlayer index file.\n"
-#define MSGTR_MPDEMUX_AVIHDR_FailedMallocForIdxFile "Could not allocate memory for index data from %s.\n"
-#define MSGTR_MPDEMUX_AVIHDR_PrematureEOF "premature end of index file %s\n"
-#define MSGTR_MPDEMUX_AVIHDR_IdxFileLoaded "Loaded index file: %s\n"
-#define MSGTR_MPDEMUX_AVIHDR_GeneratingIdx "Generating Index: %3lu %s \r"
-#define MSGTR_MPDEMUX_AVIHDR_IdxGeneratedForHowManyChunks "AVI: Generated index table for %d chunks!\n"
-#define MSGTR_MPDEMUX_AVIHDR_Failed2WriteIdxFile "Couldn't write index file %s: %s\n"
-#define MSGTR_MPDEMUX_AVIHDR_IdxFileSaved "Saved index file: %s\n"
-
-// demux_audio.c
-#define MSGTR_MPDEMUX_AUDIO_BadID3v2TagSize "Audio demuxer: bad ID3v2 tag size: larger than stream (%u).\n"
-#define MSGTR_MPDEMUX_AUDIO_DamagedAppendedID3v2Tag "Audio demuxer: damaged appended ID3v2 tag detected.\n"
-#define MSGTR_MPDEMUX_AUDIO_UnknownFormat "Audio demuxer: unknown format %d.\n"
-
-// demux_demuxers.c
-#define MSGTR_MPDEMUX_DEMUXERS_FillBufferError "fill_buffer error: bad demuxer: not vd, ad or sd.\n"
-
-// demux_mkv.c
-#define MSGTR_MPDEMUX_MKV_ZlibInitializationFailed "[mkv] zlib initialization failed.\n"
-#define MSGTR_MPDEMUX_MKV_ZlibDecompressionFailed "[mkv] zlib decompression failed.\n"
-#define MSGTR_MPDEMUX_MKV_LzoInitializationFailed "[mkv] lzo initialization failed.\n"
-#define MSGTR_MPDEMUX_MKV_LzoDecompressionFailed "[mkv] lzo decompression failed.\n"
-#define MSGTR_MPDEMUX_MKV_TrackEncrypted "[mkv] Track number %u has been encrypted and decryption has not yet been\n[mkv] implemented. Skipping track.\n"
-#define MSGTR_MPDEMUX_MKV_UnknownContentEncoding "[mkv] Unknown content encoding type for track %u. Skipping track.\n"
-#define MSGTR_MPDEMUX_MKV_UnknownCompression "[mkv] Track %u has been compressed with an unknown/unsupported compression\n[mkv] algorithm (%u). Skipping track.\n"
-#define MSGTR_MPDEMUX_MKV_ZlibCompressionUnsupported "[mkv] Track %u was compressed with zlib but mplayer has not been compiled\n[mkv] with support for zlib compression. Skipping track.\n"
-#define MSGTR_MPDEMUX_MKV_TrackIDName "[mkv] Track ID %u: %s (%s) \"%s\", %s\n"
-#define MSGTR_MPDEMUX_MKV_TrackID "[mkv] Track ID %u: %s (%s), %s\n"
-#define MSGTR_MPDEMUX_MKV_UnknownCodecID "[mkv] Unknown/unsupported CodecID (%s) or missing/bad CodecPrivate\n[mkv] data (track %u).\n"
-#define MSGTR_MPDEMUX_MKV_FlacTrackDoesNotContainValidHeaders "[mkv] FLAC track does not contain valid headers.\n"
-#define MSGTR_MPDEMUX_MKV_UnknownAudioCodec "[mkv] Unknown/unsupported audio codec ID '%s' for track %u or missing/faulty\n[mkv] private codec data.\n"
-#define MSGTR_MPDEMUX_MKV_SubtitleTypeNotSupported "[mkv] Subtitle type '%s' is not supported.\n"
-#define MSGTR_MPDEMUX_MKV_WillPlayVideoTrack "[mkv] Will play video track %u.\n"
-#define MSGTR_MPDEMUX_MKV_NoVideoTrackFound "[mkv] No video track found/wanted.\n"
-#define MSGTR_MPDEMUX_MKV_NoAudioTrackFound "[mkv] No audio track found/wanted.\n"
-#define MSGTR_MPDEMUX_MKV_WillDisplaySubtitleTrack "[mkv] Will display subtitle track %u.\n"
-#define MSGTR_MPDEMUX_MKV_NoBlockDurationForSubtitleTrackFound "[mkv] Warning: No BlockDuration for subtitle track found.\n"
-#define MSGTR_MPDEMUX_MKV_TooManySublines "[mkv] Warning: too many sublines to render, skipping.\n"
-#define MSGTR_MPDEMUX_MKV_TooManySublinesSkippingAfterFirst "\n[mkv] Warning: too many sublines to render, skipping after first %i.\n"
-
-// demux_nuv.c
-#define MSGTR_MPDEMUX_NUV_NoVideoBlocksInFile "No video blocks in file.\n"
-
-// demux_xmms.c
-#define MSGTR_MPDEMUX_XMMS_FoundPlugin "Found plugin: %s (%s).\n"
-#define MSGTR_MPDEMUX_XMMS_ClosingPlugin "Closing plugin: %s.\n"
-#define MSGTR_MPDEMUX_XMMS_WaitForStart "Waiting for the XMMS plugin to start playback of '%s'...\n"
-
-
-// ========================== LIBMENU ===================================
-
-// common
-#define MSGTR_LIBMENU_NoEntryFoundInTheMenuDefinition "[MENU] No entry found in the menu definition.\n"
-
-// libmenu/menu.c
-#define MSGTR_LIBMENU_SyntaxErrorAtLine "[MENU] syntax error at line: %d\n"
-#define MSGTR_LIBMENU_MenuDefinitionsNeedANameAttrib "[MENU] Menu definitions need a name attribute (line %d).\n"
-#define MSGTR_LIBMENU_BadAttrib "[MENU] bad attribute %s=%s in menu '%s' at line %d\n"
-#define MSGTR_LIBMENU_UnknownMenuType "[MENU] unknown menu type '%s' at line %d\n"
-#define MSGTR_LIBMENU_CantOpenConfigFile "[MENU] Can't open menu config file: %s\n"
-#define MSGTR_LIBMENU_ConfigFileIsTooBig "[MENU] Config file is too big (> %d KB)\n"
-#define MSGTR_LIBMENU_ConfigFileIsEmpty "[MENU] Config file is empty.\n"
-#define MSGTR_LIBMENU_MenuNotFound "[MENU] Menu %s not found.\n"
-#define MSGTR_LIBMENU_MenuInitFailed "[MENU] Menu '%s': Init failed.\n"
-#define MSGTR_LIBMENU_UnsupportedOutformat "[MENU] Unsupported output format!!!!\n"
-
-// libmenu/menu_cmdlist.c
-#define MSGTR_LIBMENU_ListMenuEntryDefinitionsNeedAName "[MENU] List menu entry definitions need a name (line %d).\n"
-#define MSGTR_LIBMENU_ListMenuNeedsAnArgument "[MENU] List menu needs an argument.\n"
-
-// libmenu/menu_console.c
-#define MSGTR_LIBMENU_WaitPidError "[MENU] Waitpid error: %s.\n"
-#define MSGTR_LIBMENU_SelectError "[MENU] Select error.\n"
-#define MSGTR_LIBMENU_ReadErrorOnChildFD "[MENU] Read error on child's file descriptor: %s.\n"
-#define MSGTR_LIBMENU_ConsoleRun "[MENU] Console run: %s ...\n"
-#define MSGTR_LIBMENU_AChildIsAlreadyRunning "[MENU] A child is already running.\n"
-#define MSGTR_LIBMENU_ForkFailed "[MENU] Fork failed !!!\n"
-#define MSGTR_LIBMENU_WriteError "[MENU] write error\n"
-
-// libmenu/menu_filesel.c
-#define MSGTR_LIBMENU_OpendirError "[MENU] opendir error: %s\n"
-#define MSGTR_LIBMENU_ReallocError "[MENU] realloc error: %s\n"
-#define MSGTR_LIBMENU_MallocError "[MENU] memory allocation error: %s\n"
-#define MSGTR_LIBMENU_ReaddirError "[MENU] readdir error: %s\n"
-#define MSGTR_LIBMENU_CantOpenDirectory "[MENU] Can't open directory %s.\n"
-
-// libmenu/menu_param.c
-#define MSGTR_LIBMENU_SubmenuDefinitionNeedAMenuAttribut "[MENU] Submenu definition needs a 'menu' attribute.\n"
-#define MSGTR_LIBMENU_InvalidProperty "[MENU] Invalid property '%s' in pref menu entry. (line %d).\n"
-#define MSGTR_LIBMENU_PrefMenuEntryDefinitionsNeed "[MENU] Pref menu entry definitions need a valid 'property' or 'txt' attribute (line %d).\n"
-#define MSGTR_LIBMENU_PrefMenuNeedsAnArgument "[MENU] Pref menu needs an argument.\n"
-
-// libmenu/menu_pt.c
-#define MSGTR_LIBMENU_CantfindTheTargetItem "[MENU] Can't find the target item ????\n"
-#define MSGTR_LIBMENU_FailedToBuildCommand "[MENU] Failed to build command: %s.\n"
-
-// libmenu/menu_txt.c
-#define MSGTR_LIBMENU_MenuTxtNeedATxtFileName "[MENU] Text menu needs a textfile name (parameter file).\n"
-#define MSGTR_LIBMENU_MenuTxtCantOpen "[MENU] Can't open %s.\n"
-#define MSGTR_LIBMENU_WarningTooLongLineSplitting "[MENU] Warning, line too long. Splitting it.\n"
-#define MSGTR_LIBMENU_ParsedLines "[MENU] Parsed %d lines.\n"
-
-// libmenu/vf_menu.c
-#define MSGTR_LIBMENU_UnknownMenuCommand "[MENU] Unknown command: '%s'.\n"
-#define MSGTR_LIBMENU_FailedToOpenMenu "[MENU] Failed to open menu: '%s'.\n"
-
-
-// ========================== LIBMPCODECS ===================================
-
-// dec_video.c & dec_audio.c:
-#define MSGTR_CantOpenCodec "Could not open codec.\n"
-#define MSGTR_CantCloseCodec "Could not close codec.\n"
-
-#define MSGTR_MissingDLLcodec "ERROR: Could not open required DirectShow codec %s.\n"
-#define MSGTR_ACMiniterror "Could not load/initialize Win32/ACM audio codec (missing DLL file?).\n"
-#define MSGTR_MissingLAVCcodec "Cannot find codec '%s' in libavcodec...\n"
-
-#define MSGTR_MpegNoSequHdr "MPEG: FATAL: EOF while searching for sequence header.\n"
-#define MSGTR_CannotReadMpegSequHdr "FATAL: Cannot read sequence header.\n"
-#define MSGTR_CannotReadMpegSequHdrEx "FATAL: Cannot read sequence header extension.\n"
-#define MSGTR_BadMpegSequHdr "MPEG: bad sequence header\n"
-#define MSGTR_BadMpegSequHdrEx "MPEG: bad sequence header extension\n"
-
-#define MSGTR_ShMemAllocFail "Cannot allocate shared memory.\n"
-#define MSGTR_CantAllocAudioBuf "Cannot allocate audio out buffer.\n"
-
-#define MSGTR_UnknownAudio "Unknown/missing audio format -> no sound\n"
-
-#define MSGTR_UsingExternalPP "[PP] Using external postprocessing filter, max q = %d.\n"
-#define MSGTR_UsingCodecPP "[PP] Using codec's postprocessing, max q = %d.\n"
-#define MSGTR_VideoCodecFamilyNotAvailableStr "Requested video codec family [%s] (vfm=%s) not available.\nEnable it at compilation.\n"
-#define MSGTR_AudioCodecFamilyNotAvailableStr "Requested audio codec family [%s] (afm=%s) not available.\nEnable it at compilation.\n"
-#define MSGTR_OpeningVideoDecoder "Opening video decoder: [%s] %s\n"
-#define MSGTR_SelectedVideoCodec "Selected video codec: [%s] vfm: %s (%s)\n"
-#define MSGTR_OpeningAudioDecoder "Opening audio decoder: [%s] %s\n"
-#define MSGTR_SelectedAudioCodec "Selected audio codec: [%s] afm: %s (%s)\n"
-#define MSGTR_VDecoderInitFailed "VDecoder init failed :(\n"
-#define MSGTR_ADecoderInitFailed "ADecoder init failed :(\n"
-#define MSGTR_ADecoderPreinitFailed "ADecoder preinit failed :(\n"
-
-// ad_dvdpcm.c:
-#define MSGTR_SamplesWanted "Samples of this format are needed to improve support. Please contact the developers.\n"
-
-// libmpcodecs/ad_libdv.c
-#define MSGTR_MPCODECS_AudioFramesizeDiffers "[AD_LIBDV] Warning! Audio framesize differs! read=%d hdr=%d.\n"
-
-// vd.c
-#define MSGTR_CodecDidNotSet "VDec: Codec did not set sh->disp_w and sh->disp_h, trying workaround.\n"
-#define MSGTR_CouldNotFindColorspace "Could not find matching colorspace - retrying with -vf scale...\n"
-#define MSGTR_MovieAspectIsSet "Movie-Aspect is %.2f:1 - prescaling to correct movie aspect.\n"
-#define MSGTR_MovieAspectUndefined "Movie-Aspect is undefined - no prescaling applied.\n"
-
-// vd_dshow.c, vd_dmo.c
-#define MSGTR_DownloadCodecPackage "You need to upgrade/install the binary codecs package.\nGo to http://www.mplayerhq.hu/dload.html\n"
-
-// libmpcodecs/vd_dmo.c vd_dshow.c vd_vfw.c
-#define MSGTR_MPCODECS_CouldntAllocateImageForCinepakCodec "[VD_DMO] Couldn't allocate image for cinepak codec.\n"
-
-// libmpcodecs/vd_ffmpeg.c
-#define MSGTR_MPCODECS_XVMCAcceleratedCodec "[VD_FFMPEG] XVMC accelerated codec.\n"
-#define MSGTR_MPCODECS_ArithmeticMeanOfQP "[VD_FFMPEG] Arithmetic mean of QP: %2.4f, Harmonic mean of QP: %2.4f\n"
-#define MSGTR_MPCODECS_DRIFailure "[VD_FFMPEG] DRI failure.\n"
-#define MSGTR_MPCODECS_CouldntAllocateImageForCodec "[VD_FFMPEG] Couldn't allocate image for codec.\n"
-#define MSGTR_MPCODECS_XVMCAcceleratedMPEG2 "[VD_FFMPEG] XVMC-accelerated MPEG-2.\n"
-#define MSGTR_MPCODECS_TryingPixfmt "[VD_FFMPEG] Trying pixfmt=%d.\n"
-#define MSGTR_MPCODECS_McGetBufferShouldWorkOnlyWithXVMC "[VD_FFMPEG] The mc_get_buffer should work only with XVMC acceleration!!"
-#define MSGTR_MPCODECS_UnexpectedInitVoError "[VD_FFMPEG] Unexpected init_vo error.\n"
-#define MSGTR_MPCODECS_UnrecoverableErrorRenderBuffersNotTaken "[VD_FFMPEG] Unrecoverable error, render buffers not taken.\n"
-#define MSGTR_MPCODECS_OnlyBuffersAllocatedByVoXvmcAllowed "[VD_FFMPEG] Only buffers allocated by vo_xvmc allowed.\n"
-
-// libmpcodecs/ve_lavc.c
-#define MSGTR_MPCODECS_HighQualityEncodingSelected "[VE_LAVC] High quality encoding selected (non-realtime)!\n"
-#define MSGTR_MPCODECS_UsingConstantQscale "[VE_LAVC] Using constant qscale = %f (VBR).\n"
-
-// libmpcodecs/ve_raw.c
-#define MSGTR_MPCODECS_OutputWithFourccNotSupported "[VE_RAW] Raw output with FourCC [%x] not supported!\n"
-#define MSGTR_MPCODECS_NoVfwCodecSpecified "[VE_RAW] Required VfW codec not specified!!\n"
-
-// vf.c
-#define MSGTR_CouldNotFindVideoFilter "Couldn't find video filter '%s'.\n"
-#define MSGTR_CouldNotOpenVideoFilter "Couldn't open video filter '%s'.\n"
-#define MSGTR_OpeningVideoFilter "Opening video filter: "
-#define MSGTR_CannotFindColorspace "Cannot find matching colorspace, even by inserting 'scale' :(\n"
-
-// libmpcodecs/vf_crop.c
-#define MSGTR_MPCODECS_CropBadPositionWidthHeight "[CROP] Bad position/width/height - cropped area outside of the original!\n"
-
-// libmpcodecs/vf_cropdetect.c
-#define MSGTR_MPCODECS_CropArea "[CROP] Crop area: X: %d..%d Y: %d..%d (-vf crop=%d:%d:%d:%d).\n"
-
-// libmpcodecs/vf_format.c, vf_palette.c, vf_noformat.c
-#define MSGTR_MPCODECS_UnknownFormatName "[VF_FORMAT] Unknown format name: '%s'.\n"
-
-// libmpcodecs/vf_framestep.c vf_noformat.c vf_palette.c vf_tile.c
-#define MSGTR_MPCODECS_ErrorParsingArgument "[VF_FRAMESTEP] Error parsing argument.\n"
-
-// libmpcodecs/ve_vfw.c
-#define MSGTR_MPCODECS_CompressorType "Compressor type: %.4lx\n"
-#define MSGTR_MPCODECS_CompressorSubtype "Compressor subtype: %.4lx\n"
-#define MSGTR_MPCODECS_CompressorFlags "Compressor flags: %lu, version %lu, ICM version: %lu\n"
-#define MSGTR_MPCODECS_Flags "Flags:"
-#define MSGTR_MPCODECS_Quality " quality"
-
-// libmpcodecs/vf_expand.c
-#define MSGTR_MPCODECS_FullDRNotPossible "Full DR not possible, trying SLICES instead!\n"
-#define MSGTR_MPCODECS_WarnNextFilterDoesntSupportSlices "WARNING! Next filter doesn't support SLICES, get ready for sig11...\n"
-#define MSGTR_MPCODECS_FunWhydowegetNULL "Why do we get NULL??\n"
-
-// libmpcodecs/vf_test.c, vf_yuy2.c, vf_yvu9.c
-#define MSGTR_MPCODECS_WarnNextFilterDoesntSupport "%s not supported by next filter/vo :(\n"
-
-
-// ================================== LIBASS ====================================
-
-// ass_bitmap.c
-#define MSGTR_LIBASS_FT_Glyph_To_BitmapError "[ass] FT_Glyph_To_Bitmap error %d \n"
-#define MSGTR_LIBASS_UnsupportedPixelMode "[ass] Unsupported pixel mode: %d\n"
-#define MSGTR_LIBASS_GlyphBBoxTooLarge "[ass] Glyph bounding box too large: %dx%dpx\n"
-
-// ass.c
-#define MSGTR_LIBASS_NoStyleNamedXFoundUsingY "[ass] [%p] Warning: no style named '%s' found, using '%s'\n"
-#define MSGTR_LIBASS_BadTimestamp "[ass] bad timestamp\n"
-#define MSGTR_LIBASS_BadEncodedDataSize "[ass] bad encoded data size\n"
-#define MSGTR_LIBASS_FontLineTooLong "[ass] Font line too long: %d, %s\n"
-#define MSGTR_LIBASS_EventFormatHeaderMissing "[ass] Event format header missing\n"
-#define MSGTR_LIBASS_ErrorOpeningIconvDescriptor "[ass] error opening iconv descriptor.\n"
-#define MSGTR_LIBASS_ErrorRecodingFile "[ass] error recoding file.\n"
-#define MSGTR_LIBASS_FopenFailed "[ass] ass_read_file(%s): fopen failed\n"
-#define MSGTR_LIBASS_FseekFailed "[ass] ass_read_file(%s): fseek failed\n"
-#define MSGTR_LIBASS_RefusingToLoadSubtitlesLargerThan100M "[ass] ass_read_file(%s): Refusing to load subtitles larger than 100M\n"
-#define MSGTR_LIBASS_ReadFailed "Read failed, %d: %s\n"
-#define MSGTR_LIBASS_AddedSubtitleFileMemory "[ass] Added subtitle file: <memory> (%d styles, %d events)\n"
-#define MSGTR_LIBASS_AddedSubtitleFileFname "[ass] Added subtitle file: %s (%d styles, %d events)\n"
-#define MSGTR_LIBASS_FailedToCreateDirectory "[ass] Failed to create directory %s\n"
-#define MSGTR_LIBASS_NotADirectory "[ass] Not a directory: %s\n"
-
-// ass_cache.c
-#define MSGTR_LIBASS_TooManyFonts "[ass] Too many fonts\n"
-#define MSGTR_LIBASS_ErrorOpeningFont "[ass] Error opening font: %s, %d\n"
-
-// ass_fontconfig.c
-#define MSGTR_LIBASS_SelectedFontFamilyIsNotTheRequestedOne "[ass] fontconfig: Selected font is not the requested one: '%s' != '%s'\n"
-#define MSGTR_LIBASS_UsingDefaultFontFamily "[ass] fontconfig_select: Using default font family: (%s, %d, %d) -> %s, %d\n"
-#define MSGTR_LIBASS_UsingDefaultFont "[ass] fontconfig_select: Using default font: (%s, %d, %d) -> %s, %d\n"
-#define MSGTR_LIBASS_UsingArialFontFamily "[ass] fontconfig_select: Using 'Arial' font family: (%s, %d, %d) -> %s, %d\n"
-#define MSGTR_LIBASS_FcInitLoadConfigAndFontsFailed "[ass] FcInitLoadConfigAndFonts failed.\n"
-#define MSGTR_LIBASS_UpdatingFontCache "[ass] Updating font cache.\n"
-#define MSGTR_LIBASS_BetaVersionsOfFontconfigAreNotSupported "[ass] Beta versions of fontconfig are not supported.\n[ass] Update before reporting any bugs.\n"
-#define MSGTR_LIBASS_FcStrSetAddFailed "[ass] FcStrSetAdd failed.\n"
-#define MSGTR_LIBASS_FcDirScanFailed "[ass] FcDirScan failed.\n"
-#define MSGTR_LIBASS_FcDirSave "[ass] FcDirSave failed.\n"
-#define MSGTR_LIBASS_FcConfigAppFontAddDirFailed "[ass] FcConfigAppFontAddDir failed\n"
-#define MSGTR_LIBASS_FontconfigDisabledDefaultFontWillBeUsed "[ass] Fontconfig disabled, only default font will be used.\n"
-#define MSGTR_LIBASS_FunctionCallFailed "[ass] %s failed\n"
-
-// ass_render.c
-#define MSGTR_LIBASS_NeitherPlayResXNorPlayResYDefined "[ass] Neither PlayResX nor PlayResY defined. Assuming 384x288.\n"
-#define MSGTR_LIBASS_PlayResYUndefinedSettingY "[ass] PlayResY undefined, setting %d.\n"
-#define MSGTR_LIBASS_PlayResXUndefinedSettingX "[ass] PlayResX undefined, setting %d.\n"
-#define MSGTR_LIBASS_FT_Init_FreeTypeFailed "[ass] FT_Init_FreeType failed.\n"
-#define MSGTR_LIBASS_Init "[ass] Init\n"
-#define MSGTR_LIBASS_InitFailed "[ass] Init failed.\n"
-#define MSGTR_LIBASS_BadCommand "[ass] Bad command: %c%c\n"
-#define MSGTR_LIBASS_ErrorLoadingGlyph "[ass] Error loading glyph.\n"
-#define MSGTR_LIBASS_FT_Glyph_Stroke_Error "[ass] FT_Glyph_Stroke error %d \n"
-#define MSGTR_LIBASS_UnknownEffectType_InternalError "[ass] Unknown effect type (internal error)\n"
-#define MSGTR_LIBASS_NoStyleFound "[ass] No style found!\n"
-#define MSGTR_LIBASS_EmptyEvent "[ass] Empty event!\n"
-#define MSGTR_LIBASS_MAX_GLYPHS_Reached "[ass] MAX_GLYPHS reached: event %d, start = %llu, duration = %llu\n Text = %s\n"
-#define MSGTR_LIBASS_EventHeightHasChanged "[ass] Warning! Event height has changed! \n"
-
-// ass_font.c
-#define MSGTR_LIBASS_GlyphNotFoundReselectingFont "[ass] Glyph 0x%X not found, selecting one more font for (%s, %d, %d)\n"
-#define MSGTR_LIBASS_GlyphNotFound "[ass] Glyph 0x%X not found in font for (%s, %d, %d)\n"
-#define MSGTR_LIBASS_ErrorOpeningMemoryFont "[ass] Error opening memory font: %s\n"
-#define MSGTR_LIBASS_NoCharmaps "[ass] font face with no charmaps\n"
-#define MSGTR_LIBASS_NoCharmapAutodetected "[ass] no charmap autodetected, trying the first one\n"
-
-
-// ================================== stream ====================================
-
-// ai_alsa.c
-#define MSGTR_MPDEMUX_AIALSA_CannotSetSamplerate "Cannot set samplerate.\n"
-#define MSGTR_MPDEMUX_AIALSA_CannotSetBufferTime "Cannot set buffer time.\n"
-#define MSGTR_MPDEMUX_AIALSA_CannotSetPeriodTime "Cannot set period time.\n"
-
-// ai_alsa.c
-#define MSGTR_MPDEMUX_AIALSA_PcmBrokenConfig "Broken configuration for this PCM: no configurations available.\n"
-#define MSGTR_MPDEMUX_AIALSA_UnavailableAccessType "Access type not available.\n"
-#define MSGTR_MPDEMUX_AIALSA_UnavailableSampleFmt "Sample format not available.\n"
-#define MSGTR_MPDEMUX_AIALSA_UnavailableChanCount "Channel count not available - reverting to default: %d\n"
-#define MSGTR_MPDEMUX_AIALSA_CannotInstallHWParams "Unable to install hardware parameters: %s"
-#define MSGTR_MPDEMUX_AIALSA_PeriodEqualsBufferSize "Can't use period equal to buffer size (%u == %lu)\n"
-#define MSGTR_MPDEMUX_AIALSA_CannotInstallSWParams "Unable to install software parameters:\n"
-#define MSGTR_MPDEMUX_AIALSA_ErrorOpeningAudio "Error opening audio: %s\n"
-#define MSGTR_MPDEMUX_AIALSA_AlsaStatusError "ALSA status error: %s"
-#define MSGTR_MPDEMUX_AIALSA_AlsaXRUN "ALSA xrun!!! (at least %.3f ms long)\n"
-#define MSGTR_MPDEMUX_AIALSA_AlsaXRUNPrepareError "ALSA xrun: prepare error: %s"
-#define MSGTR_MPDEMUX_AIALSA_AlsaReadWriteError "ALSA read/write error"
-
-// ai_oss.c
-#define MSGTR_MPDEMUX_AIOSS_Unable2SetChanCount "Unable to set channel count: %d\n"
-#define MSGTR_MPDEMUX_AIOSS_Unable2SetStereo "Unable to set stereo: %d\n"
-#define MSGTR_MPDEMUX_AIOSS_Unable2Open "Unable to open '%s': %s\n"
-#define MSGTR_MPDEMUX_AIOSS_UnsupportedFmt "unsupported format\n"
-#define MSGTR_MPDEMUX_AIOSS_Unable2SetAudioFmt "Unable to set audio format."
-#define MSGTR_MPDEMUX_AIOSS_Unable2SetSamplerate "Unable to set samplerate: %d\n"
-#define MSGTR_MPDEMUX_AIOSS_Unable2SetTrigger "Unable to set trigger: %d\n"
-#define MSGTR_MPDEMUX_AIOSS_Unable2GetBlockSize "Unable to get block size!\n"
-#define MSGTR_MPDEMUX_AIOSS_AudioBlockSizeZero "Audio block size is zero, setting to %d!\n"
-#define MSGTR_MPDEMUX_AIOSS_AudioBlockSize2Low "Audio block size too low, setting to %d!\n"
-
-// asf_mmst_streaming.c
-#define MSGTR_MPDEMUX_MMST_WriteError "write error\n"
-#define MSGTR_MPDEMUX_MMST_EOFAlert "\nAlert! EOF\n"
-#define MSGTR_MPDEMUX_MMST_PreHeaderReadFailed "pre-header read failed\n"
-#define MSGTR_MPDEMUX_MMST_InvalidHeaderSize "Invalid header size, giving up.\n"
-#define MSGTR_MPDEMUX_MMST_HeaderDataReadFailed "Header data read failed.\n"
-#define MSGTR_MPDEMUX_MMST_packet_lenReadFailed "packet_len read failed.\n"
-#define MSGTR_MPDEMUX_MMST_InvalidRTSPPacketSize "Invalid RTSP packet size, giving up.\n"
-#define MSGTR_MPDEMUX_MMST_CmdDataReadFailed "Command data read failed.\n"
-#define MSGTR_MPDEMUX_MMST_HeaderObject "header object\n"
-#define MSGTR_MPDEMUX_MMST_DataObject "data object\n"
-#define MSGTR_MPDEMUX_MMST_FileObjectPacketLen "file object, packet length = %d (%d)\n"
-#define MSGTR_MPDEMUX_MMST_StreamObjectStreamID "stream object, stream ID: %d\n"
-#define MSGTR_MPDEMUX_MMST_2ManyStreamID "Too many IDs, stream skipped."
-#define MSGTR_MPDEMUX_MMST_UnknownObject "unknown object\n"
-#define MSGTR_MPDEMUX_MMST_MediaDataReadFailed "Media data read failed.\n"
-#define MSGTR_MPDEMUX_MMST_MissingSignature "missing signature\n"
-#define MSGTR_MPDEMUX_MMST_PatentedTechnologyJoke "Everything done. Thank you for downloading a media file containing proprietary and patented technology.\n"
-#define MSGTR_MPDEMUX_MMST_UnknownCmd "unknown command %02x\n"
-#define MSGTR_MPDEMUX_MMST_GetMediaPacketErr "get_media_packet error : %s\n"
-#define MSGTR_MPDEMUX_MMST_Connected "Connected\n"
-
-// asf_streaming.c
-#define MSGTR_MPDEMUX_ASF_StreamChunkSize2Small "Ahhhh, stream_chunck size is too small: %d\n"
-#define MSGTR_MPDEMUX_ASF_SizeConfirmMismatch "size_confirm mismatch!: %d %d\n"
-#define MSGTR_MPDEMUX_ASF_WarnDropHeader "Warning: drop header ????\n"
-#define MSGTR_MPDEMUX_ASF_ErrorParsingChunkHeader "Error while parsing chunk header\n"
-#define MSGTR_MPDEMUX_ASF_NoHeaderAtFirstChunk "Didn't get a header as first chunk !!!!\n"
-#define MSGTR_MPDEMUX_ASF_BufferMallocFailed "Error: Can't allocate %d bytes buffer.\n"
-#define MSGTR_MPDEMUX_ASF_ErrReadingNetworkStream "Error while reading network stream.\n"
-#define MSGTR_MPDEMUX_ASF_ErrChunk2Small "Error: Chunk is too small.\n"
-#define MSGTR_MPDEMUX_ASF_ErrSubChunkNumberInvalid "Error: Subchunk number is invalid.\n"
-#define MSGTR_MPDEMUX_ASF_Bandwidth2SmallCannotPlay "Bandwidth too small, file cannot be played!\n"
-#define MSGTR_MPDEMUX_ASF_Bandwidth2SmallDeselectedAudio "Bandwidth too small, deselected audio stream.\n"
-#define MSGTR_MPDEMUX_ASF_Bandwidth2SmallDeselectedVideo "Bandwidth too small, deselected video stream.\n"
-#define MSGTR_MPDEMUX_ASF_InvalidLenInHeader "Invalid length in ASF header!\n"
-#define MSGTR_MPDEMUX_ASF_ErrReadingChunkHeader "Error while reading chunk header.\n"
-#define MSGTR_MPDEMUX_ASF_ErrChunkBiggerThanPacket "Error: chunk_size > packet_size\n"
-#define MSGTR_MPDEMUX_ASF_ErrReadingChunk "Error while reading chunk.\n"
-#define MSGTR_MPDEMUX_ASF_ASFRedirector "=====> ASF Redirector\n"
-#define MSGTR_MPDEMUX_ASF_InvalidProxyURL "invalid proxy URL\n"
-#define MSGTR_MPDEMUX_ASF_UnknownASFStreamType "unknown ASF stream type\n"
-#define MSGTR_MPDEMUX_ASF_Failed2ParseHTTPResponse "Failed to parse HTTP response.\n"
-#define MSGTR_MPDEMUX_ASF_ServerReturn "Server returned %d:%s\n"
-#define MSGTR_MPDEMUX_ASF_ASFHTTPParseWarnCuttedPragma "ASF HTTP PARSE WARNING : Pragma %s cut from %zu bytes to %zu\n"
-#define MSGTR_MPDEMUX_ASF_SocketWriteError "socket write error: %s\n"
-#define MSGTR_MPDEMUX_ASF_HeaderParseFailed "Failed to parse header.\n"
-#define MSGTR_MPDEMUX_ASF_NoStreamFound "No stream found.\n"
-#define MSGTR_MPDEMUX_ASF_UnknownASFStreamingType "unknown ASF streaming type\n"
-#define MSGTR_MPDEMUX_ASF_InfoStreamASFURL "STREAM_ASF, URL: %s\n"
-#define MSGTR_MPDEMUX_ASF_StreamingFailed "Failed, exiting.\n"
-
-// audio_in.c
-#define MSGTR_MPDEMUX_AUDIOIN_ErrReadingAudio "\nError reading audio: %s\n"
-#define MSGTR_MPDEMUX_AUDIOIN_XRUNSomeFramesMayBeLeftOut "Recovered from cross-run, some frames may be left out!\n"
-#define MSGTR_MPDEMUX_AUDIOIN_ErrFatalCannotRecover "Fatal error, cannot recover!\n"
-#define MSGTR_MPDEMUX_AUDIOIN_NotEnoughSamples "\nNot enough audio samples!\n"
-
-// cache2.c
-#define MSGTR_MPDEMUX_CACHE2_NonCacheableStream "\rThis stream is non-cacheable.\n"
-#define MSGTR_MPDEMUX_CACHE2_ReadFileposDiffers "!!! read_filepos differs!!! Report this bug...\n"
-
-// network.c
-#define MSGTR_MPDEMUX_NW_UnknownAF "Unknown address family %d\n"
-#define MSGTR_MPDEMUX_NW_ResolvingHostForAF "Resolving %s for %s...\n"
-#define MSGTR_MPDEMUX_NW_CantResolv "Couldn't resolve name for %s: %s\n"
-#define MSGTR_MPDEMUX_NW_ConnectingToServer "Connecting to server %s[%s]: %d...\n"
-#define MSGTR_MPDEMUX_NW_CantConnect2Server "Failed to connect to server with %s\n"
-#define MSGTR_MPDEMUX_NW_SelectFailed "Select failed.\n"
-#define MSGTR_MPDEMUX_NW_ConnTimeout "connection timeout\n"
-#define MSGTR_MPDEMUX_NW_GetSockOptFailed "getsockopt failed: %s\n"
-#define MSGTR_MPDEMUX_NW_ConnectError "connect error: %s\n"
-#define MSGTR_MPDEMUX_NW_InvalidProxySettingTryingWithout "Invalid proxy setting... Trying without proxy.\n"
-#define MSGTR_MPDEMUX_NW_CantResolvTryingWithoutProxy "Could not resolve remote hostname for AF_INET. Trying without proxy.\n"
-#define MSGTR_MPDEMUX_NW_ErrSendingHTTPRequest "Error while sending HTTP request: Didn't send all the request.\n"
-#define MSGTR_MPDEMUX_NW_ReadFailed "Read failed.\n"
-#define MSGTR_MPDEMUX_NW_Read0CouldBeEOF "http_read_response read 0 (i.e. EOF).\n"
-#define MSGTR_MPDEMUX_NW_AuthFailed "Authentication failed. Please use the -user and -passwd options to provide your\n"\
-"username/password for a list of URLs, or form an URL like:\n"\
-"http://username:password@hostname/file\n"
-#define MSGTR_MPDEMUX_NW_AuthRequiredFor "Authentication required for %s\n"
-#define MSGTR_MPDEMUX_NW_AuthRequired "Authentication required.\n"
-#define MSGTR_MPDEMUX_NW_NoPasswdProvidedTryingBlank "No password provided, trying blank password.\n"
-#define MSGTR_MPDEMUX_NW_ErrServerReturned "Server returns %d: %s\n"
-#define MSGTR_MPDEMUX_NW_CacheSizeSetTo "Cache size set to %d KBytes\n"
-
-// open.c, stream.c:
-#define MSGTR_CdDevNotfound "CD-ROM Device '%s' not found.\n"
-#define MSGTR_ErrTrackSelect "Error selecting VCD track."
-#define MSGTR_ReadSTDIN "Reading from stdin...\n"
-#define MSGTR_UnableOpenURL "Unable to open URL: %s\n"
-#define MSGTR_ConnToServer "Connected to server: %s\n"
-#define MSGTR_FileNotFound "File not found: '%s'\n"
-
-#define MSGTR_SMBInitError "Cannot init the libsmbclient library: %d\n"
-#define MSGTR_SMBFileNotFound "Could not open from LAN: '%s'\n"
-#define MSGTR_SMBNotCompiled "MPlayer was not compiled with SMB reading support.\n"
-
-#define MSGTR_CantOpenBluray "Couldn't open Blu-ray device: %s\n"
-#define MSGTR_CantOpenDVD "Couldn't open DVD device: %s (%s)\n"
-
-#define MSGTR_URLParsingFailed "URL parsing failed on url %s\n"
-#define MSGTR_FailedSetStreamOption "Failed to set stream option %s=%s\n"
-#define MSGTR_StreamNeedType "Streams need a type!\n"
-#define MSGTR_StreamProtocolNULL "Stream type %s has protocols == NULL, it's a bug\n"
-#define MSGTR_StreamCantHandleURL "No stream found to handle url %s\n"
-#define MSGTR_StreamNULLFilename "open_output_stream(), NULL filename, report this bug\n"
-#define MSGTR_StreamErrorWritingCapture "Error writing capture file: %s\n"
-#define MSGTR_StreamSeekFailed "Seek failed\n"
-#define MSGTR_StreamNotSeekable "Stream not seekable!\n"
-#define MSGTR_StreamCannotSeekBackward "Cannot seek backward in linear streams!\n"
-
-// stream_cdda.c
-#define MSGTR_MPDEMUX_CDDA_CantOpenCDDADevice "Can't open CDDA device.\n"
-#define MSGTR_MPDEMUX_CDDA_CantOpenDisc "Can't open disc.\n"
-#define MSGTR_MPDEMUX_CDDA_AudioCDFoundWithNTracks "Found audio CD with %d tracks.\n"
-
-// stream_cddb.c
-#define MSGTR_MPDEMUX_CDDB_FailedToReadTOC "Failed to read TOC.\n"
-#define MSGTR_MPDEMUX_CDDB_FailedToOpenDevice "Failed to open %s device.\n"
-#define MSGTR_MPDEMUX_CDDB_NotAValidURL "not a valid URL\n"
-#define MSGTR_MPDEMUX_CDDB_FailedToSendHTTPRequest "Failed to send the HTTP request.\n"
-#define MSGTR_MPDEMUX_CDDB_FailedToReadHTTPResponse "Failed to read the HTTP response.\n"
-#define MSGTR_MPDEMUX_CDDB_HTTPErrorNOTFOUND "Not Found.\n"
-#define MSGTR_MPDEMUX_CDDB_HTTPErrorUnknown "unknown error code\n"
-#define MSGTR_MPDEMUX_CDDB_NoCacheFound "No cache found.\n"
-#define MSGTR_MPDEMUX_CDDB_NotAllXMCDFileHasBeenRead "Not all the xmcd file has been read.\n"
-#define MSGTR_MPDEMUX_CDDB_FailedToCreateDirectory "Failed to create directory %s.\n"
-#define MSGTR_MPDEMUX_CDDB_NotAllXMCDFileHasBeenWritten "Not all of the xmcd file has been written.\n"
-#define MSGTR_MPDEMUX_CDDB_InvalidXMCDDatabaseReturned "Invalid xmcd database file returned.\n"
-#define MSGTR_MPDEMUX_CDDB_UnexpectedFIXME "unexpected FIXME\n"
-#define MSGTR_MPDEMUX_CDDB_UnhandledCode "unhandled code\n"
-#define MSGTR_MPDEMUX_CDDB_UnableToFindEOL "Unable to find end of line.\n"
-#define MSGTR_MPDEMUX_CDDB_ParseOKFoundAlbumTitle "Parse OK, found: %s\n"
-#define MSGTR_MPDEMUX_CDDB_AlbumNotFound "Album not found.\n"
-#define MSGTR_MPDEMUX_CDDB_ServerReturnsCommandSyntaxErr "Server returns: Command syntax error\n"
-#define MSGTR_MPDEMUX_CDDB_NoSitesInfoAvailable "No sites information available.\n"
-#define MSGTR_MPDEMUX_CDDB_FailedToGetProtocolLevel "Failed to get the protocol level.\n"
-#define MSGTR_MPDEMUX_CDDB_NoCDInDrive "No CD in the drive.\n"
-
-// stream_cue.c
-#define MSGTR_MPDEMUX_CUEREAD_UnexpectedCuefileLine "[bincue] Unexpected cuefile line: %s\n"
-#define MSGTR_MPDEMUX_CUEREAD_BinFilenameTested "[bincue] bin filename tested: %s\n"
-#define MSGTR_MPDEMUX_CUEREAD_CannotFindBinFile "[bincue] Couldn't find the bin file - giving up.\n"
-#define MSGTR_MPDEMUX_CUEREAD_UsingBinFile "[bincue] Using bin file %s.\n"
-#define MSGTR_MPDEMUX_CUEREAD_UnknownModeForBinfile "[bincue] unknown mode for binfile. Should not happen. Aborting.\n"
-#define MSGTR_MPDEMUX_CUEREAD_CannotOpenCueFile "[bincue] Cannot open %s.\n"
-#define MSGTR_MPDEMUX_CUEREAD_ErrReadingFromCueFile "[bincue] Error reading from %s\n"
-#define MSGTR_MPDEMUX_CUEREAD_ErrGettingBinFileSize "[bincue] Error getting size of bin file.\n"
-#define MSGTR_MPDEMUX_CUEREAD_InfoTrackFormat "track %02d: format=%d %02d:%02d:%02d\n"
-#define MSGTR_MPDEMUX_CUEREAD_UnexpectedBinFileEOF "[bincue] unexpected end of bin file\n"
-#define MSGTR_MPDEMUX_CUEREAD_CannotReadNBytesOfPayload "[bincue] Couldn't read %d bytes of payload.\n"
-#define MSGTR_MPDEMUX_CUEREAD_CueStreamInfo_FilenameTrackTracksavail "CUE stream_open, filename=%s, track=%d, available tracks: %d -> %d\n"
-
-// stream_dvd.c
-#define MSGTR_DVDspeedCantOpen "Couldn't open DVD device for writing, changing DVD speed needs write access.\n"
-#define MSGTR_DVDrestoreSpeed "Restoring DVD speed... "
-#define MSGTR_DVDlimitSpeed "Limiting DVD speed to %dKB/s... "
-#define MSGTR_DVDlimitFail "failed\n"
-#define MSGTR_DVDlimitOk "successful\n"
-#define MSGTR_NoDVDSupport "MPlayer was compiled without DVD support, exiting.\n"
-#define MSGTR_DVDnumTitles "There are %d titles on this DVD.\n"
-#define MSGTR_DVDinvalidTitle "Invalid DVD title number: %d\n"
-#define MSGTR_DVDnumChapters "There are %d chapters in this DVD title.\n"
-#define MSGTR_DVDinvalidChapter "Invalid DVD chapter number: %d\n"
-#define MSGTR_DVDinvalidChapterRange "Invalid chapter range specification %s\n"
-#define MSGTR_DVDinvalidLastChapter "Invalid DVD last chapter number: %d\n"
-#define MSGTR_DVDnumAngles "There are %d angles in this DVD title.\n"
-#define MSGTR_DVDinvalidAngle "Invalid DVD angle number: %d\n"
-#define MSGTR_DVDnoIFO "Cannot open the IFO file for DVD title %d.\n"
-#define MSGTR_DVDnoVMG "Can't open VMG info!\n"
-#define MSGTR_DVDnoVOBs "Cannot open title VOBS (VTS_%02d_1.VOB).\n"
-#define MSGTR_DVDnoMatchingAudio "No matching DVD audio language found!\n"
-#define MSGTR_DVDaudioChannel "Selected DVD audio channel: %d language: %c%c\n"
-#define MSGTR_DVDaudioStreamInfo "audio stream: %d format: %s (%s) language: %s aid: %d.\n"
-#define MSGTR_DVDnumAudioChannels "number of audio channels on disk: %d.\n"
-#define MSGTR_DVDnoMatchingSubtitle "No matching DVD subtitle language found!\n"
-#define MSGTR_DVDsubtitleChannel "Selected DVD subtitle channel: %d language: %c%c\n"
-#define MSGTR_DVDsubtitleLanguage "subtitle ( sid ): %d language: %s\n"
-#define MSGTR_DVDnumSubtitles "number of subtitles on disk: %d\n"
-
-// stream_bluray.c
-#define MSGTR_BlurayNoDevice "No Blu-ray device/location was specified ...\n"
-#define MSGTR_BlurayNoTitles "Can't find any Blu-ray-compatible title here.\n"
-
-// stream_radio.c
-#define MSGTR_RADIO_ChannelNamesDetected "[radio] Radio channel names detected.\n"
-#define MSGTR_RADIO_WrongFreqForChannel "[radio] Wrong frequency for channel %s\n"
-#define MSGTR_RADIO_WrongChannelNumberFloat "[radio] Wrong channel number: %.2f\n"
-#define MSGTR_RADIO_WrongChannelNumberInt "[radio] Wrong channel number: %d\n"
-#define MSGTR_RADIO_WrongChannelName "[radio] Wrong channel name: %s\n"
-#define MSGTR_RADIO_FreqParameterDetected "[radio] Radio frequency parameter detected.\n"
-#define MSGTR_RADIO_GetTunerFailed "[radio] Warning: ioctl get tuner failed: %s. Setting frac to %d.\n"
-#define MSGTR_RADIO_NotRadioDevice "[radio] %s is no radio device!\n"
-#define MSGTR_RADIO_SetFreqFailed "[radio] ioctl set frequency 0x%x (%.2f) failed: %s\n"
-#define MSGTR_RADIO_GetFreqFailed "[radio] ioctl get frequency failed: %s\n"
-#define MSGTR_RADIO_SetMuteFailed "[radio] ioctl set mute failed: %s\n"
-#define MSGTR_RADIO_QueryControlFailed "[radio] ioctl query control failed: %s\n"
-#define MSGTR_RADIO_GetVolumeFailed "[radio] ioctl get volume failed: %s\n"
-#define MSGTR_RADIO_SetVolumeFailed "[radio] ioctl set volume failed: %s\n"
-#define MSGTR_RADIO_DroppingFrame "\n[radio] too bad - dropping audio frame (%d bytes)!\n"
-#define MSGTR_RADIO_BufferEmpty "[radio] grab_audio_frame: buffer empty, waiting for %d data bytes.\n"
-#define MSGTR_RADIO_AudioInitFailed "[radio] audio_in_init failed: %s\n"
-#define MSGTR_RADIO_AllocateBufferFailed "[radio] cannot allocate audio buffer (block=%d,buf=%d): %s\n"
-#define MSGTR_RADIO_CurrentFreq "[radio] Current frequency: %.2f\n"
-#define MSGTR_RADIO_SelectedChannel "[radio] Selected channel: %d - %s (freq: %.2f)\n"
-#define MSGTR_RADIO_ChangeChannelNoChannelList "[radio] Can not change channel: no channel list given.\n"
-#define MSGTR_RADIO_UnableOpenDevice "[radio] Unable to open '%s': %s\n"
-#define MSGTR_RADIO_InitFracFailed "[radio] init_frac failed.\n"
-#define MSGTR_RADIO_WrongFreq "[radio] Wrong frequency: %.2f\n"
-#define MSGTR_RADIO_UsingFreq "[radio] Using frequency: %.2f.\n"
-#define MSGTR_RADIO_AudioInInitFailed "[radio] audio_in_init failed.\n"
-#define MSGTR_RADIO_AudioInSetupFailed "[radio] audio_in_setup call failed: %s\n"
-#define MSGTR_RADIO_ClearBufferFailed "[radio] Clearing buffer failed: %s\n"
-#define MSGTR_RADIO_StreamEnableCacheFailed "[radio] Call to stream_enable_cache failed: %s\n"
-#define MSGTR_RADIO_DriverUnknownStr "[radio] Unknown driver name: %s\n"
-#define MSGTR_RADIO_DriverV4L2 "[radio] Using V4Lv2 radio interface.\n"
-#define MSGTR_RADIO_DriverV4L "[radio] Using V4Lv1 radio interface.\n"
-#define MSGTR_RADIO_DriverBSDBT848 "[radio] Using *BSD BT848 radio interface.\n"
-
-//tv.c
-#define MSGTR_TV_BogusNormParameter "tv.c: norm_from_string(%s): Bogus norm parameter, setting %s.\n"
-#define MSGTR_TV_NoVideoInputPresent "Error: No video input present!\n"
-#define MSGTR_TV_UnknownImageFormat ""\
-"==================================================================\n"\
-" WARNING: UNTESTED OR UNKNOWN OUTPUT IMAGE FORMAT REQUESTED (0x%x)\n"\
-" This may cause buggy playback or program crash! Bug reports will\n"\
-" be ignored! You should try again with YV12 (which is the default\n"\
-" colorspace) and read the documentation!\n"\
-"==================================================================\n"
-#define MSGTR_TV_CannotSetNorm "Error: Cannot set norm!\n"
-#define MSGTR_TV_MJP_WidthHeight " MJP: width %d height %d\n"
-#define MSGTR_TV_UnableToSetWidth "Unable to set requested width: %d\n"
-#define MSGTR_TV_UnableToSetHeight "Unable to set requested height: %d\n"
-#define MSGTR_TV_NoTuner "Selected input hasn't got a tuner!\n"
-#define MSGTR_TV_UnableFindChanlist "Unable to find selected channel list! (%s)\n"
-#define MSGTR_TV_ChannelFreqParamConflict "You can't set frequency and channel simultaneously!\n"
-#define MSGTR_TV_ChannelNamesDetected "TV channel names detected.\n"
-#define MSGTR_TV_NoFreqForChannel "Couldn't find frequency for channel %s (%s)\n"
-#define MSGTR_TV_SelectedChannel3 "Selected channel: %s - %s (freq: %.3f)\n"
-#define MSGTR_TV_SelectedChannel2 "Selected channel: %s (freq: %.3f)\n"
-#define MSGTR_TV_UnsupportedAudioType "Audio type '%s (%x)' unsupported!\n"
-#define MSGTR_TV_AvailableDrivers "Available drivers:\n"
-#define MSGTR_TV_DriverInfo "Selected driver: %s\n name: %s\n author: %s\n comment: %s\n"
-#define MSGTR_TV_NoSuchDriver "No such driver: %s\n"
-#define MSGTR_TV_DriverAutoDetectionFailed "TV driver autodetection failed.\n"
-#define MSGTR_TV_UnknownColorOption "Unknown color option (%d) specified!\n"
-#define MSGTR_TV_NoTeletext "No teletext"
-#define MSGTR_TV_Bt848IoctlFailed "tvi_bsdbt848: Call to %s ioctl failed. Error: %s\n"
-#define MSGTR_TV_Bt848InvalidAudioRate "tvi_bsdbt848: Invalid audio rate. Error: %s\n"
-#define MSGTR_TV_Bt848ErrorOpeningBktrDev "tvi_bsdbt848: Unable to open bktr device. Error: %s\n"
-#define MSGTR_TV_Bt848ErrorOpeningTunerDev "tvi_bsdbt848: Unable to open tuner device. Error: %s\n"
-#define MSGTR_TV_Bt848ErrorOpeningDspDev "tvi_bsdbt848: Unable to open dsp device. Error: %s\n"
-#define MSGTR_TV_Bt848ErrorConfiguringDsp "tvi_bsdbt848: Configuration of dsp failed. Error: %s\n"
-#define MSGTR_TV_Bt848ErrorReadingAudio "tvi_bsdbt848: Error reading audio data. Error: %s\n"
-#define MSGTR_TV_Bt848MmapFailed "tvi_bsdbt848: mmap failed. Error: %s\n"
-#define MSGTR_TV_Bt848FrameBufAllocFailed "tvi_bsdbt848: Frame buffer allocation failed. Error: %s\n"
-#define MSGTR_TV_Bt848ErrorSettingWidth "tvi_bsdbt848: Error setting picture width. Error: %s\n"
-#define MSGTR_TV_Bt848ErrorSettingHeight "tvi_bsdbt848: Error setting picture height. Error: %s\n"
-#define MSGTR_TV_Bt848UnableToStopCapture "tvi_bsdbt848: Unable to stop capture. Error: %s\n"
-#define MSGTR_TV_TTSupportedLanguages "Supported Teletext languages:\n"
-#define MSGTR_TV_TTSelectedLanguage "Selected default teletext language: %s\n"
-#define MSGTR_TV_ScannerNotAvailableWithoutTuner "Channel scanner is not available without tuner\n"
-
-//tvi_dshow.c
-#define MSGTR_TVI_DS_UnableConnectInputVideoDecoder "Unable to connect given input to video decoder. Error:0x%x\n"
-#define MSGTR_TVI_DS_UnableConnectInputAudioDecoder "Unable to connect given input to audio decoder. Error:0x%x\n"
-#define MSGTR_TVI_DS_UnableSelectVideoFormat "tvi_dshow: Unable to select video format. Error:0x%x\n"
-#define MSGTR_TVI_DS_UnableSelectAudioFormat "tvi_dshow: Unable to select audio format. Error:0x%x\n"
-#define MSGTR_TVI_DS_UnableGetMediaControlInterface "tvi_dshow: Unable to get IMediaControl interface. Error:0x%x\n"
-#define MSGTR_TVI_DS_UnableStartGraph "tvi_dshow: Unable to start graph! Error:0x%x\n"
-#define MSGTR_TVI_DS_DeviceNotFound "tvi_dshow: Device #%d not found\n"
-#define MSGTR_TVI_DS_UnableGetDeviceName "tvi_dshow: Unable to get name for device #%d\n"
-#define MSGTR_TVI_DS_UsingDevice "tvi_dshow: Using device #%d: %s\n"
-#define MSGTR_TVI_DS_DirectGetFreqFailed "tvi_dshow: Unable to get frequency directly. OS built-in channels table will be used.\n"
-#define MSGTR_TVI_DS_UnableExtractFreqTable "tvi_dshow: Unable to load frequency table from kstvtune.ax\n"
-#define MSGTR_TVI_DS_WrongDeviceParam "tvi_dshow: Wrong device parameter: %s\n"
-#define MSGTR_TVI_DS_WrongDeviceIndex "tvi_dshow: Wrong device index: %d\n"
-#define MSGTR_TVI_DS_WrongADeviceParam "tvi_dshow: Wrong adevice parameter: %s\n"
-#define MSGTR_TVI_DS_WrongADeviceIndex "tvi_dshow: Wrong adevice index: %d\n"
-
-#define MSGTR_TVI_DS_SamplerateNotsupported "tvi_dshow: Samplerate %d is not supported by device. Failing back to first available.\n"
-#define MSGTR_TVI_DS_VideoAdjustigNotSupported "tvi_dshow: Adjusting of brightness/hue/saturation/contrast is not supported by device\n"
-
-#define MSGTR_TVI_DS_ChangingWidthHeightNotSupported "tvi_dshow: Changing video width/height is not supported by device.\n"
-#define MSGTR_TVI_DS_SelectingInputNotSupported "tvi_dshow: Selection of capture source is not supported by device\n"
-#define MSGTR_TVI_DS_ErrorParsingAudioFormatStruct "tvi_dshow: Unable to parse audio format structure.\n"
-#define MSGTR_TVI_DS_ErrorParsingVideoFormatStruct "tvi_dshow: Unable to parse video format structure.\n"
-#define MSGTR_TVI_DS_UnableSetAudioMode "tvi_dshow: Unable to set audio mode %d. Error:0x%x\n"
-#define MSGTR_TVI_DS_UnsupportedMediaType "tvi_dshow: Unsupported media type passed to %s\n"
-#define MSGTR_TVI_DS_UnableGetsupportedVideoFormats "tvi_dshow: Unable to get supported media formats from video pin. Error:0x%x\n"
-#define MSGTR_TVI_DS_UnableGetsupportedAudioFormats "tvi_dshow: Unable to get supported media formats from audio pin. Error:0x%x Disabling audio.\n"
-#define MSGTR_TVI_DS_UnableFindNearestChannel "tvi_dshow: Unable to find nearest channel in system frequency table\n"
-#define MSGTR_TVI_DS_UnableToSetChannel "tvi_dshow: Unable to switch to nearest channel from system frequency table. Error:0x%x\n"
-#define MSGTR_TVI_DS_UnableTerminateVPPin "tvi_dshow: Unable to terminate VideoPort pin with any filter in graph. Error:0x%x\n"
-#define MSGTR_TVI_DS_UnableBuildVideoSubGraph "tvi_dshow: Unable to build video chain of capture graph. Error:0x%x\n"
-#define MSGTR_TVI_DS_UnableBuildAudioSubGraph "tvi_dshow: Unable to build audio chain of capture graph. Error:0x%x\n"
-#define MSGTR_TVI_DS_UnableBuildVBISubGraph "tvi_dshow: Unable to build VBI chain of capture graph. Error:0x%x\n"
-#define MSGTR_TVI_DS_GraphInitFailure "tvi_dshow: Directshow graph initialization failure.\n"
-#define MSGTR_TVI_DS_NoVideoCaptureDevice "tvi_dshow: Unable to find video capture device\n"
-#define MSGTR_TVI_DS_NoAudioCaptureDevice "tvi_dshow: Unable to find audio capture device\n"
-#define MSGTR_TVI_DS_GetActualMediatypeFailed "tvi_dshow: Unable to get actual mediatype (Error:0x%x). Assuming equal to requested.\n"
-
-// url.c
-#define MSGTR_MPDEMUX_URL_StringAlreadyEscaped "String appears to be already escaped in url_escape %c%c1%c2\n"
-
-// subtitles
-#define MSGTR_SUBTITLES_SubRip_UnknownFontColor "SubRip: unknown font color in subtitle: %s\n"
-
-
-/* untranslated messages from the English master file */
-
-
-#endif /* MPLAYER_HELP_MP_H */
diff --git a/ffmpeg/libavfilter/libmpcodecs/mpbswap.h b/ffmpeg/libavfilter/libmpcodecs/mpbswap.h
deleted file mode 100644
index 28f7337..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/mpbswap.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef MPLAYER_MPBSWAP_H
-#define MPLAYER_MPBSWAP_H
-
-#include <sys/types.h>
-#include "config.h"
-#include "libavutil/bswap.h"
-
-#define bswap_16(v) av_bswap16(v)
-#define bswap_32(v) av_bswap32(v)
-#define le2me_16(v) av_le2ne16(v)
-#define le2me_32(v) av_le2ne32(v)
-#define le2me_64(v) av_le2ne64(v)
-#define be2me_16(v) av_be2ne16(v)
-#define be2me_32(v) av_be2ne32(v)
-
-#endif /* MPLAYER_MPBSWAP_H */
diff --git a/ffmpeg/libavfilter/libmpcodecs/pullup.c b/ffmpeg/libavfilter/libmpcodecs/pullup.c
deleted file mode 100644
index b5fae9b..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/pullup.c
+++ /dev/null
@@ -1,823 +0,0 @@
-/*
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "libavutil/x86/asm.h"
-#include "config.h"
-#include "pullup.h"
-
-
-
-#if ARCH_X86
-#if HAVE_MMX
-static int diff_y_mmx(unsigned char *a, unsigned char *b, int s)
-{
- int ret;
- __asm__ volatile (
- "movl $4, %%ecx \n\t"
- "pxor %%mm4, %%mm4 \n\t"
- "pxor %%mm7, %%mm7 \n\t"
-
- "1: \n\t"
-
- "movq (%%"REG_S"), %%mm0 \n\t"
- "movq (%%"REG_S"), %%mm2 \n\t"
- "add %%"REG_a", %%"REG_S" \n\t"
- "movq (%%"REG_D"), %%mm1 \n\t"
- "add %%"REG_a", %%"REG_D" \n\t"
- "psubusb %%mm1, %%mm2 \n\t"
- "psubusb %%mm0, %%mm1 \n\t"
- "movq %%mm2, %%mm0 \n\t"
- "movq %%mm1, %%mm3 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpcklbw %%mm7, %%mm1 \n\t"
- "punpckhbw %%mm7, %%mm2 \n\t"
- "punpckhbw %%mm7, %%mm3 \n\t"
- "paddw %%mm0, %%mm4 \n\t"
- "paddw %%mm1, %%mm4 \n\t"
- "paddw %%mm2, %%mm4 \n\t"
- "paddw %%mm3, %%mm4 \n\t"
-
- "decl %%ecx \n\t"
- "jnz 1b \n\t"
-
- "movq %%mm4, %%mm3 \n\t"
- "punpcklwd %%mm7, %%mm4 \n\t"
- "punpckhwd %%mm7, %%mm3 \n\t"
- "paddd %%mm4, %%mm3 \n\t"
- "movd %%mm3, %%eax \n\t"
- "psrlq $32, %%mm3 \n\t"
- "movd %%mm3, %%edx \n\t"
- "addl %%edx, %%eax \n\t"
- "emms \n\t"
- : "=a" (ret)
- : "S" (a), "D" (b), "a" (s)
- : "%ecx", "%edx"
- );
- return ret;
-}
-
-static int licomb_y_mmx(unsigned char *a, unsigned char *b, int s)
-{
- int ret;
- __asm__ volatile (
- "movl $4, %%ecx \n\t"
- "pxor %%mm6, %%mm6 \n\t"
- "pxor %%mm7, %%mm7 \n\t"
- "sub %%"REG_a", %%"REG_D" \n\t"
-
- "2: \n\t"
-
- "movq (%%"REG_D"), %%mm0 \n\t"
- "movq (%%"REG_D"), %%mm1 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "movq (%%"REG_D",%%"REG_a"), %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm1 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "paddw %%mm0, %%mm0 \n\t"
- "paddw %%mm2, %%mm1 \n\t"
- "movq %%mm0, %%mm2 \n\t"
- "psubusw %%mm1, %%mm0 \n\t"
- "psubusw %%mm2, %%mm1 \n\t"
- "paddw %%mm0, %%mm6 \n\t"
- "paddw %%mm1, %%mm6 \n\t"
-
- "movq (%%"REG_S"), %%mm0 \n\t"
- "movq (%%"REG_D"), %%mm1 \n\t"
- "punpckhbw %%mm7, %%mm0 \n\t"
- "movq (%%"REG_D",%%"REG_a"), %%mm2 \n\t"
- "punpckhbw %%mm7, %%mm1 \n\t"
- "punpckhbw %%mm7, %%mm2 \n\t"
- "paddw %%mm0, %%mm0 \n\t"
- "paddw %%mm2, %%mm1 \n\t"
- "movq %%mm0, %%mm2 \n\t"
- "psubusw %%mm1, %%mm0 \n\t"
- "psubusw %%mm2, %%mm1 \n\t"
- "paddw %%mm0, %%mm6 \n\t"
- "paddw %%mm1, %%mm6 \n\t"
-
- "movq (%%"REG_D",%%"REG_a"), %%mm0 \n\t"
- "movq (%%"REG_S"), %%mm1 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "movq (%%"REG_S",%%"REG_a"), %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm1 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "paddw %%mm0, %%mm0 \n\t"
- "paddw %%mm2, %%mm1 \n\t"
- "movq %%mm0, %%mm2 \n\t"
- "psubusw %%mm1, %%mm0 \n\t"
- "psubusw %%mm2, %%mm1 \n\t"
- "paddw %%mm0, %%mm6 \n\t"
- "paddw %%mm1, %%mm6 \n\t"
-
- "movq (%%"REG_D",%%"REG_a"), %%mm0 \n\t"
- "movq (%%"REG_S"), %%mm1 \n\t"
- "punpckhbw %%mm7, %%mm0 \n\t"
- "movq (%%"REG_S",%%"REG_a"), %%mm2 \n\t"
- "punpckhbw %%mm7, %%mm1 \n\t"
- "punpckhbw %%mm7, %%mm2 \n\t"
- "paddw %%mm0, %%mm0 \n\t"
- "paddw %%mm2, %%mm1 \n\t"
- "movq %%mm0, %%mm2 \n\t"
- "psubusw %%mm1, %%mm0 \n\t"
- "psubusw %%mm2, %%mm1 \n\t"
- "paddw %%mm0, %%mm6 \n\t"
- "paddw %%mm1, %%mm6 \n\t"
-
- "add %%"REG_a", %%"REG_S" \n\t"
- "add %%"REG_a", %%"REG_D" \n\t"
- "decl %%ecx \n\t"
- "jnz 2b \n\t"
-
- "movq %%mm6, %%mm5 \n\t"
- "punpcklwd %%mm7, %%mm6 \n\t"
- "punpckhwd %%mm7, %%mm5 \n\t"
- "paddd %%mm6, %%mm5 \n\t"
- "movd %%mm5, %%eax \n\t"
- "psrlq $32, %%mm5 \n\t"
- "movd %%mm5, %%edx \n\t"
- "addl %%edx, %%eax \n\t"
-
- "emms \n\t"
- : "=a" (ret)
- : "S" (a), "D" (b), "a" (s)
- : "%ecx", "%edx"
- );
- return ret;
-}
-
-static int var_y_mmx(unsigned char *a, unsigned char *b, int s)
-{
- int ret;
- __asm__ volatile (
- "movl $3, %%ecx \n\t"
- "pxor %%mm4, %%mm4 \n\t"
- "pxor %%mm7, %%mm7 \n\t"
-
- "1: \n\t"
-
- "movq (%%"REG_S"), %%mm0 \n\t"
- "movq (%%"REG_S"), %%mm2 \n\t"
- "movq (%%"REG_S",%%"REG_a"), %%mm1 \n\t"
- "add %%"REG_a", %%"REG_S" \n\t"
- "psubusb %%mm1, %%mm2 \n\t"
- "psubusb %%mm0, %%mm1 \n\t"
- "movq %%mm2, %%mm0 \n\t"
- "movq %%mm1, %%mm3 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpcklbw %%mm7, %%mm1 \n\t"
- "punpckhbw %%mm7, %%mm2 \n\t"
- "punpckhbw %%mm7, %%mm3 \n\t"
- "paddw %%mm0, %%mm4 \n\t"
- "paddw %%mm1, %%mm4 \n\t"
- "paddw %%mm2, %%mm4 \n\t"
- "paddw %%mm3, %%mm4 \n\t"
-
- "decl %%ecx \n\t"
- "jnz 1b \n\t"
-
- "movq %%mm4, %%mm3 \n\t"
- "punpcklwd %%mm7, %%mm4 \n\t"
- "punpckhwd %%mm7, %%mm3 \n\t"
- "paddd %%mm4, %%mm3 \n\t"
- "movd %%mm3, %%eax \n\t"
- "psrlq $32, %%mm3 \n\t"
- "movd %%mm3, %%edx \n\t"
- "addl %%edx, %%eax \n\t"
- "emms \n\t"
- : "=a" (ret)
- : "S" (a), "a" (s)
- : "%ecx", "%edx"
- );
- return 4*ret;
-}
-#endif
-#endif
-
-#define ABS(a) (((a)^((a)>>31))-((a)>>31))
-
-static int diff_y(unsigned char *a, unsigned char *b, int s)
-{
- int i, j, diff=0;
- for (i=4; i; i--) {
- for (j=0; j<8; j++) diff += ABS(a[j]-b[j]);
- a+=s; b+=s;
- }
- return diff;
-}
-
-static int licomb_y(unsigned char *a, unsigned char *b, int s)
-{
- int i, j, diff=0;
- for (i=4; i; i--) {
- for (j=0; j<8; j++)
- diff += ABS((a[j]<<1) - b[j-s] - b[j])
- + ABS((b[j]<<1) - a[j] - a[j+s]);
- a+=s; b+=s;
- }
- return diff;
-}
-
-#if 0
-static int qpcomb_y(unsigned char *a, unsigned char *b, int s)
-{
- int i, j, diff=0;
- for (i=4; i; i--) {
- for (j=0; j<8; j++)
- diff += ABS(a[j] - 3*b[j-s] + 3*a[j+s] - b[j]);
- a+=s; b+=s;
- }
- return diff;
-}
-
-static int licomb_y_test(unsigned char *a, unsigned char *b, int s)
-{
- int c = licomb_y(a,b,s);
- int m = licomb_y_mmx(a,b,s);
- if (c != m) printf("%d != %d\n", c, m);
- return m;
-}
-#endif
-
-static int var_y(unsigned char *a, unsigned char *b, int s)
-{
- int i, j, var=0;
- for (i=3; i; i--) {
- for (j=0; j<8; j++) {
- var += ABS(a[j]-a[j+s]);
- }
- a+=s; b+=s;
- }
- return 4*var; /* match comb scaling */
-}
-
-
-
-
-
-
-
-
-
-static void alloc_buffer(struct pullup_context *c, struct pullup_buffer *b)
-{
- int i;
- if (b->planes) return;
- b->planes = calloc(c->nplanes, sizeof(unsigned char *));
- for (i = 0; i < c->nplanes; i++) {
- b->planes[i] = malloc(c->h[i]*c->stride[i]);
- /* Deal with idiotic 128=0 for chroma: */
- memset(b->planes[i], c->background[i], c->h[i]*c->stride[i]);
- }
-}
-
-struct pullup_buffer *ff_pullup_lock_buffer(struct pullup_buffer *b, int parity)
-{
- if (!b) return 0;
- if ((parity+1) & 1) b->lock[0]++;
- if ((parity+1) & 2) b->lock[1]++;
- return b;
-}
-
-void ff_pullup_release_buffer(struct pullup_buffer *b, int parity)
-{
- if (!b) return;
- if ((parity+1) & 1) b->lock[0]--;
- if ((parity+1) & 2) b->lock[1]--;
-}
-
-struct pullup_buffer *ff_pullup_get_buffer(struct pullup_context *c, int parity)
-{
- int i;
-
- /* Try first to get the sister buffer for the previous field */
- if (parity < 2 && c->last && parity != c->last->parity
- && !c->last->buffer->lock[parity]) {
- alloc_buffer(c, c->last->buffer);
- return ff_pullup_lock_buffer(c->last->buffer, parity);
- }
-
- /* Prefer a buffer with both fields open */
- for (i = 0; i < c->nbuffers; i++) {
- if (c->buffers[i].lock[0]) continue;
- if (c->buffers[i].lock[1]) continue;
- alloc_buffer(c, &c->buffers[i]);
- return ff_pullup_lock_buffer(&c->buffers[i], parity);
- }
-
- if (parity == 2) return 0;
-
- /* Search for any half-free buffer */
- for (i = 0; i < c->nbuffers; i++) {
- if (((parity+1) & 1) && c->buffers[i].lock[0]) continue;
- if (((parity+1) & 2) && c->buffers[i].lock[1]) continue;
- alloc_buffer(c, &c->buffers[i]);
- return ff_pullup_lock_buffer(&c->buffers[i], parity);
- }
-
- return 0;
-}
-
-
-
-
-
-
-static void compute_metric(struct pullup_context *c,
- struct pullup_field *fa, int pa,
- struct pullup_field *fb, int pb,
- int (*func)(unsigned char *, unsigned char *, int), int *dest)
-{
- unsigned char *a, *b;
- int x, y;
- int mp = c->metric_plane;
- int xstep = c->bpp[mp];
- int ystep = c->stride[mp]<<3;
- int s = c->stride[mp]<<1; /* field stride */
- int w = c->metric_w*xstep;
-
- if (!fa->buffer || !fb->buffer) return;
-
- /* Shortcut for duplicate fields (e.g. from RFF flag) */
- if (fa->buffer == fb->buffer && pa == pb) {
- memset(dest, 0, c->metric_len * sizeof(int));
- return;
- }
-
- a = fa->buffer->planes[mp] + pa * c->stride[mp] + c->metric_offset;
- b = fb->buffer->planes[mp] + pb * c->stride[mp] + c->metric_offset;
-
- for (y = c->metric_h; y; y--) {
- for (x = 0; x < w; x += xstep) {
- *dest++ = func(a + x, b + x, s);
- }
- a += ystep; b += ystep;
- }
-}
-
-
-
-
-
-static void alloc_metrics(struct pullup_context *c, struct pullup_field *f)
-{
- f->diffs = calloc(c->metric_len, sizeof(int));
- f->comb = calloc(c->metric_len, sizeof(int));
- f->var = calloc(c->metric_len, sizeof(int));
- /* add more metrics here as needed */
-}
-
-static struct pullup_field *make_field_queue(struct pullup_context *c, int len)
-{
- struct pullup_field *head, *f;
- f = head = calloc(1, sizeof(struct pullup_field));
- alloc_metrics(c, f);
- for (; len > 0; len--) {
- f->next = calloc(1, sizeof(struct pullup_field));
- f->next->prev = f;
- f = f->next;
- alloc_metrics(c, f);
- }
- f->next = head;
- head->prev = f;
- return head;
-}
-
-static void check_field_queue(struct pullup_context *c)
-{
- if (c->head->next == c->first) {
- struct pullup_field *f = calloc(1, sizeof(struct pullup_field));
- alloc_metrics(c, f);
- f->prev = c->head;
- f->next = c->first;
- c->head->next = f;
- c->first->prev = f;
- }
-}
-
-void ff_pullup_submit_field(struct pullup_context *c, struct pullup_buffer *b, int parity)
-{
- struct pullup_field *f;
-
- /* Grow the circular list if needed */
- check_field_queue(c);
-
- /* Cannot have two fields of same parity in a row; drop the new one */
- if (c->last && c->last->parity == parity) return;
-
- f = c->head;
- f->parity = parity;
- f->buffer = ff_pullup_lock_buffer(b, parity);
- f->flags = 0;
- f->breaks = 0;
- f->affinity = 0;
-
- compute_metric(c, f, parity, f->prev->prev, parity, c->diff, f->diffs);
- compute_metric(c, parity?f->prev:f, 0, parity?f:f->prev, 1, c->comb, f->comb);
- compute_metric(c, f, parity, f, -1, c->var, f->var);
-
- /* Advance the circular list */
- if (!c->first) c->first = c->head;
- c->last = c->head;
- c->head = c->head->next;
-}
-
-void ff_pullup_flush_fields(struct pullup_context *c)
-{
- struct pullup_field *f;
-
- for (f = c->first; f && f != c->head; f = f->next) {
- ff_pullup_release_buffer(f->buffer, f->parity);
- f->buffer = 0;
- }
- c->first = c->last = 0;
-}
-
-
-
-
-
-
-
-
-#define F_HAVE_BREAKS 1
-#define F_HAVE_AFFINITY 2
-
-
-#define BREAK_LEFT 1
-#define BREAK_RIGHT 2
-
-
-
-
-static int queue_length(struct pullup_field *begin, struct pullup_field *end)
-{
- int count = 1;
- struct pullup_field *f;
-
- if (!begin || !end) return 0;
- for (f = begin; f != end; f = f->next) count++;
- return count;
-}
-
-static int find_first_break(struct pullup_field *f, int max)
-{
- int i;
- for (i = 0; i < max; i++) {
- if (f->breaks & BREAK_RIGHT || f->next->breaks & BREAK_LEFT)
- return i+1;
- f = f->next;
- }
- return 0;
-}
-
-static void compute_breaks(struct pullup_context *c, struct pullup_field *f0)
-{
- int i;
- struct pullup_field *f1 = f0->next;
- struct pullup_field *f2 = f1->next;
- struct pullup_field *f3 = f2->next;
- int l, max_l=0, max_r=0;
- //struct pullup_field *ff;
- //for (i=0, ff=c->first; ff != f0; i++, ff=ff->next);
-
- if (f0->flags & F_HAVE_BREAKS) return;
- //printf("\n%d: ", i);
- f0->flags |= F_HAVE_BREAKS;
-
- /* Special case when fields are 100% identical */
- if (f0->buffer == f2->buffer && f1->buffer != f3->buffer) {
- f2->breaks |= BREAK_RIGHT;
- return;
- }
- if (f0->buffer != f2->buffer && f1->buffer == f3->buffer) {
- f1->breaks |= BREAK_LEFT;
- return;
- }
-
- for (i = 0; i < c->metric_len; i++) {
- l = f2->diffs[i] - f3->diffs[i];
- if (l > max_l) max_l = l;
- if (-l > max_r) max_r = -l;
- }
- /* Don't get tripped up when differences are mostly quant error */
- //printf("%d %d\n", max_l, max_r);
- if (max_l + max_r < 128) return;
- if (max_l > 4*max_r) f1->breaks |= BREAK_LEFT;
- if (max_r > 4*max_l) f2->breaks |= BREAK_RIGHT;
-}
-
-static void compute_affinity(struct pullup_context *c, struct pullup_field *f)
-{
- int i;
- int max_l=0, max_r=0, l;
- if (f->flags & F_HAVE_AFFINITY) return;
- f->flags |= F_HAVE_AFFINITY;
- if (f->buffer == f->next->next->buffer) {
- f->affinity = 1;
- f->next->affinity = 0;
- f->next->next->affinity = -1;
- f->next->flags |= F_HAVE_AFFINITY;
- f->next->next->flags |= F_HAVE_AFFINITY;
- return;
- }
- if (1) {
- for (i = 0; i < c->metric_len; i++) {
- int lv = f->prev->var[i];
- int rv = f->next->var[i];
- int v = f->var[i];
- int lc = f->comb[i] - (v+lv) + ABS(v-lv);
- int rc = f->next->comb[i] - (v+rv) + ABS(v-rv);
- lc = lc>0 ? lc : 0;
- rc = rc>0 ? rc : 0;
- l = lc - rc;
- if (l > max_l) max_l = l;
- if (-l > max_r) max_r = -l;
- }
- if (max_l + max_r < 64) return;
- if (max_r > 6*max_l) f->affinity = -1;
- else if (max_l > 6*max_r) f->affinity = 1;
- } else {
- for (i = 0; i < c->metric_len; i++) {
- l = f->comb[i] - f->next->comb[i];
- if (l > max_l) max_l = l;
- if (-l > max_r) max_r = -l;
- }
- if (max_l + max_r < 64) return;
- if (max_r > 2*max_l) f->affinity = -1;
- else if (max_l > 2*max_r) f->affinity = 1;
- }
-}
-
-static void foo(struct pullup_context *c)
-{
- struct pullup_field *f = c->first;
- int i, n = queue_length(f, c->last);
- for (i = 0; i < n-1; i++) {
- if (i < n-3) compute_breaks(c, f);
- compute_affinity(c, f);
- f = f->next;
- }
-}
-
-static int decide_frame_length(struct pullup_context *c)
-{
- struct pullup_field *f0 = c->first;
- struct pullup_field *f1 = f0->next;
- struct pullup_field *f2 = f1->next;
- int l;
-
- if (queue_length(c->first, c->last) < 4) return 0;
- foo(c);
-
- if (f0->affinity == -1) return 1;
-
- l = find_first_break(f0, 3);
- if (l == 1 && c->strict_breaks < 0) l = 0;
-
- switch (l) {
- case 1:
- if (c->strict_breaks < 1 && f0->affinity == 1 && f1->affinity == -1)
- return 2;
- else return 1;
- case 2:
- /* FIXME: strictly speaking, f0->prev is no longer valid... :) */
- if (c->strict_pairs
- && (f0->prev->breaks & BREAK_RIGHT) && (f2->breaks & BREAK_LEFT)
- && (f0->affinity != 1 || f1->affinity != -1) )
- return 1;
- if (f1->affinity == 1) return 1;
- else return 2;
- case 3:
- if (f2->affinity == 1) return 2;
- else return 3;
- default:
- /* 9 possibilities covered before switch */
- if (f1->affinity == 1) return 1; /* covers 6 */
- else if (f1->affinity == -1) return 2; /* covers 6 */
- else if (f2->affinity == -1) { /* covers 2 */
- if (f0->affinity == 1) return 3;
- else return 1;
- }
- else return 2; /* the remaining 6 */
- }
-}
-
-
-static void print_aff_and_breaks(struct pullup_context *c, struct pullup_field *f)
-{
- int i;
- struct pullup_field *f0 = f;
- const char aff_l[] = "+..", aff_r[] = "..+";
- printf("\naffinity: ");
- for (i = 0; i < 4; i++) {
- printf("%c%d%c", aff_l[1+f->affinity], i, aff_r[1+f->affinity]);
- f = f->next;
- }
- f = f0;
- printf("\nbreaks: ");
- for (i=0; i<4; i++) {
- printf("%c%d%c", f->breaks & BREAK_LEFT ? '|' : '.', i, f->breaks & BREAK_RIGHT ? '|' : '.');
- f = f->next;
- }
- printf("\n");
-}
-
-
-
-
-
-struct pullup_frame *ff_pullup_get_frame(struct pullup_context *c)
-{
- int i;
- struct pullup_frame *fr = c->frame;
- int n = decide_frame_length(c);
- int aff = c->first->next->affinity;
-
- if (!n) return 0;
- if (fr->lock) return 0;
-
- if (c->verbose) {
- print_aff_and_breaks(c, c->first);
- printf("duration: %d \n", n);
- }
-
- fr->lock++;
- fr->length = n;
- fr->parity = c->first->parity;
- fr->buffer = 0;
- for (i = 0; i < n; i++) {
- /* We cheat and steal the buffer without release+relock */
- fr->ifields[i] = c->first->buffer;
- c->first->buffer = 0;
- c->first = c->first->next;
- }
-
- if (n == 1) {
- fr->ofields[fr->parity] = fr->ifields[0];
- fr->ofields[fr->parity^1] = 0;
- } else if (n == 2) {
- fr->ofields[fr->parity] = fr->ifields[0];
- fr->ofields[fr->parity^1] = fr->ifields[1];
- } else if (n == 3) {
- if (aff == 0)
- aff = (fr->ifields[0] == fr->ifields[1]) ? -1 : 1;
- /* else if (c->verbose) printf("forced aff: %d \n", aff); */
- fr->ofields[fr->parity] = fr->ifields[1+aff];
- fr->ofields[fr->parity^1] = fr->ifields[1];
- }
- ff_pullup_lock_buffer(fr->ofields[0], 0);
- ff_pullup_lock_buffer(fr->ofields[1], 1);
-
- if (fr->ofields[0] == fr->ofields[1]) {
- fr->buffer = fr->ofields[0];
- ff_pullup_lock_buffer(fr->buffer, 2);
- return fr;
- }
- return fr;
-}
-
-static void copy_field(struct pullup_context *c, struct pullup_buffer *dest,
- struct pullup_buffer *src, int parity)
-{
- int i, j;
- unsigned char *d, *s;
- for (i = 0; i < c->nplanes; i++) {
- s = src->planes[i] + parity*c->stride[i];
- d = dest->planes[i] + parity*c->stride[i];
- for (j = c->h[i]>>1; j; j--) {
- memcpy(d, s, c->stride[i]);
- s += c->stride[i]<<1;
- d += c->stride[i]<<1;
- }
- }
-}
-
-void ff_pullup_pack_frame(struct pullup_context *c, struct pullup_frame *fr)
-{
- int i;
- if (fr->buffer) return;
- if (fr->length < 2) return; /* FIXME: deal with this */
- for (i = 0; i < 2; i++)
- {
- if (fr->ofields[i]->lock[i^1]) continue;
- fr->buffer = fr->ofields[i];
- ff_pullup_lock_buffer(fr->buffer, 2);
- copy_field(c, fr->buffer, fr->ofields[i^1], i^1);
- return;
- }
- fr->buffer = ff_pullup_get_buffer(c, 2);
- copy_field(c, fr->buffer, fr->ofields[0], 0);
- copy_field(c, fr->buffer, fr->ofields[1], 1);
-}
-
-void ff_pullup_release_frame(struct pullup_frame *fr)
-{
- int i;
- for (i = 0; i < fr->length; i++)
- ff_pullup_release_buffer(fr->ifields[i], fr->parity ^ (i&1));
- ff_pullup_release_buffer(fr->ofields[0], 0);
- ff_pullup_release_buffer(fr->ofields[1], 1);
- if (fr->buffer) ff_pullup_release_buffer(fr->buffer, 2);
- fr->lock--;
-}
-
-
-
-
-
-
-struct pullup_context *ff_pullup_alloc_context(void)
-{
- struct pullup_context *c;
-
- c = calloc(1, sizeof(struct pullup_context));
-
- return c;
-}
-
-void ff_pullup_preinit_context(struct pullup_context *c)
-{
- c->bpp = calloc(c->nplanes, sizeof(int));
- c->w = calloc(c->nplanes, sizeof(int));
- c->h = calloc(c->nplanes, sizeof(int));
- c->stride = calloc(c->nplanes, sizeof(int));
- c->background = calloc(c->nplanes, sizeof(int));
-}
-
-void ff_pullup_init_context(struct pullup_context *c)
-{
- int mp = c->metric_plane;
- if (c->nbuffers < 10) c->nbuffers = 10;
- c->buffers = calloc(c->nbuffers, sizeof (struct pullup_buffer));
-
- c->metric_w = (c->w[mp] - ((c->junk_left + c->junk_right) << 3)) >> 3;
- c->metric_h = (c->h[mp] - ((c->junk_top + c->junk_bottom) << 1)) >> 3;
- c->metric_offset = c->junk_left*c->bpp[mp] + (c->junk_top<<1)*c->stride[mp];
- c->metric_len = c->metric_w * c->metric_h;
-
- c->head = make_field_queue(c, 8);
-
- c->frame = calloc(1, sizeof (struct pullup_frame));
- c->frame->ifields = calloc(3, sizeof (struct pullup_buffer *));
-
- switch(c->format) {
- case PULLUP_FMT_Y:
- c->diff = diff_y;
- c->comb = licomb_y;
- c->var = var_y;
-#if ARCH_X86
-#if HAVE_MMX
- if (c->cpu & PULLUP_CPU_MMX) {
- c->diff = diff_y_mmx;
- c->comb = licomb_y_mmx;
- c->var = var_y_mmx;
- }
-#endif
-#endif
- /* c->comb = qpcomb_y; */
- break;
-#if 0
- case PULLUP_FMT_YUY2:
- c->diff = diff_yuy2;
- break;
- case PULLUP_FMT_RGB32:
- c->diff = diff_rgb32;
- break;
-#endif
- }
-}
-
-void ff_pullup_free_context(struct pullup_context *c)
-{
- struct pullup_field *f;
- free(c->buffers);
- f = c->head;
- do {
- if (!f) break;
- free(f->diffs);
- free(f->comb);
- f = f->next;
- free(f->prev);
- } while (f != c->head);
- free(c->frame);
- free(c);
-}
diff --git a/ffmpeg/libavfilter/libmpcodecs/pullup.h b/ffmpeg/libavfilter/libmpcodecs/pullup.h
deleted file mode 100644
index cd6ec00..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/pullup.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef MPLAYER_PULLUP_H
-#define MPLAYER_PULLUP_H
-
-#define PULLUP_CPU_MMX 1
-#define PULLUP_CPU_MMX2 2
-#define PULLUP_CPU_3DNOW 4
-#define PULLUP_CPU_3DNOWEXT 8
-#define PULLUP_CPU_SSE 16
-#define PULLUP_CPU_SSE2 32
-
-#define PULLUP_FMT_Y 1
-#define PULLUP_FMT_YUY2 2
-#define PULLUP_FMT_UYVY 3
-#define PULLUP_FMT_RGB32 4
-
-struct pullup_buffer
-{
- int lock[2];
- unsigned char **planes;
-};
-
-struct pullup_field
-{
- int parity;
- struct pullup_buffer *buffer;
- unsigned int flags;
- int breaks;
- int affinity;
- int *diffs;
- int *comb;
- int *var;
- struct pullup_field *prev, *next;
-};
-
-struct pullup_frame
-{
- int lock;
- int length;
- int parity;
- struct pullup_buffer **ifields, *ofields[2];
- struct pullup_buffer *buffer;
-};
-
-struct pullup_context
-{
- /* Public interface */
- int format;
- int nplanes;
- int *bpp, *w, *h, *stride, *background;
- unsigned int cpu;
- int junk_left, junk_right, junk_top, junk_bottom;
- int verbose;
- int metric_plane;
- int strict_breaks;
- int strict_pairs;
- /* Internal data */
- struct pullup_field *first, *last, *head;
- struct pullup_buffer *buffers;
- int nbuffers;
- int (*diff)(unsigned char *, unsigned char *, int);
- int (*comb)(unsigned char *, unsigned char *, int);
- int (*var)(unsigned char *, unsigned char *, int);
- int metric_w, metric_h, metric_len, metric_offset;
- struct pullup_frame *frame;
-};
-
-
-struct pullup_buffer *ff_pullup_lock_buffer(struct pullup_buffer *b, int parity);
-void ff_pullup_release_buffer(struct pullup_buffer *b, int parity);
-struct pullup_buffer *ff_pullup_get_buffer(struct pullup_context *c, int parity);
-
-void ff_pullup_submit_field(struct pullup_context *c, struct pullup_buffer *b, int parity);
-void ff_pullup_flush_fields(struct pullup_context *c);
-
-struct pullup_frame *ff_pullup_get_frame(struct pullup_context *c);
-void ff_pullup_pack_frame(struct pullup_context *c, struct pullup_frame *fr);
-void ff_pullup_release_frame(struct pullup_frame *fr);
-
-struct pullup_context *ff_pullup_alloc_context(void);
-void ff_pullup_preinit_context(struct pullup_context *c);
-void ff_pullup_init_context(struct pullup_context *c);
-void ff_pullup_free_context(struct pullup_context *c);
-
-#endif /* MPLAYER_PULLUP_H */
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_detc.c b/ffmpeg/libavfilter/libmpcodecs/vf_detc.c
deleted file mode 100644
index 751e2b8..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_detc.c
+++ /dev/null
@@ -1,453 +0,0 @@
-/*
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "config.h"
-#include "mp_msg.h"
-
-#include "img_format.h"
-#include "mp_image.h"
-#include "vf.h"
-
-#include "libvo/fastmemcpy.h"
-
-struct metrics {
- int even;
- int odd;
- int noise;
- int temp;
-};
-
-struct vf_priv_s {
- int frame;
- int drop, lastdrop;
- struct metrics pm;
- int thres[5];
- int inframes, outframes;
- int mode;
- int (*analyze)(struct vf_priv_s *, mp_image_t *, mp_image_t *);
- int needread;
-};
-
-#define COMPE(a,b,e) (abs((a)-(b)) < (((a)+(b))>>(e)))
-#define COMPARABLE(a,b) COMPE((a),(b),2)
-#define VERYCLOSE(a,b) COMPE((a),(b),3)
-
-#define OUTER_TC_NBHD(s) ( \
- COMPARABLE((s)[-1].m.even,(s)[-1].m.odd) && \
- COMPARABLE((s)[1].m.even,(s)[0].m.odd) && \
- COMPARABLE((s)[2].m.even,(s)[1].m.odd) && \
- COMPARABLE((s)[-1].m.noise,(s)[0].m.temp) && \
- COMPARABLE((s)[2].m.noise,(s)[2].m.temp) )
-
-#define INNER_TC_NBHD(s,l,h) ( \
- COMPARABLE((s)[0].m.even,(l)) && \
- COMPARABLE((s)[2].m.odd,(l)) && ( \
- COMPARABLE((s)[0].m.noise,(h)) || \
- COMPARABLE((s)[1].m.noise,(h)) ) )
-
-enum {
- TC_DROP,
- TC_PROG,
- TC_IL1,
- TC_IL2
-};
-
-static void block_diffs(struct metrics *m, unsigned char *old, unsigned char *new, int os, int ns)
-{
- int x, y, even=0, odd=0, noise, temp;
- unsigned char *oldp, *newp;
- m->noise = m->temp = 0;
- for (x = 8; x; x--) {
- oldp = old++;
- newp = new++;
- noise = temp = 0;
- for (y = 4; y; y--) {
- even += abs(newp[0]-oldp[0]);
- odd += abs(newp[ns]-oldp[os]);
- noise += newp[ns]-newp[0];
- temp += oldp[os]-newp[0];
- oldp += os<<1;
- newp += ns<<1;
- }
- m->noise += abs(noise);
- m->temp += abs(temp);
- }
- m->even = even;
- m->odd = odd;
-}
-
-static void diff_planes(struct metrics *m, unsigned char *old, unsigned char *new, int w, int h, int os, int ns)
-{
- int x, y, me=0, mo=0, mn=0, mt=0;
- struct metrics l;
- for (y = 0; y < h-7; y += 8) {
- for (x = 0; x < w-7; x += 8) {
- block_diffs(&l, old+x+y*os, new+x+y*ns, os, ns);
- if (l.even > me) me = l.even;
- if (l.odd > mo) mo = l.odd;
- if (l.noise > mn) mn = l.noise;
- if (l.temp > mt) mt = l.temp;
- }
- }
- m->even = me;
- m->odd = mo;
- m->noise = mn;
- m->temp = mt;
-}
-
-static void diff_fields(struct metrics *metr, mp_image_t *old, mp_image_t *new)
-{
- struct metrics m, mu, mv;
- diff_planes(&m, old->planes[0], new->planes[0],
- new->w, new->h, old->stride[0], new->stride[0]);
- if (new->flags & MP_IMGFLAG_PLANAR) {
- diff_planes(&mu, old->planes[1], new->planes[1],
- new->chroma_width, new->chroma_height,
- old->stride[1], new->stride[1]);
- diff_planes(&mv, old->planes[2], new->planes[2],
- new->chroma_width, new->chroma_height,
- old->stride[2], new->stride[2]);
- if (mu.even > m.even) m.even = mu.even;
- if (mu.odd > m.odd) m.odd = mu.odd;
- if (mu.noise > m.noise) m.noise = mu.noise;
- if (mu.temp > m.temp) m.temp = mu.temp;
- if (mv.even > m.even) m.even = mv.even;
- if (mv.odd > m.odd) m.odd = mv.odd;
- if (mv.noise > m.noise) m.noise = mv.noise;
- if (mv.temp > m.temp) m.temp = mv.temp;
- }
- *metr = m;
-}
-
-static void status(int f, struct metrics *m)
-{
- ff_mp_msg(MSGT_VFILTER, MSGL_V, "frame %d: e=%d o=%d n=%d t=%d\n",
- f, m->even, m->odd, m->noise, m->temp);
-}
-
-static int analyze_fixed_pattern(struct vf_priv_s *p, mp_image_t *new, mp_image_t *old)
-{
- if (p->frame >= 0) p->frame = (p->frame+1)%5;
- ff_mp_msg(MSGT_VFILTER, MSGL_V, "frame %d\n", p->frame);
- switch (p->frame) {
- case -1: case 0: case 1: case 2:
- return TC_PROG;
- case 3:
- return TC_IL1;
- case 4:
- return TC_IL2;
- }
- return 0;
-}
-
-static int analyze_aggressive(struct vf_priv_s *p, mp_image_t *new, mp_image_t *old)
-{
- struct metrics m, pm;
-
- if (p->frame >= 0) p->frame = (p->frame+1)%5;
-
- diff_fields(&m, old, new);
-
- status(p->frame, &m);
-
- pm = p->pm;
- p->pm = m;
-
- if (p->frame == 4) {
- /* We need to break at scene changes, but is this a valid test? */
- if ((m.even > p->thres[2]) && (m.odd > p->thres[2]) && (m.temp > p->thres[3])
- && (m.temp > 5*pm.temp) && (m.temp*2 > m.noise)) {
- ff_mp_msg(MSGT_VFILTER, MSGL_V, "scene change breaking telecine!\n");
- p->frame = -1;
- return TC_DROP;
- }
- /* Thres. is to compensate for quantization errors when noise is low */
- if (m.noise - m.temp > -p->thres[4]) {
- if (COMPARABLE(m.even, pm.odd)) {
- //ff_mp_msg(MSGT_VFILTER, MSGL_V, "confirmed field match!\n");
- return TC_IL2;
- } else if ((m.even < p->thres[0]) && (m.odd < p->thres[0]) && VERYCLOSE(m.even, m.odd)
- && VERYCLOSE(m.noise,m.temp) && VERYCLOSE(m.noise,pm.noise)) {
- ff_mp_msg(MSGT_VFILTER, MSGL_V, "interlaced frame appears in duplicate!!!\n");
- p->pm = pm; /* hack :) */
- p->frame = 3;
- return TC_IL1;
- }
- } else {
- ff_mp_msg(MSGT_VFILTER, MSGL_V, "mismatched telecine fields!\n");
- p->frame = -1;
- }
- }
-
- if (2*m.even*m.temp < m.odd*m.noise) {
- ff_mp_msg(MSGT_VFILTER, MSGL_V, "caught telecine sync!\n");
- p->frame = 3;
- return TC_IL1;
- }
-
- if (p->frame < 3) {
- if (m.noise > p->thres[3]) {
- if (m.noise > 2*m.temp) {
- ff_mp_msg(MSGT_VFILTER, MSGL_V, "merging fields out of sequence!\n");
- return TC_IL2;
- }
- if ((m.noise > 2*pm.noise) && (m.even > p->thres[2]) && (m.odd > p->thres[2])) {
- ff_mp_msg(MSGT_VFILTER, MSGL_V, "dropping horrible interlaced frame!\n");
- return TC_DROP;
- }
- }
- }
-
- switch (p->frame) {
- case -1:
- if (4*m.noise > 5*m.temp) {
- ff_mp_msg(MSGT_VFILTER, MSGL_V, "merging fields out of sequence!\n");
- return TC_IL2;
- }
- case 0:
- case 1:
- case 2:
- return TC_PROG;
- case 3:
- if ((m.even > p->thres[1]) && (m.even > m.odd) && (m.temp > m.noise)) {
- ff_mp_msg(MSGT_VFILTER, MSGL_V, "lost telecine tracking!\n");
- p->frame = -1;
- return TC_PROG;
- }
- return TC_IL1;
- case 4:
- return TC_IL2;
- }
- return 0;
-}
-
-static void copy_image(mp_image_t *dmpi, mp_image_t *mpi, int field)
-{
- switch (field) {
- case 0:
- my_memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h/2,
- dmpi->stride[0]*2, mpi->stride[0]*2);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- my_memcpy_pic(dmpi->planes[1], mpi->planes[1],
- mpi->chroma_width, mpi->chroma_height/2,
- dmpi->stride[1]*2, mpi->stride[1]*2);
- my_memcpy_pic(dmpi->planes[2], mpi->planes[2],
- mpi->chroma_width, mpi->chroma_height/2,
- dmpi->stride[2]*2, mpi->stride[2]*2);
- }
- break;
- case 1:
- my_memcpy_pic(dmpi->planes[0]+dmpi->stride[0],
- mpi->planes[0]+mpi->stride[0], mpi->w, mpi->h/2,
- dmpi->stride[0]*2, mpi->stride[0]*2);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- my_memcpy_pic(dmpi->planes[1]+dmpi->stride[1],
- mpi->planes[1]+mpi->stride[1],
- mpi->chroma_width, mpi->chroma_height/2,
- dmpi->stride[1]*2, mpi->stride[1]*2);
- my_memcpy_pic(dmpi->planes[2]+dmpi->stride[2],
- mpi->planes[2]+mpi->stride[2],
- mpi->chroma_width, mpi->chroma_height/2,
- dmpi->stride[2]*2, mpi->stride[2]*2);
- }
- break;
- case 2:
- memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h,
- dmpi->stride[0], mpi->stride[0]);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- memcpy_pic(dmpi->planes[1], mpi->planes[1],
- mpi->chroma_width, mpi->chroma_height,
- dmpi->stride[1], mpi->stride[1]);
- memcpy_pic(dmpi->planes[2], mpi->planes[2],
- mpi->chroma_width, mpi->chroma_height,
- dmpi->stride[2], mpi->stride[2]);
- }
- break;
- }
-}
-
-static int do_put_image(struct vf_instance *vf, mp_image_t *dmpi)
-{
- struct vf_priv_s *p = vf->priv;
- int dropflag;
-
- switch (p->drop) {
- default:
- dropflag = 0;
- break;
- case 1:
- dropflag = (++p->lastdrop >= 5);
- break;
- case 2:
- dropflag = (++p->lastdrop >= 5) && (4*p->inframes <= 5*p->outframes);
- break;
- }
-
- if (dropflag) {
- ff_mp_msg(MSGT_VFILTER, MSGL_V, "drop! [%d/%d=%g]\n",
- p->outframes, p->inframes, (float)p->outframes/p->inframes);
- p->lastdrop = 0;
- return 0;
- }
-
- p->outframes++;
- return ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE);
-}
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
-{
- int ret=0;
- mp_image_t *dmpi;
- struct vf_priv_s *p = vf->priv;
-
- p->inframes++;
-
- if (p->needread) dmpi = ff_vf_get_image(vf->next, mpi->imgfmt,
- MP_IMGTYPE_STATIC, MP_IMGFLAG_ACCEPT_STRIDE |
- MP_IMGFLAG_PRESERVE | MP_IMGFLAG_READABLE,
- mpi->width, mpi->height);
- /* FIXME: is there a good way to get rid of static type? */
- else dmpi = ff_vf_get_image(vf->next, mpi->imgfmt,
- MP_IMGTYPE_STATIC, MP_IMGFLAG_ACCEPT_STRIDE |
- MP_IMGFLAG_PRESERVE, mpi->width, mpi->height);
-
- switch (p->analyze(p, mpi, dmpi)) {
- case TC_DROP:
- /* Don't copy anything unless we'll need to read it. */
- if (p->needread) copy_image(dmpi, mpi, 2);
- p->lastdrop = 0;
- break;
- case TC_PROG:
- /* Copy and display the whole frame. */
- copy_image(dmpi, mpi, 2);
- ret = do_put_image(vf, dmpi);
- break;
- case TC_IL1:
- /* Only copy bottom field unless we need to read. */
- if (p->needread) copy_image(dmpi, mpi, 2);
- else copy_image(dmpi, mpi, 1);
- p->lastdrop = 0;
- break;
- case TC_IL2:
- /* Copy top field and show frame, then copy bottom if needed. */
- copy_image(dmpi, mpi, 0);
- ret = do_put_image(vf, dmpi);
- if (p->needread) copy_image(dmpi, mpi, 1);
- break;
- }
- return ret;
-}
-
-static int query_format(struct vf_instance *vf, unsigned int fmt)
-{
- /* FIXME - figure out which other formats work */
- switch (fmt) {
- case IMGFMT_YV12:
- case IMGFMT_IYUV:
- case IMGFMT_I420:
- return ff_vf_next_query_format(vf, fmt);
- }
- return 0;
-}
-
-static int config(struct vf_instance *vf,
- int width, int height, int d_width, int d_height,
- unsigned int flags, unsigned int outfmt)
-{
- return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
-}
-
-static void uninit(struct vf_instance *vf)
-{
- free(vf->priv);
-}
-
-static struct {
- const char *name;
- int (*func)(struct vf_priv_s *p, mp_image_t *new, mp_image_t *old);
- int needread;
-} anal_funcs[] = {
- { "fixed", analyze_fixed_pattern, 0 },
- { "aggressive", analyze_aggressive, 1 },
- { NULL, NULL, 0 }
-};
-
-#define STARTVARS if (0)
-#define GETVAR(str, name, out, func) \
- else if (!strncmp((str), name "=", sizeof(name))) \
- (out) = (func)((str) + sizeof(name))
-
-static void parse_var(struct vf_priv_s *p, char *var)
-{
- STARTVARS;
- GETVAR(var, "dr", p->drop, atoi);
- GETVAR(var, "t0", p->thres[0], atoi);
- GETVAR(var, "t1", p->thres[1], atoi);
- GETVAR(var, "t2", p->thres[2], atoi);
- GETVAR(var, "t3", p->thres[3], atoi);
- GETVAR(var, "t4", p->thres[4], atoi);
- GETVAR(var, "fr", p->frame, atoi);
- GETVAR(var, "am", p->mode, atoi);
-}
-
-static void parse_args(struct vf_priv_s *p, char *args)
-{
- char *next, *orig;
- for (args=orig=strdup(args); args; args=next) {
- next = strchr(args, ':');
- if (next) *next++ = 0;
- parse_var(p, args);
- }
- free(orig);
-}
-
-static int vf_open(vf_instance_t *vf, char *args)
-{
- struct vf_priv_s *p;
- vf->config = config;
- vf->put_image = put_image;
- vf->query_format = query_format;
- vf->uninit = uninit;
- vf->default_reqs = VFCAP_ACCEPT_STRIDE;
- vf->priv = p = calloc(1, sizeof(struct vf_priv_s));
- p->frame = -1;
- p->thres[0] = 440;
- p->thres[1] = 720;
- p->thres[2] = 2500;
- p->thres[3] = 2500;
- p->thres[4] = 800;
- p->drop = 0;
- p->mode = 1;
- if (args) parse_args(p, args);
- p->analyze = anal_funcs[p->mode].func;
- p->needread = anal_funcs[p->mode].needread;
- return 1;
-}
-
-const vf_info_t ff_vf_info_detc = {
- "de-telecine filter",
- "detc",
- "Rich Felker",
- "",
- vf_open,
- NULL
-};
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_dint.c b/ffmpeg/libavfilter/libmpcodecs/vf_dint.c
deleted file mode 100644
index 950e835..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_dint.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <inttypes.h>
-
-#include "config.h"
-#include "mp_msg.h"
-
-#include "mp_image.h"
-#include "img_format.h"
-#include "vf.h"
-
-struct vf_priv_s {
- float sense; // first parameter
- float level; // second parameter
- unsigned int imgfmt;
- int diff;
- uint32_t max;
-// int dfr;
-// int rdfr;
- int was_dint;
- mp_image_t *pmpi; // previous mpi
-};
-
-#define MAXROWSIZE 1200
-
-static int config (struct vf_instance *vf,
- int width, int height, int d_width, int d_height,
- unsigned int flags, unsigned int outfmt)
-{
- int rowsize;
-
- vf->priv->pmpi = ff_vf_get_image (vf->next, outfmt, MP_IMGTYPE_TEMP,
- 0, width, height);
- if (!(vf->priv->pmpi->flags & MP_IMGFLAG_PLANAR) &&
- outfmt != IMGFMT_RGB32 && outfmt != IMGFMT_BGR32 &&
- outfmt != IMGFMT_RGB24 && outfmt != IMGFMT_BGR24 &&
- outfmt != IMGFMT_RGB16 && outfmt != IMGFMT_BGR16)
- {
- ff_mp_msg (MSGT_VFILTER, MSGL_WARN, "Drop-interlaced filter doesn't support this outfmt :(\n");
- return 0;
- }
- vf->priv->imgfmt = outfmt;
- // recalculate internal values
- rowsize = vf->priv->pmpi->width;
- if (rowsize > MAXROWSIZE) rowsize = MAXROWSIZE;
- vf->priv->max = vf->priv->level * vf->priv->pmpi->height * rowsize / 2;
- if (vf->priv->pmpi->flags & MP_IMGFLAG_PLANAR) // planar YUV
- vf->priv->diff = vf->priv->sense * 256;
- else
- vf->priv->diff = vf->priv->sense * (1 << (vf->priv->pmpi->bpp/3));
- if (vf->priv->diff < 0) vf->priv->diff = 0;
- if (!(vf->priv->pmpi->flags & MP_IMGFLAG_PLANAR) &&
- vf->priv->pmpi->bpp < 24 && vf->priv->diff > 31)
- vf->priv->diff = 31;
- ff_mp_msg (MSGT_VFILTER, MSGL_INFO, "Drop-interlaced: %dx%d diff %d / level %u\n",
- vf->priv->pmpi->width, vf->priv->pmpi->height,
- vf->priv->diff, (unsigned int)vf->priv->max);
-// vf->priv->rdfr = vf->priv->dfr = 0;
- vf->priv->was_dint = 0;
- return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
-}
-
-static int put_image (struct vf_instance *vf, mp_image_t *mpi, double pts)
-{
- int8_t rrow0[MAXROWSIZE];
- int8_t rrow1[MAXROWSIZE];
- int8_t rrow2[MAXROWSIZE];
- int8_t *row0 = rrow0, *row1 = rrow1, *row2 = rrow2/*, *row3 = rrow3*/;
- int rowsize = mpi->width;
- uint32_t nok = 0, max = vf->priv->max;
- int diff = vf->priv->diff;
- int i, j;
- register int n1, n2;
- unsigned char *cur0, *prv0;
- register unsigned char *cur, *prv;
-
- if (rowsize > MAXROWSIZE) rowsize = MAXROWSIZE;
- // check if nothing to do
- if (mpi->imgfmt == vf->priv->imgfmt)
- {
- cur0 = mpi->planes[0] + mpi->stride[0];
- prv0 = mpi->planes[0];
- for (j = 1; j < mpi->height && nok <= max; j++)
- {
- cur = cur0;
- prv = prv0;
- // analyse row (row0)
- if (mpi->flags & MP_IMGFLAG_PLANAR) // planar YUV - check luminance
- for (i = 0; i < rowsize; i++)
- {
- if (cur[0] - prv[0] > diff)
- row0[i] = 1;
- else if (cur[0] - prv[0] < -diff)
- row0[i] = -1;
- else
- row0[i] = 0;
- cur++;
- prv++;
- // check if row0 is 1 but row1 is 0, and row2 is 1 or row2 is 0
- // but row3 is 1 so it's interlaced ptr (nok++)
- if (j > 2 && row0[i] > 0 && (row1[i] < 0 || (!row1[i] && row2[i] < 0)) &&
- (++nok) > max)
- break;
- }
- else if (mpi->bpp < 24) // RGB/BGR 16 - check all colors
- for (i = 0; i < rowsize; i++)
- {
- n1 = cur[0] + (cur[1]<<8);
- n2 = prv[0] + (prv[1]<<8);
- if ((n1&0x1f) - (n2&0x1f) > diff ||
- ((n1>>5)&0x3f) - ((n2>>5)&0x3f) > diff ||
- ((n1>>11)&0x1f) - ((n2>>11)&0x1f) > diff)
- row0[i] = 1;
- else if ((n1&0x1f) - (n2&0x1f) < -diff ||
- ((n1>>5)&0x3f) - ((n2>>5)&0x3f) < -diff ||
- ((n1>>11)&0x1f) - ((n2>>11)&0x1f) < -diff)
- row0[i] = -1;
- else
- row0[i] = 0;
- cur += 2;
- prv += 2;
- // check if row0 is 1 but row1 is 0, and row2 is 1 or row2 is 0
- // but row3 is 1 so it's interlaced ptr (nok++)
- if (j > 2 && row0[i] > 0 && (row1[i] < 0 || (!row1[i] && row2[i] < 0)) &&
- (++nok) > max)
- break;
- }
- else // RGB/BGR 24/32
- for (i = 0; i < rowsize; i++)
- {
- if (cur[0] - prv[0] > diff ||
- cur[1] - prv[1] > diff ||
- cur[2] - prv[2] > diff)
- row0[i] = 1;
- else if (prv[0] - cur[0] > diff ||
- prv[1] - cur[1] > diff ||
- prv[2] - cur[2] > diff)
- row0[i] = -1;
- else
- row0[i] = 0;
- cur += mpi->bpp/8;
- prv += mpi->bpp/8;
- // check if row0 is 1 but row1 is 0, and row2 is 1 or row2 is 0
- // but row3 is 1 so it's interlaced ptr (nok++)
- if (j > 2 && row0[i] > 0 && (row1[i] < 0 || (!row1[i] && row2[i] < 0)) &&
- (++nok) > max)
- break;
- }
- cur0 += mpi->stride[0];
- prv0 += mpi->stride[0];
- // rotate rows
- cur = row2;
- row2 = row1;
- row1 = row0;
- row0 = cur;
- }
- }
- // check if number of interlaced is above of max
- if (nok > max)
- {
-// vf->priv->dfr++;
- if (vf->priv->was_dint < 1) // can skip at most one frame!
- {
- vf->priv->was_dint++;
-// vf->priv->rdfr++;
-// ff_mp_msg (MSGT_VFILTER, MSGL_INFO, "DI:%d/%d ", vf->priv->rdfr, vf->priv->dfr);
- return 0;
- }
- }
- vf->priv->was_dint = 0;
-// ff_mp_msg (MSGT_VFILTER, MSGL_INFO, "DI:%d/%d ", vf->priv->rdfr, vf->priv->dfr);
- return ff_vf_next_put_image (vf, mpi, pts);
-}
-
-static int vf_open(vf_instance_t *vf, char *args){
- vf->config = config;
- vf->put_image = put_image;
-// vf->default_reqs=VFCAP_ACCEPT_STRIDE;
- vf->priv = malloc (sizeof(struct vf_priv_s));
- vf->priv->sense = 0.1;
- vf->priv->level = 0.15;
- vf->priv->pmpi = NULL;
- if (args)
- sscanf (args, "%f:%f", &vf->priv->sense, &vf->priv->level);
- return 1;
-}
-
-const vf_info_t ff_vf_info_dint = {
- "drop interlaced frames",
- "dint",
- "A.G.",
- "",
- vf_open,
- NULL
-};
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_divtc.c b/ffmpeg/libavfilter/libmpcodecs/vf_divtc.c
deleted file mode 100644
index 61f6e35..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_divtc.c
+++ /dev/null
@@ -1,722 +0,0 @@
-/*
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <limits.h>
-#include <math.h>
-
-#include "config.h"
-#include "mp_msg.h"
-#include "cpudetect.h"
-#include "libavutil/common.h"
-#include "libavutil/x86/asm.h"
-#include "mpbswap.h"
-
-#include "img_format.h"
-#include "mp_image.h"
-#include "vf.h"
-
-#include "libvo/fastmemcpy.h"
-
-const vf_info_t ff_vf_info_divtc;
-
-struct vf_priv_s
- {
- int deghost, pass, phase, window, fcount, bcount, frameno, misscount,
- ocount, sum[5];
- double threshold;
- FILE *file;
- int8_t *bdata;
- unsigned int *csdata;
- int *history;
- };
-
-/*
- * diff_MMX and diff_C stolen from vf_decimate.c
- */
-
-#if HAVE_MMX && HAVE_EBX_AVAILABLE
-static int diff_MMX(unsigned char *old, unsigned char *new, int os, int ns)
- {
- volatile short out[4];
- __asm__ (
- "movl $8, %%ecx \n\t"
- "pxor %%mm4, %%mm4 \n\t"
- "pxor %%mm7, %%mm7 \n\t"
-
- ASMALIGN(4)
- "1: \n\t"
-
- "movq (%%"REG_S"), %%mm0 \n\t"
- "movq (%%"REG_S"), %%mm2 \n\t"
- "add %%"REG_a", %%"REG_S" \n\t"
- "movq (%%"REG_D"), %%mm1 \n\t"
- "add %%"REG_b", %%"REG_D" \n\t"
- "psubusb %%mm1, %%mm2 \n\t"
- "psubusb %%mm0, %%mm1 \n\t"
- "movq %%mm2, %%mm0 \n\t"
- "movq %%mm1, %%mm3 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpcklbw %%mm7, %%mm1 \n\t"
- "punpckhbw %%mm7, %%mm2 \n\t"
- "punpckhbw %%mm7, %%mm3 \n\t"
- "paddw %%mm0, %%mm4 \n\t"
- "paddw %%mm1, %%mm4 \n\t"
- "paddw %%mm2, %%mm4 \n\t"
- "paddw %%mm3, %%mm4 \n\t"
-
- "decl %%ecx \n\t"
- "jnz 1b \n\t"
- "movq %%mm4, (%%"REG_d") \n\t"
- "emms \n\t"
- :
- : "S" (old), "D" (new), "a" ((long)os), "b" ((long)ns), "d" (out)
- : "%ecx", "memory"
- );
- return out[0]+out[1]+out[2]+out[3];
- }
-#endif
-
-static int diff_C(unsigned char *old, unsigned char *new, int os, int ns)
- {
- int x, y, d=0;
-
- for(y=8; y; y--, new+=ns, old+=os)
- for(x=8; x; x--)
- d+=abs(new[x]-old[x]);
-
- return d;
- }
-
-static int (*diff)(unsigned char *, unsigned char *, int, int);
-
-static int diff_plane(unsigned char *old, unsigned char *new,
- int w, int h, int os, int ns, int arg)
- {
- int x, y, d, max=0, sum=0, n=0;
-
- for(y=0; y<h-7; y+=8)
- {
- for(x=0; x<w-7; x+=8)
- {
- d=diff(old+x+y*os, new+x+y*ns, os, ns);
- if(d>max) max=d;
- sum+=d;
- n++;
- }
- }
-
- return (sum+n*max)/2;
- }
-
-/*
-static unsigned int checksum_plane(unsigned char *p, unsigned char *z,
- int w, int h, int s, int zs, int arg)
- {
- unsigned int shift, sum;
- unsigned char *e;
-
- for(sum=0; h; h--, p+=s-w)
- for(e=p+w, shift=32; p<e;)
- sum^=(*p++)<<(shift=(shift-8)&31);
-
- return sum;
- }
-*/
-
-static unsigned int checksum_plane(unsigned char *p, unsigned char *z,
- int w, int h, int s, int zs, int arg)
- {
- unsigned int shift;
- uint32_t sum, t;
- unsigned char *e, *e2;
-#if HAVE_FAST_64BIT
- typedef uint64_t wsum_t;
-#else
- typedef uint32_t wsum_t;
-#endif
- wsum_t wsum;
-
- for(sum=0; h; h--, p+=s-w)
- {
- for(shift=0, e=p+w; (int)p&(sizeof(wsum_t)-1) && p<e;)
- sum^=*p++<<(shift=(shift-8)&31);
-
- for(wsum=0, e2=e-sizeof(wsum_t)+1; p<e2; p+=sizeof(wsum_t))
- wsum^=*(wsum_t *)p;
-
-#if HAVE_FAST_64BIT
- t=be2me_32((uint32_t)(wsum>>32^wsum));
-#else
- t=be2me_32(wsum);
-#endif
-
- for(sum^=(t<<shift|t>>(32-shift)); p<e;)
- sum^=*p++<<(shift=(shift-8)&31);
- }
-
- return sum;
- }
-
-static int deghost_plane(unsigned char *d, unsigned char *s,
- int w, int h, int ds, int ss, int threshold)
- {
- int t;
- unsigned char *e;
-
- for(; h; h--, s+=ss-w, d+=ds-w)
- for(e=d+w; d<e; d++, s++)
- if(abs(*d-*s)>=threshold)
- *d=(t=(*d<<1)-*s)<0?0:t>255?255:t;
-
- return 0;
- }
-
-static int copyop(unsigned char *d, unsigned char *s, int bpl, int h, int dstride, int sstride, int dummy) {
- memcpy_pic(d, s, bpl, h, dstride, sstride);
- return 0;
-}
-
-static int imgop(int(*planeop)(unsigned char *, unsigned char *,
- int, int, int, int, int),
- mp_image_t *dst, mp_image_t *src, int arg)
- {
- if(dst->flags&MP_IMGFLAG_PLANAR)
- return planeop(dst->planes[0], src?src->planes[0]:0,
- dst->w, dst->h,
- dst->stride[0], src?src->stride[0]:0, arg)+
- planeop(dst->planes[1], src?src->planes[1]:0,
- dst->chroma_width, dst->chroma_height,
- dst->stride[1], src?src->stride[1]:0, arg)+
- planeop(dst->planes[2], src?src->planes[2]:0,
- dst->chroma_width, dst->chroma_height,
- dst->stride[2], src?src->stride[2]:0, arg);
-
- return planeop(dst->planes[0], src?src->planes[0]:0,
- dst->w*(dst->bpp/8), dst->h,
- dst->stride[0], src?src->stride[0]:0, arg);
- }
-
-/*
- * Find the phase in which the telecine pattern fits best to the
- * given 5 frame slice of frame difference measurements.
- *
- * If phase1 and phase2 are not negative, only the two specified
- * phases are tested.
- */
-
-static int match(struct vf_priv_s *p, int *diffs,
- int phase1, int phase2, double *strength)
- {
- static const int pattern1[]={ -4, 1, 1, 1, 1 },
- pattern2[]={ -2, -3, 4, 4, -3 }, *pattern;
- int f, m, n, t[5];
-
- pattern=p->deghost>0?pattern2:pattern1;
-
- for(f=0; f<5; f++)
- {
- if(phase1<0 || phase2<0 || f==phase1 || f==phase2)
- {
- for(n=t[f]=0; n<5; n++)
- t[f]+=diffs[n]*pattern[(n-f+5)%5];
- }
- else
- t[f]=INT_MIN;
- }
-
- /* find the best match */
- for(m=0, n=1; n<5; n++)
- if(t[n]>t[m]) m=n;
-
- if(strength)
- {
- /* the second best match */
- for(f=m?0:1, n=f+1; n<5; n++)
- if(n!=m && t[n]>t[f]) f=n;
-
- *strength=(t[m]>0?(double)(t[m]-t[f])/t[m]:0.0);
- }
-
- return m;
- }
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
- {
- mp_image_t *dmpi, *tmpi=0;
- int n, m, f, newphase;
- struct vf_priv_s *p=vf->priv;
- unsigned int checksum;
- double d;
-
- dmpi=ff_vf_get_image(vf->next, mpi->imgfmt,
- MP_IMGTYPE_STATIC, MP_IMGFLAG_ACCEPT_STRIDE |
- MP_IMGFLAG_PRESERVE | MP_IMGFLAG_READABLE,
- mpi->width, mpi->height);
- ff_vf_clone_mpi_attributes(dmpi, mpi);
-
- newphase=p->phase;
-
- switch(p->pass)
- {
- case 1:
- fprintf(p->file, "%08x %d\n",
- (unsigned int)imgop((void *)checksum_plane, mpi, 0, 0),
- p->frameno?imgop(diff_plane, dmpi, mpi, 0):0);
- break;
-
- case 2:
- if(p->frameno/5>p->bcount)
- {
- ff_mp_msg(MSGT_VFILTER, MSGL_ERR,
- "\n%s: Log file ends prematurely! "
- "Switching to one pass mode.\n", vf->info->name);
- p->pass=0;
- break;
- }
-
- checksum=(unsigned int)imgop((void *)checksum_plane, mpi, 0, 0);
-
- if(checksum!=p->csdata[p->frameno])
- {
- for(f=0; f<100; f++)
- if(p->frameno+f<p->fcount && p->csdata[p->frameno+f]==checksum)
- break;
- else if(p->frameno-f>=0 && p->csdata[p->frameno-f]==checksum)
- {
- f=-f;
- break;
- }
-
- if(f<100)
- {
- ff_mp_msg(MSGT_VFILTER, MSGL_INFO,
- "\n%s: Mismatch with pass-1: %+d frame(s).\n",
- vf->info->name, f);
-
- p->frameno+=f;
- p->misscount=0;
- }
- else if(p->misscount++>=30)
- {
- ff_mp_msg(MSGT_VFILTER, MSGL_ERR,
- "\n%s: Sync with pass-1 lost! "
- "Switching to one pass mode.\n", vf->info->name);
- p->pass=0;
- break;
- }
- }
-
- n=(p->frameno)/5;
- if(n>=p->bcount) n=p->bcount-1;
-
- newphase=p->bdata[n];
- break;
-
- default:
- if(p->frameno)
- {
- int *sump=p->sum+p->frameno%5,
- *histp=p->history+p->frameno%p->window;
-
- *sump-=*histp;
- *sump+=(*histp=imgop(diff_plane, dmpi, mpi, 0));
- }
-
- m=match(p, p->sum, -1, -1, &d);
-
- if(d>=p->threshold)
- newphase=m;
- }
-
- n=p->ocount++%5;
-
- if(newphase!=p->phase && ((p->phase+4)%5<n)==((newphase+4)%5<n))
- {
- p->phase=newphase;
- ff_mp_msg(MSGT_VFILTER, MSGL_STATUS,
- "\n%s: Telecine phase %d.\n", vf->info->name, p->phase);
- }
-
- switch((p->frameno++-p->phase+10)%5)
- {
- case 0:
- imgop(copyop, dmpi, mpi, 0);
- return 0;
-
- case 4:
- if(p->deghost>0)
- {
- tmpi=ff_vf_get_image(vf->next, mpi->imgfmt,
- MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE |
- MP_IMGFLAG_READABLE,
- mpi->width, mpi->height);
- ff_vf_clone_mpi_attributes(tmpi, mpi);
-
- imgop(copyop, tmpi, mpi, 0);
- imgop(deghost_plane, tmpi, dmpi, p->deghost);
- imgop(copyop, dmpi, mpi, 0);
- return ff_vf_next_put_image(vf, tmpi, MP_NOPTS_VALUE);
- }
- }
-
- imgop(copyop, dmpi, mpi, 0);
- return ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE);
- }
-
-static int analyze(struct vf_priv_s *p)
- {
- int *buf=0, *bp, bufsize=0, n, b, f, i, j, m, s;
- unsigned int *cbuf=0, *cp;
- int8_t *pbuf;
- int8_t lbuf[256];
- int sum[5];
- double d;
-
- /* read the file */
-
- n=15;
- while(fgets(lbuf, 256, p->file))
- {
- if(n>=bufsize-19)
- {
- bufsize=bufsize?bufsize*2:30000;
- if((bp=realloc(buf, bufsize*sizeof *buf))) buf=bp;
- if((cp=realloc(cbuf, bufsize*sizeof *cbuf))) cbuf=cp;
-
- if(!bp || !cp)
- {
- ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, "%s: Not enough memory.\n",
- ff_vf_info_divtc.name);
- free(buf);
- free(cbuf);
- return 0;
- }
- }
- sscanf(lbuf, "%x %d", cbuf+n, buf+n);
- n++;
- }
-
- if(n <= 15)
- {
- ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, "%s: Empty 2-pass log file.\n",
- ff_vf_info_divtc.name);
- free(buf);
- free(cbuf);
- return 0;
- }
-
- /* generate some dummy data past the beginning and end of the array */
-
- buf+=15, cbuf+=15;
- n-=15;
-
- memcpy(buf-15, buf, 15*sizeof *buf);
- memset(cbuf-15, 0, 15*sizeof *cbuf);
-
- while(n%5)
- buf[n]=buf[n-5], cbuf[n]=0, n++;
-
- memcpy(buf+n, buf+n-15, 15*sizeof *buf);
- memset(cbuf+n, 0, 15*sizeof *cbuf);
-
- p->csdata=cbuf;
- p->fcount=n;
-
- /* array with one slot for each slice of 5 frames */
-
- p->bdata=pbuf=malloc(p->bcount=b=(n/5));
- memset(pbuf, 255, b);
-
- /* resolve the automatic mode */
-
- if(p->deghost<0)
- {
- int deghost=-p->deghost;
- double s0=0.0, s1=0.0;
-
- for(f=0; f<n; f+=5)
- {
- p->deghost=0; match(p, buf+f, -1, -1, &d); s0+=d;
- p->deghost=1; match(p, buf+f, -1, -1, &d); s1+=d;
- }
-
- p->deghost=s1>s0?deghost:0;
-
- ff_mp_msg(MSGT_VFILTER, MSGL_INFO,
- "%s: Deghosting %-3s (relative pattern strength %+.2fdB).\n",
- ff_vf_info_divtc.name,
- p->deghost?"ON":"OFF",
- 10.0*log10(s1/s0));
- }
-
- /* analyze the data */
-
- for(f=0; f<5; f++)
- for(sum[f]=0, n=-15; n<20; n+=5)
- sum[f]+=buf[n+f];
-
- for(f=0; f<b; f++)
- {
- m=match(p, sum, -1, -1, &d);
-
- if(d>=p->threshold)
- pbuf[f]=m;
-
- if(f<b-1)
- for(n=0; n<5; n++)
- sum[n]=sum[n]-buf[5*(f-3)+n]+buf[5*(f+4)+n];
- }
-
- /* fill in the gaps */
-
- /* the beginning */
- for(f=0; f<b && pbuf[f]==-1; f++);
-
- if(f==b)
- {
- free(buf-15);
- ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, "%s: No telecine pattern found!\n",
- ff_vf_info_divtc.name);
- return 0;
- }
-
- for(n=0; n<f; pbuf[n++]=pbuf[f]);
-
- /* the end */
- for(f=b-1; pbuf[f]==-1; f--);
- for(n=f+1; n<b; pbuf[n++]=pbuf[f]);
-
- /* the rest */
- for(f=0;;)
- {
- while(f<b && pbuf[f]!=-1) f++;
- if(f==b) break;
- for(n=f; pbuf[n]==-1; n++);
-
- if(pbuf[f-1]==pbuf[n])
- {
- /* just a gap */
- while(f<n) pbuf[f++]=pbuf[n];
- }
- else
- {
- /* phase change, reanalyze the original data in the gap with zero
- threshold for only the two phases that appear at the ends */
-
- for(i=0; i<5; i++)
- for(sum[i]=0, j=5*f-15; j<5*f; j+=5)
- sum[i]+=buf[i+j];
-
- for(i=f; i<n; i++)
- {
- pbuf[i]=match(p, sum, pbuf[f-1], pbuf[n], 0);
-
- for(j=0; j<5; j++)
- sum[j]=sum[j]-buf[5*(i-3)+j]+buf[5*(i+4)+j];
- }
-
- /* estimate the transition point by dividing the gap
- in the same proportion as the number of matches of each kind */
-
- for(i=f, m=f; i<n; i++)
- if(pbuf[i]==pbuf[f-1]) m++;
-
- /* find the transition of the right direction nearest to the
- estimated point */
-
- if(m>f && m<n)
- {
- for(j=m; j>f; j--)
- if(pbuf[j-1]==pbuf[f-1] && pbuf[j]==pbuf[n]) break;
- for(s=m; s<n; s++)
- if(pbuf[s-1]==pbuf[f-1] && pbuf[s]==pbuf[n]) break;
-
- m=(s-m<m-j)?s:j;
- }
-
- /* and rewrite the data to allow only this one transition */
-
- for(i=f; i<m; i++)
- pbuf[i]=pbuf[f-1];
-
- for(; i<n; i++)
- pbuf[i]=pbuf[n];
-
- f=n;
- }
- }
-
- free(buf-15);
-
- return 1;
- }
-
-static int query_format(struct vf_instance *vf, unsigned int fmt)
- {
- switch(fmt)
- {
- case IMGFMT_444P: case IMGFMT_IYUV: case IMGFMT_RGB24:
- case IMGFMT_422P: case IMGFMT_UYVY: case IMGFMT_BGR24:
- case IMGFMT_411P: case IMGFMT_YUY2: case IMGFMT_IF09:
- case IMGFMT_YV12: case IMGFMT_I420: case IMGFMT_YVU9:
- case IMGFMT_IUYV: case IMGFMT_Y800: case IMGFMT_Y8:
- return ff_vf_next_query_format(vf,fmt);
- }
-
- return 0;
- }
-
-static void uninit(struct vf_instance *vf)
- {
- if(vf->priv)
- {
- if(vf->priv->file) fclose(vf->priv->file);
- if(vf->priv->csdata) free(vf->priv->csdata-15);
- free(vf->priv->bdata);
- free(vf->priv->history);
- free(vf->priv);
- }
- }
-
-static int vf_open(vf_instance_t *vf, char *args)
- {
- struct vf_priv_s *p;
- const char *filename="framediff.log";
- char *ap, *q, *a;
-
- if(args && !(args=strdup(args)))
- {
- nomem:
- ff_mp_msg(MSGT_VFILTER, MSGL_FATAL,
- "%s: Not enough memory.\n", vf->info->name);
- fail:
- uninit(vf);
- free(args);
- return 0;
- }
-
- vf->put_image=put_image;
- vf->uninit=uninit;
- vf->query_format=query_format;
- vf->default_reqs=VFCAP_ACCEPT_STRIDE;
- if(!(vf->priv=p=calloc(1, sizeof(struct vf_priv_s))))
- goto nomem;
-
- p->phase=5;
- p->threshold=0.5;
- p->window=30;
-
- if((ap=args))
- while(*ap)
- {
- q=ap;
- if((ap=strchr(q, ':'))) *ap++=0; else ap=q+strlen(q);
- if((a=strchr(q, '='))) *a++=0; else a=q+strlen(q);
-
- switch(*q)
- {
- case 0: break;
- case 'f': filename=a; break;
- case 't': p->threshold=atof(a); break;
- case 'w': p->window=5*(atoi(a)+4)/5; break;
- case 'd': p->deghost=atoi(a); break;
- case 'p':
- if(q[1]=='h') p->phase=atoi(a);
- else p->pass=atoi(a);
- break;
-
- case 'h':
- ff_mp_msg(MSGT_VFILTER, MSGL_INFO,
- "\n%s options:\n\n"
- "pass=1|2 - Use 2-pass mode.\n"
- "file=filename - Set the 2-pass log file name "
- "(default %s).\n"
- "threshold=value - Set the pattern recognition "
- "sensitivity (default %g).\n"
- "deghost=value - Select deghosting threshold "
- "(default %d).\n"
- "window=numframes - Set the statistics window "
- "for 1-pass mode (default %d).\n"
- "phase=0|1|2|3|4 - Set the initial phase "
- "for 1-pass mode (default %d).\n\n"
- "The option names can be abbreviated to the shortest "
- "unique prefix.\n\n",
- vf->info->name, filename, p->threshold, p->deghost,
- p->window, p->phase%5);
- break;
-
- default:
- ff_mp_msg(MSGT_VFILTER, MSGL_FATAL,
- "%s: Unknown argument %s.\n", vf->info->name, q);
- goto fail;
- }
- }
-
- switch(p->pass)
- {
- case 1:
- if(!(p->file=fopen(filename, "w")))
- {
- ff_mp_msg(MSGT_VFILTER, MSGL_FATAL,
- "%s: Can't create file %s.\n", vf->info->name, filename);
- goto fail;
- }
-
- break;
-
- case 2:
- if(!(p->file=fopen(filename, "r")))
- {
- ff_mp_msg(MSGT_VFILTER, MSGL_FATAL,
- "%s: Can't open file %s.\n", vf->info->name, filename);
- goto fail;
- }
-
- if(!analyze(p))
- goto fail;
-
- fclose(p->file);
- p->file=0;
- break;
- }
-
- if(p->window<5) p->window=5;
- if(!(p->history=calloc(sizeof *p->history, p->window)))
- goto nomem;
-
- diff = diff_C;
-#if HAVE_MMX && HAVE_EBX_AVAILABLE
- if(ff_gCpuCaps.hasMMX) diff = diff_MMX;
-#endif
-
- free(args);
- return 1;
- }
-
-const vf_info_t ff_vf_info_divtc =
- {
- "inverse telecine for deinterlaced video",
- "divtc",
- "Ville Saari",
- "",
- vf_open,
- NULL
- };
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_down3dright.c b/ffmpeg/libavfilter/libmpcodecs/vf_down3dright.c
deleted file mode 100644
index 5c95ce6..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_down3dright.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <inttypes.h>
-
-#include "config.h"
-#include "mp_msg.h"
-#include "cpudetect.h"
-
-#include "img_format.h"
-#include "mp_image.h"
-#include "vf.h"
-
-#include "libvo/fastmemcpy.h"
-
-struct vf_priv_s {
- int skipline;
- int scalew;
- int scaleh;
-};
-
-static void toright(unsigned char *dst[3], unsigned char *src[3],
- int dststride[3], int srcstride[3],
- int w, int h, struct vf_priv_s* p)
-{
- int k;
-
- for (k = 0; k < 3; k++) {
- unsigned char* fromL = src[k];
- unsigned char* fromR = src[k];
- unsigned char* to = dst[k];
- int src = srcstride[k];
- int dst = dststride[k];
- int ss;
- unsigned int dd;
- int i;
-
- if (k > 0) {
- i = h / 4 - p->skipline / 2;
- ss = src * (h / 4 + p->skipline / 2);
- dd = w / 4;
- } else {
- i = h / 2 - p->skipline;
- ss = src * (h / 2 + p->skipline);
- dd = w / 2;
- }
- fromR += ss;
- for ( ; i > 0; i--) {
- int j;
- unsigned char* t = to;
- unsigned char* sL = fromL;
- unsigned char* sR = fromR;
-
- if (p->scalew == 1) {
- for (j = dd; j > 0; j--) {
- *t++ = (sL[0] + sL[1]) / 2;
- sL+=2;
- }
- for (j = dd ; j > 0; j--) {
- *t++ = (sR[0] + sR[1]) / 2;
- sR+=2;
- }
- } else {
- for (j = dd * 2 ; j > 0; j--)
- *t++ = *sL++;
- for (j = dd * 2 ; j > 0; j--)
- *t++ = *sR++;
- }
- if (p->scaleh == 1) {
- fast_memcpy(to + dst, to, dst);
- to += dst;
- }
- to += dst;
- fromL += src;
- fromR += src;
- }
- //printf("K %d %d %d %d %d \n", k, w, h, src, dst);
- }
-}
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
-{
- mp_image_t *dmpi;
-
- // hope we'll get DR buffer:
- dmpi=ff_vf_get_image(vf->next, IMGFMT_YV12,
- MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE |
- ((vf->priv->scaleh == 1) ? MP_IMGFLAG_READABLE : 0),
- mpi->w * vf->priv->scalew,
- mpi->h / vf->priv->scaleh - vf->priv->skipline);
-
- toright(dmpi->planes, mpi->planes, dmpi->stride,
- mpi->stride, mpi->w, mpi->h, vf->priv);
-
- return ff_vf_next_put_image(vf,dmpi, pts);
-}
-
-static int config(struct vf_instance *vf,
- int width, int height, int d_width, int d_height,
- unsigned int flags, unsigned int outfmt)
-{
- /* FIXME - also support UYVY output? */
- return ff_vf_next_config(vf, width * vf->priv->scalew,
- height / vf->priv->scaleh - vf->priv->skipline, d_width, d_height, flags, IMGFMT_YV12);
-}
-
-
-static int query_format(struct vf_instance *vf, unsigned int fmt)
-{
- /* FIXME - really any YUV 4:2:0 input format should work */
- switch (fmt) {
- case IMGFMT_YV12:
- case IMGFMT_IYUV:
- case IMGFMT_I420:
- return ff_vf_next_query_format(vf, IMGFMT_YV12);
- }
- return 0;
-}
-
-static void uninit(struct vf_instance *vf)
-{
- free(vf->priv);
-}
-
-static int vf_open(vf_instance_t *vf, char *args)
-{
- vf->config=config;
- vf->query_format=query_format;
- vf->put_image=put_image;
- vf->uninit=uninit;
-
- vf->priv = calloc(1, sizeof (struct vf_priv_s));
- vf->priv->skipline = 0;
- vf->priv->scalew = 1;
- vf->priv->scaleh = 2;
- if (args) sscanf(args, "%d:%d:%d", &vf->priv->skipline, &vf->priv->scalew, &vf->priv->scaleh);
-
- return 1;
-}
-
-const vf_info_t ff_vf_info_down3dright = {
- "convert stereo movie from top-bottom to left-right field",
- "down3dright",
- "Zdenek Kabelac",
- "",
- vf_open,
- NULL
-};
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_eq.c b/ffmpeg/libavfilter/libmpcodecs/vf_eq.c
index 4e256d9..c926c51 100644
--- a/ffmpeg/libavfilter/libmpcodecs/vf_eq.c
+++ b/ffmpeg/libavfilter/libmpcodecs/vf_eq.c
@@ -31,7 +31,7 @@
#include "libvo/video_out.h"
-static struct vf_priv_s {
+struct vf_priv_s {
unsigned char *buf;
int brightness;
int contrast;
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_fil.c b/ffmpeg/libavfilter/libmpcodecs/vf_fil.c
deleted file mode 100644
index 80c6648..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_fil.c
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "config.h"
-#include "mp_msg.h"
-
-#include "mp_image.h"
-#include "vf.h"
-
-struct vf_priv_s {
- int interleave;
- int height;
- int width;
- int stridefactor;
-};
-
-//===========================================================================//
-
-static int config(struct vf_instance *vf,
- int width, int height, int d_width, int d_height,
- unsigned int flags, unsigned int outfmt){
- int pixel_stride= (width+15)&~15; //FIXME this is ust a guess ... especially for non planar its somewhat bad one
-
-#if 0
- if(mpi->flags&MP_IMGFLAG_PLANAR)
- pixel_stride= mpi->stride[0];
- else
- pixel_stride= 8*mpi->stride[0] / mpi->bpp;
-
-#endif
-
- if(vf->priv->interleave){
- vf->priv->height= 2*height;
- vf->priv->width= width - (pixel_stride/2);
- vf->priv->stridefactor=1;
- }else{
- vf->priv->height= height/2;
- vf->priv->width= width + pixel_stride;
- vf->priv->stridefactor=4;
- }
-//printf("hX %d %d %d\n", vf->priv->width,vf->priv->height,vf->priv->stridefactor);
-
- return ff_vf_next_config(vf, vf->priv->width, vf->priv->height,
- (d_width*vf->priv->stridefactor)>>1, 2*d_height/vf->priv->stridefactor, flags, outfmt);
-}
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
- if(mpi->flags&MP_IMGFLAG_DIRECT){
- // we've used DR, so we're ready...
- return ff_vf_next_put_image(vf,(mp_image_t*)mpi->priv, pts);
- }
-
- vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
- MP_IMGTYPE_EXPORT, MP_IMGFLAG_ACCEPT_STRIDE,
- vf->priv->width, vf->priv->height);
-
- // set up mpi as a double-stride image of dmpi:
- vf->dmpi->planes[0]=mpi->planes[0];
- vf->dmpi->stride[0]=(mpi->stride[0]*vf->priv->stridefactor)>>1;
- if(vf->dmpi->flags&MP_IMGFLAG_PLANAR){
- vf->dmpi->planes[1]=mpi->planes[1];
- vf->dmpi->stride[1]=(mpi->stride[1]*vf->priv->stridefactor)>>1;
- vf->dmpi->planes[2]=mpi->planes[2];
- vf->dmpi->stride[2]=(mpi->stride[2]*vf->priv->stridefactor)>>1;
- } else
- vf->dmpi->planes[1]=mpi->planes[1]; // passthru bgr8 palette!!!
-
- return ff_vf_next_put_image(vf,vf->dmpi, pts);
-}
-
-//===========================================================================//
-
-static void uninit(struct vf_instance *vf)
-{
- free(vf->priv);
-}
-
-static int vf_open(vf_instance_t *vf, char *args){
- vf->config=config;
- vf->put_image=put_image;
- vf->uninit=uninit;
- vf->default_reqs=VFCAP_ACCEPT_STRIDE;
- vf->priv=calloc(1, sizeof(struct vf_priv_s));
- vf->priv->interleave= args && (*args == 'i');
- return 1;
-}
-
-const vf_info_t ff_vf_info_fil = {
- "fast (de)interleaver",
- "fil",
- "Michael Niedermayer",
- "",
- vf_open,
- NULL
-};
-
-//===========================================================================//
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_filmdint.c b/ffmpeg/libavfilter/libmpcodecs/vf_filmdint.c
deleted file mode 100644
index 93354e2..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_filmdint.c
+++ /dev/null
@@ -1,1461 +0,0 @@
-/*
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/time.h>
-
-#include "config.h"
-#include "mp_msg.h"
-#include "cpudetect.h"
-
-#include "img_format.h"
-#include "mp_image.h"
-#include "vd.h"
-#include "vf.h"
-#include "cmmx.h"
-#include "libavutil/x86/asm.h"
-#include "libvo/fastmemcpy.h"
-
-#define NUM_STORED 4
-
-enum pu_field_type_t {
- PU_1ST_OF_3,
- PU_2ND_OF_3,
- PU_3RD_OF_3,
- PU_1ST_OF_2,
- PU_2ND_OF_2,
- PU_INTERLACED
-};
-
-struct metrics {
- /* This struct maps to a packed word 64-bit MMX register */
- unsigned short int even;
- unsigned short int odd;
- unsigned short int noise;
- unsigned short int temp;
-} __attribute__ ((aligned (8)));
-
-struct frame_stats {
- struct metrics tiny, low, high, bigger, twox, max;
- struct { unsigned int even, odd, noise, temp; } sad;
- unsigned short interlaced_high;
- unsigned short interlaced_low;
- unsigned short num_blocks;
-};
-
-struct vf_priv_s {
- unsigned long inframes;
- unsigned long outframes;
- enum pu_field_type_t prev_type;
- unsigned swapped, chroma_swapped;
- unsigned luma_only;
- unsigned verbose;
- unsigned fast;
- unsigned long w, h, cw, ch, stride, chroma_stride, nplanes;
- unsigned long sad_thres;
- unsigned long dint_thres;
- unsigned char *memory_allocated;
- unsigned char *planes[2*NUM_STORED][4];
- unsigned char **old_planes;
- unsigned long static_idx;
- unsigned long temp_idx;
- unsigned long crop_x, crop_y, crop_cx, crop_cy;
- unsigned long export_count, merge_count;
- unsigned long num_breaks;
- unsigned long num_copies;
- long in_inc, out_dec, iosync;
- long num_fields;
- long prev_fields;
- long notout;
- long mmx2;
- unsigned small_bytes[2];
- unsigned mmx_temp[2];
- struct frame_stats stats[2];
- struct metrics thres;
- char chflag;
- double diff_time, merge_time, decode_time, vo_time, filter_time;
-};
-
-#define PPZ { 2000, 2000, 0, 2000 }
-#define PPR { 2000, 2000, 0, 2000 }
-static const struct frame_stats ppzs = {PPZ,PPZ,PPZ,PPZ,PPZ,PPZ,PPZ,0,0,9999};
-static const struct frame_stats pprs = {PPR,PPR,PPR,PPR,PPR,PPR,PPR,0,0,9999};
-
-#ifndef MIN
-#define MIN(a,b) (((a)<(b))?(a):(b))
-#endif
-#ifndef MAX
-#define MAX(a,b) (((a)>(b))?(a):(b))
-#endif
-
-#define PDIFFUB(X,Y,T) "movq " #X "," #T "\n\t" \
- "psubusb " #Y "," #T "\n\t" \
- "psubusb " #X "," #Y "\n\t" \
- "paddusb " #Y "," #T "\n\t"
-
-#define PDIFFUBT(X,Y,T) "movq " #X "," #T "\n\t" \
- "psubusb " #Y "," #T "\n\t" \
- "psubusb " #X "," #Y "\n\t" \
- "paddusb " #T "," #Y "\n\t"
-
-#define PSUMBW(X,T,Z) "movq " #X "," #T "\n\t" \
- "punpcklbw " #Z "," #X "\n\t" \
- "punpckhbw " #Z "," #T "\n\t" \
- "paddw " #T "," #X "\n\t" \
- "movq " #X "," #T "\n\t" \
- "psllq $32, " #T "\n\t" \
- "paddw " #T "," #X "\n\t" \
- "movq " #X "," #T "\n\t" \
- "psllq $16, " #T "\n\t" \
- "paddw " #T "," #X "\n\t" \
- "psrlq $48, " #X "\n\t"
-
-#define PSADBW(X,Y,T,Z) PDIFFUBT(X,Y,T) PSUMBW(Y,T,Z)
-
-#define PMAXUB(X,Y) "psubusb " #X "," #Y "\n\tpaddusb " #X "," #Y "\n\t"
-#define PMAXUW(X,Y) "psubusw " #X "," #Y "\n\tpaddusw " #X "," #Y "\n\t"
-#define PMINUBT(X,Y,T) "movq " #Y "," #T "\n\t" \
- "psubusb " #X "," #T "\n\t" \
- "psubusb " #T "," #Y "\n\t"
-#define PAVGB(X,Y) "pavgusb " #X "," #Y "\n\t"
-
-static inline void
-get_metrics_c(unsigned char *a, unsigned char *b, int as, int bs, int lines,
- struct metrics *m)
-{
- a -= as;
- b -= bs;
- do {
- cmmx_t old_po = *(cmmx_t*)(a );
- cmmx_t po = *(cmmx_t*)(b );
- cmmx_t e = *(cmmx_t*)(b + bs);
- cmmx_t old_o = *(cmmx_t*)(a + 2*as);
- cmmx_t o = *(cmmx_t*)(b + 2*bs);
- cmmx_t ne = *(cmmx_t*)(b + 3*bs);
- cmmx_t old_no = *(cmmx_t*)(a + 4*as);
- cmmx_t no = *(cmmx_t*)(b + 4*bs);
-
- cmmx_t qup_old_odd = p31avgb(old_o, old_po);
- cmmx_t qup_odd = p31avgb( o, po);
- cmmx_t qdown_old_odd = p31avgb(old_o, old_no);
- cmmx_t qdown_odd = p31avgb( o, no);
-
- cmmx_t qup_even = p31avgb(ne, e);
- cmmx_t qdown_even = p31avgb(e, ne);
-
- cmmx_t temp_up_diff = pdiffub(qdown_even, qup_old_odd);
- cmmx_t noise_up_diff = pdiffub(qdown_even, qup_odd);
- cmmx_t temp_down_diff = pdiffub(qup_even, qdown_old_odd);
- cmmx_t noise_down_diff = pdiffub(qup_even, qdown_odd);
-
- cmmx_t odd_diff = pdiffub(o, old_o);
- m->odd += psumbw(odd_diff);
- m->even += psadbw(e, *(cmmx_t*)(a+as));
-
- temp_up_diff = pminub(temp_up_diff, temp_down_diff);
- temp_up_diff = pminub(temp_up_diff, odd_diff);
- m->temp += psumbw(temp_up_diff);
- noise_up_diff = pminub(noise_up_diff, odd_diff);
- noise_up_diff = pminub(noise_up_diff, noise_down_diff);
-
- m->noise += psumbw(noise_up_diff);
- a += 2*as;
- b += 2*bs;
- } while (--lines);
-}
-
-static inline void
-get_metrics_fast_c(unsigned char *a, unsigned char *b, int as, int bs,
- int lines, struct metrics *m)
-{
- a -= as;
- b -= bs;
- do {
- cmmx_t old_po = (*(cmmx_t*)(a ) >> 1) & ~SIGN_BITS;
- cmmx_t po = (*(cmmx_t*)(b ) >> 1) & ~SIGN_BITS;
- cmmx_t old_e = (*(cmmx_t*)(a + as) >> 1) & ~SIGN_BITS;
- cmmx_t e = (*(cmmx_t*)(b + bs) >> 1) & ~SIGN_BITS;
- cmmx_t old_o = (*(cmmx_t*)(a + 2*as) >> 1) & ~SIGN_BITS;
- cmmx_t o = (*(cmmx_t*)(b + 2*bs) >> 1) & ~SIGN_BITS;
- cmmx_t ne = (*(cmmx_t*)(b + 3*bs) >> 1) & ~SIGN_BITS;
- cmmx_t old_no = (*(cmmx_t*)(a + 4*as) >> 1) & ~SIGN_BITS;
- cmmx_t no = (*(cmmx_t*)(b + 4*bs) >> 1) & ~SIGN_BITS;
-
- cmmx_t qup_old_odd = p31avgb_s(old_o, old_po);
- cmmx_t qup_odd = p31avgb_s( o, po);
- cmmx_t qdown_old_odd = p31avgb_s(old_o, old_no);
- cmmx_t qdown_odd = p31avgb_s( o, no);
-
- cmmx_t qup_even = p31avgb_s(ne, e);
- cmmx_t qdown_even = p31avgb_s(e, ne);
-
- cmmx_t temp_up_diff = pdiffub_s(qdown_even, qup_old_odd);
- cmmx_t noise_up_diff = pdiffub_s(qdown_even, qup_odd);
- cmmx_t temp_down_diff = pdiffub_s(qup_even, qdown_old_odd);
- cmmx_t noise_down_diff = pdiffub_s(qup_even, qdown_odd);
-
- cmmx_t odd_diff = pdiffub_s(o, old_o);
- m->odd += psumbw_s(odd_diff) << 1;
- m->even += psadbw_s(e, old_e) << 1;
-
- temp_up_diff = pminub_s(temp_up_diff, temp_down_diff);
- temp_up_diff = pminub_s(temp_up_diff, odd_diff);
- m->temp += psumbw_s(temp_up_diff) << 1;
- noise_up_diff = pminub_s(noise_up_diff, odd_diff);
- noise_up_diff = pminub_s(noise_up_diff, noise_down_diff);
-
- m->noise += psumbw_s(noise_up_diff) << 1;
- a += 2*as;
- b += 2*bs;
- } while (--lines);
-}
-
-static inline void
-get_metrics_faster_c(unsigned char *a, unsigned char *b, int as, int bs,
- int lines, struct metrics *m)
-{
- a -= as;
- b -= bs;
- do {
- cmmx_t old_po = (*(cmmx_t*)(a )>>1) & ~SIGN_BITS;
- cmmx_t po = (*(cmmx_t*)(b )>>1) & ~SIGN_BITS;
- cmmx_t old_e = (*(cmmx_t*)(a + as)>>1) & ~SIGN_BITS;
- cmmx_t e = (*(cmmx_t*)(b + bs)>>1) & ~SIGN_BITS;
- cmmx_t old_o = (*(cmmx_t*)(a + 2*as)>>1) & ~SIGN_BITS;
- cmmx_t o = (*(cmmx_t*)(b + 2*bs)>>1) & ~SIGN_BITS;
- cmmx_t ne = (*(cmmx_t*)(b + 3*bs)>>1) & ~SIGN_BITS;
-
- cmmx_t down_even = p31avgb_s(e, ne);
- cmmx_t up_odd = p31avgb_s(o, po);
- cmmx_t up_old_odd = p31avgb_s(old_o, old_po);
-
- cmmx_t odd_diff = pdiffub_s(o, old_o);
- cmmx_t temp_diff = pdiffub_s(down_even, up_old_odd);
- cmmx_t noise_diff = pdiffub_s(down_even, up_odd);
-
- m->even += psadbw_s(e, old_e) << 1;
- m->odd += psumbw_s(odd_diff) << 1;
-
- temp_diff = pminub_s(temp_diff, odd_diff);
- noise_diff = pminub_s(noise_diff, odd_diff);
-
- m->noise += psumbw_s(noise_diff) << 1;
- m->temp += psumbw_s(temp_diff) << 1;
- a += 2*as;
- b += 2*bs;
- } while (--lines);
-
-}
-
-static inline void
-get_block_stats(struct metrics *m, struct vf_priv_s *p, struct frame_stats *s)
-{
- unsigned two_e = m->even + MAX(m->even , p->thres.even );
- unsigned two_o = m->odd + MAX(m->odd , p->thres.odd );
- unsigned two_n = m->noise + MAX(m->noise, p->thres.noise);
- unsigned two_t = m->temp + MAX(m->temp , p->thres.temp );
-
- unsigned e_big = m->even >= (m->odd + two_o + 1)/2;
- unsigned o_big = m->odd >= (m->even + two_e + 1)/2;
- unsigned n_big = m->noise >= (m->temp + two_t + 1)/2;
- unsigned t_big = m->temp >= (m->noise + two_n + 1)/2;
-
- unsigned e2x = m->even >= two_o;
- unsigned o2x = m->odd >= two_e;
- unsigned n2x = m->noise >= two_t;
- unsigned t2x = m->temp >= two_n;
-
- unsigned ntiny_e = m->even > p->thres.even ;
- unsigned ntiny_o = m->odd > p->thres.odd ;
- unsigned ntiny_n = m->noise > p->thres.noise;
- unsigned ntiny_t = m->temp > p->thres.temp ;
-
- unsigned nlow_e = m->even > 2*p->thres.even ;
- unsigned nlow_o = m->odd > 2*p->thres.odd ;
- unsigned nlow_n = m->noise > 2*p->thres.noise;
- unsigned nlow_t = m->temp > 2*p->thres.temp ;
-
- unsigned high_e = m->even > 4*p->thres.even ;
- unsigned high_o = m->odd > 4*p->thres.odd ;
- unsigned high_n = m->noise > 4*p->thres.noise;
- unsigned high_t = m->temp > 4*p->thres.temp ;
-
- unsigned low_il = !n_big && !t_big && ntiny_n && ntiny_t;
- unsigned high_il = !n_big && !t_big && nlow_n && nlow_t;
-
- if (low_il | high_il) {
- s->interlaced_low += low_il;
- s->interlaced_high += high_il;
- } else {
- s->tiny.even += ntiny_e;
- s->tiny.odd += ntiny_o;
- s->tiny.noise += ntiny_n;
- s->tiny.temp += ntiny_t;
-
- s->low .even += nlow_e ;
- s->low .odd += nlow_o ;
- s->low .noise += nlow_n ;
- s->low .temp += nlow_t ;
-
- s->high.even += high_e ;
- s->high.odd += high_o ;
- s->high.noise += high_n ;
- s->high.temp += high_t ;
-
- if (m->even >= p->sad_thres) s->sad.even += m->even ;
- if (m->odd >= p->sad_thres) s->sad.odd += m->odd ;
- if (m->noise >= p->sad_thres) s->sad.noise += m->noise;
- if (m->temp >= p->sad_thres) s->sad.temp += m->temp ;
- }
- s->num_blocks++;
- s->max.even = MAX(s->max.even , m->even );
- s->max.odd = MAX(s->max.odd , m->odd );
- s->max.noise = MAX(s->max.noise, m->noise);
- s->max.temp = MAX(s->max.temp , m->temp );
-
- s->bigger.even += e_big ;
- s->bigger.odd += o_big ;
- s->bigger.noise += n_big ;
- s->bigger.temp += t_big ;
-
- s->twox.even += e2x ;
- s->twox.odd += o2x ;
- s->twox.noise += n2x ;
- s->twox.temp += t2x ;
-
-}
-
-static inline struct metrics
-block_metrics_c(unsigned char *a, unsigned char *b, int as, int bs,
- int lines, struct vf_priv_s *p, struct frame_stats *s)
-{
- struct metrics tm;
- tm.even = tm.odd = tm.noise = tm.temp = 0;
- get_metrics_c(a, b, as, bs, lines, &tm);
- if (sizeof(cmmx_t) < 8)
- get_metrics_c(a+4, b+4, as, bs, lines, &tm);
- get_block_stats(&tm, p, s);
- return tm;
-}
-
-static inline struct metrics
-block_metrics_fast_c(unsigned char *a, unsigned char *b, int as, int bs,
- int lines, struct vf_priv_s *p, struct frame_stats *s)
-{
- struct metrics tm;
- tm.even = tm.odd = tm.noise = tm.temp = 0;
- get_metrics_fast_c(a, b, as, bs, lines, &tm);
- if (sizeof(cmmx_t) < 8)
- get_metrics_fast_c(a+4, b+4, as, bs, lines, &tm);
- get_block_stats(&tm, p, s);
- return tm;
-}
-
-static inline struct metrics
-block_metrics_faster_c(unsigned char *a, unsigned char *b, int as, int bs,
- int lines, struct vf_priv_s *p, struct frame_stats *s)
-{
- struct metrics tm;
- tm.even = tm.odd = tm.noise = tm.temp = 0;
- get_metrics_faster_c(a, b, as, bs, lines, &tm);
- if (sizeof(cmmx_t) < 8)
- get_metrics_faster_c(a+4, b+4, as, bs, lines, &tm);
- get_block_stats(&tm, p, s);
- return tm;
-}
-
-#define MEQ(X,Y) ((X).even == (Y).even && (X).odd == (Y).odd && (X).temp == (Y).temp && (X).noise == (Y).noise)
-
-#define BLOCK_METRICS_TEMPLATE() \
- __asm__ volatile("pxor %mm7, %mm7\n\t" /* The result is colleted in mm7 */ \
- "pxor %mm6, %mm6\n\t" /* Temp to stay at 0 */ \
- ); \
- a -= as; \
- b -= bs; \
- do { \
- __asm__ volatile( \
- "movq (%0,%2), %%mm0\n\t" \
- "movq (%1,%3), %%mm1\n\t" /* mm1 = even */ \
- PSADBW(%%mm1, %%mm0, %%mm4, %%mm6) \
- "paddusw %%mm0, %%mm7\n\t" /* even diff */ \
- "movq (%0,%2,2), %%mm0\n\t" /* mm0 = old odd */ \
- "movq (%1,%3,2), %%mm2\n\t" /* mm2 = odd */ \
- "movq (%0), %%mm3\n\t" \
- "psubusb %4, %%mm3\n\t" \
- PAVGB(%%mm0, %%mm3) \
- PAVGB(%%mm0, %%mm3) /* mm3 = qup old odd */ \
- "movq %%mm0, %%mm5\n\t" \
- PSADBW(%%mm2, %%mm0, %%mm4, %%mm6) \
- "psllq $16, %%mm0\n\t" \
- "paddusw %%mm0, %%mm7\n\t" \
- "movq (%1), %%mm4\n\t" \
- "lea (%0,%2,2), %0\n\t" \
- "lea (%1,%3,2), %1\n\t" \
- "psubusb %4, %%mm4\n\t" \
- PAVGB(%%mm2, %%mm4) \
- PAVGB(%%mm2, %%mm4) /* mm4 = qup odd */ \
- PDIFFUBT(%%mm5, %%mm2, %%mm0) /* mm2 =abs(oldodd-odd) */ \
- "movq (%1,%3), %%mm5\n\t" \
- "psubusb %4, %%mm5\n\t" \
- PAVGB(%%mm1, %%mm5) \
- PAVGB(%%mm5, %%mm1) /* mm1 = qdown even */ \
- PAVGB((%1,%3), %%mm5) /* mm5 = qup next even */ \
- PDIFFUBT(%%mm1, %%mm3, %%mm0) /* mm3 = abs(qupoldo-qde) */ \
- PDIFFUBT(%%mm1, %%mm4, %%mm0) /* mm4 = abs(qupodd-qde) */ \
- PMINUBT(%%mm2, %%mm3, %%mm0) /* limit temp to odd diff */ \
- PMINUBT(%%mm2, %%mm4, %%mm0) /* limit noise to odd diff */ \
- "movq (%1,%3,2), %%mm2\n\t" \
- "psubusb %4, %%mm2\n\t" \
- PAVGB((%1), %%mm2) \
- PAVGB((%1), %%mm2) /* mm2 = qdown odd */ \
- "movq (%0,%2,2), %%mm1\n\t" \
- "psubusb %4, %%mm1\n\t" \
- PAVGB((%0), %%mm1) \
- PAVGB((%0), %%mm1) /* mm1 = qdown old odd */ \
- PDIFFUBT(%%mm5, %%mm2, %%mm0) /* mm2 = abs(qdo-qune) */ \
- PDIFFUBT(%%mm5, %%mm1, %%mm0) /* mm1 = abs(qdoo-qune) */ \
- PMINUBT(%%mm4, %%mm2, %%mm0) /* current */ \
- PMINUBT(%%mm3, %%mm1, %%mm0) /* old */ \
- PSUMBW(%%mm2, %%mm0, %%mm6) \
- PSUMBW(%%mm1, %%mm0, %%mm6) \
- "psllq $32, %%mm2\n\t" \
- "psllq $48, %%mm1\n\t" \
- "paddusw %%mm2, %%mm7\n\t" \
- "paddusw %%mm1, %%mm7\n\t" \
- : "=r" (a), "=r" (b) \
- : "r"((x86_reg)as), "r"((x86_reg)bs), "m" (ones), "0"(a), "1"(b), "X"(*a), "X"(*b) \
- ); \
- } while (--lines);
-
-static inline struct metrics
-block_metrics_3dnow(unsigned char *a, unsigned char *b, int as, int bs,
- int lines, struct vf_priv_s *p, struct frame_stats *s)
-{
- struct metrics tm;
-#if !HAVE_AMD3DNOW
- ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, "block_metrics_3dnow: internal error\n");
-#else
- static const unsigned long long ones = 0x0101010101010101ull;
-
- BLOCK_METRICS_TEMPLATE();
- __asm__ volatile("movq %%mm7, %0\n\temms" : "=m" (tm));
- get_block_stats(&tm, p, s);
-#endif
- return tm;
-}
-
-#undef PSUMBW
-#undef PSADBW
-#undef PMAXUB
-#undef PMINUBT
-#undef PAVGB
-
-#define PSUMBW(X,T,Z) "psadbw " #Z "," #X "\n\t"
-#define PSADBW(X,Y,T,Z) "psadbw " #X "," #Y "\n\t"
-#define PMAXUB(X,Y) "pmaxub " #X "," #Y "\n\t"
-#define PMINUBT(X,Y,T) "pminub " #X "," #Y "\n\t"
-#define PAVGB(X,Y) "pavgb " #X "," #Y "\n\t"
-
-static inline struct metrics
-block_metrics_mmx2(unsigned char *a, unsigned char *b, int as, int bs,
- int lines, struct vf_priv_s *p, struct frame_stats *s)
-{
- struct metrics tm;
-#if !HAVE_MMX
- ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, "block_metrics_mmx2: internal error\n");
-#else
- static const unsigned long long ones = 0x0101010101010101ull;
- x86_reg interlaced;
- x86_reg prefetch_line = (((long)a>>3) & 7) + 10;
-#ifdef DEBUG
- struct frame_stats ts = *s;
-#endif
- __asm__ volatile("prefetcht0 (%0,%2)\n\t"
- "prefetcht0 (%1,%3)\n\t" :
- : "r" (a), "r" (b),
- "r" (prefetch_line * as), "r" (prefetch_line * bs));
-
- BLOCK_METRICS_TEMPLATE();
-
- s->num_blocks++;
- __asm__ volatile(
- "movq %3, %%mm0\n\t"
- "movq %%mm7, %%mm1\n\t"
- "psubusw %%mm0, %%mm1\n\t"
- "movq %%mm1, %%mm2\n\t"
- "paddusw %%mm0, %%mm2\n\t"
- "paddusw %%mm7, %%mm2\n\t"
- "pshufw $0xb1, %%mm2, %%mm3\n\t"
- "pavgw %%mm7, %%mm2\n\t"
- "pshufw $0xb1, %%mm2, %%mm2\n\t"
- "psubusw %%mm7, %%mm2\n\t"
- "pcmpeqw %%mm6, %%mm2\n\t" /* 1 if >= 1.5x */
- "psubusw %%mm7, %%mm3\n\t"
- "pcmpeqw %%mm6, %%mm3\n\t" /* 1 if >= 2x */
- "movq %1, %%mm4\n\t"
- "movq %2, %%mm5\n\t"
- "psubw %%mm2, %%mm4\n\t"
- "psubw %%mm3, %%mm5\n\t"
- "movq %%mm4, %1\n\t"
- "movq %%mm5, %2\n\t"
- "pxor %%mm4, %%mm4\n\t"
- "pcmpeqw %%mm1, %%mm4\n\t" /* 1 if <= t */
- "psubusw %%mm0, %%mm1\n\t"
- "pxor %%mm5, %%mm5\n\t"
- "pcmpeqw %%mm1, %%mm5\n\t" /* 1 if <= 2t */
- "psubusw %%mm0, %%mm1\n\t"
- "psubusw %%mm0, %%mm1\n\t"
- "pcmpeqw %%mm6, %%mm1\n\t" /* 1 if <= 4t */
- "pshufw $0xb1, %%mm2, %%mm0\n\t"
- "por %%mm2, %%mm0\n\t" /* 1 if not close */
- "punpckhdq %%mm0, %%mm0\n\t"
- "movq %%mm4, %%mm2\n\t" /* tttt */
- "punpckhdq %%mm5, %%mm2\n\t" /* ttll */
- "por %%mm2, %%mm0\n\t"
- "pcmpeqd %%mm6, %%mm0\n\t" /* close && big */
- "psrlq $16, %%mm0\n\t"
- "psrlw $15, %%mm0\n\t"
- "movd %%mm0, %0\n\t"
- : "=r" (interlaced), "=m" (s->bigger), "=m" (s->twox)
- : "m" (p->thres)
- );
-
- if (interlaced) {
- s->interlaced_high += interlaced >> 16;
- s->interlaced_low += interlaced;
- } else {
- __asm__ volatile(
- "pcmpeqw %%mm0, %%mm0\n\t" /* -1 */
- "psubw %%mm0, %%mm4\n\t"
- "psubw %%mm0, %%mm5\n\t"
- "psubw %%mm0, %%mm1\n\t"
- "paddw %0, %%mm4\n\t"
- "paddw %1, %%mm5\n\t"
- "paddw %2, %%mm1\n\t"
- "movq %%mm4, %0\n\t"
- "movq %%mm5, %1\n\t"
- "movq %%mm1, %2\n\t"
- : "=m" (s->tiny), "=m" (s->low), "=m" (s->high)
- );
-
- __asm__ volatile(
- "pshufw $0, %2, %%mm0\n\t"
- "psubusw %%mm7, %%mm0\n\t"
- "pcmpeqw %%mm6, %%mm0\n\t" /* 0 if below sad_thres */
- "pand %%mm7, %%mm0\n\t"
- "movq %%mm0, %%mm1\n\t"
- "punpcklwd %%mm6, %%mm0\n\t" /* sad even, odd */
- "punpckhwd %%mm6, %%mm1\n\t" /* sad noise, temp */
- "paddd %0, %%mm0\n\t"
- "paddd %1, %%mm1\n\t"
- "movq %%mm0, %0\n\t"
- "movq %%mm1, %1\n\t"
- : "=m" (s->sad.even), "=m" (s->sad.noise)
- : "m" (p->sad_thres)
- );
- }
-
- __asm__ volatile(
- "movq %%mm7, (%1)\n\t"
- PMAXUW((%0), %%mm7)
- "movq %%mm7, (%0)\n\t"
- "emms"
- : : "r" (&s->max), "r" (&tm), "X" (s->max)
- : "memory"
- );
-#ifdef DEBUG
- if (1) {
- struct metrics cm;
- a -= 7*as;
- b -= 7*bs;
- cm = block_metrics_c(a, b, as, bs, 4, p, &ts);
- if (!MEQ(tm, cm))
- ff_mp_msg(MSGT_VFILTER, MSGL_WARN, "Bad metrics\n");
- if (s) {
-# define CHECK(X) if (!MEQ(s->X, ts.X)) \
- ff_mp_msg(MSGT_VFILTER, MSGL_WARN, "Bad " #X "\n");
- CHECK(tiny);
- CHECK(low);
- CHECK(high);
- CHECK(sad);
- CHECK(max);
- }
- }
-#endif
-#endif
- return tm;
-}
-
-static inline int
-dint_copy_line_mmx2(unsigned char *dst, unsigned char *a, long bos,
- long cos, int ds, int ss, int w, int t)
-{
-#if !HAVE_MMX
- ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, "dint_copy_line_mmx2: internal error\n");
- return 0;
-#else
- unsigned long len = (w+7) >> 3;
- int ret;
- __asm__ volatile (
- "pxor %%mm6, %%mm6 \n\t" /* deinterlaced pixel counter */
- "movd %0, %%mm7 \n\t"
- "punpcklbw %%mm7, %%mm7 \n\t"
- "punpcklwd %%mm7, %%mm7 \n\t"
- "punpckldq %%mm7, %%mm7 \n\t" /* mm7 = threshold */
- : /* no output */
- : "rm" (t)
- );
- do {
- __asm__ volatile (
- "movq (%0), %%mm0\n\t"
- "movq (%0,%3,2), %%mm1\n\t"
- "movq %%mm0, (%2)\n\t"
- "pmaxub %%mm1, %%mm0\n\t"
- "pavgb (%0), %%mm1\n\t"
- "psubusb %%mm1, %%mm0\n\t"
- "paddusb %%mm7, %%mm0\n\t" /* mm0 = max-avg+thr */
- "movq (%0,%1), %%mm2\n\t"
- "movq (%0,%5), %%mm3\n\t"
- "movq %%mm2, %%mm4\n\t"
- PDIFFUBT(%%mm1, %%mm2, %%mm5)
- PDIFFUBT(%%mm1, %%mm3, %%mm5)
- "pminub %%mm2, %%mm3\n\t"
- "pcmpeqb %%mm3, %%mm2\n\t" /* b = min */
- "pand %%mm2, %%mm4\n\t"
- "pandn (%0,%5), %%mm2\n\t"
- "por %%mm4, %%mm2\n\t"
- "pminub %%mm0, %%mm3\n\t"
- "pcmpeqb %%mm0, %%mm3\n\t" /* set to 1s if >= threshold */
- "psubb %%mm3, %%mm6\n\t" /* count pixels above thr. */
- "pand %%mm3, %%mm1 \n\t"
- "pandn %%mm2, %%mm3 \n\t"
- "por %%mm3, %%mm1 \n\t" /* avg if >= threshold */
- "movq %%mm1, (%2,%4) \n\t"
- : /* no output */
- : "r" (a), "r" ((x86_reg)bos), "r" ((x86_reg)dst), "r" ((x86_reg)ss), "r" ((x86_reg)ds), "r" ((x86_reg)cos)
- );
- a += 8;
- dst += 8;
- } while (--len);
-
- __asm__ volatile ("pxor %%mm7, %%mm7 \n\t"
- "psadbw %%mm6, %%mm7 \n\t"
- "movd %%mm7, %0 \n\t"
- "emms \n\t"
- : "=r" (ret)
- );
- return ret;
-#endif
-}
-
-static inline int
-dint_copy_line(unsigned char *dst, unsigned char *a, long bos,
- long cos, int ds, int ss, int w, int t)
-{
- unsigned long len = ((unsigned long)w+sizeof(cmmx_t)-1) / sizeof(cmmx_t);
- cmmx_t dint_count = 0;
- cmmx_t thr;
- t |= t << 8;
- thr = t | (t << 16);
- if (sizeof(cmmx_t) > 4)
- thr |= thr << (sizeof(cmmx_t)*4);
- do {
- cmmx_t e = *(cmmx_t*)a;
- cmmx_t ne = *(cmmx_t*)(a+2*ss);
- cmmx_t o = *(cmmx_t*)(a+bos);
- cmmx_t oo = *(cmmx_t*)(a+cos);
- cmmx_t maxe = pmaxub(e, ne);
- cmmx_t avge = pavgb(e, ne);
- cmmx_t max_diff = maxe - avge + thr; /* 0<=max-avg<128, thr<128 */
- cmmx_t diffo = pdiffub(avge, o);
- cmmx_t diffoo = pdiffub(avge, oo);
- cmmx_t diffcmp = pcmpgtub(diffo, diffoo);
- cmmx_t bo = ((oo ^ o) & diffcmp) ^ o;
- cmmx_t diffbo = ((diffoo ^ diffo) & diffcmp) ^ diffo;
- cmmx_t above_thr = ~pcmpgtub(max_diff, diffbo);
- cmmx_t bo_or_avg = ((avge ^ bo) & above_thr) ^ bo;
- dint_count += above_thr & ONE_BYTES;
- *(cmmx_t*)(dst) = e;
- *(cmmx_t*)(dst+ds) = bo_or_avg;
- a += sizeof(cmmx_t);
- dst += sizeof(cmmx_t);
- } while (--len);
- return psumbw(dint_count);
-}
-
-static int
-dint_copy_plane(unsigned char *d, unsigned char *a, unsigned char *b,
- unsigned char *c, unsigned long w, unsigned long h,
- unsigned long ds, unsigned long ss, unsigned long threshold,
- long field, long mmx2)
-{
- unsigned long ret = 0;
- long bos = b - a;
- long cos = c - a;
- if (field) {
- fast_memcpy(d, b, w);
- h--;
- d += ds;
- a += ss;
- }
- bos += ss;
- cos += ss;
- while (h > 2) {
- if (threshold >= 128) {
- fast_memcpy(d, a, w);
- fast_memcpy(d+ds, a+bos, w);
- } else if (mmx2 == 1) {
- ret += dint_copy_line_mmx2(d, a, bos, cos, ds, ss, w, threshold);
- } else
- ret += dint_copy_line(d, a, bos, cos, ds, ss, w, threshold);
- h -= 2;
- d += 2*ds;
- a += 2*ss;
- }
- fast_memcpy(d, a, w);
- if (h == 2)
- fast_memcpy(d+ds, a+bos, w);
- return ret;
-}
-
-static void
-copy_merge_fields(struct vf_priv_s *p, mp_image_t *dmpi,
- unsigned char **old, unsigned char **new, unsigned long show)
-{
- unsigned long threshold = 256;
- unsigned long field = p->swapped;
- unsigned long dint_pixels = 0;
- unsigned char **other = old;
- if (show >= 12 || !(show & 3))
- show >>= 2, other = new, new = old;
- if (show <= 2) { /* Single field: de-interlace */
- threshold = p->dint_thres;
- field ^= show & 1;
- old = new;
- } else if (show == 3)
- old = new;
- else
- field ^= 1;
- dint_pixels +=dint_copy_plane(dmpi->planes[0], old[0], new[0],
- other[0], p->w, p->h, dmpi->stride[0],
- p->stride, threshold, field, p->mmx2);
- if (dmpi->flags & MP_IMGFLAG_PLANAR) {
- if (p->luma_only)
- old = new, other = new;
- else
- threshold = threshold/2 + 1;
- field ^= p->chroma_swapped;
- dint_copy_plane(dmpi->planes[1], old[1], new[1],
- other[1], p->cw, p->ch, dmpi->stride[1],
- p->chroma_stride, threshold, field, p->mmx2);
- dint_copy_plane(dmpi->planes[2], old[2], new[2],
- other[2], p->cw, p->ch, dmpi->stride[2],
- p->chroma_stride, threshold, field, p->mmx2);
- }
- if (dint_pixels > 0 && p->verbose)
- ff_mp_msg(MSGT_VFILTER,MSGL_INFO,"Deinterlaced %lu pixels\n",dint_pixels);
-}
-
-static void diff_planes(struct vf_priv_s *p, struct frame_stats *s,
- unsigned char *of, unsigned char *nf,
- int w, int h, int os, int ns, int swapped)
-{
- int i, y;
- int align = -(long)nf & 7;
- of += align;
- nf += align;
- w -= align;
- if (swapped)
- of -= os, nf -= ns;
- i = (h*3 >> 7) & ~1;
- of += i*os + 8;
- nf += i*ns + 8;
- h -= i;
- w -= 16;
-
- memset(s, 0, sizeof(*s));
-
- for (y = (h-8) >> 3; y; y--) {
- if (p->mmx2 == 1) {
- for (i = 0; i < w; i += 8)
- block_metrics_mmx2(of+i, nf+i, os, ns, 4, p, s);
- } else if (p->mmx2 == 2) {
- for (i = 0; i < w; i += 8)
- block_metrics_3dnow(of+i, nf+i, os, ns, 4, p, s);
- } else if (p->fast > 3) {
- for (i = 0; i < w; i += 8)
- block_metrics_faster_c(of+i, nf+i, os, ns, 4, p, s);
- } else if (p->fast > 1) {
- for (i = 0; i < w; i += 8)
- block_metrics_fast_c(of+i, nf+i, os, ns, 4, p, s);
- } else {
- for (i = 0; i < w; i += 8)
- block_metrics_c(of+i, nf+i, os, ns, 4, p, s);
- }
- of += 8*os;
- nf += 8*ns;
- }
-}
-
-#define METRICS(X) (X).even, (X).odd, (X).noise, (X).temp
-
-static void diff_fields(struct vf_priv_s *p, struct frame_stats *s,
- unsigned char **old, unsigned char **new)
-{
- diff_planes(p, s, old[0], new[0], p->w, p->h,
- p->stride, p->stride, p->swapped);
- s->sad.even = (s->sad.even * 16ul) / s->num_blocks;
- s->sad.odd = (s->sad.odd * 16ul) / s->num_blocks;
- s->sad.noise = (s->sad.noise * 16ul) / s->num_blocks;
- s->sad.temp = (s->sad.temp * 16ul) / s->num_blocks;
- if (p->verbose)
- ff_mp_msg(MSGT_VFILTER, MSGL_INFO, "%lu%c M:%d/%d/%d/%d - %d, "
- "t:%d/%d/%d/%d, l:%d/%d/%d/%d, h:%d/%d/%d/%d, bg:%d/%d/%d/%d, "
- "2x:%d/%d/%d/%d, sad:%d/%d/%d/%d, lil:%d, hil:%d, ios:%.1f\n",
- p->inframes, p->chflag, METRICS(s->max), s->num_blocks,
- METRICS(s->tiny), METRICS(s->low), METRICS(s->high),
- METRICS(s->bigger), METRICS(s->twox), METRICS(s->sad),
- s->interlaced_low, s->interlaced_high,
- p->iosync / (double) p->in_inc);
-}
-
-static const char *parse_args(struct vf_priv_s *p, const char *args)
-{
- args--;
- while (args && *++args &&
- (sscanf(args, "io=%lu:%lu", &p->out_dec, &p->in_inc) == 2 ||
- sscanf(args, "diff_thres=%hu", &p->thres.even ) == 1 ||
- sscanf(args, "comb_thres=%hu", &p->thres.noise) == 1 ||
- sscanf(args, "sad_thres=%lu", &p->sad_thres ) == 1 ||
- sscanf(args, "dint_thres=%lu", &p->dint_thres ) == 1 ||
- sscanf(args, "fast=%u", &p->fast ) == 1 ||
- sscanf(args, "mmx2=%lu", &p->mmx2 ) == 1 ||
- sscanf(args, "luma_only=%u", &p->luma_only ) == 1 ||
- sscanf(args, "verbose=%u", &p->verbose ) == 1 ||
- sscanf(args, "crop=%lu:%lu:%lu:%lu", &p->w,
- &p->h, &p->crop_x, &p->crop_y) == 4))
- args = strchr(args, '/');
- return args;
-}
-
-static unsigned long gcd(unsigned long x, unsigned long y)
-{
- unsigned long t;
- if (x > y)
- t = x, x = y, y = t;
-
- while (x) {
- t = y % x;
- y = x;
- x = t;
- }
- return y;
-}
-
-static void init(struct vf_priv_s *p, mp_image_t *mpi)
-{
- unsigned long i;
- unsigned long plane_size, chroma_plane_size;
- unsigned char *plane;
- unsigned long cos, los;
- p->crop_cx = p->crop_x >> mpi->chroma_x_shift;
- p->crop_cy = p->crop_y >> mpi->chroma_y_shift;
- if (mpi->flags & MP_IMGFLAG_ACCEPT_STRIDE) {
- p->stride = (mpi->w + 15) & ~15;
- p->chroma_stride = p->stride >> mpi->chroma_x_shift;
- } else {
- p->stride = mpi->width;
- p->chroma_stride = mpi->chroma_width;
- }
- p->cw = p->w >> mpi->chroma_x_shift;
- p->ch = p->h >> mpi->chroma_y_shift;
- p->nplanes = 1;
- p->static_idx = 0;
- p->temp_idx = 0;
- p->old_planes = p->planes[0];
- plane_size = mpi->h * p->stride;
- chroma_plane_size = mpi->flags & MP_IMGFLAG_PLANAR ?
- mpi->chroma_height * p->chroma_stride : 0;
- p->memory_allocated =
- malloc(NUM_STORED * (plane_size+2*chroma_plane_size) +
- 8*p->chroma_stride + 4096);
- /* align to page boundary */
- plane = p->memory_allocated + (-(long)p->memory_allocated & 4095);
- memset(plane, 0, NUM_STORED * plane_size);
- los = p->crop_x + p->crop_y * p->stride;
- cos = p->crop_cx + p->crop_cy * p->chroma_stride;
- for (i = 0; i != NUM_STORED; i++, plane += plane_size) {
- p->planes[i][0] = plane;
- p->planes[NUM_STORED + i][0] = plane + los;
- }
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- p->nplanes = 3;
- memset(plane, 0x80, NUM_STORED * 2 * chroma_plane_size);
- for (i = 0; i != NUM_STORED; i++) {
- p->planes[i][1] = plane;
- p->planes[NUM_STORED + i][1] = plane + cos;
- plane += chroma_plane_size;
- p->planes[i][2] = plane;
- p->planes[NUM_STORED + i][2] = plane + cos;
- plane += chroma_plane_size;
- }
- }
- p->out_dec <<= 2;
- i = gcd(p->in_inc, p->out_dec);
- p->in_inc /= i;
- p->out_dec /= i;
- p->iosync = 0;
- p->num_fields = 3;
-}
-
-static inline double get_time(void)
-{
- struct timeval tv;
- gettimeofday(&tv, 0);
- return tv.tv_sec + tv.tv_usec * 1e-6;
-}
-
-static void get_image(struct vf_instance *vf, mp_image_t *mpi)
-{
- struct vf_priv_s *p = vf->priv;
- static unsigned char **planes, planes_idx;
-
- if (mpi->type == MP_IMGTYPE_STATIC) return;
-
- if (!p->planes[0][0]) init(p, mpi);
-
- if (mpi->type == MP_IMGTYPE_TEMP ||
- (mpi->type == MP_IMGTYPE_IPB && !(mpi->flags & MP_IMGFLAG_READABLE)))
- planes_idx = NUM_STORED/2 + (++p->temp_idx % (NUM_STORED/2));
- else
- planes_idx = ++p->static_idx % (NUM_STORED/2);
- planes = p->planes[planes_idx];
- mpi->priv = p->planes[NUM_STORED + planes_idx];
- if (mpi->priv == p->old_planes) {
- unsigned char **old_planes =
- p->planes[NUM_STORED + 2 + (++p->temp_idx & 1)];
- my_memcpy_pic(old_planes[0], p->old_planes[0],
- p->w, p->h, p->stride, p->stride);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- my_memcpy_pic(old_planes[1], p->old_planes[1],
- p->cw, p->ch, p->chroma_stride, p->chroma_stride);
- my_memcpy_pic(old_planes[2], p->old_planes[2],
- p->cw, p->ch, p->chroma_stride, p->chroma_stride);
- }
- p->old_planes = old_planes;
- p->num_copies++;
- }
- mpi->planes[0] = planes[0];
- mpi->stride[0] = p->stride;
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- mpi->planes[1] = planes[1];
- mpi->planes[2] = planes[2];
- mpi->stride[1] = mpi->stride[2] = p->chroma_stride;
- }
- mpi->width = p->stride;
-
- mpi->flags |= MP_IMGFLAG_DIRECT;
- mpi->flags &= ~MP_IMGFLAG_DRAW_CALLBACK;
-}
-
-static inline long
-cmpe(unsigned long x, unsigned long y, unsigned long err, unsigned long e)
-{
- long diff = x-y;
- long unit = ((x+y+err) >> e);
- long ret = (diff > unit) - (diff < -unit);
- unit >>= 1;
- return ret + (diff > unit) - (diff < -unit);
-}
-
-static unsigned long
-find_breaks(struct vf_priv_s *p, struct frame_stats *s)
-{
- struct frame_stats *ps = &p->stats[(p->inframes-1) & 1];
- long notfilm = 5*p->in_inc - p->out_dec;
- unsigned long n = s->num_blocks >> 8;
- unsigned long sad_comb_cmp = cmpe(s->sad.temp, s->sad.noise, 512, 1);
- unsigned long ret = 8;
-
- if (cmpe(s->sad.temp, s->sad.even, 512, 1) > 0)
- ff_mp_msg(MSGT_VFILTER, MSGL_WARN,
- "@@@@@@@@ Bottom-first field??? @@@@@@@@\n");
- if (s->sad.temp > 1000 && s->sad.noise > 1000)
- return 3;
- if (s->interlaced_high >= 2*n && s->sad.temp > 256 && s->sad.noise > 256)
- return 3;
- if (s->high.noise > s->num_blocks/4 && s->sad.noise > 10000 &&
- s->sad.noise > 2*s->sad.even && s->sad.noise > 2*ps->sad.odd) {
- // Mid-frame scene change
- if (s->tiny.temp + s->interlaced_low < n ||
- s->low.temp + s->interlaced_high < n/4 ||
- s->high.temp + s->interlaced_high < n/8 ||
- s->sad.temp < 160)
- return 1;
- return 3;
- }
- if (s->high.temp > s->num_blocks/4 && s->sad.temp > 10000 &&
- s->sad.temp > 2*ps->sad.odd && s->sad.temp > 2*ps->sad.even) {
- // Start frame scene change
- if (s->tiny.noise + s->interlaced_low < n ||
- s->low.noise + s->interlaced_high < n/4 ||
- s->high.noise + s->interlaced_high < n/8 ||
- s->sad.noise < 160)
- return 2;
- return 3;
- }
- if (sad_comb_cmp == 2)
- return 2;
- if (sad_comb_cmp == -2)
- return 1;
-
- if (s->tiny.odd > 3*MAX(n,s->tiny.even) + s->interlaced_low)
- return 1;
- if (s->tiny.even > 3*MAX(n,s->tiny.odd)+s->interlaced_low &&
- (!sad_comb_cmp || (s->low.noise <= n/4 && s->low.temp <= n/4)))
- return 4;
-
- if (s->sad.noise < 64 && s->sad.temp < 64 &&
- s->low.noise <= n/2 && s->high.noise <= n/4 &&
- s->low.temp <= n/2 && s->high.temp <= n/4)
- goto still;
-
- if (s->tiny.temp > 3*MAX(n,s->tiny.noise) + s->interlaced_low)
- return 2;
- if (s->tiny.noise > 3*MAX(n,s->tiny.temp) + s->interlaced_low)
- return 1;
-
- if (s->low.odd > 3*MAX(n/4,s->low.even) + s->interlaced_high)
- return 1;
- if (s->low.even > 3*MAX(n/4,s->low.odd)+s->interlaced_high &&
- s->sad.even > 2*s->sad.odd &&
- (!sad_comb_cmp || (s->low.noise <= n/4 && s->low.temp <= n/4)))
- return 4;
-
- if (s->low.temp > 3*MAX(n/4,s->low.noise) + s->interlaced_high)
- return 2;
- if (s->low.noise > 3*MAX(n/4,s->low.temp) + s->interlaced_high)
- return 1;
-
- if (sad_comb_cmp == 1 && s->sad.noise < 64)
- return 2;
- if (sad_comb_cmp == -1 && s->sad.temp < 64)
- return 1;
-
- if (s->tiny.odd <= n || (s->tiny.noise <= n/2 && s->tiny.temp <= n/2)) {
- if (s->interlaced_low <= n) {
- if (p->num_fields == 1)
- goto still;
- if (s->tiny.even <= n || ps->tiny.noise <= n/2)
- /* Still frame */
- goto still;
- if (s->bigger.even >= 2*MAX(n,s->bigger.odd) + s->interlaced_low)
- return 4;
- if (s->low.even >= 2*n + s->interlaced_low)
- return 4;
- goto still;
- }
- }
- if (s->low.odd <= n/4) {
- if (s->interlaced_high <= n/4) {
- if (p->num_fields == 1)
- goto still;
- if (s->low.even <= n/4)
- /* Still frame */
- goto still;
- if (s->bigger.even >= 2*MAX(n/4,s->bigger.odd)+s->interlaced_high)
- return 4;
- if (s->low.even >= n/2 + s->interlaced_high)
- return 4;
- goto still;
- }
- }
- if (s->bigger.temp > 2*MAX(n,s->bigger.noise) + s->interlaced_low)
- return 2;
- if (s->bigger.noise > 2*MAX(n,s->bigger.temp) + s->interlaced_low)
- return 1;
- if (s->bigger.temp > 2*MAX(n,s->bigger.noise) + s->interlaced_high)
- return 2;
- if (s->bigger.noise > 2*MAX(n,s->bigger.temp) + s->interlaced_high)
- return 1;
- if (s->twox.temp > 2*MAX(n,s->twox.noise) + s->interlaced_high)
- return 2;
- if (s->twox.noise > 2*MAX(n,s->twox.temp) + s->interlaced_high)
- return 1;
- if (s->bigger.even > 2*MAX(n,s->bigger.odd) + s->interlaced_low &&
- s->bigger.temp < n && s->bigger.noise < n)
- return 4;
- if (s->interlaced_low > MIN(2*n, s->tiny.odd))
- return 3;
- ret = 8 + (1 << (s->sad.temp > s->sad.noise));
- still:
- if (p->num_fields == 1 && p->prev_fields == 3 && notfilm >= 0 &&
- (s->tiny.temp <= s->tiny.noise || s->sad.temp < s->sad.noise+16))
- return 1;
- if (p->notout < p->num_fields && p->iosync > 2*p->in_inc && notfilm < 0)
- notfilm = 0;
- if (p->num_fields < 2 ||
- (p->num_fields == 2 && p->prev_fields == 2 && notfilm < 0))
- return ret;
- if (!notfilm && (p->prev_fields&~1) == 2) {
- if (p->prev_fields + p->num_fields == 5) {
- if (s->tiny.noise <= s->tiny.temp ||
- s->low.noise == 0 || s->low.noise < s->low.temp ||
- s->sad.noise < s->sad.temp+16)
- return 2;
- }
- if (p->prev_fields + p->num_fields == 4) {
- if (s->tiny.temp <= s->tiny.noise ||
- s->low.temp == 0 || s->low.temp < s->low.noise ||
- s->sad.temp < s->sad.noise+16)
- return 1;
- }
- }
- if (p->num_fields > 2 &&
- ps->sad.noise > s->sad.noise && ps->sad.noise > s->sad.temp)
- return 4;
- return 2 >> (s->sad.noise > s->sad.temp);
-}
-
-#define ITOC(X) (!(X) ? ' ' : (X) + ((X)>9 ? 'a'-10 : '0'))
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
-{
- mp_image_t *dmpi;
- struct vf_priv_s *p = vf->priv;
- unsigned char **planes, **old_planes;
- struct frame_stats *s = &p->stats[p->inframes & 1];
- struct frame_stats *ps = &p->stats[(p->inframes-1) & 1];
- int swapped = 0;
- const int flags = mpi->fields;
- int breaks, prev;
- int show_fields = 0;
- int dropped_fields = 0;
- double start_time, diff_time;
- char prev_chflag = p->chflag;
- int keep_rate;
-
- if (!p->planes[0][0]) init(p, mpi);
-
- old_planes = p->old_planes;
-
- if ((mpi->flags & MP_IMGFLAG_DIRECT) && mpi->priv) {
- planes = mpi->priv;
- mpi->priv = 0;
- } else {
- planes = p->planes[2 + (++p->temp_idx & 1)];
- my_memcpy_pic(planes[0],
- mpi->planes[0] + p->crop_x + p->crop_y * mpi->stride[0],
- p->w, p->h, p->stride, mpi->stride[0]);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- my_memcpy_pic(planes[1],
- mpi->planes[1] + p->crop_cx + p->crop_cy * mpi->stride[1],
- p->cw, p->ch, p->chroma_stride, mpi->stride[1]);
- my_memcpy_pic(planes[2],
- mpi->planes[2] + p->crop_cx + p->crop_cy * mpi->stride[2],
- p->cw, p->ch, p->chroma_stride, mpi->stride[2]);
- p->num_copies++;
- }
- }
-
- p->old_planes = planes;
- p->chflag = ';';
- if (flags & MP_IMGFIELD_ORDERED) {
- swapped = !(flags & MP_IMGFIELD_TOP_FIRST);
- p->chflag = (flags & MP_IMGFIELD_REPEAT_FIRST ? '|' :
- flags & MP_IMGFIELD_TOP_FIRST ? ':' : '.');
- }
- p->swapped = swapped;
-
- start_time = get_time();
- if (p->chflag == '|') {
- *s = ppzs;
- p->iosync += p->in_inc;
- } else if ((p->fast & 1) && prev_chflag == '|')
- *s = pprs;
- else
- diff_fields(p, s, old_planes, planes);
- diff_time = get_time();
- p->diff_time += diff_time - start_time;
- breaks = p->inframes ? find_breaks(p, s) : 2;
- p->inframes++;
- keep_rate = 4*p->in_inc == p->out_dec;
-
- switch (breaks) {
- case 0:
- case 8:
- case 9:
- case 10:
- if (!keep_rate && p->notout < p->num_fields && p->iosync < 2*p->in_inc)
- break;
- if (p->notout < p->num_fields)
- dropped_fields = -2;
- case 4:
- if (keep_rate || p->iosync >= -2*p->in_inc)
- show_fields = (4<<p->num_fields)-1;
- break;
- case 3:
- if (keep_rate)
- show_fields = 2;
- else if (p->iosync > 0) {
- if (p->notout >= p->num_fields && p->iosync > 2*p->in_inc) {
- show_fields = 4; /* prev odd only */
- if (p->num_fields > 1)
- show_fields |= 8; /* + prev even */
- } else {
- show_fields = 2; /* even only */
- if (p->notout >= p->num_fields)
- dropped_fields += p->num_fields;
- }
- }
- break;
- case 2:
- if (p->iosync <= -3*p->in_inc) {
- if (p->notout >= p->num_fields)
- dropped_fields = p->num_fields;
- break;
- }
- if (p->num_fields == 1) {
- int prevbreak = ps->sad.noise >= 128;
- if (p->iosync < 4*p->in_inc) {
- show_fields = 3;
- dropped_fields = prevbreak;
- } else {
- show_fields = 4 | (!prevbreak << 3);
- if (p->notout < 1 + p->prev_fields)
- dropped_fields = -!prevbreak;
- }
- break;
- }
- default:
- if (keep_rate)
- show_fields = 3 << (breaks & 1);
- else if (p->notout >= p->num_fields &&
- p->iosync >= (breaks == 1 ? -p->in_inc :
- p->in_inc << (p->num_fields == 1))) {
- show_fields = (1 << (2 + p->num_fields)) - (1<<breaks);
- } else {
- if (p->notout >= p->num_fields)
- dropped_fields += p->num_fields + 2 - breaks;
- if (breaks == 1) {
- if (p->iosync >= 4*p->in_inc)
- show_fields = 6;
- } else if (p->iosync > -3*p->in_inc)
- show_fields = 3; /* odd+even */
- }
- break;
- }
-
- show_fields &= 15;
- prev = p->prev_fields;
- if (breaks < 8) {
- if (p->num_fields == 1)
- breaks &= ~4;
- if (breaks)
- p->num_breaks++;
- if (breaks == 3)
- p->prev_fields = p->num_fields = 1;
- else if (breaks) {
- p->prev_fields = p->num_fields + (breaks==1) - (breaks==4);
- p->num_fields = breaks - (breaks == 4) + (p->chflag == '|');
- } else
- p->num_fields += 2;
- } else
- p->num_fields += 2;
-
- p->iosync += 4 * p->in_inc;
- if (p->chflag == '|')
- p->iosync += p->in_inc;
-
- if (show_fields) {
- p->iosync -= p->out_dec;
- p->notout = !(show_fields & 1) + !(show_fields & 3);
- if (((show_fields & 3) == 3 &&
- (s->low.noise + s->interlaced_low < (s->num_blocks>>8) ||
- s->sad.noise < 160)) ||
- ((show_fields & 12) == 12 &&
- (ps->low.noise + ps->interlaced_low < (s->num_blocks>>8) ||
- ps->sad.noise < 160))) {
- p->export_count++;
- dmpi = ff_vf_get_image(vf->next, mpi->imgfmt, MP_IMGTYPE_EXPORT,
- MP_IMGFLAG_PRESERVE|MP_IMGFLAG_READABLE,
- p->w, p->h);
- if ((show_fields & 3) != 3) planes = old_planes;
- dmpi->planes[0] = planes[0];
- dmpi->stride[0] = p->stride;
- dmpi->width = mpi->width;
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- dmpi->planes[1] = planes[1];
- dmpi->planes[2] = planes[2];
- dmpi->stride[1] = p->chroma_stride;
- dmpi->stride[2] = p->chroma_stride;
- }
- } else {
- p->merge_count++;
- dmpi = ff_vf_get_image(vf->next, mpi->imgfmt,
- MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE,
- p->w, p->h);
- copy_merge_fields(p, dmpi, old_planes, planes, show_fields);
- }
- p->outframes++;
- } else
- p->notout += 2;
-
- if (p->verbose)
- ff_mp_msg(MSGT_VFILTER, MSGL_INFO, "%lu %lu: %x %c %c %lu%s%s%c%s\n",
- p->inframes, p->outframes,
- breaks, breaks<8 && breaks>0 ? (int) p->prev_fields+'0' : ' ',
- ITOC(show_fields),
- p->num_breaks, 5*p->in_inc == p->out_dec && breaks<8 &&
- breaks>0 && ((prev&~1)!=2 || prev+p->prev_fields!=5) ?
- " ######## bad telecine ########" : "",
- dropped_fields ? " ======== dropped ":"", ITOC(dropped_fields),
- !show_fields || (show_fields & (show_fields-1)) ?
- "" : " @@@@@@@@@@@@@@@@@");
-
- p->merge_time += get_time() - diff_time;
- return show_fields ? ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE) : 0;
-}
-
-static int query_format(struct vf_instance *vf, unsigned int fmt)
-{
- /* FIXME - support more formats */
- switch (fmt) {
- case IMGFMT_YV12:
- case IMGFMT_IYUV:
- case IMGFMT_I420:
- case IMGFMT_411P:
- case IMGFMT_422P:
- case IMGFMT_444P:
- return ff_vf_next_query_format(vf, fmt);
- }
- return 0;
-}
-
-static int config(struct vf_instance *vf,
- int width, int height, int d_width, int d_height,
- unsigned int flags, unsigned int outfmt)
-{
- unsigned long cxm = 0;
- unsigned long cym = 0;
- struct vf_priv_s *p = vf->priv;
- // rounding:
- if(!IMGFMT_IS_RGB(outfmt) && !IMGFMT_IS_BGR(outfmt)){
- switch(outfmt){
- case IMGFMT_444P:
- case IMGFMT_Y800:
- case IMGFMT_Y8:
- break;
- case IMGFMT_YVU9:
- case IMGFMT_IF09:
- cym = 3;
- case IMGFMT_411P:
- cxm = 3;
- break;
- case IMGFMT_YV12:
- case IMGFMT_I420:
- case IMGFMT_IYUV:
- cym = 1;
- default:
- cxm = 1;
- }
- }
- p->chroma_swapped = !!(p->crop_y & (cym+1));
- if (p->w) p->w += p->crop_x & cxm;
- if (p->h) p->h += p->crop_y & cym;
- p->crop_x &= ~cxm;
- p->crop_y &= ~cym;
- if (!p->w || p->w > width ) p->w = width;
- if (!p->h || p->h > height) p->h = height;
- if (p->crop_x + p->w > width ) p->crop_x = 0;
- if (p->crop_y + p->h > height) p->crop_y = 0;
-
- if(!opt_screen_size_x && !opt_screen_size_y){
- d_width = d_width * p->w/width;
- d_height = d_height * p->h/height;
- }
- return ff_vf_next_config(vf, p->w, p->h, d_width, d_height, flags, outfmt);
-}
-
-static void uninit(struct vf_instance *vf)
-{
- struct vf_priv_s *p = vf->priv;
- ff_mp_msg(MSGT_VFILTER, MSGL_INFO, "diff_time: %.3f, merge_time: %.3f, "
- "export: %lu, merge: %lu, copy: %lu\n", p->diff_time, p->merge_time,
- p->export_count, p->merge_count, p->num_copies);
- free(p->memory_allocated);
- free(p);
-}
-
-static int vf_open(vf_instance_t *vf, char *args)
-{
- struct vf_priv_s *p;
- vf->get_image = get_image;
- vf->put_image = put_image;
- vf->config = config;
- vf->query_format = query_format;
- vf->uninit = uninit;
- vf->default_reqs = VFCAP_ACCEPT_STRIDE;
- vf->priv = p = calloc(1, sizeof(struct vf_priv_s));
- p->out_dec = 5;
- p->in_inc = 4;
- p->thres.noise = 128;
- p->thres.even = 128;
- p->sad_thres = 64;
- p->dint_thres = 4;
- p->luma_only = 0;
- p->fast = 3;
- p->mmx2 = ff_gCpuCaps.hasMMX2 ? 1 : ff_gCpuCaps.has3DNow ? 2 : 0;
- if (args) {
- const char *args_remain = parse_args(p, args);
- if (args_remain) {
- ff_mp_msg(MSGT_VFILTER, MSGL_FATAL,
- "filmdint: unknown suboption: %s\n", args_remain);
- return 0;
- }
- if (p->out_dec < p->in_inc) {
- ff_mp_msg(MSGT_VFILTER, MSGL_FATAL,
- "filmdint: increasing the frame rate is not supported\n");
- return 0;
- }
- }
- if (p->mmx2 > 2)
- p->mmx2 = 0;
-#if !HAVE_MMX
- p->mmx2 = 0;
-#endif
-#if !HAVE_AMD3DNOW
- p->mmx2 &= 1;
-#endif
- p->thres.odd = p->thres.even;
- p->thres.temp = p->thres.noise;
- p->diff_time = 0;
- p->merge_time = 0;
- return 1;
-}
-
-const vf_info_t ff_vf_info_filmdint = {
- "Advanced inverse telecine filer",
- "filmdint",
- "Zoltan Hidvegi",
- "",
- vf_open,
- NULL
-};
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_ivtc.c b/ffmpeg/libavfilter/libmpcodecs/vf_ivtc.c
deleted file mode 100644
index 8a47a57..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_ivtc.c
+++ /dev/null
@@ -1,550 +0,0 @@
-/*
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "config.h"
-#include "mp_msg.h"
-#include "cpudetect.h"
-
-#include "img_format.h"
-#include "mp_image.h"
-#include "vf.h"
-#include "libavutil/x86/asm.h"
-#include "libvo/fastmemcpy.h"
-
-
-struct metrics {
- /* difference: total, even lines, odd lines */
- int d, e, o;
- /* noise: temporal, spacial (current), spacial (past) */
- int t, s, p;
-};
-
-struct frameinfo {
- /* peak, relative, mean */
- struct metrics p, r, m;
-};
-
-struct vf_priv_s {
- struct frameinfo fi[2];
- mp_image_t *dmpi;
- int first;
- int drop, lastdrop, dropnext;
- int inframes, outframes;
-};
-
-enum {
- F_DROP,
- F_MERGE,
- F_NEXT,
- F_SHOW
-};
-
-#if HAVE_MMX && HAVE_EBX_AVAILABLE
-static void block_diffs_MMX(struct metrics *m, unsigned char *old, unsigned char *new, int os, int ns)
-{
- int i;
- short out[24]; // output buffer for the partial metrics from the mmx code
-
- __asm__ (
- "movl $4, %%ecx \n\t"
- "pxor %%mm4, %%mm4 \n\t" // 4 even difference sums
- "pxor %%mm5, %%mm5 \n\t" // 4 odd difference sums
- "pxor %%mm7, %%mm7 \n\t" // all zeros
-
- ASMALIGN(4)
- "1: \n\t"
-
- // Even difference
- "movq (%%"REG_S"), %%mm0 \n\t"
- "movq (%%"REG_S"), %%mm2 \n\t"
- "add %%"REG_a", %%"REG_S" \n\t"
- "movq (%%"REG_D"), %%mm1 \n\t"
- "add %%"REG_b", %%"REG_D" \n\t"
- "psubusb %%mm1, %%mm2 \n\t"
- "psubusb %%mm0, %%mm1 \n\t"
- "movq %%mm2, %%mm0 \n\t"
- "movq %%mm1, %%mm3 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpcklbw %%mm7, %%mm1 \n\t"
- "punpckhbw %%mm7, %%mm2 \n\t"
- "punpckhbw %%mm7, %%mm3 \n\t"
- "paddw %%mm0, %%mm4 \n\t"
- "paddw %%mm1, %%mm4 \n\t"
- "paddw %%mm2, %%mm4 \n\t"
- "paddw %%mm3, %%mm4 \n\t"
-
- // Odd difference
- "movq (%%"REG_S"), %%mm0 \n\t"
- "movq (%%"REG_S"), %%mm2 \n\t"
- "add %%"REG_a", %%"REG_S" \n\t"
- "movq (%%"REG_D"), %%mm1 \n\t"
- "add %%"REG_b", %%"REG_D" \n\t"
- "psubusb %%mm1, %%mm2 \n\t"
- "psubusb %%mm0, %%mm1 \n\t"
- "movq %%mm2, %%mm0 \n\t"
- "movq %%mm1, %%mm3 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpcklbw %%mm7, %%mm1 \n\t"
- "punpckhbw %%mm7, %%mm2 \n\t"
- "punpckhbw %%mm7, %%mm3 \n\t"
- "paddw %%mm0, %%mm5 \n\t"
- "paddw %%mm1, %%mm5 \n\t"
- "paddw %%mm2, %%mm5 \n\t"
- "paddw %%mm3, %%mm5 \n\t"
-
- "decl %%ecx \n\t"
- "jnz 1b \n\t"
- "movq %%mm4, (%%"REG_d") \n\t"
- "movq %%mm5, 8(%%"REG_d") \n\t"
- :
- : "S" (old), "D" (new), "a" (os), "b" (ns), "d" (out)
- : "memory"
- );
- m->e = out[0]+out[1]+out[2]+out[3];
- m->o = out[4]+out[5]+out[6]+out[7];
- m->d = m->e + m->o;
-
- __asm__ (
- // First loop to measure first four columns
- "movl $4, %%ecx \n\t"
- "pxor %%mm4, %%mm4 \n\t" // Past spacial noise
- "pxor %%mm5, %%mm5 \n\t" // Temporal noise
- "pxor %%mm6, %%mm6 \n\t" // Current spacial noise
-
- ASMALIGN(4)
- "2: \n\t"
-
- "movq (%%"REG_S"), %%mm0 \n\t"
- "movq (%%"REG_S",%%"REG_a"), %%mm1 \n\t"
- "add %%"REG_a", %%"REG_S" \n\t"
- "add %%"REG_a", %%"REG_S" \n\t"
- "movq (%%"REG_D"), %%mm2 \n\t"
- "movq (%%"REG_D",%%"REG_b"), %%mm3 \n\t"
- "add %%"REG_b", %%"REG_D" \n\t"
- "add %%"REG_b", %%"REG_D" \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpcklbw %%mm7, %%mm1 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm3 \n\t"
- "paddw %%mm1, %%mm4 \n\t"
- "paddw %%mm1, %%mm5 \n\t"
- "paddw %%mm3, %%mm6 \n\t"
- "psubw %%mm0, %%mm4 \n\t"
- "psubw %%mm2, %%mm5 \n\t"
- "psubw %%mm2, %%mm6 \n\t"
-
- "decl %%ecx \n\t"
- "jnz 2b \n\t"
-
- "movq %%mm0, %%mm1 \n\t"
- "movq %%mm0, %%mm2 \n\t"
- "movq %%mm0, %%mm3 \n\t"
- "pcmpgtw %%mm4, %%mm1 \n\t"
- "pcmpgtw %%mm5, %%mm2 \n\t"
- "pcmpgtw %%mm6, %%mm3 \n\t"
- "pxor %%mm1, %%mm4 \n\t"
- "pxor %%mm2, %%mm5 \n\t"
- "pxor %%mm3, %%mm6 \n\t"
- "psubw %%mm1, %%mm4 \n\t"
- "psubw %%mm2, %%mm5 \n\t"
- "psubw %%mm3, %%mm6 \n\t"
- "movq %%mm4, (%%"REG_d") \n\t"
- "movq %%mm5, 16(%%"REG_d") \n\t"
- "movq %%mm6, 32(%%"REG_d") \n\t"
-
- "mov %%"REG_a", %%"REG_c" \n\t"
- "shl $3, %%"REG_c" \n\t"
- "sub %%"REG_c", %%"REG_S" \n\t"
- "mov %%"REG_b", %%"REG_c" \n\t"
- "shl $3, %%"REG_c" \n\t"
- "sub %%"REG_c", %%"REG_D" \n\t"
-
- // Second loop for the last four columns
- "movl $4, %%ecx \n\t"
- "pxor %%mm4, %%mm4 \n\t"
- "pxor %%mm5, %%mm5 \n\t"
- "pxor %%mm6, %%mm6 \n\t"
-
- ASMALIGN(4)
- "3: \n\t"
-
- "movq (%%"REG_S"), %%mm0 \n\t"
- "movq (%%"REG_S",%%"REG_a"), %%mm1 \n\t"
- "add %%"REG_a", %%"REG_S" \n\t"
- "add %%"REG_a", %%"REG_S" \n\t"
- "movq (%%"REG_D"), %%mm2 \n\t"
- "movq (%%"REG_D",%%"REG_b"), %%mm3 \n\t"
- "add %%"REG_b", %%"REG_D" \n\t"
- "add %%"REG_b", %%"REG_D" \n\t"
- "punpckhbw %%mm7, %%mm0 \n\t"
- "punpckhbw %%mm7, %%mm1 \n\t"
- "punpckhbw %%mm7, %%mm2 \n\t"
- "punpckhbw %%mm7, %%mm3 \n\t"
- "paddw %%mm1, %%mm4 \n\t"
- "paddw %%mm1, %%mm5 \n\t"
- "paddw %%mm3, %%mm6 \n\t"
- "psubw %%mm0, %%mm4 \n\t"
- "psubw %%mm2, %%mm5 \n\t"
- "psubw %%mm2, %%mm6 \n\t"
-
- "decl %%ecx \n\t"
- "jnz 3b \n\t"
-
- "movq %%mm0, %%mm1 \n\t"
- "movq %%mm0, %%mm2 \n\t"
- "movq %%mm0, %%mm3 \n\t"
- "pcmpgtw %%mm4, %%mm1 \n\t"
- "pcmpgtw %%mm5, %%mm2 \n\t"
- "pcmpgtw %%mm6, %%mm3 \n\t"
- "pxor %%mm1, %%mm4 \n\t"
- "pxor %%mm2, %%mm5 \n\t"
- "pxor %%mm3, %%mm6 \n\t"
- "psubw %%mm1, %%mm4 \n\t"
- "psubw %%mm2, %%mm5 \n\t"
- "psubw %%mm3, %%mm6 \n\t"
- "movq %%mm4, 8(%%"REG_d") \n\t"
- "movq %%mm5, 24(%%"REG_d") \n\t"
- "movq %%mm6, 40(%%"REG_d") \n\t"
-
- "emms \n\t"
- :
- : "S" (old), "D" (new), "a" ((long)os), "b" ((long)ns), "d" (out)
- : "memory"
- );
- m->p = m->t = m->s = 0;
- for (i=0; i<8; i++) {
- m->p += out[i];
- m->t += out[8+i];
- m->s += out[16+i];
- }
- //printf("e=%d o=%d d=%d p=%d t=%d s=%d\n", m->e, m->o, m->d, m->p, m->t, m->s);
-}
-#endif
-
-//#define MAG(a) ((a)*(a))
-//#define MAG(a) (abs(a))
-#define MAG(a) (((a)^((a)>>31))-((a)>>31))
-
-//#define LOWPASS(s) (((s)[-2] + 4*(s)[-1] + 6*(s)[0] + 4*(s)[1] + (s)[2])>>4)
-//#define LOWPASS(s) (((s)[-1] + 2*(s)[0] + (s)[1])>>2)
-#define LOWPASS(s) ((s)[0])
-
-
-static void block_diffs_C(struct metrics *m, unsigned char *old, unsigned char *new, int os, int ns)
-{
- int x, y, e=0, o=0, s=0, p=0, t=0;
- unsigned char *oldp, *newp;
- m->s = m->p = m->t = 0;
- for (x = 8; x; x--) {
- oldp = old++;
- newp = new++;
- s = p = t = 0;
- for (y = 4; y; y--) {
- e += MAG(newp[0]-oldp[0]);
- o += MAG(newp[ns]-oldp[os]);
- s += newp[ns]-newp[0];
- p += oldp[os]-oldp[0];
- t += oldp[os]-newp[0];
- oldp += os<<1;
- newp += ns<<1;
- }
- m->s += MAG(s);
- m->p += MAG(p);
- m->t += MAG(t);
- }
- m->e = e;
- m->o = o;
- m->d = e+o;
-}
-
-static void (*block_diffs)(struct metrics *, unsigned char *, unsigned char *, int, int);
-
-#define MAXUP(a,b) ((a) = ((a)>(b)) ? (a) : (b))
-
-static void diff_planes(struct frameinfo *fi,
- unsigned char *old, unsigned char *new, int w, int h, int os, int ns)
-{
- int x, y;
- struct metrics l;
- struct metrics *peak=&fi->p, *rel=&fi->r, *mean=&fi->m;
- memset(peak, 0, sizeof(struct metrics));
- memset(rel, 0, sizeof(struct metrics));
- memset(mean, 0, sizeof(struct metrics));
- for (y = 0; y < h-7; y += 8) {
- for (x = 8; x < w-8-7; x += 8) {
- block_diffs(&l, old+x+y*os, new+x+y*ns, os, ns);
- mean->d += l.d;
- mean->e += l.e;
- mean->o += l.o;
- mean->s += l.s;
- mean->p += l.p;
- mean->t += l.t;
- MAXUP(peak->d, l.d);
- MAXUP(peak->e, l.e);
- MAXUP(peak->o, l.o);
- MAXUP(peak->s, l.s);
- MAXUP(peak->p, l.p);
- MAXUP(peak->t, l.t);
- MAXUP(rel->e, l.e-l.o);
- MAXUP(rel->o, l.o-l.e);
- MAXUP(rel->s, l.s-l.t);
- MAXUP(rel->p, l.p-l.t);
- MAXUP(rel->t, l.t-l.p);
- MAXUP(rel->d, l.t-l.s); /* hack */
- }
- }
- x = (w/8-2)*(h/8);
- mean->d /= x;
- mean->e /= x;
- mean->o /= x;
- mean->s /= x;
- mean->p /= x;
- mean->t /= x;
-}
-
-static void diff_fields(struct frameinfo *fi, mp_image_t *old, mp_image_t *new)
-{
- diff_planes(fi, old->planes[0], new->planes[0],
- new->w, new->h, old->stride[0], new->stride[0]);
-}
-
-static void stats(struct frameinfo *f)
-{
- ff_mp_msg(MSGT_VFILTER, MSGL_V, " pd=%d re=%d ro=%d rp=%d rt=%d rs=%d rd=%d pp=%d pt=%d ps=%d\r",
- f->p.d, f->r.e, f->r.o, f->r.p, f->r.t, f->r.s, f->r.d, f->p.p, f->p.t, f->p.s);
-}
-
-static int foo(struct vf_priv_s *p, mp_image_t *new, mp_image_t *cur)
-{
- struct frameinfo *f = p->fi;
-
- f[0] = f[1];
- diff_fields(&f[1], cur, new);
- stats(&f[1]);
-
- // Immediately drop this frame if it's already been used.
- if (p->dropnext) {
- p->dropnext = 0;
- return F_DROP;
- }
-
- // Sometimes a pulldown frame comes all by itself, so both
- // its top and bottom field are duplicates from the adjacent
- // two frames. We can just drop such a frame, but we
- // immediately show the next frame instead to keep the frame
- // drops evenly spaced during normal 3:2 pulldown sequences.
- if ((3*f[1].r.o < f[1].r.e) && (f[1].r.s < f[1].r.d)) {
- p->dropnext = 1;
- return F_NEXT;
- }
-
- // If none of these conditions hold, we will consider the frame
- // progressive and just show it as-is.
- if (!( (3*f[0].r.e < f[0].r.o) ||
- ((2*f[0].r.d < f[0].r.s) && (f[0].r.s > 1200)) ||
- ((2*f[1].r.t < f[1].r.p) && (f[1].r.p > 1200)) ))
- return F_SHOW;
-
- // Otherwise, we have to decide whether to merge or drop.
- // If the noise metric only increases minimally, we're off
- // to a good start...
- if (((2*f[1].r.t < 3*f[1].r.p) && (f[1].r.t < 3600)) ||
- (f[1].r.t < 900) || (f[1].r.d < 900)) {
- // ...and if noise decreases or the duplicate even field
- // is detected, we go ahead with the merge.
- if ((3*f[0].r.e < f[0].r.o) || (2*f[1].r.t < f[1].r.p)) {
- p->dropnext = 1;
- return F_MERGE;
- }
- }
- return F_DROP;
-}
-
-
-
-static void copy_image(mp_image_t *dmpi, mp_image_t *mpi, int field)
-{
- switch (field) {
- case 0:
- my_memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h/2,
- dmpi->stride[0]*2, mpi->stride[0]*2);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- my_memcpy_pic(dmpi->planes[1], mpi->planes[1],
- mpi->chroma_width, mpi->chroma_height/2,
- dmpi->stride[1]*2, mpi->stride[1]*2);
- my_memcpy_pic(dmpi->planes[2], mpi->planes[2],
- mpi->chroma_width, mpi->chroma_height/2,
- dmpi->stride[2]*2, mpi->stride[2]*2);
- }
- break;
- case 1:
- my_memcpy_pic(dmpi->planes[0]+dmpi->stride[0],
- mpi->planes[0]+mpi->stride[0], mpi->w, mpi->h/2,
- dmpi->stride[0]*2, mpi->stride[0]*2);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- my_memcpy_pic(dmpi->planes[1]+dmpi->stride[1],
- mpi->planes[1]+mpi->stride[1],
- mpi->chroma_width, mpi->chroma_height/2,
- dmpi->stride[1]*2, mpi->stride[1]*2);
- my_memcpy_pic(dmpi->planes[2]+dmpi->stride[2],
- mpi->planes[2]+mpi->stride[2],
- mpi->chroma_width, mpi->chroma_height/2,
- dmpi->stride[2]*2, mpi->stride[2]*2);
- }
- break;
- case 2:
- memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h,
- dmpi->stride[0], mpi->stride[0]);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- memcpy_pic(dmpi->planes[1], mpi->planes[1],
- mpi->chroma_width, mpi->chroma_height,
- dmpi->stride[1], mpi->stride[1]);
- memcpy_pic(dmpi->planes[2], mpi->planes[2],
- mpi->chroma_width, mpi->chroma_height,
- dmpi->stride[2], mpi->stride[2]);
- }
- break;
- }
-}
-
-static int do_put_image(struct vf_instance *vf, mp_image_t *dmpi)
-{
- struct vf_priv_s *p = vf->priv;
- int dropflag=0;
-
- if (!p->dropnext) switch (p->drop) {
- case 0:
- dropflag = 0;
- break;
- case 1:
- dropflag = (++p->lastdrop >= 5);
- break;
- case 2:
- dropflag = (++p->lastdrop >= 5) && (4*p->inframes <= 5*p->outframes);
- break;
- }
-
- if (dropflag) {
- //ff_mp_msg(MSGT_VFILTER, MSGL_V, "drop! [%d/%d=%g]\n",
- // p->outframes, p->inframes, (float)p->outframes/p->inframes);
- ff_mp_msg(MSGT_VFILTER, MSGL_V, "!");
- p->lastdrop = 0;
- return 0;
- }
-
- p->outframes++;
- return ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE);
-}
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
-{
- int ret=0;
- struct vf_priv_s *p = vf->priv;
-
- p->inframes++;
-
- if (p->first) { /* hack */
- p->first = 0;
- return 1;
- }
-
- if (!p->dmpi) p->dmpi = ff_vf_get_image(vf->next, mpi->imgfmt,
- MP_IMGTYPE_STATIC, MP_IMGFLAG_ACCEPT_STRIDE |
- MP_IMGFLAG_PRESERVE | MP_IMGFLAG_READABLE,
- mpi->width, mpi->height);
- /* FIXME -- not correct, off by one frame! */
- p->dmpi->qscale = mpi->qscale;
- p->dmpi->qstride = mpi->qstride;
- p->dmpi->qscale_type = mpi->qscale_type;
-
- switch (foo(p, mpi, p->dmpi)) {
- case F_DROP:
- copy_image(p->dmpi, mpi, 2);
- ret = 0;
- p->lastdrop = 0;
- ff_mp_msg(MSGT_VFILTER, MSGL_V, "DROP\n");
- break;
- case F_MERGE:
- copy_image(p->dmpi, mpi, 0);
- ret = do_put_image(vf, p->dmpi);
- copy_image(p->dmpi, mpi, 1);
- ff_mp_msg(MSGT_VFILTER, MSGL_V, "MERGE\n");
- p->dmpi = NULL;
- break;
- case F_NEXT:
- copy_image(p->dmpi, mpi, 2);
- ret = do_put_image(vf, p->dmpi);
- ff_mp_msg(MSGT_VFILTER, MSGL_V, "NEXT\n");
- p->dmpi = NULL;
- break;
- case F_SHOW:
- ret = do_put_image(vf, p->dmpi);
- copy_image(p->dmpi, mpi, 2);
- ff_mp_msg(MSGT_VFILTER, MSGL_V, "OK\n");
- p->dmpi = NULL;
- break;
- }
- return ret;
-}
-
-static int query_format(struct vf_instance *vf, unsigned int fmt)
-{
- switch (fmt) {
- case IMGFMT_YV12:
- case IMGFMT_IYUV:
- case IMGFMT_I420:
- return ff_vf_next_query_format(vf, fmt);
- }
- return 0;
-}
-
-static void uninit(struct vf_instance *vf)
-{
- free(vf->priv);
-}
-
-static int vf_open(vf_instance_t *vf, char *args)
-{
- struct vf_priv_s *p;
- vf->put_image = put_image;
- vf->query_format = query_format;
- vf->uninit = uninit;
- vf->default_reqs = VFCAP_ACCEPT_STRIDE;
- vf->priv = p = calloc(1, sizeof(struct vf_priv_s));
- p->drop = 0;
- p->first = 1;
- if (args) sscanf(args, "%d", &p->drop);
- block_diffs = block_diffs_C;
-#if HAVE_MMX && HAVE_EBX_AVAILABLE
- if(ff_gCpuCaps.hasMMX) block_diffs = block_diffs_MMX;
-#endif
- return 1;
-}
-
-const vf_info_t ff_vf_info_ivtc = {
- "inverse telecine, take 2",
- "ivtc",
- "Rich Felker",
- "",
- vf_open,
- NULL
-};
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_mcdeint.c b/ffmpeg/libavfilter/libmpcodecs/vf_mcdeint.c
deleted file mode 100644
index b9ffaf2..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_mcdeint.c
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
- *
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-
-/*
-Known Issues:
-* The motion estimation is somewhat at the mercy of the input, if the input
- frames are created purely based on spatial interpolation then for example
- a thin black line or another random and not interpolateable pattern
- will cause problems
- Note: completly ignoring the "unavailable" lines during motion estimation
- didnt look any better, so the most obvious solution would be to improve
- tfields or penalize problematic motion vectors ...
-
-* If non iterative ME is used then snow currently ignores the OBMC window
- and as a result sometimes creates artifacts
-
-* only past frames are used, we should ideally use future frames too, something
- like filtering the whole movie in forward and then backward direction seems
- like a interresting idea but the current filter framework is FAR from
- supporting such things
-
-* combining the motion compensated image with the input image also isnt
- as trivial as it seems, simple blindly taking even lines from one and
- odd ones from the other doesnt work at all as ME/MC sometimes simple
- has nothing in the previous frames which matches the current, the current
- algo has been found by trial and error and almost certainly can be
- improved ...
-*/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <inttypes.h>
-#include <math.h>
-
-#include "mp_msg.h"
-#include "cpudetect.h"
-
-#include "libavutil/common.h"
-#include "libavutil/internal.h"
-#include "libavutil/intreadwrite.h"
-#include "libavcodec/avcodec.h"
-#include "libavcodec/dsputil.h"
-
-#undef fprintf
-#undef free
-#undef malloc
-
-#include "img_format.h"
-#include "mp_image.h"
-#include "vf.h"
-#include "av_helpers.h"
-
-#define MIN(a,b) ((a) > (b) ? (b) : (a))
-#define MAX(a,b) ((a) < (b) ? (b) : (a))
-#define ABS(a) ((a) > 0 ? (a) : (-(a)))
-
-//===========================================================================//
-
-struct vf_priv_s {
- int mode;
- int qp;
- int parity;
-#if 0
- int temp_stride[3];
- uint8_t *src[3];
- int16_t *temp[3];
-#endif
- int outbuf_size;
- uint8_t *outbuf;
- AVCodecContext *avctx_enc;
- AVFrame *frame;
- AVFrame *frame_dec;
-};
-
-static void filter(struct vf_priv_s *p, uint8_t *dst[3], uint8_t *src[3], int dst_stride[3], int src_stride[3], int width, int height){
- int x, y, i;
-
- for(i=0; i<3; i++){
- p->frame->data[i]= src[i];
- p->frame->linesize[i]= src_stride[i];
- }
-
- p->avctx_enc->me_cmp=
- p->avctx_enc->me_sub_cmp= FF_CMP_SAD /*| (p->parity ? FF_CMP_ODD : FF_CMP_EVEN)*/;
- p->frame->quality= p->qp*FF_QP2LAMBDA;
- avcodec_encode_video(p->avctx_enc, p->outbuf, p->outbuf_size, p->frame);
- p->frame_dec = p->avctx_enc->coded_frame;
-
- for(i=0; i<3; i++){
- int is_chroma= !!i;
- int w= width >>is_chroma;
- int h= height>>is_chroma;
- int fils= p->frame_dec->linesize[i];
- int srcs= src_stride[i];
-
- for(y=0; y<h; y++){
- if((y ^ p->parity) & 1){
- for(x=0; x<w; x++){
- if((x-2)+(y-1)*w>=0 && (x+2)+(y+1)*w<w*h){ //FIXME either alloc larger images or optimize this
- uint8_t *filp= &p->frame_dec->data[i][x + y*fils];
- uint8_t *srcp= &src[i][x + y*srcs];
- int diff0= filp[-fils] - srcp[-srcs];
- int diff1= filp[+fils] - srcp[+srcs];
- int spatial_score= ABS(srcp[-srcs-1] - srcp[+srcs-1])
- +ABS(srcp[-srcs ] - srcp[+srcs ])
- +ABS(srcp[-srcs+1] - srcp[+srcs+1]) - 1;
- int temp= filp[0];
-
-#define CHECK(j)\
- { int score= ABS(srcp[-srcs-1+(j)] - srcp[+srcs-1-(j)])\
- + ABS(srcp[-srcs +(j)] - srcp[+srcs -(j)])\
- + ABS(srcp[-srcs+1+(j)] - srcp[+srcs+1-(j)]);\
- if(score < spatial_score){\
- spatial_score= score;\
- diff0= filp[-fils+(j)] - srcp[-srcs+(j)];\
- diff1= filp[+fils-(j)] - srcp[+srcs-(j)];
-
- CHECK(-1) CHECK(-2) }} }}
- CHECK( 1) CHECK( 2) }} }}
-#if 0
- if((diff0 ^ diff1) > 0){
- int mindiff= ABS(diff0) > ABS(diff1) ? diff1 : diff0;
- temp-= mindiff;
- }
-#elif 1
- if(diff0 + diff1 > 0)
- temp-= (diff0 + diff1 - ABS( ABS(diff0) - ABS(diff1) )/2)/2;
- else
- temp-= (diff0 + diff1 + ABS( ABS(diff0) - ABS(diff1) )/2)/2;
-#else
- temp-= (diff0 + diff1)/2;
-#endif
-#if 1
- filp[0]=
- dst[i][x + y*dst_stride[i]]= temp > 255U ? ~(temp>>31) : temp;
-#else
- dst[i][x + y*dst_stride[i]]= filp[0];
- filp[0]= temp > 255U ? ~(temp>>31) : temp;
-#endif
- }else
- dst[i][x + y*dst_stride[i]]= p->frame_dec->data[i][x + y*fils];
- }
- }
- }
- for(y=0; y<h; y++){
- if(!((y ^ p->parity) & 1)){
- for(x=0; x<w; x++){
-#if 1
- p->frame_dec->data[i][x + y*fils]=
- dst[i][x + y*dst_stride[i]]= src[i][x + y*srcs];
-#else
- dst[i][x + y*dst_stride[i]]= p->frame_dec->data[i][x + y*fils];
- p->frame_dec->data[i][x + y*fils]= src[i][x + y*srcs];
-#endif
- }
- }
- }
- }
- p->parity ^= 1;
-
-}
-
-static int config(struct vf_instance *vf,
- int width, int height, int d_width, int d_height,
- unsigned int flags, unsigned int outfmt){
- int i;
- AVCodec *enc= avcodec_find_encoder(AV_CODEC_ID_SNOW);
-
- for(i=0; i<3; i++){
- AVCodecContext *avctx_enc;
- AVDictionary *opts = NULL;
-#if 0
- int is_chroma= !!i;
- int w= ((width + 31) & (~31))>>is_chroma;
- int h= ((height + 31) & (~31))>>is_chroma;
-
- vf->priv->temp_stride[i]= w;
- vf->priv->temp[i]= malloc(vf->priv->temp_stride[i]*h*sizeof(int16_t));
- vf->priv->src [i]= malloc(vf->priv->temp_stride[i]*h*sizeof(uint8_t));
-#endif
- avctx_enc=
- vf->priv->avctx_enc= avcodec_alloc_context3(enc);
- avctx_enc->width = width;
- avctx_enc->height = height;
- avctx_enc->time_base= (AVRational){1,25}; // meaningless
- avctx_enc->gop_size = 300;
- avctx_enc->max_b_frames= 0;
- avctx_enc->pix_fmt = AV_PIX_FMT_YUV420P;
- avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
- avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
- avctx_enc->global_quality= 1;
- av_dict_set(&opts, "memc_only", "1", 0);
- avctx_enc->me_cmp=
- avctx_enc->me_sub_cmp= FF_CMP_SAD; //SSE;
- avctx_enc->mb_cmp= FF_CMP_SSE;
-
- switch(vf->priv->mode){
- case 3:
- avctx_enc->refs= 3;
- case 2:
- avctx_enc->me_method= ME_ITER;
- case 1:
- avctx_enc->flags |= CODEC_FLAG_4MV;
- avctx_enc->dia_size=2;
-// avctx_enc->mb_decision = MB_DECISION_RD;
- case 0:
- avctx_enc->flags |= CODEC_FLAG_QPEL;
- }
-
- avcodec_open2(avctx_enc, enc, &opts);
- av_dict_free(&opts);
-
- }
- vf->priv->frame= avcodec_alloc_frame();
-
- vf->priv->outbuf_size= width*height*10;
- vf->priv->outbuf= malloc(vf->priv->outbuf_size);
-
- return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
-}
-
-static void get_image(struct vf_instance *vf, mp_image_t *mpi){
- if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
-return; //caused problems, dunno why
- // ok, we can do pp in-place (or pp disabled):
- vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
- mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height);
- mpi->planes[0]=vf->dmpi->planes[0];
- mpi->stride[0]=vf->dmpi->stride[0];
- mpi->width=vf->dmpi->width;
- if(mpi->flags&MP_IMGFLAG_PLANAR){
- mpi->planes[1]=vf->dmpi->planes[1];
- mpi->planes[2]=vf->dmpi->planes[2];
- mpi->stride[1]=vf->dmpi->stride[1];
- mpi->stride[2]=vf->dmpi->stride[2];
- }
- mpi->flags|=MP_IMGFLAG_DIRECT;
-}
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
- mp_image_t *dmpi;
-
- if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
- // no DR, so get a new image! hope we'll get DR buffer:
- dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
- MP_IMGTYPE_TEMP,
- MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
- mpi->width,mpi->height);
- ff_vf_clone_mpi_attributes(dmpi, mpi);
- }else{
- dmpi=vf->dmpi;
- }
-
- filter(vf->priv, dmpi->planes, mpi->planes, dmpi->stride, mpi->stride, mpi->w, mpi->h);
-
- return ff_vf_next_put_image(vf,dmpi, pts);
-}
-
-static void uninit(struct vf_instance *vf){
- if(!vf->priv) return;
-
-#if 0
- for(i=0; i<3; i++){
- free(vf->priv->temp[i]);
- vf->priv->temp[i]= NULL;
- free(vf->priv->src[i]);
- vf->priv->src[i]= NULL;
- }
-#endif
- if (vf->priv->avctx_enc) {
- avcodec_close(vf->priv->avctx_enc);
- av_freep(&vf->priv->avctx_enc);
- }
-
- free(vf->priv->outbuf);
- free(vf->priv);
- vf->priv=NULL;
-}
-
-//===========================================================================//
-static int query_format(struct vf_instance *vf, unsigned int fmt){
- switch(fmt){
- case IMGFMT_YV12:
- case IMGFMT_I420:
- case IMGFMT_IYUV:
- case IMGFMT_Y800:
- case IMGFMT_Y8:
- return ff_vf_next_query_format(vf,fmt);
- }
- return 0;
-}
-
-static int vf_open(vf_instance_t *vf, char *args){
-
- vf->config=config;
- vf->put_image=put_image;
- vf->get_image=get_image;
- vf->query_format=query_format;
- vf->uninit=uninit;
- vf->priv=malloc(sizeof(struct vf_priv_s));
- memset(vf->priv, 0, sizeof(struct vf_priv_s));
-
- ff_init_avcodec();
-
- vf->priv->mode=0;
- vf->priv->parity= -1;
- vf->priv->qp=1;
-
- if (args) sscanf(args, "%d:%d:%d", &vf->priv->mode, &vf->priv->parity, &vf->priv->qp);
-
- return 1;
-}
-
-const vf_info_t ff_vf_info_mcdeint = {
- "motion compensating deinterlacer",
- "mcdeint",
- "Michael Niedermayer",
- "",
- vf_open,
- NULL
-};
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_noise.c b/ffmpeg/libavfilter/libmpcodecs/vf_noise.c
deleted file mode 100644
index 3b946e9..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_noise.c
+++ /dev/null
@@ -1,475 +0,0 @@
-/*
- * Copyright (C) 2002 Michael Niedermayer <michaelni@gmx.at>
- *
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <inttypes.h>
-#include <math.h>
-
-#include "config.h"
-#include "mp_msg.h"
-#include "cpudetect.h"
-
-#if HAVE_MALLOC_H
-#include <malloc.h>
-#endif
-
-#include "img_format.h"
-#include "mp_image.h"
-#include "vf.h"
-#include "libvo/fastmemcpy.h"
-#include "libavutil/mem.h"
-#include "libavutil/x86/asm.h"
-
-#define MAX_NOISE 4096
-#define MAX_SHIFT 1024
-#define MAX_RES (MAX_NOISE-MAX_SHIFT)
-
-//===========================================================================//
-
-static inline void lineNoise_C(uint8_t *dst, uint8_t *src, int8_t *noise, int len, int shift);
-static inline void lineNoiseAvg_C(uint8_t *dst, uint8_t *src, int len, int8_t **shift);
-
-static void (*lineNoise)(uint8_t *dst, uint8_t *src, int8_t *noise, int len, int shift)= lineNoise_C;
-static void (*lineNoiseAvg)(uint8_t *dst, uint8_t *src, int len, int8_t **shift)= lineNoiseAvg_C;
-
-typedef struct FilterParam{
- int strength;
- int uniform;
- int temporal;
- int quality;
- int averaged;
- int pattern;
- int shiftptr;
- int8_t *noise;
- int8_t *prev_shift[MAX_RES][3];
-}FilterParam;
-
-struct vf_priv_s {
- FilterParam lumaParam;
- FilterParam chromaParam;
- unsigned int outfmt;
-};
-
-static int nonTempRandShift_init;
-static int nonTempRandShift[MAX_RES];
-
-static int patt[4] = {
- -1,0,1,0
-};
-
-#define RAND_N(range) ((int) ((double)range*rand()/(RAND_MAX+1.0)))
-static int8_t *initNoise(FilterParam *fp){
- int strength= fp->strength;
- int uniform= fp->uniform;
- int averaged= fp->averaged;
- int pattern= fp->pattern;
- int8_t *noise= av_malloc(MAX_NOISE*sizeof(int8_t));
- int i, j;
-
- srand(123457);
-
- for(i=0,j=0; i<MAX_NOISE; i++,j++)
- {
- if(uniform) {
- if (averaged) {
- if (pattern) {
- noise[i]= (RAND_N(strength) - strength/2)/6
- +patt[j%4]*strength*0.25/3;
- } else {
- noise[i]= (RAND_N(strength) - strength/2)/3;
- }
- } else {
- if (pattern) {
- noise[i]= (RAND_N(strength) - strength/2)/2
- + patt[j%4]*strength*0.25;
- } else {
- noise[i]= RAND_N(strength) - strength/2;
- }
- }
- } else {
- double x1, x2, w, y1;
- do {
- x1 = 2.0 * rand()/(float)RAND_MAX - 1.0;
- x2 = 2.0 * rand()/(float)RAND_MAX - 1.0;
- w = x1 * x1 + x2 * x2;
- } while ( w >= 1.0 );
-
- w = sqrt( (-2.0 * log( w ) ) / w );
- y1= x1 * w;
- y1*= strength / sqrt(3.0);
- if (pattern) {
- y1 /= 2;
- y1 += patt[j%4]*strength*0.35;
- }
- if (y1<-128) y1=-128;
- else if(y1> 127) y1= 127;
- if (averaged) y1 /= 3.0;
- noise[i]= (int)y1;
- }
- if (RAND_N(6) == 0) j--;
- }
-
-
- for (i = 0; i < MAX_RES; i++)
- for (j = 0; j < 3; j++)
- fp->prev_shift[i][j] = noise + (rand()&(MAX_SHIFT-1));
-
- if(!nonTempRandShift_init){
- for(i=0; i<MAX_RES; i++){
- nonTempRandShift[i]= rand()&(MAX_SHIFT-1);
- }
- nonTempRandShift_init = 1;
- }
-
- fp->noise= noise;
- fp->shiftptr= 0;
- return noise;
-}
-
-/***************************************************************************/
-
-#if HAVE_MMX
-static inline void lineNoise_MMX(uint8_t *dst, uint8_t *src, int8_t *noise, int len, int shift){
- x86_reg mmx_len= len&(~7);
- noise+=shift;
-
- __asm__ volatile(
- "mov %3, %%"REG_a" \n\t"
- "pcmpeqb %%mm7, %%mm7 \n\t"
- "psllw $15, %%mm7 \n\t"
- "packsswb %%mm7, %%mm7 \n\t"
- ASMALIGN(4)
- "1: \n\t"
- "movq (%0, %%"REG_a"), %%mm0 \n\t"
- "movq (%1, %%"REG_a"), %%mm1 \n\t"
- "pxor %%mm7, %%mm0 \n\t"
- "paddsb %%mm1, %%mm0 \n\t"
- "pxor %%mm7, %%mm0 \n\t"
- "movq %%mm0, (%2, %%"REG_a") \n\t"
- "add $8, %%"REG_a" \n\t"
- " js 1b \n\t"
- :: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len)
- : "%"REG_a
- );
- if(mmx_len!=len)
- lineNoise_C(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0);
-}
-#endif
-
-//duplicate of previous except movntq
-#if HAVE_MMX2
-static inline void lineNoise_MMX2(uint8_t *dst, uint8_t *src, int8_t *noise, int len, int shift){
- x86_reg mmx_len= len&(~7);
- noise+=shift;
-
- __asm__ volatile(
- "mov %3, %%"REG_a" \n\t"
- "pcmpeqb %%mm7, %%mm7 \n\t"
- "psllw $15, %%mm7 \n\t"
- "packsswb %%mm7, %%mm7 \n\t"
- ASMALIGN(4)
- "1: \n\t"
- "movq (%0, %%"REG_a"), %%mm0 \n\t"
- "movq (%1, %%"REG_a"), %%mm1 \n\t"
- "pxor %%mm7, %%mm0 \n\t"
- "paddsb %%mm1, %%mm0 \n\t"
- "pxor %%mm7, %%mm0 \n\t"
- "movntq %%mm0, (%2, %%"REG_a") \n\t"
- "add $8, %%"REG_a" \n\t"
- " js 1b \n\t"
- :: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len)
- : "%"REG_a
- );
- if(mmx_len!=len)
- lineNoise_C(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0);
-}
-#endif
-
-static inline void lineNoise_C(uint8_t *dst, uint8_t *src, int8_t *noise, int len, int shift){
- int i;
- noise+= shift;
- for(i=0; i<len; i++)
- {
- int v= src[i]+ noise[i];
- if(v>255) dst[i]=255; //FIXME optimize
- else if(v<0) dst[i]=0;
- else dst[i]=v;
- }
-}
-
-/***************************************************************************/
-
-#if HAVE_MMX
-static inline void lineNoiseAvg_MMX(uint8_t *dst, uint8_t *src, int len, int8_t **shift){
- x86_reg mmx_len= len&(~7);
-
- __asm__ volatile(
- "mov %5, %%"REG_a" \n\t"
- ASMALIGN(4)
- "1: \n\t"
- "movq (%1, %%"REG_a"), %%mm1 \n\t"
- "movq (%0, %%"REG_a"), %%mm0 \n\t"
- "paddb (%2, %%"REG_a"), %%mm1 \n\t"
- "paddb (%3, %%"REG_a"), %%mm1 \n\t"
- "movq %%mm0, %%mm2 \n\t"
- "movq %%mm1, %%mm3 \n\t"
- "punpcklbw %%mm0, %%mm0 \n\t"
- "punpckhbw %%mm2, %%mm2 \n\t"
- "punpcklbw %%mm1, %%mm1 \n\t"
- "punpckhbw %%mm3, %%mm3 \n\t"
- "pmulhw %%mm0, %%mm1 \n\t"
- "pmulhw %%mm2, %%mm3 \n\t"
- "paddw %%mm1, %%mm1 \n\t"
- "paddw %%mm3, %%mm3 \n\t"
- "paddw %%mm0, %%mm1 \n\t"
- "paddw %%mm2, %%mm3 \n\t"
- "psrlw $8, %%mm1 \n\t"
- "psrlw $8, %%mm3 \n\t"
- "packuswb %%mm3, %%mm1 \n\t"
- "movq %%mm1, (%4, %%"REG_a") \n\t"
- "add $8, %%"REG_a" \n\t"
- " js 1b \n\t"
- :: "r" (src+mmx_len), "r" (shift[0]+mmx_len), "r" (shift[1]+mmx_len), "r" (shift[2]+mmx_len),
- "r" (dst+mmx_len), "g" (-mmx_len)
- : "%"REG_a
- );
-
- if(mmx_len!=len){
- int8_t *shift2[3]={shift[0]+mmx_len, shift[1]+mmx_len, shift[2]+mmx_len};
- lineNoiseAvg_C(dst+mmx_len, src+mmx_len, len-mmx_len, shift2);
- }
-}
-#endif
-
-static inline void lineNoiseAvg_C(uint8_t *dst, uint8_t *src, int len, int8_t **shift){
- int i;
- int8_t *src2= (int8_t*)src;
-
- for(i=0; i<len; i++)
- {
- const int n= shift[0][i] + shift[1][i] + shift[2][i];
- dst[i]= src2[i]+((n*src2[i])>>7);
- }
-}
-
-/***************************************************************************/
-
-static void noise(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int width, int height, FilterParam *fp){
- int8_t *noise= fp->noise;
- int y;
- int shift=0;
-
- if(!noise)
- {
- if(src==dst) return;
-
- if(dstStride==srcStride) fast_memcpy(dst, src, srcStride*height);
- else
- {
- for(y=0; y<height; y++)
- {
- fast_memcpy(dst, src, width);
- dst+= dstStride;
- src+= srcStride;
- }
- }
- return;
- }
-
- for(y=0; y<height; y++)
- {
- if(fp->temporal) shift= rand()&(MAX_SHIFT -1);
- else shift= nonTempRandShift[y];
-
- if(fp->quality==0) shift&= ~7;
- if (fp->averaged) {
- lineNoiseAvg(dst, src, width, fp->prev_shift[y]);
- fp->prev_shift[y][fp->shiftptr] = noise + shift;
- } else {
- lineNoise(dst, src, noise, width, shift);
- }
- dst+= dstStride;
- src+= srcStride;
- }
- fp->shiftptr++;
- if (fp->shiftptr == 3) fp->shiftptr = 0;
-}
-
-static int config(struct vf_instance *vf,
- int width, int height, int d_width, int d_height,
- unsigned int flags, unsigned int outfmt){
-
- return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
-}
-
-static void get_image(struct vf_instance *vf, mp_image_t *mpi){
- if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
- if(mpi->imgfmt!=vf->priv->outfmt) return; // colorspace differ
- // ok, we can do pp in-place (or pp disabled):
- vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
- mpi->type, mpi->flags, mpi->w, mpi->h);
- mpi->planes[0]=vf->dmpi->planes[0];
- mpi->stride[0]=vf->dmpi->stride[0];
- mpi->width=vf->dmpi->width;
- if(mpi->flags&MP_IMGFLAG_PLANAR){
- mpi->planes[1]=vf->dmpi->planes[1];
- mpi->planes[2]=vf->dmpi->planes[2];
- mpi->stride[1]=vf->dmpi->stride[1];
- mpi->stride[2]=vf->dmpi->stride[2];
- }
- mpi->flags|=MP_IMGFLAG_DIRECT;
-}
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
- mp_image_t *dmpi;
-
- if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
- // no DR, so get a new image! hope we'll get DR buffer:
- vf->dmpi=ff_vf_get_image(vf->next,vf->priv->outfmt,
- MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE,
- mpi->w,mpi->h);
-//printf("nodr\n");
- }
-//else printf("dr\n");
- dmpi= vf->dmpi;
-
- noise(dmpi->planes[0], mpi->planes[0], dmpi->stride[0], mpi->stride[0], mpi->w, mpi->h, &vf->priv->lumaParam);
- noise(dmpi->planes[1], mpi->planes[1], dmpi->stride[1], mpi->stride[1], mpi->w/2, mpi->h/2, &vf->priv->chromaParam);
- noise(dmpi->planes[2], mpi->planes[2], dmpi->stride[2], mpi->stride[2], mpi->w/2, mpi->h/2, &vf->priv->chromaParam);
-
- ff_vf_clone_mpi_attributes(dmpi, mpi);
-
-#if HAVE_MMX
- if(ff_gCpuCaps.hasMMX) __asm__ volatile ("emms\n\t");
-#endif
-#if HAVE_MMX2
- if(ff_gCpuCaps.hasMMX2) __asm__ volatile ("sfence\n\t");
-#endif
-
- return ff_vf_next_put_image(vf,dmpi, pts);
-}
-
-static void uninit(struct vf_instance *vf){
- if(!vf->priv) return;
-
- av_free(vf->priv->chromaParam.noise);
- vf->priv->chromaParam.noise= NULL;
-
- av_free(vf->priv->lumaParam.noise);
- vf->priv->lumaParam.noise= NULL;
-
- free(vf->priv);
- vf->priv=NULL;
-}
-
-//===========================================================================//
-
-static int query_format(struct vf_instance *vf, unsigned int fmt){
- switch(fmt)
- {
- case IMGFMT_YV12:
- case IMGFMT_I420:
- case IMGFMT_IYUV:
- return ff_vf_next_query_format(vf,vf->priv->outfmt);
- }
- return 0;
-}
-
-static void parse(FilterParam *fp, char* args){
- char *pos;
- char *max= strchr(args, ':');
-
- if(!max) max= args + strlen(args);
-
- fp->strength= atoi(args);
- pos= strchr(args, 'u');
- if(pos && pos<max) fp->uniform=1;
- pos= strchr(args, 't');
- if(pos && pos<max) fp->temporal=1;
- pos= strchr(args, 'h');
- if(pos && pos<max) fp->quality=1;
- pos= strchr(args, 'p');
- if(pos && pos<max) fp->pattern=1;
- pos= strchr(args, 'a');
- if(pos && pos<max) {
- fp->temporal=1;
- fp->averaged=1;
- }
-
- if(fp->strength) initNoise(fp);
-}
-
-static const unsigned int fmt_list[]={
- IMGFMT_YV12,
- IMGFMT_I420,
- IMGFMT_IYUV,
- 0
-};
-
-static int vf_open(vf_instance_t *vf, char *args){
- vf->config=config;
- vf->put_image=put_image;
- vf->get_image=get_image;
- vf->query_format=query_format;
- vf->uninit=uninit;
- vf->priv=malloc(sizeof(struct vf_priv_s));
- memset(vf->priv, 0, sizeof(struct vf_priv_s));
- if(args)
- {
- char *arg2= strchr(args,':');
- if(arg2) parse(&vf->priv->chromaParam, arg2+1);
- parse(&vf->priv->lumaParam, args);
- }
-
- // check csp:
- vf->priv->outfmt=ff_vf_match_csp(&vf->next,fmt_list,IMGFMT_YV12);
- if(!vf->priv->outfmt)
- {
- uninit(vf);
- return 0; // no csp match :(
- }
-
-
-#if HAVE_MMX
- if(ff_gCpuCaps.hasMMX){
- lineNoise= lineNoise_MMX;
- lineNoiseAvg= lineNoiseAvg_MMX;
- }
-#endif
-#if HAVE_MMX2
- if(ff_gCpuCaps.hasMMX2) lineNoise= lineNoise_MMX2;
-// if(ff_gCpuCaps.hasMMX) lineNoiseAvg= lineNoiseAvg_MMX2;
-#endif
-
- return 1;
-}
-
-const vf_info_t ff_vf_info_noise = {
- "noise generator",
- "noise",
- "Michael Niedermayer",
- "",
- vf_open,
- NULL
-};
-
-//===========================================================================//
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_ow.c b/ffmpeg/libavfilter/libmpcodecs/vf_ow.c
deleted file mode 100644
index 69b07ef..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_ow.c
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at>
- *
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-/**
- * @todo try to change to int
- * @todo try lifting based implementation
- * @todo optimize optimize optimize
- * @todo hard tresholding
- * @todo use QP to decide filter strength
- * @todo wavelet normalization / least squares optimal signal vs. noise thresholds
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <inttypes.h>
-#include <math.h>
-
-#include "mp_msg.h"
-#include "img_format.h"
-#include "mp_image.h"
-#include "vf.h"
-
-//===========================================================================//
-static const uint8_t __attribute__((aligned(8))) dither[8][8]={
-{ 0, 48, 12, 60, 3, 51, 15, 63, },
-{ 32, 16, 44, 28, 35, 19, 47, 31, },
-{ 8, 56, 4, 52, 11, 59, 7, 55, },
-{ 40, 24, 36, 20, 43, 27, 39, 23, },
-{ 2, 50, 14, 62, 1, 49, 13, 61, },
-{ 34, 18, 46, 30, 33, 17, 45, 29, },
-{ 10, 58, 6, 54, 9, 57, 5, 53, },
-{ 42, 26, 38, 22, 41, 25, 37, 21, },
-};
-//FIXME the above is duplicated in many filters
-
-struct vf_priv_s {
- float strength[2];
- float delta;
- int mode;
- int depth;
- float *plane[16][4];
- int stride;
-};
-
-#define S 1.41421356237 //sqrt(2)
-
-static const double coeff[2][5]={
- {
- 0.6029490182363579 *S,
- 0.2668641184428723 *S,
- -0.07822326652898785 *S,
- -0.01686411844287495 *S,
- 0.02674875741080976 *S
- },{
- 1.115087052456994 /S,
- -0.5912717631142470 /S,
- -0.05754352622849957 /S,
- 0.09127176311424948 /S
- }
-};
-
-static const double icoeff[2][5]={
- {
- 1.115087052456994 /S,
- 0.5912717631142470 /S,
- -0.05754352622849957 /S,
- -0.09127176311424948 /S
- },{
- 0.6029490182363579 *S,
- -0.2668641184428723 *S,
- -0.07822326652898785 *S,
- 0.01686411844287495 *S,
- 0.02674875741080976 *S
- }
-};
-#undef S
-
-static inline int mirror(int x, int w){
- while((unsigned)x > (unsigned)w){
- x=-x;
- if(x<0) x+= 2*w;
- }
- return x;
-}
-
-static inline void decompose(float *dstL, float *dstH, float *src, int stride, int w){
- int x, i;
- for(x=0; x<w; x++){
- double sumL= src[x*stride] * coeff[0][0];
- double sumH= src[x*stride] * coeff[1][0];
- for(i=1; i<=4; i++){
- double s= (src[mirror(x-i, w-1)*stride] + src[mirror(x+i, w-1)*stride]);
-
- sumL+= coeff[0][i]*s;
- sumH+= coeff[1][i]*s;
- }
- dstL[x*stride]= sumL;
- dstH[x*stride]= sumH;
- }
-}
-
-static inline void compose(float *dst, float *srcL, float *srcH, int stride, int w){
- int x, i;
- for(x=0; x<w; x++){
- double sumL= srcL[x*stride] * icoeff[0][0];
- double sumH= srcH[x*stride] * icoeff[1][0];
- for(i=1; i<=4; i++){
- int x0= mirror(x-i, w-1)*stride;
- int x1= mirror(x+i, w-1)*stride;
-
- sumL+= icoeff[0][i]*(srcL[x0] + srcL[x1]);
- sumH+= icoeff[1][i]*(srcH[x0] + srcH[x1]);
- }
- dst[x*stride]= (sumL + sumH)*0.5;
- }
-}
-
-static inline void decompose2D(float *dstL, float *dstH, float *src, int xstride, int ystride, int step, int w, int h){
- int y, x;
- for(y=0; y<h; y++)
- for(x=0; x<step; x++)
- decompose(dstL + ystride*y + xstride*x, dstH + ystride*y + xstride*x, src + ystride*y + xstride*x, step*xstride, (w-x+step-1)/step);
-}
-
-static inline void compose2D(float *dst, float *srcL, float *srcH, int xstride, int ystride, int step, int w, int h){
- int y, x;
- for(y=0; y<h; y++)
- for(x=0; x<step; x++)
- compose(dst + ystride*y + xstride*x, srcL + ystride*y + xstride*x, srcH + ystride*y + xstride*x, step*xstride, (w-x+step-1)/step);
-}
-
-static void decompose2D2(float *dst[4], float *src, float *temp[2], int stride, int step, int w, int h){
- decompose2D(temp[0], temp[1], src , 1, stride, step , w, h);
- decompose2D( dst[0], dst[1], temp[0], stride, 1, step , h, w);
- decompose2D( dst[2], dst[3], temp[1], stride, 1, step , h, w);
-}
-
-static void compose2D2(float *dst, float *src[4], float *temp[2], int stride, int step, int w, int h){
- compose2D(temp[0], src[0], src[1], stride, 1, step , h, w);
- compose2D(temp[1], src[2], src[3], stride, 1, step , h, w);
- compose2D(dst , temp[0], temp[1], 1, stride, step , w, h);
-}
-
-static void filter(struct vf_priv_s *p, uint8_t *dst, uint8_t *src, int dst_stride, int src_stride, int width, int height, int is_luma){
- int x,y, i, j;
-// double sum=0;
- double s= p->strength[!is_luma];
- int depth= p->depth;
-
- while(1<<depth > width || 1<<depth > height)
- depth--;
-
- for(y=0; y<height; y++)
- for(x=0; x<width; x++)
- p->plane[0][0][x + y*p->stride]= src[x + y*src_stride];
-
- for(i=0; i<depth; i++){
- decompose2D2(p->plane[i+1], p->plane[i][0], p->plane[0]+1,p->stride, 1<<i, width, height);
- }
- for(i=0; i<depth; i++){
- for(j=1; j<4; j++){
- for(y=0; y<height; y++){
- for(x=0; x<width; x++){
- double v= p->plane[i+1][j][x + y*p->stride];
- if (v> s) v-=s;
- else if(v<-s) v+=s;
- else v =0;
- p->plane[i+1][j][x + y*p->stride]= v;
- }
- }
- }
- }
- for(i=depth-1; i>=0; i--){
- compose2D2(p->plane[i][0], p->plane[i+1], p->plane[0]+1, p->stride, 1<<i, width, height);
- }
-
- for(y=0; y<height; y++)
- for(x=0; x<width; x++){
- i= p->plane[0][0][x + y*p->stride] + dither[x&7][y&7]*(1.0/64) + 1.0/128; //yes the rounding is insane but optimal :)
-// double e= i - src[x + y*src_stride];
-// sum += e*e;
- if((unsigned)i > 255U) i= ~(i>>31);
- dst[x + y*dst_stride]= i;
- }
-
-// printf("%f\n", sum/height/width);
-}
-
-static int config(struct vf_instance *vf, int width, int height, int d_width, int d_height, unsigned int flags, unsigned int outfmt){
- int h= (height+15)&(~15);
- int i,j;
-
- vf->priv->stride= (width+15)&(~15);
- for(j=0; j<4; j++){
- for(i=0; i<=vf->priv->depth; i++)
- vf->priv->plane[i][j]= malloc(vf->priv->stride*h*sizeof(vf->priv->plane[0][0][0]));
- }
-
- return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
-}
-
-static void get_image(struct vf_instance *vf, mp_image_t *mpi){
- if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
- // ok, we can do pp in-place (or pp disabled):
- vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
- mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height);
- mpi->planes[0]=vf->dmpi->planes[0];
- mpi->stride[0]=vf->dmpi->stride[0];
- mpi->width=vf->dmpi->width;
- if(mpi->flags&MP_IMGFLAG_PLANAR){
- mpi->planes[1]=vf->dmpi->planes[1];
- mpi->planes[2]=vf->dmpi->planes[2];
- mpi->stride[1]=vf->dmpi->stride[1];
- mpi->stride[2]=vf->dmpi->stride[2];
- }
- mpi->flags|=MP_IMGFLAG_DIRECT;
-}
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
- mp_image_t *dmpi;
-
- if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
- // no DR, so get a new image! hope we'll get DR buffer:
- dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
- MP_IMGTYPE_TEMP,
- MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
- mpi->width,mpi->height);
- ff_vf_clone_mpi_attributes(dmpi, mpi);
- }else{
- dmpi=vf->dmpi;
- }
-
- filter(vf->priv, dmpi->planes[0], mpi->planes[0], dmpi->stride[0], mpi->stride[0], mpi->w, mpi->h, 1);
- filter(vf->priv, dmpi->planes[1], mpi->planes[1], dmpi->stride[1], mpi->stride[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, 0);
- filter(vf->priv, dmpi->planes[2], mpi->planes[2], dmpi->stride[2], mpi->stride[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, 0);
-
- return ff_vf_next_put_image(vf,dmpi, pts);
-}
-
-static void uninit(struct vf_instance *vf){
- int i,j;
- if(!vf->priv) return;
-
- for(j=0; j<4; j++){
- for(i=0; i<16; i++){
- free(vf->priv->plane[i][j]);
- vf->priv->plane[i][j]= NULL;
- }
- }
-
- free(vf->priv);
- vf->priv=NULL;
-}
-
-//===========================================================================//
-static int query_format(struct vf_instance *vf, unsigned int fmt){
- switch(fmt){
- case IMGFMT_YVU9:
- case IMGFMT_IF09:
- case IMGFMT_YV12:
- case IMGFMT_I420:
- case IMGFMT_IYUV:
- case IMGFMT_CLPL:
- case IMGFMT_Y800:
- case IMGFMT_Y8:
- case IMGFMT_444P:
- case IMGFMT_422P:
- case IMGFMT_411P:
- return ff_vf_next_query_format(vf,fmt);
- }
- return 0;
-}
-
-
-static int vf_open(vf_instance_t *vf, char *args){
- vf->config=config;
- vf->put_image=put_image;
- vf->get_image=get_image;
- vf->query_format=query_format;
- vf->uninit=uninit;
- vf->priv=malloc(sizeof(struct vf_priv_s));
- memset(vf->priv, 0, sizeof(struct vf_priv_s));
-
- vf->priv->depth= 8;
- vf->priv->strength[0]= 1.0;
- vf->priv->strength[1]= 1.0;
- vf->priv->delta= 1.0;
-
- if (args) sscanf(args, "%d:%f:%f:%d:%f", &vf->priv->depth,
- &vf->priv->strength[0],
- &vf->priv->strength[1],
- &vf->priv->mode,
- &vf->priv->delta);
-
- return 1;
-}
-
-const vf_info_t ff_vf_info_ow = {
- "overcomplete wavelet denoiser",
- "ow",
- "Michael Niedermayer",
- "",
- vf_open,
- NULL
-};
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_perspective.c b/ffmpeg/libavfilter/libmpcodecs/vf_perspective.c
deleted file mode 100644
index aed5c4d..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_perspective.c
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * Copyright (C) 2002 Michael Niedermayer <michaelni@gmx.at>
- *
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <inttypes.h>
-#include <assert.h>
-#include <math.h>
-
-#include "config.h"
-#include "mp_msg.h"
-
-#if HAVE_MALLOC_H
-#include <malloc.h>
-#endif
-
-#include "libavutil/mem.h"
-
-#include "img_format.h"
-#include "mp_image.h"
-#include "vf.h"
-
-#define SUB_PIXEL_BITS 8
-#define SUB_PIXELS (1<<SUB_PIXEL_BITS)
-#define COEFF_BITS 11
-
-//===========================================================================//
-
-struct vf_priv_s {
- double ref[4][2];
- int32_t coeff[1<<SUB_PIXEL_BITS][4];
- int32_t (*pv)[2];
- int pvStride;
- int cubic;
-};
-
-
-/***************************************************************************/
-
-static void initPv(struct vf_priv_s *priv, int W, int H){
- double a,b,c,d,e,f,g,h,D;
- double (*ref)[2]= priv->ref;
- int x,y;
-
- g= ( (ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0])*(ref[2][1] - ref[3][1])
- - (ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1])*(ref[2][0] - ref[3][0]))*H;
- h= ( (ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1])*(ref[1][0] - ref[3][0])
- - (ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0])*(ref[1][1] - ref[3][1]))*W;
- D= (ref[1][0] - ref[3][0])*(ref[2][1] - ref[3][1])
- - (ref[2][0] - ref[3][0])*(ref[1][1] - ref[3][1]);
-
- a= D*(ref[1][0] - ref[0][0])*H + g*ref[1][0];
- b= D*(ref[2][0] - ref[0][0])*W + h*ref[2][0];
- c= D*ref[0][0]*W*H;
- d= D*(ref[1][1] - ref[0][1])*H + g*ref[1][1];
- e= D*(ref[2][1] - ref[0][1])*W + h*ref[2][1];
- f= D*ref[0][1]*W*H;
-
- for(y=0; y<H; y++){
- for(x=0; x<W; x++){
- int u, v;
-
- u= (int)floor( SUB_PIXELS*(a*x + b*y + c)/(g*x + h*y + D*W*H) + 0.5);
- v= (int)floor( SUB_PIXELS*(d*x + e*y + f)/(g*x + h*y + D*W*H) + 0.5);
-
- priv->pv[x + y*W][0]= u;
- priv->pv[x + y*W][1]= v;
- }
- }
-}
-
-static double getCoeff(double d){
- double A= -0.60;
- double coeff;
-
- d= fabs(d);
-
- // Equation is from VirtualDub
- if(d<1.0)
- coeff = (1.0 - (A+3.0)*d*d + (A+2.0)*d*d*d);
- else if(d<2.0)
- coeff = (-4.0*A + 8.0*A*d - 5.0*A*d*d + A*d*d*d);
- else
- coeff=0.0;
-
- return coeff;
-}
-
-static int config(struct vf_instance *vf,
- int width, int height, int d_width, int d_height,
- unsigned int flags, unsigned int outfmt){
- int i, j;
-
- vf->priv->pvStride= width;
- vf->priv->pv= av_malloc(width*height*2*sizeof(int32_t));
- initPv(vf->priv, width, height);
-
- for(i=0; i<SUB_PIXELS; i++){
- double d= i/(double)SUB_PIXELS;
- double temp[4];
- double sum=0;
-
- for(j=0; j<4; j++)
- temp[j]= getCoeff(j - d - 1);
-
- for(j=0; j<4; j++)
- sum+= temp[j];
-
- for(j=0; j<4; j++)
- vf->priv->coeff[i][j]= (int)floor((1<<COEFF_BITS)*temp[j]/sum + 0.5);
- }
-
- return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
-}
-
-static void uninit(struct vf_instance *vf){
- if(!vf->priv) return;
-
- av_free(vf->priv->pv);
- vf->priv->pv= NULL;
-
- free(vf->priv);
- vf->priv=NULL;
-}
-
-static inline void resampleCubic(uint8_t *dst, uint8_t *src, int w, int h, int dstStride, int srcStride, struct vf_priv_s *privParam, int xShift, int yShift){
- int x, y;
- struct vf_priv_s priv= *privParam;
-
- for(y=0; y<h; y++){
- for(x=0; x<w; x++){
- int u, v, subU, subV, sum, sx, sy;
-
- sx= x << xShift;
- sy= y << yShift;
- u= priv.pv[sx + sy*priv.pvStride][0]>>xShift;
- v= priv.pv[sx + sy*priv.pvStride][1]>>yShift;
- subU= u & (SUB_PIXELS-1);
- subV= v & (SUB_PIXELS-1);
- u >>= SUB_PIXEL_BITS;
- v >>= SUB_PIXEL_BITS;
-
- if(u>0 && v>0 && u<w-2 && v<h-2){
- const int index= u + v*srcStride;
- const int a= priv.coeff[subU][0];
- const int b= priv.coeff[subU][1];
- const int c= priv.coeff[subU][2];
- const int d= priv.coeff[subU][3];
-
- sum=
- priv.coeff[subV][0]*( a*src[index - 1 - srcStride] + b*src[index - 0 - srcStride]
- + c*src[index + 1 - srcStride] + d*src[index + 2 - srcStride])
- +priv.coeff[subV][1]*( a*src[index - 1 ] + b*src[index - 0 ]
- + c*src[index + 1 ] + d*src[index + 2 ])
- +priv.coeff[subV][2]*( a*src[index - 1 + srcStride] + b*src[index - 0 + srcStride]
- + c*src[index + 1 + srcStride] + d*src[index + 2 + srcStride])
- +priv.coeff[subV][3]*( a*src[index - 1+2*srcStride] + b*src[index - 0+2*srcStride]
- + c*src[index + 1+2*srcStride] + d*src[index + 2+2*srcStride]);
- }else{
- int dx, dy;
- sum=0;
-
- for(dy=0; dy<4; dy++){
- int iy= v + dy - 1;
- if (iy< 0) iy=0;
- else if(iy>=h) iy=h-1;
- for(dx=0; dx<4; dx++){
- int ix= u + dx - 1;
- if (ix< 0) ix=0;
- else if(ix>=w) ix=w-1;
-
- sum+= priv.coeff[subU][dx]*priv.coeff[subV][dy]
- *src[ ix + iy*srcStride];
- }
- }
- }
- sum= (sum + (1<<(COEFF_BITS*2-1)) ) >> (COEFF_BITS*2);
- if(sum&~255){
- if(sum<0) sum=0;
- else sum=255;
- }
- dst[ x + y*dstStride]= sum;
- }
- }
-}
-
-static inline void resampleLinear(uint8_t *dst, uint8_t *src, int w, int h, int dstStride, int srcStride,
- struct vf_priv_s *privParam, int xShift, int yShift){
- int x, y;
- struct vf_priv_s priv= *privParam;
-
- for(y=0; y<h; y++){
- for(x=0; x<w; x++){
- int u, v, subU, subV, sum, sx, sy, index, subUI, subVI;
-
- sx= x << xShift;
- sy= y << yShift;
- u= priv.pv[sx + sy*priv.pvStride][0]>>xShift;
- v= priv.pv[sx + sy*priv.pvStride][1]>>yShift;
- subU= u & (SUB_PIXELS-1);
- subV= v & (SUB_PIXELS-1);
- u >>= SUB_PIXEL_BITS;
- v >>= SUB_PIXEL_BITS;
- index= u + v*srcStride;
- subUI= SUB_PIXELS - subU;
- subVI= SUB_PIXELS - subV;
-
- if((unsigned)u < (unsigned)(w - 1)){
- if((unsigned)v < (unsigned)(h - 1)){
- sum= subVI*(subUI*src[index ] + subU*src[index +1])
- +subV *(subUI*src[index+srcStride] + subU*src[index+srcStride+1]);
- sum= (sum + (1<<(SUB_PIXEL_BITS*2-1)) ) >> (SUB_PIXEL_BITS*2);
- }else{
- if(v<0) v= 0;
- else v= h-1;
- index= u + v*srcStride;
- sum= subUI*src[index] + subU*src[index+1];
- sum= (sum + (1<<(SUB_PIXEL_BITS-1)) ) >> SUB_PIXEL_BITS;
- }
- }else{
- if((unsigned)v < (unsigned)(h - 1)){
- if(u<0) u= 0;
- else u= w-1;
- index= u + v*srcStride;
- sum= subVI*src[index] + subV*src[index+srcStride];
- sum= (sum + (1<<(SUB_PIXEL_BITS-1)) ) >> SUB_PIXEL_BITS;
- }else{
- if(u<0) u= 0;
- else u= w-1;
- if(v<0) v= 0;
- else v= h-1;
- index= u + v*srcStride;
- sum= src[index];
- }
- }
- if(sum&~255){
- if(sum<0) sum=0;
- else sum=255;
- }
- dst[ x + y*dstStride]= sum;
- }
- }
-}
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
- int cw= mpi->w >> mpi->chroma_x_shift;
- int ch= mpi->h >> mpi->chroma_y_shift;
-
- mp_image_t *dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
- MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE,
- mpi->w,mpi->h);
-
- assert(mpi->flags&MP_IMGFLAG_PLANAR);
-
- if(vf->priv->cubic){
- resampleCubic(dmpi->planes[0], mpi->planes[0], mpi->w,mpi->h, dmpi->stride[0], mpi->stride[0],
- vf->priv, 0, 0);
- resampleCubic(dmpi->planes[1], mpi->planes[1], cw , ch , dmpi->stride[1], mpi->stride[1],
- vf->priv, mpi->chroma_x_shift, mpi->chroma_y_shift);
- resampleCubic(dmpi->planes[2], mpi->planes[2], cw , ch , dmpi->stride[2], mpi->stride[2],
- vf->priv, mpi->chroma_x_shift, mpi->chroma_y_shift);
- }else{
- resampleLinear(dmpi->planes[0], mpi->planes[0], mpi->w,mpi->h, dmpi->stride[0], mpi->stride[0],
- vf->priv, 0, 0);
- resampleLinear(dmpi->planes[1], mpi->planes[1], cw , ch , dmpi->stride[1], mpi->stride[1],
- vf->priv, mpi->chroma_x_shift, mpi->chroma_y_shift);
- resampleLinear(dmpi->planes[2], mpi->planes[2], cw , ch , dmpi->stride[2], mpi->stride[2],
- vf->priv, mpi->chroma_x_shift, mpi->chroma_y_shift);
- }
-
- return ff_vf_next_put_image(vf,dmpi, pts);
-}
-
-//===========================================================================//
-
-static int query_format(struct vf_instance *vf, unsigned int fmt){
- switch(fmt)
- {
- case IMGFMT_YV12:
- case IMGFMT_I420:
- case IMGFMT_IYUV:
- case IMGFMT_YVU9:
- case IMGFMT_444P:
- case IMGFMT_422P:
- case IMGFMT_411P:
- return ff_vf_next_query_format(vf, fmt);
- }
- return 0;
-}
-
-static int vf_open(vf_instance_t *vf, char *args){
- int e;
-
- vf->config=config;
- vf->put_image=put_image;
-// vf->get_image=get_image;
- vf->query_format=query_format;
- vf->uninit=uninit;
- vf->priv=malloc(sizeof(struct vf_priv_s));
- memset(vf->priv, 0, sizeof(struct vf_priv_s));
-
- if(args==NULL) return 0;
-
- e=sscanf(args, "%lf:%lf:%lf:%lf:%lf:%lf:%lf:%lf:%d",
- &vf->priv->ref[0][0], &vf->priv->ref[0][1],
- &vf->priv->ref[1][0], &vf->priv->ref[1][1],
- &vf->priv->ref[2][0], &vf->priv->ref[2][1],
- &vf->priv->ref[3][0], &vf->priv->ref[3][1],
- &vf->priv->cubic
- );
-
- if(e!=9)
- return 0;
-
- return 1;
-}
-
-const vf_info_t ff_vf_info_perspective = {
- "perspective correcture",
- "perspective",
- "Michael Niedermayer",
- "",
- vf_open,
- NULL
-};
-
-//===========================================================================//
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_phase.c b/ffmpeg/libavfilter/libmpcodecs/vf_phase.c
deleted file mode 100644
index 25abc5b..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_phase.c
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <limits.h>
-
-#include "config.h"
-#include "mp_msg.h"
-
-#include "img_format.h"
-#include "mp_image.h"
-#include "vf.h"
-
-#include "libvo/fastmemcpy.h"
-
-enum mode { PROGRESSIVE, TOP_FIRST, BOTTOM_FIRST,
- TOP_FIRST_ANALYZE, BOTTOM_FIRST_ANALYZE,
- ANALYZE, FULL_ANALYZE, AUTO, AUTO_ANALYZE };
-
-#define fixed_mode(p) ((p)<=BOTTOM_FIRST)
-
-struct vf_priv_s
- {
- enum mode mode;
- int verbose;
- unsigned char *buf[3];
- };
-
-/*
- * Copy fields from either current or buffered previous frame to the
- * output and store the current frame unmodified to the buffer.
- */
-
-static void do_plane(unsigned char *to, unsigned char *from,
- int w, int h, int ts, int fs,
- unsigned char **bufp, enum mode mode)
- {
- unsigned char *buf, *end;
- int top;
-
- if(!*bufp)
- {
- mode=PROGRESSIVE;
- if(!(*bufp=malloc(h*w))) return;
- }
-
- for(end=to+h*ts, buf=*bufp, top=1; to<end; from+=fs, to+=ts, buf+=w, top^=1)
- {
- fast_memcpy(to, mode==(top?BOTTOM_FIRST:TOP_FIRST)?buf:from, w);
- fast_memcpy(buf, from, w);
- }
- }
-
-/*
- * This macro interpolates the value of both fields at a point halfway
- * between lines and takes the squared difference. In field resolution
- * the point is a quarter pixel below a line in one field and a quarter
- * pixel above a line in other.
- *
- * (the result is actually multiplied by 25)
- */
-
-#define diff(a, as, b, bs) (t=((*a-b[bs])<<2)+a[as<<1]-b[-bs], t*t)
-
-/*
- * Find which field combination has the smallest average squared difference
- * between the fields.
- */
-
-static enum mode analyze_plane(unsigned char *old, unsigned char *new,
- int w, int h, int os, int ns, enum mode mode,
- int verbose, int fields)
- {
- double bdiff, pdiff, tdiff, scale;
- int bdif, tdif, pdif;
- int top, t;
- unsigned char *end, *rend;
-
- if(mode==AUTO)
- mode=fields&MP_IMGFIELD_ORDERED?fields&MP_IMGFIELD_TOP_FIRST?
- TOP_FIRST:BOTTOM_FIRST:PROGRESSIVE;
- else if(mode==AUTO_ANALYZE)
- mode=fields&MP_IMGFIELD_ORDERED?fields&MP_IMGFIELD_TOP_FIRST?
- TOP_FIRST_ANALYZE:BOTTOM_FIRST_ANALYZE:FULL_ANALYZE;
-
- if(fixed_mode(mode))
- bdiff=pdiff=tdiff=65536.0;
- else
- {
- bdiff=pdiff=tdiff=0.0;
-
- for(end=new+(h-2)*ns, new+=ns, old+=os, top=0;
- new<end; new+=ns-w, old+=os-w, top^=1)
- {
- pdif=tdif=bdif=0;
-
- switch(mode)
- {
- case TOP_FIRST_ANALYZE:
- if(top)
- for(rend=new+w; new<rend; new++, old++)
- pdif+=diff(new, ns, new, ns),
- tdif+=diff(new, ns, old, os);
- else
- for(rend=new+w; new<rend; new++, old++)
- pdif+=diff(new, ns, new, ns),
- tdif+=diff(old, os, new, ns);
- break;
-
- case BOTTOM_FIRST_ANALYZE:
- if(top)
- for(rend=new+w; new<rend; new++, old++)
- pdif+=diff(new, ns, new, ns),
- bdif+=diff(old, os, new, ns);
- else
- for(rend=new+w; new<rend; new++, old++)
- pdif+=diff(new, ns, new, ns),
- bdif+=diff(new, ns, old, os);
- break;
-
- case ANALYZE:
- if(top)
- for(rend=new+w; new<rend; new++, old++)
- tdif+=diff(new, ns, old, os),
- bdif+=diff(old, os, new, ns);
- else
- for(rend=new+w; new<rend; new++, old++)
- bdif+=diff(new, ns, old, os),
- tdif+=diff(old, os, new, ns);
- break;
-
- default: /* FULL_ANALYZE */
- if(top)
- for(rend=new+w; new<rend; new++, old++)
- pdif+=diff(new, ns, new, ns),
- tdif+=diff(new, ns, old, os),
- bdif+=diff(old, os, new, ns);
- else
- for(rend=new+w; new<rend; new++, old++)
- pdif+=diff(new, ns, new, ns),
- bdif+=diff(new, ns, old, os),
- tdif+=diff(old, os, new, ns);
- }
-
- pdiff+=(double)pdif;
- tdiff+=(double)tdif;
- bdiff+=(double)bdif;
- }
-
- scale=1.0/(w*(h-3))/25.0;
- pdiff*=scale;
- tdiff*=scale;
- bdiff*=scale;
-
- if(mode==TOP_FIRST_ANALYZE)
- bdiff=65536.0;
- else if(mode==BOTTOM_FIRST_ANALYZE)
- tdiff=65536.0;
- else if(mode==ANALYZE)
- pdiff=65536.0;
-
- if(bdiff<pdiff && bdiff<tdiff)
- mode=BOTTOM_FIRST;
- else if(tdiff<pdiff && tdiff<bdiff)
- mode=TOP_FIRST;
- else
- mode=PROGRESSIVE;
- }
-
- if( ff_mp_msg_test(MSGT_VFILTER,MSGL_V) )
- {
- ff_mp_msg(MSGT_VFILTER, MSGL_INFO, "%c", mode==BOTTOM_FIRST?'b':mode==TOP_FIRST?'t':'p');
- if(tdiff==65536.0) ff_mp_msg(MSGT_VFILTER, MSGL_INFO," N/A "); else ff_mp_msg(MSGT_VFILTER, MSGL_INFO," %8.2f", tdiff);
- if(bdiff==65536.0) ff_mp_msg(MSGT_VFILTER, MSGL_INFO," N/A "); else ff_mp_msg(MSGT_VFILTER, MSGL_INFO," %8.2f", bdiff);
- if(pdiff==65536.0) ff_mp_msg(MSGT_VFILTER, MSGL_INFO," N/A "); else ff_mp_msg(MSGT_VFILTER, MSGL_INFO," %8.2f", pdiff);
- ff_mp_msg(MSGT_VFILTER, MSGL_INFO," \n");
- }
-
- return mode;
- }
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
- {
- mp_image_t *dmpi;
- int w;
- enum mode mode;
-
- if(!(dmpi=ff_vf_get_image(vf->next, mpi->imgfmt,
- MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE,
- mpi->w, mpi->h)))
- return 0;
-
- w=dmpi->w;
- if(!(dmpi->flags&MP_IMGFLAG_PLANAR))
- w*=dmpi->bpp/8;
-
- mode=vf->priv->mode;
-
- if(!vf->priv->buf[0])
- mode=PROGRESSIVE;
- else
- mode=analyze_plane(vf->priv->buf[0], mpi->planes[0],
- w, dmpi->h, w, mpi->stride[0], mode,
- vf->priv->verbose, mpi->fields);
-
- do_plane(dmpi->planes[0], mpi->planes[0],
- w, dmpi->h,
- dmpi->stride[0], mpi->stride[0],
- &vf->priv->buf[0], mode);
-
- if(dmpi->flags&MP_IMGFLAG_PLANAR)
- {
- do_plane(dmpi->planes[1], mpi->planes[1],
- dmpi->chroma_width, dmpi->chroma_height,
- dmpi->stride[1], mpi->stride[1],
- &vf->priv->buf[1], mode);
- do_plane(dmpi->planes[2], mpi->planes[2],
- dmpi->chroma_width, dmpi->chroma_height,
- dmpi->stride[2], mpi->stride[2],
- &vf->priv->buf[2], mode);
- }
-
- return ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE);
- }
-
-static void uninit(struct vf_instance *vf)
- {
- if (!vf->priv)
- return;
- free(vf->priv->buf[0]);
- free(vf->priv->buf[1]);
- free(vf->priv->buf[2]);
- free(vf->priv);
- }
-
-static int vf_open(vf_instance_t *vf, char *args)
- {
- vf->put_image = put_image;
- vf->uninit = uninit;
- vf->default_reqs = VFCAP_ACCEPT_STRIDE;
-
- if(!(vf->priv = calloc(1, sizeof(struct vf_priv_s))))
- {
- uninit(vf);
- return 0;
- }
-
- vf->priv->mode=AUTO_ANALYZE;
- vf->priv->verbose=0;
-
- while(args && *args)
- {
- switch(*args)
- {
- case 't': vf->priv->mode=TOP_FIRST; break;
- case 'a': vf->priv->mode=AUTO; break;
- case 'b': vf->priv->mode=BOTTOM_FIRST; break;
- case 'u': vf->priv->mode=ANALYZE; break;
- case 'T': vf->priv->mode=TOP_FIRST_ANALYZE; break;
- case 'A': vf->priv->mode=AUTO_ANALYZE; break;
- case 'B': vf->priv->mode=BOTTOM_FIRST_ANALYZE; break;
- case 'U': vf->priv->mode=FULL_ANALYZE; break;
- case 'p': vf->priv->mode=PROGRESSIVE; break;
- case 'v': vf->priv->verbose=1; break;
- case ':': break;
-
- default:
- uninit(vf);
- return 0; /* bad args */
- }
-
- if( (args=strchr(args, ':')) ) args++;
- }
-
- return 1;
- }
-
-const vf_info_t ff_vf_info_phase =
- {
- "phase shift fields",
- "phase",
- "Ville Saari",
- "",
- vf_open,
- NULL
- };
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_pullup.c b/ffmpeg/libavfilter/libmpcodecs/vf_pullup.c
deleted file mode 100644
index e4a28c4..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_pullup.c
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "config.h"
-#include "mp_msg.h"
-#include "cpudetect.h"
-
-#include "img_format.h"
-#include "mp_image.h"
-#include "vf.h"
-
-#include "libvo/fastmemcpy.h"
-
-#include "pullup.h"
-
-#undef MAX
-#define MAX(a,b) ((a)>(b)?(a):(b))
-
-struct vf_priv_s {
- struct pullup_context *ctx;
- int init;
- int fakecount;
- char *qbuf;
-};
-
-static void init_pullup(struct vf_instance *vf, mp_image_t *mpi)
-{
- struct pullup_context *c = vf->priv->ctx;
-
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- c->format = PULLUP_FMT_Y;
- c->nplanes = 4;
- ff_pullup_preinit_context(c);
- c->bpp[0] = c->bpp[1] = c->bpp[2] = 8;
- c->w[0] = mpi->w;
- c->h[0] = mpi->h;
- c->w[1] = c->w[2] = mpi->chroma_width;
- c->h[1] = c->h[2] = mpi->chroma_height;
- c->w[3] = ((mpi->w+15)/16) * ((mpi->h+15)/16);
- c->h[3] = 2;
- c->stride[0] = mpi->width;
- c->stride[1] = c->stride[2] = mpi->chroma_width;
- c->stride[3] = c->w[3];
- c->background[1] = c->background[2] = 128;
- }
-
- if (ff_gCpuCaps.hasMMX) c->cpu |= PULLUP_CPU_MMX;
- if (ff_gCpuCaps.hasMMX2) c->cpu |= PULLUP_CPU_MMX2;
- if (ff_gCpuCaps.has3DNow) c->cpu |= PULLUP_CPU_3DNOW;
- if (ff_gCpuCaps.has3DNowExt) c->cpu |= PULLUP_CPU_3DNOWEXT;
- if (ff_gCpuCaps.hasSSE) c->cpu |= PULLUP_CPU_SSE;
- if (ff_gCpuCaps.hasSSE2) c->cpu |= PULLUP_CPU_SSE2;
-
- ff_pullup_init_context(c);
-
- vf->priv->init = 1;
- vf->priv->qbuf = malloc(c->w[3]);
-}
-
-
-#if 0
-static void get_image(struct vf_instance *vf, mp_image_t *mpi)
-{
- struct pullup_context *c = vf->priv->ctx;
- struct pullup_buffer *b;
-
- if (mpi->type == MP_IMGTYPE_STATIC) return;
-
- if (!vf->priv->init) init_pullup(vf, mpi);
-
- b = ff_pullup_get_buffer(c, 2);
- if (!b) return; /* shouldn't happen... */
-
- mpi->priv = b;
-
- mpi->planes[0] = b->planes[0];
- mpi->planes[1] = b->planes[1];
- mpi->planes[2] = b->planes[2];
- mpi->stride[0] = c->stride[0];
- mpi->stride[1] = c->stride[1];
- mpi->stride[2] = c->stride[2];
-
- mpi->flags |= MP_IMGFLAG_DIRECT;
- mpi->flags &= ~MP_IMGFLAG_DRAW_CALLBACK;
-}
-#endif
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
-{
- struct pullup_context *c = vf->priv->ctx;
- struct pullup_buffer *b;
- struct pullup_frame *f;
- mp_image_t *dmpi;
- int ret;
- int p;
- int i;
-
- if (!vf->priv->init) init_pullup(vf, mpi);
-
- if (mpi->flags & MP_IMGFLAG_DIRECT) {
- b = mpi->priv;
- mpi->priv = 0;
- } else {
- b = ff_pullup_get_buffer(c, 2);
- if (!b) {
- ff_mp_msg(MSGT_VFILTER,MSGL_ERR,"Could not get buffer from pullup!\n");
- f = ff_pullup_get_frame(c);
- ff_pullup_release_frame(f);
- return 0;
- }
- memcpy_pic(b->planes[0], mpi->planes[0], mpi->w, mpi->h,
- c->stride[0], mpi->stride[0]);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- memcpy_pic(b->planes[1], mpi->planes[1],
- mpi->chroma_width, mpi->chroma_height,
- c->stride[1], mpi->stride[1]);
- memcpy_pic(b->planes[2], mpi->planes[2],
- mpi->chroma_width, mpi->chroma_height,
- c->stride[2], mpi->stride[2]);
- }
- }
- if (mpi->qscale) {
- fast_memcpy(b->planes[3], mpi->qscale, c->w[3]);
- fast_memcpy(b->planes[3]+c->w[3], mpi->qscale, c->w[3]);
- }
-
- p = mpi->fields & MP_IMGFIELD_TOP_FIRST ? 0 :
- (mpi->fields & MP_IMGFIELD_ORDERED ? 1 : 0);
- ff_pullup_submit_field(c, b, p);
- ff_pullup_submit_field(c, b, p^1);
- if (mpi->fields & MP_IMGFIELD_REPEAT_FIRST)
- ff_pullup_submit_field(c, b, p);
-
- ff_pullup_release_buffer(b, 2);
-
- f = ff_pullup_get_frame(c);
-
- /* Fake yes for first few frames (buffer depth) to keep from
- * breaking A/V sync with G1's bad architecture... */
- if (!f) return vf->priv->fakecount ? (--vf->priv->fakecount,1) : 0;
-
- if (f->length < 2) {
- ff_pullup_release_frame(f);
- f = ff_pullup_get_frame(c);
- if (!f) return 0;
- if (f->length < 2) {
- ff_pullup_release_frame(f);
- if (!(mpi->fields & MP_IMGFIELD_REPEAT_FIRST))
- return 0;
- f = ff_pullup_get_frame(c);
- if (!f) return 0;
- if (f->length < 2) {
- ff_pullup_release_frame(f);
- return 0;
- }
- }
- }
-
-#if 0
- /* Average qscale tables from both frames. */
- if (mpi->qscale) {
- for (i=0; i<c->w[3]; i++) {
- vf->priv->qbuf[i] = (f->ofields[0]->planes[3][i]
- + f->ofields[1]->planes[3][i+c->w[3]])>>1;
- }
- }
-#else
- /* Take worst of qscale tables from both frames. */
- if (mpi->qscale) {
- for (i=0; i<c->w[3]; i++) {
- vf->priv->qbuf[i] = MAX(f->ofields[0]->planes[3][i], f->ofields[1]->planes[3][i+c->w[3]]);
- }
- }
-#endif
-
- /* If the frame isn't already exportable... */
- while (!f->buffer) {
- dmpi = ff_vf_get_image(vf->next, mpi->imgfmt,
- MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE,
- mpi->width, mpi->height);
- /* FIXME: Is it ok to discard dmpi if it's not direct? */
- if (!(dmpi->flags & MP_IMGFLAG_DIRECT)) {
- ff_pullup_pack_frame(c, f);
- break;
- }
- /* Direct render fields into output buffer */
- my_memcpy_pic(dmpi->planes[0], f->ofields[0]->planes[0],
- mpi->w, mpi->h/2, dmpi->stride[0]*2, c->stride[0]*2);
- my_memcpy_pic(dmpi->planes[0] + dmpi->stride[0],
- f->ofields[1]->planes[0] + c->stride[0],
- mpi->w, mpi->h/2, dmpi->stride[0]*2, c->stride[0]*2);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- my_memcpy_pic(dmpi->planes[1], f->ofields[0]->planes[1],
- mpi->chroma_width, mpi->chroma_height/2,
- dmpi->stride[1]*2, c->stride[1]*2);
- my_memcpy_pic(dmpi->planes[1] + dmpi->stride[1],
- f->ofields[1]->planes[1] + c->stride[1],
- mpi->chroma_width, mpi->chroma_height/2,
- dmpi->stride[1]*2, c->stride[1]*2);
- my_memcpy_pic(dmpi->planes[2], f->ofields[0]->planes[2],
- mpi->chroma_width, mpi->chroma_height/2,
- dmpi->stride[2]*2, c->stride[2]*2);
- my_memcpy_pic(dmpi->planes[2] + dmpi->stride[2],
- f->ofields[1]->planes[2] + c->stride[2],
- mpi->chroma_width, mpi->chroma_height/2,
- dmpi->stride[2]*2, c->stride[2]*2);
- }
- ff_pullup_release_frame(f);
- if (mpi->qscale) {
- dmpi->qscale = vf->priv->qbuf;
- dmpi->qstride = mpi->qstride;
- dmpi->qscale_type = mpi->qscale_type;
- }
- return ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE);
- }
- dmpi = ff_vf_get_image(vf->next, mpi->imgfmt,
- MP_IMGTYPE_EXPORT, MP_IMGFLAG_ACCEPT_STRIDE,
- mpi->width, mpi->height);
-
- dmpi->planes[0] = f->buffer->planes[0];
- dmpi->planes[1] = f->buffer->planes[1];
- dmpi->planes[2] = f->buffer->planes[2];
-
- dmpi->stride[0] = c->stride[0];
- dmpi->stride[1] = c->stride[1];
- dmpi->stride[2] = c->stride[2];
-
- if (mpi->qscale) {
- dmpi->qscale = vf->priv->qbuf;
- dmpi->qstride = mpi->qstride;
- dmpi->qscale_type = mpi->qscale_type;
- }
- ret = ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE);
- ff_pullup_release_frame(f);
- return ret;
-}
-
-static int query_format(struct vf_instance *vf, unsigned int fmt)
-{
- /* FIXME - support more formats */
- switch (fmt) {
- case IMGFMT_YV12:
- case IMGFMT_IYUV:
- case IMGFMT_I420:
- return ff_vf_next_query_format(vf, fmt);
- }
- return 0;
-}
-
-static int config(struct vf_instance *vf,
- int width, int height, int d_width, int d_height,
- unsigned int flags, unsigned int outfmt)
-{
- if (height&3) {
- ff_mp_msg(MSGT_VFILTER, MSGL_ERR, "height must be divisible by four\n");
- return 0;
- }
- return ff_vf_next_config(vf, width, height, d_width, d_height, flags, outfmt);
-}
-
-static void uninit(struct vf_instance *vf)
-{
- ff_pullup_free_context(vf->priv->ctx);
- free(vf->priv);
-}
-
-static int vf_open(vf_instance_t *vf, char *args)
-{
- struct vf_priv_s *p;
- struct pullup_context *c;
- //vf->get_image = get_image;
- vf->put_image = put_image;
- vf->config = config;
- vf->query_format = query_format;
- vf->uninit = uninit;
- vf->default_reqs = VFCAP_ACCEPT_STRIDE;
- vf->priv = p = calloc(1, sizeof(struct vf_priv_s));
- p->ctx = c = ff_pullup_alloc_context();
- p->fakecount = 1;
- c->junk_left = c->junk_right = 1;
- c->junk_top = c->junk_bottom = 4;
- c->strict_breaks = 0;
- c->metric_plane = 0;
- if (args) {
- sscanf(args, "%d:%d:%d:%d:%d:%d", &c->junk_left, &c->junk_right, &c->junk_top, &c->junk_bottom, &c->strict_breaks, &c->metric_plane);
- }
- return 1;
-}
-
-const vf_info_t ff_vf_info_pullup = {
- "pullup (from field sequence to frames)",
- "pullup",
- "Rich Felker",
- "",
- vf_open,
- NULL
-};
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_qp.c b/ffmpeg/libavfilter/libmpcodecs/vf_qp.c
deleted file mode 100644
index 579ec1c..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_qp.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
- *
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <math.h>
-#include <inttypes.h>
-
-#include "mp_msg.h"
-#include "cpudetect.h"
-#include "img_format.h"
-#include "mp_image.h"
-#include "vf.h"
-#include "libvo/fastmemcpy.h"
-
-#include "libavcodec/avcodec.h"
-#include "libavutil/eval.h"
-#include "libavutil/mem.h"
-
-
-struct vf_priv_s {
- char eq[200];
- int8_t *qp;
- int8_t lut[257];
- int qp_stride;
-};
-
-static int config(struct vf_instance *vf,
- int width, int height, int d_width, int d_height,
- unsigned int flags, unsigned int outfmt){
- int h= (height+15)>>4;
- int i;
-
- vf->priv->qp_stride= (width+15)>>4;
- vf->priv->qp= av_malloc(vf->priv->qp_stride*h*sizeof(int8_t));
-
- for(i=-129; i<128; i++){
- double const_values[]={
- M_PI,
- M_E,
- i != -129,
- i,
- 0
- };
- static const char *const_names[]={
- "PI",
- "E",
- "known",
- "qp",
- NULL
- };
- double temp_val;
- int res;
-
- res= av_expr_parse_and_eval(&temp_val, vf->priv->eq, const_names, const_values, NULL, NULL, NULL, NULL, NULL, 0, NULL);
-
- if (res < 0){
- ff_mp_msg(MSGT_VFILTER, MSGL_ERR, "qp: Error evaluating \"%s\" \n", vf->priv->eq);
- return 0;
- }
- vf->priv->lut[i+129]= lrintf(temp_val);
- }
-
- return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
-}
-
-static void get_image(struct vf_instance *vf, mp_image_t *mpi){
- if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
- // ok, we can do pp in-place (or pp disabled):
- vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
- mpi->type, mpi->flags, mpi->w, mpi->h);
- mpi->planes[0]=vf->dmpi->planes[0];
- mpi->stride[0]=vf->dmpi->stride[0];
- mpi->width=vf->dmpi->width;
- if(mpi->flags&MP_IMGFLAG_PLANAR){
- mpi->planes[1]=vf->dmpi->planes[1];
- mpi->planes[2]=vf->dmpi->planes[2];
- mpi->stride[1]=vf->dmpi->stride[1];
- mpi->stride[2]=vf->dmpi->stride[2];
- }
- mpi->flags|=MP_IMGFLAG_DIRECT;
-}
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
- mp_image_t *dmpi;
- int x,y;
-
- if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
- // no DR, so get a new image! hope we'll get DR buffer:
- vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
- MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
- mpi->w,mpi->h);
- }
-
- dmpi= vf->dmpi;
-
- if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
- memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, dmpi->stride[0], mpi->stride[0]);
- if(mpi->flags&MP_IMGFLAG_PLANAR){
- memcpy_pic(dmpi->planes[1], mpi->planes[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[1], mpi->stride[1]);
- memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[2], mpi->stride[2]);
- }
- }
- ff_vf_clone_mpi_attributes(dmpi, mpi);
-
- dmpi->qscale = vf->priv->qp;
- dmpi->qstride= vf->priv->qp_stride;
- if(mpi->qscale){
- for(y=0; y<((dmpi->h+15)>>4); y++){
- for(x=0; x<vf->priv->qp_stride; x++){
- dmpi->qscale[x + dmpi->qstride*y]=
- vf->priv->lut[ 129 + ((int8_t)mpi->qscale[x + mpi->qstride*y]) ];
- }
- }
- }else{
- int qp= vf->priv->lut[0];
- for(y=0; y<((dmpi->h+15)>>4); y++){
- for(x=0; x<vf->priv->qp_stride; x++){
- dmpi->qscale[x + dmpi->qstride*y]= qp;
- }
- }
- }
-
- return ff_vf_next_put_image(vf,dmpi, pts);
-}
-
-static void uninit(struct vf_instance *vf){
- if(!vf->priv) return;
-
- av_free(vf->priv->qp);
- vf->priv->qp= NULL;
-
- av_free(vf->priv);
- vf->priv=NULL;
-}
-
-//===========================================================================//
-static int vf_open(vf_instance_t *vf, char *args){
- vf->config=config;
- vf->put_image=put_image;
- vf->get_image=get_image;
- vf->uninit=uninit;
- vf->priv=av_malloc(sizeof(struct vf_priv_s));
- memset(vf->priv, 0, sizeof(struct vf_priv_s));
-
-// avcodec_init();
-
- if (args) strncpy(vf->priv->eq, args, 199);
-
- return 1;
-}
-
-const vf_info_t ff_vf_info_qp = {
- "QP changer",
- "qp",
- "Michael Niedermayer",
- "",
- vf_open,
- NULL
-};
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_sab.c b/ffmpeg/libavfilter/libmpcodecs/vf_sab.c
deleted file mode 100644
index 2928a85..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_sab.c
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * Copyright (C) 2002 Michael Niedermayer <michaelni@gmx.at>
- *
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <inttypes.h>
-#include <assert.h>
-
-#include "config.h"
-#include "mp_msg.h"
-
-#if HAVE_MALLOC_H
-#include <malloc.h>
-#endif
-
-#include "libavutil/avutil.h"
-#include "libavutil/mem.h"
-#include "img_format.h"
-#include "mp_image.h"
-#include "vf.h"
-#include "libswscale/swscale.h"
-#include "vf_scale.h"
-
-
-//===========================================================================//
-
-typedef struct FilterParam{
- float radius;
- float preFilterRadius;
- float strength;
- float quality;
- struct SwsContext *preFilterContext;
- uint8_t *preFilterBuf;
- int preFilterStride;
- int distWidth;
- int distStride;
- int *distCoeff;
- int colorDiffCoeff[512];
-}FilterParam;
-
-struct vf_priv_s {
- FilterParam luma;
- FilterParam chroma;
-};
-
-
-/***************************************************************************/
-
-//FIXME stupid code duplication
-static void getSubSampleFactors(int *h, int *v, int format){
- switch(format){
- default:
- assert(0);
- case IMGFMT_YV12:
- case IMGFMT_I420:
- *h=1;
- *v=1;
- break;
- case IMGFMT_YVU9:
- *h=2;
- *v=2;
- break;
- case IMGFMT_444P:
- *h=0;
- *v=0;
- break;
- case IMGFMT_422P:
- *h=1;
- *v=0;
- break;
- case IMGFMT_411P:
- *h=2;
- *v=0;
- break;
- }
-}
-
-static int allocStuff(FilterParam *f, int width, int height){
- int stride= (width+7)&~7;
- SwsVector *vec;
- SwsFilter swsF;
- int i,x,y;
- f->preFilterBuf= av_malloc(stride*height);
- f->preFilterStride= stride;
-
- vec = sws_getGaussianVec(f->preFilterRadius, f->quality);
- swsF.lumH= swsF.lumV= vec;
- swsF.chrH= swsF.chrV= NULL;
- f->preFilterContext= sws_getContext(
- width, height, AV_PIX_FMT_GRAY8, width, height, AV_PIX_FMT_GRAY8, SWS_POINT, &swsF, NULL, NULL);
-
- sws_freeVec(vec);
- vec = sws_getGaussianVec(f->strength, 5.0);
- for(i=0; i<512; i++){
- double d;
- int index= i-256 + vec->length/2;
-
- if(index<0 || index>=vec->length) d= 0.0;
- else d= vec->coeff[index];
-
- f->colorDiffCoeff[i]= (int)(d/vec->coeff[vec->length/2]*(1<<12) + 0.5);
- }
- sws_freeVec(vec);
- vec = sws_getGaussianVec(f->radius, f->quality);
- f->distWidth= vec->length;
- f->distStride= (vec->length+7)&~7;
- f->distCoeff= av_malloc(f->distWidth*f->distStride*sizeof(int32_t));
-
- for(y=0; y<vec->length; y++){
- for(x=0; x<vec->length; x++){
- double d= vec->coeff[x] * vec->coeff[y];
-
- f->distCoeff[x + y*f->distStride]= (int)(d*(1<<10) + 0.5);
-// if(y==vec->length/2)
-// printf("%6d ", f->distCoeff[x + y*f->distStride]);
- }
- }
- sws_freeVec(vec);
-
- return 0;
-}
-
-static int config(struct vf_instance *vf,
- int width, int height, int d_width, int d_height,
- unsigned int flags, unsigned int outfmt){
-
- int sw, sh;
-//__asm__ volatile("emms\n\t");
- allocStuff(&vf->priv->luma, width, height);
-
- getSubSampleFactors(&sw, &sh, outfmt);
- allocStuff(&vf->priv->chroma, width>>sw, height>>sh);
-
- return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
-}
-
-static void freeBuffers(FilterParam *f){
- if(f->preFilterContext) sws_freeContext(f->preFilterContext);
- f->preFilterContext=NULL;
-
- av_free(f->preFilterBuf);
- f->preFilterBuf=NULL;
-
- av_free(f->distCoeff);
- f->distCoeff=NULL;
-}
-
-static void uninit(struct vf_instance *vf){
- if(!vf->priv) return;
-
- freeBuffers(&vf->priv->luma);
- freeBuffers(&vf->priv->chroma);
-
- free(vf->priv);
- vf->priv=NULL;
-}
-
-static inline void blur(uint8_t *dst, uint8_t *src, int w, int h, int dstStride, int srcStride, FilterParam *fp){
- int x, y;
- FilterParam f= *fp;
- const int radius= f.distWidth/2;
- const uint8_t* const srcArray[MP_MAX_PLANES] = {src};
- uint8_t *dstArray[MP_MAX_PLANES]= {f.preFilterBuf};
- int srcStrideArray[MP_MAX_PLANES]= {srcStride};
- int dstStrideArray[MP_MAX_PLANES]= {f.preFilterStride};
-
-// f.preFilterContext->swScale(f.preFilterContext, srcArray, srcStrideArray, 0, h, dstArray, dstStrideArray);
- sws_scale(f.preFilterContext, srcArray, srcStrideArray, 0, h, dstArray, dstStrideArray);
-
- for(y=0; y<h; y++){
- for(x=0; x<w; x++){
- int sum=0;
- int div=0;
- int dy;
- const int preVal= f.preFilterBuf[x + y*f.preFilterStride];
-#if 0
- const int srcVal= src[x + y*srcStride];
-if((x/32)&1){
- dst[x + y*dstStride]= srcVal;
- if(y%32==0) dst[x + y*dstStride]= 0;
- continue;
-}
-#endif
- if(x >= radius && x < w - radius){
- for(dy=0; dy<radius*2+1; dy++){
- int dx;
- int iy= y+dy - radius;
- if (iy<0) iy= -iy;
- else if(iy>=h) iy= h+h-iy-1;
-
- for(dx=0; dx<radius*2+1; dx++){
- const int ix= x+dx - radius;
- int factor;
-
- factor= f.colorDiffCoeff[256+preVal - f.preFilterBuf[ix + iy*f.preFilterStride] ]
- *f.distCoeff[dx + dy*f.distStride];
- sum+= src[ix + iy*srcStride] *factor;
- div+= factor;
- }
- }
- }else{
- for(dy=0; dy<radius*2+1; dy++){
- int dx;
- int iy= y+dy - radius;
- if (iy<0) iy= -iy;
- else if(iy>=h) iy= h+h-iy-1;
-
- for(dx=0; dx<radius*2+1; dx++){
- int ix= x+dx - radius;
- int factor;
- if (ix<0) ix= -ix;
- else if(ix>=w) ix= w+w-ix-1;
-
- factor= f.colorDiffCoeff[256+preVal - f.preFilterBuf[ix + iy*f.preFilterStride] ]
- *f.distCoeff[dx + dy*f.distStride];
- sum+= src[ix + iy*srcStride] *factor;
- div+= factor;
- }
- }
- }
- dst[x + y*dstStride]= (sum + div/2)/div;
- }
- }
-}
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
- int cw= mpi->w >> mpi->chroma_x_shift;
- int ch= mpi->h >> mpi->chroma_y_shift;
-
- mp_image_t *dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
- MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE,
- mpi->w,mpi->h);
-
- assert(mpi->flags&MP_IMGFLAG_PLANAR);
-
- blur(dmpi->planes[0], mpi->planes[0], mpi->w,mpi->h, dmpi->stride[0], mpi->stride[0], &vf->priv->luma);
- blur(dmpi->planes[1], mpi->planes[1], cw , ch , dmpi->stride[1], mpi->stride[1], &vf->priv->chroma);
- blur(dmpi->planes[2], mpi->planes[2], cw , ch , dmpi->stride[2], mpi->stride[2], &vf->priv->chroma);
-
- return ff_vf_next_put_image(vf,dmpi, pts);
-}
-
-//===========================================================================//
-
-static int query_format(struct vf_instance *vf, unsigned int fmt){
- switch(fmt)
- {
- case IMGFMT_YV12:
- case IMGFMT_I420:
- case IMGFMT_IYUV:
- case IMGFMT_YVU9:
- case IMGFMT_444P:
- case IMGFMT_422P:
- case IMGFMT_411P:
- return ff_vf_next_query_format(vf, fmt);
- }
- return 0;
-}
-
-static int vf_open(vf_instance_t *vf, char *args){
- int e;
-
- vf->config=config;
- vf->put_image=put_image;
-// vf->get_image=get_image;
- vf->query_format=query_format;
- vf->uninit=uninit;
- vf->priv=malloc(sizeof(struct vf_priv_s));
- memset(vf->priv, 0, sizeof(struct vf_priv_s));
-
- if(args==NULL) return 0;
-
- e=sscanf(args, "%f:%f:%f:%f:%f:%f",
- &vf->priv->luma.radius,
- &vf->priv->luma.preFilterRadius,
- &vf->priv->luma.strength,
- &vf->priv->chroma.radius,
- &vf->priv->chroma.preFilterRadius,
- &vf->priv->chroma.strength
- );
-
- vf->priv->luma.quality = vf->priv->chroma.quality= 3.0;
-
- if(e==3){
- vf->priv->chroma.radius= vf->priv->luma.radius;
- vf->priv->chroma.preFilterRadius = vf->priv->luma.preFilterRadius;
- vf->priv->chroma.strength= vf->priv->luma.strength;
- }else if(e!=6)
- return 0;
-
-// if(vf->priv->luma.radius < 0) return 0;
-// if(vf->priv->chroma.radius < 0) return 0;
-
- return 1;
-}
-
-const vf_info_t ff_vf_info_sab = {
- "shape adaptive blur",
- "sab",
- "Michael Niedermayer",
- "",
- vf_open,
- NULL
-};
-
-//===========================================================================//
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_scale.h b/ffmpeg/libavfilter/libmpcodecs/vf_scale.h
deleted file mode 100644
index 177fbe5..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_scale.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef MPLAYER_VF_SCALE_H
-#define MPLAYER_VF_SCALE_H
-
-extern int ff_sws_chr_vshift;
-extern int ff_sws_chr_hshift;
-
-extern float ff_sws_chr_gblur;
-extern float ff_sws_lum_gblur;
-extern float ff_sws_chr_sharpen;
-extern float ff_sws_lum_sharpen;
-
-extern int ff_sws_flags;
-
-struct SwsContext *ff_sws_getContextFromCmdLine(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat);
-
-#endif /* MPLAYER_VF_SCALE_H */
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_spp.c b/ffmpeg/libavfilter/libmpcodecs/vf_spp.c
deleted file mode 100644
index 75ede23..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_spp.c
+++ /dev/null
@@ -1,621 +0,0 @@
-/*
- * Copyright (C) 2003 Michael Niedermayer <michaelni@gmx.at>
- *
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-/*
- * This implementation is based on an algorithm described in
- * "Aria Nosratinia Embedded Post-Processing for
- * Enhancement of Compressed Images (1999)"
- * (http://citeseer.nj.nec.com/nosratinia99embedded.html)
- */
-
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <inttypes.h>
-#include <math.h>
-
-#include "config.h"
-
-#include "mp_msg.h"
-#include "cpudetect.h"
-
-#include "libavutil/common.h"
-#include "libavutil/internal.h"
-#include "libavutil/intreadwrite.h"
-#include "libavcodec/avcodec.h"
-#include "libavcodec/dsputil.h"
-
-#undef fprintf
-#undef free
-#undef malloc
-
-#include "img_format.h"
-#include "mp_image.h"
-#include "vf.h"
-#include "av_helpers.h"
-#include "libvo/fastmemcpy.h"
-
-#define XMIN(a,b) ((a) < (b) ? (a) : (b))
-
-//===========================================================================//
-static const uint8_t __attribute__((aligned(8))) dither[8][8]={
-{ 0, 48, 12, 60, 3, 51, 15, 63, },
-{ 32, 16, 44, 28, 35, 19, 47, 31, },
-{ 8, 56, 4, 52, 11, 59, 7, 55, },
-{ 40, 24, 36, 20, 43, 27, 39, 23, },
-{ 2, 50, 14, 62, 1, 49, 13, 61, },
-{ 34, 18, 46, 30, 33, 17, 45, 29, },
-{ 10, 58, 6, 54, 9, 57, 5, 53, },
-{ 42, 26, 38, 22, 41, 25, 37, 21, },
-};
-
-static const uint8_t offset[127][2]= {
-{0,0},
-{0,0}, {4,4},
-{0,0}, {2,2}, {6,4}, {4,6},
-{0,0}, {5,1}, {2,2}, {7,3}, {4,4}, {1,5}, {6,6}, {3,7},
-
-{0,0}, {4,0}, {1,1}, {5,1}, {3,2}, {7,2}, {2,3}, {6,3},
-{0,4}, {4,4}, {1,5}, {5,5}, {3,6}, {7,6}, {2,7}, {6,7},
-
-{0,0}, {0,2}, {0,4}, {0,6}, {1,1}, {1,3}, {1,5}, {1,7},
-{2,0}, {2,2}, {2,4}, {2,6}, {3,1}, {3,3}, {3,5}, {3,7},
-{4,0}, {4,2}, {4,4}, {4,6}, {5,1}, {5,3}, {5,5}, {5,7},
-{6,0}, {6,2}, {6,4}, {6,6}, {7,1}, {7,3}, {7,5}, {7,7},
-
-{0,0}, {4,4}, {0,4}, {4,0}, {2,2}, {6,6}, {2,6}, {6,2},
-{0,2}, {4,6}, {0,6}, {4,2}, {2,0}, {6,4}, {2,4}, {6,0},
-{1,1}, {5,5}, {1,5}, {5,1}, {3,3}, {7,7}, {3,7}, {7,3},
-{1,3}, {5,7}, {1,7}, {5,3}, {3,1}, {7,5}, {3,5}, {7,1},
-{0,1}, {4,5}, {0,5}, {4,1}, {2,3}, {6,7}, {2,7}, {6,3},
-{0,3}, {4,7}, {0,7}, {4,3}, {2,1}, {6,5}, {2,5}, {6,1},
-{1,0}, {5,4}, {1,4}, {5,0}, {3,2}, {7,6}, {3,6}, {7,2},
-{1,2}, {5,6}, {1,6}, {5,2}, {3,0}, {7,4}, {3,4}, {7,0},
-};
-
-struct vf_priv_s {
- int log2_count;
- int qp;
- int mode;
- int mpeg2;
- int temp_stride;
- uint8_t *src;
- int16_t *temp;
- AVCodecContext *avctx;
- DSPContext dsp;
- char *non_b_qp;
-};
-
-#define SHIFT 22
-
-static void hardthresh_c(int16_t dst[64], int16_t src[64], int qp, uint8_t *permutation){
- int i;
- int bias= 0; //FIXME
- unsigned int threshold1, threshold2;
-
- threshold1= qp*((1<<4) - bias) - 1;
- threshold2= (threshold1<<1);
-
- memset(dst, 0, 64*sizeof(int16_t));
- dst[0]= (src[0] + 4)>>3;
-
- for(i=1; i<64; i++){
- int level= src[i];
- if(((unsigned)(level+threshold1))>threshold2){
- const int j= permutation[i];
- dst[j]= (level + 4)>>3;
- }
- }
-}
-
-static void softthresh_c(int16_t dst[64], int16_t src[64], int qp, uint8_t *permutation){
- int i;
- int bias= 0; //FIXME
- unsigned int threshold1, threshold2;
-
- threshold1= qp*((1<<4) - bias) - 1;
- threshold2= (threshold1<<1);
-
- memset(dst, 0, 64*sizeof(int16_t));
- dst[0]= (src[0] + 4)>>3;
-
- for(i=1; i<64; i++){
- int level= src[i];
- if(((unsigned)(level+threshold1))>threshold2){
- const int j= permutation[i];
- if(level>0)
- dst[j]= (level - threshold1 + 4)>>3;
- else
- dst[j]= (level + threshold1 + 4)>>3;
- }
- }
-}
-
-#if HAVE_MMX
-static void hardthresh_mmx(int16_t dst[64], int16_t src[64], int qp, uint8_t *permutation){
- int bias= 0; //FIXME
- unsigned int threshold1;
-
- threshold1= qp*((1<<4) - bias) - 1;
-
- __asm__ volatile(
-#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
- "movq " #src0 ", %%mm0 \n\t"\
- "movq " #src1 ", %%mm1 \n\t"\
- "movq " #src2 ", %%mm2 \n\t"\
- "movq " #src3 ", %%mm3 \n\t"\
- "psubw %%mm4, %%mm0 \n\t"\
- "psubw %%mm4, %%mm1 \n\t"\
- "psubw %%mm4, %%mm2 \n\t"\
- "psubw %%mm4, %%mm3 \n\t"\
- "paddusw %%mm5, %%mm0 \n\t"\
- "paddusw %%mm5, %%mm1 \n\t"\
- "paddusw %%mm5, %%mm2 \n\t"\
- "paddusw %%mm5, %%mm3 \n\t"\
- "paddw %%mm6, %%mm0 \n\t"\
- "paddw %%mm6, %%mm1 \n\t"\
- "paddw %%mm6, %%mm2 \n\t"\
- "paddw %%mm6, %%mm3 \n\t"\
- "psubusw %%mm6, %%mm0 \n\t"\
- "psubusw %%mm6, %%mm1 \n\t"\
- "psubusw %%mm6, %%mm2 \n\t"\
- "psubusw %%mm6, %%mm3 \n\t"\
- "psraw $3, %%mm0 \n\t"\
- "psraw $3, %%mm1 \n\t"\
- "psraw $3, %%mm2 \n\t"\
- "psraw $3, %%mm3 \n\t"\
-\
- "movq %%mm0, %%mm7 \n\t"\
- "punpcklwd %%mm2, %%mm0 \n\t" /*A*/\
- "punpckhwd %%mm2, %%mm7 \n\t" /*C*/\
- "movq %%mm1, %%mm2 \n\t"\
- "punpcklwd %%mm3, %%mm1 \n\t" /*B*/\
- "punpckhwd %%mm3, %%mm2 \n\t" /*D*/\
- "movq %%mm0, %%mm3 \n\t"\
- "punpcklwd %%mm1, %%mm0 \n\t" /*A*/\
- "punpckhwd %%mm7, %%mm3 \n\t" /*C*/\
- "punpcklwd %%mm2, %%mm7 \n\t" /*B*/\
- "punpckhwd %%mm2, %%mm1 \n\t" /*D*/\
-\
- "movq %%mm0, " #dst0 " \n\t"\
- "movq %%mm7, " #dst1 " \n\t"\
- "movq %%mm3, " #dst2 " \n\t"\
- "movq %%mm1, " #dst3 " \n\t"
-
- "movd %2, %%mm4 \n\t"
- "movd %3, %%mm5 \n\t"
- "movd %4, %%mm6 \n\t"
- "packssdw %%mm4, %%mm4 \n\t"
- "packssdw %%mm5, %%mm5 \n\t"
- "packssdw %%mm6, %%mm6 \n\t"
- "packssdw %%mm4, %%mm4 \n\t"
- "packssdw %%mm5, %%mm5 \n\t"
- "packssdw %%mm6, %%mm6 \n\t"
- REQUANT_CORE( (%1), 8(%1), 16(%1), 24(%1), (%0), 8(%0), 64(%0), 72(%0))
- REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
- REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
- REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
- : : "r" (src), "r" (dst), "g" (threshold1+1), "g" (threshold1+5), "g" (threshold1-4) //FIXME maybe more accurate then needed?
- );
- dst[0]= (src[0] + 4)>>3;
-}
-
-static void softthresh_mmx(int16_t dst[64], int16_t src[64], int qp, uint8_t *permutation){
- int bias= 0; //FIXME
- unsigned int threshold1;
-
- threshold1= qp*((1<<4) - bias) - 1;
-
- __asm__ volatile(
-#undef REQUANT_CORE
-#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
- "movq " #src0 ", %%mm0 \n\t"\
- "movq " #src1 ", %%mm1 \n\t"\
- "pxor %%mm6, %%mm6 \n\t"\
- "pxor %%mm7, %%mm7 \n\t"\
- "pcmpgtw %%mm0, %%mm6 \n\t"\
- "pcmpgtw %%mm1, %%mm7 \n\t"\
- "pxor %%mm6, %%mm0 \n\t"\
- "pxor %%mm7, %%mm1 \n\t"\
- "psubusw %%mm4, %%mm0 \n\t"\
- "psubusw %%mm4, %%mm1 \n\t"\
- "pxor %%mm6, %%mm0 \n\t"\
- "pxor %%mm7, %%mm1 \n\t"\
- "movq " #src2 ", %%mm2 \n\t"\
- "movq " #src3 ", %%mm3 \n\t"\
- "pxor %%mm6, %%mm6 \n\t"\
- "pxor %%mm7, %%mm7 \n\t"\
- "pcmpgtw %%mm2, %%mm6 \n\t"\
- "pcmpgtw %%mm3, %%mm7 \n\t"\
- "pxor %%mm6, %%mm2 \n\t"\
- "pxor %%mm7, %%mm3 \n\t"\
- "psubusw %%mm4, %%mm2 \n\t"\
- "psubusw %%mm4, %%mm3 \n\t"\
- "pxor %%mm6, %%mm2 \n\t"\
- "pxor %%mm7, %%mm3 \n\t"\
-\
- "paddsw %%mm5, %%mm0 \n\t"\
- "paddsw %%mm5, %%mm1 \n\t"\
- "paddsw %%mm5, %%mm2 \n\t"\
- "paddsw %%mm5, %%mm3 \n\t"\
- "psraw $3, %%mm0 \n\t"\
- "psraw $3, %%mm1 \n\t"\
- "psraw $3, %%mm2 \n\t"\
- "psraw $3, %%mm3 \n\t"\
-\
- "movq %%mm0, %%mm7 \n\t"\
- "punpcklwd %%mm2, %%mm0 \n\t" /*A*/\
- "punpckhwd %%mm2, %%mm7 \n\t" /*C*/\
- "movq %%mm1, %%mm2 \n\t"\
- "punpcklwd %%mm3, %%mm1 \n\t" /*B*/\
- "punpckhwd %%mm3, %%mm2 \n\t" /*D*/\
- "movq %%mm0, %%mm3 \n\t"\
- "punpcklwd %%mm1, %%mm0 \n\t" /*A*/\
- "punpckhwd %%mm7, %%mm3 \n\t" /*C*/\
- "punpcklwd %%mm2, %%mm7 \n\t" /*B*/\
- "punpckhwd %%mm2, %%mm1 \n\t" /*D*/\
-\
- "movq %%mm0, " #dst0 " \n\t"\
- "movq %%mm7, " #dst1 " \n\t"\
- "movq %%mm3, " #dst2 " \n\t"\
- "movq %%mm1, " #dst3 " \n\t"
-
- "movd %2, %%mm4 \n\t"
- "movd %3, %%mm5 \n\t"
- "packssdw %%mm4, %%mm4 \n\t"
- "packssdw %%mm5, %%mm5 \n\t"
- "packssdw %%mm4, %%mm4 \n\t"
- "packssdw %%mm5, %%mm5 \n\t"
- REQUANT_CORE( (%1), 8(%1), 16(%1), 24(%1), (%0), 8(%0), 64(%0), 72(%0))
- REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
- REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
- REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
- : : "r" (src), "r" (dst), "g" (threshold1), "rm" (4) //FIXME maybe more accurate then needed?
- );
-
- dst[0]= (src[0] + 4)>>3;
-}
-#endif
-
-static inline void add_block(int16_t *dst, int stride, int16_t block[64]){
- int y;
-
- for(y=0; y<8; y++){
- *(uint32_t*)&dst[0 + y*stride]+= *(uint32_t*)&block[0 + y*8];
- *(uint32_t*)&dst[2 + y*stride]+= *(uint32_t*)&block[2 + y*8];
- *(uint32_t*)&dst[4 + y*stride]+= *(uint32_t*)&block[4 + y*8];
- *(uint32_t*)&dst[6 + y*stride]+= *(uint32_t*)&block[6 + y*8];
- }
-}
-
-static void store_slice_c(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale){
- int y, x;
-
-#define STORE(pos) \
- temp= ((src[x + y*src_stride + pos]<<log2_scale) + d[pos])>>6;\
- if(temp & 0x100) temp= ~(temp>>31);\
- dst[x + y*dst_stride + pos]= temp;
-
- for(y=0; y<height; y++){
- const uint8_t *d= dither[y];
- for(x=0; x<width; x+=8){
- int temp;
- STORE(0);
- STORE(1);
- STORE(2);
- STORE(3);
- STORE(4);
- STORE(5);
- STORE(6);
- STORE(7);
- }
- }
-}
-
-#if HAVE_MMX
-static void store_slice_mmx(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale){
- int y;
-
- for(y=0; y<height; y++){
- uint8_t *dst1= dst;
- int16_t *src1= src;
- __asm__ volatile(
- "movq (%3), %%mm3 \n\t"
- "movq (%3), %%mm4 \n\t"
- "movd %4, %%mm2 \n\t"
- "pxor %%mm0, %%mm0 \n\t"
- "punpcklbw %%mm0, %%mm3 \n\t"
- "punpckhbw %%mm0, %%mm4 \n\t"
- "psraw %%mm2, %%mm3 \n\t"
- "psraw %%mm2, %%mm4 \n\t"
- "movd %5, %%mm2 \n\t"
- "1: \n\t"
- "movq (%0), %%mm0 \n\t"
- "movq 8(%0), %%mm1 \n\t"
- "paddw %%mm3, %%mm0 \n\t"
- "paddw %%mm4, %%mm1 \n\t"
- "psraw %%mm2, %%mm0 \n\t"
- "psraw %%mm2, %%mm1 \n\t"
- "packuswb %%mm1, %%mm0 \n\t"
- "movq %%mm0, (%1) \n\t"
- "add $16, %0 \n\t"
- "add $8, %1 \n\t"
- "cmp %2, %1 \n\t"
- " jb 1b \n\t"
- : "+r" (src1), "+r"(dst1)
- : "r"(dst + width), "r"(dither[y]), "g"(log2_scale), "g"(6-log2_scale)
- );
- src += src_stride;
- dst += dst_stride;
- }
-// if(width != mmxw)
-// store_slice_c(dst + mmxw, src + mmxw, dst_stride, src_stride, width - mmxw, log2_scale);
-}
-#endif
-
-static void (*store_slice)(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale)= store_slice_c;
-
-static void (*requantize)(int16_t dst[64], int16_t src[64], int qp, uint8_t *permutation)= hardthresh_c;
-
-static void filter(struct vf_priv_s *p, uint8_t *dst, uint8_t *src, int dst_stride, int src_stride, int width, int height, uint8_t *qp_store, int qp_stride, int is_luma){
- int x, y, i;
- const int count= 1<<p->log2_count;
- const int stride= is_luma ? p->temp_stride : ((width+16+15)&(~15));
- uint64_t __attribute__((aligned(16))) block_align[32];
- int16_t *block = (int16_t *)block_align;
- int16_t *block2= (int16_t *)(block_align+16);
-
- if (!src || !dst) return; // HACK avoid crash for Y8 colourspace
- for(y=0; y<height; y++){
- int index= 8 + 8*stride + y*stride;
- fast_memcpy(p->src + index, src + y*src_stride, width);
- for(x=0; x<8; x++){
- p->src[index - x - 1]= p->src[index + x ];
- p->src[index + width + x ]= p->src[index + width - x - 1];
- }
- }
- for(y=0; y<8; y++){
- fast_memcpy(p->src + ( 7-y)*stride, p->src + ( y+8)*stride, stride);
- fast_memcpy(p->src + (height+8+y)*stride, p->src + (height-y+7)*stride, stride);
- }
- //FIXME (try edge emu)
-
- for(y=0; y<height+8; y+=8){
- memset(p->temp + (8+y)*stride, 0, 8*stride*sizeof(int16_t));
- for(x=0; x<width+8; x+=8){
- const int qps= 3 + is_luma;
- int qp;
-
- if(p->qp)
- qp= p->qp;
- else{
- qp= qp_store[ (XMIN(x, width-1)>>qps) + (XMIN(y, height-1)>>qps) * qp_stride];
- qp = FFMAX(1, norm_qscale(qp, p->mpeg2));
- }
- for(i=0; i<count; i++){
- const int x1= x + offset[i+count-1][0];
- const int y1= y + offset[i+count-1][1];
- const int index= x1 + y1*stride;
- p->dsp.get_pixels(block, p->src + index, stride);
- p->dsp.fdct(block);
- requantize(block2, block, qp, p->dsp.idct_permutation);
- p->dsp.idct(block2);
- add_block(p->temp + index, stride, block2);
- }
- }
- if(y)
- store_slice(dst + (y-8)*dst_stride, p->temp + 8 + y*stride, dst_stride, stride, width, XMIN(8, height+8-y), 6-p->log2_count);
- }
-#if 0
- for(y=0; y<height; y++){
- for(x=0; x<width; x++){
- if((((x>>6) ^ (y>>6)) & 1) == 0)
- dst[x + y*dst_stride]= p->src[8 + 8*stride + x + y*stride];
- if((x&63) == 0 || (y&63)==0)
- dst[x + y*dst_stride] += 128;
- }
- }
-#endif
- //FIXME reorder for better caching
-}
-
-static int config(struct vf_instance *vf,
- int width, int height, int d_width, int d_height,
- unsigned int flags, unsigned int outfmt){
- int h= (height+16+15)&(~15);
-
- vf->priv->temp_stride= (width+16+15)&(~15);
- vf->priv->temp= malloc(vf->priv->temp_stride*h*sizeof(int16_t));
- vf->priv->src = malloc(vf->priv->temp_stride*h*sizeof(uint8_t));
-
- return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
-}
-
-static void get_image(struct vf_instance *vf, mp_image_t *mpi){
- if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
- // ok, we can do pp in-place (or pp disabled):
- vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
- mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height);
- mpi->planes[0]=vf->dmpi->planes[0];
- mpi->stride[0]=vf->dmpi->stride[0];
- mpi->width=vf->dmpi->width;
- if(mpi->flags&MP_IMGFLAG_PLANAR){
- mpi->planes[1]=vf->dmpi->planes[1];
- mpi->planes[2]=vf->dmpi->planes[2];
- mpi->stride[1]=vf->dmpi->stride[1];
- mpi->stride[2]=vf->dmpi->stride[2];
- }
- mpi->flags|=MP_IMGFLAG_DIRECT;
-}
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
- mp_image_t *dmpi;
-
- if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
- // no DR, so get a new image! hope we'll get DR buffer:
- dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
- MP_IMGTYPE_TEMP,
- MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
- mpi->width,mpi->height);
- ff_vf_clone_mpi_attributes(dmpi, mpi);
- }else{
- dmpi=vf->dmpi;
- }
-
- vf->priv->mpeg2= mpi->qscale_type;
- if(mpi->pict_type != 3 && mpi->qscale && !vf->priv->qp){
- int w = mpi->qstride;
- int h = (mpi->h + 15) >> 4;
- if (!w) {
- w = (mpi->w + 15) >> 4;
- h = 1;
- }
- if(!vf->priv->non_b_qp)
- vf->priv->non_b_qp= malloc(w*h);
- fast_memcpy(vf->priv->non_b_qp, mpi->qscale, w*h);
- }
- if(vf->priv->log2_count || !(mpi->flags&MP_IMGFLAG_DIRECT)){
- char *qp_tab= vf->priv->non_b_qp;
- if((vf->priv->mode&4) || !qp_tab)
- qp_tab= mpi->qscale;
-
- if(qp_tab || vf->priv->qp){
- filter(vf->priv, dmpi->planes[0], mpi->planes[0], dmpi->stride[0], mpi->stride[0], mpi->w, mpi->h, qp_tab, mpi->qstride, 1);
- filter(vf->priv, dmpi->planes[1], mpi->planes[1], dmpi->stride[1], mpi->stride[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, qp_tab, mpi->qstride, 0);
- filter(vf->priv, dmpi->planes[2], mpi->planes[2], dmpi->stride[2], mpi->stride[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, qp_tab, mpi->qstride, 0);
- }else{
- memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, dmpi->stride[0], mpi->stride[0]);
- memcpy_pic(dmpi->planes[1], mpi->planes[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[1], mpi->stride[1]);
- memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[2], mpi->stride[2]);
- }
- }
-
-#if HAVE_MMX
- if(ff_gCpuCaps.hasMMX) __asm__ volatile ("emms\n\t");
-#endif
-#if HAVE_MMX2
- if(ff_gCpuCaps.hasMMX2) __asm__ volatile ("sfence\n\t");
-#endif
-
- return ff_vf_next_put_image(vf,dmpi, pts);
-}
-
-static void uninit(struct vf_instance *vf){
- if(!vf->priv) return;
-
- free(vf->priv->temp);
- vf->priv->temp= NULL;
- free(vf->priv->src);
- vf->priv->src= NULL;
- free(vf->priv->avctx);
- vf->priv->avctx= NULL;
- free(vf->priv->non_b_qp);
- vf->priv->non_b_qp= NULL;
-
- free(vf->priv);
- vf->priv=NULL;
-}
-
-//===========================================================================//
-static int query_format(struct vf_instance *vf, unsigned int fmt){
- switch(fmt){
- case IMGFMT_YVU9:
- case IMGFMT_IF09:
- case IMGFMT_YV12:
- case IMGFMT_I420:
- case IMGFMT_IYUV:
- case IMGFMT_CLPL:
- case IMGFMT_Y800:
- case IMGFMT_Y8:
- case IMGFMT_444P:
- case IMGFMT_422P:
- case IMGFMT_411P:
- return ff_vf_next_query_format(vf,fmt);
- }
- return 0;
-}
-
-static int control(struct vf_instance *vf, int request, void* data){
- switch(request){
- case VFCTRL_QUERY_MAX_PP_LEVEL:
- return 6;
- case VFCTRL_SET_PP_LEVEL:
- vf->priv->log2_count= *((unsigned int*)data);
- return CONTROL_TRUE;
- }
- return ff_vf_next_control(vf,request,data);
-}
-
-static int vf_open(vf_instance_t *vf, char *args){
-
- int log2c=-1;
-
- vf->config=config;
- vf->put_image=put_image;
- vf->get_image=get_image;
- vf->query_format=query_format;
- vf->uninit=uninit;
- vf->control= control;
- vf->priv=malloc(sizeof(struct vf_priv_s));
- memset(vf->priv, 0, sizeof(struct vf_priv_s));
-
- ff_init_avcodec();
-
- vf->priv->avctx= avcodec_alloc_context3(NULL);
- ff_dsputil_init(&vf->priv->dsp, vf->priv->avctx);
-
- vf->priv->log2_count= 3;
-
- if (args) sscanf(args, "%d:%d:%d", &log2c, &vf->priv->qp, &vf->priv->mode);
-
- if( log2c >=0 && log2c <=6 )
- vf->priv->log2_count = log2c;
-
- if(vf->priv->qp < 0)
- vf->priv->qp = 0;
-
- switch(vf->priv->mode&3){
- default:
- case 0: requantize= hardthresh_c; break;
- case 1: requantize= softthresh_c; break;
- }
-
-#if HAVE_MMX
- if(ff_gCpuCaps.hasMMX){
- store_slice= store_slice_mmx;
- switch(vf->priv->mode&3){
- case 0: requantize= hardthresh_mmx; break;
- case 1: requantize= softthresh_mmx; break;
- }
- }
-#endif
-
- return 1;
-}
-
-const vf_info_t ff_vf_info_spp = {
- "simple postprocess",
- "spp",
- "Michael Niedermayer",
- "",
- vf_open,
- NULL
-};
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_telecine.c b/ffmpeg/libavfilter/libmpcodecs/vf_telecine.c
deleted file mode 100644
index 77f75f0..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_telecine.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "config.h"
-#include "mp_msg.h"
-
-#include "img_format.h"
-#include "mp_image.h"
-#include "vf.h"
-
-#include "libvo/fastmemcpy.h"
-
-struct vf_priv_s {
- int frame;
-};
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
-{
- mp_image_t *dmpi;
- int ret;
- int w = (IMGFMT_IS_YUVP16(mpi->imgfmt) ? 2 : 1) * mpi->w;
- int chroma_width = (IMGFMT_IS_YUVP16(mpi->imgfmt) ? 2 : 1) * mpi->chroma_width;
-
- vf->priv->frame = (vf->priv->frame+1)%4;
-
- dmpi = ff_vf_get_image(vf->next, mpi->imgfmt,
- MP_IMGTYPE_STATIC, MP_IMGFLAG_ACCEPT_STRIDE |
- MP_IMGFLAG_PRESERVE, mpi->width, mpi->height);
-
- ret = 0;
- // 0/0 1/1 2/2 2/3 3/0
- switch (vf->priv->frame) {
- case 0:
- my_memcpy_pic(dmpi->planes[0]+dmpi->stride[0],
- mpi->planes[0]+mpi->stride[0], w, mpi->h/2,
- dmpi->stride[0]*2, mpi->stride[0]*2);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- my_memcpy_pic(dmpi->planes[1]+dmpi->stride[1],
- mpi->planes[1]+mpi->stride[1],
- chroma_width, mpi->chroma_height/2,
- dmpi->stride[1]*2, mpi->stride[1]*2);
- my_memcpy_pic(dmpi->planes[2]+dmpi->stride[2],
- mpi->planes[2]+mpi->stride[2],
- chroma_width, mpi->chroma_height/2,
- dmpi->stride[2]*2, mpi->stride[2]*2);
- }
- ret = ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE);
- /* Fallthrough */
- case 1:
- case 2:
- memcpy_pic(dmpi->planes[0], mpi->planes[0], w, mpi->h,
- dmpi->stride[0], mpi->stride[0]);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- memcpy_pic(dmpi->planes[1], mpi->planes[1],
- chroma_width, mpi->chroma_height,
- dmpi->stride[1], mpi->stride[1]);
- memcpy_pic(dmpi->planes[2], mpi->planes[2],
- chroma_width, mpi->chroma_height,
- dmpi->stride[2], mpi->stride[2]);
- }
- return ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE) || ret;
- case 3:
- my_memcpy_pic(dmpi->planes[0]+dmpi->stride[0],
- mpi->planes[0]+mpi->stride[0], w, mpi->h/2,
- dmpi->stride[0]*2, mpi->stride[0]*2);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- my_memcpy_pic(dmpi->planes[1]+dmpi->stride[1],
- mpi->planes[1]+mpi->stride[1],
- chroma_width, mpi->chroma_height/2,
- dmpi->stride[1]*2, mpi->stride[1]*2);
- my_memcpy_pic(dmpi->planes[2]+dmpi->stride[2],
- mpi->planes[2]+mpi->stride[2],
- chroma_width, mpi->chroma_height/2,
- dmpi->stride[2]*2, mpi->stride[2]*2);
- }
- ret = ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE);
- my_memcpy_pic(dmpi->planes[0], mpi->planes[0], w, mpi->h/2,
- dmpi->stride[0]*2, mpi->stride[0]*2);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- my_memcpy_pic(dmpi->planes[1], mpi->planes[1],
- chroma_width, mpi->chroma_height/2,
- dmpi->stride[1]*2, mpi->stride[1]*2);
- my_memcpy_pic(dmpi->planes[2], mpi->planes[2],
- chroma_width, mpi->chroma_height/2,
- dmpi->stride[2]*2, mpi->stride[2]*2);
- }
- return ret;
- }
- return 0;
-}
-
-#if 0
-static int query_format(struct vf_instance *vf, unsigned int fmt)
-{
- /* FIXME - figure out which other formats work */
- switch (fmt) {
- case IMGFMT_YV12:
- case IMGFMT_IYUV:
- case IMGFMT_I420:
- return ff_vf_next_query_format(vf, fmt);
- }
- return 0;
-}
-
-static int config(struct vf_instance *vf,
- int width, int height, int d_width, int d_height,
- unsigned int flags, unsigned int outfmt)
-{
- return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
-}
-#endif
-
-static void uninit(struct vf_instance *vf)
-{
- free(vf->priv);
-}
-
-static int vf_open(vf_instance_t *vf, char *args)
-{
- //vf->config = config;
- vf->put_image = put_image;
- //vf->query_format = query_format;
- vf->uninit = uninit;
- vf->default_reqs = VFCAP_ACCEPT_STRIDE;
- vf->priv = calloc(1, sizeof(struct vf_priv_s));
- vf->priv->frame = 1;
- if (args) sscanf(args, "%d", &vf->priv->frame);
- vf->priv->frame--;
- return 1;
-}
-
-const vf_info_t ff_vf_info_telecine = {
- "telecine filter",
- "telecine",
- "Rich Felker",
- "",
- vf_open,
- NULL
-};
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_tinterlace.c b/ffmpeg/libavfilter/libmpcodecs/vf_tinterlace.c
deleted file mode 100644
index 6c7dbab..0000000
--- a/ffmpeg/libavfilter/libmpcodecs/vf_tinterlace.c
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Copyright (C) 2003 Michael Zucchi <notzed@ximian.com>
- *
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "config.h"
-#include "mp_msg.h"
-
-#include "img_format.h"
-#include "mp_image.h"
-#include "vf.h"
-
-#include "libvo/fastmemcpy.h"
-
-struct vf_priv_s {
- int mode;
- int frame;
- mp_image_t *dmpi;
-};
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
-{
- int ret = 0;
- mp_image_t *dmpi;
-
- switch (vf->priv->mode) {
- case 0:
- dmpi = vf->priv->dmpi;
- if (dmpi == NULL) {
- dmpi = ff_vf_get_image(vf->next, mpi->imgfmt,
- MP_IMGTYPE_STATIC, MP_IMGFLAG_ACCEPT_STRIDE |
- MP_IMGFLAG_PRESERVE,
- mpi->width, mpi->height*2);
-
- vf->priv->dmpi = dmpi;
-
- memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h,
- dmpi->stride[0]*2, mpi->stride[0]);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- memcpy_pic(dmpi->planes[1], mpi->planes[1],
- mpi->chroma_width, mpi->chroma_height,
- dmpi->stride[1]*2, mpi->stride[1]);
- memcpy_pic(dmpi->planes[2], mpi->planes[2],
- mpi->chroma_width, mpi->chroma_height,
- dmpi->stride[2]*2, mpi->stride[2]);
- }
- } else {
- vf->priv->dmpi = NULL;
-
- memcpy_pic(dmpi->planes[0]+dmpi->stride[0], mpi->planes[0], mpi->w, mpi->h,
- dmpi->stride[0]*2, mpi->stride[0]);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- memcpy_pic(dmpi->planes[1]+dmpi->stride[1], mpi->planes[1],
- mpi->chroma_width, mpi->chroma_height,
- dmpi->stride[1]*2, mpi->stride[1]);
- memcpy_pic(dmpi->planes[2]+dmpi->stride[2], mpi->planes[2],
- mpi->chroma_width, mpi->chroma_height,
- dmpi->stride[2]*2, mpi->stride[2]);
- }
- ret = ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE);
- }
- break;
- case 1:
- if (vf->priv->frame & 1)
- ret = ff_vf_next_put_image(vf, mpi, MP_NOPTS_VALUE);
- break;
- case 2:
- if ((vf->priv->frame & 1) == 0)
- ret = ff_vf_next_put_image(vf, mpi, MP_NOPTS_VALUE);
- break;
- case 3:
- dmpi = ff_vf_get_image(vf->next, mpi->imgfmt,
- MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE,
- mpi->width, mpi->height*2);
- /* fixme, just clear alternate lines */
- ff_vf_mpi_clear(dmpi, 0, 0, dmpi->w, dmpi->h);
- if ((vf->priv->frame & 1) == 0) {
- memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h,
- dmpi->stride[0]*2, mpi->stride[0]);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- memcpy_pic(dmpi->planes[1], mpi->planes[1],
- mpi->chroma_width, mpi->chroma_height,
- dmpi->stride[1]*2, mpi->stride[1]);
- memcpy_pic(dmpi->planes[2], mpi->planes[2],
- mpi->chroma_width, mpi->chroma_height,
- dmpi->stride[2]*2, mpi->stride[2]);
- }
- } else {
- memcpy_pic(dmpi->planes[0]+dmpi->stride[0], mpi->planes[0], mpi->w, mpi->h,
- dmpi->stride[0]*2, mpi->stride[0]);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- memcpy_pic(dmpi->planes[1]+dmpi->stride[1], mpi->planes[1],
- mpi->chroma_width, mpi->chroma_height,
- dmpi->stride[1]*2, mpi->stride[1]);
- memcpy_pic(dmpi->planes[2]+dmpi->stride[2], mpi->planes[2],
- mpi->chroma_width, mpi->chroma_height,
- dmpi->stride[2]*2, mpi->stride[2]);
- }
- }
- ret = ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE);
- break;
- case 4:
- // Interleave even lines (only) from Frame 'i' with odd
- // lines (only) from Frame 'i+1', halving the Frame
- // rate and preserving image height.
-
- dmpi = vf->priv->dmpi;
-
- // @@ Need help: Should I set dmpi->fields to indicate
- // that the (new) frame will be interlaced!? E.g. ...
- // dmpi->fields |= MP_IMGFIELD_INTERLACED;
- // dmpi->fields |= MP_IMGFIELD_TOP_FIRST;
- // etc.
-
- if (dmpi == NULL) {
- dmpi = ff_vf_get_image(vf->next, mpi->imgfmt,
- MP_IMGTYPE_STATIC, MP_IMGFLAG_ACCEPT_STRIDE |
- MP_IMGFLAG_PRESERVE,
- mpi->width, mpi->height);
-
- vf->priv->dmpi = dmpi;
-
- my_memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h/2,
- dmpi->stride[0]*2, mpi->stride[0]*2);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- my_memcpy_pic(dmpi->planes[1], mpi->planes[1],
- mpi->chroma_width, mpi->chroma_height/2,
- dmpi->stride[1]*2, mpi->stride[1]*2);
- my_memcpy_pic(dmpi->planes[2], mpi->planes[2],
- mpi->chroma_width, mpi->chroma_height/2,
- dmpi->stride[2]*2, mpi->stride[2]*2);
- }
- } else {
- vf->priv->dmpi = NULL;
-
- my_memcpy_pic(dmpi->planes[0]+dmpi->stride[0],
- mpi->planes[0]+mpi->stride[0],
- mpi->w, mpi->h/2,
- dmpi->stride[0]*2, mpi->stride[0]*2);
- if (mpi->flags & MP_IMGFLAG_PLANAR) {
- my_memcpy_pic(dmpi->planes[1]+dmpi->stride[1],
- mpi->planes[1]+mpi->stride[1],
- mpi->chroma_width, mpi->chroma_height/2,
- dmpi->stride[1]*2, mpi->stride[1]*2);
- my_memcpy_pic(dmpi->planes[2]+dmpi->stride[2],
- mpi->planes[2]+mpi->stride[2],
- mpi->chroma_width, mpi->chroma_height/2,
- dmpi->stride[2]*2, mpi->stride[2]*2);
- }
- ret = ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE);
- }
- break;
- }
-
- vf->priv->frame++;
-
- return ret;
-}
-
-static int query_format(struct vf_instance *vf, unsigned int fmt)
-{
- /* FIXME - figure out which other formats work */
- switch (fmt) {
- case IMGFMT_YV12:
- case IMGFMT_IYUV:
- case IMGFMT_I420:
- return ff_vf_next_query_format(vf, fmt);
- }
- return 0;
-}
-
-static int config(struct vf_instance *vf,
- int width, int height, int d_width, int d_height,
- unsigned int flags, unsigned int outfmt)
-{
- switch (vf->priv->mode) {
- case 0:
- case 3:
- return ff_vf_next_config(vf,width,height*2,d_width,d_height*2,flags,outfmt);
- case 1: /* odd frames */
- case 2: /* even frames */
- case 4: /* alternate frame (height-preserving) interlacing */
- return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
- }
- return 0;
-}
-
-static void uninit(struct vf_instance *vf)
-{
- free(vf->priv);
-}
-
-static int vf_open(vf_instance_t *vf, char *args)
-{
- struct vf_priv_s *p;
- vf->config = config;
- vf->put_image = put_image;
- vf->query_format = query_format;
- vf->uninit = uninit;
- vf->default_reqs = VFCAP_ACCEPT_STRIDE;
- vf->priv = p = calloc(1, sizeof(struct vf_priv_s));
- p->mode = 0;
- if (args)
- sscanf(args, "%d", &p->mode);
- p->frame = 0;
- return 1;
-}
-
-const vf_info_t ff_vf_info_tinterlace = {
- "temporal field interlacing",
- "tinterlace",
- "Michael Zucchi",
- "",
- vf_open,
- NULL
-};
diff --git a/ffmpeg/libavfilter/libmpcodecs/vf_uspp.c b/ffmpeg/libavfilter/libmpcodecs/vf_uspp.c
index 54cc0f9..1fb2523 100644
--- a/ffmpeg/libavfilter/libmpcodecs/vf_uspp.c
+++ b/ffmpeg/libavfilter/libmpcodecs/vf_uspp.c
@@ -245,8 +245,8 @@ static int config(struct vf_instance *vf,
av_dict_free(&opts);
assert(avctx_enc->codec);
}
- vf->priv->frame= avcodec_alloc_frame();
- vf->priv->frame_dec= avcodec_alloc_frame();
+ vf->priv->frame= av_frame_alloc();
+ vf->priv->frame_dec= av_frame_alloc();
vf->priv->outbuf_size= (width + BLOCK)*(height + BLOCK)*10;
vf->priv->outbuf= malloc(vf->priv->outbuf_size);
diff --git a/ffmpeg/libavfilter/lswsutils.c b/ffmpeg/libavfilter/lswsutils.c
index 6902ee9..ebb4f93 100644
--- a/ffmpeg/libavfilter/lswsutils.c
+++ b/ffmpeg/libavfilter/lswsutils.c
@@ -28,7 +28,7 @@ int ff_scale_image(uint8_t *dst_data[4], int dst_linesize[4],
int ret;
struct SwsContext *sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
dst_w, dst_h, dst_pix_fmt,
- SWS_BILINEAR, NULL, NULL, NULL);
+ 0, NULL, NULL, NULL);
if (!sws_ctx) {
av_log(log_ctx, AV_LOG_ERROR,
"Impossible to create scale context for the conversion "
diff --git a/ffmpeg/libavfilter/split.c b/ffmpeg/libavfilter/split.c
index b57d8cd..6abd5ee 100644
--- a/ffmpeg/libavfilter/split.c
+++ b/ffmpeg/libavfilter/split.c
@@ -25,27 +25,27 @@
#include <stdio.h>
+#include "libavutil/attributes.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"
+#include "libavutil/opt.h"
+
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
#include "video.h"
-static int split_init(AVFilterContext *ctx, const char *args)
+typedef struct SplitContext {
+ const AVClass *class;
+ int nb_outputs;
+} SplitContext;
+
+static av_cold int split_init(AVFilterContext *ctx)
{
- int i, nb_outputs = 2;
-
- if (args) {
- nb_outputs = strtol(args, NULL, 0);
- if (nb_outputs <= 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid number of outputs specified: %d.\n",
- nb_outputs);
- return AVERROR(EINVAL);
- }
- }
+ SplitContext *s = ctx->priv;
+ int i;
- for (i = 0; i < nb_outputs; i++) {
+ for (i = 0; i < s->nb_outputs; i++) {
char name[32];
AVFilterPad pad = { 0 };
@@ -59,7 +59,7 @@ static int split_init(AVFilterContext *ctx, const char *args)
return 0;
}
-static void split_uninit(AVFilterContext *ctx)
+static av_cold void split_uninit(AVFilterContext *ctx)
{
int i;
@@ -91,44 +91,57 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
return ret;
}
+#define OFFSET(x) offsetof(SplitContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption options[] = {
+ { "outputs", "set number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, FLAGS },
+ { NULL }
+};
+
+#define split_options options
+AVFILTER_DEFINE_CLASS(split);
+
+#define asplit_options options
+AVFILTER_DEFINE_CLASS(asplit);
+
static const AVFilterPad avfilter_vf_split_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
-AVFilter avfilter_vf_split = {
- .name = "split",
- .description = NULL_IF_CONFIG_SMALL("Pass on the input video to N outputs."),
-
- .init = split_init,
- .uninit = split_uninit,
-
- .inputs = avfilter_vf_split_inputs,
- .outputs = NULL,
+AVFilter ff_vf_split = {
+ .name = "split",
+ .description = NULL_IF_CONFIG_SMALL("Pass on the input to N video outputs."),
+ .priv_size = sizeof(SplitContext),
+ .priv_class = &split_class,
+ .init = split_init,
+ .uninit = split_uninit,
+ .inputs = avfilter_vf_split_inputs,
+ .outputs = NULL,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
static const AVFilterPad avfilter_af_asplit_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
-AVFilter avfilter_af_asplit = {
+AVFilter ff_af_asplit = {
.name = "asplit",
.description = NULL_IF_CONFIG_SMALL("Pass on the audio input to N audio outputs."),
-
- .init = split_init,
- .uninit = split_uninit,
-
- .inputs = avfilter_af_asplit_inputs,
- .outputs = NULL,
+ .priv_size = sizeof(SplitContext),
+ .priv_class = &asplit_class,
+ .init = split_init,
+ .uninit = split_uninit,
+ .inputs = avfilter_af_asplit_inputs,
+ .outputs = NULL,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
diff --git a/ffmpeg/libavfilter/src_movie.c b/ffmpeg/libavfilter/src_movie.c
index 455ebe0..d1289e2 100644
--- a/ffmpeg/libavfilter/src_movie.c
+++ b/ffmpeg/libavfilter/src_movie.c
@@ -27,9 +27,10 @@
* @todo support a PTS correction mechanism
*/
-/* #define DEBUG */
-
#include <float.h>
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
#include "libavutil/avstring.h"
#include "libavutil/avassert.h"
#include "libavutil/opt.h"
@@ -70,19 +71,20 @@ typedef struct {
} MovieContext;
#define OFFSET(x) offsetof(MovieContext, x)
-#define F AV_OPT_FLAG_FILTERING_PARAM
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
static const AVOption movie_options[]= {
-{"format_name", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MIN, CHAR_MAX, F },
-{"f", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MIN, CHAR_MAX, F },
-{"streams", "set streams", OFFSET(stream_specs), AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MAX, CHAR_MAX, F },
-{"s", "set streams", OFFSET(stream_specs), AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MAX, CHAR_MAX, F },
-{"si", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, F },
-{"stream_index", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, F },
-{"seek_point", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, {.dbl = 0}, 0, (INT64_MAX-1) / 1000000, F },
-{"sp", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, {.dbl = 0}, 0, (INT64_MAX-1) / 1000000, F },
-{"loop", "set loop count", OFFSET(loop_count), AV_OPT_TYPE_INT, {.i64 = 1}, 0, INT_MAX, F },
-{NULL},
+ { "filename", NULL, OFFSET(file_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "format_name", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "f", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "stream_index", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
+ { "si", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
+ { "seek_point", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, (INT64_MAX-1) / 1000000, FLAGS },
+ { "sp", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, (INT64_MAX-1) / 1000000, FLAGS },
+ { "streams", "set streams", OFFSET(stream_specs), AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MAX, CHAR_MAX, FLAGS },
+ { "s", "set streams", OFFSET(stream_specs), AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MAX, CHAR_MAX, FLAGS },
+ { "loop", "set loop count", OFFSET(loop_count), AV_OPT_TYPE_INT, {.i64 = 1}, 0, INT_MAX, FLAGS },
+ { NULL },
};
static int movie_config_output_props(AVFilterLink *outlink);
@@ -91,13 +93,13 @@ static int movie_request_frame(AVFilterLink *outlink);
static AVStream *find_stream(void *log, AVFormatContext *avf, const char *spec)
{
int i, ret, already = 0, stream_id = -1;
- char type_char, dummy;
+ char type_char[2], dummy;
AVStream *found = NULL;
enum AVMediaType type;
- ret = sscanf(spec, "d%[av]%d%c", &type_char, &stream_id, &dummy);
+ ret = sscanf(spec, "d%1[av]%d%c", type_char, &stream_id, &dummy);
if (ret >= 1 && ret <= 2) {
- type = type_char == 'v' ? AVMEDIA_TYPE_VIDEO : AVMEDIA_TYPE_AUDIO;
+ type = type_char[0] == 'v' ? AVMEDIA_TYPE_VIDEO : AVMEDIA_TYPE_AUDIO;
ret = av_find_best_stream(avf, type, stream_id, -1, NULL, 0);
if (ret < 0) {
av_log(log, AV_LOG_ERROR, "No %s stream with index '%d' found\n",
@@ -186,7 +188,7 @@ static int guess_channel_layout(MovieStream *st, int st_index, void *log_ctx)
return 0;
}
-static av_cold int movie_common_init(AVFilterContext *ctx, const char *args, const AVClass *class)
+static av_cold int movie_common_init(AVFilterContext *ctx)
{
MovieContext *movie = ctx->priv;
AVInputFormat *iformat = NULL;
@@ -196,22 +198,11 @@ static av_cold int movie_common_init(AVFilterContext *ctx, const char *args, con
char name[16];
AVStream *st;
- movie->class = class;
- av_opt_set_defaults(movie);
-
- if (args) {
- movie->file_name = av_get_token(&args, ":");
- if (!movie->file_name)
- return AVERROR(ENOMEM);
- }
- if (!args || !*movie->file_name) {
+ if (!movie->file_name) {
av_log(ctx, AV_LOG_ERROR, "No filename provided!\n");
return AVERROR(EINVAL);
}
- if (*args++ == ':' && (ret = av_set_options_string(movie, args, "=", ":")) < 0)
- return ret;
-
movie->seek_point = movie->seek_point_d * 1000000 + 0.5;
stream_specs = movie->stream_specs;
@@ -332,8 +323,6 @@ static av_cold void movie_uninit(AVFilterContext *ctx)
if (movie->st[i].st)
avcodec_close(movie->st[i].st->codec);
}
- av_opt_free(movie);
- av_freep(&movie->file_name);
av_freep(&movie->st);
av_freep(&movie->out_index);
av_frame_free(&movie->frame);
@@ -526,9 +515,12 @@ static int movie_push_frame(AVFilterContext *ctx, unsigned out_id)
if (ret < 0) {
av_log(ctx, AV_LOG_WARNING, "Decode error: %s\n", av_err2str(ret));
av_frame_free(&movie->frame);
+ av_free_packet(&movie->pkt0);
+ movie->pkt.size = 0;
+ movie->pkt.data = NULL;
return 0;
}
- if (!ret)
+ if (!ret || st->st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
ret = pkt->size;
pkt->data += ret;
@@ -574,22 +566,18 @@ static int movie_request_frame(AVFilterLink *outlink)
AVFILTER_DEFINE_CLASS(movie);
-static av_cold int movie_init(AVFilterContext *ctx, const char *args)
-{
- return movie_common_init(ctx, args, &movie_class);
-}
-
-AVFilter avfilter_avsrc_movie = {
+AVFilter ff_avsrc_movie = {
.name = "movie",
.description = NULL_IF_CONFIG_SMALL("Read from a movie source."),
.priv_size = sizeof(MovieContext),
- .init = movie_init,
+ .priv_class = &movie_class,
+ .init = movie_common_init,
.uninit = movie_uninit,
.query_formats = movie_query_formats,
.inputs = NULL,
.outputs = NULL,
- .priv_class = &movie_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
#endif /* CONFIG_MOVIE_FILTER */
@@ -599,22 +587,18 @@ AVFilter avfilter_avsrc_movie = {
#define amovie_options movie_options
AVFILTER_DEFINE_CLASS(amovie);
-static av_cold int amovie_init(AVFilterContext *ctx, const char *args)
-{
- return movie_common_init(ctx, args, &amovie_class);
-}
-
-AVFilter avfilter_avsrc_amovie = {
+AVFilter ff_avsrc_amovie = {
.name = "amovie",
.description = NULL_IF_CONFIG_SMALL("Read audio from a movie source."),
.priv_size = sizeof(MovieContext),
- .init = amovie_init,
+ .init = movie_common_init,
.uninit = movie_uninit,
.query_formats = movie_query_formats,
.inputs = NULL,
.outputs = NULL,
.priv_class = &amovie_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
#endif /* CONFIG_AMOVIE_FILTER */
diff --git a/ffmpeg/libavfilter/transform.c b/ffmpeg/libavfilter/transform.c
index 1db8c08..3fc547e 100644
--- a/ffmpeg/libavfilter/transform.c
+++ b/ffmpeg/libavfilter/transform.c
@@ -199,4 +199,3 @@ int avfilter_transform(const uint8_t *src, uint8_t *dst,
}
return 0;
}
-
diff --git a/ffmpeg/libavfilter/version.h b/ffmpeg/libavfilter/version.h
index f592fc1..ff5b604 100644
--- a/ffmpeg/libavfilter/version.h
+++ b/ffmpeg/libavfilter/version.h
@@ -23,14 +23,15 @@
/**
* @file
+ * @ingroup lavfi
* Libavfilter version macros
*/
-#include "libavutil/avutil.h"
+#include "libavutil/version.h"
-#define LIBAVFILTER_VERSION_MAJOR 3
-#define LIBAVFILTER_VERSION_MINOR 48
-#define LIBAVFILTER_VERSION_MICRO 100
+#define LIBAVFILTER_VERSION_MAJOR 4
+#define LIBAVFILTER_VERSION_MINOR 0
+#define LIBAVFILTER_VERSION_MICRO 103
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
LIBAVFILTER_VERSION_MINOR, \
@@ -49,19 +50,43 @@
*/
#ifndef FF_API_AVFILTERPAD_PUBLIC
-#define FF_API_AVFILTERPAD_PUBLIC (LIBAVFILTER_VERSION_MAJOR < 4)
+#define FF_API_AVFILTERPAD_PUBLIC (LIBAVFILTER_VERSION_MAJOR < 5)
#endif
#ifndef FF_API_FOO_COUNT
-#define FF_API_FOO_COUNT (LIBAVFILTER_VERSION_MAJOR < 4)
+#define FF_API_FOO_COUNT (LIBAVFILTER_VERSION_MAJOR < 5)
#endif
#ifndef FF_API_FILL_FRAME
-#define FF_API_FILL_FRAME (LIBAVFILTER_VERSION_MAJOR < 4)
+#define FF_API_FILL_FRAME (LIBAVFILTER_VERSION_MAJOR < 5)
#endif
#ifndef FF_API_BUFFERSRC_BUFFER
-#define FF_API_BUFFERSRC_BUFFER (LIBAVFILTER_VERSION_MAJOR < 4)
+#define FF_API_BUFFERSRC_BUFFER (LIBAVFILTER_VERSION_MAJOR < 5)
#endif
#ifndef FF_API_AVFILTERBUFFER
-#define FF_API_AVFILTERBUFFER (LIBAVFILTER_VERSION_MAJOR < 4)
+#define FF_API_AVFILTERBUFFER (LIBAVFILTER_VERSION_MAJOR < 5)
+#endif
+#ifndef FF_API_OLD_FILTER_OPTS
+#define FF_API_OLD_FILTER_OPTS (LIBAVFILTER_VERSION_MAJOR < 5)
+#endif
+#ifndef FF_API_ACONVERT_FILTER
+#define FF_API_ACONVERT_FILTER (LIBAVFILTER_VERSION_MAJOR < 5)
+#endif
+#ifndef FF_API_AVFILTER_OPEN
+#define FF_API_AVFILTER_OPEN (LIBAVFILTER_VERSION_MAJOR < 5)
+#endif
+#ifndef FF_API_AVFILTER_INIT_FILTER
+#define FF_API_AVFILTER_INIT_FILTER (LIBAVFILTER_VERSION_MAJOR < 5)
+#endif
+#ifndef FF_API_OLD_FILTER_REGISTER
+#define FF_API_OLD_FILTER_REGISTER (LIBAVFILTER_VERSION_MAJOR < 5)
+#endif
+#ifndef FF_API_OLD_GRAPH_PARSE
+#define FF_API_OLD_GRAPH_PARSE (LIBAVFILTER_VERSION_MAJOR < 5)
+#endif
+#ifndef FF_API_DRAWTEXT_OLD_TIMELINE
+#define FF_API_DRAWTEXT_OLD_TIMELINE (LIBAVFILTER_VERSION_MAJOR < 5)
+#endif
+#ifndef FF_API_NOCONST_GET_NAME
+#define FF_API_NOCONST_GET_NAME (LIBAVFILTER_VERSION_MAJOR < 5)
#endif
#endif /* AVFILTER_VERSION_H */
diff --git a/ffmpeg/libavfilter/vf_alphaextract.c b/ffmpeg/libavfilter/vf_alphaextract.c
deleted file mode 100644
index 62ceecf..0000000
--- a/ffmpeg/libavfilter/vf_alphaextract.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (c) 2012 Steven Robertson
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * simple channel-swapping filter to get at the alpha component
- */
-
-#include <string.h>
-
-#include "libavutil/pixfmt.h"
-#include "avfilter.h"
-#include "drawutils.h"
-#include "internal.h"
-#include "formats.h"
-#include "video.h"
-
-enum { Y, U, V, A };
-
-typedef struct {
- int is_packed_rgb;
- uint8_t rgba_map[4];
-} AlphaExtractContext;
-
-static int query_formats(AVFilterContext *ctx)
-{
- static const enum AVPixelFormat in_fmts[] = {
- AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
- AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
- AV_PIX_FMT_NONE
- };
- static const enum AVPixelFormat out_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
- ff_formats_ref(ff_make_format_list(in_fmts), &ctx->inputs[0]->out_formats);
- ff_formats_ref(ff_make_format_list(out_fmts), &ctx->outputs[0]->in_formats);
- return 0;
-}
-
-static int config_input(AVFilterLink *inlink)
-{
- AlphaExtractContext *extract = inlink->dst->priv;
- extract->is_packed_rgb =
- ff_fill_rgba_map(extract->rgba_map, inlink->format) >= 0;
- return 0;
-}
-
-static int filter_frame(AVFilterLink *inlink, AVFrame *cur_buf)
-{
- AlphaExtractContext *extract = inlink->dst->priv;
- AVFilterLink *outlink = inlink->dst->outputs[0];
- AVFrame *out_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
- int ret;
-
- if (!out_buf) {
- ret = AVERROR(ENOMEM);
- goto end;
- }
- av_frame_copy_props(out_buf, cur_buf);
-
- if (extract->is_packed_rgb) {
- int x, y;
- uint8_t *pcur, *pout;
- for (y = 0; y < outlink->h; y++) {
- pcur = cur_buf->data[0] + y * cur_buf->linesize[0] + extract->rgba_map[A];
- pout = out_buf->data[0] + y * out_buf->linesize[0];
- for (x = 0; x < outlink->w; x++) {
- *pout = *pcur;
- pout += 1;
- pcur += 4;
- }
- }
- } else {
- const int linesize = abs(FFMIN(out_buf->linesize[Y], cur_buf->linesize[A]));
- int y;
- for (y = 0; y < outlink->h; y++) {
- memcpy(out_buf->data[Y] + y * out_buf->linesize[Y],
- cur_buf->data[A] + y * cur_buf->linesize[A],
- linesize);
- }
- }
-
- ret = ff_filter_frame(outlink, out_buf);
-
-end:
- av_frame_free(&cur_buf);
- return ret;
-}
-
-static const AVFilterPad alphaextract_inputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_input,
- .filter_frame = filter_frame,
- },
- { NULL }
-};
-
-static const AVFilterPad alphaextract_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- },
- { NULL }
-};
-
-AVFilter avfilter_vf_alphaextract = {
- .name = "alphaextract",
- .description = NULL_IF_CONFIG_SMALL("Extract an alpha channel as a "
- "grayscale image component."),
- .priv_size = sizeof(AlphaExtractContext),
- .query_formats = query_formats,
- .inputs = alphaextract_inputs,
- .outputs = alphaextract_outputs,
-};
diff --git a/ffmpeg/libavfilter/vf_alphamerge.c b/ffmpeg/libavfilter/vf_alphamerge.c
index 644c7f4..5f0da35 100644
--- a/ffmpeg/libavfilter/vf_alphamerge.c
+++ b/ffmpeg/libavfilter/vf_alphamerge.c
@@ -175,7 +175,6 @@ static const AVFilterPad alphamerge_inputs[] = {
.name = "main",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input_main,
- .get_video_buffer = ff_null_get_video_buffer,
.filter_frame = filter_frame,
.needs_writable = 1,
},{
@@ -196,7 +195,7 @@ static const AVFilterPad alphamerge_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_alphamerge = {
+AVFilter ff_vf_alphamerge = {
.name = "alphamerge",
.description = NULL_IF_CONFIG_SMALL("Copy the luma value of the second "
"input into the alpha channel of the first input."),
diff --git a/ffmpeg/libavfilter/vf_aspect.c b/ffmpeg/libavfilter/vf_aspect.c
index 710b81f..97fb216 100644
--- a/ffmpeg/libavfilter/vf_aspect.c
+++ b/ffmpeg/libavfilter/vf_aspect.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2010 Bobby Bingham
-
+ *
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
@@ -23,112 +23,189 @@
* aspect ratio modification video filters
*/
+#include <float.h>
+
#include "libavutil/common.h"
-#include "libavutil/opt.h"
+#include "libavutil/eval.h"
#include "libavutil/mathematics.h"
+#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+
#include "avfilter.h"
#include "internal.h"
#include "video.h"
+static const char *const var_names[] = {
+ "w",
+ "h",
+ "a", "dar",
+ "sar",
+ "hsub",
+ "vsub",
+ NULL
+};
+
+enum var_name {
+ VAR_W,
+ VAR_H,
+ VAR_A, VAR_DAR,
+ VAR_SAR,
+ VAR_HSUB,
+ VAR_VSUB,
+ VARS_NB
+};
+
typedef struct {
const AVClass *class;
- AVRational ratio;
- char *ratio_str;
+ AVRational dar;
+ AVRational sar;
int max;
+#if FF_API_OLD_FILTER_OPTS
+ float aspect_den;
+#endif
+ char *ratio_expr;
} AspectContext;
-#define OFFSET(x) offsetof(AspectContext, x)
-#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
-
-static const AVOption options[] = {
- {"max", "set max value for nominator or denominator in the ratio", OFFSET(max), AV_OPT_TYPE_INT, {.i64=100}, 1, INT_MAX, FLAGS },
- {"ratio", "set ratio", OFFSET(ratio_str), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
- {"r", "set ratio", OFFSET(ratio_str), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
- {NULL}
-};
-
-static av_cold int init(AVFilterContext *ctx, const char *args, const AVClass *class)
+static av_cold int init(AVFilterContext *ctx)
{
- AspectContext *aspect = ctx->priv;
- static const char *shorthand[] = { "ratio", "max", NULL };
- char c;
+ AspectContext *s = ctx->priv;
int ret;
- AVRational q;
-
- aspect->class = class;
- av_opt_set_defaults(aspect);
- if (args && sscanf(args, "%d:%d%c", &q.num, &q.den, &c) == 2) {
- aspect->ratio_str = av_strdup(args);
+#if FF_API_OLD_FILTER_OPTS
+ if (s->ratio_expr && s->aspect_den > 0) {
+ double num;
av_log(ctx, AV_LOG_WARNING,
"num:den syntax is deprecated, please use num/den or named options instead\n");
- } else if ((ret = av_opt_set_from_string(aspect, args, shorthand, "=", ":")) < 0) {
- return ret;
- }
-
- if (aspect->ratio_str) {
- ret = av_parse_ratio(&aspect->ratio, aspect->ratio_str, aspect->max, 0, ctx);
- if (ret < 0 || aspect->ratio.num < 0 || aspect->ratio.den <= 0) {
- av_log(ctx, AV_LOG_ERROR,
- "Invalid string '%s' for aspect ratio\n", args);
+ ret = av_expr_parse_and_eval(&num, s->ratio_expr, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to parse ratio numerator \"%s\"\n", s->ratio_expr);
return AVERROR(EINVAL);
}
+ s->sar = s->dar = av_d2q(num / s->aspect_den, s->max);
}
+#endif
- av_log(ctx, AV_LOG_VERBOSE, "a:%d/%d\n", aspect->ratio.num, aspect->ratio.den);
return 0;
}
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
- AspectContext *aspect = link->dst->priv;
+ AspectContext *s = link->dst->priv;
- frame->sample_aspect_ratio = aspect->ratio;
+ frame->sample_aspect_ratio = s->sar;
return ff_filter_frame(link->dst->outputs[0], frame);
}
-static av_cold void uninit(AVFilterContext *ctx)
-{
- AspectContext *aspect = ctx->priv;
+#define OFFSET(x) offsetof(AspectContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
- av_opt_free(aspect);
+static inline void compute_dar(AVRational *dar, AVRational sar, int w, int h)
+{
+ if (sar.num && sar.den) {
+ av_reduce(&dar->num, &dar->den, sar.num * w, sar.den * h, INT_MAX);
+ } else {
+ av_reduce(&dar->num, &dar->den, w, h, INT_MAX);
+ }
}
-#if CONFIG_SETDAR_FILTER
-
-#define setdar_options options
-AVFILTER_DEFINE_CLASS(setdar);
-
-static av_cold int setdar_init(AVFilterContext *ctx, const char *args)
+static int get_aspect_ratio(AVFilterLink *inlink, AVRational *aspect_ratio)
{
- return init(ctx, args, &setdar_class);
+ AVFilterContext *ctx = inlink->dst;
+ AspectContext *s = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ double var_values[VARS_NB], res;
+ int ret;
+
+ var_values[VAR_W] = inlink->w;
+ var_values[VAR_H] = inlink->h;
+ var_values[VAR_A] = (double) inlink->w / inlink->h;
+ var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
+ (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
+ var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
+ var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
+ var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
+
+ /* evaluate new aspect ratio*/
+ ret = av_expr_parse_and_eval(&res, s->ratio_expr,
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0) {
+ ret = av_parse_ratio(aspect_ratio, s->ratio_expr, s->max, 0, ctx);
+ } else
+ *aspect_ratio = av_d2q(res, s->max);
+
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Error when evaluating the expression '%s'\n", s->ratio_expr);
+ return ret;
+ }
+ if (aspect_ratio->num < 0 || aspect_ratio->den <= 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Invalid string '%s' for aspect ratio\n", s->ratio_expr);
+ return AVERROR(EINVAL);
+ }
+ return 0;
}
+#if CONFIG_SETDAR_FILTER
+
static int setdar_config_props(AVFilterLink *inlink)
{
- AspectContext *aspect = inlink->dst->priv;
- AVRational dar = aspect->ratio;
-
- av_reduce(&aspect->ratio.num, &aspect->ratio.den,
- aspect->ratio.num * inlink->h,
- aspect->ratio.den * inlink->w, 100);
+ AspectContext *s = inlink->dst->priv;
+ AVRational dar;
+ AVRational old_dar;
+ AVRational old_sar = inlink->sample_aspect_ratio;
+ int ret;
- av_log(inlink->dst, AV_LOG_VERBOSE, "w:%d h:%d -> dar:%d/%d sar:%d/%d\n",
- inlink->w, inlink->h, dar.num, dar.den, aspect->ratio.num, aspect->ratio.den);
+#if FF_API_OLD_FILTER_OPTS
+ if (!(s->ratio_expr && s->aspect_den > 0)) {
+#endif
+ if ((ret = get_aspect_ratio(inlink, &s->dar)))
+ return ret;
+#if FF_API_OLD_FILTER_OPTS
+ }
+#endif
+
+ if (s->dar.num && s->dar.den) {
+ av_reduce(&s->sar.num, &s->sar.den,
+ s->dar.num * inlink->h,
+ s->dar.den * inlink->w, INT_MAX);
+ inlink->sample_aspect_ratio = s->sar;
+ dar = s->dar;
+ } else {
+ inlink->sample_aspect_ratio = (AVRational){ 1, 1 };
+ dar = (AVRational){ inlink->w, inlink->h };
+ }
- inlink->sample_aspect_ratio = aspect->ratio;
+ compute_dar(&old_dar, old_sar, inlink->w, inlink->h);
+ av_log(inlink->dst, AV_LOG_VERBOSE, "w:%d h:%d dar:%d/%d sar:%d/%d -> dar:%d/%d sar:%d/%d\n",
+ inlink->w, inlink->h, old_dar.num, old_dar.den, old_sar.num, old_sar.den,
+ dar.num, dar.den, inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den);
return 0;
}
+static const AVOption setdar_options[] = {
+ { "dar", "set display aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
+ { "ratio", "set display aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
+ { "r", "set display aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
+#if FF_API_OLD_FILTER_OPTS
+ { "dar_den", NULL, OFFSET(aspect_den), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, 0, FLT_MAX, FLAGS },
+#endif
+ { "max", "set max value for nominator or denominator in the ratio", OFFSET(max), AV_OPT_TYPE_INT, {.i64=100}, 1, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(setdar);
+
static const AVFilterPad avfilter_vf_setdar_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = setdar_config_props,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = setdar_config_props,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -141,49 +218,66 @@ static const AVFilterPad avfilter_vf_setdar_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_setdar = {
- .name = "setdar",
+AVFilter ff_vf_setdar = {
+ .name = "setdar",
.description = NULL_IF_CONFIG_SMALL("Set the frame display aspect ratio."),
-
- .init = setdar_init,
- .uninit = uninit,
-
- .priv_size = sizeof(AspectContext),
-
- .inputs = avfilter_vf_setdar_inputs,
-
- .outputs = avfilter_vf_setdar_outputs,
- .priv_class = &setdar_class,
+ .init = init,
+ .priv_size = sizeof(AspectContext),
+ .priv_class = &setdar_class,
+ .inputs = avfilter_vf_setdar_inputs,
+ .outputs = avfilter_vf_setdar_outputs,
};
#endif /* CONFIG_SETDAR_FILTER */
#if CONFIG_SETSAR_FILTER
-#define setsar_options options
-AVFILTER_DEFINE_CLASS(setsar);
-
-static av_cold int setsar_init(AVFilterContext *ctx, const char *args)
-{
- return init(ctx, args, &setsar_class);
-}
-
static int setsar_config_props(AVFilterLink *inlink)
{
- AspectContext *aspect = inlink->dst->priv;
+ AspectContext *s = inlink->dst->priv;
+ AVRational old_sar = inlink->sample_aspect_ratio;
+ AVRational old_dar, dar;
+ int ret;
- inlink->sample_aspect_ratio = aspect->ratio;
+#if FF_API_OLD_FILTER_OPTS
+ if (!(s->ratio_expr && s->aspect_den > 0)) {
+#endif
+ if ((ret = get_aspect_ratio(inlink, &s->sar)))
+ return ret;
+#if FF_API_OLD_FILTER_OPTS
+ }
+#endif
+
+ inlink->sample_aspect_ratio = s->sar;
+
+ compute_dar(&old_dar, old_sar, inlink->w, inlink->h);
+ compute_dar(&dar, s->sar, inlink->w, inlink->h);
+ av_log(inlink->dst, AV_LOG_VERBOSE, "w:%d h:%d sar:%d/%d dar:%d/%d -> sar:%d/%d dar:%d/%d\n",
+ inlink->w, inlink->h, old_sar.num, old_sar.den, old_dar.num, old_dar.den,
+ inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den, dar.num, dar.den);
return 0;
}
+static const AVOption setsar_options[] = {
+ { "sar", "set sample (pixel) aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
+ { "ratio", "set sample (pixel) aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
+ { "r", "set sample (pixel) aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
+#if FF_API_OLD_FILTER_OPTS
+ { "sar_den", NULL, OFFSET(aspect_den), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, 0, FLT_MAX, FLAGS },
+#endif
+ { "max", "set max value for nominator or denominator in the ratio", OFFSET(max), AV_OPT_TYPE_INT, {.i64=100}, 1, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(setsar);
+
static const AVFilterPad avfilter_vf_setsar_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = setsar_config_props,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = setsar_config_props,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -196,19 +290,14 @@ static const AVFilterPad avfilter_vf_setsar_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_setsar = {
- .name = "setsar",
+AVFilter ff_vf_setsar = {
+ .name = "setsar",
.description = NULL_IF_CONFIG_SMALL("Set the pixel sample aspect ratio."),
-
- .init = setsar_init,
- .uninit = uninit,
-
- .priv_size = sizeof(AspectContext),
-
- .inputs = avfilter_vf_setsar_inputs,
-
- .outputs = avfilter_vf_setsar_outputs,
- .priv_class = &setsar_class,
+ .init = init,
+ .priv_size = sizeof(AspectContext),
+ .priv_class = &setsar_class,
+ .inputs = avfilter_vf_setsar_inputs,
+ .outputs = avfilter_vf_setsar_outputs,
};
#endif /* CONFIG_SETSAR_FILTER */
diff --git a/ffmpeg/libavfilter/vf_bbox.c b/ffmpeg/libavfilter/vf_bbox.c
index 4ff0625..6c6aab1 100644
--- a/ffmpeg/libavfilter/vf_bbox.c
+++ b/ffmpeg/libavfilter/vf_bbox.c
@@ -23,6 +23,7 @@
* bounding box detection filter
*/
+#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timestamp.h"
#include "avfilter.h"
@@ -30,16 +31,19 @@
#include "internal.h"
typedef struct {
- unsigned int frame;
- int vsub, hsub;
+ const AVClass *class;
+ int min_val;
} BBoxContext;
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- BBoxContext *bbox = ctx->priv;
- bbox->frame = 0;
- return 0;
-}
+#define OFFSET(x) offsetof(BBoxContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption bbox_options[] = {
+ { "min_val", "set minimum luminance value for bounding box", OFFSET(min_val), AV_OPT_TYPE_INT, { .i64 = 16 }, 0, 254, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(bbox);
static int query_formats(AVFilterContext *ctx)
{
@@ -56,25 +60,39 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
+#define SET_META(key, value) \
+ snprintf(buf, sizeof(buf), "%d", value); \
+ av_dict_set(metadata, key, buf, 0);
+
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
BBoxContext *bbox = ctx->priv;
FFBoundingBox box;
int has_bbox, w, h;
+ char buf[32];
has_bbox =
ff_calculate_bounding_box(&box,
frame->data[0], frame->linesize[0],
- inlink->w, inlink->h, 16);
+ inlink->w, inlink->h, bbox->min_val);
w = box.x2 - box.x1 + 1;
h = box.y2 - box.y1 + 1;
av_log(ctx, AV_LOG_INFO,
- "n:%d pts:%s pts_time:%s", bbox->frame,
+ "n:%"PRId64" pts:%s pts_time:%s", inlink->frame_count,
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
if (has_bbox) {
+ AVDictionary **metadata = avpriv_frame_get_metadatap(frame);
+
+ SET_META("lavfi.bbox.x1", box.x1)
+ SET_META("lavfi.bbox.x2", box.x2)
+ SET_META("lavfi.bbox.y1", box.y1)
+ SET_META("lavfi.bbox.y2", box.y2)
+ SET_META("lavfi.bbox.w", w)
+ SET_META("lavfi.bbox.h", h)
+
av_log(ctx, AV_LOG_INFO,
" x1:%d x2:%d y1:%d y2:%d w:%d h:%d"
" crop=%d:%d:%d:%d drawbox=%d:%d:%d:%d",
@@ -84,16 +102,14 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
}
av_log(ctx, AV_LOG_INFO, "\n");
- bbox->frame++;
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
static const AVFilterPad bbox_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -106,12 +122,13 @@ static const AVFilterPad bbox_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_bbox = {
+AVFilter ff_vf_bbox = {
.name = "bbox",
.description = NULL_IF_CONFIG_SMALL("Compute bounding box for each frame."),
.priv_size = sizeof(BBoxContext),
+ .priv_class = &bbox_class,
.query_formats = query_formats,
- .init = init,
.inputs = bbox_inputs,
.outputs = bbox_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_blackdetect.c b/ffmpeg/libavfilter/vf_blackdetect.c
index e8af624..90a28a9 100644
--- a/ffmpeg/libavfilter/vf_blackdetect.c
+++ b/ffmpeg/libavfilter/vf_blackdetect.c
@@ -43,7 +43,6 @@ typedef struct {
double pixel_black_th;
unsigned int pixel_black_th_i;
- unsigned int frame_count; ///< frame number
unsigned int nb_black_pixels; ///< number of black pixels counted so far
} BlackDetectContext;
@@ -57,13 +56,13 @@ static const AVOption blackdetect_options[] = {
{ "pic_th", "set the picture black ratio threshold", OFFSET(picture_black_ratio_th), AV_OPT_TYPE_DOUBLE, {.dbl=.98}, 0, 1, FLAGS },
{ "pixel_black_th", "set the pixel black threshold", OFFSET(pixel_black_th), AV_OPT_TYPE_DOUBLE, {.dbl=.10}, 0, 1, FLAGS },
{ "pix_th", "set the pixel black threshold", OFFSET(pixel_black_th), AV_OPT_TYPE_DOUBLE, {.dbl=.10}, 0, 1, FLAGS },
- { NULL },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(blackdetect);
#define YUVJ_FORMATS \
- AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P
static enum AVPixelFormat yuvj_formats[] = {
YUVJ_FORMATS, AV_PIX_FMT_NONE
@@ -72,8 +71,11 @@ static enum AVPixelFormat yuvj_formats[] = {
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NV12,
- AV_PIX_FMT_NV21, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_NV12, AV_PIX_FMT_NV21,
YUVJ_FORMATS,
AV_PIX_FMT_NONE
};
@@ -82,20 +84,6 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- int ret;
- BlackDetectContext *blackdetect = ctx->priv;
-
- blackdetect->class = &blackdetect_class;
- av_opt_set_defaults(blackdetect);
-
- if ((ret = av_set_options_string(blackdetect, args, "=", ":")) < 0)
- return ret;
-
- return 0;
-}
-
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
@@ -163,8 +151,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
picture_black_ratio = (double)blackdetect->nb_black_pixels / (inlink->w * inlink->h);
av_log(ctx, AV_LOG_DEBUG,
- "frame:%u picture_black_ratio:%f pts:%s t:%s type:%c\n",
- blackdetect->frame_count, picture_black_ratio,
+ "frame:%"PRId64" picture_black_ratio:%f pts:%s t:%s type:%c\n",
+ inlink->frame_count, picture_black_ratio,
av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base),
av_get_picture_type_char(picref->pict_type));
@@ -182,18 +170,16 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
}
blackdetect->last_picref_pts = picref->pts;
- blackdetect->frame_count++;
blackdetect->nb_black_pixels = 0;
return ff_filter_frame(inlink->dst->outputs[0], picref);
}
static const AVFilterPad blackdetect_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_input,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -207,11 +193,10 @@ static const AVFilterPad blackdetect_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_blackdetect = {
+AVFilter ff_vf_blackdetect = {
.name = "blackdetect",
.description = NULL_IF_CONFIG_SMALL("Detect video intervals that are (almost) black."),
.priv_size = sizeof(BlackDetectContext),
- .init = init,
.query_formats = query_formats,
.inputs = blackdetect_inputs,
.outputs = blackdetect_outputs,
diff --git a/ffmpeg/libavfilter/vf_blackframe.c b/ffmpeg/libavfilter/vf_blackframe.c
index a69ed97..9b0c973 100644
--- a/ffmpeg/libavfilter/vf_blackframe.c
+++ b/ffmpeg/libavfilter/vf_blackframe.c
@@ -33,31 +33,19 @@
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "avfilter.h"
-#include "internal.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
typedef struct {
const AVClass *class;
- unsigned int bamount; ///< black amount
- unsigned int bthresh; ///< black threshold
+ int bamount; ///< black amount
+ int bthresh; ///< black threshold
unsigned int frame; ///< frame number
unsigned int nblack; ///< number of black pixels counted so far
unsigned int last_keyframe; ///< frame number of the last received key-frame
} BlackFrameContext;
-#define OFFSET(x) offsetof(BlackFrameContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
-
-static const AVOption blackframe_options[] = {
- { "amount", "set least percentual amount of pixels below the black threshold enabling black detection", OFFSET(bamount), AV_OPT_TYPE_INT, {.i64=98}, 0, 100, FLAGS },
- { "thresh", "set threshold below which a pixel value is considered black", OFFSET(bthresh), AV_OPT_TYPE_INT, {.i64=32}, 0, 255, FLAGS },
- { NULL }
-};
-
-AVFILTER_DEFINE_CLASS(blackframe);
-
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
@@ -70,66 +58,55 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- BlackFrameContext *blackframe = ctx->priv;
- static const char *shorthand[] = { "amount", "thresh", NULL };
- int ret;
-
- blackframe->class = &blackframe_class;
- av_opt_set_defaults(blackframe);
-
- if ((ret = av_opt_set_from_string(blackframe, args, shorthand, "=", ":")) < 0)
- return ret;
-
- av_log(ctx, AV_LOG_VERBOSE, "bamount:%u bthresh:%u\n",
- blackframe->bamount, blackframe->bthresh);
-
- return 0;
-}
-
-static av_cold void uninit(AVFilterContext *ctx)
-{
- BlackFrameContext *blackframe = ctx->priv;
- av_opt_free(blackframe);
-}
-
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
- BlackFrameContext *blackframe = ctx->priv;
+ BlackFrameContext *s = ctx->priv;
int x, i;
int pblack = 0;
uint8_t *p = frame->data[0];
for (i = 0; i < frame->height; i++) {
for (x = 0; x < inlink->w; x++)
- blackframe->nblack += p[x] < blackframe->bthresh;
+ s->nblack += p[x] < s->bthresh;
p += frame->linesize[0];
}
if (frame->key_frame)
- blackframe->last_keyframe = blackframe->frame;
+ s->last_keyframe = s->frame;
- pblack = blackframe->nblack * 100 / (inlink->w * inlink->h);
- if (pblack >= blackframe->bamount)
+ pblack = s->nblack * 100 / (inlink->w * inlink->h);
+ if (pblack >= s->bamount)
av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pts:%"PRId64" t:%f "
"type:%c last_keyframe:%d\n",
- blackframe->frame, pblack, frame->pts,
+ s->frame, pblack, frame->pts,
frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base),
- av_get_picture_type_char(frame->pict_type), blackframe->last_keyframe);
+ av_get_picture_type_char(frame->pict_type), s->last_keyframe);
- blackframe->frame++;
- blackframe->nblack = 0;
+ s->frame++;
+ s->nblack = 0;
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
+#define OFFSET(x) offsetof(BlackFrameContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption blackframe_options[] = {
+ { "amount", "Percentage of the pixels that have to be below the threshold "
+ "for the frame to be considered black.", OFFSET(bamount), AV_OPT_TYPE_INT, { .i64 = 98 }, 0, 100, FLAGS },
+ { "threshold", "threshold below which a pixel value is considered black",
+ OFFSET(bthresh), AV_OPT_TYPE_INT, { .i64 = 32 }, 0, 255, FLAGS },
+ { "thresh", "threshold below which a pixel value is considered black",
+ OFFSET(bthresh), AV_OPT_TYPE_INT, { .i64 = 32 }, 0, 255, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(blackframe);
+
static const AVFilterPad avfilter_vf_blackframe_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -142,19 +119,12 @@ static const AVFilterPad avfilter_vf_blackframe_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_blackframe = {
- .name = "blackframe",
- .description = NULL_IF_CONFIG_SMALL("Detect frames that are (almost) black."),
-
- .priv_size = sizeof(BlackFrameContext),
- .init = init,
- .uninit = uninit,
-
+AVFilter ff_vf_blackframe = {
+ .name = "blackframe",
+ .description = NULL_IF_CONFIG_SMALL("Detect frames that are (almost) black."),
+ .priv_size = sizeof(BlackFrameContext),
+ .priv_class = &blackframe_class,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_blackframe_inputs,
-
- .outputs = avfilter_vf_blackframe_outputs,
-
- .priv_class = &blackframe_class,
+ .inputs = avfilter_vf_blackframe_inputs,
+ .outputs = avfilter_vf_blackframe_outputs,
};
diff --git a/ffmpeg/libavfilter/vf_blend.c b/ffmpeg/libavfilter/vf_blend.c
index bc276d4..d422a9c 100644
--- a/ffmpeg/libavfilter/vf_blend.c
+++ b/ffmpeg/libavfilter/vf_blend.c
@@ -26,6 +26,7 @@
#include "bufferqueue.h"
#include "formats.h"
#include "internal.h"
+#include "dualinput.h"
#include "video.h"
#define TOP 0
@@ -60,27 +61,35 @@ enum BlendMode {
BLEND_NB
};
-static const char *const var_names[] = { "X", "Y", "W", "H", "SW", "SH", "T", "A", "B", "TOP", "BOTTOM", NULL };
-enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_SW, VAR_SH, VAR_T, VAR_A, VAR_B, VAR_TOP, VAR_BOTTOM, VAR_VARS_NB };
+static const char *const var_names[] = { "X", "Y", "W", "H", "SW", "SH", "T", "N", "A", "B", "TOP", "BOTTOM", NULL };
+enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_SW, VAR_SH, VAR_T, VAR_N, VAR_A, VAR_B, VAR_TOP, VAR_BOTTOM, VAR_VARS_NB };
typedef struct FilterParams {
enum BlendMode mode;
- double values[VAR_VARS_NB];
double opacity;
AVExpr *e;
char *expr_str;
void (*blend)(const uint8_t *top, int top_linesize,
const uint8_t *bottom, int bottom_linesize,
uint8_t *dst, int dst_linesize,
- int width, int height, struct FilterParams *param);
+ int width, int start, int end,
+ struct FilterParams *param, double *values);
} FilterParams;
+typedef struct ThreadData {
+ const AVFrame *top, *bottom;
+ AVFrame *dst;
+ AVFilterLink *inlink;
+ int plane;
+ int w, h;
+ FilterParams *param;
+} ThreadData;
+
typedef struct {
const AVClass *class;
- struct FFBufQueue queue_top;
- struct FFBufQueue queue_bottom;
+ FFDualInputContext dinput;
int hsub, vsub; ///< chroma subsampling values
- int frame_requested;
+ int nb_planes;
char *all_expr;
enum BlendMode all_mode;
double all_opacity;
@@ -131,7 +140,9 @@ static const AVOption blend_options[] = {
{ "c2_opacity", "set color component #2 opacity", OFFSET(params[2].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
{ "c3_opacity", "set color component #3 opacity", OFFSET(params[3].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
{ "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
- { NULL },
+ { "shortest", "force termination when the shortest input terminates", OFFSET(dinput.shortest), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
+ { "repeatlast", "repeat last bottom frame", OFFSET(dinput.repeatlast), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(blend);
@@ -139,21 +150,23 @@ AVFILTER_DEFINE_CLASS(blend);
static void blend_normal(const uint8_t *top, int top_linesize,
const uint8_t *bottom, int bottom_linesize,
uint8_t *dst, int dst_linesize,
- int width, int height, FilterParams *param)
+ int width, int start, int end,
+ FilterParams *param, double *values)
{
- av_image_copy_plane(dst, dst_linesize, top, top_linesize, width, height);
+ av_image_copy_plane(dst, dst_linesize, top, top_linesize, width, end - start);
}
#define DEFINE_BLEND(name, expr) \
static void blend_## name(const uint8_t *top, int top_linesize, \
const uint8_t *bottom, int bottom_linesize, \
uint8_t *dst, int dst_linesize, \
- int width, int height, FilterParams *param) \
+ int width, int start, int end, \
+ FilterParams *param, double *values) \
{ \
double opacity = param->opacity; \
int i, j; \
\
- for (i = 0; i < height; i++) { \
+ for (i = start; i < end; i++) { \
for (j = 0; j < width; j++) { \
dst[j] = top[j] + ((expr) - top[j]) * opacity; \
} \
@@ -198,14 +211,13 @@ DEFINE_BLEND(vividlight, (B < 128) ? BURN(A, 2 * B) : DODGE(A, 2 * (B - 128)))
static void blend_expr(const uint8_t *top, int top_linesize,
const uint8_t *bottom, int bottom_linesize,
uint8_t *dst, int dst_linesize,
- int width, int height,
- FilterParams *param)
+ int width, int start, int end,
+ FilterParams *param, double *values)
{
AVExpr *e = param->e;
- double *values = param->values;
int y, x;
- for (y = 0; y < height; y++) {
+ for (y = start; y < end; y++) {
values[VAR_Y] = y;
for (x = 0; x < width; x++) {
values[VAR_X] = x;
@@ -219,16 +231,69 @@ static void blend_expr(const uint8_t *top, int top_linesize,
}
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ThreadData *td = arg;
+ int slice_start = (td->h * jobnr ) / nb_jobs;
+ int slice_end = (td->h * (jobnr+1)) / nb_jobs;
+ const uint8_t *top = td->top->data[td->plane];
+ const uint8_t *bottom = td->bottom->data[td->plane];
+ uint8_t *dst = td->dst->data[td->plane];
+ double values[VAR_VARS_NB];
+
+ values[VAR_N] = td->inlink->frame_count;
+ values[VAR_T] = td->dst->pts == AV_NOPTS_VALUE ? NAN : td->dst->pts * av_q2d(td->inlink->time_base);
+ values[VAR_W] = td->w;
+ values[VAR_H] = td->h;
+ values[VAR_SW] = td->w / (double)td->dst->width;
+ values[VAR_SH] = td->h / (double)td->dst->height;
+
+ td->param->blend(top + slice_start * td->top->linesize[td->plane],
+ td->top->linesize[td->plane],
+ bottom + slice_start * td->bottom->linesize[td->plane],
+ td->bottom->linesize[td->plane],
+ dst + slice_start * td->dst->linesize[td->plane],
+ td->dst->linesize[td->plane],
+ td->w, slice_start, slice_end, td->param, &values[0]);
+ return 0;
+}
+
+static AVFrame *blend_frame(AVFilterContext *ctx, AVFrame *top_buf,
+ const AVFrame *bottom_buf)
{
BlendContext *b = ctx->priv;
- int ret, plane;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *dst_buf;
+ int plane;
- b->class = &blend_class;
- av_opt_set_defaults(b);
+ dst_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!dst_buf)
+ return top_buf;
+ av_frame_copy_props(dst_buf, top_buf);
- if ((ret = av_set_options_string(b, args, "=", ":")) < 0)
- return ret;
+ for (plane = 0; plane < b->nb_planes; plane++) {
+ int hsub = plane == 1 || plane == 2 ? b->hsub : 0;
+ int vsub = plane == 1 || plane == 2 ? b->vsub : 0;
+ int outw = FF_CEIL_RSHIFT(dst_buf->width, hsub);
+ int outh = FF_CEIL_RSHIFT(dst_buf->height, vsub);
+ FilterParams *param = &b->params[plane];
+ ThreadData td = { .top = top_buf, .bottom = bottom_buf, .dst = dst_buf,
+ .w = outw, .h = outh, .param = param, .plane = plane,
+ .inlink = inlink };
+
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outh, ctx->graph->nb_threads));
+ }
+
+ av_frame_free(&top_buf);
+
+ return dst_buf;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ BlendContext *b = ctx->priv;
+ int ret, plane;
for (plane = 0; plane < FF_ARRAY_ELEMS(b->params); plane++) {
FilterParams *param = &b->params[plane];
@@ -279,6 +344,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
}
}
+ b->dinput.process = blend_frame;
return 0;
}
@@ -286,8 +352,9 @@ static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
- AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
- AV_PIX_FMT_GBRP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P,AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
@@ -299,13 +366,16 @@ static int config_output(AVFilterLink *outlink)
AVFilterContext *ctx = outlink->src;
AVFilterLink *toplink = ctx->inputs[TOP];
AVFilterLink *bottomlink = ctx->inputs[BOTTOM];
+ BlendContext *b = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(toplink->format);
+ int ret;
if (toplink->format != bottomlink->format) {
av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
return AVERROR(EINVAL);
}
- if (toplink->w != bottomlink->w ||
- toplink->h != bottomlink->h ||
+ if (toplink->w != bottomlink->w ||
+ toplink->h != bottomlink->h ||
toplink->sample_aspect_ratio.num != bottomlink->sample_aspect_ratio.num ||
toplink->sample_aspect_ratio.den != bottomlink->sample_aspect_ratio.den) {
av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
@@ -321,20 +391,18 @@ static int config_output(AVFilterLink *outlink)
}
outlink->w = toplink->w;
- outlink->h = bottomlink->h;
+ outlink->h = toplink->h;
outlink->time_base = toplink->time_base;
outlink->sample_aspect_ratio = toplink->sample_aspect_ratio;
outlink->frame_rate = toplink->frame_rate;
- return 0;
-}
-
-static int config_input_top(AVFilterLink *inlink)
-{
- BlendContext *b = inlink->dst->priv;
- const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
b->hsub = pix_desc->log2_chroma_w;
b->vsub = pix_desc->log2_chroma_h;
+ b->nb_planes = av_pix_fmt_count_planes(toplink->format);
+
+ if ((ret = ff_dualinput_init(ctx, &b->dinput)) < 0)
+ return ret;
+
return 0;
}
@@ -343,107 +411,32 @@ static av_cold void uninit(AVFilterContext *ctx)
BlendContext *b = ctx->priv;
int i;
- av_opt_free(b);
- ff_bufqueue_discard_all(&b->queue_top);
- ff_bufqueue_discard_all(&b->queue_bottom);
-
+ ff_dualinput_uninit(&b->dinput);
for (i = 0; i < FF_ARRAY_ELEMS(b->params); i++)
av_expr_free(b->params[i].e);
}
static int request_frame(AVFilterLink *outlink)
{
- AVFilterContext *ctx = outlink->src;
- BlendContext *b = ctx->priv;
- int in, ret;
-
- b->frame_requested = 1;
- while (b->frame_requested) {
- in = ff_bufqueue_peek(&b->queue_top, TOP) ? BOTTOM : TOP;
- ret = ff_request_frame(ctx->inputs[in]);
- if (ret < 0)
- return ret;
- }
- return 0;
-}
-
-static void blend_frame(AVFilterContext *ctx,
- AVFrame *top_buf,
- AVFrame *bottom_buf,
- AVFrame *dst_buf)
-{
- BlendContext *b = ctx->priv;
- AVFilterLink *inlink = ctx->inputs[0];
- FilterParams *param;
- int plane;
-
- for (plane = 0; dst_buf->data[plane]; plane++) {
- int hsub = plane == 1 || plane == 2 ? b->hsub : 0;
- int vsub = plane == 1 || plane == 2 ? b->vsub : 0;
- int outw = dst_buf->width >> hsub;
- int outh = dst_buf->height >> vsub;
- uint8_t *dst = dst_buf->data[plane];
- uint8_t *top = top_buf->data[plane];
- uint8_t *bottom = bottom_buf->data[plane];
-
- param = &b->params[plane];
- param->values[VAR_T] = dst_buf->pts == AV_NOPTS_VALUE ? NAN : dst_buf->pts * av_q2d(inlink->time_base);
- param->values[VAR_W] = outw;
- param->values[VAR_H] = outh;
- param->values[VAR_SW] = outw / dst_buf->width;
- param->values[VAR_SH] = outh / dst_buf->height;
- param->blend(top, top_buf->linesize[plane],
- bottom, bottom_buf->linesize[plane],
- dst, dst_buf->linesize[plane], outw, outh, param);
- }
+ BlendContext *b = outlink->src->priv;
+ return ff_dualinput_request_frame(&b->dinput, outlink);
}
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
- AVFilterContext *ctx = inlink->dst;
- AVFilterLink *outlink = ctx->outputs[0];
- BlendContext *b = ctx->priv;
-
- int ret = 0;
- int is_bottom = (inlink == ctx->inputs[BOTTOM]);
- struct FFBufQueue *queue =
- (is_bottom ? &b->queue_bottom : &b->queue_top);
- ff_bufqueue_add(ctx, queue, buf);
-
- while (1) {
- AVFrame *top_buf, *bottom_buf, *out_buf;
-
- if (!ff_bufqueue_peek(&b->queue_top, TOP) ||
- !ff_bufqueue_peek(&b->queue_bottom, BOTTOM)) break;
-
- top_buf = ff_bufqueue_get(&b->queue_top);
- bottom_buf = ff_bufqueue_get(&b->queue_bottom);
-
- out_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
- if (!out_buf) {
- return AVERROR(ENOMEM);
- }
- av_frame_copy_props(out_buf, top_buf);
-
- b->frame_requested = 0;
- blend_frame(ctx, top_buf, bottom_buf, out_buf);
- ret = ff_filter_frame(ctx->outputs[0], out_buf);
- av_frame_free(&top_buf);
- av_frame_free(&bottom_buf);
- }
- return ret;
+ BlendContext *b = inlink->dst->priv;
+ return ff_dualinput_filter_frame(&b->dinput, inlink, buf);
}
static const AVFilterPad blend_inputs[] = {
{
- .name = "top",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_input_top,
- .filter_frame = filter_frame,
+ .name = "top",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},{
- .name = "bottom",
- .type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame,
+ .name = "bottom",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -458,7 +451,7 @@ static const AVFilterPad blend_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_blend = {
+AVFilter ff_vf_blend = {
.name = "blend",
.description = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."),
.init = init,
@@ -468,4 +461,5 @@ AVFilter avfilter_vf_blend = {
.inputs = blend_inputs,
.outputs = blend_outputs,
.priv_class = &blend_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};
diff --git a/ffmpeg/libavfilter/vf_boxblur.c b/ffmpeg/libavfilter/vf_boxblur.c
index 3c72ddb..3183f43 100644
--- a/ffmpeg/libavfilter/vf_boxblur.c
+++ b/ffmpeg/libavfilter/vf_boxblur.c
@@ -73,79 +73,46 @@ typedef struct {
uint8_t *temp[2]; ///< temporary buffer used in blur_power()
} BoxBlurContext;
-#define OFFSET(x) offsetof(BoxBlurContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
-
-static const AVOption boxblur_options[] = {
- { "luma_radius", "set luma radius", OFFSET(luma_param.radius_expr), AV_OPT_TYPE_STRING, {.str="2"}, .flags = FLAGS },
- { "lr", "set luma radius", OFFSET(luma_param.radius_expr), AV_OPT_TYPE_STRING, {.str="2"}, .flags = FLAGS },
- { "luma_power", "set luma power", OFFSET(luma_param.power), AV_OPT_TYPE_INT, {.i64=2}, 0, INT_MAX, .flags = FLAGS },
- { "lp", "set luma power", OFFSET(luma_param.power), AV_OPT_TYPE_INT, {.i64=2}, 0, INT_MAX, .flags = FLAGS },
-
- { "chroma_radius", "set chroma radius", OFFSET(chroma_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
- { "cr", "set chroma radius", OFFSET(chroma_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
- { "chroma_power", "set chroma power", OFFSET(chroma_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
- { "cp", "set chroma power", OFFSET(chroma_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
-
- { "alpha_radius", "set alpha radius", OFFSET(alpha_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
- { "ar", "set alpha radius", OFFSET(alpha_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
- { "alpha_power", "set alpha power", OFFSET(alpha_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
- { "ap", "set alpha power", OFFSET(alpha_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
-
- { NULL }
-};
-
-AVFILTER_DEFINE_CLASS(boxblur);
-
#define Y 0
#define U 1
#define V 2
#define A 3
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
- BoxBlurContext *boxblur = ctx->priv;
- static const char *shorthand[] = {
- "luma_radius", "luma_power",
- "chroma_radius", "chroma_power",
- "alpha_radius", "alpha_power",
- NULL
- };
- int ret;
-
- boxblur->class = &boxblur_class;
- av_opt_set_defaults(boxblur);
+ BoxBlurContext *s = ctx->priv;
- if ((ret = av_opt_set_from_string(boxblur, args, shorthand, "=", ":")) < 0)
- return ret;
+ if (!s->luma_param.radius_expr) {
+ av_log(ctx, AV_LOG_ERROR, "Luma radius expression is not set.\n");
+ return AVERROR(EINVAL);
+ }
/* fill missing params */
- if (!boxblur->chroma_param.radius_expr) {
- boxblur->chroma_param.radius_expr = av_strdup(boxblur->luma_param.radius_expr);
- if (!boxblur->chroma_param.radius_expr)
+ if (!s->chroma_param.radius_expr) {
+ s->chroma_param.radius_expr = av_strdup(s->luma_param.radius_expr);
+ if (!s->chroma_param.radius_expr)
return AVERROR(ENOMEM);
}
- if (boxblur->chroma_param.power < 0)
- boxblur->chroma_param.power = boxblur->luma_param.power;
+ if (s->chroma_param.power < 0)
+ s->chroma_param.power = s->luma_param.power;
- if (!boxblur->alpha_param.radius_expr) {
- boxblur->alpha_param.radius_expr = av_strdup(boxblur->luma_param.radius_expr);
- if (!boxblur->alpha_param.radius_expr)
+ if (!s->alpha_param.radius_expr) {
+ s->alpha_param.radius_expr = av_strdup(s->luma_param.radius_expr);
+ if (!s->alpha_param.radius_expr)
return AVERROR(ENOMEM);
}
- if (boxblur->alpha_param.power < 0)
- boxblur->alpha_param.power = boxblur->luma_param.power;
+ if (s->alpha_param.power < 0)
+ s->alpha_param.power = s->luma_param.power;
return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
- BoxBlurContext *boxblur = ctx->priv;
+ BoxBlurContext *s = ctx->priv;
- av_freep(&boxblur->temp[0]);
- av_freep(&boxblur->temp[1]);
- av_opt_free(boxblur);
+ av_freep(&s->temp[0]);
+ av_freep(&s->temp[1]);
}
static int query_formats(AVFilterContext *ctx)
@@ -167,32 +134,32 @@ static int config_input(AVFilterLink *inlink)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
AVFilterContext *ctx = inlink->dst;
- BoxBlurContext *boxblur = ctx->priv;
+ BoxBlurContext *s = ctx->priv;
int w = inlink->w, h = inlink->h;
int cw, ch;
double var_values[VARS_NB], res;
char *expr;
int ret;
- if (!(boxblur->temp[0] = av_malloc(FFMAX(w, h))) ||
- !(boxblur->temp[1] = av_malloc(FFMAX(w, h))))
+ if (!(s->temp[0] = av_malloc(FFMAX(w, h))) ||
+ !(s->temp[1] = av_malloc(FFMAX(w, h))))
return AVERROR(ENOMEM);
- boxblur->hsub = desc->log2_chroma_w;
- boxblur->vsub = desc->log2_chroma_h;
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
var_values[VAR_W] = inlink->w;
var_values[VAR_H] = inlink->h;
- var_values[VAR_CW] = cw = w>>boxblur->hsub;
- var_values[VAR_CH] = ch = h>>boxblur->vsub;
- var_values[VAR_HSUB] = 1<<boxblur->hsub;
- var_values[VAR_VSUB] = 1<<boxblur->vsub;
+ var_values[VAR_CW] = cw = w>>s->hsub;
+ var_values[VAR_CH] = ch = h>>s->vsub;
+ var_values[VAR_HSUB] = 1<<s->hsub;
+ var_values[VAR_VSUB] = 1<<s->vsub;
#define EVAL_RADIUS_EXPR(comp) \
- expr = boxblur->comp##_param.radius_expr; \
+ expr = s->comp##_param.radius_expr; \
ret = av_expr_parse_and_eval(&res, expr, var_names, var_values, \
NULL, NULL, NULL, NULL, NULL, 0, ctx); \
- boxblur->comp##_param.radius = res; \
+ s->comp##_param.radius = res; \
if (ret < 0) { \
av_log(NULL, AV_LOG_ERROR, \
"Error when evaluating " #comp " radius expression '%s'\n", expr); \
@@ -207,30 +174,30 @@ static int config_input(AVFilterLink *inlink)
"chroma_radius:%d chroma_power:%d "
"alpha_radius:%d alpha_power:%d "
"w:%d chroma_w:%d h:%d chroma_h:%d\n",
- boxblur->luma_param .radius, boxblur->luma_param .power,
- boxblur->chroma_param.radius, boxblur->chroma_param.power,
- boxblur->alpha_param .radius, boxblur->alpha_param .power,
+ s->luma_param .radius, s->luma_param .power,
+ s->chroma_param.radius, s->chroma_param.power,
+ s->alpha_param .radius, s->alpha_param .power,
w, cw, h, ch);
#define CHECK_RADIUS_VAL(w_, h_, comp) \
- if (boxblur->comp##_param.radius < 0 || \
- 2*boxblur->comp##_param.radius > FFMIN(w_, h_)) { \
+ if (s->comp##_param.radius < 0 || \
+ 2*s->comp##_param.radius > FFMIN(w_, h_)) { \
av_log(ctx, AV_LOG_ERROR, \
"Invalid " #comp " radius value %d, must be >= 0 and <= %d\n", \
- boxblur->comp##_param.radius, FFMIN(w_, h_)/2); \
+ s->comp##_param.radius, FFMIN(w_, h_)/2); \
return AVERROR(EINVAL); \
}
CHECK_RADIUS_VAL(w, h, luma);
CHECK_RADIUS_VAL(cw, ch, chroma);
CHECK_RADIUS_VAL(w, h, alpha);
- boxblur->radius[Y] = boxblur->luma_param.radius;
- boxblur->radius[U] = boxblur->radius[V] = boxblur->chroma_param.radius;
- boxblur->radius[A] = boxblur->alpha_param.radius;
+ s->radius[Y] = s->luma_param.radius;
+ s->radius[U] = s->radius[V] = s->chroma_param.radius;
+ s->radius[A] = s->alpha_param.radius;
- boxblur->power[Y] = boxblur->luma_param.power;
- boxblur->power[U] = boxblur->power[V] = boxblur->chroma_param.power;
- boxblur->power[A] = boxblur->alpha_param.power;
+ s->power[Y] = s->luma_param.power;
+ s->power[U] = s->power[V] = s->chroma_param.power;
+ s->power[A] = s->alpha_param.power;
return 0;
}
@@ -331,11 +298,11 @@ static void vblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_li
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
- BoxBlurContext *boxblur = ctx->priv;
+ BoxBlurContext *s = ctx->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
int plane;
- int cw = inlink->w >> boxblur->hsub, ch = in->height >> boxblur->vsub;
+ int cw = FF_CEIL_RSHIFT(inlink->w, s->hsub), ch = FF_CEIL_RSHIFT(in->height, s->vsub);
int w[4] = { inlink->w, cw, cw, inlink->w };
int h[4] = { in->height, ch, ch, in->height };
@@ -346,23 +313,47 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
av_frame_copy_props(out, in);
- for (plane = 0; in->data[plane] && plane < 4; plane++)
+ for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++)
hblur(out->data[plane], out->linesize[plane],
in ->data[plane], in ->linesize[plane],
- w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane],
- boxblur->temp);
+ w[plane], h[plane], s->radius[plane], s->power[plane],
+ s->temp);
- for (plane = 0; in->data[plane] && plane < 4; plane++)
+ for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++)
vblur(out->data[plane], out->linesize[plane],
out->data[plane], out->linesize[plane],
- w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane],
- boxblur->temp);
+ w[plane], h[plane], s->radius[plane], s->power[plane],
+ s->temp);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
+#define OFFSET(x) offsetof(BoxBlurContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption boxblur_options[] = {
+ { "luma_radius", "Radius of the luma blurring box", OFFSET(luma_param.radius_expr), AV_OPT_TYPE_STRING, {.str="2"}, .flags = FLAGS },
+ { "lr", "Radius of the luma blurring box", OFFSET(luma_param.radius_expr), AV_OPT_TYPE_STRING, {.str="2"}, .flags = FLAGS },
+ { "luma_power", "How many times should the boxblur be applied to luma", OFFSET(luma_param.power), AV_OPT_TYPE_INT, {.i64=2}, 0, INT_MAX, .flags = FLAGS },
+ { "lp", "How many times should the boxblur be applied to luma", OFFSET(luma_param.power), AV_OPT_TYPE_INT, {.i64=2}, 0, INT_MAX, .flags = FLAGS },
+
+ { "chroma_radius", "Radius of the chroma blurring box", OFFSET(chroma_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "cr", "Radius of the chroma blurring box", OFFSET(chroma_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "chroma_power", "How many times should the boxblur be applied to chroma", OFFSET(chroma_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+ { "cp", "How many times should the boxblur be applied to chroma", OFFSET(chroma_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+
+ { "alpha_radius", "Radius of the alpha blurring box", OFFSET(alpha_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "ar", "Radius of the alpha blurring box", OFFSET(alpha_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "alpha_power", "How many times should the boxblur be applied to alpha", OFFSET(alpha_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+ { "ap", "How many times should the boxblur be applied to alpha", OFFSET(alpha_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(boxblur);
+
static const AVFilterPad avfilter_vf_boxblur_inputs[] = {
{
.name = "default",
@@ -381,16 +372,15 @@ static const AVFilterPad avfilter_vf_boxblur_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_boxblur = {
+AVFilter ff_vf_boxblur = {
.name = "boxblur",
.description = NULL_IF_CONFIG_SMALL("Blur the input."),
.priv_size = sizeof(BoxBlurContext),
+ .priv_class = &boxblur_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_boxblur_inputs,
- .outputs = avfilter_vf_boxblur_outputs,
-
- .priv_class = &boxblur_class,
+ .inputs = avfilter_vf_boxblur_inputs,
+ .outputs = avfilter_vf_boxblur_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_colormatrix.c b/ffmpeg/libavfilter/vf_colormatrix.c
index 4daa58e..e1b48fa 100644
--- a/ffmpeg/libavfilter/vf_colormatrix.c
+++ b/ffmpeg/libavfilter/vf_colormatrix.c
@@ -33,6 +33,7 @@
#include "formats.h"
#include "internal.h"
#include "video.h"
+#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avstring.h"
@@ -54,15 +55,39 @@ static const double yuv_coeff[4][3][3] = {
{ -0.4450, -0.0550, +0.5000 } },
};
+enum ColorMode {
+ COLOR_MODE_NONE = -1,
+ COLOR_MODE_BT709,
+ COLOR_MODE_FCC,
+ COLOR_MODE_BT601,
+ COLOR_MODE_SMPTE240M,
+ COLOR_MODE_COUNT
+};
+
typedef struct {
+ const AVClass *class;
int yuv_convert[16][3][3];
int interlaced;
- int source, dest, mode;
- char src[256];
- char dst[256];
+ enum ColorMode source, dest;
+ int mode;
int hsub, vsub;
} ColorMatrixContext;
+#define OFFSET(x) offsetof(ColorMatrixContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption colormatrix_options[] = {
+ { "src", "set source color matrix", OFFSET(source), AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
+ { "dst", "set destination color matrix", OFFSET(dest), AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
+ { "bt709", "set BT.709 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT709}, .flags=FLAGS, .unit="color_mode" },
+ { "fcc", "set FCC colorspace ", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_FCC}, .flags=FLAGS, .unit="color_mode" },
+ { "bt601", "set BT.601 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
+ { "smpte240m", "set SMPTE-240M colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_SMPTE240M}, .flags=FLAGS, .unit="color_mode" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(colormatrix);
+
#define ma m[0][0]
#define mb m[0][1]
#define mc m[0][2]
@@ -133,45 +158,19 @@ static void calc_coefficients(AVFilterContext *ctx)
}
}
-static const char *color_modes[] = {"bt709", "FCC", "bt601", "smpte240m"};
+static const char *color_modes[] = {"bt709", "fcc", "bt601", "smpte240m"};
-static int get_color_mode_index(const char *name)
-{
- int i;
-
- for (i = 0; i < FF_ARRAY_ELEMS(color_modes); i++)
- if (!av_strcasecmp(color_modes[i], name))
- return i;
- return -1;
-}
-
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
ColorMatrixContext *color = ctx->priv;
- if (!args)
- goto usage;
- if (sscanf(args, "%255[^:]:%255[^:]", color->src, color->dst) != 2) {
- usage:
- av_log(ctx, AV_LOG_ERROR, "usage: <src>:<dst>\n");
- av_log(ctx, AV_LOG_ERROR, "possible options: bt709,bt601,smpte240m,fcc\n");
- return -1;
- }
-
- color->source = get_color_mode_index(color->src);
- if (color->source < 0) {
- av_log(ctx, AV_LOG_ERROR, "unknown color space %s\n", color->src);
- return AVERROR(EINVAL);
- }
-
- color->dest = get_color_mode_index(color->dst);
- if (color->dest < 0) {
- av_log(ctx, AV_LOG_ERROR, "unknown color space %s\n", color->dst);
+ if (color->source == COLOR_MODE_NONE || color->dest == COLOR_MODE_NONE) {
+ av_log(ctx, AV_LOG_ERROR, "Unspecified source or destination color space\n");
return AVERROR(EINVAL);
}
if (color->source == color->dest) {
- av_log(ctx, AV_LOG_ERROR, "source and destination color space are identical\n");
+ av_log(ctx, AV_LOG_ERROR, "Source and destination color space must not be identical\n");
return AVERROR(EINVAL);
}
@@ -313,7 +312,8 @@ static int config_input(AVFilterLink *inlink)
color->hsub = pix_desc->log2_chroma_w;
color->vsub = pix_desc->log2_chroma_h;
- av_log(ctx, AV_LOG_VERBOSE, "%s -> %s\n", color->src, color->dst);
+ av_log(ctx, AV_LOG_VERBOSE, "%s -> %s\n",
+ color_modes[color->source], color_modes[color->dest]);
return 0;
}
@@ -359,10 +359,10 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
static const AVFilterPad colormatrix_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_input,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -375,13 +375,14 @@ static const AVFilterPad colormatrix_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_colormatrix = {
+AVFilter ff_vf_colormatrix = {
.name = "colormatrix",
- .description = NULL_IF_CONFIG_SMALL("Color matrix conversion"),
-
+ .description = NULL_IF_CONFIG_SMALL("Convert color matrix."),
.priv_size = sizeof(ColorMatrixContext),
.init = init,
.query_formats = query_formats,
.inputs = colormatrix_inputs,
.outputs = colormatrix_outputs,
+ .priv_class = &colormatrix_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_copy.c b/ffmpeg/libavfilter/vf_copy.c
index df7ec31..fcc85c5 100644
--- a/ffmpeg/libavfilter/vf_copy.c
+++ b/ffmpeg/libavfilter/vf_copy.c
@@ -37,7 +37,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
- av_image_copy(out->data, out->linesize, in->data, in->linesize,
+ av_image_copy(out->data, out->linesize, (const uint8_t**) in->data, in->linesize,
in->format, in->width, in->height);
av_frame_free(&in);
@@ -46,10 +46,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
static const AVFilterPad avfilter_vf_copy_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -62,10 +61,9 @@ static const AVFilterPad avfilter_vf_copy_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_copy = {
- .name = "copy",
+AVFilter ff_vf_copy = {
+ .name = "copy",
.description = NULL_IF_CONFIG_SMALL("Copy the input video unchanged to the output."),
-
- .inputs = avfilter_vf_copy_inputs,
- .outputs = avfilter_vf_copy_outputs,
+ .inputs = avfilter_vf_copy_inputs,
+ .outputs = avfilter_vf_copy_outputs,
};
diff --git a/ffmpeg/libavfilter/vf_crop.c b/ffmpeg/libavfilter/vf_crop.c
index f99d1a7..261db33 100644
--- a/ffmpeg/libavfilter/vf_crop.c
+++ b/ffmpeg/libavfilter/vf_crop.c
@@ -23,8 +23,6 @@
* video crop filter
*/
-/* #define DEBUG */
-
#include <stdio.h>
#include "avfilter.h"
@@ -70,6 +68,7 @@ enum var_name {
VAR_X,
VAR_Y,
VAR_N,
+ VAR_POS,
VAR_T,
VAR_VARS_NB
};
@@ -91,61 +90,30 @@ typedef struct {
double var_values[VAR_VARS_NB];
} CropContext;
-#define OFFSET(x) offsetof(CropContext, x)
-#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
-
-static const AVOption crop_options[] = {
- { "x", "set the x crop area expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "(in_w-out_w)/2"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "y", "set the y crop area expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "(in_h-out_h)/2"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "out_w", "set the width crop area expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "w", "set the width crop area expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "out_h", "set the height crop area expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "h", "set the height crop area expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "keep_aspect", "keep aspect ratio", OFFSET(keep_aspect), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
- {NULL}
-};
-
-AVFILTER_DEFINE_CLASS(crop);
-
-static av_cold void uninit(AVFilterContext *ctx)
+static int query_formats(AVFilterContext *ctx)
{
- CropContext *crop = ctx->priv;
+ AVFilterFormats *formats = NULL;
+ int fmt;
+
+ for (fmt = 0; fmt < AV_PIX_FMT_NB; fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (!(desc->flags & (AV_PIX_FMT_FLAG_HWACCEL | AV_PIX_FMT_FLAG_BITSTREAM)) &&
+ !((desc->log2_chroma_w || desc->log2_chroma_h) && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR)))
+ ff_add_format(&formats, fmt);
+ }
- av_expr_free(crop->x_pexpr); crop->x_pexpr = NULL;
- av_expr_free(crop->y_pexpr); crop->y_pexpr = NULL;
+ ff_set_common_formats(ctx, formats);
+ return 0;
}
-static int query_formats(AVFilterContext *ctx)
+static av_cold void uninit(AVFilterContext *ctx)
{
- static const enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_RGB48BE, AV_PIX_FMT_RGB48LE,
- AV_PIX_FMT_BGR48BE, AV_PIX_FMT_BGR48LE,
- AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
- AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
- AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
- AV_PIX_FMT_RGB565BE, AV_PIX_FMT_RGB565LE,
- AV_PIX_FMT_RGB555BE, AV_PIX_FMT_RGB555LE,
- AV_PIX_FMT_BGR565BE, AV_PIX_FMT_BGR565LE,
- AV_PIX_FMT_BGR555BE, AV_PIX_FMT_BGR555LE,
- AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE,
- AV_PIX_FMT_YUV420P16LE, AV_PIX_FMT_YUV420P16BE,
- AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUV422P16BE,
- AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUV444P16BE,
- AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
- AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
- AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
- AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
- AV_PIX_FMT_YUVA420P,
- AV_PIX_FMT_RGB8, AV_PIX_FMT_BGR8,
- AV_PIX_FMT_RGB4_BYTE, AV_PIX_FMT_BGR4_BYTE,
- AV_PIX_FMT_PAL8, AV_PIX_FMT_GRAY8,
- AV_PIX_FMT_NONE
- };
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ CropContext *s = ctx->priv;
- return 0;
+ av_expr_free(s->x_pexpr);
+ s->x_pexpr = NULL;
+ av_expr_free(s->y_pexpr);
+ s->y_pexpr = NULL;
}
static inline int normalize_double(int *n, double d)
@@ -166,85 +134,93 @@ static inline int normalize_double(int *n, double d)
static int config_input(AVFilterLink *link)
{
AVFilterContext *ctx = link->dst;
- CropContext *crop = ctx->priv;
+ CropContext *s = ctx->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(link->format);
int ret;
const char *expr;
double res;
- crop->var_values[VAR_IN_W] = crop->var_values[VAR_IW] = ctx->inputs[0]->w;
- crop->var_values[VAR_IN_H] = crop->var_values[VAR_IH] = ctx->inputs[0]->h;
- crop->var_values[VAR_A] = (float) link->w / link->h;
- crop->var_values[VAR_SAR] = link->sample_aspect_ratio.num ? av_q2d(link->sample_aspect_ratio) : 1;
- crop->var_values[VAR_DAR] = crop->var_values[VAR_A] * crop->var_values[VAR_SAR];
- crop->var_values[VAR_HSUB] = 1<<pix_desc->log2_chroma_w;
- crop->var_values[VAR_VSUB] = 1<<pix_desc->log2_chroma_h;
- crop->var_values[VAR_X] = NAN;
- crop->var_values[VAR_Y] = NAN;
- crop->var_values[VAR_OUT_W] = crop->var_values[VAR_OW] = NAN;
- crop->var_values[VAR_OUT_H] = crop->var_values[VAR_OH] = NAN;
- crop->var_values[VAR_N] = 0;
- crop->var_values[VAR_T] = NAN;
-
- av_image_fill_max_pixsteps(crop->max_step, NULL, pix_desc);
- crop->hsub = pix_desc->log2_chroma_w;
- crop->vsub = pix_desc->log2_chroma_h;
-
- if ((ret = av_expr_parse_and_eval(&res, (expr = crop->w_expr),
- var_names, crop->var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto fail_expr;
- crop->var_values[VAR_OUT_W] = crop->var_values[VAR_OW] = res;
- if ((ret = av_expr_parse_and_eval(&res, (expr = crop->h_expr),
- var_names, crop->var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto fail_expr;
- crop->var_values[VAR_OUT_H] = crop->var_values[VAR_OH] = res;
+ s->var_values[VAR_IN_W] = s->var_values[VAR_IW] = ctx->inputs[0]->w;
+ s->var_values[VAR_IN_H] = s->var_values[VAR_IH] = ctx->inputs[0]->h;
+ s->var_values[VAR_A] = (float) link->w / link->h;
+ s->var_values[VAR_SAR] = link->sample_aspect_ratio.num ? av_q2d(link->sample_aspect_ratio) : 1;
+ s->var_values[VAR_DAR] = s->var_values[VAR_A] * s->var_values[VAR_SAR];
+ s->var_values[VAR_HSUB] = 1<<pix_desc->log2_chroma_w;
+ s->var_values[VAR_VSUB] = 1<<pix_desc->log2_chroma_h;
+ s->var_values[VAR_X] = NAN;
+ s->var_values[VAR_Y] = NAN;
+ s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = NAN;
+ s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = NAN;
+ s->var_values[VAR_N] = 0;
+ s->var_values[VAR_T] = NAN;
+ s->var_values[VAR_POS] = NAN;
+
+ av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc);
+ s->hsub = pix_desc->log2_chroma_w;
+ s->vsub = pix_desc->log2_chroma_h;
+
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
+ var_names, s->var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
+ goto fail_expr;
+ s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = res;
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
+ var_names, s->var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
+ goto fail_expr;
+ s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = res;
/* evaluate again ow as it may depend on oh */
- if ((ret = av_expr_parse_and_eval(&res, (expr = crop->w_expr),
- var_names, crop->var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto fail_expr;
- crop->var_values[VAR_OUT_W] = crop->var_values[VAR_OW] = res;
- if (normalize_double(&crop->w, crop->var_values[VAR_OUT_W]) < 0 ||
- normalize_double(&crop->h, crop->var_values[VAR_OUT_H]) < 0) {
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
+ var_names, s->var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
+ goto fail_expr;
+
+ s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = res;
+ if (normalize_double(&s->w, s->var_values[VAR_OUT_W]) < 0 ||
+ normalize_double(&s->h, s->var_values[VAR_OUT_H]) < 0) {
av_log(ctx, AV_LOG_ERROR,
"Too big value or invalid expression for out_w/ow or out_h/oh. "
"Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\n",
- crop->w_expr, crop->h_expr);
+ s->w_expr, s->h_expr);
return AVERROR(EINVAL);
}
- crop->w &= ~((1 << crop->hsub) - 1);
- crop->h &= ~((1 << crop->vsub) - 1);
+ s->w &= ~((1 << s->hsub) - 1);
+ s->h &= ~((1 << s->vsub) - 1);
- if ((ret = av_expr_parse(&crop->x_pexpr, crop->x_expr, var_names,
+ av_expr_free(s->x_pexpr);
+ av_expr_free(s->y_pexpr);
+ s->x_pexpr = s->y_pexpr = NULL;
+ if ((ret = av_expr_parse(&s->x_pexpr, s->x_expr, var_names,
NULL, NULL, NULL, NULL, 0, ctx)) < 0 ||
- (ret = av_expr_parse(&crop->y_pexpr, crop->y_expr, var_names,
+ (ret = av_expr_parse(&s->y_pexpr, s->y_expr, var_names,
NULL, NULL, NULL, NULL, 0, ctx)) < 0)
return AVERROR(EINVAL);
- if (crop->keep_aspect) {
+ if (s->keep_aspect) {
AVRational dar = av_mul_q(link->sample_aspect_ratio,
(AVRational){ link->w, link->h });
- av_reduce(&crop->out_sar.num, &crop->out_sar.den,
- dar.num * crop->h, dar.den * crop->w, INT_MAX);
+ av_reduce(&s->out_sar.num, &s->out_sar.den,
+ dar.num * s->h, dar.den * s->w, INT_MAX);
} else
- crop->out_sar = link->sample_aspect_ratio;
+ s->out_sar = link->sample_aspect_ratio;
av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d sar:%d/%d -> w:%d h:%d sar:%d/%d\n",
link->w, link->h, link->sample_aspect_ratio.num, link->sample_aspect_ratio.den,
- crop->w, crop->h, crop->out_sar.num, crop->out_sar.den);
+ s->w, s->h, s->out_sar.num, s->out_sar.den);
- if (crop->w <= 0 || crop->h <= 0 ||
- crop->w > link->w || crop->h > link->h) {
+ if (s->w <= 0 || s->h <= 0 ||
+ s->w > link->w || s->h > link->h) {
av_log(ctx, AV_LOG_ERROR,
"Invalid too big or non positive size for width '%d' or height '%d'\n",
- crop->w, crop->h);
+ s->w, s->h);
return AVERROR(EINVAL);
}
/* set default, required in the case the first computed value for x/y is NAN */
- crop->x = (link->w - crop->w) / 2;
- crop->y = (link->h - crop->h) / 2;
- crop->x &= ~((1 << crop->hsub) - 1);
- crop->y &= ~((1 << crop->vsub) - 1);
+ s->x = (link->w - s->w) / 2;
+ s->y = (link->h - s->h) / 2;
+ s->x &= ~((1 << s->hsub) - 1);
+ s->y &= ~((1 << s->vsub) - 1);
return 0;
fail_expr:
@@ -254,11 +230,11 @@ fail_expr:
static int config_output(AVFilterLink *link)
{
- CropContext *crop = link->src->priv;
+ CropContext *s = link->src->priv;
- link->w = crop->w;
- link->h = crop->h;
- link->sample_aspect_ratio = crop->out_sar;
+ link->w = s->w;
+ link->h = s->h;
+ link->sample_aspect_ratio = s->out_sar;
return 0;
}
@@ -266,63 +242,83 @@ static int config_output(AVFilterLink *link)
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
AVFilterContext *ctx = link->dst;
- CropContext *crop = ctx->priv;
+ CropContext *s = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
int i;
- frame->width = crop->w;
- frame->height = crop->h;
+ frame->width = s->w;
+ frame->height = s->h;
- crop->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
+ s->var_values[VAR_N] = link->frame_count;
+ s->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
NAN : frame->pts * av_q2d(link->time_base);
- crop->var_values[VAR_X] = av_expr_eval(crop->x_pexpr, crop->var_values, NULL);
- crop->var_values[VAR_Y] = av_expr_eval(crop->y_pexpr, crop->var_values, NULL);
- crop->var_values[VAR_X] = av_expr_eval(crop->x_pexpr, crop->var_values, NULL);
-
- normalize_double(&crop->x, crop->var_values[VAR_X]);
- normalize_double(&crop->y, crop->var_values[VAR_Y]);
-
- if (crop->x < 0) crop->x = 0;
- if (crop->y < 0) crop->y = 0;
- if ((unsigned)crop->x + (unsigned)crop->w > link->w) crop->x = link->w - crop->w;
- if ((unsigned)crop->y + (unsigned)crop->h > link->h) crop->y = link->h - crop->h;
- crop->x &= ~((1 << crop->hsub) - 1);
- crop->y &= ~((1 << crop->vsub) - 1);
-
- av_dlog(ctx, "n:%d t:%f x:%d y:%d x+w:%d y+h:%d\n",
- (int)crop->var_values[VAR_N], crop->var_values[VAR_T], crop->x,
- crop->y, crop->x+crop->w, crop->y+crop->h);
-
- frame->data[0] += crop->y * frame->linesize[0];
- frame->data[0] += crop->x * crop->max_step[0];
-
- if (!(desc->flags & PIX_FMT_PAL || desc->flags & PIX_FMT_PSEUDOPAL)) {
+ s->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ?
+ NAN : av_frame_get_pkt_pos(frame);
+ s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
+ s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, NULL);
+ s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
+
+ normalize_double(&s->x, s->var_values[VAR_X]);
+ normalize_double(&s->y, s->var_values[VAR_Y]);
+
+ if (s->x < 0)
+ s->x = 0;
+ if (s->y < 0)
+ s->y = 0;
+ if ((unsigned)s->x + (unsigned)s->w > link->w)
+ s->x = link->w - s->w;
+ if ((unsigned)s->y + (unsigned)s->h > link->h)
+ s->y = link->h - s->h;
+ s->x &= ~((1 << s->hsub) - 1);
+ s->y &= ~((1 << s->vsub) - 1);
+
+ av_dlog(ctx, "n:%d t:%f pos:%f x:%d y:%d x+w:%d y+h:%d\n",
+ (int)s->var_values[VAR_N], s->var_values[VAR_T], s->var_values[VAR_POS],
+ s->x, s->y, s->x+s->w, s->y+s->h);
+
+ frame->data[0] += s->y * frame->linesize[0];
+ frame->data[0] += s->x * s->max_step[0];
+
+ if (!(desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)) {
for (i = 1; i < 3; i ++) {
if (frame->data[i]) {
- frame->data[i] += (crop->y >> crop->vsub) * frame->linesize[i];
- frame->data[i] += (crop->x * crop->max_step[i]) >> crop->hsub;
+ frame->data[i] += (s->y >> s->vsub) * frame->linesize[i];
+ frame->data[i] += (s->x * s->max_step[i]) >> s->hsub;
}
}
}
/* alpha plane */
if (frame->data[3]) {
- frame->data[3] += crop->y * frame->linesize[3];
- frame->data[3] += crop->x * crop->max_step[3];
+ frame->data[3] += s->y * frame->linesize[3];
+ frame->data[3] += s->x * s->max_step[3];
}
- crop->var_values[VAR_N] += 1.0;
-
return ff_filter_frame(link->dst->outputs[0], frame);
}
+#define OFFSET(x) offsetof(CropContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption crop_options[] = {
+ { "out_w", "set the width crop area expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "w", "set the width crop area expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "out_h", "set the height crop area expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "h", "set the height crop area expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "x", "set the x crop area expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "(in_w-out_w)/2"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "y", "set the y crop area expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "(in_h-out_h)/2"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "keep_aspect", "keep aspect ratio", OFFSET(keep_aspect), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(crop);
+
static const AVFilterPad avfilter_vf_crop_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame,
- .get_video_buffer = ff_null_get_video_buffer,
- .config_props = config_input,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
},
{ NULL }
};
@@ -336,19 +332,13 @@ static const AVFilterPad avfilter_vf_crop_outputs[] = {
{ NULL }
};
-static const char *const shorthand[] = { "w", "h", "x", "y", "keep_aspect", NULL };
-
-AVFilter avfilter_vf_crop = {
- .name = "crop",
- .description = NULL_IF_CONFIG_SMALL("Crop the input video to width:height:x:y."),
-
- .priv_size = sizeof(CropContext),
-
+AVFilter ff_vf_crop = {
+ .name = "crop",
+ .description = NULL_IF_CONFIG_SMALL("Crop the input video."),
+ .priv_size = sizeof(CropContext),
+ .priv_class = &crop_class,
.query_formats = query_formats,
.uninit = uninit,
-
- .inputs = avfilter_vf_crop_inputs,
- .outputs = avfilter_vf_crop_outputs,
- .priv_class = &crop_class,
- .shorthand = shorthand,
+ .inputs = avfilter_vf_crop_inputs,
+ .outputs = avfilter_vf_crop_outputs,
};
diff --git a/ffmpeg/libavfilter/vf_cropdetect.c b/ffmpeg/libavfilter/vf_cropdetect.c
index 3dd4de7..4a6b658 100644
--- a/ffmpeg/libavfilter/vf_cropdetect.c
+++ b/ffmpeg/libavfilter/vf_cropdetect.c
@@ -26,6 +26,7 @@
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
+
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
@@ -41,18 +42,6 @@ typedef struct {
int max_pixsteps[4];
} CropDetectContext;
-#define OFFSET(x) offsetof(CropDetectContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
-
-static const AVOption cropdetect_options[] = {
- { "limit", "set black threshold", OFFSET(limit), AV_OPT_TYPE_INT, {.i64=24}, 0, 255, FLAGS },
- { "round", "set width/height round value", OFFSET(round), AV_OPT_TYPE_INT, {.i64=16}, 0, INT_MAX, FLAGS },
- { "reset_count", "set after how many frames to reset detected info", OFFSET(reset_count), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
- { NULL }
-};
-
-AVFILTER_DEFINE_CLASS(cropdetect);
-
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
@@ -95,119 +84,123 @@ static int checkline(void *ctx, const unsigned char *src, int stride, int len, i
return total;
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
- CropDetectContext *cd = ctx->priv;
- static const char *shorthand[] = { "limit", "round", "reset_count", NULL };
- int ret;
-
- cd->frame_nb = -2;
- cd->class = &cropdetect_class;
- av_opt_set_defaults(cd);
+ CropDetectContext *s = ctx->priv;
- if ((ret = av_opt_set_from_string(cd, args, shorthand, "=", ":")) < 0)
- return ret;
+ s->frame_nb = -2;
av_log(ctx, AV_LOG_VERBOSE, "limit:%d round:%d reset_count:%d\n",
- cd->limit, cd->round, cd->reset_count);
+ s->limit, s->round, s->reset_count);
return 0;
}
-static av_cold void uninit(AVFilterContext *ctx)
-{
- CropDetectContext *cd = ctx->priv;
- av_opt_free(cd);
-}
-
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
- CropDetectContext *cd = ctx->priv;
+ CropDetectContext *s = ctx->priv;
- av_image_fill_max_pixsteps(cd->max_pixsteps, NULL,
+ av_image_fill_max_pixsteps(s->max_pixsteps, NULL,
av_pix_fmt_desc_get(inlink->format));
- cd->x1 = inlink->w - 1;
- cd->y1 = inlink->h - 1;
- cd->x2 = 0;
- cd->y2 = 0;
+ s->x1 = inlink->w - 1;
+ s->y1 = inlink->h - 1;
+ s->x2 = 0;
+ s->y2 = 0;
return 0;
}
+#define SET_META(key, value) \
+ snprintf(buf, sizeof(buf), "%d", value); \
+ av_dict_set(metadata, key, buf, 0)
+
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
- CropDetectContext *cd = ctx->priv;
- int bpp = cd->max_pixsteps[0];
+ CropDetectContext *s = ctx->priv;
+ int bpp = s->max_pixsteps[0];
int w, h, x, y, shrink_by;
+ AVDictionary **metadata;
+ char buf[32];
// ignore first 2 frames - they may be empty
- if (++cd->frame_nb > 0) {
+ if (++s->frame_nb > 0) {
+ metadata = avpriv_frame_get_metadatap(frame);
+
// Reset the crop area every reset_count frames, if reset_count is > 0
- if (cd->reset_count > 0 && cd->frame_nb > cd->reset_count) {
- cd->x1 = frame->width - 1;
- cd->y1 = frame->height - 1;
- cd->x2 = 0;
- cd->y2 = 0;
- cd->frame_nb = 1;
+ if (s->reset_count > 0 && s->frame_nb > s->reset_count) {
+ s->x1 = frame->width - 1;
+ s->y1 = frame->height - 1;
+ s->x2 = 0;
+ s->y2 = 0;
+ s->frame_nb = 1;
}
- for (y = 0; y < cd->y1; y++) {
- if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > cd->limit) {
- cd->y1 = y;
+ for (y = 0; y < s->y1; y++) {
+ if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > s->limit) {
+ s->y1 = y;
break;
}
}
- for (y = frame->height - 1; y > cd->y2; y--) {
- if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > cd->limit) {
- cd->y2 = y;
+ for (y = frame->height - 1; y > s->y2; y--) {
+ if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > s->limit) {
+ s->y2 = y;
break;
}
}
- for (y = 0; y < cd->x1; y++) {
- if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > cd->limit) {
- cd->x1 = y;
+ for (y = 0; y < s->x1; y++) {
+ if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > s->limit) {
+ s->x1 = y;
break;
}
}
- for (y = frame->width - 1; y > cd->x2; y--) {
- if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > cd->limit) {
- cd->x2 = y;
+ for (y = frame->width - 1; y > s->x2; y--) {
+ if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > s->limit) {
+ s->x2 = y;
break;
}
}
// round x and y (up), important for yuv colorspaces
// make sure they stay rounded!
- x = (cd->x1+1) & ~1;
- y = (cd->y1+1) & ~1;
+ x = (s->x1+1) & ~1;
+ y = (s->y1+1) & ~1;
- w = cd->x2 - x + 1;
- h = cd->y2 - y + 1;
+ w = s->x2 - x + 1;
+ h = s->y2 - y + 1;
// w and h must be divisible by 2 as well because of yuv
// colorspace problems.
- if (cd->round <= 1)
- cd->round = 16;
- if (cd->round % 2)
- cd->round *= 2;
+ if (s->round <= 1)
+ s->round = 16;
+ if (s->round % 2)
+ s->round *= 2;
- shrink_by = w % cd->round;
+ shrink_by = w % s->round;
w -= shrink_by;
x += (shrink_by/2 + 1) & ~1;
- shrink_by = h % cd->round;
+ shrink_by = h % s->round;
h -= shrink_by;
y += (shrink_by/2 + 1) & ~1;
+ SET_META("lavfi.cropdetect.x1", s->x1);
+ SET_META("lavfi.cropdetect.x2", s->x2);
+ SET_META("lavfi.cropdetect.y1", s->y1);
+ SET_META("lavfi.cropdetect.y2", s->y2);
+ SET_META("lavfi.cropdetect.w", w);
+ SET_META("lavfi.cropdetect.h", h);
+ SET_META("lavfi.cropdetect.x", x);
+ SET_META("lavfi.cropdetect.y", y);
+
av_log(ctx, AV_LOG_INFO,
"x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n",
- cd->x1, cd->x2, cd->y1, cd->y2, w, h, x, y, frame->pts,
+ s->x1, s->x2, s->y1, s->y2, w, h, x, y, frame->pts,
frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base),
w, h, x, y);
}
@@ -215,13 +208,25 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
+#define OFFSET(x) offsetof(CropDetectContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption cropdetect_options[] = {
+ { "limit", "Threshold below which the pixel is considered black", OFFSET(limit), AV_OPT_TYPE_INT, { .i64 = 24 }, 0, 255, FLAGS },
+ { "round", "Value by which the width/height should be divisible", OFFSET(round), AV_OPT_TYPE_INT, { .i64 = 16 }, 0, INT_MAX, FLAGS },
+ { "reset", "Recalculate the crop area after this many frames", OFFSET(reset_count), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
+ { "reset_count", "Recalculate the crop area after this many frames",OFFSET(reset_count),AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(cropdetect);
+
static const AVFilterPad avfilter_vf_cropdetect_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_input,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -234,15 +239,14 @@ static const AVFilterPad avfilter_vf_cropdetect_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_cropdetect = {
- .name = "cropdetect",
- .description = NULL_IF_CONFIG_SMALL("Auto-detect crop size."),
-
- .priv_size = sizeof(CropDetectContext),
- .init = init,
- .uninit = uninit,
+AVFilter ff_vf_cropdetect = {
+ .name = "cropdetect",
+ .description = NULL_IF_CONFIG_SMALL("Auto-detect crop size."),
+ .priv_size = sizeof(CropDetectContext),
+ .priv_class = &cropdetect_class,
+ .init = init,
.query_formats = query_formats,
- .inputs = avfilter_vf_cropdetect_inputs,
- .outputs = avfilter_vf_cropdetect_outputs,
- .priv_class = &cropdetect_class,
+ .inputs = avfilter_vf_cropdetect_inputs,
+ .outputs = avfilter_vf_cropdetect_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_curves.c b/ffmpeg/libavfilter/vf_curves.c
index 7fee90a..5123430 100644
--- a/ffmpeg/libavfilter/vf_curves.c
+++ b/ffmpeg/libavfilter/vf_curves.c
@@ -19,13 +19,23 @@
*/
#include "libavutil/opt.h"
+#include "libavutil/bprint.h"
#include "libavutil/eval.h"
+#include "libavutil/file.h"
+#include "libavutil/intreadwrite.h"
#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
#include "avfilter.h"
+#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
+#define R 0
+#define G 1
+#define B 2
+#define A 3
+
struct keypoint {
double x, y;
struct keypoint *next;
@@ -33,26 +43,92 @@ struct keypoint {
#define NB_COMP 3
+enum preset {
+ PRESET_NONE,
+ PRESET_COLOR_NEGATIVE,
+ PRESET_CROSS_PROCESS,
+ PRESET_DARKER,
+ PRESET_INCREASE_CONTRAST,
+ PRESET_LIGHTER,
+ PRESET_LINEAR_CONTRAST,
+ PRESET_MEDIUM_CONTRAST,
+ PRESET_NEGATIVE,
+ PRESET_STRONG_CONTRAST,
+ PRESET_VINTAGE,
+ NB_PRESETS,
+};
+
typedef struct {
const AVClass *class;
- char *comp_points_str[NB_COMP];
- uint8_t graph[NB_COMP][256];
+ enum preset preset;
+ char *comp_points_str[NB_COMP + 1];
+ char *comp_points_str_all;
+ uint8_t graph[NB_COMP + 1][256];
+ char *psfile;
+ uint8_t rgba_map[4];
+ int step;
} CurvesContext;
#define OFFSET(x) offsetof(CurvesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption curves_options[] = {
+ { "preset", "select a color curves preset", OFFSET(preset), AV_OPT_TYPE_INT, {.i64=PRESET_NONE}, PRESET_NONE, NB_PRESETS-1, FLAGS, "preset_name" },
+ { "none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_NONE}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "color_negative", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_COLOR_NEGATIVE}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "cross_process", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_CROSS_PROCESS}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "darker", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_DARKER}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "increase_contrast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_INCREASE_CONTRAST}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "lighter", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_LIGHTER}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "linear_contrast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_LINEAR_CONTRAST}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "medium_contrast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_MEDIUM_CONTRAST}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "negative", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_NEGATIVE}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "strong_contrast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_STRONG_CONTRAST}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "vintage", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_VINTAGE}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "master","set master points coordinates",OFFSET(comp_points_str[NB_COMP]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "m", "set master points coordinates",OFFSET(comp_points_str[NB_COMP]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "red", "set red points coordinates", OFFSET(comp_points_str[0]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "r", "set red points coordinates", OFFSET(comp_points_str[0]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "green", "set green points coordinates", OFFSET(comp_points_str[1]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "g", "set green points coordinates", OFFSET(comp_points_str[1]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "blue", "set blue points coordinates", OFFSET(comp_points_str[2]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ "b", "set blue points coordinates", OFFSET(comp_points_str[2]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "all", "set points coordinates for all components", OFFSET(comp_points_str_all), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "psfile", "set Photoshop curves file name", OFFSET(psfile), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ NULL }
};
AVFILTER_DEFINE_CLASS(curves);
+static const struct {
+ const char *r;
+ const char *g;
+ const char *b;
+ const char *master;
+} curves_presets[] = {
+ [PRESET_COLOR_NEGATIVE] = {
+ "0/1 0.129/1 0.466/0.498 0.725/0 1/0",
+ "0/1 0.109/1 0.301/0.498 0.517/0 1/0",
+ "0/1 0.098/1 0.235/0.498 0.423/0 1/0",
+ },
+ [PRESET_CROSS_PROCESS] = {
+ "0.25/0.156 0.501/0.501 0.686/0.745",
+ "0.25/0.188 0.38/0.501 0.745/0.815 1/0.815",
+ "0.231/0.094 0.709/0.874",
+ },
+ [PRESET_DARKER] = { .master = "0.5/0.4" },
+ [PRESET_INCREASE_CONTRAST] = { .master = "0.149/0.066 0.831/0.905 0.905/0.98" },
+ [PRESET_LIGHTER] = { .master = "0.4/0.5" },
+ [PRESET_LINEAR_CONTRAST] = { .master = "0.305/0.286 0.694/0.713" },
+ [PRESET_MEDIUM_CONTRAST] = { .master = "0.286/0.219 0.639/0.643" },
+ [PRESET_NEGATIVE] = { .master = "0/1 1/0" },
+ [PRESET_STRONG_CONTRAST] = { .master = "0.301/0.196 0.592/0.6 0.686/0.737" },
+ [PRESET_VINTAGE] = {
+ "0/0.11 0.42/0.51 1/0.95",
+ "0.50/0.48",
+ "0/0.22 0.49/0.44 1/0.8",
+ }
+};
+
static struct keypoint *make_point(double x, double y, struct keypoint *next)
{
struct keypoint *point = av_mallocz(sizeof(*point));
@@ -175,27 +251,27 @@ static int interpolate(AVFilterContext *ctx, uint8_t *y, const struct keypoint *
point = point->next;
}
-#define B 0 /* sub diagonal (below main) */
-#define M 1 /* main diagonal (center) */
-#define A 2 /* sup diagonal (above main) */
+#define BD 0 /* sub diagonal (below main) */
+#define MD 1 /* main diagonal (center) */
+#define AD 2 /* sup diagonal (above main) */
/* left side of the polynomials into a tridiagonal matrix. */
- matrix[0][M] = matrix[n - 1][M] = 1;
+ matrix[0][MD] = matrix[n - 1][MD] = 1;
for (i = 1; i < n - 1; i++) {
- matrix[i][B] = h[i-1];
- matrix[i][M] = 2 * (h[i-1] + h[i]);
- matrix[i][A] = h[i];
+ matrix[i][BD] = h[i-1];
+ matrix[i][MD] = 2 * (h[i-1] + h[i]);
+ matrix[i][AD] = h[i];
}
/* tridiagonal solving of the linear system */
for (i = 1; i < n; i++) {
- double den = matrix[i][M] - matrix[i][B] * matrix[i-1][A];
+ double den = matrix[i][MD] - matrix[i][BD] * matrix[i-1][AD];
double k = den ? 1./den : 1.;
- matrix[i][A] *= k;
- r[i] = (r[i] - matrix[i][B] * r[i - 1]) * k;
+ matrix[i][AD] *= k;
+ r[i] = (r[i] - matrix[i][BD] * r[i - 1]) * k;
}
for (i = n - 2; i >= 0; i--)
- r[i] = r[i] - matrix[i][A] * r[i + 1];
+ r[i] = r[i] - matrix[i][AD] * r[i + 1];
/* compute the graph with x=[0..255] */
i = 0;
@@ -235,19 +311,101 @@ end:
return ret;
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static int parse_psfile(AVFilterContext *ctx, const char *fname)
{
- int i, j, ret;
CurvesContext *curves = ctx->priv;
- struct keypoint *comp_points[NB_COMP] = {0};
+ uint8_t *buf;
+ size_t size;
+ int i, ret, av_unused(version), nb_curves;
+ AVBPrint ptstr;
+ static const int comp_ids[] = {3, 0, 1, 2};
- curves->class = &curves_class;
- av_opt_set_defaults(curves);
+ av_bprint_init(&ptstr, 0, AV_BPRINT_SIZE_AUTOMATIC);
- if ((ret = av_set_options_string(curves, args, "=", ":")) < 0)
+ ret = av_file_map(fname, &buf, &size, 0, NULL);
+ if (ret < 0)
return ret;
- for (i = 0; i < NB_COMP; i++) {
+#define READ16(dst) do { \
+ if (size < 2) \
+ return AVERROR_INVALIDDATA; \
+ dst = AV_RB16(buf); \
+ buf += 2; \
+ size -= 2; \
+} while (0)
+
+ READ16(version);
+ READ16(nb_curves);
+ for (i = 0; i < FFMIN(nb_curves, FF_ARRAY_ELEMS(comp_ids)); i++) {
+ int nb_points, n;
+ av_bprint_clear(&ptstr);
+ READ16(nb_points);
+ for (n = 0; n < nb_points; n++) {
+ int y, x;
+ READ16(y);
+ READ16(x);
+ av_bprintf(&ptstr, "%f/%f ", x / 255., y / 255.);
+ }
+ if (*ptstr.str) {
+ char **pts = &curves->comp_points_str[comp_ids[i]];
+ if (!*pts) {
+ *pts = av_strdup(ptstr.str);
+ av_log(ctx, AV_LOG_DEBUG, "curves %d (intid=%d) [%d points]: [%s]\n",
+ i, comp_ids[i], nb_points, *pts);
+ if (!*pts) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ }
+ }
+ }
+end:
+ av_bprint_finalize(&ptstr, NULL);
+ av_file_unmap(buf, size);
+ return ret;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ int i, j, ret;
+ CurvesContext *curves = ctx->priv;
+ struct keypoint *comp_points[NB_COMP + 1] = {0};
+ char **pts = curves->comp_points_str;
+ const char *allp = curves->comp_points_str_all;
+
+ //if (!allp && curves->preset != PRESET_NONE && curves_presets[curves->preset].all)
+ // allp = curves_presets[curves->preset].all;
+
+ if (allp) {
+ for (i = 0; i < NB_COMP; i++) {
+ if (!pts[i])
+ pts[i] = av_strdup(allp);
+ if (!pts[i])
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ if (curves->psfile) {
+ ret = parse_psfile(ctx, curves->psfile);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (curves->preset != PRESET_NONE) {
+#define SET_COMP_IF_NOT_SET(n, name) do { \
+ if (!pts[n] && curves_presets[curves->preset].name) { \
+ pts[n] = av_strdup(curves_presets[curves->preset].name); \
+ if (!pts[n]) \
+ return AVERROR(ENOMEM); \
+ } \
+} while (0)
+ SET_COMP_IF_NOT_SET(0, r);
+ SET_COMP_IF_NOT_SET(1, g);
+ SET_COMP_IF_NOT_SET(2, b);
+ SET_COMP_IF_NOT_SET(3, master);
+ }
+
+ for (i = 0; i < NB_COMP + 1; i++) {
ret = parse_points_str(ctx, comp_points + i, curves->comp_points_str[i]);
if (ret < 0)
return ret;
@@ -256,6 +414,12 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
return ret;
}
+ if (pts[NB_COMP]) {
+ for (i = 0; i < NB_COMP; i++)
+ for (j = 0; j < 256; j++)
+ curves->graph[i][j] = curves->graph[NB_COMP][curves->graph[i][j]];
+ }
+
if (av_log_get_level() >= AV_LOG_VERBOSE) {
for (i = 0; i < NB_COMP; i++) {
struct keypoint *point = comp_points[i];
@@ -272,7 +436,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
}
}
- for (i = 0; i < NB_COMP; i++) {
+ for (i = 0; i < NB_COMP + 1; i++) {
struct keypoint *point = comp_points[i];
while (point) {
struct keypoint *next = point->next;
@@ -281,26 +445,48 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
}
}
- av_opt_free(curves);
return 0;
}
static int query_formats(AVFilterContext *ctx)
{
- static const enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE};
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_NONE
+ };
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
+static int config_input(AVFilterLink *inlink)
+{
+ CurvesContext *curves = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ ff_fill_rgba_map(curves->rgba_map, inlink->format);
+ curves->step = av_get_padded_bits_per_pixel(desc) >> 3;
+
+ return 0;
+}
+
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
- int x, y, i, direct = 0;
+ int x, y, direct = 0;
AVFilterContext *ctx = inlink->dst;
CurvesContext *curves = ctx->priv;
- AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out;
uint8_t *dst;
const uint8_t *src;
+ const int step = curves->step;
+ const uint8_t r = curves->rgba_map[R];
+ const uint8_t g = curves->rgba_map[G];
+ const uint8_t b = curves->rgba_map[B];
+ const uint8_t a = curves->rgba_map[A];
if (av_frame_is_writable(in)) {
direct = 1;
@@ -318,12 +504,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
src = in ->data[0];
for (y = 0; y < inlink->h; y++) {
- uint8_t *dstp = dst;
- const uint8_t *srcp = src;
-
- for (x = 0; x < inlink->w; x++)
- for (i = 0; i < NB_COMP; i++, dstp++, srcp++)
- *dstp = curves->graph[i][*srcp];
+ for (x = 0; x < inlink->w * step; x += step) {
+ dst[x + r] = curves->graph[R][src[x + r]];
+ dst[x + g] = curves->graph[G][src[x + g]];
+ dst[x + b] = curves->graph[B][src[x + b]];
+ if (!direct && step == 4)
+ dst[x + a] = src[x + a];
+ }
dst += out->linesize[0];
src += in ->linesize[0];
}
@@ -339,19 +526,20 @@ static const AVFilterPad curves_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
+ .config_props = config_input,
},
{ NULL }
};
static const AVFilterPad curves_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- },
- { NULL }
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
};
-AVFilter avfilter_vf_curves = {
+AVFilter ff_vf_curves = {
.name = "curves",
.description = NULL_IF_CONFIG_SMALL("Adjust components curves."),
.priv_size = sizeof(CurvesContext),
@@ -360,4 +548,5 @@ AVFilter avfilter_vf_curves = {
.inputs = curves_inputs,
.outputs = curves_outputs,
.priv_class = &curves_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_decimate.c b/ffmpeg/libavfilter/vf_decimate.c
index 630f3ba..5efafe9 100644
--- a/ffmpeg/libavfilter/vf_decimate.c
+++ b/ffmpeg/libavfilter/vf_decimate.c
@@ -1,259 +1,403 @@
/*
- * Copyright (c) 2003 Rich Felker
- * Copyright (c) 2012 Stefano Sabatini
+ * Copyright (c) 2012 Fredrik Mellbin
+ * Copyright (c) 2013 Clément Bœsch
*
* This file is part of FFmpeg.
*
- * FFmpeg is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-/**
- * @file decimate filter, ported from libmpcodecs/vf_decimate.c by
- * Rich Felker.
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timestamp.h"
-#include "libavcodec/dsputil.h"
#include "avfilter.h"
#include "internal.h"
-#include "formats.h"
-#include "video.h"
-
-typedef struct {
- const AVClass *class;
- int lo, hi; ///< lower and higher threshold number of differences
- ///< values for 8x8 blocks
-
- float frac; ///< threshold of changed pixels over the total fraction
- int max_drop_count; ///< if positive: maximum number of sequential frames to drop
- ///< if negative: minimum number of frames between two drops
+#define INPUT_MAIN 0
+#define INPUT_CLEANSRC 1
- int drop_count; ///< if positive: number of frames sequentially dropped
- ///< if negative: number of sequential frames which were not dropped
+struct qitem {
+ AVFrame *frame;
+ int64_t maxbdiff;
+ int64_t totdiff;
+};
- int hsub, vsub; ///< chroma subsampling values
- AVFrame *ref; ///< reference picture
- DSPContext dspctx; ///< context providing optimized diff routines
- AVCodecContext *avctx; ///< codec context required for the DSPContext
+typedef struct {
+ const AVClass *class;
+ struct qitem *queue; ///< window of cycle frames and the associated data diff
+ int fid; ///< current frame id in the queue
+ int filled; ///< 1 if the queue is filled, 0 otherwise
+ AVFrame *last; ///< last frame from the previous queue
+ AVFrame **clean_src; ///< frame queue for the clean source
+ int got_frame[2]; ///< frame request flag for each input stream
+ double ts_unit; ///< timestamp units for the output frames
+ int64_t start_pts; ///< base for output timestamps
+ uint32_t eof; ///< bitmask for end of stream
+ int hsub, vsub; ///< chroma subsampling values
+ int depth;
+ int nxblocks, nyblocks;
+ int bdiffsize;
+ int64_t *bdiffs;
+
+ /* options */
+ int cycle;
+ double dupthresh_flt;
+ double scthresh_flt;
+ int64_t dupthresh;
+ int64_t scthresh;
+ int blockx, blocky;
+ int ppsrc;
+ int chroma;
} DecimateContext;
#define OFFSET(x) offsetof(DecimateContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption decimate_options[] = {
- { "max", "set the maximum number of consecutive dropped frames (positive), or the minimum interval between dropped frames (negative)",
- OFFSET(max_drop_count), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX, FLAGS },
- { "hi", "set high dropping threshold", OFFSET(hi), AV_OPT_TYPE_INT, {.i64=64*12}, INT_MIN, INT_MAX, FLAGS },
- { "lo", "set low dropping threshold", OFFSET(lo), AV_OPT_TYPE_INT, {.i64=64*5}, INT_MIN, INT_MAX, FLAGS },
- { "frac", "set fraction dropping threshold", OFFSET(frac), AV_OPT_TYPE_FLOAT, {.dbl=0.33}, 0, 1, FLAGS },
+ { "cycle", "set the number of frame from which one will be dropped", OFFSET(cycle), AV_OPT_TYPE_INT, {.i64 = 5}, 2, 25, FLAGS },
+ { "dupthresh", "set duplicate threshold", OFFSET(dupthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl = 1.1}, 0, 100, FLAGS },
+ { "scthresh", "set scene change threshold", OFFSET(scthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl = 15.0}, 0, 100, FLAGS },
+ { "blockx", "set the size of the x-axis blocks used during metric calculations", OFFSET(blockx), AV_OPT_TYPE_INT, {.i64 = 32}, 4, 1<<9, FLAGS },
+ { "blocky", "set the size of the y-axis blocks used during metric calculations", OFFSET(blocky), AV_OPT_TYPE_INT, {.i64 = 32}, 4, 1<<9, FLAGS },
+ { "ppsrc", "mark main input as a pre-processed input and activate clean source input stream", OFFSET(ppsrc), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
+ { "chroma", "set whether or not chroma is considered in the metric calculations", OFFSET(chroma), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
{ NULL }
};
AVFILTER_DEFINE_CLASS(decimate);
-/**
- * Return 1 if the two planes are different, 0 otherwise.
- */
-static int diff_planes(AVFilterContext *ctx,
- uint8_t *cur, uint8_t *ref, int linesize,
- int w, int h)
+static void calc_diffs(const DecimateContext *dm, struct qitem *q,
+ const AVFrame *f1, const AVFrame *f2)
{
- DecimateContext *decimate = ctx->priv;
- DSPContext *dspctx = &decimate->dspctx;
-
- int x, y;
- int d, c = 0;
- int t = (w/16)*(h/16)*decimate->frac;
- int16_t block[8*8];
-
- /* compute difference for blocks of 8x8 bytes */
- for (y = 0; y < h-7; y += 4) {
- for (x = 8; x < w-7; x += 4) {
- dspctx->diff_pixels(block,
- cur+x+y*linesize,
- ref+x+y*linesize, linesize);
- d = dspctx->sum_abs_dctelem(block);
- if (d > decimate->hi)
- return 1;
- if (d > decimate->lo) {
- c++;
- if (c > t)
- return 1;
- }
+ int64_t maxdiff = -1;
+ int64_t *bdiffs = dm->bdiffs;
+ int plane, i, j;
+
+ memset(bdiffs, 0, dm->bdiffsize * sizeof(*bdiffs));
+
+ for (plane = 0; plane < (dm->chroma && f1->data[2] ? 3 : 1); plane++) {
+ int x, y, xl;
+ const int linesize1 = f1->linesize[plane];
+ const int linesize2 = f2->linesize[plane];
+ const uint8_t *f1p = f1->data[plane];
+ const uint8_t *f2p = f2->data[plane];
+ int width = plane ? FF_CEIL_RSHIFT(f1->width, dm->hsub) : f1->width;
+ int height = plane ? FF_CEIL_RSHIFT(f1->height, dm->vsub) : f1->height;
+ int hblockx = dm->blockx / 2;
+ int hblocky = dm->blocky / 2;
+
+ if (plane) {
+ hblockx >>= dm->hsub;
+ hblocky >>= dm->vsub;
+ }
+
+ for (y = 0; y < height; y++) {
+ int ydest = y / hblocky;
+ int xdest = 0;
+
+#define CALC_DIFF(nbits) do { \
+ for (x = 0; x < width; x += hblockx) { \
+ int64_t acc = 0; \
+ int m = FFMIN(width, x + hblockx); \
+ for (xl = x; xl < m; xl++) \
+ acc += abs(((const uint##nbits##_t *)f1p)[xl] - \
+ ((const uint##nbits##_t *)f2p)[xl]); \
+ bdiffs[ydest * dm->nxblocks + xdest] += acc; \
+ xdest++; \
+ } \
+} while (0)
+ if (dm->depth == 8) CALC_DIFF(8);
+ else CALC_DIFF(16);
+
+ f1p += linesize1;
+ f2p += linesize2;
}
}
- return 0;
+
+ for (i = 0; i < dm->nyblocks - 1; i++) {
+ for (j = 0; j < dm->nxblocks - 1; j++) {
+ int64_t tmp = bdiffs[ i * dm->nxblocks + j ]
+ + bdiffs[ i * dm->nxblocks + j + 1]
+ + bdiffs[(i + 1) * dm->nxblocks + j ]
+ + bdiffs[(i + 1) * dm->nxblocks + j + 1];
+ if (tmp > maxdiff)
+ maxdiff = tmp;
+ }
+ }
+
+ q->totdiff = 0;
+ for (i = 0; i < dm->bdiffsize; i++)
+ q->totdiff += bdiffs[i];
+ q->maxbdiff = maxdiff;
}
-/**
- * Tell if the frame should be decimated, for example if it is no much
- * different with respect to the reference frame ref.
- */
-static int decimate_frame(AVFilterContext *ctx,
- AVFrame *cur, AVFrame *ref)
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
- DecimateContext *decimate = ctx->priv;
- int plane;
-
- if (decimate->max_drop_count > 0 &&
- decimate->drop_count >= decimate->max_drop_count)
- return 0;
- if (decimate->max_drop_count < 0 &&
- (decimate->drop_count-1) > decimate->max_drop_count)
+ int scpos = -1, duppos = -1;
+ int drop = INT_MIN, i, lowest = 0, ret;
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ DecimateContext *dm = ctx->priv;
+ AVFrame *prv;
+
+ /* update frames queue(s) */
+ if (FF_INLINK_IDX(inlink) == INPUT_MAIN) {
+ dm->queue[dm->fid].frame = in;
+ dm->got_frame[INPUT_MAIN] = 1;
+ } else {
+ dm->clean_src[dm->fid] = in;
+ dm->got_frame[INPUT_CLEANSRC] = 1;
+ }
+ if (!dm->got_frame[INPUT_MAIN] || (dm->ppsrc && !dm->got_frame[INPUT_CLEANSRC]))
return 0;
-
- for (plane = 0; ref->data[plane] && ref->linesize[plane]; plane++) {
- int vsub = plane == 1 || plane == 2 ? decimate->vsub : 0;
- int hsub = plane == 1 || plane == 2 ? decimate->hsub : 0;
- if (diff_planes(ctx,
- cur->data[plane], ref->data[plane], ref->linesize[plane],
- ref->width>>hsub, ref->height>>vsub))
+ dm->got_frame[INPUT_MAIN] = dm->got_frame[INPUT_CLEANSRC] = 0;
+
+ if (in) {
+ /* update frame metrics */
+ prv = dm->fid ? dm->queue[dm->fid - 1].frame : dm->last;
+ if (!prv)
+ prv = in;
+ calc_diffs(dm, &dm->queue[dm->fid], prv, in);
+ if (++dm->fid != dm->cycle)
return 0;
+ av_frame_free(&dm->last);
+ dm->last = av_frame_clone(in);
+ dm->fid = 0;
+
+ /* we have a complete cycle, select the frame to drop */
+ lowest = 0;
+ for (i = 0; i < dm->cycle; i++) {
+ if (dm->queue[i].totdiff > dm->scthresh)
+ scpos = i;
+ if (dm->queue[i].maxbdiff < dm->queue[lowest].maxbdiff)
+ lowest = i;
+ }
+ if (dm->queue[lowest].maxbdiff < dm->dupthresh)
+ duppos = lowest;
+ drop = scpos >= 0 && duppos < 0 ? scpos : lowest;
}
- return 1;
-}
+ /* metrics debug */
+ if (av_log_get_level() >= AV_LOG_DEBUG) {
+ av_log(ctx, AV_LOG_DEBUG, "1/%d frame drop:\n", dm->cycle);
+ for (i = 0; i < dm->cycle && dm->queue[i].frame; i++) {
+ av_log(ctx, AV_LOG_DEBUG," #%d: totdiff=%08"PRIx64" maxbdiff=%08"PRIx64"%s%s%s%s\n",
+ i + 1, dm->queue[i].totdiff, dm->queue[i].maxbdiff,
+ i == scpos ? " sc" : "",
+ i == duppos ? " dup" : "",
+ i == lowest ? " lowest" : "",
+ i == drop ? " [DROP]" : "");
+ }
+ }
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- DecimateContext *decimate = ctx->priv;
+ /* push all frames except the drop */
+ ret = 0;
+ for (i = 0; i < dm->cycle && dm->queue[i].frame; i++) {
+ if (i == drop) {
+ if (dm->ppsrc)
+ av_frame_free(&dm->clean_src[i]);
+ av_frame_free(&dm->queue[i].frame);
+ } else {
+ AVFrame *frame = dm->queue[i].frame;
+ if (frame->pts != AV_NOPTS_VALUE && dm->start_pts == AV_NOPTS_VALUE)
+ dm->start_pts = frame->pts;
+ if (dm->ppsrc) {
+ av_frame_free(&frame);
+ frame = dm->clean_src[i];
+ }
+ frame->pts = outlink->frame_count * dm->ts_unit +
+ (dm->start_pts == AV_NOPTS_VALUE ? 0 : dm->start_pts);
+ ret = ff_filter_frame(outlink, frame);
+ if (ret < 0)
+ break;
+ }
+ }
- av_log(ctx, AV_LOG_VERBOSE, "max_drop_count:%d hi:%d lo:%d frac:%f\n",
- decimate->max_drop_count, decimate->hi, decimate->lo, decimate->frac);
+ return ret;
+}
- decimate->avctx = avcodec_alloc_context3(NULL);
- if (!decimate->avctx)
+static int config_input(AVFilterLink *inlink)
+{
+ int max_value;
+ AVFilterContext *ctx = inlink->dst;
+ DecimateContext *dm = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+ const int w = inlink->w;
+ const int h = inlink->h;
+
+ dm->hsub = pix_desc->log2_chroma_w;
+ dm->vsub = pix_desc->log2_chroma_h;
+ dm->depth = pix_desc->comp[0].depth_minus1 + 1;
+ max_value = (1 << dm->depth) - 1;
+ dm->scthresh = (int64_t)(((int64_t)max_value * w * h * dm->scthresh_flt) / 100);
+ dm->dupthresh = (int64_t)(((int64_t)max_value * dm->blockx * dm->blocky * dm->dupthresh_flt) / 100);
+ dm->nxblocks = (w + dm->blockx/2 - 1) / (dm->blockx/2);
+ dm->nyblocks = (h + dm->blocky/2 - 1) / (dm->blocky/2);
+ dm->bdiffsize = dm->nxblocks * dm->nyblocks;
+ dm->bdiffs = av_malloc(dm->bdiffsize * sizeof(*dm->bdiffs));
+ dm->queue = av_calloc(dm->cycle, sizeof(*dm->queue));
+
+ if (!dm->bdiffs || !dm->queue)
return AVERROR(ENOMEM);
- dsputil_init(&decimate->dspctx, decimate->avctx);
- return 0;
-}
+ if (dm->ppsrc) {
+ dm->clean_src = av_calloc(dm->cycle, sizeof(*dm->clean_src));
+ if (!dm->clean_src)
+ return AVERROR(ENOMEM);
+ }
-static av_cold void uninit(AVFilterContext *ctx)
-{
- DecimateContext *decimate = ctx->priv;
- av_frame_free(&decimate->ref);
- avcodec_close(decimate->avctx);
- av_freep(&decimate->avctx);
+ return 0;
}
-static int query_formats(AVFilterContext *ctx)
+static av_cold int decimate_init(AVFilterContext *ctx)
{
- static const enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
- AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
- AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
- AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
- AV_PIX_FMT_YUVA420P,
- AV_PIX_FMT_NONE
+ DecimateContext *dm = ctx->priv;
+ AVFilterPad pad = {
+ .name = av_strdup("main"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
};
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ ff_insert_inpad(ctx, INPUT_MAIN, &pad);
+
+ if (dm->ppsrc) {
+ pad.name = av_strdup("clean_src");
+ pad.config_props = NULL;
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ ff_insert_inpad(ctx, INPUT_CLEANSRC, &pad);
+ }
- return 0;
-}
+ if ((dm->blockx & (dm->blockx - 1)) ||
+ (dm->blocky & (dm->blocky - 1))) {
+ av_log(ctx, AV_LOG_ERROR, "blockx and blocky settings must be power of two\n");
+ return AVERROR(EINVAL);
+ }
-static int config_input(AVFilterLink *inlink)
-{
- AVFilterContext *ctx = inlink->dst;
- DecimateContext *decimate = ctx->priv;
- const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
- decimate->hsub = pix_desc->log2_chroma_w;
- decimate->vsub = pix_desc->log2_chroma_h;
+ dm->start_pts = AV_NOPTS_VALUE;
return 0;
}
-static int filter_frame(AVFilterLink *inlink, AVFrame *cur)
+static av_cold void decimate_uninit(AVFilterContext *ctx)
{
- DecimateContext *decimate = inlink->dst->priv;
- AVFilterLink *outlink = inlink->dst->outputs[0];
- int ret;
+ int i;
+ DecimateContext *dm = ctx->priv;
+
+ av_frame_free(&dm->last);
+ av_freep(&dm->bdiffs);
+ av_freep(&dm->queue);
+ av_freep(&dm->clean_src);
+ for (i = 0; i < ctx->nb_inputs; i++)
+ av_freep(&ctx->input_pads[i].name);
+}
- if (decimate->ref && decimate_frame(inlink->dst, cur, decimate->ref)) {
- decimate->drop_count = FFMAX(1, decimate->drop_count+1);
- } else {
- av_frame_free(&decimate->ref);
- decimate->ref = cur;
- decimate->drop_count = FFMIN(-1, decimate->drop_count-1);
+static int request_inlink(AVFilterContext *ctx, int lid)
+{
+ int ret = 0;
+ DecimateContext *dm = ctx->priv;
- if (ret = ff_filter_frame(outlink, av_frame_clone(cur)) < 0)
- return ret;
+ if (!dm->got_frame[lid]) {
+ AVFilterLink *inlink = ctx->inputs[lid];
+ ret = ff_request_frame(inlink);
+ if (ret == AVERROR_EOF) { // flushing
+ dm->eof |= 1 << lid;
+ ret = filter_frame(inlink, NULL);
+ }
}
-
- av_log(inlink->dst, AV_LOG_DEBUG,
- "%s pts:%s pts_time:%s drop_count:%d\n",
- decimate->drop_count > 0 ? "drop" : "keep",
- av_ts2str(cur->pts), av_ts2timestr(cur->pts, &inlink->time_base),
- decimate->drop_count);
-
- if (decimate->drop_count > 0)
- av_frame_free(&cur);
-
- return 0;
+ return ret;
}
static int request_frame(AVFilterLink *outlink)
{
- DecimateContext *decimate = outlink->src->priv;
- AVFilterLink *inlink = outlink->src->inputs[0];
int ret;
+ AVFilterContext *ctx = outlink->src;
+ DecimateContext *dm = ctx->priv;
+ const uint32_t eof_mask = 1<<INPUT_MAIN | dm->ppsrc<<INPUT_CLEANSRC;
+
+ if ((dm->eof & eof_mask) == eof_mask) // flush done?
+ return AVERROR_EOF;
+ if ((ret = request_inlink(ctx, INPUT_MAIN)) < 0)
+ return ret;
+ if (dm->ppsrc && (ret = request_inlink(ctx, INPUT_CLEANSRC)) < 0)
+ return ret;
+ return 0;
+}
- do {
- ret = ff_request_frame(inlink);
- } while (decimate->drop_count > 0 && ret >= 0);
-
- return ret;
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+#define PF_NOALPHA(suf) AV_PIX_FMT_YUV420##suf, AV_PIX_FMT_YUV422##suf, AV_PIX_FMT_YUV444##suf
+#define PF_ALPHA(suf) AV_PIX_FMT_YUVA420##suf, AV_PIX_FMT_YUVA422##suf, AV_PIX_FMT_YUVA444##suf
+#define PF(suf) PF_NOALPHA(suf), PF_ALPHA(suf)
+ PF(P), PF(P9), PF(P10), PF_NOALPHA(P12), PF_NOALPHA(P14), PF(P16),
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_NONE
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
}
-static const AVFilterPad decimate_inputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .config_props = config_input,
- .filter_frame = filter_frame,
- },
- { NULL }
-};
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ DecimateContext *dm = ctx->priv;
+ const AVFilterLink *inlink =
+ ctx->inputs[dm->ppsrc ? INPUT_CLEANSRC : INPUT_MAIN];
+ AVRational fps = inlink->frame_rate;
+
+ if (!fps.num || !fps.den) {
+ av_log(ctx, AV_LOG_ERROR, "The input needs a constant frame rate; "
+ "current rate of %d/%d is invalid\n", fps.num, fps.den);
+ return AVERROR(EINVAL);
+ }
+ fps = av_mul_q(fps, (AVRational){dm->cycle - 1, dm->cycle});
+ av_log(ctx, AV_LOG_VERBOSE, "FPS: %d/%d -> %d/%d\n",
+ inlink->frame_rate.num, inlink->frame_rate.den, fps.num, fps.den);
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+ outlink->time_base = inlink->time_base;
+ outlink->frame_rate = fps;
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ outlink->w = inlink->w;
+ outlink->h = inlink->h;
+ dm->ts_unit = av_q2d(av_inv_q(av_mul_q(fps, outlink->time_base)));
+ return 0;
+}
static const AVFilterPad decimate_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
+ .config_props = config_output,
},
{ NULL }
};
-static const char *const shorthand[] = { "max", "hi", "lo", "frac", NULL };
-
-AVFilter avfilter_vf_decimate = {
- .name = "decimate",
- .description = NULL_IF_CONFIG_SMALL("Remove near-duplicate frames."),
- .init = init,
- .uninit = uninit,
-
- .priv_size = sizeof(DecimateContext),
+AVFilter ff_vf_decimate = {
+ .name = "decimate",
+ .description = NULL_IF_CONFIG_SMALL("Decimate frames (post field matching filter)."),
+ .init = decimate_init,
+ .uninit = decimate_uninit,
+ .priv_size = sizeof(DecimateContext),
.query_formats = query_formats,
- .inputs = decimate_inputs,
.outputs = decimate_outputs,
.priv_class = &decimate_class,
- .shorthand = shorthand,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
diff --git a/ffmpeg/libavfilter/vf_delogo.c b/ffmpeg/libavfilter/vf_delogo.c
index 159f69f..fbc8983 100644
--- a/ffmpeg/libavfilter/vf_delogo.c
+++ b/ffmpeg/libavfilter/vf_delogo.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2002 Jindrich Makovicka <makovick@gmail.com>
* Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2013 Jean Delvare <khali@linux-fr.org>
*
* This file is part of FFmpeg.
*
@@ -22,7 +23,8 @@
/**
* @file
* A very simple tv station logo remover
- * Ported from MPlayer libmpcodecs/vf_delogo.c.
+ * Originally imported from MPlayer libmpcodecs/vf_delogo.c,
+ * the algorithm was later improved.
*/
#include "libavutil/common.h"
@@ -35,8 +37,8 @@
#include "video.h"
/**
- * Apply a simple delogo algorithm to the image in dst and put the
- * result in src.
+ * Apply a simple delogo algorithm to the image in src and put the
+ * result in dst.
*
* The algorithm is only applied to the region specified by the logo
* parameters.
@@ -54,15 +56,16 @@
*/
static void apply_delogo(uint8_t *dst, int dst_linesize,
uint8_t *src, int src_linesize,
- int w, int h,
+ int w, int h, AVRational sar,
int logo_x, int logo_y, int logo_w, int logo_h,
- int band, int show, int direct)
+ unsigned int band, int show, int direct)
{
int x, y;
- int interp, dist;
+ uint64_t interp, weightl, weightr, weightt, weightb;
uint8_t *xdst, *xsrc;
uint8_t *topleft, *botleft, *topright;
+ unsigned int left_sample, right_sample;
int xclipl, xclipr, yclipt, yclipb;
int logo_x1, logo_x2, logo_y1, logo_y2;
@@ -87,32 +90,43 @@ static void apply_delogo(uint8_t *dst, int dst_linesize,
src += (logo_y1 + 1) * src_linesize;
for (y = logo_y1+1; y < logo_y2-1; y++) {
+ left_sample = topleft[src_linesize*(y-logo_y1)] +
+ topleft[src_linesize*(y-logo_y1-1)] +
+ topleft[src_linesize*(y-logo_y1+1)];
+ right_sample = topright[src_linesize*(y-logo_y1)] +
+ topright[src_linesize*(y-logo_y1-1)] +
+ topright[src_linesize*(y-logo_y1+1)];
+
for (x = logo_x1+1,
xdst = dst+logo_x1+1,
xsrc = src+logo_x1+1; x < logo_x2-1; x++, xdst++, xsrc++) {
+
+ /* Weighted interpolation based on relative distances, taking SAR into account */
+ weightl = (uint64_t) (logo_x2-1-x) * (y-logo_y1) * (logo_y2-1-y) * sar.den;
+ weightr = (uint64_t)(x-logo_x1) * (y-logo_y1) * (logo_y2-1-y) * sar.den;
+ weightt = (uint64_t)(x-logo_x1) * (logo_x2-1-x) * (logo_y2-1-y) * sar.num;
+ weightb = (uint64_t)(x-logo_x1) * (logo_x2-1-x) * (y-logo_y1) * sar.num;
+
interp =
- (topleft[src_linesize*(y-logo_y -yclipt)] +
- topleft[src_linesize*(y-logo_y-1-yclipt)] +
- topleft[src_linesize*(y-logo_y+1-yclipt)]) * (logo_w-(x-logo_x))/logo_w
+ left_sample * weightl
+
- (topright[src_linesize*(y-logo_y-yclipt)] +
- topright[src_linesize*(y-logo_y-1-yclipt)] +
- topright[src_linesize*(y-logo_y+1-yclipt)]) * (x-logo_x)/logo_w
+ right_sample * weightr
+
- (topleft[x-logo_x-xclipl] +
- topleft[x-logo_x-1-xclipl] +
- topleft[x-logo_x+1-xclipl]) * (logo_h-(y-logo_y))/logo_h
+ (topleft[x-logo_x1] +
+ topleft[x-logo_x1-1] +
+ topleft[x-logo_x1+1]) * weightt
+
- (botleft[x-logo_x-xclipl] +
- botleft[x-logo_x-1-xclipl] +
- botleft[x-logo_x+1-xclipl]) * (y-logo_y)/logo_h;
- interp /= 6;
+ (botleft[x-logo_x1] +
+ botleft[x-logo_x1-1] +
+ botleft[x-logo_x1+1]) * weightb;
+ interp /= (weightl + weightr + weightt + weightb) * 3U;
if (y >= logo_y+band && y < logo_y+logo_h-band &&
x >= logo_x+band && x < logo_x+logo_w-band) {
*xdst = interp;
} else {
- dist = 0;
+ unsigned dist = 0;
+
if (x < logo_x+band)
dist = FFMAX(dist, logo_x-x+band);
else if (x >= logo_x+logo_w-band)
@@ -143,14 +157,14 @@ typedef struct {
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption delogo_options[]= {
- {"x", "set logo x position", OFFSET(x), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS},
- {"y", "set logo y position", OFFSET(y), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS},
- {"w", "set logo width", OFFSET(w), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS},
- {"h", "set logo height", OFFSET(h), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS},
- {"band", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, {.i64 = 4}, -1, INT_MAX, FLAGS},
- {"t", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, {.i64 = 4}, -1, INT_MAX, FLAGS},
- {"show", "show delogo area", OFFSET(show), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS},
- {NULL},
+ { "x", "set logo x position", OFFSET(x), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
+ { "y", "set logo y position", OFFSET(y), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
+ { "w", "set logo width", OFFSET(w), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
+ { "h", "set logo height", OFFSET(h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
+ { "band", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 4 }, 1, INT_MAX, FLAGS },
+ { "t", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 4 }, 1, INT_MAX, FLAGS },
+ { "show", "show delogo area", OFFSET(show), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(delogo);
@@ -168,13 +182,13 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
- DelogoContext *delogo = ctx->priv;
+ DelogoContext *s = ctx->priv;
#define CHECK_UNSET_OPT(opt) \
- if (delogo->opt == -1) { \
- av_log(delogo, AV_LOG_ERROR, "Option %s was not set.\n", #opt); \
+ if (s->opt == -1) { \
+ av_log(s, AV_LOG_ERROR, "Option %s was not set.\n", #opt); \
return AVERROR(EINVAL); \
}
CHECK_UNSET_OPT(x);
@@ -182,25 +196,20 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
CHECK_UNSET_OPT(w);
CHECK_UNSET_OPT(h);
- if (delogo->band < 0 || delogo->show) {
- delogo->show = 1;
- delogo->band = 4;
- }
-
av_log(ctx, AV_LOG_VERBOSE, "x:%d y:%d, w:%d h:%d band:%d show:%d\n",
- delogo->x, delogo->y, delogo->w, delogo->h, delogo->band, delogo->show);
+ s->x, s->y, s->w, s->h, s->band, s->show);
- delogo->w += delogo->band*2;
- delogo->h += delogo->band*2;
- delogo->x -= delogo->band;
- delogo->y -= delogo->band;
+ s->w += s->band*2;
+ s->h += s->band*2;
+ s->x -= s->band;
+ s->y -= s->band;
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
- DelogoContext *delogo = inlink->dst->priv;
+ DelogoContext *s = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
AVFrame *out;
@@ -208,6 +217,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
int vsub0 = desc->log2_chroma_h;
int direct = 0;
int plane;
+ AVRational sar;
if (av_frame_is_writable(in)) {
direct = 1;
@@ -222,17 +232,26 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
av_frame_copy_props(out, in);
}
- for (plane = 0; plane < 4 && in->data[plane]; plane++) {
+ sar = in->sample_aspect_ratio;
+ /* Assume square pixels if SAR is unknown */
+ if (!sar.num)
+ sar.num = sar.den = 1;
+
+ for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
int hsub = plane == 1 || plane == 2 ? hsub0 : 0;
int vsub = plane == 1 || plane == 2 ? vsub0 : 0;
apply_delogo(out->data[plane], out->linesize[plane],
in ->data[plane], in ->linesize[plane],
- inlink->w>>hsub, inlink->h>>vsub,
- delogo->x>>hsub, delogo->y>>vsub,
- delogo->w>>hsub, delogo->h>>vsub,
- delogo->band>>FFMIN(hsub, vsub),
- delogo->show, direct);
+ FF_CEIL_RSHIFT(inlink->w, hsub),
+ FF_CEIL_RSHIFT(inlink->h, vsub),
+ sar, s->x>>hsub, s->y>>vsub,
+ /* Up and left borders were rounded down, inject lost bits
+ * into width and height to avoid error accumulation */
+ FF_CEIL_RSHIFT(s->w + (s->x & ((1<<hsub)-1)), hsub),
+ FF_CEIL_RSHIFT(s->h + (s->y & ((1<<vsub)-1)), vsub),
+ s->band>>FFMIN(hsub, vsub),
+ s->show, direct);
}
if (!direct)
@@ -243,10 +262,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
static const AVFilterPad avfilter_vf_delogo_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -259,17 +277,14 @@ static const AVFilterPad avfilter_vf_delogo_outputs[] = {
{ NULL }
};
-static const char *const shorthand[] = { "x", "y", "w", "h", "band", NULL };
-
-AVFilter avfilter_vf_delogo = {
+AVFilter ff_vf_delogo = {
.name = "delogo",
.description = NULL_IF_CONFIG_SMALL("Remove logo from input video."),
.priv_size = sizeof(DelogoContext),
+ .priv_class = &delogo_class,
.init = init,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_delogo_inputs,
- .outputs = avfilter_vf_delogo_outputs,
- .priv_class = &delogo_class,
- .shorthand = shorthand,
+ .inputs = avfilter_vf_delogo_inputs,
+ .outputs = avfilter_vf_delogo_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_deshake.c b/ffmpeg/libavfilter/vf_deshake.c
index 138c25d..1d62c44 100644
--- a/ffmpeg/libavfilter/vf_deshake.c
+++ b/ffmpeg/libavfilter/vf_deshake.c
@@ -59,65 +59,24 @@
#include "libavutil/pixdesc.h"
#include "libavcodec/dsputil.h"
-#include "transform.h"
+#include "deshake.h"
+#include "deshake_opencl.h"
#define CHROMA_WIDTH(link) -((-link->w) >> av_pix_fmt_desc_get(link->format)->log2_chroma_w)
#define CHROMA_HEIGHT(link) -((-link->h) >> av_pix_fmt_desc_get(link->format)->log2_chroma_h)
-enum SearchMethod {
- EXHAUSTIVE, ///< Search all possible positions
- SMART_EXHAUSTIVE, ///< Search most possible positions (faster)
- SEARCH_COUNT
-};
-
-typedef struct {
- int x; ///< Horizontal shift
- int y; ///< Vertical shift
-} IntMotionVector;
-
-typedef struct {
- double x; ///< Horizontal shift
- double y; ///< Vertical shift
-} MotionVector;
-
-typedef struct {
- MotionVector vector; ///< Motion vector
- double angle; ///< Angle of rotation
- double zoom; ///< Zoom percentage
-} Transform;
-
-typedef struct {
- const AVClass *class;
- AVFrame *ref; ///< Previous frame
- int rx; ///< Maximum horizontal shift
- int ry; ///< Maximum vertical shift
- int edge; ///< Edge fill method
- int blocksize; ///< Size of blocks to compare
- int contrast; ///< Contrast threshold
- int search; ///< Motion search method
- AVCodecContext *avctx;
- DSPContext c; ///< Context providing optimized SAD methods
- Transform last; ///< Transform from last frame
- int refcount; ///< Number of reference frames (defines averaging window)
- FILE *fp;
- Transform avg;
- int cw; ///< Crop motion search to this box
- int ch;
- int cx;
- int cy;
- char *filename; ///< Motion search detailed log filename
-} DeshakeContext;
-
#define OFFSET(x) offsetof(DeshakeContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+#define MAX_R 64
+
static const AVOption deshake_options[] = {
{ "x", "set x for the rectangular search area", OFFSET(cx), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
{ "y", "set y for the rectangular search area", OFFSET(cy), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
{ "w", "set width for the rectangular search area", OFFSET(cw), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
{ "h", "set height for the rectangular search area", OFFSET(ch), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
- { "rx", "set x for the rectangular search area", OFFSET(rx), AV_OPT_TYPE_INT, {.i64=16}, 0, 64, .flags = FLAGS },
- { "ry", "set y for the rectangular search area", OFFSET(ry), AV_OPT_TYPE_INT, {.i64=16}, 0, 64, .flags = FLAGS },
+ { "rx", "set x for the rectangular search area", OFFSET(rx), AV_OPT_TYPE_INT, {.i64=16}, 0, MAX_R, .flags = FLAGS },
+ { "ry", "set y for the rectangular search area", OFFSET(ry), AV_OPT_TYPE_INT, {.i64=16}, 0, MAX_R, .flags = FLAGS },
{ "edge", "set edge mode", OFFSET(edge), AV_OPT_TYPE_INT, {.i64=FILL_MIRROR}, FILL_BLANK, FILL_COUNT-1, FLAGS, "edge"},
{ "blank", "fill zeroes at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_BLANK}, INT_MIN, INT_MAX, FLAGS, "edge" },
{ "original", "original image at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_ORIGINAL}, INT_MIN, INT_MAX, FLAGS, "edge" },
@@ -129,6 +88,7 @@ static const AVOption deshake_options[] = {
{ "exhaustive", "exhaustive search", 0, AV_OPT_TYPE_CONST, {.i64=EXHAUSTIVE}, INT_MIN, INT_MAX, FLAGS, "smode" },
{ "less", "less exhaustive search", 0, AV_OPT_TYPE_CONST, {.i64=SMART_EXHAUSTIVE}, INT_MIN, INT_MAX, FLAGS, "smode" },
{ "filename", "set motion search detailed log file name", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "opencl", "use OpenCL filtering capabilities", OFFSET(opencl), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, .flags = FLAGS },
{ NULL }
};
@@ -190,8 +150,8 @@ static void find_block_motion(DeshakeContext *deshake, uint8_t *src1,
}
} else if (deshake->search == SMART_EXHAUSTIVE) {
// Compare every other possible position and find the best match
- for (y = -deshake->ry + 1; y < deshake->ry - 2; y += 2) {
- for (x = -deshake->rx + 1; x < deshake->rx - 2; x += 2) {
+ for (y = -deshake->ry + 1; y < deshake->ry; y += 2) {
+ for (x = -deshake->rx + 1; x < deshake->rx; x += 2) {
diff = CMP(cx - x, cy - y);
if (diff < smallest) {
smallest = diff;
@@ -237,12 +197,12 @@ static void find_block_motion(DeshakeContext *deshake, uint8_t *src1,
static int block_contrast(uint8_t *src, int x, int y, int stride, int blocksize)
{
int highest = 0;
- int lowest = 0;
+ int lowest = 255;
int i, j, pos;
for (i = 0; i <= blocksize * 2; i++) {
// We use a width of 16 here to match the libavcodec sad functions
- for (j = 0; i <= 15; i++) {
+ for (j = 0; j <= 15; j++) {
pos = (y - i) * stride + (x - j);
if (src[pos] < lowest)
lowest = src[pos];
@@ -284,7 +244,7 @@ static void find_motion(DeshakeContext *deshake, uint8_t *src1, uint8_t *src2,
{
int x, y;
IntMotionVector mv = {0, 0};
- int counts[128][128];
+ int counts[2*MAX_R+1][2*MAX_R+1];
int count_max_value = 0;
int contrast;
@@ -360,28 +320,46 @@ static void find_motion(DeshakeContext *deshake, uint8_t *src1, uint8_t *src2,
av_free(angles);
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static int deshake_transform_c(AVFilterContext *ctx,
+ int width, int height, int cw, int ch,
+ const float *matrix_y, const float *matrix_uv,
+ enum InterpolateMethod interpolate,
+ enum FillMethod fill, AVFrame *in, AVFrame *out)
+{
+ int i = 0, ret = 0;
+ const float *matrixs[3];
+ int plane_w[3], plane_h[3];
+ matrixs[0] = matrix_y;
+ matrixs[1] = matrixs[2] = matrix_uv;
+ plane_w[0] = width;
+ plane_w[1] = plane_w[2] = cw;
+ plane_h[0] = height;
+ plane_h[1] = plane_h[2] = ch;
+
+ for (i = 0; i < 3; i++) {
+ // Transform the luma and chroma planes
+ ret = avfilter_transform(in->data[i], out->data[i], in->linesize[i], out->linesize[i],
+ plane_w[i], plane_h[i], matrixs[i], interpolate, fill);
+ if (ret < 0)
+ return ret;
+ }
+ return ret;
+}
+
+static av_cold int init(AVFilterContext *ctx)
{
int ret;
DeshakeContext *deshake = ctx->priv;
- static const char *shorthand[] = {
- "x", "y", "w", "h", "rx", "ry", "edge",
- "blocksize", "contrast", "search", "filename",
- NULL
- };
deshake->refcount = 20; // XXX: add to options?
-
- deshake->class = &deshake_class;
- av_opt_set_defaults(deshake);
-
- ret = av_opt_set_from_string(deshake, args, shorthand, "=", ":");
- if (ret < 0)
- return ret;
-
deshake->blocksize /= 2;
deshake->blocksize = av_clip(deshake->blocksize, 4, 128);
+ if (deshake->rx % 16) {
+ av_log(ctx, AV_LOG_ERROR, "rx must be a multiple of 16\n");
+ return AVERROR_PATCHWELCOME;
+ }
+
if (deshake->filename)
deshake->fp = fopen(deshake->filename, "w");
if (deshake->fp)
@@ -393,7 +371,18 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
deshake->cw += deshake->cx - (deshake->cx & ~15);
deshake->cx &= ~15;
}
+ deshake->transform = deshake_transform_c;
+ if (!CONFIG_OPENCL && deshake->opencl) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL support was not enabled in this build, cannot be selected\n");
+ return AVERROR(EINVAL);
+ }
+ if (CONFIG_OPENCL && deshake->opencl) {
+ deshake->transform = ff_opencl_transform;
+ ret = ff_opencl_deshake_init(ctx);
+ if (ret < 0)
+ return ret;
+ }
av_log(ctx, AV_LOG_VERBOSE, "cx: %d, cy: %d, cw: %d, ch: %d, rx: %d, ry: %d, edge: %d blocksize: %d contrast: %d search: %d\n",
deshake->cx, deshake->cy, deshake->cw, deshake->ch,
deshake->rx, deshake->ry, deshake->edge, deshake->blocksize * 2, deshake->contrast, deshake->search);
@@ -425,7 +414,7 @@ static int config_props(AVFilterLink *link)
deshake->last.zoom = 0;
deshake->avctx = avcodec_alloc_context3(NULL);
- dsputil_init(&deshake->c, deshake->avctx);
+ avpriv_dsputil_init(&deshake->c, deshake->avctx);
return 0;
}
@@ -433,14 +422,15 @@ static int config_props(AVFilterLink *link)
static av_cold void uninit(AVFilterContext *ctx)
{
DeshakeContext *deshake = ctx->priv;
-
+ if (CONFIG_OPENCL && deshake->opencl) {
+ ff_opencl_deshake_uninit(ctx);
+ }
av_frame_free(&deshake->ref);
if (deshake->fp)
fclose(deshake->fp);
if (deshake->avctx)
avcodec_close(deshake->avctx);
av_freep(&deshake->avctx);
- av_opt_free(deshake);
}
static int filter_frame(AVFilterLink *link, AVFrame *in)
@@ -449,9 +439,10 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
AVFilterLink *outlink = link->dst->outputs[0];
AVFrame *out;
Transform t = {{0},0}, orig = {{0},0};
- float matrix[9];
+ float matrix_y[9], matrix_uv[9];
float alpha = 2.0 / deshake->refcount;
char tmp[256];
+ int ret = 0;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
@@ -460,6 +451,12 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
}
av_frame_copy_props(out, in);
+ if (CONFIG_OPENCL && deshake->opencl) {
+ ret = ff_opencl_deshake_process_inout_buf(link->dst,in, out);
+ if (ret < 0)
+ return ret;
+ }
+
if (deshake->cx < 0 || deshake->cy < 0 || deshake->cw < 0 || deshake->ch < 0) {
// Find the most likely global motion for the current frame
find_motion(deshake, (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0], in->data[0], link->w, link->h, in->linesize[0], &t);
@@ -532,21 +529,19 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
deshake->last.zoom = t.zoom;
// Generate a luma transformation matrix
- avfilter_get_matrix(t.vector.x, t.vector.y, t.angle, 1.0 + t.zoom / 100.0, matrix);
-
- // Transform the luma plane
- avfilter_transform(in->data[0], out->data[0], in->linesize[0], out->linesize[0], link->w, link->h, matrix, INTERPOLATE_BILINEAR, deshake->edge);
-
+ avfilter_get_matrix(t.vector.x, t.vector.y, t.angle, 1.0 + t.zoom / 100.0, matrix_y);
// Generate a chroma transformation matrix
- avfilter_get_matrix(t.vector.x / (link->w / CHROMA_WIDTH(link)), t.vector.y / (link->h / CHROMA_HEIGHT(link)), t.angle, 1.0 + t.zoom / 100.0, matrix);
-
- // Transform the chroma planes
- avfilter_transform(in->data[1], out->data[1], in->linesize[1], out->linesize[1], CHROMA_WIDTH(link), CHROMA_HEIGHT(link), matrix, INTERPOLATE_BILINEAR, deshake->edge);
- avfilter_transform(in->data[2], out->data[2], in->linesize[2], out->linesize[2], CHROMA_WIDTH(link), CHROMA_HEIGHT(link), matrix, INTERPOLATE_BILINEAR, deshake->edge);
+ avfilter_get_matrix(t.vector.x / (link->w / CHROMA_WIDTH(link)), t.vector.y / (link->h / CHROMA_HEIGHT(link)), t.angle, 1.0 + t.zoom / 100.0, matrix_uv);
+ // Transform the luma and chroma planes
+ ret = deshake->transform(link->dst, link->w, link->h, CHROMA_WIDTH(link), CHROMA_HEIGHT(link),
+ matrix_y, matrix_uv, INTERPOLATE_BILINEAR, deshake->edge, in, out);
// Cleanup the old reference frame
av_frame_free(&deshake->ref);
+ if (ret < 0)
+ return ret;
+
// Store the current frame as the reference frame for calculating the
// motion of the next frame
deshake->ref = in;
@@ -572,7 +567,7 @@ static const AVFilterPad deshake_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_deshake = {
+AVFilter ff_vf_deshake = {
.name = "deshake",
.description = NULL_IF_CONFIG_SMALL("Stabilize shaky video."),
.priv_size = sizeof(DeshakeContext),
diff --git a/ffmpeg/libavfilter/vf_drawbox.c b/ffmpeg/libavfilter/vf_drawbox.c
index b831182..dd884a7 100644
--- a/ffmpeg/libavfilter/vf_drawbox.c
+++ b/ffmpeg/libavfilter/vf_drawbox.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2008 Affine Systems, Inc (Michael Sullivan, Bobby Impollonia)
+ * Copyright (c) 2013 Andrey Utkin <andrey.krieger.utkin gmail com>
*
* This file is part of FFmpeg.
*
@@ -20,13 +21,14 @@
/**
* @file
- * Box drawing filter. Also a nice template for a filter that needs to
- * write in the input frame.
+ * Box and grid drawing filters. Also a nice template for a filter
+ * that needs to write in the input frame.
*/
#include "libavutil/colorspace.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
+#include "libavutil/eval.h"
#include "libavutil/pixdesc.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
@@ -34,51 +36,66 @@
#include "internal.h"
#include "video.h"
+static const char *const var_names[] = {
+ "dar",
+ "hsub", "vsub",
+ "in_h", "ih", ///< height of the input video
+ "in_w", "iw", ///< width of the input video
+ "sar",
+ "x",
+ "y",
+ "h", ///< height of the rendered box
+ "w", ///< width of the rendered box
+ "t",
+ NULL
+};
+
enum { Y, U, V, A };
+enum var_name {
+ VAR_DAR,
+ VAR_HSUB, VAR_VSUB,
+ VAR_IN_H, VAR_IH,
+ VAR_IN_W, VAR_IW,
+ VAR_SAR,
+ VAR_X,
+ VAR_Y,
+ VAR_H,
+ VAR_W,
+ VAR_T,
+ VARS_NB
+};
+
typedef struct {
const AVClass *class;
- int x, y, w, h, thickness;
+ int x, y, w, h;
+ int thickness;
char *color_str;
unsigned char yuv_color[4];
int invert_color; ///< invert luma color
int vsub, hsub; ///< chroma subsampling
+ char *x_expr, *y_expr; ///< expression for x and y
+ char *w_expr, *h_expr; ///< expression for width and height
+ char *t_expr; ///< expression for thickness
} DrawBoxContext;
-#define OFFSET(x) offsetof(DrawBoxContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
-
-static const AVOption drawbox_options[] = {
- { "x", "set the box top-left corner x position", OFFSET(x), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX, FLAGS },
- { "y", "set the box top-left corner y position", OFFSET(y), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX, FLAGS },
- { "width", "set the box width", OFFSET(w), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
- { "w", "set the box width", OFFSET(w), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
- { "height", "set the box height", OFFSET(h), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
- { "h", "set the box height", OFFSET(h), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
- { "color", "set the box edge color", OFFSET(color_str), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "c", "set the box edge color", OFFSET(color_str), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "thickness", "set the box maximum thickness", OFFSET(thickness), AV_OPT_TYPE_INT, {.i64=4}, 0, INT_MAX, FLAGS },
- { "t", "set the box maximum thickness", OFFSET(thickness), AV_OPT_TYPE_INT, {.i64=4}, 0, INT_MAX, FLAGS },
- {NULL},
-};
+static const int NUM_EXPR_EVALS = 5;
-AVFILTER_DEFINE_CLASS(drawbox);
-
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
- DrawBoxContext *drawbox = ctx->priv;
+ DrawBoxContext *s = ctx->priv;
uint8_t rgba_color[4];
- if (!strcmp(drawbox->color_str, "invert"))
- drawbox->invert_color = 1;
- else if (av_parse_color(rgba_color, drawbox->color_str, -1, ctx) < 0)
+ if (!strcmp(s->color_str, "invert"))
+ s->invert_color = 1;
+ else if (av_parse_color(rgba_color, s->color_str, -1, ctx) < 0)
return AVERROR(EINVAL);
- if (!drawbox->invert_color) {
- drawbox->yuv_color[Y] = RGB_TO_Y_CCIR(rgba_color[0], rgba_color[1], rgba_color[2]);
- drawbox->yuv_color[U] = RGB_TO_U_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
- drawbox->yuv_color[V] = RGB_TO_V_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
- drawbox->yuv_color[A] = rgba_color[3];
+ if (!s->invert_color) {
+ s->yuv_color[Y] = RGB_TO_Y_CCIR(rgba_color[0], rgba_color[1], rgba_color[2]);
+ s->yuv_color[U] = RGB_TO_U_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
+ s->yuv_color[V] = RGB_TO_V_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
+ s->yuv_color[A] = rgba_color[3];
}
return 0;
@@ -100,49 +117,112 @@ static int query_formats(AVFilterContext *ctx)
static int config_input(AVFilterLink *inlink)
{
- DrawBoxContext *drawbox = inlink->dst->priv;
+ AVFilterContext *ctx = inlink->dst;
+ DrawBoxContext *s = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ double var_values[VARS_NB], res;
+ char *expr;
+ int ret;
+ int i;
+
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
+
+ var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
+ var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
+ var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? av_q2d(inlink->sample_aspect_ratio) : 1;
+ var_values[VAR_DAR] = (double)inlink->w / inlink->h * var_values[VAR_SAR];
+ var_values[VAR_HSUB] = s->hsub;
+ var_values[VAR_VSUB] = s->vsub;
+ var_values[VAR_X] = NAN;
+ var_values[VAR_Y] = NAN;
+ var_values[VAR_H] = NAN;
+ var_values[VAR_W] = NAN;
+ var_values[VAR_T] = NAN;
+
+ for (i = 0; i <= NUM_EXPR_EVALS; i++) {
+ /* evaluate expressions, fail on last iteration */
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
+ goto fail;
+ s->x = var_values[VAR_X] = res;
+
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->y_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
+ goto fail;
+ s->y = var_values[VAR_Y] = res;
+
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
+ goto fail;
+ s->w = var_values[VAR_W] = res;
+
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
+ goto fail;
+ s->h = var_values[VAR_H] = res;
+
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->t_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
+ goto fail;
+ s->thickness = var_values[VAR_T] = res;
+ }
- drawbox->hsub = desc->log2_chroma_w;
- drawbox->vsub = desc->log2_chroma_h;
+ /* if w or h are zero, use the input w/h */
+ s->w = (s->w > 0) ? s->w : inlink->w;
+ s->h = (s->h > 0) ? s->h : inlink->h;
- if (drawbox->w == 0) drawbox->w = inlink->w;
- if (drawbox->h == 0) drawbox->h = inlink->h;
+ /* sanity check width and height */
+ if (s->w < 0 || s->h < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Size values less than 0 are not acceptable.\n");
+ return AVERROR(EINVAL);
+ }
- av_log(inlink->dst, AV_LOG_VERBOSE, "x:%d y:%d w:%d h:%d color:0x%02X%02X%02X%02X\n",
- drawbox->x, drawbox->y, drawbox->w, drawbox->h,
- drawbox->yuv_color[Y], drawbox->yuv_color[U], drawbox->yuv_color[V], drawbox->yuv_color[A]);
+ av_log(ctx, AV_LOG_VERBOSE, "x:%d y:%d w:%d h:%d color:0x%02X%02X%02X%02X\n",
+ s->x, s->y, s->w, s->h,
+ s->yuv_color[Y], s->yuv_color[U], s->yuv_color[V], s->yuv_color[A]);
return 0;
+
+fail:
+ av_log(ctx, AV_LOG_ERROR,
+ "Error when evaluating the expression '%s'.\n",
+ expr);
+ return ret;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
- DrawBoxContext *drawbox = inlink->dst->priv;
- int plane, x, y, xb = drawbox->x, yb = drawbox->y;
+ DrawBoxContext *s = inlink->dst->priv;
+ int plane, x, y, xb = s->x, yb = s->y;
unsigned char *row[4];
- for (y = FFMAX(yb, 0); y < frame->height && y < (yb + drawbox->h); y++) {
+ for (y = FFMAX(yb, 0); y < frame->height && y < (yb + s->h); y++) {
row[0] = frame->data[0] + y * frame->linesize[0];
for (plane = 1; plane < 3; plane++)
row[plane] = frame->data[plane] +
- frame->linesize[plane] * (y >> drawbox->vsub);
+ frame->linesize[plane] * (y >> s->vsub);
- if (drawbox->invert_color) {
- for (x = FFMAX(xb, 0); x < xb + drawbox->w && x < frame->width; x++)
- if ((y - yb < drawbox->thickness-1) || (yb + drawbox->h - y < drawbox->thickness) ||
- (x - xb < drawbox->thickness-1) || (xb + drawbox->w - x < drawbox->thickness))
+ if (s->invert_color) {
+ for (x = FFMAX(xb, 0); x < xb + s->w && x < frame->width; x++)
+ if ((y - yb < s->thickness) || (yb + s->h - 1 - y < s->thickness) ||
+ (x - xb < s->thickness) || (xb + s->w - 1 - x < s->thickness))
row[0][x] = 0xff - row[0][x];
} else {
- for (x = FFMAX(xb, 0); x < xb + drawbox->w && x < frame->width; x++) {
- double alpha = (double)drawbox->yuv_color[A] / 255;
-
- if ((y - yb < drawbox->thickness-1) || (yb + drawbox->h - y < drawbox->thickness) ||
- (x - xb < drawbox->thickness-1) || (xb + drawbox->w - x < drawbox->thickness)) {
- row[0][x ] = (1 - alpha) * row[0][x ] + alpha * drawbox->yuv_color[Y];
- row[1][x >> drawbox->hsub] = (1 - alpha) * row[1][x >> drawbox->hsub] + alpha * drawbox->yuv_color[U];
- row[2][x >> drawbox->hsub] = (1 - alpha) * row[2][x >> drawbox->hsub] + alpha * drawbox->yuv_color[V];
+ for (x = FFMAX(xb, 0); x < xb + s->w && x < frame->width; x++) {
+ double alpha = (double)s->yuv_color[A] / 255;
+
+ if ((y - yb < s->thickness) || (yb + s->h - 1 - y < s->thickness) ||
+ (x - xb < s->thickness) || (xb + s->w - 1 - x < s->thickness)) {
+ row[0][x ] = (1 - alpha) * row[0][x ] + alpha * s->yuv_color[Y];
+ row[1][x >> s->hsub] = (1 - alpha) * row[1][x >> s->hsub] + alpha * s->yuv_color[U];
+ row[2][x >> s->hsub] = (1 - alpha) * row[2][x >> s->hsub] + alpha * s->yuv_color[V];
}
}
}
@@ -151,19 +231,39 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
-static const AVFilterPad avfilter_vf_drawbox_inputs[] = {
+#define OFFSET(x) offsetof(DrawBoxContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+#if CONFIG_DRAWBOX_FILTER
+
+static const AVOption drawbox_options[] = {
+ { "x", "set horizontal position of the left box edge", OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "y", "set vertical position of the top box edge", OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "width", "set width of the box", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "w", "set width of the box", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "height", "set height of the box", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "h", "set height of the box", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "color", "set color of the box", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "c", "set color of the box", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "thickness", "set the box thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, { .str="3" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "t", "set the box thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, { .str="3" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(drawbox);
+
+static const AVFilterPad drawbox_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_input,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
- .needs_writable = 1,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ .needs_writable = 1,
},
{ NULL }
};
-static const AVFilterPad avfilter_vf_drawbox_outputs[] = {
+static const AVFilterPad drawbox_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
@@ -171,17 +271,122 @@ static const AVFilterPad avfilter_vf_drawbox_outputs[] = {
{ NULL }
};
-static const char *const shorthand[] = { "x", "y", "w", "h", "color", "thickness", NULL };
+AVFilter ff_vf_drawbox = {
+ .name = "drawbox",
+ .description = NULL_IF_CONFIG_SMALL("Draw a colored box on the input video."),
+ .priv_size = sizeof(DrawBoxContext),
+ .priv_class = &drawbox_class,
+ .init = init,
+ .query_formats = query_formats,
+ .inputs = drawbox_inputs,
+ .outputs = drawbox_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
+#endif /* CONFIG_DRAWBOX_FILTER */
+
+#if CONFIG_DRAWGRID_FILTER
+static av_pure av_always_inline int pixel_belongs_to_grid(DrawBoxContext *drawgrid, int x, int y)
+{
+ // x is horizontal (width) coord,
+ // y is vertical (height) coord
+ int x_modulo;
+ int y_modulo;
+
+ // Abstract from the offset
+ x -= drawgrid->x;
+ y -= drawgrid->y;
+
+ x_modulo = x % drawgrid->w;
+ y_modulo = y % drawgrid->h;
-AVFilter avfilter_vf_drawbox = {
- .name = "drawbox",
- .description = NULL_IF_CONFIG_SMALL("Draw a colored box on the input video."),
- .priv_size = sizeof(DrawBoxContext),
- .init = init,
+ // If x or y got negative, fix values to preserve logics
+ if (x_modulo < 0)
+ x_modulo += drawgrid->w;
+ if (y_modulo < 0)
+ y_modulo += drawgrid->h;
- .query_formats = query_formats,
- .inputs = avfilter_vf_drawbox_inputs,
- .outputs = avfilter_vf_drawbox_outputs,
- .priv_class = &drawbox_class,
- .shorthand = shorthand,
+ return x_modulo < drawgrid->thickness // Belongs to vertical line
+ || y_modulo < drawgrid->thickness; // Belongs to horizontal line
+}
+
+static int drawgrid_filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ DrawBoxContext *drawgrid = inlink->dst->priv;
+ int plane, x, y;
+ uint8_t *row[4];
+
+ for (y = 0; y < frame->height; y++) {
+ row[0] = frame->data[0] + y * frame->linesize[0];
+
+ for (plane = 1; plane < 3; plane++)
+ row[plane] = frame->data[plane] +
+ frame->linesize[plane] * (y >> drawgrid->vsub);
+
+ if (drawgrid->invert_color) {
+ for (x = 0; x < frame->width; x++)
+ if (pixel_belongs_to_grid(drawgrid, x, y))
+ row[0][x] = 0xff - row[0][x];
+ } else {
+ for (x = 0; x < frame->width; x++) {
+ double alpha = (double)drawgrid->yuv_color[A] / 255;
+
+ if (pixel_belongs_to_grid(drawgrid, x, y)) {
+ row[0][x ] = (1 - alpha) * row[0][x ] + alpha * drawgrid->yuv_color[Y];
+ row[1][x >> drawgrid->hsub] = (1 - alpha) * row[1][x >> drawgrid->hsub] + alpha * drawgrid->yuv_color[U];
+ row[2][x >> drawgrid->hsub] = (1 - alpha) * row[2][x >> drawgrid->hsub] + alpha * drawgrid->yuv_color[V];
+ }
+ }
+ }
+ }
+
+ return ff_filter_frame(inlink->dst->outputs[0], frame);
+}
+
+static const AVOption drawgrid_options[] = {
+ { "x", "set horizontal offset", OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "y", "set vertical offset", OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "width", "set width of grid cell", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "w", "set width of grid cell", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "height", "set height of grid cell", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "h", "set height of grid cell", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "color", "set color of the grid", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "c", "set color of the grid", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "thickness", "set grid line thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, {.str="1"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "t", "set grid line thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, {.str="1"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(drawgrid);
+
+static const AVFilterPad drawgrid_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = drawgrid_filter_frame,
+ .needs_writable = 1,
+ },
+ { NULL }
+};
+
+static const AVFilterPad drawgrid_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_drawgrid = {
+ .name = "drawgrid",
+ .description = NULL_IF_CONFIG_SMALL("Draw a colored grid on the input video."),
+ .priv_size = sizeof(DrawBoxContext),
+ .priv_class = &drawgrid_class,
+ .init = init,
+ .query_formats = query_formats,
+ .inputs = drawgrid_inputs,
+ .outputs = drawgrid_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
+
+#endif /* CONFIG_DRAWGRID_FILTER */
diff --git a/ffmpeg/libavfilter/vf_drawtext.c b/ffmpeg/libavfilter/vf_drawtext.c
index 10dee14..91b8218 100644
--- a/ffmpeg/libavfilter/vf_drawtext.c
+++ b/ffmpeg/libavfilter/vf_drawtext.c
@@ -48,7 +48,6 @@
#include "video.h"
#include <ft2build.h>
-#include <freetype/config/ftheader.h>
#include FT_FREETYPE_H
#include FT_GLYPH_H
#if CONFIG_FONTCONFIG
@@ -72,6 +71,7 @@ static const char *const var_names[] = {
"text_w", "tw", ///< width of the rendered text
"x",
"y",
+ "pict_type",
NULL
};
@@ -108,6 +108,7 @@ enum var_name {
VAR_TEXT_W, VAR_TW,
VAR_X,
VAR_Y,
+ VAR_PICT_TYPE,
VAR_VARS_NB
};
@@ -134,9 +135,6 @@ typedef struct {
int max_glyph_h; ///< max glyph height
int shadowx, shadowy;
unsigned int fontsize; ///< font size to use
- char *fontcolor_string; ///< font color as string
- char *boxcolor_string; ///< box color as string
- char *shadowcolor_string; ///< shadow color as string
short int draw_box; ///< draw box around text - true or false
int use_kerning; ///< font kerning is used - true/false
@@ -156,69 +154,75 @@ typedef struct {
AVExpr *x_pexpr, *y_pexpr; ///< parsed expressions for x and y
int64_t basetime; ///< base pts time in the real world for display
double var_values[VAR_VARS_NB];
+#if FF_API_DRAWTEXT_OLD_TIMELINE
char *draw_expr; ///< expression for draw
AVExpr *draw_pexpr; ///< parsed expression for draw
int draw; ///< set to zero to prevent drawing
+#endif
AVLFG prng; ///< random
char *tc_opt_string; ///< specified timecode option string
AVRational tc_rate; ///< frame rate for timecode
AVTimecode tc; ///< timecode context
int tc24hmax; ///< 1 if timecode is wrapped to 24 hours, 0 otherwise
- int frame_id;
int reload; ///< reload text file for each frame
+ int start_number; ///< starting frame number for n/frame_num var
+ AVDictionary *metadata;
} DrawTextContext;
#define OFFSET(x) offsetof(DrawTextContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption drawtext_options[]= {
-{"fontfile", "set font file", OFFSET(fontfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
-{"text", "set text", OFFSET(text), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
-{"textfile", "set text file", OFFSET(textfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
-{"fontcolor", "set foreground color", OFFSET(fontcolor_string), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS},
-{"boxcolor", "set box color", OFFSET(boxcolor_string), AV_OPT_TYPE_STRING, {.str="white"}, CHAR_MIN, CHAR_MAX, FLAGS},
-{"shadowcolor", "set shadow color", OFFSET(shadowcolor_string), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS},
-{"box", "set box", OFFSET(draw_box), AV_OPT_TYPE_INT, {.i64=0}, 0, 1 , FLAGS},
-{"fontsize", "set font size", OFFSET(fontsize), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX , FLAGS},
-{"x", "set x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, FLAGS},
-{"y", "set y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, FLAGS},
-{"shadowx", "set x", OFFSET(shadowx), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX , FLAGS},
-{"shadowy", "set y", OFFSET(shadowy), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX , FLAGS},
-{"tabsize", "set tab size", OFFSET(tabsize), AV_OPT_TYPE_INT, {.i64=4}, 0, INT_MAX , FLAGS},
-{"basetime", "set base time", OFFSET(basetime), AV_OPT_TYPE_INT64, {.i64=AV_NOPTS_VALUE}, INT64_MIN, INT64_MAX , FLAGS},
-{"draw", "if false do not draw", OFFSET(draw_expr), AV_OPT_TYPE_STRING, {.str="1"}, CHAR_MIN, CHAR_MAX, FLAGS},
-
-{"expansion","set the expansion mode", OFFSET(exp_mode), AV_OPT_TYPE_INT, {.i64=EXP_NORMAL}, 0, 2, FLAGS, "expansion"},
-{"none", "set no expansion", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_NONE}, 0, 0, FLAGS, "expansion"},
-{"normal", "set normal expansion", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_NORMAL}, 0, 0, FLAGS, "expansion"},
-{"strftime", "set strftime expansion (deprecated)", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_STRFTIME}, 0, 0, FLAGS, "expansion"},
-
-{"timecode", "set initial timecode", OFFSET(tc_opt_string), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
-{"tc24hmax", "set 24 hours max (timecode only)", OFFSET(tc24hmax), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
-{"timecode_rate", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS},
-{"r", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS},
-{"rate", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS},
-{"reload", "reload text file for each frame", OFFSET(reload), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
-{"fix_bounds", "if true, check and fix text coords to avoid clipping", OFFSET(fix_bounds), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS},
-
-/* FT_LOAD_* flags */
-{"ft_load_flags", "set font loading flags for libfreetype", OFFSET(ft_load_flags), AV_OPT_TYPE_FLAGS, {.i64=FT_LOAD_DEFAULT|FT_LOAD_RENDER}, 0, INT_MAX, FLAGS, "ft_load_flags"},
-{"default", "set default", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_DEFAULT}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"},
-{"no_scale", "set no_scale", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_NO_SCALE}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"},
-{"no_hinting", "set no_hinting", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_NO_HINTING}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"},
-{"render", "set render", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_RENDER}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"},
-{"no_bitmap", "set no_bitmap", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_NO_BITMAP}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"},
-{"vertical_layout", "set vertical_layout", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_VERTICAL_LAYOUT}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"},
-{"force_autohint", "set force_autohint", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_FORCE_AUTOHINT}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"},
-{"crop_bitmap", "set crop_bitmap", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_CROP_BITMAP}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"},
-{"pedantic", "set pedantic", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_PEDANTIC}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"},
-{"ignore_global_advance_width", "set ignore_global_advance_width", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"},
-{"no_recurse", "set no_recurse", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_NO_RECURSE}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"},
-{"ignore_transform", "set ignore_transform", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_IGNORE_TRANSFORM}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"},
-{"monochrome", "set monochrome", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_MONOCHROME}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"},
-{"linear_design", "set linear_design", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_LINEAR_DESIGN}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"},
-{"no_autohint", "set no_autohint", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_NO_AUTOHINT}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"},
-{NULL},
+ {"fontfile", "set font file", OFFSET(fontfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"text", "set text", OFFSET(text), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"textfile", "set text file", OFFSET(textfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"fontcolor", "set foreground color", OFFSET(fontcolor.rgba), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"boxcolor", "set box color", OFFSET(boxcolor.rgba), AV_OPT_TYPE_COLOR, {.str="white"}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"shadowcolor", "set shadow color", OFFSET(shadowcolor.rgba), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"box", "set box", OFFSET(draw_box), AV_OPT_TYPE_INT, {.i64=0}, 0, 1 , FLAGS},
+ {"fontsize", "set font size", OFFSET(fontsize), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX , FLAGS},
+ {"x", "set x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"y", "set y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"shadowx", "set x", OFFSET(shadowx), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX , FLAGS},
+ {"shadowy", "set y", OFFSET(shadowy), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX , FLAGS},
+ {"tabsize", "set tab size", OFFSET(tabsize), AV_OPT_TYPE_INT, {.i64=4}, 0, INT_MAX , FLAGS},
+ {"basetime", "set base time", OFFSET(basetime), AV_OPT_TYPE_INT64, {.i64=AV_NOPTS_VALUE}, INT64_MIN, INT64_MAX , FLAGS},
+#if FF_API_DRAWTEXT_OLD_TIMELINE
+ {"draw", "if false do not draw (deprecated)", OFFSET(draw_expr), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
+#endif
+
+ {"expansion", "set the expansion mode", OFFSET(exp_mode), AV_OPT_TYPE_INT, {.i64=EXP_NORMAL}, 0, 2, FLAGS, "expansion"},
+ {"none", "set no expansion", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_NONE}, 0, 0, FLAGS, "expansion"},
+ {"normal", "set normal expansion", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_NORMAL}, 0, 0, FLAGS, "expansion"},
+ {"strftime", "set strftime expansion (deprecated)", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_STRFTIME}, 0, 0, FLAGS, "expansion"},
+
+ {"timecode", "set initial timecode", OFFSET(tc_opt_string), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"tc24hmax", "set 24 hours max (timecode only)", OFFSET(tc24hmax), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
+ {"timecode_rate", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS},
+ {"r", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS},
+ {"rate", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS},
+ {"reload", "reload text file for each frame", OFFSET(reload), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
+ {"fix_bounds", "if true, check and fix text coords to avoid clipping", OFFSET(fix_bounds), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS},
+ {"start_number", "start frame number for n/frame_num variable", OFFSET(start_number), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS},
+
+ /* FT_LOAD_* flags */
+ { "ft_load_flags", "set font loading flags for libfreetype", OFFSET(ft_load_flags), AV_OPT_TYPE_FLAGS, { .i64 = FT_LOAD_DEFAULT | FT_LOAD_RENDER}, 0, INT_MAX, FLAGS, "ft_load_flags" },
+ { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_DEFAULT }, .flags = FLAGS, .unit = "ft_load_flags" },
+ { "no_scale", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_NO_SCALE }, .flags = FLAGS, .unit = "ft_load_flags" },
+ { "no_hinting", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_NO_HINTING }, .flags = FLAGS, .unit = "ft_load_flags" },
+ { "render", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_RENDER }, .flags = FLAGS, .unit = "ft_load_flags" },
+ { "no_bitmap", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_NO_BITMAP }, .flags = FLAGS, .unit = "ft_load_flags" },
+ { "vertical_layout", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_VERTICAL_LAYOUT }, .flags = FLAGS, .unit = "ft_load_flags" },
+ { "force_autohint", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_FORCE_AUTOHINT }, .flags = FLAGS, .unit = "ft_load_flags" },
+ { "crop_bitmap", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_CROP_BITMAP }, .flags = FLAGS, .unit = "ft_load_flags" },
+ { "pedantic", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_PEDANTIC }, .flags = FLAGS, .unit = "ft_load_flags" },
+ { "ignore_global_advance_width", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH }, .flags = FLAGS, .unit = "ft_load_flags" },
+ { "no_recurse", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_NO_RECURSE }, .flags = FLAGS, .unit = "ft_load_flags" },
+ { "ignore_transform", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_IGNORE_TRANSFORM }, .flags = FLAGS, .unit = "ft_load_flags" },
+ { "monochrome", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_MONOCHROME }, .flags = FLAGS, .unit = "ft_load_flags" },
+ { "linear_design", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_LINEAR_DESIGN }, .flags = FLAGS, .unit = "ft_load_flags" },
+ { "no_autohint", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_NO_AUTOHINT }, .flags = FLAGS, .unit = "ft_load_flags" },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(drawtext);
@@ -259,13 +263,13 @@ static int glyph_cmp(void *key, const void *b)
*/
static int load_glyph(AVFilterContext *ctx, Glyph **glyph_ptr, uint32_t code)
{
- DrawTextContext *dtext = ctx->priv;
+ DrawTextContext *s = ctx->priv;
Glyph *glyph;
struct AVTreeNode *node = NULL;
int ret;
- /* load glyph into dtext->face->glyph */
- if (FT_Load_Char(dtext->face, code, dtext->ft_load_flags))
+ /* load glyph into s->face->glyph */
+ if (FT_Load_Char(s->face, code, s->ft_load_flags))
return AVERROR(EINVAL);
/* save glyph */
@@ -276,15 +280,15 @@ static int load_glyph(AVFilterContext *ctx, Glyph **glyph_ptr, uint32_t code)
}
glyph->code = code;
- if (FT_Get_Glyph(dtext->face->glyph, glyph->glyph)) {
+ if (FT_Get_Glyph(s->face->glyph, glyph->glyph)) {
ret = AVERROR(EINVAL);
goto error;
}
- glyph->bitmap = dtext->face->glyph->bitmap;
- glyph->bitmap_left = dtext->face->glyph->bitmap_left;
- glyph->bitmap_top = dtext->face->glyph->bitmap_top;
- glyph->advance = dtext->face->glyph->advance.x >> 6;
+ glyph->bitmap = s->face->glyph->bitmap;
+ glyph->bitmap_left = s->face->glyph->bitmap_left;
+ glyph->bitmap_top = s->face->glyph->bitmap_top;
+ glyph->advance = s->face->glyph->advance.x >> 6;
/* measure text height to calculate text_height (or the maximum text height) */
FT_Glyph_Get_CBox(*glyph->glyph, ft_glyph_bbox_pixels, &glyph->bbox);
@@ -294,7 +298,7 @@ static int load_glyph(AVFilterContext *ctx, Glyph **glyph_ptr, uint32_t code)
ret = AVERROR(ENOMEM);
goto error;
}
- av_tree_insert(&dtext->glyphs, glyph, glyph_cmp, &node);
+ av_tree_insert(&s->glyphs, glyph, glyph_cmp, &node);
if (glyph_ptr)
*glyph_ptr = glyph;
@@ -311,10 +315,10 @@ error:
static int load_font_file(AVFilterContext *ctx, const char *path, int index,
const char **error)
{
- DrawTextContext *dtext = ctx->priv;
+ DrawTextContext *s = ctx->priv;
int err;
- err = FT_New_Face(dtext->library, path, index, &dtext->face);
+ err = FT_New_Face(s->library, path, index, &s->face);
if (err) {
*error = FT_ERRMSG(err);
return AVERROR(EINVAL);
@@ -325,7 +329,7 @@ static int load_font_file(AVFilterContext *ctx, const char *path, int index,
#if CONFIG_FONTCONFIG
static int load_font_fontconfig(AVFilterContext *ctx, const char **error)
{
- DrawTextContext *dtext = ctx->priv;
+ DrawTextContext *s = ctx->priv;
FcConfig *fontconfig;
FcPattern *pattern, *fpat;
FcResult result = FcResultMatch;
@@ -338,7 +342,7 @@ static int load_font_fontconfig(AVFilterContext *ctx, const char **error)
*error = "impossible to init fontconfig\n";
return AVERROR(EINVAL);
}
- pattern = FcNameParse(dtext->fontfile ? dtext->fontfile :
+ pattern = FcNameParse(s->fontfile ? s->fontfile :
(uint8_t *)(intptr_t)"default");
if (!pattern) {
*error = "could not parse fontconfig pattern";
@@ -361,8 +365,8 @@ static int load_font_fontconfig(AVFilterContext *ctx, const char **error)
return AVERROR(EINVAL);
}
av_log(ctx, AV_LOG_INFO, "Using \"%s\"\n", filename);
- if (!dtext->fontsize)
- dtext->fontsize = size + 0.5;
+ if (!s->fontsize)
+ s->fontsize = size + 0.5;
err = load_font_file(ctx, filename, index, error);
if (err)
return err;
@@ -375,12 +379,12 @@ static int load_font_fontconfig(AVFilterContext *ctx, const char **error)
static int load_font(AVFilterContext *ctx)
{
- DrawTextContext *dtext = ctx->priv;
+ DrawTextContext *s = ctx->priv;
int err;
const char *error = "unknown error\n";
/* load the face, and set up the encoding, which is by default UTF-8 */
- err = load_font_file(ctx, dtext->fontfile, 0, &error);
+ err = load_font_file(ctx, s->fontfile, 0, &error);
if (!err)
return 0;
#if CONFIG_FONTCONFIG
@@ -389,52 +393,52 @@ static int load_font(AVFilterContext *ctx)
return 0;
#endif
av_log(ctx, AV_LOG_ERROR, "Could not load font \"%s\": %s\n",
- dtext->fontfile, error);
+ s->fontfile, error);
return err;
}
static int load_textfile(AVFilterContext *ctx)
{
- DrawTextContext *dtext = ctx->priv;
+ DrawTextContext *s = ctx->priv;
int err;
uint8_t *textbuf;
size_t textbuf_size;
- if ((err = av_file_map(dtext->textfile, &textbuf, &textbuf_size, 0, ctx)) < 0) {
+ if ((err = av_file_map(s->textfile, &textbuf, &textbuf_size, 0, ctx)) < 0) {
av_log(ctx, AV_LOG_ERROR,
"The text file '%s' could not be read or is empty\n",
- dtext->textfile);
+ s->textfile);
return err;
}
- if (!(dtext->text = av_realloc(dtext->text, textbuf_size + 1)))
+ if (!(s->text = av_realloc(s->text, textbuf_size + 1)))
return AVERROR(ENOMEM);
- memcpy(dtext->text, textbuf, textbuf_size);
- dtext->text[textbuf_size] = 0;
+ memcpy(s->text, textbuf, textbuf_size);
+ s->text[textbuf_size] = 0;
av_file_unmap(textbuf, textbuf_size);
return 0;
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
int err;
- DrawTextContext *dtext = ctx->priv;
+ DrawTextContext *s = ctx->priv;
Glyph *glyph;
- dtext->class = &drawtext_class;
- av_opt_set_defaults(dtext);
-
- if ((err = av_set_options_string(dtext, args, "=", ":")) < 0)
- return err;
+#if FF_API_DRAWTEXT_OLD_TIMELINE
+ if (s->draw_expr)
+ av_log(ctx, AV_LOG_WARNING, "'draw' option is deprecated and will be removed soon, "
+ "you are encouraged to use the generic timeline support through the 'enable' option\n");
+#endif
- if (!dtext->fontfile && !CONFIG_FONTCONFIG) {
+ if (!s->fontfile && !CONFIG_FONTCONFIG) {
av_log(ctx, AV_LOG_ERROR, "No font filename provided\n");
return AVERROR(EINVAL);
}
- if (dtext->textfile) {
- if (dtext->text) {
+ if (s->textfile) {
+ if (s->text) {
av_log(ctx, AV_LOG_ERROR,
"Both text and text file provided. Please provide only one\n");
return AVERROR(EINVAL);
@@ -443,45 +447,27 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
return err;
}
- if (dtext->reload && !dtext->textfile)
+ if (s->reload && !s->textfile)
av_log(ctx, AV_LOG_WARNING, "No file to reload\n");
- if (dtext->tc_opt_string) {
- int ret = av_timecode_init_from_string(&dtext->tc, dtext->tc_rate,
- dtext->tc_opt_string, ctx);
+ if (s->tc_opt_string) {
+ int ret = av_timecode_init_from_string(&s->tc, s->tc_rate,
+ s->tc_opt_string, ctx);
if (ret < 0)
return ret;
- if (dtext->tc24hmax)
- dtext->tc.flags |= AV_TIMECODE_FLAG_24HOURSMAX;
- if (!dtext->text)
- dtext->text = av_strdup("");
+ if (s->tc24hmax)
+ s->tc.flags |= AV_TIMECODE_FLAG_24HOURSMAX;
+ if (!s->text)
+ s->text = av_strdup("");
}
- if (!dtext->text) {
+ if (!s->text) {
av_log(ctx, AV_LOG_ERROR,
"Either text, a valid file or a timecode must be provided\n");
return AVERROR(EINVAL);
}
- if ((err = av_parse_color(dtext->fontcolor.rgba, dtext->fontcolor_string, -1, ctx))) {
- av_log(ctx, AV_LOG_ERROR,
- "Invalid font color '%s'\n", dtext->fontcolor_string);
- return err;
- }
-
- if ((err = av_parse_color(dtext->boxcolor.rgba, dtext->boxcolor_string, -1, ctx))) {
- av_log(ctx, AV_LOG_ERROR,
- "Invalid box color '%s'\n", dtext->boxcolor_string);
- return err;
- }
-
- if ((err = av_parse_color(dtext->shadowcolor.rgba, dtext->shadowcolor_string, -1, ctx))) {
- av_log(ctx, AV_LOG_ERROR,
- "Invalid shadow color '%s'\n", dtext->shadowcolor_string);
- return err;
- }
-
- if ((err = FT_Init_FreeType(&(dtext->library)))) {
+ if ((err = FT_Init_FreeType(&(s->library)))) {
av_log(ctx, AV_LOG_ERROR,
"Could not load FreeType: %s\n", FT_ERRMSG(err));
return AVERROR(EINVAL);
@@ -490,15 +476,15 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
err = load_font(ctx);
if (err)
return err;
- if (!dtext->fontsize)
- dtext->fontsize = 16;
- if ((err = FT_Set_Pixel_Sizes(dtext->face, 0, dtext->fontsize))) {
+ if (!s->fontsize)
+ s->fontsize = 16;
+ if ((err = FT_Set_Pixel_Sizes(s->face, 0, s->fontsize))) {
av_log(ctx, AV_LOG_ERROR, "Could not set font size to %d pixels: %s\n",
- dtext->fontsize, FT_ERRMSG(err));
+ s->fontsize, FT_ERRMSG(err));
return AVERROR(EINVAL);
}
- dtext->use_kerning = FT_HAS_KERNING(dtext->face);
+ s->use_kerning = FT_HAS_KERNING(s->face);
/* load the fallback glyph with code 0 */
load_glyph(ctx, NULL, 0);
@@ -508,13 +494,13 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
av_log(ctx, AV_LOG_ERROR, "Could not set tabsize.\n");
return err;
}
- dtext->tabsize *= glyph->advance;
+ s->tabsize *= glyph->advance;
- if (dtext->exp_mode == EXP_STRFTIME &&
- (strchr(dtext->text, '%') || strchr(dtext->text, '\\')))
+ if (s->exp_mode == EXP_STRFTIME &&
+ (strchr(s->text, '%') || strchr(s->text, '\\')))
av_log(ctx, AV_LOG_WARNING, "expansion=strftime is deprecated.\n");
- av_bprint_init(&dtext->expanded_text, 0, AV_BPRINT_SIZE_UNLIMITED);
+ av_bprint_init(&s->expanded_text, 0, AV_BPRINT_SIZE_UNLIMITED);
return 0;
}
@@ -537,24 +523,26 @@ static int glyph_enu_free(void *opaque, void *elem)
static av_cold void uninit(AVFilterContext *ctx)
{
- DrawTextContext *dtext = ctx->priv;
+ DrawTextContext *s = ctx->priv;
- av_expr_free(dtext->x_pexpr); dtext->x_pexpr = NULL;
- av_expr_free(dtext->y_pexpr); dtext->y_pexpr = NULL;
- av_expr_free(dtext->draw_pexpr); dtext->draw_pexpr = NULL;
- av_opt_free(dtext);
+ av_expr_free(s->x_pexpr);
+ av_expr_free(s->y_pexpr);
+#if FF_API_DRAWTEXT_OLD_TIMELINE
+ av_expr_free(s->draw_pexpr);
+ s->x_pexpr = s->y_pexpr = s->draw_pexpr = NULL;
+#endif
+ av_freep(&s->positions);
+ s->nb_positions = 0;
- av_freep(&dtext->positions);
- dtext->nb_positions = 0;
- av_tree_enumerate(dtext->glyphs, NULL, NULL, glyph_enu_free);
- av_tree_destroy(dtext->glyphs);
- dtext->glyphs = NULL;
+ av_tree_enumerate(s->glyphs, NULL, NULL, glyph_enu_free);
+ av_tree_destroy(s->glyphs);
+ s->glyphs = NULL;
- FT_Done_Face(dtext->face);
- FT_Done_FreeType(dtext->library);
+ FT_Done_Face(s->face);
+ FT_Done_FreeType(s->library);
- av_bprint_finalize(&dtext->expanded_text, NULL);
+ av_bprint_finalize(&s->expanded_text, NULL);
}
static inline int is_newline(uint32_t c)
@@ -565,49 +553,60 @@ static inline int is_newline(uint32_t c)
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
- DrawTextContext *dtext = ctx->priv;
+ DrawTextContext *s = ctx->priv;
int ret;
- ff_draw_init(&dtext->dc, inlink->format, 0);
- ff_draw_color(&dtext->dc, &dtext->fontcolor, dtext->fontcolor.rgba);
- ff_draw_color(&dtext->dc, &dtext->shadowcolor, dtext->shadowcolor.rgba);
- ff_draw_color(&dtext->dc, &dtext->boxcolor, dtext->boxcolor.rgba);
-
- dtext->var_values[VAR_w] = dtext->var_values[VAR_W] = dtext->var_values[VAR_MAIN_W] = inlink->w;
- dtext->var_values[VAR_h] = dtext->var_values[VAR_H] = dtext->var_values[VAR_MAIN_H] = inlink->h;
- dtext->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? av_q2d(inlink->sample_aspect_ratio) : 1;
- dtext->var_values[VAR_DAR] = (double)inlink->w / inlink->h * dtext->var_values[VAR_SAR];
- dtext->var_values[VAR_HSUB] = 1 << dtext->dc.hsub_max;
- dtext->var_values[VAR_VSUB] = 1 << dtext->dc.vsub_max;
- dtext->var_values[VAR_X] = NAN;
- dtext->var_values[VAR_Y] = NAN;
- if (!dtext->reinit)
- dtext->var_values[VAR_N] = 0;
- dtext->var_values[VAR_T] = NAN;
-
- av_lfg_init(&dtext->prng, av_get_random_seed());
-
- if ((ret = av_expr_parse(&dtext->x_pexpr, dtext->x_expr, var_names,
- NULL, NULL, fun2_names, fun2, 0, ctx)) < 0 ||
- (ret = av_expr_parse(&dtext->y_pexpr, dtext->y_expr, var_names,
+ ff_draw_init(&s->dc, inlink->format, 0);
+ ff_draw_color(&s->dc, &s->fontcolor, s->fontcolor.rgba);
+ ff_draw_color(&s->dc, &s->shadowcolor, s->shadowcolor.rgba);
+ ff_draw_color(&s->dc, &s->boxcolor, s->boxcolor.rgba);
+
+ s->var_values[VAR_w] = s->var_values[VAR_W] = s->var_values[VAR_MAIN_W] = inlink->w;
+ s->var_values[VAR_h] = s->var_values[VAR_H] = s->var_values[VAR_MAIN_H] = inlink->h;
+ s->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? av_q2d(inlink->sample_aspect_ratio) : 1;
+ s->var_values[VAR_DAR] = (double)inlink->w / inlink->h * s->var_values[VAR_SAR];
+ s->var_values[VAR_HSUB] = 1 << s->dc.hsub_max;
+ s->var_values[VAR_VSUB] = 1 << s->dc.vsub_max;
+ s->var_values[VAR_X] = NAN;
+ s->var_values[VAR_Y] = NAN;
+ s->var_values[VAR_T] = NAN;
+
+ av_lfg_init(&s->prng, av_get_random_seed());
+
+ av_expr_free(s->x_pexpr);
+ av_expr_free(s->y_pexpr);
+#if FF_API_DRAWTEXT_OLD_TIMELINE
+ av_expr_free(s->draw_pexpr);
+ s->x_pexpr = s->y_pexpr = s->draw_pexpr = NULL;
+#else
+ s->x_pexpr = s->y_pexpr = NULL;
+#endif
+
+ if ((ret = av_expr_parse(&s->x_pexpr, s->x_expr, var_names,
NULL, NULL, fun2_names, fun2, 0, ctx)) < 0 ||
- (ret = av_expr_parse(&dtext->draw_pexpr, dtext->draw_expr, var_names,
+ (ret = av_expr_parse(&s->y_pexpr, s->y_expr, var_names,
NULL, NULL, fun2_names, fun2, 0, ctx)) < 0)
return AVERROR(EINVAL);
+#if FF_API_DRAWTEXT_OLD_TIMELINE
+ if (s->draw_expr &&
+ (ret = av_expr_parse(&s->draw_pexpr, s->draw_expr, var_names,
+ NULL, NULL, fun2_names, fun2, 0, ctx)) < 0)
+ return ret;
+#endif
return 0;
}
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
{
- DrawTextContext *dtext = ctx->priv;
+ DrawTextContext *s = ctx->priv;
if (!strcmp(cmd, "reinit")) {
int ret;
uninit(ctx);
- dtext->reinit = 1;
- if ((ret = init(ctx, arg)) < 0)
+ s->reinit = 1;
+ if ((ret = init(ctx)) < 0)
return ret;
return config_input(ctx->inputs[0]);
}
@@ -615,21 +614,41 @@ static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char
return AVERROR(ENOSYS);
}
+static int func_pict_type(AVFilterContext *ctx, AVBPrint *bp,
+ char *fct, unsigned argc, char **argv, int tag)
+{
+ DrawTextContext *s = ctx->priv;
+
+ av_bprintf(bp, "%c", av_get_picture_type_char(s->var_values[VAR_PICT_TYPE]));
+ return 0;
+}
+
static int func_pts(AVFilterContext *ctx, AVBPrint *bp,
char *fct, unsigned argc, char **argv, int tag)
{
- DrawTextContext *dtext = ctx->priv;
+ DrawTextContext *s = ctx->priv;
- av_bprintf(bp, "%.6f", dtext->var_values[VAR_T]);
+ av_bprintf(bp, "%.6f", s->var_values[VAR_T]);
return 0;
}
static int func_frame_num(AVFilterContext *ctx, AVBPrint *bp,
char *fct, unsigned argc, char **argv, int tag)
{
- DrawTextContext *dtext = ctx->priv;
+ DrawTextContext *s = ctx->priv;
- av_bprintf(bp, "%d", (int)dtext->var_values[VAR_N]);
+ av_bprintf(bp, "%d", (int)s->var_values[VAR_N]);
+ return 0;
+}
+
+static int func_metadata(AVFilterContext *ctx, AVBPrint *bp,
+ char *fct, unsigned argc, char **argv, int tag)
+{
+ DrawTextContext *s = ctx->priv;
+ AVDictionaryEntry *e = av_dict_get(s->metadata, argv[0], NULL, 0);
+
+ if (e && e->value)
+ av_bprintf(bp, "%s", e->value);
return 0;
}
@@ -659,13 +678,13 @@ static int func_strftime(AVFilterContext *ctx, AVBPrint *bp,
static int func_eval_expr(AVFilterContext *ctx, AVBPrint *bp,
char *fct, unsigned argc, char **argv, int tag)
{
- DrawTextContext *dtext = ctx->priv;
+ DrawTextContext *s = ctx->priv;
double res;
int ret;
- ret = av_expr_parse_and_eval(&res, argv[0], var_names, dtext->var_values,
+ ret = av_expr_parse_and_eval(&res, argv[0], var_names, s->var_values,
NULL, NULL, fun2_names, fun2,
- &dtext->prng, 0, ctx);
+ &s->prng, 0, ctx);
if (ret < 0)
av_log(ctx, AV_LOG_ERROR,
"Expression '%s' for the expr text expansion function is not valid\n",
@@ -679,16 +698,18 @@ static int func_eval_expr(AVFilterContext *ctx, AVBPrint *bp,
static const struct drawtext_function {
const char *name;
unsigned argc_min, argc_max;
- int tag; /** opaque argument to func */
+ int tag; /**< opaque argument to func */
int (*func)(AVFilterContext *, AVBPrint *, char *, unsigned, char **, int);
} functions[] = {
{ "expr", 1, 1, 0, func_eval_expr },
{ "e", 1, 1, 0, func_eval_expr },
+ { "pict_type", 0, 0, 0, func_pict_type },
{ "pts", 0, 0, 0, func_pts },
{ "gmtime", 0, 1, 'G', func_strftime },
{ "localtime", 0, 1, 'L', func_strftime },
{ "frame_num", 0, 0, 0, func_frame_num },
{ "n", 0, 0, 0, func_frame_num },
+ { "metadata", 1, 1, 0, func_metadata },
};
static int eval_function(AVFilterContext *ctx, AVBPrint *bp, char *fct,
@@ -760,9 +781,9 @@ end:
static int expand_text(AVFilterContext *ctx)
{
- DrawTextContext *dtext = ctx->priv;
- char *text = dtext->text;
- AVBPrint *bp = &dtext->expanded_text;
+ DrawTextContext *s = ctx->priv;
+ char *text = s->text;
+ AVBPrint *bp = &s->expanded_text;
int ret;
av_bprint_clear(bp);
@@ -784,10 +805,10 @@ static int expand_text(AVFilterContext *ctx)
return 0;
}
-static int draw_glyphs(DrawTextContext *dtext, AVFrame *frame,
+static int draw_glyphs(DrawTextContext *s, AVFrame *frame,
int width, int height, const uint8_t rgbcolor[4], FFDrawColor *color, int x, int y)
{
- char *text = dtext->expanded_text.str;
+ char *text = s->expanded_text.str;
uint32_t code = 0;
int i, x1, y1;
uint8_t *p;
@@ -802,16 +823,16 @@ static int draw_glyphs(DrawTextContext *dtext, AVFrame *frame,
continue;
dummy.code = code;
- glyph = av_tree_find(dtext->glyphs, &dummy, (void *)glyph_cmp, NULL);
+ glyph = av_tree_find(s->glyphs, &dummy, (void *)glyph_cmp, NULL);
if (glyph->bitmap.pixel_mode != FT_PIXEL_MODE_MONO &&
glyph->bitmap.pixel_mode != FT_PIXEL_MODE_GRAY)
return AVERROR(EINVAL);
- x1 = dtext->positions[i].x+dtext->x+x;
- y1 = dtext->positions[i].y+dtext->y+y;
+ x1 = s->positions[i].x+s->x+x;
+ y1 = s->positions[i].y+s->y+y;
- ff_blend_mask(&dtext->dc, color,
+ ff_blend_mask(&s->dc, color,
frame->data, frame->linesize, width, height,
glyph->bitmap.buffer, glyph->bitmap.pitch,
glyph->bitmap.width, glyph->bitmap.rows,
@@ -825,12 +846,14 @@ static int draw_glyphs(DrawTextContext *dtext, AVFrame *frame,
static int draw_text(AVFilterContext *ctx, AVFrame *frame,
int width, int height)
{
- DrawTextContext *dtext = ctx->priv;
+ DrawTextContext *s = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
uint32_t code = 0, prev_code = 0;
int x = 0, y = 0, i = 0, ret;
int max_text_line_w = 0, len;
int box_w, box_h;
- char *text = dtext->text;
+ char *text;
uint8_t *p;
int y_min = 32000, y_max = -32000;
int x_min = 32000, x_max = -32000;
@@ -840,16 +863,16 @@ static int draw_text(AVFilterContext *ctx, AVFrame *frame,
time_t now = time(0);
struct tm ltime;
- AVBPrint *bp = &dtext->expanded_text;
+ AVBPrint *bp = &s->expanded_text;
av_bprint_clear(bp);
- if(dtext->basetime != AV_NOPTS_VALUE)
- now= frame->pts*av_q2d(ctx->inputs[0]->time_base) + dtext->basetime/1000000;
+ if(s->basetime != AV_NOPTS_VALUE)
+ now= frame->pts*av_q2d(ctx->inputs[0]->time_base) + s->basetime/1000000;
- switch (dtext->exp_mode) {
+ switch (s->exp_mode) {
case EXP_NONE:
- av_bprintf(bp, "%s", dtext->text);
+ av_bprintf(bp, "%s", s->text);
break;
case EXP_NORMAL:
if ((ret = expand_text(ctx)) < 0)
@@ -857,25 +880,25 @@ static int draw_text(AVFilterContext *ctx, AVFrame *frame,
break;
case EXP_STRFTIME:
localtime_r(&now, &ltime);
- av_bprint_strftime(bp, dtext->text, &ltime);
+ av_bprint_strftime(bp, s->text, &ltime);
break;
}
- if (dtext->tc_opt_string) {
+ if (s->tc_opt_string) {
char tcbuf[AV_TIMECODE_STR_SIZE];
- av_timecode_make_string(&dtext->tc, tcbuf, dtext->frame_id++);
+ av_timecode_make_string(&s->tc, tcbuf, inlink->frame_count);
av_bprint_clear(bp);
- av_bprintf(bp, "%s%s", dtext->text, tcbuf);
+ av_bprintf(bp, "%s%s", s->text, tcbuf);
}
if (!av_bprint_is_complete(bp))
return AVERROR(ENOMEM);
- text = dtext->expanded_text.str;
- if ((len = dtext->expanded_text.len) > dtext->nb_positions) {
- if (!(dtext->positions =
- av_realloc(dtext->positions, len*sizeof(*dtext->positions))))
+ text = s->expanded_text.str;
+ if ((len = s->expanded_text.len) > s->nb_positions) {
+ if (!(s->positions =
+ av_realloc(s->positions, len*sizeof(*s->positions))))
return AVERROR(ENOMEM);
- dtext->nb_positions = len;
+ s->nb_positions = len;
}
x = 0;
@@ -887,7 +910,7 @@ static int draw_text(AVFilterContext *ctx, AVFrame *frame,
/* get glyph */
dummy.code = code;
- glyph = av_tree_find(dtext->glyphs, &dummy, glyph_cmp, NULL);
+ glyph = av_tree_find(s->glyphs, &dummy, glyph_cmp, NULL);
if (!glyph) {
load_glyph(ctx, &glyph, code);
}
@@ -897,8 +920,8 @@ static int draw_text(AVFilterContext *ctx, AVFrame *frame,
x_min = FFMIN(glyph->bbox.xMin, x_min);
x_max = FFMAX(glyph->bbox.xMax, x_max);
}
- dtext->max_glyph_h = y_max - y_min;
- dtext->max_glyph_w = x_max - x_min;
+ s->max_glyph_h = y_max - y_min;
+ s->max_glyph_w = x_max - x_min;
/* compute and save position for each glyph */
glyph = NULL;
@@ -911,8 +934,9 @@ static int draw_text(AVFilterContext *ctx, AVFrame *frame,
prev_code = code;
if (is_newline(code)) {
+
max_text_line_w = FFMAX(max_text_line_w, x);
- y += dtext->max_glyph_h;
+ y += s->max_glyph_h;
x = 0;
continue;
}
@@ -920,59 +944,65 @@ static int draw_text(AVFilterContext *ctx, AVFrame *frame,
/* get glyph */
prev_glyph = glyph;
dummy.code = code;
- glyph = av_tree_find(dtext->glyphs, &dummy, glyph_cmp, NULL);
+ glyph = av_tree_find(s->glyphs, &dummy, glyph_cmp, NULL);
/* kerning */
- if (dtext->use_kerning && prev_glyph && glyph->code) {
- FT_Get_Kerning(dtext->face, prev_glyph->code, glyph->code,
+ if (s->use_kerning && prev_glyph && glyph->code) {
+ FT_Get_Kerning(s->face, prev_glyph->code, glyph->code,
ft_kerning_default, &delta);
x += delta.x >> 6;
}
/* save position */
- dtext->positions[i].x = x + glyph->bitmap_left;
- dtext->positions[i].y = y - glyph->bitmap_top + y_max;
- if (code == '\t') x = (x / dtext->tabsize + 1)*dtext->tabsize;
+ s->positions[i].x = x + glyph->bitmap_left;
+ s->positions[i].y = y - glyph->bitmap_top + y_max;
+ if (code == '\t') x = (x / s->tabsize + 1)*s->tabsize;
else x += glyph->advance;
}
max_text_line_w = FFMAX(x, max_text_line_w);
- dtext->var_values[VAR_TW] = dtext->var_values[VAR_TEXT_W] = max_text_line_w;
- dtext->var_values[VAR_TH] = dtext->var_values[VAR_TEXT_H] = y + dtext->max_glyph_h;
+ s->var_values[VAR_TW] = s->var_values[VAR_TEXT_W] = max_text_line_w;
+ s->var_values[VAR_TH] = s->var_values[VAR_TEXT_H] = y + s->max_glyph_h;
- dtext->var_values[VAR_MAX_GLYPH_W] = dtext->max_glyph_w;
- dtext->var_values[VAR_MAX_GLYPH_H] = dtext->max_glyph_h;
- dtext->var_values[VAR_MAX_GLYPH_A] = dtext->var_values[VAR_ASCENT ] = y_max;
- dtext->var_values[VAR_MAX_GLYPH_D] = dtext->var_values[VAR_DESCENT] = y_min;
+ s->var_values[VAR_MAX_GLYPH_W] = s->max_glyph_w;
+ s->var_values[VAR_MAX_GLYPH_H] = s->max_glyph_h;
+ s->var_values[VAR_MAX_GLYPH_A] = s->var_values[VAR_ASCENT ] = y_max;
+ s->var_values[VAR_MAX_GLYPH_D] = s->var_values[VAR_DESCENT] = y_min;
- dtext->var_values[VAR_LINE_H] = dtext->var_values[VAR_LH] = dtext->max_glyph_h;
+ s->var_values[VAR_LINE_H] = s->var_values[VAR_LH] = s->max_glyph_h;
- dtext->x = dtext->var_values[VAR_X] = av_expr_eval(dtext->x_pexpr, dtext->var_values, &dtext->prng);
- dtext->y = dtext->var_values[VAR_Y] = av_expr_eval(dtext->y_pexpr, dtext->var_values, &dtext->prng);
- dtext->x = dtext->var_values[VAR_X] = av_expr_eval(dtext->x_pexpr, dtext->var_values, &dtext->prng);
- dtext->draw = av_expr_eval(dtext->draw_pexpr, dtext->var_values, &dtext->prng);
+ s->x = s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, &s->prng);
+ s->y = s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, &s->prng);
+ s->x = s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, &s->prng);
+#if FF_API_DRAWTEXT_OLD_TIMELINE
+ if (s->draw_pexpr){
+ s->draw = av_expr_eval(s->draw_pexpr, s->var_values, &s->prng);
- if(!dtext->draw)
+ if(!s->draw)
+ return 0;
+ }
+ if (ctx->is_disabled)
return 0;
+#endif
box_w = FFMIN(width - 1 , max_text_line_w);
- box_h = FFMIN(height - 1, y + dtext->max_glyph_h);
+ box_h = FFMIN(height - 1, y + s->max_glyph_h);
/* draw box */
- if (dtext->draw_box)
- ff_blend_rectangle(&dtext->dc, &dtext->boxcolor,
+ if (s->draw_box)
+ ff_blend_rectangle(&s->dc, &s->boxcolor,
frame->data, frame->linesize, width, height,
- dtext->x, dtext->y, box_w, box_h);
+ s->x, s->y, box_w, box_h);
- if (dtext->shadowx || dtext->shadowy) {
- if ((ret = draw_glyphs(dtext, frame, width, height, dtext->shadowcolor.rgba,
- &dtext->shadowcolor, dtext->shadowx, dtext->shadowy)) < 0)
+ if (s->shadowx || s->shadowy) {
+ if ((ret = draw_glyphs(s, frame, width, height, s->shadowcolor.rgba,
+ &s->shadowcolor, s->shadowx, s->shadowy)) < 0)
return ret;
}
- if ((ret = draw_glyphs(dtext, frame, width, height, dtext->fontcolor.rgba,
- &dtext->fontcolor, 0, 0)) < 0)
+ if ((ret = draw_glyphs(s, frame, width, height, s->fontcolor.rgba,
+ &s->fontcolor, 0, 0)) < 0)
return ret;
return 0;
@@ -982,36 +1012,37 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
- DrawTextContext *dtext = ctx->priv;
+ DrawTextContext *s = ctx->priv;
int ret;
- if (dtext->reload)
+ if (s->reload)
if ((ret = load_textfile(ctx)) < 0)
return ret;
- dtext->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
+ s->var_values[VAR_N] = inlink->frame_count+s->start_number;
+ s->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
NAN : frame->pts * av_q2d(inlink->time_base);
+ s->var_values[VAR_PICT_TYPE] = frame->pict_type;
+ s->metadata = av_frame_get_metadata(frame);
+
draw_text(ctx, frame, frame->width, frame->height);
av_log(ctx, AV_LOG_DEBUG, "n:%d t:%f text_w:%d text_h:%d x:%d y:%d\n",
- (int)dtext->var_values[VAR_N], dtext->var_values[VAR_T],
- (int)dtext->var_values[VAR_TEXT_W], (int)dtext->var_values[VAR_TEXT_H],
- dtext->x, dtext->y);
-
- dtext->var_values[VAR_N] += 1.0;
+ (int)s->var_values[VAR_N], s->var_values[VAR_T],
+ (int)s->var_values[VAR_TEXT_W], (int)s->var_values[VAR_TEXT_H],
+ s->x, s->y);
return ff_filter_frame(outlink, frame);
}
static const AVFilterPad avfilter_vf_drawtext_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
- .config_props = config_input,
- .needs_writable = 1,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ .needs_writable = 1,
},
{ NULL }
};
@@ -1024,16 +1055,20 @@ static const AVFilterPad avfilter_vf_drawtext_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_drawtext = {
+AVFilter ff_vf_drawtext = {
.name = "drawtext",
.description = NULL_IF_CONFIG_SMALL("Draw text on top of video frames using libfreetype library."),
.priv_size = sizeof(DrawTextContext),
+ .priv_class = &drawtext_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_drawtext_inputs,
- .outputs = avfilter_vf_drawtext_outputs,
+ .inputs = avfilter_vf_drawtext_inputs,
+ .outputs = avfilter_vf_drawtext_outputs,
.process_command = command,
- .priv_class = &drawtext_class,
+#if FF_API_DRAWTEXT_OLD_TIMELINE
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+#else
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+#endif
};
diff --git a/ffmpeg/libavfilter/vf_edgedetect.c b/ffmpeg/libavfilter/vf_edgedetect.c
index b582ab9..c8ec734 100644
--- a/ffmpeg/libavfilter/vf_edgedetect.c
+++ b/ffmpeg/libavfilter/vf_edgedetect.c
@@ -45,22 +45,15 @@ typedef struct {
static const AVOption edgedetect_options[] = {
{ "high", "set high threshold", OFFSET(high), AV_OPT_TYPE_DOUBLE, {.dbl=50/255.}, 0, 1, FLAGS },
{ "low", "set low threshold", OFFSET(low), AV_OPT_TYPE_DOUBLE, {.dbl=20/255.}, 0, 1, FLAGS },
- { NULL },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(edgedetect);
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
- int ret;
EdgeDetectContext *edgedetect = ctx->priv;
- edgedetect->class = &edgedetect_class;
- av_opt_set_defaults(edgedetect);
-
- if ((ret = av_set_options_string(edgedetect, args, "=", ":")) < 0)
- return ret;
-
edgedetect->low_u8 = edgedetect->low * 255. + .5;
edgedetect->high_u8 = edgedetect->high * 255. + .5;
return 0;
@@ -256,14 +249,20 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
AVFilterLink *outlink = inlink->dst->outputs[0];
uint8_t *tmpbuf = edgedetect->tmpbuf;
uint16_t *gradients = edgedetect->gradients;
+ int direct = 0;
AVFrame *out;
- out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
- if (!out) {
- av_frame_free(&in);
- return AVERROR(ENOMEM);
+ if (av_frame_is_writable(in)) {
+ direct = 1;
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
}
- av_frame_copy_props(out, in);
/* gaussian filter to reduce noise */
gaussian_blur(ctx, inlink->w, inlink->h,
@@ -287,7 +286,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
out->data[0], out->linesize[0],
tmpbuf, inlink->w);
- av_frame_free(&in);
+ if (!direct)
+ av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
@@ -317,7 +317,7 @@ static const AVFilterPad edgedetect_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_edgedetect = {
+AVFilter ff_vf_edgedetect = {
.name = "edgedetect",
.description = NULL_IF_CONFIG_SMALL("Detect and draw edge."),
.priv_size = sizeof(EdgeDetectContext),
@@ -327,4 +327,5 @@ AVFilter avfilter_vf_edgedetect = {
.inputs = edgedetect_inputs,
.outputs = edgedetect_outputs,
.priv_class = &edgedetect_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_fade.c b/ffmpeg/libavfilter/vf_fade.c
index 8036672..cc10b12 100644
--- a/ffmpeg/libavfilter/vf_fade.c
+++ b/ffmpeg/libavfilter/vf_fade.c
@@ -25,6 +25,7 @@
* based heavily on vf_negate.c by Bobby Bingham
*/
+#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
@@ -32,7 +33,6 @@
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
-#include "internal.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
@@ -46,60 +46,58 @@
#define U 1
#define V 2
+#define FADE_IN 0
+#define FADE_OUT 1
+
typedef struct {
const AVClass *class;
+ int type;
int factor, fade_per_frame;
- unsigned int frame_index, start_frame, stop_frame, nb_frames;
+ int start_frame, nb_frames;
int hsub, vsub, bpp;
unsigned int black_level, black_level_scaled;
uint8_t is_packed_rgb;
uint8_t rgba_map[4];
int alpha;
-
- char *type;
+ uint64_t start_time, duration;
+ enum {VF_FADE_WAITING=0, VF_FADE_FADING, VF_FADE_DONE} fade_state;
+ uint8_t color_rgba[4]; ///< fade color
+ int black_fade; ///< if color_rgba is black
} FadeContext;
-#define OFFSET(x) offsetof(FadeContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+static av_cold int init(AVFilterContext *ctx)
+{
+ FadeContext *s = ctx->priv;
-static const AVOption fade_options[] = {
- { "type", "set the fade direction", OFFSET(type), AV_OPT_TYPE_STRING, {.str = "in" }, CHAR_MIN, CHAR_MAX, FLAGS },
- { "t", "set the fade direction", OFFSET(type), AV_OPT_TYPE_STRING, {.str = "in" }, CHAR_MIN, CHAR_MAX, FLAGS },
- { "start_frame", "set expression of frame to start fading", OFFSET(start_frame), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, FLAGS },
- { "s", "set expression of frame to start fading", OFFSET(start_frame), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, FLAGS },
- { "nb_frames", "set expression for fade duration in frames", OFFSET(nb_frames), AV_OPT_TYPE_INT, {.i64 = 25 }, 0, INT_MAX, FLAGS },
- { "n", "set expression for fade duration in frames", OFFSET(nb_frames), AV_OPT_TYPE_INT, {.i64 = 25 }, 0, INT_MAX, FLAGS },
- { "alpha", "fade alpha if it is available on the input", OFFSET(alpha), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS },
- {NULL},
-};
+ s->fade_per_frame = (1 << 16) / s->nb_frames;
+ s->fade_state = VF_FADE_WAITING;
-AVFILTER_DEFINE_CLASS(fade);
+ if (s->duration != 0) {
+ // If duration (seconds) is non-zero, assume that we are not fading based on frames
+ s->nb_frames = 0; // Mostly to clean up logging
+ }
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- FadeContext *fade = ctx->priv;
-
- fade->fade_per_frame = (1 << 16) / fade->nb_frames;
- if (!strcmp(fade->type, "in"))
- fade->factor = 0;
- else if (!strcmp(fade->type, "out")) {
- fade->fade_per_frame = -fade->fade_per_frame;
- fade->factor = (1 << 16);
- } else {
- av_log(ctx, AV_LOG_ERROR,
- "Type argument must be 'in' or 'out' but '%s' was specified\n", fade->type);
- return AVERROR(EINVAL);
+ // Choose what to log. If both time-based and frame-based options, both lines will be in the log
+ if (s->start_frame || s->nb_frames) {
+ av_log(ctx, AV_LOG_VERBOSE,
+ "type:%s start_frame:%d nb_frames:%d alpha:%d\n",
+ s->type == FADE_IN ? "in" : "out", s->start_frame,
+ s->nb_frames,s->alpha);
+ }
+ if (s->start_time || s->duration) {
+ av_log(ctx, AV_LOG_VERBOSE,
+ "type:%s start_time:%f duration:%f alpha:%d\n",
+ s->type == FADE_IN ? "in" : "out", (s->start_time / (double)AV_TIME_BASE),
+ (s->duration / (double)AV_TIME_BASE),s->alpha);
}
- fade->stop_frame = fade->start_frame + fade->nb_frames;
- av_log(ctx, AV_LOG_VERBOSE,
- "type:%s start_frame:%d nb_frames:%d alpha:%d\n",
- fade->type, fade->start_frame, fade->nb_frames, fade->alpha);
+ s->black_fade = !memcmp(s->color_rgba, "\x00\x00\x00\xff", 4);
return 0;
}
static int query_formats(AVFilterContext *ctx)
{
+ const FadeContext *s = ctx->priv;
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
@@ -111,8 +109,17 @@ static int query_formats(AVFilterContext *ctx)
AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
AV_PIX_FMT_NONE
};
+ static const enum AVPixelFormat pix_fmts_rgb[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_NONE
+ };
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ if (s->black_fade)
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ else
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts_rgb));
return 0;
}
@@ -125,101 +132,258 @@ const static enum AVPixelFormat studio_level_pix_fmts[] = {
static int config_props(AVFilterLink *inlink)
{
- FadeContext *fade = inlink->dst->priv;
+ FadeContext *s = inlink->dst->priv;
const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(inlink->format);
- fade->hsub = pixdesc->log2_chroma_w;
- fade->vsub = pixdesc->log2_chroma_h;
+ s->hsub = pixdesc->log2_chroma_w;
+ s->vsub = pixdesc->log2_chroma_h;
- fade->bpp = av_get_bits_per_pixel(pixdesc) >> 3;
- fade->alpha &= pixdesc->flags & PIX_FMT_ALPHA;
- fade->is_packed_rgb = ff_fill_rgba_map(fade->rgba_map, inlink->format) >= 0;
+ s->bpp = av_get_bits_per_pixel(pixdesc) >> 3;
+ s->alpha &= !!(pixdesc->flags & AV_PIX_FMT_FLAG_ALPHA);
+ s->is_packed_rgb = ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0;
/* use CCIR601/709 black level for studio-level pixel non-alpha components */
- fade->black_level =
- ff_fmt_is_in(inlink->format, studio_level_pix_fmts) && !fade->alpha ? 16 : 0;
+ s->black_level =
+ ff_fmt_is_in(inlink->format, studio_level_pix_fmts) && !s->alpha ? 16 : 0;
/* 32768 = 1 << 15, it is an integer representation
* of 0.5 and is for rounding. */
- fade->black_level_scaled = (fade->black_level << 16) + 32768;
+ s->black_level_scaled = (s->black_level << 16) + 32768;
return 0;
}
-static void fade_plane(int y, int h, int w,
- int fade_factor, int black_level, int black_level_scaled,
- uint8_t offset, uint8_t step, int bytes_per_plane,
- uint8_t *data, int line_size)
+static av_always_inline void filter_rgb(FadeContext *s, const AVFrame *frame,
+ int slice_start, int slice_end,
+ int do_alpha, int step)
{
- uint8_t *p;
int i, j;
+ const uint8_t r_idx = s->rgba_map[R];
+ const uint8_t g_idx = s->rgba_map[G];
+ const uint8_t b_idx = s->rgba_map[B];
+ const uint8_t a_idx = s->rgba_map[A];
+ const uint8_t *c = s->color_rgba;
- /* luma, alpha or rgb plane */
- for (i = 0; i < h; i++) {
- p = data + offset + (y+i) * line_size;
- for (j = 0; j < w * bytes_per_plane; j++) {
- /* fade->factor is using 16 lower-order bits for decimal places. */
- *p = ((*p - black_level) * fade_factor + black_level_scaled) >> 16;
- p+=step;
+ for (i = slice_start; i < slice_end; i++) {
+ uint8_t *p = frame->data[0] + i * frame->linesize[0];
+ for (j = 0; j < frame->width; j++) {
+#define INTERP(c_name, c_idx) av_clip_uint8(((c[c_idx]<<16) + ((int)p[c_name] - (int)c[c_idx]) * s->factor + (1<<15)) >> 16)
+ p[r_idx] = INTERP(r_idx, 0);
+ p[g_idx] = INTERP(g_idx, 1);
+ p[b_idx] = INTERP(b_idx, 2);
+ if (do_alpha)
+ p[a_idx] = INTERP(a_idx, 3);
+ p += step;
}
}
}
-static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+static int filter_slice_rgb(AVFilterContext *ctx, void *arg, int jobnr,
+ int nb_jobs)
{
- FadeContext *fade = inlink->dst->priv;
- uint8_t *p;
+ FadeContext *s = ctx->priv;
+ AVFrame *frame = arg;
+ int slice_start = (frame->height * jobnr ) / nb_jobs;
+ int slice_end = (frame->height * (jobnr+1)) / nb_jobs;
+
+ if (s->alpha) filter_rgb(s, frame, slice_start, slice_end, 1, 4);
+ else if (s->bpp == 3) filter_rgb(s, frame, slice_start, slice_end, 0, 3);
+ else if (s->bpp == 4) filter_rgb(s, frame, slice_start, slice_end, 0, 4);
+ else av_assert0(0);
+
+ return 0;
+}
+
+static int filter_slice_luma(AVFilterContext *ctx, void *arg, int jobnr,
+ int nb_jobs)
+{
+ FadeContext *s = ctx->priv;
+ AVFrame *frame = arg;
+ int slice_start = (frame->height * jobnr ) / nb_jobs;
+ int slice_end = (frame->height * (jobnr+1)) / nb_jobs;
+ int i, j;
+
+ for (i = slice_start; i < slice_end; i++) {
+ uint8_t *p = frame->data[0] + i * frame->linesize[0];
+ for (j = 0; j < frame->width * s->bpp; j++) {
+ /* s->factor is using 16 lower-order bits for decimal
+ * places. 32768 = 1 << 15, it is an integer representation
+ * of 0.5 and is for rounding. */
+ *p = ((*p - s->black_level) * s->factor + s->black_level_scaled) >> 16;
+ p++;
+ }
+ }
+
+ return 0;
+}
+
+static int filter_slice_chroma(AVFilterContext *ctx, void *arg, int jobnr,
+ int nb_jobs)
+{
+ FadeContext *s = ctx->priv;
+ AVFrame *frame = arg;
int i, j, plane;
+ const int width = FF_CEIL_RSHIFT(frame->width, s->hsub);
+ const int height= FF_CEIL_RSHIFT(frame->height, s->vsub);
+ int slice_start = (height * jobnr ) / nb_jobs;
+ int slice_end = (height * (jobnr+1)) / nb_jobs;
+
+ for (plane = 1; plane < 3; plane++) {
+ for (i = slice_start; i < slice_end; i++) {
+ uint8_t *p = frame->data[plane] + i * frame->linesize[plane];
+ for (j = 0; j < width; j++) {
+ /* 8421367 = ((128 << 1) + 1) << 15. It is an integer
+ * representation of 128.5. The .5 is for rounding
+ * purposes. */
+ *p = ((*p - 128) * s->factor + 8421367) >> 16;
+ p++;
+ }
+ }
+ }
- if (fade->factor < UINT16_MAX) {
- if (fade->alpha) {
- // alpha only
- plane = fade->is_packed_rgb ? 0 : A; // alpha is on plane 0 for packed formats
- // or plane 3 for planar formats
- fade_plane(0, frame->height, inlink->w,
- fade->factor, fade->black_level, fade->black_level_scaled,
- fade->is_packed_rgb ? fade->rgba_map[A] : 0, // alpha offset for packed formats
- fade->is_packed_rgb ? 4 : 1, // pixstep for 8 bit packed formats
- 1, frame->data[plane], frame->linesize[plane]);
+ return 0;
+}
+
+static int filter_slice_alpha(AVFilterContext *ctx, void *arg, int jobnr,
+ int nb_jobs)
+{
+ FadeContext *s = ctx->priv;
+ AVFrame *frame = arg;
+ int plane = s->is_packed_rgb ? 0 : A;
+ int slice_start = (frame->height * jobnr ) / nb_jobs;
+ int slice_end = (frame->height * (jobnr+1)) / nb_jobs;
+ int i, j;
+
+ for (i = slice_start; i < slice_end; i++) {
+ uint8_t *p = frame->data[plane] + i * frame->linesize[plane] + s->is_packed_rgb*s->rgba_map[A];
+ int step = s->is_packed_rgb ? 4 : 1;
+ for (j = 0; j < frame->width; j++) {
+ /* s->factor is using 16 lower-order bits for decimal
+ * places. 32768 = 1 << 15, it is an integer representation
+ * of 0.5 and is for rounding. */
+ *p = ((*p - s->black_level) * s->factor + s->black_level_scaled) >> 16;
+ p += step;
+ }
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ FadeContext *s = ctx->priv;
+ double frame_timestamp = frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base);
+
+ // Calculate Fade assuming this is a Fade In
+ if (s->fade_state == VF_FADE_WAITING) {
+ s->factor=0;
+ if (frame_timestamp >= s->start_time/(double)AV_TIME_BASE
+ && inlink->frame_count >= s->start_frame) {
+ // Time to start fading
+ s->fade_state = VF_FADE_FADING;
+
+ // Save start time in case we are starting based on frames and fading based on time
+ if (s->start_time == 0 && s->start_frame != 0) {
+ s->start_time = frame_timestamp*(double)AV_TIME_BASE;
+ }
+
+ // Save start frame in case we are starting based on time and fading based on frames
+ if (s->start_time != 0 && s->start_frame == 0) {
+ s->start_frame = inlink->frame_count;
+ }
+ }
+ }
+ if (s->fade_state == VF_FADE_FADING) {
+ if (s->duration == 0) {
+ // Fading based on frame count
+ s->factor = (inlink->frame_count - s->start_frame) * s->fade_per_frame;
+ if (inlink->frame_count > s->start_frame + s->nb_frames) {
+ s->fade_state = VF_FADE_DONE;
+ }
+
+ } else {
+ // Fading based on duration
+ s->factor = (frame_timestamp - s->start_time/(double)AV_TIME_BASE)
+ * (float) UINT16_MAX / (s->duration/(double)AV_TIME_BASE);
+ if (frame_timestamp > s->start_time/(double)AV_TIME_BASE
+ + s->duration/(double)AV_TIME_BASE) {
+ s->fade_state = VF_FADE_DONE;
+ }
+ }
+ }
+ if (s->fade_state == VF_FADE_DONE) {
+ s->factor=UINT16_MAX;
+ }
+
+ s->factor = av_clip_uint16(s->factor);
+
+ // Invert fade_factor if Fading Out
+ if (s->type == FADE_OUT) {
+ s->factor=UINT16_MAX-s->factor;
+ }
+
+ if (s->factor < UINT16_MAX) {
+ if (s->alpha) {
+ ctx->internal->execute(ctx, filter_slice_alpha, frame, NULL,
+ FFMIN(frame->height, ctx->graph->nb_threads));
+ } else if (s->is_packed_rgb && !s->black_fade) {
+ ctx->internal->execute(ctx, filter_slice_rgb, frame, NULL,
+ FFMIN(frame->height, ctx->graph->nb_threads));
} else {
- /* luma or rgb plane */
- fade_plane(0, frame->height, inlink->w,
- fade->factor, fade->black_level, fade->black_level_scaled,
- 0, 1, // offset & pixstep for Y plane or RGB packed format
- fade->bpp, frame->data[0], frame->linesize[0]);
+ /* luma, or rgb plane in case of black */
+ ctx->internal->execute(ctx, filter_slice_luma, frame, NULL,
+ FFMIN(frame->height, ctx->graph->nb_threads));
+
if (frame->data[1] && frame->data[2]) {
/* chroma planes */
- for (plane = 1; plane < 3; plane++) {
- for (i = 0; i < frame->height; i++) {
- p = frame->data[plane] + (i >> fade->vsub) * frame->linesize[plane];
- for (j = 0; j < inlink->w >> fade->hsub; j++) {
- /* 8421367 = ((128 << 1) + 1) << 15. It is an integer
- * representation of 128.5. The .5 is for rounding
- * purposes. */
- *p = ((*p - 128) * fade->factor + 8421367) >> 16;
- p++;
- }
- }
- }
+ ctx->internal->execute(ctx, filter_slice_chroma, frame, NULL,
+ FFMIN(frame->height, ctx->graph->nb_threads));
}
}
}
- if (fade->frame_index >= fade->start_frame &&
- fade->frame_index <= fade->stop_frame)
- fade->factor += fade->fade_per_frame;
- fade->factor = av_clip_uint16(fade->factor);
- fade->frame_index++;
-
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
+
+#define OFFSET(x) offsetof(FadeContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption fade_options[] = {
+ { "type", "'in' or 'out' for fade-in/fade-out", OFFSET(type), AV_OPT_TYPE_INT, { .i64 = FADE_IN }, FADE_IN, FADE_OUT, FLAGS, "type" },
+ { "t", "'in' or 'out' for fade-in/fade-out", OFFSET(type), AV_OPT_TYPE_INT, { .i64 = FADE_IN }, FADE_IN, FADE_OUT, FLAGS, "type" },
+ { "in", "fade-in", 0, AV_OPT_TYPE_CONST, { .i64 = FADE_IN }, .unit = "type" },
+ { "out", "fade-out", 0, AV_OPT_TYPE_CONST, { .i64 = FADE_OUT }, .unit = "type" },
+ { "start_frame", "Number of the first frame to which to apply the effect.",
+ OFFSET(start_frame), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
+ { "s", "Number of the first frame to which to apply the effect.",
+ OFFSET(start_frame), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
+ { "nb_frames", "Number of frames to which the effect should be applied.",
+ OFFSET(nb_frames), AV_OPT_TYPE_INT, { .i64 = 25 }, 0, INT_MAX, FLAGS },
+ { "n", "Number of frames to which the effect should be applied.",
+ OFFSET(nb_frames), AV_OPT_TYPE_INT, { .i64 = 25 }, 0, INT_MAX, FLAGS },
+ { "alpha", "fade alpha if it is available on the input", OFFSET(alpha), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS },
+ { "start_time", "Number of seconds of the beginning of the effect.",
+ OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "st", "Number of seconds of the beginning of the effect.",
+ OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "duration", "Duration of the effect in seconds.",
+ OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "d", "Duration of the effect in seconds.",
+ OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "color", "set color", OFFSET(color_rgba), AV_OPT_TYPE_COLOR, {.str = "black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "c", "set color", OFFSET(color_rgba), AV_OPT_TYPE_COLOR, {.str = "black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(fade);
+
static const AVFilterPad avfilter_vf_fade_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_props,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
- .needs_writable = 1,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ .filter_frame = filter_frame,
+ .needs_writable = 1,
},
{ NULL }
};
@@ -232,17 +396,14 @@ static const AVFilterPad avfilter_vf_fade_outputs[] = {
{ NULL }
};
-static const char *const shorthand[] = { "type", "start_frame", "nb_frames", NULL };
-
-AVFilter avfilter_vf_fade = {
+AVFilter ff_vf_fade = {
.name = "fade",
.description = NULL_IF_CONFIG_SMALL("Fade in/out input video."),
.init = init,
.priv_size = sizeof(FadeContext),
+ .priv_class = &fade_class,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_fade_inputs,
- .outputs = avfilter_vf_fade_outputs,
- .priv_class = &fade_class,
- .shorthand = shorthand,
+ .inputs = avfilter_vf_fade_inputs,
+ .outputs = avfilter_vf_fade_outputs,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
};
diff --git a/ffmpeg/libavfilter/vf_field.c b/ffmpeg/libavfilter/vf_field.c
index fc7e043..ed12379 100644
--- a/ffmpeg/libavfilter/vf_field.c
+++ b/ffmpeg/libavfilter/vf_field.c
@@ -44,34 +44,18 @@ static const AVOption field_options[] = {
{"type", "set field type (top or bottom)", OFFSET(type), AV_OPT_TYPE_INT, {.i64=FIELD_TYPE_TOP}, 0, 1, FLAGS, "field_type" },
{"top", "select top field", 0, AV_OPT_TYPE_CONST, {.i64=FIELD_TYPE_TOP}, INT_MIN, INT_MAX, FLAGS, "field_type"},
{"bottom", "select bottom field", 0, AV_OPT_TYPE_CONST, {.i64=FIELD_TYPE_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "field_type"},
-
{NULL}
};
AVFILTER_DEFINE_CLASS(field);
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- FieldContext *field = ctx->priv;
- static const char *shorthand[] = { "type", NULL };
-
- field->class = &field_class;
- av_opt_set_defaults(field);
-
- return av_opt_set_from_string(field, args, shorthand, "=", ":");
-}
-
static int config_props_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
FieldContext *field = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
- const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
- int i;
- for (i = 0; i < desc->nb_components; i++)
- field->nb_planes = FFMAX(field->nb_planes, desc->comp[i].plane);
- field->nb_planes++;
+ field->nb_planes = av_pix_fmt_count_planes(outlink->format);
outlink->w = inlink->w;
outlink->h = (inlink->h + (field->type == FIELD_TYPE_TOP)) / 2;
@@ -101,31 +85,27 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
static const AVFilterPad field_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
static const AVFilterPad field_outputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_props_output,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props_output,
},
{ NULL }
};
-AVFilter avfilter_vf_field = {
- .name = "field",
- .description = NULL_IF_CONFIG_SMALL("Extract a field from the input video."),
-
- .priv_size = sizeof(FieldContext),
- .init = init,
-
- .inputs = field_inputs,
- .outputs = field_outputs,
- .priv_class = &field_class,
+AVFilter ff_vf_field = {
+ .name = "field",
+ .description = NULL_IF_CONFIG_SMALL("Extract a field from the input video."),
+ .priv_size = sizeof(FieldContext),
+ .inputs = field_inputs,
+ .outputs = field_outputs,
+ .priv_class = &field_class,
};
diff --git a/ffmpeg/libavfilter/vf_fieldorder.c b/ffmpeg/libavfilter/vf_fieldorder.c
index e9093a0..84088a0 100644
--- a/ffmpeg/libavfilter/vf_fieldorder.c
+++ b/ffmpeg/libavfilter/vf_fieldorder.c
@@ -23,58 +23,21 @@
* video field order filter, heavily influenced by vf_pad.c
*/
-#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
+#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
-enum FieldOrder {
- ORDER_TFF,
- ORDER_BFF,
- ORDER_NB,
-};
-
typedef struct {
const AVClass *class;
- enum FieldOrder order;
- unsigned int dst_tff; ///< output bff/tff
+ int dst_tff; ///< output bff/tff
int line_size[4]; ///< bytes of pixel data per line for each plane
} FieldOrderContext;
-#define OFFSET(x) offsetof(FieldOrderContext, x)
-#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
-
-static const AVOption fieldorder_options[] = {
- { "order", "set output field order", OFFSET(order), AV_OPT_TYPE_INT, {.i64=ORDER_TFF}, 0, ORDER_NB-1, FLAGS, "order" },
- { "tff", "set top field first", 0, AV_OPT_TYPE_CONST, {.i64=ORDER_TFF}, .flags=FLAGS, .unit="order" },
- { "bff", "set bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=ORDER_BFF}, .flags=FLAGS, .unit="order" },
- { NULL }
-};
-
-AVFILTER_DEFINE_CLASS(fieldorder);
-
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- FieldOrderContext *fieldorder = ctx->priv;
- int ret;
- static const char *shorthand[] = { "order", NULL };
-
- fieldorder->class = &fieldorder_class;
- av_opt_set_defaults(fieldorder);
-
- if ((ret = av_opt_set_from_string(fieldorder, args, shorthand, "=", ":")) < 0)
- return ret;
-
- fieldorder->dst_tff = fieldorder->order == ORDER_TFF;
- av_log(ctx, AV_LOG_VERBOSE, "tff:%d\n", fieldorder->dst_tff);
-
- return 0;
-}
-
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats;
@@ -87,8 +50,9 @@ static int query_formats(AVFilterContext *ctx)
formats = NULL;
for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
- if (!(desc->flags & PIX_FMT_HWACCEL ||
- desc->flags & PIX_FMT_BITSTREAM) &&
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
+ desc->flags & AV_PIX_FMT_FLAG_PAL ||
+ desc->flags & AV_PIX_FMT_FLAG_BITSTREAM) &&
desc->nb_components && !desc->log2_chroma_h &&
(ret = ff_add_format(&formats, pix_fmt)) < 0) {
ff_formats_unref(&formats);
@@ -104,28 +68,10 @@ static int query_formats(AVFilterContext *ctx)
static int config_input(AVFilterLink *inlink)
{
- AVFilterContext *ctx = inlink->dst;
- FieldOrderContext *fieldorder = ctx->priv;
- int plane;
-
- /** full an array with the number of bytes that the video
- * data occupies per line for each plane of the input video */
- for (plane = 0; plane < 4; plane++) {
- fieldorder->line_size[plane] = av_image_get_linesize(
- inlink->format,
- inlink->w,
- plane);
- }
-
- return 0;
-}
-
-static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
-{
- AVFilterContext *ctx = inlink->dst;
- AVFilterLink *outlink = ctx->outputs[0];
+ AVFilterContext *ctx = inlink->dst;
+ FieldOrderContext *s = ctx->priv;
- return ff_get_video_buffer(outlink, w, h);
+ return av_image_fill_linesizes(s->line_size, inlink->format, inlink->w);
}
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
@@ -133,21 +79,40 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
AVFilterContext *ctx = inlink->dst;
FieldOrderContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
- int h, plane, line_step, line_size, line;
- uint8_t *data;
+ int h, plane, src_line_step, dst_line_step, line_size, line;
+ uint8_t *dst, *src;
+ AVFrame *out;
if (!frame->interlaced_frame ||
- frame->top_field_first == s->dst_tff)
+ frame->top_field_first == s->dst_tff) {
+ av_log(ctx, AV_LOG_VERBOSE,
+ "Skipping %s.\n",
+ frame->interlaced_frame ?
+ "frame with same field order" : "progressive frame");
return ff_filter_frame(outlink, frame);
+ }
+
+ if (av_frame_is_writable(frame)) {
+ out = frame;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&frame);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, frame);
+ }
av_dlog(ctx,
"picture will move %s one line\n",
s->dst_tff ? "up" : "down");
h = frame->height;
- for (plane = 0; plane < 4 && frame->data[plane]; plane++) {
- line_step = frame->linesize[plane];
+ for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
+ dst_line_step = out->linesize[plane];
+ src_line_step = frame->linesize[plane];
line_size = s->line_size[plane];
- data = frame->data[plane];
+ dst = out->data[plane];
+ src = frame->data[plane];
if (s->dst_tff) {
/** Move every line up one line, working from
* the top to the bottom of the frame.
@@ -156,11 +121,12 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
* penultimate line from that field. */
for (line = 0; line < h; line++) {
if (1 + line < frame->height) {
- memcpy(data, data + line_step, line_size);
+ memcpy(dst, src + src_line_step, line_size);
} else {
- memcpy(data, data - line_step - line_step, line_size);
+ memcpy(dst, src - 2 * src_line_step, line_size);
}
- data += line_step;
+ dst += dst_line_step;
+ src += src_line_step;
}
} else {
/** Move every line down one line, working from
@@ -168,30 +134,44 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
* The original bottom line is lost.
* The new first line is created as a copy of the
* second line from that field. */
- data += (h - 1) * line_step;
+ dst += (h - 1) * dst_line_step;
+ src += (h - 1) * src_line_step;
for (line = h - 1; line >= 0 ; line--) {
if (line > 0) {
- memcpy(data, data - line_step, line_size);
+ memcpy(dst, src - src_line_step, line_size);
} else {
- memcpy(data, data + line_step + line_step, line_size);
+ memcpy(dst, src + 2 * src_line_step, line_size);
}
- data -= line_step;
+ dst -= dst_line_step;
+ src -= src_line_step;
}
}
}
- frame->top_field_first = s->dst_tff;
+ out->top_field_first = s->dst_tff;
- return ff_filter_frame(outlink, frame);
+ if (frame != out)
+ av_frame_free(&frame);
+ return ff_filter_frame(outlink, out);
}
+#define OFFSET(x) offsetof(FieldOrderContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption fieldorder_options[] = {
+ { "order", "output field order", OFFSET(dst_tff), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, FLAGS, "order" },
+ { "bff", "bottom field first", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, .flags=FLAGS, .unit = "order" },
+ { "tff", "top field first", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, .flags=FLAGS, .unit = "order" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(fieldorder);
+
static const AVFilterPad avfilter_vf_fieldorder_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_input,
- .get_video_buffer = get_video_buffer,
- .filter_frame = filter_frame,
- .needs_writable = 1,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -204,13 +184,13 @@ static const AVFilterPad avfilter_vf_fieldorder_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_fieldorder = {
+AVFilter ff_vf_fieldorder = {
.name = "fieldorder",
.description = NULL_IF_CONFIG_SMALL("Set the field order."),
- .init = init,
.priv_size = sizeof(FieldOrderContext),
+ .priv_class = &fieldorder_class,
.query_formats = query_formats,
.inputs = avfilter_vf_fieldorder_inputs,
.outputs = avfilter_vf_fieldorder_outputs,
- .priv_class = &fieldorder_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_format.c b/ffmpeg/libavfilter/vf_format.c
index df3c77a..2e9ff27 100644
--- a/ffmpeg/libavfilter/vf_format.c
+++ b/ffmpeg/libavfilter/vf_format.c
@@ -28,13 +28,16 @@
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+
#include "avfilter.h"
-#include "internal.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
typedef struct {
+ const AVClass *class;
+ char *pix_fmts;
/**
* List of flags telling if a given image format has been listed
* as argument to the filter.
@@ -44,17 +47,17 @@ typedef struct {
#define AV_PIX_FMT_NAME_MAXSIZE 32
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
- FormatContext *format = ctx->priv;
+ FormatContext *s = ctx->priv;
const char *cur, *sep;
char pix_fmt_name[AV_PIX_FMT_NAME_MAXSIZE];
int pix_fmt_name_len, ret;
enum AVPixelFormat pix_fmt;
/* parse the list of formats */
- for (cur = args; cur; cur = sep ? sep+1 : NULL) {
- if (!(sep = strchr(cur, ':')))
+ for (cur = s->pix_fmts; cur; cur = sep ? sep + 1 : NULL) {
+ if (!(sep = strchr(cur, '|')))
pix_fmt_name_len = strlen(cur);
else
pix_fmt_name_len = sep - cur;
@@ -69,27 +72,35 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
if ((ret = ff_parse_pixel_format(&pix_fmt, pix_fmt_name, ctx)) < 0)
return ret;
- format->listed_pix_fmt_flags[pix_fmt] = 1;
+ s->listed_pix_fmt_flags[pix_fmt] = 1;
}
return 0;
}
-static AVFilterFormats *make_format_list(FormatContext *format, int flag)
+static AVFilterFormats *make_format_list(FormatContext *s, int flag)
{
- AVFilterFormats *formats;
+ AVFilterFormats *formats = NULL;
enum AVPixelFormat pix_fmt;
- formats = av_mallocz(sizeof(AVFilterFormats));
- formats->formats = av_malloc(sizeof(enum AVPixelFormat) * AV_PIX_FMT_NB);
-
for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++)
- if (format->listed_pix_fmt_flags[pix_fmt] == flag)
- formats->formats[formats->format_count++] = pix_fmt;
+ if (s->listed_pix_fmt_flags[pix_fmt] == flag) {
+ int ret = ff_add_format(&formats, pix_fmt);
+ if (ret < 0) {
+ ff_formats_unref(&formats);
+ return NULL;
+ }
+ }
return formats;
}
+#define OFFSET(x) offsetof(FormatContext, x)
+static const AVOption options[] = {
+ { "pix_fmts", "A '|'-separated list of pixel formats", OFFSET(pix_fmts), AV_OPT_TYPE_STRING, .flags = AV_OPT_FLAG_VIDEO_PARAM },
+ { NULL }
+};
+
#if CONFIG_FORMAT_FILTER
static int query_formats_format(AVFilterContext *ctx)
{
@@ -97,6 +108,9 @@ static int query_formats_format(AVFilterContext *ctx)
return 0;
}
+#define format_options options
+AVFILTER_DEFINE_CLASS(format);
+
static const AVFilterPad avfilter_vf_format_inputs[] = {
{
.name = "default",
@@ -114,18 +128,15 @@ static const AVFilterPad avfilter_vf_format_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_format = {
- .name = "format",
- .description = NULL_IF_CONFIG_SMALL("Convert the input video to one of the specified pixel formats."),
-
- .init = init,
-
+AVFilter ff_vf_format = {
+ .name = "format",
+ .description = NULL_IF_CONFIG_SMALL("Convert the input video to one of the specified pixel formats."),
+ .init = init,
.query_formats = query_formats_format,
-
- .priv_size = sizeof(FormatContext),
-
- .inputs = avfilter_vf_format_inputs,
- .outputs = avfilter_vf_format_outputs,
+ .priv_size = sizeof(FormatContext),
+ .priv_class = &format_class,
+ .inputs = avfilter_vf_format_inputs,
+ .outputs = avfilter_vf_format_outputs,
};
#endif /* CONFIG_FORMAT_FILTER */
@@ -136,6 +147,9 @@ static int query_formats_noformat(AVFilterContext *ctx)
return 0;
}
+#define noformat_options options
+AVFILTER_DEFINE_CLASS(noformat);
+
static const AVFilterPad avfilter_vf_noformat_inputs[] = {
{
.name = "default",
@@ -153,17 +167,14 @@ static const AVFilterPad avfilter_vf_noformat_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_noformat = {
- .name = "noformat",
- .description = NULL_IF_CONFIG_SMALL("Force libavfilter not to use any of the specified pixel formats for the input to the next filter."),
-
- .init = init,
-
+AVFilter ff_vf_noformat = {
+ .name = "noformat",
+ .description = NULL_IF_CONFIG_SMALL("Force libavfilter not to use any of the specified pixel formats for the input to the next filter."),
+ .init = init,
.query_formats = query_formats_noformat,
-
- .priv_size = sizeof(FormatContext),
-
- .inputs = avfilter_vf_noformat_inputs,
- .outputs = avfilter_vf_noformat_outputs,
+ .priv_size = sizeof(FormatContext),
+ .priv_class = &noformat_class,
+ .inputs = avfilter_vf_noformat_inputs,
+ .outputs = avfilter_vf_noformat_outputs,
};
#endif /* CONFIG_NOFORMAT_FILTER */
diff --git a/ffmpeg/libavfilter/vf_fps.c b/ffmpeg/libavfilter/vf_fps.c
index 5952538..e6266cc 100644
--- a/ffmpeg/libavfilter/vf_fps.c
+++ b/ffmpeg/libavfilter/vf_fps.c
@@ -25,6 +25,9 @@
* a filter enforcing given constant framerate
*/
+#include <float.h>
+#include <stdint.h>
+
#include "libavutil/common.h"
#include "libavutil/fifo.h"
#include "libavutil/mathematics.h"
@@ -44,8 +47,9 @@ typedef struct FPSContext {
int64_t first_pts; ///< pts of the first frame that arrived on this filter
int64_t pts; ///< pts of the first frame currently in the fifo
+ double start_time; ///< pts, in seconds, of the expected first frame
+
AVRational framerate; ///< target framerate
- char *fps; ///< a string describing target framerate
int rounding; ///< AVRounding method for timestamps
/* statistics */
@@ -59,31 +63,29 @@ typedef struct FPSContext {
#define V AV_OPT_FLAG_VIDEO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption fps_options[] = {
- { "fps", "A string describing desired output framerate", OFFSET(fps), AV_OPT_TYPE_STRING, { .str = "25" }, .flags = V|F },
+ { "fps", "A string describing desired output framerate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, .flags = V|F },
+ { "start_time", "Assume the first PTS should be this value.", OFFSET(start_time), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX}, -DBL_MAX, DBL_MAX, V },
{ "round", "set rounding method for timestamps", OFFSET(rounding), AV_OPT_TYPE_INT, { .i64 = AV_ROUND_NEAR_INF }, 0, 5, V|F, "round" },
{ "zero", "round towards 0", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_ZERO }, 0, 5, V|F, "round" },
{ "inf", "round away from 0", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_INF }, 0, 5, V|F, "round" },
{ "down", "round towards -infty", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_DOWN }, 0, 5, V|F, "round" },
{ "up", "round towards +infty", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_UP }, 0, 5, V|F, "round" },
{ "near", "round to nearest", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_NEAR_INF }, 0, 5, V|F, "round" },
- { NULL },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(fps);
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
FPSContext *s = ctx->priv;
- int ret;
-
- if ((ret = av_parse_video_rate(&s->framerate, s->fps)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Error parsing framerate %s.\n", s->fps);
- return ret;
- }
if (!(s->fifo = av_fifo_alloc(2*sizeof(AVFrame*))))
return AVERROR(ENOMEM);
+ s->pts = AV_NOPTS_VALUE;
+ s->first_pts = AV_NOPTS_VALUE;
+
av_log(ctx, AV_LOG_VERBOSE, "fps=%d/%d\n", s->framerate.num, s->framerate.den);
return 0;
}
@@ -118,7 +120,6 @@ static int config_props(AVFilterLink* link)
link->frame_rate= s->framerate;
link->w = link->src->inputs[0]->w;
link->h = link->src->inputs[0]->h;
- s->pts = AV_NOPTS_VALUE;
return 0;
}
@@ -184,7 +185,17 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
if (ret < 0)
return ret;
- s->first_pts = s->pts = buf->pts;
+ if (s->start_time != DBL_MAX && s->start_time != AV_NOPTS_VALUE) {
+ double first_pts = s->start_time * AV_TIME_BASE;
+ first_pts = FFMIN(FFMAX(first_pts, INT64_MIN), INT64_MAX);
+ s->first_pts = s->pts = av_rescale_q(first_pts, AV_TIME_BASE_Q,
+ inlink->time_base);
+ av_log(ctx, AV_LOG_VERBOSE, "Set first pts to (in:%"PRId64" out:%"PRId64")\n",
+ s->first_pts, av_rescale_q(first_pts, AV_TIME_BASE_Q,
+ outlink->time_base));
+ } else {
+ s->first_pts = s->pts = buf->pts;
+ }
} else {
av_log(ctx, AV_LOG_WARNING, "Discarding initial frame(s) with no "
"timestamp.\n");
@@ -195,7 +206,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
}
/* now wait for the next timestamp */
- if (buf->pts == AV_NOPTS_VALUE) {
+ if (buf->pts == AV_NOPTS_VALUE || av_fifo_size(s->fifo) <= 0) {
return write_to_fifo(s->fifo, buf);
}
@@ -263,8 +274,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
static const AVFilterPad avfilter_vf_fps_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
@@ -280,19 +291,13 @@ static const AVFilterPad avfilter_vf_fps_outputs[] = {
{ NULL }
};
-static const char *const shorthand[] = { "fps", "round", NULL };
-
-AVFilter avfilter_vf_fps = {
+AVFilter ff_vf_fps = {
.name = "fps",
- .description = NULL_IF_CONFIG_SMALL("Force constant framerate"),
-
- .init = init,
- .uninit = uninit,
-
- .priv_size = sizeof(FPSContext),
-
- .inputs = avfilter_vf_fps_inputs,
- .outputs = avfilter_vf_fps_outputs,
- .priv_class = &fps_class,
- .shorthand = shorthand,
+ .description = NULL_IF_CONFIG_SMALL("Force constant framerate."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(FPSContext),
+ .priv_class = &fps_class,
+ .inputs = avfilter_vf_fps_inputs,
+ .outputs = avfilter_vf_fps_outputs,
};
diff --git a/ffmpeg/libavfilter/vf_framestep.c b/ffmpeg/libavfilter/vf_framestep.c
index ca68df6..9087f46 100644
--- a/ffmpeg/libavfilter/vf_framestep.c
+++ b/ffmpeg/libavfilter/vf_framestep.c
@@ -23,32 +23,25 @@
* Daniele Fornighieri <guru AT digitalfantasy it>.
*/
+#include "libavutil/opt.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
typedef struct {
- int frame_step, frame_count, frame_selected;
+ const AVClass *class;
+ int frame_step;
} FrameStepContext;
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- FrameStepContext *framestep = ctx->priv;
- char *tailptr;
- long int n = 1;
+#define OFFSET(x) offsetof(FrameStepContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
- if (args) {
- n = strtol(args, &tailptr, 10);
- if (*tailptr || n <= 0 || n >= INT_MAX) {
- av_log(ctx, AV_LOG_ERROR,
- "Invalid argument '%s', must be a positive integer <= INT_MAX\n", args);
- return AVERROR(EINVAL);
- }
- }
+static const AVOption framestep_options[] = {
+ { "step", "set frame step", OFFSET(frame_step), AV_OPT_TYPE_INT, {.i64=1}, 1, INT_MAX, FLAGS},
+ { NULL },
+};
- framestep->frame_step = n;
- return 0;
-}
+AVFILTER_DEFINE_CLASS(framestep);
static int config_output_props(AVFilterLink *outlink)
{
@@ -56,6 +49,7 @@ static int config_output_props(AVFilterLink *outlink)
FrameStepContext *framestep = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
outlink->frame_rate =
av_div_q(inlink->frame_rate, (AVRational){framestep->frame_step, 1});
@@ -70,55 +64,38 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
{
FrameStepContext *framestep = inlink->dst->priv;
- if (!(framestep->frame_count++ % framestep->frame_step)) {
- framestep->frame_selected = 1;
+ if (!(inlink->frame_count % framestep->frame_step)) {
return ff_filter_frame(inlink->dst->outputs[0], ref);
} else {
- framestep->frame_selected = 0;
av_frame_free(&ref);
return 0;
}
}
-static int request_frame(AVFilterLink *outlink)
-{
- FrameStepContext *framestep = outlink->src->priv;
- AVFilterLink *inlink = outlink->src->inputs[0];
- int ret;
-
- framestep->frame_selected = 0;
- do {
- ret = ff_request_frame(inlink);
- } while (!framestep->frame_selected && ret >= 0);
-
- return ret;
-}
-
static const AVFilterPad framestep_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
static const AVFilterPad framestep_outputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_output_props,
- .request_frame = request_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output_props,
},
{ NULL }
};
-AVFilter avfilter_vf_framestep = {
- .name = "framestep",
+AVFilter ff_vf_framestep = {
+ .name = "framestep",
.description = NULL_IF_CONFIG_SMALL("Select one frame every N frames."),
- .init = init,
- .priv_size = sizeof(FrameStepContext),
- .inputs = framestep_inputs,
- .outputs = framestep_outputs,
+ .priv_size = sizeof(FrameStepContext),
+ .priv_class = &framestep_class,
+ .inputs = framestep_inputs,
+ .outputs = framestep_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_frei0r.c b/ffmpeg/libavfilter/vf_frei0r.c
index d79dac1..a070eb4 100644
--- a/ffmpeg/libavfilter/vf_frei0r.c
+++ b/ffmpeg/libavfilter/vf_frei0r.c
@@ -22,8 +22,6 @@
* frei0r wrapper
*/
-/* #define DEBUG */
-
#include <dlfcn.h>
#include <frei0r.h>
#include <stdio.h>
@@ -35,6 +33,7 @@
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/mem.h"
+#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "formats.h"
@@ -53,6 +52,7 @@ typedef void (*f0r_set_param_value_f)(f0r_instance_t instance, f0r_param_t param
typedef void (*f0r_get_param_value_f)(f0r_instance_t instance, f0r_param_t param, int param_index);
typedef struct Frei0rContext {
+ const AVClass *class;
f0r_update_f update;
void *dl_handle; /* dynamic library handle */
f0r_instance_t instance;
@@ -64,7 +64,10 @@ typedef struct Frei0rContext {
f0r_construct_f construct;
f0r_destruct_f destruct;
f0r_deinit_f deinit;
- char params[256];
+
+ char *dl_name;
+ char *params;
+ AVRational framerate;
/* only used by the source */
int w, h;
@@ -74,8 +77,8 @@ typedef struct Frei0rContext {
static void *load_sym(AVFilterContext *ctx, const char *sym_name)
{
- Frei0rContext *frei0r = ctx->priv;
- void *sym = dlsym(frei0r->dl_handle, sym_name);
+ Frei0rContext *s = ctx->priv;
+ void *sym = dlsym(s->dl_handle, sym_name);
if (!sym)
av_log(ctx, AV_LOG_ERROR, "Could not find symbol '%s' in loaded module\n", sym_name);
return sym;
@@ -83,7 +86,7 @@ static void *load_sym(AVFilterContext *ctx, const char *sym_name)
static int set_param(AVFilterContext *ctx, f0r_param_info_t info, int index, char *param)
{
- Frei0rContext *frei0r = ctx->priv;
+ Frei0rContext *s = ctx->priv;
union {
double d;
f0r_param_color_t col;
@@ -121,7 +124,7 @@ static int set_param(AVFilterContext *ctx, f0r_param_info_t info, int index, cha
break;
}
- frei0r->set_param_value(frei0r->instance, &val, index);
+ s->set_param_value(s->instance, &val, index);
return 0;
fail:
@@ -132,20 +135,24 @@ fail:
static int set_params(AVFilterContext *ctx, const char *params)
{
- Frei0rContext *frei0r = ctx->priv;
+ Frei0rContext *s = ctx->priv;
int i;
- for (i = 0; i < frei0r->plugin_info.num_params; i++) {
+ if (!params)
+ return 0;
+
+ for (i = 0; i < s->plugin_info.num_params; i++) {
f0r_param_info_t info;
char *param;
int ret;
- frei0r->get_param_info(&info, i);
+ s->get_param_info(&info, i);
if (*params) {
- if (!(param = av_get_token(&params, ":")))
+ if (!(param = av_get_token(&params, "|")))
return AVERROR(ENOMEM);
- params++; /* skip ':' */
+ if (*params)
+ params++; /* skip ':' */
ret = set_param(ctx, info, i, param);
av_free(param);
if (ret < 0)
@@ -173,27 +180,27 @@ static int set_params(AVFilterContext *ctx, const char *params)
case F0R_PARAM_BOOL:
v = &d;
- frei0r->get_param_value(frei0r->instance, v, i);
+ s->get_param_value(s->instance, v, i);
av_log(ctx, AV_LOG_DEBUG, "%s", d >= 0.5 && d <= 1.0 ? "y" : "n");
break;
case F0R_PARAM_DOUBLE:
v = &d;
- frei0r->get_param_value(frei0r->instance, v, i);
+ s->get_param_value(s->instance, v, i);
av_log(ctx, AV_LOG_DEBUG, "%f", d);
break;
case F0R_PARAM_COLOR:
v = &col;
- frei0r->get_param_value(frei0r->instance, v, i);
+ s->get_param_value(s->instance, v, i);
av_log(ctx, AV_LOG_DEBUG, "%f/%f/%f", col.r, col.g, col.b);
break;
case F0R_PARAM_POSITION:
v = &pos;
- frei0r->get_param_value(frei0r->instance, v, i);
+ s->get_param_value(s->instance, v, i);
av_log(ctx, AV_LOG_DEBUG, "%f/%f", pos.x, pos.y);
break;
default: /* F0R_PARAM_STRING */
v = s;
- frei0r->get_param_value(frei0r->instance, v, i);
+ s->get_param_value(s->instance, v, i);
av_log(ctx, AV_LOG_DEBUG, "'%s'\n", s);
break;
}
@@ -218,13 +225,18 @@ static int load_path(AVFilterContext *ctx, void **handle_ptr, const char *prefix
static av_cold int frei0r_init(AVFilterContext *ctx,
const char *dl_name, int type)
{
- Frei0rContext *frei0r = ctx->priv;
+ Frei0rContext *s = ctx->priv;
f0r_init_f f0r_init;
f0r_get_plugin_info_f f0r_get_plugin_info;
f0r_plugin_info_t *pi;
char *path;
int ret = 0;
+ if (!dl_name) {
+ av_log(ctx, AV_LOG_ERROR, "No filter name provided.\n");
+ return AVERROR(EINVAL);
+ }
+
/* see: http://frei0r.dyne.org/codedoc/html/group__pluglocations.html */
if ((path = av_strdup(getenv("FREI0R_PATH")))) {
#ifdef _WIN32
@@ -240,11 +252,11 @@ static av_cold int frei0r_init(AVFilterContext *ctx,
ret = AVERROR(ENOMEM);
goto check_path_end;
}
- ret = load_path(ctx, &frei0r->dl_handle, p1, dl_name);
+ ret = load_path(ctx, &s->dl_handle, p1, dl_name);
av_free(p1);
if (ret < 0)
goto check_path_end;
- if (frei0r->dl_handle)
+ if (s->dl_handle)
break;
}
@@ -253,39 +265,39 @@ static av_cold int frei0r_init(AVFilterContext *ctx,
if (ret < 0)
return ret;
}
- if (!frei0r->dl_handle && (path = getenv("HOME"))) {
+ if (!s->dl_handle && (path = getenv("HOME"))) {
char *prefix = av_asprintf("%s/.frei0r-1/lib/", path);
if (!prefix)
return AVERROR(ENOMEM);
- ret = load_path(ctx, &frei0r->dl_handle, prefix, dl_name);
+ ret = load_path(ctx, &s->dl_handle, prefix, dl_name);
av_free(prefix);
if (ret < 0)
return ret;
}
- if (!frei0r->dl_handle) {
- ret = load_path(ctx, &frei0r->dl_handle, "/usr/local/lib/frei0r-1/", dl_name);
+ if (!s->dl_handle) {
+ ret = load_path(ctx, &s->dl_handle, "/usr/local/lib/frei0r-1/", dl_name);
if (ret < 0)
return ret;
}
- if (!frei0r->dl_handle) {
- ret = load_path(ctx, &frei0r->dl_handle, "/usr/lib/frei0r-1/", dl_name);
+ if (!s->dl_handle) {
+ ret = load_path(ctx, &s->dl_handle, "/usr/lib/frei0r-1/", dl_name);
if (ret < 0)
return ret;
}
- if (!frei0r->dl_handle) {
+ if (!s->dl_handle) {
av_log(ctx, AV_LOG_ERROR, "Could not find module '%s'\n", dl_name);
return AVERROR(EINVAL);
}
if (!(f0r_init = load_sym(ctx, "f0r_init" )) ||
!(f0r_get_plugin_info = load_sym(ctx, "f0r_get_plugin_info")) ||
- !(frei0r->get_param_info = load_sym(ctx, "f0r_get_param_info" )) ||
- !(frei0r->get_param_value = load_sym(ctx, "f0r_get_param_value")) ||
- !(frei0r->set_param_value = load_sym(ctx, "f0r_set_param_value")) ||
- !(frei0r->update = load_sym(ctx, "f0r_update" )) ||
- !(frei0r->construct = load_sym(ctx, "f0r_construct" )) ||
- !(frei0r->destruct = load_sym(ctx, "f0r_destruct" )) ||
- !(frei0r->deinit = load_sym(ctx, "f0r_deinit" )))
+ !(s->get_param_info = load_sym(ctx, "f0r_get_param_info" )) ||
+ !(s->get_param_value = load_sym(ctx, "f0r_get_param_value")) ||
+ !(s->set_param_value = load_sym(ctx, "f0r_set_param_value")) ||
+ !(s->update = load_sym(ctx, "f0r_update" )) ||
+ !(s->construct = load_sym(ctx, "f0r_construct" )) ||
+ !(s->destruct = load_sym(ctx, "f0r_destruct" )) ||
+ !(s->deinit = load_sym(ctx, "f0r_deinit" )))
return AVERROR(EINVAL);
if (f0r_init() < 0) {
@@ -293,8 +305,8 @@ static av_cold int frei0r_init(AVFilterContext *ctx,
return AVERROR(EINVAL);
}
- f0r_get_plugin_info(&frei0r->plugin_info);
- pi = &frei0r->plugin_info;
+ f0r_get_plugin_info(&s->plugin_info);
+ pi = &s->plugin_info;
if (pi->plugin_type != type) {
av_log(ctx, AV_LOG_ERROR,
"Invalid type '%s' for the plugin\n",
@@ -317,53 +329,48 @@ static av_cold int frei0r_init(AVFilterContext *ctx,
return 0;
}
-static av_cold int filter_init(AVFilterContext *ctx, const char *args)
+static av_cold int filter_init(AVFilterContext *ctx)
{
- Frei0rContext *frei0r = ctx->priv;
- char dl_name[1024], c;
- *frei0r->params = 0;
+ Frei0rContext *s = ctx->priv;
- if (args)
- sscanf(args, "%1023[^:=]%c%255c", dl_name, &c, frei0r->params);
-
- return frei0r_init(ctx, dl_name, F0R_PLUGIN_TYPE_FILTER);
+ return frei0r_init(ctx, s->dl_name, F0R_PLUGIN_TYPE_FILTER);
}
static av_cold void uninit(AVFilterContext *ctx)
{
- Frei0rContext *frei0r = ctx->priv;
-
- if (frei0r->destruct && frei0r->instance)
- frei0r->destruct(frei0r->instance);
- if (frei0r->deinit)
- frei0r->deinit();
- if (frei0r->dl_handle)
- dlclose(frei0r->dl_handle);
-
- memset(frei0r, 0, sizeof(*frei0r));
+ Frei0rContext *s = ctx->priv;
+
+ if (s->destruct && s->instance)
+ s->destruct(s->instance);
+ if (s->deinit)
+ s->deinit();
+ if (s->dl_handle)
+ dlclose(s->dl_handle);
}
static int config_input_props(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
- Frei0rContext *frei0r = ctx->priv;
+ Frei0rContext *s = ctx->priv;
- if (!(frei0r->instance = frei0r->construct(inlink->w, inlink->h))) {
+ if (s->destruct && s->instance)
+ s->destruct(s->instance);
+ if (!(s->instance = s->construct(inlink->w, inlink->h))) {
av_log(ctx, AV_LOG_ERROR, "Impossible to load frei0r instance\n");
return AVERROR(EINVAL);
}
- return set_params(ctx, frei0r->params);
+ return set_params(ctx, s->params);
}
static int query_formats(AVFilterContext *ctx)
{
- Frei0rContext *frei0r = ctx->priv;
+ Frei0rContext *s = ctx->priv;
AVFilterFormats *formats = NULL;
- if (frei0r->plugin_info.color_model == F0R_COLOR_MODEL_BGRA8888) {
+ if (s->plugin_info.color_model == F0R_COLOR_MODEL_BGRA8888) {
ff_add_format(&formats, AV_PIX_FMT_BGRA);
- } else if (frei0r->plugin_info.color_model == F0R_COLOR_MODEL_RGBA8888) {
+ } else if (s->plugin_info.color_model == F0R_COLOR_MODEL_RGBA8888) {
ff_add_format(&formats, AV_PIX_FMT_RGBA);
} else { /* F0R_COLOR_MODEL_PACKED32 */
static const enum AVPixelFormat pix_fmts[] = {
@@ -381,7 +388,7 @@ static int query_formats(AVFilterContext *ctx)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
- Frei0rContext *frei0r = inlink->dst->priv;
+ Frei0rContext *s = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
@@ -392,7 +399,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
av_frame_copy_props(out, in);
- frei0r->update(frei0r->instance, in->pts * av_q2d(inlink->time_base) * 1000,
+ s->update(s->instance, in->pts * av_q2d(inlink->time_base) * 1000,
(const uint32_t *)in->data[0],
(uint32_t *)out->data[0]);
@@ -401,6 +408,16 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
return ff_filter_frame(outlink, out);
}
+#define OFFSET(x) offsetof(Frei0rContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption frei0r_options[] = {
+ { "filter_name", NULL, OFFSET(dl_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "filter_params", NULL, OFFSET(params), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(frei0r);
+
static const AVFilterPad avfilter_vf_frei0r_inputs[] = {
{
.name = "default",
@@ -419,87 +436,77 @@ static const AVFilterPad avfilter_vf_frei0r_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_frei0r = {
- .name = "frei0r",
- .description = NULL_IF_CONFIG_SMALL("Apply a frei0r effect."),
-
+AVFilter ff_vf_frei0r = {
+ .name = "frei0r",
+ .description = NULL_IF_CONFIG_SMALL("Apply a frei0r effect."),
.query_formats = query_formats,
- .init = filter_init,
- .uninit = uninit,
-
- .priv_size = sizeof(Frei0rContext),
-
- .inputs = avfilter_vf_frei0r_inputs,
-
- .outputs = avfilter_vf_frei0r_outputs,
+ .init = filter_init,
+ .uninit = uninit,
+ .priv_size = sizeof(Frei0rContext),
+ .priv_class = &frei0r_class,
+ .inputs = avfilter_vf_frei0r_inputs,
+ .outputs = avfilter_vf_frei0r_outputs,
};
-static av_cold int source_init(AVFilterContext *ctx, const char *args)
+static av_cold int source_init(AVFilterContext *ctx)
{
- Frei0rContext *frei0r = ctx->priv;
- char dl_name[1024], c;
- char frame_size[128] = "";
- char frame_rate[128] = "";
- AVRational frame_rate_q;
-
- memset(frei0r->params, 0, sizeof(frei0r->params));
-
- if (args)
- sscanf(args, "%127[^:]:%127[^:]:%1023[^:=]%c%255c",
- frame_size, frame_rate, dl_name, &c, frei0r->params);
-
- if (av_parse_video_size(&frei0r->w, &frei0r->h, frame_size) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame size: '%s'\n", frame_size);
- return AVERROR(EINVAL);
- }
+ Frei0rContext *s = ctx->priv;
- if (av_parse_video_rate(&frame_rate_q, frame_rate) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: '%s'\n", frame_rate);
- return AVERROR(EINVAL);
- }
- frei0r->time_base.num = frame_rate_q.den;
- frei0r->time_base.den = frame_rate_q.num;
+ s->time_base.num = s->framerate.den;
+ s->time_base.den = s->framerate.num;
- return frei0r_init(ctx, dl_name, F0R_PLUGIN_TYPE_SOURCE);
+ return frei0r_init(ctx, s->dl_name, F0R_PLUGIN_TYPE_SOURCE);
}
static int source_config_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
- Frei0rContext *frei0r = ctx->priv;
+ Frei0rContext *s = ctx->priv;
- if (av_image_check_size(frei0r->w, frei0r->h, 0, ctx) < 0)
+ if (av_image_check_size(s->w, s->h, 0, ctx) < 0)
return AVERROR(EINVAL);
- outlink->w = frei0r->w;
- outlink->h = frei0r->h;
- outlink->time_base = frei0r->time_base;
+ outlink->w = s->w;
+ outlink->h = s->h;
+ outlink->time_base = s->time_base;
outlink->sample_aspect_ratio = (AVRational){1,1};
- if (!(frei0r->instance = frei0r->construct(outlink->w, outlink->h))) {
+ if (s->destruct && s->instance)
+ s->destruct(s->instance);
+ if (!(s->instance = s->construct(outlink->w, outlink->h))) {
av_log(ctx, AV_LOG_ERROR, "Impossible to load frei0r instance\n");
return AVERROR(EINVAL);
}
- return set_params(ctx, frei0r->params);
+ return set_params(ctx, s->params);
}
static int source_request_frame(AVFilterLink *outlink)
{
- Frei0rContext *frei0r = outlink->src->priv;
+ Frei0rContext *s = outlink->src->priv;
AVFrame *frame = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!frame)
return AVERROR(ENOMEM);
frame->sample_aspect_ratio = (AVRational) {1, 1};
- frame->pts = frei0r->pts++;
+ frame->pts = s->pts++;
- frei0r->update(frei0r->instance, av_rescale_q(frame->pts, frei0r->time_base, (AVRational){1,1000}),
+ s->update(s->instance, av_rescale_q(frame->pts, s->time_base, (AVRational){1,1000}),
NULL, (uint32_t *)frame->data[0]);
return ff_filter_frame(outlink, frame);
}
+static const AVOption frei0r_src_options[] = {
+ { "size", "Dimensions of the generated video.", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, { .str = "320x240" }, .flags = FLAGS },
+ { "framerate", NULL, OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, .flags = FLAGS },
+ { "filter_name", NULL, OFFSET(dl_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "filter_params", NULL, OFFSET(params), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { NULL },
+};
+
+AVFILTER_DEFINE_CLASS(frei0r_src);
+
static const AVFilterPad avfilter_vsrc_frei0r_src_outputs[] = {
{
.name = "default",
@@ -510,17 +517,14 @@ static const AVFilterPad avfilter_vsrc_frei0r_src_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vsrc_frei0r_src = {
- .name = "frei0r_src",
- .description = NULL_IF_CONFIG_SMALL("Generate a frei0r source."),
-
- .priv_size = sizeof(Frei0rContext),
- .init = source_init,
- .uninit = uninit,
-
+AVFilter ff_vsrc_frei0r_src = {
+ .name = "frei0r_src",
+ .description = NULL_IF_CONFIG_SMALL("Generate a frei0r source."),
+ .priv_size = sizeof(Frei0rContext),
+ .priv_class = &frei0r_src_class,
+ .init = source_init,
+ .uninit = uninit,
.query_formats = query_formats,
-
- .inputs = NULL,
-
- .outputs = avfilter_vsrc_frei0r_src_outputs,
+ .inputs = NULL,
+ .outputs = avfilter_vsrc_frei0r_src_outputs,
};
diff --git a/ffmpeg/libavfilter/vf_geq.c b/ffmpeg/libavfilter/vf_geq.c
index 373f0f0..49a3e62 100644
--- a/ffmpeg/libavfilter/vf_geq.c
+++ b/ffmpeg/libavfilter/vf_geq.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
- * Copyright (C) 2012 Clément Bœsch <ubitux@gmail.com>
+ * Copyright (C) 2012 Clément Bœsch <u pkh me>
*
* This file is part of FFmpeg.
*
@@ -35,21 +35,33 @@
typedef struct {
const AVClass *class;
AVExpr *e[4]; ///< expressions for each plane
- char *expr_str[4]; ///< expression strings for each plane
- int framenum; ///< frame counter
+ char *expr_str[4+3]; ///< expression strings for each plane
AVFrame *picref; ///< current input buffer
int hsub, vsub; ///< chroma subsampling
int planes; ///< number of planes
+ int is_rgb;
} GEQContext;
+enum { Y = 0, U, V, A, G, B, R };
+
#define OFFSET(x) offsetof(GEQContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption geq_options[] = {
- { "lum_expr", "set luminance expression", OFFSET(expr_str[0]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "cb_expr", "set chroma blue expression", OFFSET(expr_str[1]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "cr_expr", "set chroma red expression", OFFSET(expr_str[2]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "alpha_expr", "set alpha expression", OFFSET(expr_str[3]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "lum_expr", "set luminance expression", OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "lum", "set luminance expression", OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "cb_expr", "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "cb", "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "cr_expr", "set chroma red expression", OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "cr", "set chroma red expression", OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "alpha_expr", "set alpha expression", OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "a", "set alpha expression", OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "red_expr", "set red expression", OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "r", "set red expression", OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "green_expr", "set green expression", OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "g", "set green expression", OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "blue_expr", "set blue expression", OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "b", "set blue expression", OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{NULL},
};
@@ -62,8 +74,8 @@ static inline double getpix(void *priv, double x, double y, int plane)
AVFrame *picref = geq->picref;
const uint8_t *src = picref->data[plane];
const int linesize = picref->linesize[plane];
- const int w = picref->width >> ((plane == 1 || plane == 2) ? geq->hsub : 0);
- const int h = picref->height >> ((plane == 1 || plane == 2) ? geq->vsub : 0);
+ const int w = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(picref->width, geq->hsub) : picref->width;
+ const int h = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(picref->height, geq->vsub) : picref->height;
if (!src)
return 0;
@@ -88,48 +100,59 @@ static double alpha(void *priv, double x, double y) { return getpix(priv, x, y,
static const char *const var_names[] = { "X", "Y", "W", "H", "N", "SW", "SH", "T", NULL };
enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_N, VAR_SW, VAR_SH, VAR_T, VAR_VARS_NB };
-static av_cold int geq_init(AVFilterContext *ctx, const char *args)
+static av_cold int geq_init(AVFilterContext *ctx)
{
GEQContext *geq = ctx->priv;
int plane, ret = 0;
- static const char *shorthand[] = { "lum_expr", "cb_expr", "cr_expr", "alpha_expr", NULL };
-
- geq->class = &geq_class;
- av_opt_set_defaults(geq);
- if ((ret = av_opt_set_from_string(geq, args, shorthand, "=", ":")) < 0)
- return ret;
+ if (!geq->expr_str[Y] && !geq->expr_str[G] && !geq->expr_str[B] && !geq->expr_str[R]) {
+ av_log(ctx, AV_LOG_ERROR, "A luminance or RGB expression is mandatory\n");
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+ geq->is_rgb = !geq->expr_str[Y];
- if (!geq->expr_str[0]) {
- av_log(ctx, AV_LOG_ERROR, "Luminance expression is mandatory\n");
+ if ((geq->expr_str[Y] || geq->expr_str[U] || geq->expr_str[V]) && (geq->expr_str[G] || geq->expr_str[B] || geq->expr_str[R])) {
+ av_log(ctx, AV_LOG_ERROR, "Either YCbCr or RGB but not both must be specified\n");
ret = AVERROR(EINVAL);
goto end;
}
- if (!geq->expr_str[1] && !geq->expr_str[2]) {
+ if (!geq->expr_str[U] && !geq->expr_str[V]) {
/* No chroma at all: fallback on luma */
- geq->expr_str[1] = av_strdup(geq->expr_str[0]);
- geq->expr_str[2] = av_strdup(geq->expr_str[0]);
+ geq->expr_str[U] = av_strdup(geq->expr_str[Y]);
+ geq->expr_str[V] = av_strdup(geq->expr_str[Y]);
} else {
/* One chroma unspecified, fallback on the other */
- if (!geq->expr_str[1]) geq->expr_str[1] = av_strdup(geq->expr_str[2]);
- if (!geq->expr_str[2]) geq->expr_str[2] = av_strdup(geq->expr_str[1]);
+ if (!geq->expr_str[U]) geq->expr_str[U] = av_strdup(geq->expr_str[V]);
+ if (!geq->expr_str[V]) geq->expr_str[V] = av_strdup(geq->expr_str[U]);
}
- if (!geq->expr_str[3])
- geq->expr_str[3] = av_strdup("255");
-
- if (!geq->expr_str[1] || !geq->expr_str[2] || !geq->expr_str[3]) {
+ if (!geq->expr_str[A])
+ geq->expr_str[A] = av_strdup("255");
+ if (!geq->expr_str[G])
+ geq->expr_str[G] = av_strdup("g(X,Y)");
+ if (!geq->expr_str[B])
+ geq->expr_str[B] = av_strdup("b(X,Y)");
+ if (!geq->expr_str[R])
+ geq->expr_str[R] = av_strdup("r(X,Y)");
+
+ if (geq->is_rgb ?
+ (!geq->expr_str[G] || !geq->expr_str[B] || !geq->expr_str[R])
+ :
+ (!geq->expr_str[U] || !geq->expr_str[V] || !geq->expr_str[A])) {
ret = AVERROR(ENOMEM);
goto end;
}
for (plane = 0; plane < 4; plane++) {
static double (*p[])(void *, double, double) = { lum, cb, cr, alpha };
- static const char *const func2_names[] = { "lum", "cb", "cr", "alpha", "p", NULL };
+ static const char *const func2_yuv_names[] = { "lum", "cb", "cr", "alpha", "p", NULL };
+ static const char *const func2_rgb_names[] = { "g", "b", "r", "alpha", "p", NULL };
+ const char *const *func2_names = geq->is_rgb ? func2_rgb_names : func2_yuv_names;
double (*func2[])(void *, double, double) = { lum, cb, cr, alpha, p[plane], NULL };
- ret = av_expr_parse(&geq->e[plane], geq->expr_str[plane], var_names,
+ ret = av_expr_parse(&geq->e[plane], geq->expr_str[plane < 3 && geq->is_rgb ? plane+4 : plane], var_names,
NULL, NULL, func2_names, func2, 0, ctx);
if (ret < 0)
break;
@@ -141,14 +164,22 @@ end:
static int geq_query_formats(AVFilterContext *ctx)
{
- static const enum PixelFormat pix_fmts[] = {
+ GEQContext *geq = ctx->priv;
+ static const enum PixelFormat yuv_pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_GRAY8,
AV_PIX_FMT_NONE
};
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ static const enum PixelFormat rgb_pix_fmts[] = {
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_NONE
+ };
+ if (geq->is_rgb) {
+ ff_set_common_formats(ctx, ff_make_format_list(rgb_pix_fmts));
+ } else
+ ff_set_common_formats(ctx, ff_make_format_list(yuv_pix_fmts));
return 0;
}
@@ -170,7 +201,7 @@ static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in)
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
double values[VAR_VARS_NB] = {
- [VAR_N] = geq->framenum++,
+ [VAR_N] = inlink->frame_count,
[VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base),
};
@@ -186,8 +217,8 @@ static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in)
int x, y;
uint8_t *dst = out->data[plane];
const int linesize = out->linesize[plane];
- const int w = inlink->w >> ((plane == 1 || plane == 2) ? geq->hsub : 0);
- const int h = inlink->h >> ((plane == 1 || plane == 2) ? geq->vsub : 0);
+ const int w = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->w, geq->hsub) : inlink->w;
+ const int h = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->h, geq->vsub) : inlink->h;
values[VAR_W] = w;
values[VAR_H] = h;
@@ -215,7 +246,6 @@ static av_cold void geq_uninit(AVFilterContext *ctx)
for (i = 0; i < FF_ARRAY_ELEMS(geq->e); i++)
av_expr_free(geq->e[i]);
- av_opt_free(geq);
}
static const AVFilterPad geq_inputs[] = {
@@ -236,7 +266,7 @@ static const AVFilterPad geq_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_geq = {
+AVFilter ff_vf_geq = {
.name = "geq",
.description = NULL_IF_CONFIG_SMALL("Apply generic equation to each pixel."),
.priv_size = sizeof(GEQContext),
@@ -246,4 +276,5 @@ AVFilter avfilter_vf_geq = {
.inputs = geq_inputs,
.outputs = geq_outputs,
.priv_class = &geq_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_gradfun.c b/ffmpeg/libavfilter/vf_gradfun.c
index 9e37761..0da9e0b 100644
--- a/ffmpeg/libavfilter/vf_gradfun.c
+++ b/ffmpeg/libavfilter/vf_gradfun.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2010 Nolan Lum <nol888@gmail.com>
- * Copyright (c) 2009 Loren Merritt <lorenm@u.washignton.edu>
+ * Copyright (c) 2009 Loren Merritt <lorenm@u.washington.edu>
*
* This file is part of FFmpeg.
*
@@ -25,7 +25,7 @@
* libmpcodecs/vf_gradfun.c
*
* Apply a boxblur debanding algorithm (based on the gradfun2db
- * Avisynth filter by prunedtree).
+ * AviSynth filter by prunedtree).
* Foreach pixel, if it's within threshold of the blurred value, make it closer.
* So now we have a smoothed and higher bitdepth version of all the shallow
* gradients, while leaving detailed areas untouched.
@@ -35,25 +35,14 @@
#include "libavutil/imgutils.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
-#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "gradfun.h"
#include "internal.h"
#include "video.h"
-#define OFFSET(x) offsetof(GradFunContext, x)
-#define F AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
-
-static const AVOption gradfun_options[] = {
- { "strength", "set the maximum amount by which the filter will change any one pixel", OFFSET(strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.2}, 0.51, 64, F },
- { "radius", "set the neighborhood to fit the gradient to", OFFSET(radius), AV_OPT_TYPE_INT, {.i64 = 16}, 4, 32, F },
- { NULL }
-};
-
-AVFILTER_DEFINE_CLASS(gradfun);
-
DECLARE_ALIGNED(16, static const uint16_t, dither)[8][8] = {
{0x00,0x60,0x18,0x78,0x06,0x66,0x1E,0x7E},
{0x40,0x20,0x58,0x38,0x46,0x26,0x5E,0x3E},
@@ -129,48 +118,41 @@ static void filter(GradFunContext *ctx, uint8_t *dst, const uint8_t *src, int wi
ctx->filter_line(dst + y * dst_linesize, src + y * src_linesize, dc - r / 2, width, thresh, dither[y & 7]);
if (++y >= height) break;
}
+ emms_c();
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
- int ret;
- GradFunContext *gf = ctx->priv;
- static const char *shorthand[] = { "strength", "radius", NULL };
-
- gf->class = &gradfun_class;
- av_opt_set_defaults(gf);
+ GradFunContext *s = ctx->priv;
- if ((ret = av_opt_set_from_string(gf, args, shorthand, "=", ":")) < 0)
- return ret;
+ s->thresh = (1 << 15) / s->strength;
+ s->radius = av_clip((s->radius + 1) & ~1, 4, 32);
- gf->thresh = (1 << 15) / gf->strength;
- gf->radius = av_clip((gf->radius + 1) & ~1, 4, 32);
-
- gf->blur_line = ff_gradfun_blur_line_c;
- gf->filter_line = ff_gradfun_filter_line_c;
+ s->blur_line = ff_gradfun_blur_line_c;
+ s->filter_line = ff_gradfun_filter_line_c;
if (ARCH_X86)
- ff_gradfun_init_x86(gf);
+ ff_gradfun_init_x86(s);
- av_log(ctx, AV_LOG_VERBOSE, "threshold:%.2f radius:%d\n", gf->strength, gf->radius);
+ av_log(ctx, AV_LOG_VERBOSE, "threshold:%.2f radius:%d\n", s->strength, s->radius);
return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
- GradFunContext *gf = ctx->priv;
- av_freep(&gf->buf);
+ GradFunContext *s = ctx->priv;
+ av_freep(&s->buf);
}
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV420P,
- AV_PIX_FMT_GRAY8, AV_PIX_FMT_NV12,
- AV_PIX_FMT_NV21, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P,
AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_GBRP,
AV_PIX_FMT_NONE
};
@@ -181,25 +163,26 @@ static int query_formats(AVFilterContext *ctx)
static int config_input(AVFilterLink *inlink)
{
- GradFunContext *gf = inlink->dst->priv;
+ GradFunContext *s = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int hsub = desc->log2_chroma_w;
int vsub = desc->log2_chroma_h;
- gf->buf = av_mallocz((FFALIGN(inlink->w, 16) * (gf->radius + 1) / 2 + 32) * sizeof(uint16_t));
- if (!gf->buf)
+ av_freep(&s->buf);
+ s->buf = av_calloc((FFALIGN(inlink->w, 16) * (s->radius + 1) / 2 + 32), sizeof(*s->buf));
+ if (!s->buf)
return AVERROR(ENOMEM);
- gf->chroma_w = -((-inlink->w) >> hsub);
- gf->chroma_h = -((-inlink->h) >> vsub);
- gf->chroma_r = av_clip(((((gf->radius >> hsub) + (gf->radius >> vsub)) / 2 ) + 1) & ~1, 4, 32);
+ s->chroma_w = FF_CEIL_RSHIFT(inlink->w, hsub);
+ s->chroma_h = FF_CEIL_RSHIFT(inlink->h, vsub);
+ s->chroma_r = av_clip(((((s->radius >> hsub) + (s->radius >> vsub)) / 2 ) + 1) & ~1, 4, 32);
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
- GradFunContext *gf = inlink->dst->priv;
+ GradFunContext *s = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
int p, direct;
@@ -217,18 +200,18 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
av_frame_copy_props(out, in);
}
- for (p = 0; p < 4 && in->data[p]; p++) {
+ for (p = 0; p < 4 && in->data[p] && in->linesize[p]; p++) {
int w = inlink->w;
int h = inlink->h;
- int r = gf->radius;
+ int r = s->radius;
if (p) {
- w = gf->chroma_w;
- h = gf->chroma_h;
- r = gf->chroma_r;
+ w = s->chroma_w;
+ h = s->chroma_h;
+ r = s->chroma_r;
}
if (FFMIN(w, h) > 2 * r)
- filter(gf, out->data[p], in->data[p], w, h, out->linesize[p], in->linesize[p], r);
+ filter(s, out->data[p], in->data[p], w, h, out->linesize[p], in->linesize[p], r);
else if (out->data[p] != in->data[p])
av_image_copy_plane(out->data[p], out->linesize[p], in->data[p], in->linesize[p], w, h);
}
@@ -239,6 +222,17 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
return ff_filter_frame(outlink, out);
}
+#define OFFSET(x) offsetof(GradFunContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption gradfun_options[] = {
+ { "strength", "The maximum amount by which the filter will change any one pixel.", OFFSET(strength), AV_OPT_TYPE_FLOAT, { .dbl = 1.2 }, 0.51, 64, FLAGS },
+ { "radius", "The neighborhood to fit the gradient to.", OFFSET(radius), AV_OPT_TYPE_INT, { .i64 = 16 }, 4, 32, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(gradfun);
+
static const AVFilterPad avfilter_vf_gradfun_inputs[] = {
{
.name = "default",
@@ -257,14 +251,15 @@ static const AVFilterPad avfilter_vf_gradfun_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_gradfun = {
+AVFilter ff_vf_gradfun = {
.name = "gradfun",
.description = NULL_IF_CONFIG_SMALL("Debands video quickly using gradients."),
.priv_size = sizeof(GradFunContext),
+ .priv_class = &gradfun_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = avfilter_vf_gradfun_inputs,
.outputs = avfilter_vf_gradfun_outputs,
- .priv_class = &gradfun_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_hflip.c b/ffmpeg/libavfilter/vf_hflip.c
index cb51981..19b6606 100644
--- a/ffmpeg/libavfilter/vf_hflip.c
+++ b/ffmpeg/libavfilter/vf_hflip.c
@@ -37,7 +37,8 @@
typedef struct {
int max_step[4]; ///< max pixel step for each plane, expressed as a number of bytes
- int hsub, vsub; ///< chroma subsampling
+ int planewidth[4]; ///< width of each plane
+ int planeheight[4]; ///< height of each plane
} FlipContext;
static int query_formats(AVFilterContext *ctx)
@@ -47,8 +48,8 @@ static int query_formats(AVFilterContext *ctx)
for (fmt = 0; fmt < AV_PIX_FMT_NB; fmt++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
- if (!(desc->flags & PIX_FMT_HWACCEL ||
- desc->flags & PIX_FMT_BITSTREAM ||
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
+ desc->flags & AV_PIX_FMT_FLAG_BITSTREAM ||
(desc->log2_chroma_w != desc->log2_chroma_h &&
desc->comp[0].plane == desc->comp[1].plane)))
ff_add_format(&pix_fmts, fmt);
@@ -60,47 +61,47 @@ static int query_formats(AVFilterContext *ctx)
static int config_props(AVFilterLink *inlink)
{
- FlipContext *flip = inlink->dst->priv;
+ FlipContext *s = inlink->dst->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+ const int hsub = pix_desc->log2_chroma_w;
+ const int vsub = pix_desc->log2_chroma_h;
- av_image_fill_max_pixsteps(flip->max_step, NULL, pix_desc);
- flip->hsub = pix_desc->log2_chroma_w;
- flip->vsub = pix_desc->log2_chroma_h;
+ av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc);
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+ s->planewidth[1] = s->planewidth[2] = FF_CEIL_RSHIFT(inlink->w, hsub);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+ s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, vsub);
return 0;
}
-static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+typedef struct ThreadData {
+ AVFrame *in, *out;
+} ThreadData;
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
{
- AVFilterContext *ctx = inlink->dst;
- FlipContext *flip = ctx->priv;
- AVFilterLink *outlink = ctx->outputs[0];
- AVFrame *out;
+ FlipContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
uint8_t *inrow, *outrow;
- int i, j, plane, step, hsub, vsub;
-
- out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
- if (!out) {
- av_frame_free(&in);
- return AVERROR(ENOMEM);
- }
- av_frame_copy_props(out, in);
+ int i, j, plane, step;
- /* copy palette if required */
- if (av_pix_fmt_desc_get(inlink->format)->flags & PIX_FMT_PAL)
- memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);
+ for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
+ const int width = s->planewidth[plane];
+ const int height = s->planeheight[plane];
+ const int start = (height * job ) / nb_jobs;
+ const int end = (height * (job+1)) / nb_jobs;
- for (plane = 0; plane < 4 && in->data[plane]; plane++) {
- step = flip->max_step[plane];
- hsub = (plane == 1 || plane == 2) ? flip->hsub : 0;
- vsub = (plane == 1 || plane == 2) ? flip->vsub : 0;
+ step = s->max_step[plane];
- outrow = out->data[plane];
- inrow = in ->data[plane] + ((inlink->w >> hsub) - 1) * step;
- for (i = 0; i < in->height >> vsub; i++) {
+ outrow = out->data[plane] + start * out->linesize[plane];
+ inrow = in ->data[plane] + start * in->linesize[plane] + (width - 1) * step;
+ for (i = start; i < end; i++) {
switch (step) {
case 1:
- for (j = 0; j < (inlink->w >> hsub); j++)
+ for (j = 0; j < width; j++)
outrow[j] = inrow[-j];
break;
@@ -108,7 +109,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
uint16_t *outrow16 = (uint16_t *)outrow;
uint16_t * inrow16 = (uint16_t *) inrow;
- for (j = 0; j < (inlink->w >> hsub); j++)
+ for (j = 0; j < width; j++)
outrow16[j] = inrow16[-j];
}
break;
@@ -117,7 +118,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
uint8_t *in = inrow;
uint8_t *out = outrow;
- for (j = 0; j < (inlink->w >> hsub); j++, out += 3, in -= 3) {
+ for (j = 0; j < width; j++, out += 3, in -= 3) {
int32_t v = AV_RB24(in);
AV_WB24(out, v);
}
@@ -128,13 +129,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
uint32_t *outrow32 = (uint32_t *)outrow;
uint32_t * inrow32 = (uint32_t *) inrow;
- for (j = 0; j < (inlink->w >> hsub); j++)
+ for (j = 0; j < width; j++)
outrow32[j] = inrow32[-j];
}
break;
default:
- for (j = 0; j < (inlink->w >> hsub); j++)
+ for (j = 0; j < width; j++)
memcpy(outrow + j*step, inrow - j*step, step);
}
@@ -143,6 +144,30 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
}
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ThreadData td;
+ AVFrame *out;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ /* copy palette if required */
+ if (av_pix_fmt_desc_get(inlink->format)->flags & AV_PIX_FMT_FLAG_PAL)
+ memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);
+
+ td.in = in, td.out = out;
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ctx->graph->nb_threads));
+
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
@@ -165,12 +190,12 @@ static const AVFilterPad avfilter_vf_hflip_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_hflip = {
- .name = "hflip",
- .description = NULL_IF_CONFIG_SMALL("Horizontally flip the input video."),
- .priv_size = sizeof(FlipContext),
+AVFilter ff_vf_hflip = {
+ .name = "hflip",
+ .description = NULL_IF_CONFIG_SMALL("Horizontally flip the input video."),
+ .priv_size = sizeof(FlipContext),
.query_formats = query_formats,
-
- .inputs = avfilter_vf_hflip_inputs,
- .outputs = avfilter_vf_hflip_outputs,
+ .inputs = avfilter_vf_hflip_inputs,
+ .outputs = avfilter_vf_hflip_outputs,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
};
diff --git a/ffmpeg/libavfilter/vf_histeq.c b/ffmpeg/libavfilter/vf_histeq.c
index a9cb60e..6fdb7be 100644
--- a/ffmpeg/libavfilter/vf_histeq.c
+++ b/ffmpeg/libavfilter/vf_histeq.c
@@ -58,7 +58,6 @@ typedef struct {
float strength;
float intensity;
enum HisteqAntibanding antibanding;
- char* antibanding_str;
int in_histogram [256]; ///< input histogram
int out_histogram[256]; ///< output histogram
int LUT[256]; ///< lookup table derived from histogram[]
@@ -82,17 +81,9 @@ static const AVOption histeq_options[] = {
AVFILTER_DEFINE_CLASS(histeq);
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
HisteqContext *histeq = ctx->priv;
- const char *shorthand[] = { "strength", "intensity", "antibanding", NULL };
- int ret;
-
- histeq->class = &histeq_class;
- av_opt_set_defaults(histeq);
-
- if ((ret = av_opt_set_from_string(histeq, args, shorthand, "=", ":")) < 0)
- return ret;
av_log(ctx, AV_LOG_VERBOSE,
"strength:%0.3f intensity:%0.3f antibanding:%d\n",
@@ -101,12 +92,6 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
return 0;
}
-static av_cold void uninit(AVFilterContext *ctx)
-{
- HisteqContext *histeq = ctx->priv;
- av_opt_free(histeq);
-}
-
static int query_formats(AVFilterContext *ctx)
{
static const enum PixelFormat pix_fmts[] = {
@@ -249,7 +234,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
dst[x + histeq->rgba_map[R]] = r;
dst[x + histeq->rgba_map[G]] = g;
dst[x + histeq->rgba_map[B]] = b;
- oluma = (55 * r + 182 * g + 19 * b) >> 8;
+ oluma = av_clip_uint8((55 * r + 182 * g + 19 * b) >> 8);
histeq->out_histogram[oluma]++;
}
}
@@ -267,31 +252,30 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
static const AVFilterPad histeq_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_input,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
},
{ NULL }
};
static const AVFilterPad histeq_outputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
-AVFilter avfilter_vf_histeq = {
+AVFilter ff_vf_histeq = {
.name = "histeq",
.description = NULL_IF_CONFIG_SMALL("Apply global color histogram equalization."),
.priv_size = sizeof(HisteqContext),
.init = init,
- .uninit = uninit,
.query_formats = query_formats,
-
.inputs = histeq_inputs,
.outputs = histeq_outputs,
.priv_class = &histeq_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_histogram.c b/ffmpeg/libavfilter/vf_histogram.c
index 37e1986..34656b5 100644
--- a/ffmpeg/libavfilter/vf_histogram.c
+++ b/ffmpeg/libavfilter/vf_histogram.c
@@ -39,7 +39,6 @@ typedef struct HistogramContext {
const AVClass *class; ///< AVClass context for log and options purpose
enum HistogramMode mode;
unsigned histogram[256];
- unsigned max_hval;
int ncomp;
const uint8_t *bg_color;
const uint8_t *fg_color;
@@ -47,7 +46,10 @@ typedef struct HistogramContext {
int scale_height;
int step;
int waveform_mode;
+ int waveform_mirror;
int display_mode;
+ int levels_mode;
+ const AVPixFmtDescriptor *desc;
} HistogramContext;
#define OFFSET(x) offsetof(HistogramContext, x)
@@ -65,28 +67,18 @@ static const AVOption histogram_options[] = {
{ "waveform_mode", "set waveform mode", OFFSET(waveform_mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "waveform_mode"},
{ "row", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "waveform_mode" },
{ "column", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "waveform_mode" },
+ { "waveform_mirror", "set waveform mirroring", OFFSET(waveform_mirror), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "waveform_mirror"},
{ "display_mode", "set display mode", OFFSET(display_mode), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "display_mode"},
{ "parade", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "display_mode" },
{ "overlay", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "display_mode" },
- { NULL },
+ { "levels_mode", "set levels mode", OFFSET(levels_mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "levels_mode"},
+ { "linear", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "levels_mode" },
+ { "logarithmic", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "levels_mode" },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(histogram);
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- HistogramContext *h = ctx->priv;
- int ret;
-
- h->class = &histogram_class;
- av_opt_set_defaults(h);
-
- if ((ret = (av_set_options_string(h, args, "=", ":"))) < 0)
- return ret;
-
- return 0;
-}
-
static const enum AVPixelFormat color_pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_NONE
@@ -94,7 +86,19 @@ static const enum AVPixelFormat color_pix_fmts[] = {
static const enum AVPixelFormat levels_pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVJ444P,
- AV_PIX_FMT_GRAY8, AV_PIX_FMT_GBRP, AV_PIX_FMT_NONE
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat waveform_pix_fmts[] = {
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
};
static int query_formats(AVFilterContext *ctx)
@@ -104,6 +108,8 @@ static int query_formats(AVFilterContext *ctx)
switch (h->mode) {
case MODE_WAVEFORM:
+ pix_fmts = waveform_pix_fmts;
+ break;
case MODE_LEVELS:
pix_fmts = levels_pix_fmts;
break;
@@ -128,11 +134,12 @@ static const uint8_t white_gbrp_color[4] = { 255, 255, 255, 255 };
static int config_input(AVFilterLink *inlink)
{
HistogramContext *h = inlink->dst->priv;
- const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
- h->ncomp = desc->nb_components;
+ h->desc = av_pix_fmt_desc_get(inlink->format);
+ h->ncomp = h->desc->nb_components;
switch (inlink->format) {
+ case AV_PIX_FMT_GBRAP:
case AV_PIX_FMT_GBRP:
h->bg_color = black_gbrp_color;
h->fg_color = white_gbrp_color;
@@ -174,6 +181,53 @@ static int config_output(AVFilterLink *outlink)
return 0;
}
+static void gen_waveform(HistogramContext *h, AVFrame *inpicref, AVFrame *outpicref,
+ int component, int intensity, int offset, int col_mode)
+{
+ const int plane = h->desc->comp[component].plane;
+ const int mirror = h->waveform_mirror;
+ const int is_chroma = (component == 1 || component == 2);
+ const int shift_w = (is_chroma ? h->desc->log2_chroma_w : 0);
+ const int shift_h = (is_chroma ? h->desc->log2_chroma_h : 0);
+ const int src_linesize = inpicref->linesize[plane];
+ const int dst_linesize = outpicref->linesize[plane];
+ const int dst_signed_linesize = dst_linesize * (mirror == 1 ? -1 : 1);
+ uint8_t *src_data = inpicref->data[plane];
+ uint8_t *dst_data = outpicref->data[plane] + (col_mode ? (offset >> shift_h) * dst_linesize : offset >> shift_w);
+ uint8_t * const dst_bottom_line = dst_data + dst_linesize * ((256 >> shift_h) - 1);
+ uint8_t * const dst_line = (mirror ? dst_bottom_line : dst_data);
+ const uint8_t max = 255 - intensity;
+ const int src_h = FF_CEIL_RSHIFT(inpicref->height, shift_h);
+ const int src_w = FF_CEIL_RSHIFT(inpicref->width, shift_w);
+ uint8_t *dst, *p;
+ int y;
+
+ if (!col_mode && mirror)
+ dst_data += 256 >> shift_w;
+ for (y = 0; y < src_h; y++) {
+ const uint8_t *src_data_end = src_data + src_w;
+ dst = dst_line;
+ for (p = src_data; p < src_data_end; p++) {
+ uint8_t *target;
+ if (col_mode) {
+ target = dst++ + dst_signed_linesize * (*p >> shift_h);
+ } else {
+ if (mirror)
+ target = dst_data - (*p >> shift_w);
+ else
+ target = dst_data + (*p >> shift_w);
+ }
+ if (*target <= max)
+ *target += intensity;
+ else
+ *target = 255;
+ }
+ src_data += src_linesize;
+ dst_data += dst_linesize;
+ }
+}
+
+
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
HistogramContext *h = inlink->dst->priv;
@@ -182,7 +236,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
AVFrame *out;
const uint8_t *src;
uint8_t *dst;
- int i, j, k, l, ret;
+ int i, j, k, l;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
@@ -192,80 +246,69 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
out->pts = in->pts;
- for (k = 0; k < h->ncomp; k++)
- for (i = 0; i < outlink->h; i++)
- memset(out->data[k] + i * out->linesize[k], h->bg_color[k], outlink->w);
+ for (k = 0; k < h->ncomp; k++) {
+ const int is_chroma = (k == 1 || k == 2);
+ const int dst_h = FF_CEIL_RSHIFT(outlink->h, (is_chroma ? h->desc->log2_chroma_h : 0));
+ const int dst_w = FF_CEIL_RSHIFT(outlink->w, (is_chroma ? h->desc->log2_chroma_w : 0));
+ for (i = 0; i < dst_h ; i++)
+ memset(out->data[h->desc->comp[k].plane] +
+ i * out->linesize[h->desc->comp[k].plane],
+ h->bg_color[k], dst_w);
+ }
switch (h->mode) {
case MODE_LEVELS:
for (k = 0; k < h->ncomp; k++) {
- int start = k * (h->level_height + h->scale_height) * h->display_mode;
+ const int p = h->desc->comp[k].plane;
+ const int start = k * (h->level_height + h->scale_height) * h->display_mode;
+ double max_hval_log;
+ unsigned max_hval = 0;
for (i = 0; i < in->height; i++) {
- src = in->data[k] + i * in->linesize[k];
+ src = in->data[p] + i * in->linesize[p];
for (j = 0; j < in->width; j++)
h->histogram[src[j]]++;
}
for (i = 0; i < 256; i++)
- h->max_hval = FFMAX(h->max_hval, h->histogram[i]);
+ max_hval = FFMAX(max_hval, h->histogram[i]);
+ max_hval_log = log2(max_hval + 1);
for (i = 0; i < outlink->w; i++) {
- int col_height = h->level_height - (float)h->histogram[i] / h->max_hval * h->level_height;
+ int col_height;
+
+ if (h->levels_mode)
+ col_height = round(h->level_height * (1. - (log2(h->histogram[i] + 1) / max_hval_log)));
+ else
+ col_height = h->level_height - (h->histogram[i] * (int64_t)h->level_height + max_hval - 1) / max_hval;
for (j = h->level_height - 1; j >= col_height; j--) {
if (h->display_mode) {
for (l = 0; l < h->ncomp; l++)
out->data[l][(j + start) * out->linesize[l] + i] = h->fg_color[l];
} else {
- out->data[k][(j + start) * out->linesize[k] + i] = 255;
+ out->data[p][(j + start) * out->linesize[p] + i] = 255;
}
}
for (j = h->level_height + h->scale_height - 1; j >= h->level_height; j--)
- out->data[k][(j + start) * out->linesize[k] + i] = i;
+ out->data[p][(j + start) * out->linesize[p] + i] = i;
}
memset(h->histogram, 0, 256 * sizeof(unsigned));
- h->max_hval = 0;
}
break;
case MODE_WAVEFORM:
- if (h->waveform_mode) {
- for (k = 0; k < h->ncomp; k++) {
- int offset = k * 256 * h->display_mode;
- for (i = 0; i < inlink->w; i++) {
- for (j = 0; j < inlink->h; j++) {
- int pos = (offset +
- in->data[k][j * in->linesize[k] + i]) *
- out->linesize[k] + i;
- unsigned value = out->data[k][pos];
- value = FFMIN(value + h->step, 255);
- out->data[k][pos] = value;
- }
- }
- }
- } else {
- for (k = 0; k < h->ncomp; k++) {
- int offset = k * 256 * h->display_mode;
- for (i = 0; i < inlink->h; i++) {
- src = in ->data[k] + i * in ->linesize[k];
- dst = out->data[k] + i * out->linesize[k];
- for (j = 0; j < inlink->w; j++) {
- int pos = src[j] + offset;
- unsigned value = dst[pos];
- value = FFMIN(value + h->step, 255);
- dst[pos] = value;
- }
- }
- }
+ for (k = 0; k < h->ncomp; k++) {
+ const int offset = k * 256 * h->display_mode;
+ gen_waveform(h, in, out, k, h->step, offset, h->waveform_mode);
}
break;
case MODE_COLOR:
for (i = 0; i < inlink->h; i++) {
- int iw1 = i * in->linesize[1];
- int iw2 = i * in->linesize[2];
+ const int iw1 = i * in->linesize[1];
+ const int iw2 = i * in->linesize[2];
for (j = 0; j < inlink->w; j++) {
- int pos = in->data[1][iw1 + j] * out->linesize[0] + in->data[2][iw2 + j];
+ const int pos = in->data[1][iw1 + j] * out->linesize[0] + in->data[2][iw2 + j];
if (out->data[0][pos] < 255)
out->data[0][pos]++;
}
@@ -282,12 +325,12 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
break;
case MODE_COLOR2:
for (i = 0; i < inlink->h; i++) {
- int iw1 = i * in->linesize[1];
- int iw2 = i * in->linesize[2];
+ const int iw1 = i * in->linesize[1];
+ const int iw2 = i * in->linesize[2];
for (j = 0; j < inlink->w; j++) {
- int u = in->data[1][iw1 + j];
- int v = in->data[2][iw2 + j];
- int pos = u * out->linesize[0] + v;
+ const int u = in->data[1][iw1 + j];
+ const int v = in->data[2][iw2 + j];
+ const int pos = u * out->linesize[0] + v;
if (!out->data[0][pos])
out->data[0][pos] = FFABS(128 - u) + FFABS(128 - v);
out->data[1][pos] = u;
@@ -299,18 +342,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
av_assert0(0);
}
- ret = ff_filter_frame(outlink, out);
av_frame_free(&in);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static av_cold void uninit(AVFilterContext *ctx)
-{
- HistogramContext *h = ctx->priv;
-
- av_opt_free(h);
+ return ff_filter_frame(outlink, out);
}
static const AVFilterPad inputs[] = {
@@ -332,12 +365,10 @@ static const AVFilterPad outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_histogram = {
+AVFilter ff_vf_histogram = {
.name = "histogram",
.description = NULL_IF_CONFIG_SMALL("Compute and draw a histogram."),
.priv_size = sizeof(HistogramContext),
- .init = init,
- .uninit = uninit,
.query_formats = query_formats,
.inputs = inputs,
.outputs = outputs,
diff --git a/ffmpeg/libavfilter/vf_hqdn3d.c b/ffmpeg/libavfilter/vf_hqdn3d.c
index 8169cc7..518a23d 100644
--- a/ffmpeg/libavfilter/vf_hqdn3d.c
+++ b/ffmpeg/libavfilter/vf_hqdn3d.c
@@ -26,10 +26,15 @@
* libmpcodecs/vf_hqdn3d.c.
*/
+#include <float.h>
+
#include "config.h"
+#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
@@ -72,7 +77,7 @@ static void denoise_temporal(uint8_t *src, uint8_t *dst,
}
av_always_inline
-static void denoise_spatial(HQDN3DContext *hqdn3d,
+static void denoise_spatial(HQDN3DContext *s,
uint8_t *src, uint8_t *dst,
uint16_t *line_ant, uint16_t *frame_ant,
int w, int h, int sstride, int dstride,
@@ -98,8 +103,8 @@ static void denoise_spatial(HQDN3DContext *hqdn3d,
src += sstride;
dst += dstride;
frame_ant += w;
- if (hqdn3d->denoise_row[depth]) {
- hqdn3d->denoise_row[depth](src, dst, line_ant, frame_ant, w, spatial, temporal);
+ if (s->denoise_row[depth]) {
+ s->denoise_row[depth](src, dst, line_ant, frame_ant, w, spatial, temporal);
continue;
}
pixel_ant = LOAD(0);
@@ -116,7 +121,7 @@ static void denoise_spatial(HQDN3DContext *hqdn3d,
}
av_always_inline
-static void denoise_depth(HQDN3DContext *hqdn3d,
+static void denoise_depth(HQDN3DContext *s,
uint8_t *src, uint8_t *dst,
uint16_t *line_ant, uint16_t **frame_ant_ptr,
int w, int h, int sstride, int dstride,
@@ -137,7 +142,7 @@ static void denoise_depth(HQDN3DContext *hqdn3d,
}
if (spatial[0])
- denoise_spatial(hqdn3d, src, dst, line_ant, frame_ant,
+ denoise_spatial(s, src, dst, line_ant, frame_ant,
w, h, sstride, dstride, spatial, temporal, depth);
else
denoise_temporal(src, dst, frame_ant,
@@ -145,7 +150,7 @@ static void denoise_depth(HQDN3DContext *hqdn3d,
}
#define denoise(...) \
- switch (hqdn3d->depth) {\
+ switch (s->depth) {\
case 8: denoise_depth(__VA_ARGS__, 8); break;\
case 9: denoise_depth(__VA_ARGS__, 9); break;\
case 10: denoise_depth(__VA_ARGS__, 10); break;\
@@ -177,76 +182,38 @@ static int16_t *precalc_coefs(double dist25, int depth)
#define PARAM2_DEFAULT 3.0
#define PARAM3_DEFAULT 6.0
-static int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
- HQDN3DContext *hqdn3d = ctx->priv;
- double lum_spac, lum_tmp, chrom_spac, chrom_tmp;
- double param1, param2, param3, param4;
-
- lum_spac = PARAM1_DEFAULT;
- chrom_spac = PARAM2_DEFAULT;
- lum_tmp = PARAM3_DEFAULT;
- chrom_tmp = lum_tmp * chrom_spac / lum_spac;
-
- if (args) {
- switch (sscanf(args, "%lf:%lf:%lf:%lf",
- &param1, &param2, &param3, &param4)) {
- case 1:
- lum_spac = param1;
- chrom_spac = PARAM2_DEFAULT * param1 / PARAM1_DEFAULT;
- lum_tmp = PARAM3_DEFAULT * param1 / PARAM1_DEFAULT;
- chrom_tmp = lum_tmp * chrom_spac / lum_spac;
- break;
- case 2:
- lum_spac = param1;
- chrom_spac = param2;
- lum_tmp = PARAM3_DEFAULT * param1 / PARAM1_DEFAULT;
- chrom_tmp = lum_tmp * chrom_spac / lum_spac;
- break;
- case 3:
- lum_spac = param1;
- chrom_spac = param2;
- lum_tmp = param3;
- chrom_tmp = lum_tmp * chrom_spac / lum_spac;
- break;
- case 4:
- lum_spac = param1;
- chrom_spac = param2;
- lum_tmp = param3;
- chrom_tmp = param4;
- break;
- }
- }
+ HQDN3DContext *s = ctx->priv;
- hqdn3d->strength[0] = lum_spac;
- hqdn3d->strength[1] = lum_tmp;
- hqdn3d->strength[2] = chrom_spac;
- hqdn3d->strength[3] = chrom_tmp;
+ if (!s->strength[LUMA_SPATIAL])
+ s->strength[LUMA_SPATIAL] = PARAM1_DEFAULT;
+ if (!s->strength[CHROMA_SPATIAL])
+ s->strength[CHROMA_SPATIAL] = PARAM2_DEFAULT * s->strength[LUMA_SPATIAL] / PARAM1_DEFAULT;
+ if (!s->strength[LUMA_TMP])
+ s->strength[LUMA_TMP] = PARAM3_DEFAULT * s->strength[LUMA_SPATIAL] / PARAM1_DEFAULT;
+ if (!s->strength[CHROMA_TMP])
+ s->strength[CHROMA_TMP] = s->strength[LUMA_TMP] * s->strength[CHROMA_SPATIAL] / s->strength[LUMA_SPATIAL];
av_log(ctx, AV_LOG_VERBOSE, "ls:%f cs:%f lt:%f ct:%f\n",
- lum_spac, chrom_spac, lum_tmp, chrom_tmp);
- if (lum_spac < 0 || chrom_spac < 0 || isnan(chrom_tmp)) {
- av_log(ctx, AV_LOG_ERROR,
- "Invalid negative value for luma or chroma spatial strength, "
- "or resulting value for chroma temporal strength is nan.\n");
- return AVERROR(EINVAL);
- }
+ s->strength[LUMA_SPATIAL], s->strength[CHROMA_SPATIAL],
+ s->strength[LUMA_TMP], s->strength[CHROMA_TMP]);
return 0;
}
-static void uninit(AVFilterContext *ctx)
+static av_cold void uninit(AVFilterContext *ctx)
{
- HQDN3DContext *hqdn3d = ctx->priv;
-
- av_freep(&hqdn3d->coefs[0]);
- av_freep(&hqdn3d->coefs[1]);
- av_freep(&hqdn3d->coefs[2]);
- av_freep(&hqdn3d->coefs[3]);
- av_freep(&hqdn3d->line);
- av_freep(&hqdn3d->frame_prev[0]);
- av_freep(&hqdn3d->frame_prev[1]);
- av_freep(&hqdn3d->frame_prev[2]);
+ HQDN3DContext *s = ctx->priv;
+
+ av_freep(&s->coefs[0]);
+ av_freep(&s->coefs[1]);
+ av_freep(&s->coefs[2]);
+ av_freep(&s->coefs[3]);
+ av_freep(&s->line);
+ av_freep(&s->frame_prev[0]);
+ av_freep(&s->frame_prev[1]);
+ av_freep(&s->frame_prev[2]);
}
static int query_formats(AVFilterContext *ctx)
@@ -262,15 +229,15 @@ static int query_formats(AVFilterContext *ctx)
AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_YUVJ440P,
- AV_NE( AV_PIX_FMT_YUV420P9BE, AV_PIX_FMT_YUV420P9LE ),
- AV_NE( AV_PIX_FMT_YUV422P9BE, AV_PIX_FMT_YUV422P9LE ),
- AV_NE( AV_PIX_FMT_YUV444P9BE, AV_PIX_FMT_YUV444P9LE ),
- AV_NE( AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUV420P10LE ),
- AV_NE( AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUV422P10LE ),
- AV_NE( AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUV444P10LE ),
- AV_NE( AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUV420P16LE ),
- AV_NE( AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUV422P16LE ),
- AV_NE( AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUV444P16LE ),
+ AV_PIX_FMT_YUV420P9,
+ AV_PIX_FMT_YUV422P9,
+ AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10,
+ AV_PIX_FMT_YUV422P10,
+ AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P16,
+ AV_PIX_FMT_YUV422P16,
+ AV_PIX_FMT_YUV444P16,
AV_PIX_FMT_NONE
};
@@ -281,39 +248,42 @@ static int query_formats(AVFilterContext *ctx)
static int config_input(AVFilterLink *inlink)
{
- HQDN3DContext *hqdn3d = inlink->dst->priv;
+ HQDN3DContext *s = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int i;
- hqdn3d->hsub = desc->log2_chroma_w;
- hqdn3d->vsub = desc->log2_chroma_h;
- hqdn3d->depth = desc->comp[0].depth_minus1+1;
+ uninit(inlink->dst);
+
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
+ s->depth = desc->comp[0].depth_minus1+1;
- hqdn3d->line = av_malloc(inlink->w * sizeof(*hqdn3d->line));
- if (!hqdn3d->line)
+ s->line = av_malloc(inlink->w * sizeof(*s->line));
+ if (!s->line)
return AVERROR(ENOMEM);
for (i = 0; i < 4; i++) {
- hqdn3d->coefs[i] = precalc_coefs(hqdn3d->strength[i], hqdn3d->depth);
- if (!hqdn3d->coefs[i])
+ s->coefs[i] = precalc_coefs(s->strength[i], s->depth);
+ if (!s->coefs[i])
return AVERROR(ENOMEM);
}
if (ARCH_X86)
- ff_hqdn3d_init_x86(hqdn3d);
+ ff_hqdn3d_init_x86(s);
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
- HQDN3DContext *hqdn3d = inlink->dst->priv;
- AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFilterContext *ctx = inlink->dst;
+ HQDN3DContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out;
int direct, c;
- if (av_frame_is_writable(in)) {
+ if (av_frame_is_writable(in) && !ctx->is_disabled) {
direct = 1;
out = in;
} else {
@@ -328,12 +298,18 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
for (c = 0; c < 3; c++) {
- denoise(hqdn3d, in->data[c], out->data[c],
- hqdn3d->line, &hqdn3d->frame_prev[c],
- in->width >> (!!c * hqdn3d->hsub),
- in->height >> (!!c * hqdn3d->vsub),
+ denoise(s, in->data[c], out->data[c],
+ s->line, &s->frame_prev[c],
+ FF_CEIL_RSHIFT(in->width, (!!c * s->hsub)),
+ FF_CEIL_RSHIFT(in->height, (!!c * s->vsub)),
in->linesize[c], out->linesize[c],
- hqdn3d->coefs[c?2:0], hqdn3d->coefs[c?3:1]);
+ s->coefs[c ? CHROMA_SPATIAL : LUMA_SPATIAL],
+ s->coefs[c ? CHROMA_TMP : LUMA_TMP]);
+ }
+
+ if (ctx->is_disabled) {
+ av_frame_free(&out);
+ return ff_filter_frame(outlink, in);
}
if (!direct)
@@ -342,6 +318,18 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
return ff_filter_frame(outlink, out);
}
+#define OFFSET(x) offsetof(HQDN3DContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption hqdn3d_options[] = {
+ { "luma_spatial", "spatial luma strength", OFFSET(strength[LUMA_SPATIAL]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
+ { "chroma_spatial", "spatial chroma strength", OFFSET(strength[CHROMA_SPATIAL]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
+ { "luma_tmp", "temporal luma strength", OFFSET(strength[LUMA_TMP]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
+ { "chroma_tmp", "temporal chroma strength", OFFSET(strength[CHROMA_TMP]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(hqdn3d);
+
static const AVFilterPad avfilter_vf_hqdn3d_inputs[] = {
{
.name = "default",
@@ -361,16 +349,15 @@ static const AVFilterPad avfilter_vf_hqdn3d_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_hqdn3d = {
+AVFilter ff_vf_hqdn3d = {
.name = "hqdn3d",
.description = NULL_IF_CONFIG_SMALL("Apply a High Quality 3D Denoiser."),
-
.priv_size = sizeof(HQDN3DContext),
+ .priv_class = &hqdn3d_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_hqdn3d_inputs,
-
- .outputs = avfilter_vf_hqdn3d_outputs,
+ .inputs = avfilter_vf_hqdn3d_inputs,
+ .outputs = avfilter_vf_hqdn3d_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
diff --git a/ffmpeg/libavfilter/vf_hqdn3d.h b/ffmpeg/libavfilter/vf_hqdn3d.h
index dfc69e1..268ac9a 100644
--- a/ffmpeg/libavfilter/vf_hqdn3d.h
+++ b/ffmpeg/libavfilter/vf_hqdn3d.h
@@ -26,7 +26,10 @@
#include <stddef.h>
#include <stdint.h>
+#include "libavutil/opt.h"
+
typedef struct {
+ const AVClass *class;
int16_t *coefs[4];
uint16_t *line;
uint16_t *frame_prev[3];
@@ -36,6 +39,11 @@ typedef struct {
void (*denoise_row[17])(uint8_t *src, uint8_t *dst, uint16_t *line_ant, uint16_t *frame_ant, ptrdiff_t w, int16_t *spatial, int16_t *temporal);
} HQDN3DContext;
+#define LUMA_SPATIAL 0
+#define LUMA_TMP 1
+#define CHROMA_SPATIAL 2
+#define CHROMA_TMP 3
+
void ff_hqdn3d_init_x86(HQDN3DContext *hqdn3d);
#endif /* AVFILTER_VF_HQDN3D_H */
diff --git a/ffmpeg/libavfilter/vf_hue.c b/ffmpeg/libavfilter/vf_hue.c
index 478ce6e..7843673 100644
--- a/ffmpeg/libavfilter/vf_hue.c
+++ b/ffmpeg/libavfilter/vf_hue.c
@@ -4,19 +4,19 @@
*
* This file is part of FFmpeg.
*
- * FFmpeg is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
@@ -36,12 +36,6 @@
#include "internal.h"
#include "video.h"
-#define HUE_DEFAULT_VAL 0
-#define SAT_DEFAULT_VAL 1
-
-#define HUE_DEFAULT_VAL_STRING AV_STRINGIFY(HUE_DEFAULT_VAL)
-#define SAT_DEFAULT_VAL_STRING AV_STRINGIFY(SAT_DEFAULT_VAL)
-
#define SAT_MIN_VAL -10
#define SAT_MAX_VAL 10
@@ -74,12 +68,18 @@ typedef struct {
float saturation;
char *saturation_expr;
AVExpr *saturation_pexpr;
+ float brightness;
+ char *brightness_expr;
+ AVExpr *brightness_pexpr;
int hsub;
int vsub;
+ int is_first;
int32_t hue_sin;
int32_t hue_cos;
- int flat_syntax;
double var_values[VAR_NB];
+ uint8_t lut_l[256];
+ uint8_t lut_u[256][256];
+ uint8_t lut_v[256][256];
} HueContext;
#define OFFSET(x) offsetof(HueContext, x)
@@ -87,10 +87,12 @@ typedef struct {
static const AVOption hue_options[] = {
{ "h", "set the hue angle degrees expression", OFFSET(hue_deg_expr), AV_OPT_TYPE_STRING,
{ .str = NULL }, .flags = FLAGS },
- { "H", "set the hue angle radians expression", OFFSET(hue_expr), AV_OPT_TYPE_STRING,
- { .str = NULL }, .flags = FLAGS },
{ "s", "set the saturation expression", OFFSET(saturation_expr), AV_OPT_TYPE_STRING,
+ { .str = "1" }, .flags = FLAGS },
+ { "H", "set the hue angle radians expression", OFFSET(hue_expr), AV_OPT_TYPE_STRING,
{ .str = NULL }, .flags = FLAGS },
+ { "b", "set the brightness expression", OFFSET(brightness_expr), AV_OPT_TYPE_STRING,
+ { .str = "0" }, .flags = FLAGS },
{ NULL }
};
@@ -101,105 +103,123 @@ static inline void compute_sin_and_cos(HueContext *hue)
/*
* Scale the value to the norm of the resulting (U,V) vector, that is
* the saturation.
- * This will be useful in the process_chrominance function.
+ * This will be useful in the apply_lut function.
*/
hue->hue_sin = rint(sin(hue->hue) * (1 << 16) * hue->saturation);
hue->hue_cos = rint(cos(hue->hue) * (1 << 16) * hue->saturation);
}
-#define SET_EXPRESSION(attr, name) do { \
- if (hue->attr##_expr) { \
- if ((ret = av_expr_parse(&hue->attr##_pexpr, hue->attr##_expr, var_names, \
- NULL, NULL, NULL, NULL, 0, ctx)) < 0) { \
- av_log(ctx, AV_LOG_ERROR, \
- "Parsing failed for expression " #name "='%s'", \
- hue->attr##_expr); \
- hue->attr##_expr = old_##attr##_expr; \
- hue->attr##_pexpr = old_##attr##_pexpr; \
- return AVERROR(EINVAL); \
- } else if (old_##attr##_pexpr) { \
- av_freep(&old_##attr##_expr); \
- av_expr_free(old_##attr##_pexpr); \
- old_##attr##_pexpr = NULL; \
- } \
- } else { \
- hue->attr##_expr = old_##attr##_expr; \
- } \
-} while (0)
-
-static inline int set_options(AVFilterContext *ctx, const char *args)
+static inline void create_luma_lut(HueContext *h)
{
- HueContext *hue = ctx->priv;
- int ret;
- char *old_hue_expr, *old_hue_deg_expr, *old_saturation_expr;
- AVExpr *old_hue_pexpr, *old_hue_deg_pexpr, *old_saturation_pexpr;
- static const char *shorthand[] = { "h", "s", NULL };
- old_hue_expr = hue->hue_expr;
- old_hue_deg_expr = hue->hue_deg_expr;
- old_saturation_expr = hue->saturation_expr;
-
- old_hue_pexpr = hue->hue_pexpr;
- old_hue_deg_pexpr = hue->hue_deg_pexpr;
- old_saturation_pexpr = hue->saturation_pexpr;
-
- hue->hue_expr = NULL;
- hue->hue_deg_expr = NULL;
- hue->saturation_expr = NULL;
-
- if ((ret = av_opt_set_from_string(hue, args, shorthand, "=", ":")) < 0)
- return ret;
- if (hue->hue_expr && hue->hue_deg_expr) {
- av_log(ctx, AV_LOG_ERROR,
- "H and h options are incompatible and cannot be specified "
- "at the same time\n");
- hue->hue_expr = old_hue_expr;
- hue->hue_deg_expr = old_hue_deg_expr;
+ const float b = h->brightness;
+ int i;
- return AVERROR(EINVAL);
+ for (i = 0; i < 256; i++) {
+ h->lut_l[i] = av_clip_uint8(i + b * 25.5);
}
+}
- SET_EXPRESSION(hue_deg, h);
- SET_EXPRESSION(hue, H);
- SET_EXPRESSION(saturation, s);
+static inline void create_chrominance_lut(HueContext *h, const int32_t c,
+ const int32_t s)
+{
+ int32_t i, j, u, v, new_u, new_v;
+
+ /*
+ * If we consider U and V as the components of a 2D vector then its angle
+ * is the hue and the norm is the saturation
+ */
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ /* Normalize the components from range [16;140] to [-112;112] */
+ u = i - 128;
+ v = j - 128;
+ /*
+ * Apply the rotation of the vector : (c * u) - (s * v)
+ * (s * u) + (c * v)
+ * De-normalize the components (without forgetting to scale 128
+ * by << 16)
+ * Finally scale back the result by >> 16
+ */
+ new_u = ((c * u) - (s * v) + (1 << 15) + (128 << 16)) >> 16;
+ new_v = ((s * u) + (c * v) + (1 << 15) + (128 << 16)) >> 16;
- hue->flat_syntax = 0;
+ /* Prevent a potential overflow */
+ h->lut_u[i][j] = av_clip_uint8(new_u);
+ h->lut_v[i][j] = av_clip_uint8(new_v);
+ }
+ }
+}
- av_log(ctx, AV_LOG_VERBOSE,
- "H_expr:%s h_deg_expr:%s s_expr:%s\n",
- hue->hue_expr, hue->hue_deg_expr, hue->saturation_expr);
+static int set_expr(AVExpr **pexpr_ptr, char **expr_ptr,
+ const char *expr, const char *option, void *log_ctx)
+{
+ int ret;
+ AVExpr *new_pexpr;
+ char *new_expr;
+
+ new_expr = av_strdup(expr);
+ if (!new_expr)
+ return AVERROR(ENOMEM);
+ ret = av_expr_parse(&new_pexpr, expr, var_names,
+ NULL, NULL, NULL, NULL, 0, log_ctx);
+ if (ret < 0) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Error when evaluating the expression '%s' for %s\n",
+ expr, option);
+ av_free(new_expr);
+ return ret;
+ }
- compute_sin_and_cos(hue);
+ if (*pexpr_ptr)
+ av_expr_free(*pexpr_ptr);
+ *pexpr_ptr = new_pexpr;
+ av_freep(expr_ptr);
+ *expr_ptr = new_expr;
return 0;
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
HueContext *hue = ctx->priv;
+ int ret;
- hue->class = &hue_class;
- av_opt_set_defaults(hue);
+ if (hue->hue_expr && hue->hue_deg_expr) {
+ av_log(ctx, AV_LOG_ERROR,
+ "H and h options are incompatible and cannot be specified "
+ "at the same time\n");
+ return AVERROR(EINVAL);
+ }
+
+#define SET_EXPR(expr, option) \
+ if (hue->expr##_expr) do { \
+ ret = set_expr(&hue->expr##_pexpr, &hue->expr##_expr, \
+ hue->expr##_expr, option, ctx); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+ SET_EXPR(brightness, "b");
+ SET_EXPR(saturation, "s");
+ SET_EXPR(hue_deg, "h");
+ SET_EXPR(hue, "H");
+#undef SET_EXPR
- hue->saturation = SAT_DEFAULT_VAL;
- hue->hue = HUE_DEFAULT_VAL;
- hue->hue_deg_pexpr = NULL;
- hue->hue_pexpr = NULL;
- hue->flat_syntax = 1;
+ av_log(ctx, AV_LOG_VERBOSE,
+ "H_expr:%s h_deg_expr:%s s_expr:%s b_expr:%s\n",
+ hue->hue_expr, hue->hue_deg_expr, hue->saturation_expr, hue->brightness_expr);
+ compute_sin_and_cos(hue);
+ hue->is_first = 1;
- return set_options(ctx, args);
+ return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
HueContext *hue = ctx->priv;
- av_opt_free(hue);
-
- av_free(hue->hue_deg_expr);
+ av_expr_free(hue->brightness_pexpr);
av_expr_free(hue->hue_deg_pexpr);
- av_free(hue->hue_expr);
av_expr_free(hue->hue_pexpr);
- av_free(hue->saturation_expr);
av_expr_free(hue->saturation_pexpr);
}
@@ -209,6 +229,7 @@ static int query_formats(AVFilterContext *ctx)
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P,
AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_NONE
};
@@ -234,36 +255,36 @@ static int config_props(AVFilterLink *inlink)
return 0;
}
-static void process_chrominance(uint8_t *udst, uint8_t *vdst, const int dst_linesize,
- uint8_t *usrc, uint8_t *vsrc, const int src_linesize,
- int w, int h,
- const int32_t c, const int32_t s)
+static void apply_luma_lut(HueContext *s,
+ uint8_t *ldst, const int dst_linesize,
+ uint8_t *lsrc, const int src_linesize,
+ int w, int h)
+{
+ int i;
+
+ while (h--) {
+ for (i = 0; i < w; i++)
+ ldst[i] = s->lut_l[lsrc[i]];
+
+ lsrc += src_linesize;
+ ldst += dst_linesize;
+ }
+}
+
+static void apply_lut(HueContext *s,
+ uint8_t *udst, uint8_t *vdst, const int dst_linesize,
+ uint8_t *usrc, uint8_t *vsrc, const int src_linesize,
+ int w, int h)
{
- int32_t u, v, new_u, new_v;
int i;
- /*
- * If we consider U and V as the components of a 2D vector then its angle
- * is the hue and the norm is the saturation
- */
while (h--) {
for (i = 0; i < w; i++) {
- /* Normalize the components from range [16;140] to [-112;112] */
- u = usrc[i] - 128;
- v = vsrc[i] - 128;
- /*
- * Apply the rotation of the vector : (c * u) - (s * v)
- * (s * u) + (c * v)
- * De-normalize the components (without forgetting to scale 128
- * by << 16)
- * Finally scale back the result by >> 16
- */
- new_u = ((c * u) - (s * v) + (1 << 15) + (128 << 16)) >> 16;
- new_v = ((s * u) + (c * v) + (1 << 15) + (128 << 16)) >> 16;
+ const int u = usrc[i];
+ const int v = vsrc[i];
- /* Prevent a potential overflow */
- udst[i] = av_clip_uint8_c(new_u);
- vdst[i] = av_clip_uint8_c(new_v);
+ udst[i] = s->lut_u[u][v];
+ vdst[i] = s->lut_v[u][v];
}
usrc += src_linesize;
@@ -281,6 +302,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
HueContext *hue = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *outpic;
+ const int32_t old_hue_sin = hue->hue_sin, old_hue_cos = hue->hue_cos;
+ const float old_brightness = hue->brightness;
int direct = 0;
if (av_frame_is_writable(inpic)) {
@@ -295,60 +318,106 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
av_frame_copy_props(outpic, inpic);
}
- if (!hue->flat_syntax) {
- hue->var_values[VAR_T] = TS2T(inpic->pts, inlink->time_base);
- hue->var_values[VAR_PTS] = TS2D(inpic->pts);
+ hue->var_values[VAR_N] = inlink->frame_count;
+ hue->var_values[VAR_T] = TS2T(inpic->pts, inlink->time_base);
+ hue->var_values[VAR_PTS] = TS2D(inpic->pts);
- if (hue->saturation_expr) {
- hue->saturation = av_expr_eval(hue->saturation_pexpr, hue->var_values, NULL);
+ if (hue->saturation_expr) {
+ hue->saturation = av_expr_eval(hue->saturation_pexpr, hue->var_values, NULL);
- if (hue->saturation < SAT_MIN_VAL || hue->saturation > SAT_MAX_VAL) {
- hue->saturation = av_clip(hue->saturation, SAT_MIN_VAL, SAT_MAX_VAL);
- av_log(inlink->dst, AV_LOG_WARNING,
- "Saturation value not in range [%d,%d]: clipping value to %0.1f\n",
- SAT_MIN_VAL, SAT_MAX_VAL, hue->saturation);
- }
+ if (hue->saturation < SAT_MIN_VAL || hue->saturation > SAT_MAX_VAL) {
+ hue->saturation = av_clip(hue->saturation, SAT_MIN_VAL, SAT_MAX_VAL);
+ av_log(inlink->dst, AV_LOG_WARNING,
+ "Saturation value not in range [%d,%d]: clipping value to %0.1f\n",
+ SAT_MIN_VAL, SAT_MAX_VAL, hue->saturation);
}
+ }
- if (hue->hue_deg_expr) {
- hue->hue_deg = av_expr_eval(hue->hue_deg_pexpr, hue->var_values, NULL);
- hue->hue = hue->hue_deg * M_PI / 180;
- } else if (hue->hue_expr) {
- hue->hue = av_expr_eval(hue->hue_pexpr, hue->var_values, NULL);
- }
+ if (hue->brightness_expr) {
+ hue->brightness = av_expr_eval(hue->brightness_pexpr, hue->var_values, NULL);
- av_log(inlink->dst, AV_LOG_DEBUG,
- "H:%0.1f s:%0.f t:%0.1f n:%d\n",
- hue->hue, hue->saturation,
- hue->var_values[VAR_T], (int)hue->var_values[VAR_N]);
+ if (hue->brightness < -10 || hue->brightness > 10) {
+ hue->brightness = av_clipf(hue->brightness, -10, 10);
+ av_log(inlink->dst, AV_LOG_WARNING,
+ "Brightness value not in range [%d,%d]: clipping value to %0.1f\n",
+ -10, 10, hue->brightness);
+ }
+ }
- compute_sin_and_cos(hue);
+ if (hue->hue_deg_expr) {
+ hue->hue_deg = av_expr_eval(hue->hue_deg_pexpr, hue->var_values, NULL);
+ hue->hue = hue->hue_deg * M_PI / 180;
+ } else if (hue->hue_expr) {
+ hue->hue = av_expr_eval(hue->hue_pexpr, hue->var_values, NULL);
+ hue->hue_deg = hue->hue * 180 / M_PI;
}
- hue->var_values[VAR_N] += 1;
+ av_log(inlink->dst, AV_LOG_DEBUG,
+ "H:%0.1f*PI h:%0.1f s:%0.1f b:%0.f t:%0.1f n:%d\n",
+ hue->hue/M_PI, hue->hue_deg, hue->saturation, hue->brightness,
+ hue->var_values[VAR_T], (int)hue->var_values[VAR_N]);
- if (!direct)
- av_image_copy_plane(outpic->data[0], outpic->linesize[0],
- inpic->data[0], inpic->linesize[0],
- inlink->w, inlink->h);
+ compute_sin_and_cos(hue);
+ if (hue->is_first || (old_hue_sin != hue->hue_sin || old_hue_cos != hue->hue_cos))
+ create_chrominance_lut(hue, hue->hue_cos, hue->hue_sin);
+
+ if (hue->is_first || (old_brightness != hue->brightness && hue->brightness))
+ create_luma_lut(hue);
+
+ if (!direct) {
+ if (!hue->brightness)
+ av_image_copy_plane(outpic->data[0], outpic->linesize[0],
+ inpic->data[0], inpic->linesize[0],
+ inlink->w, inlink->h);
+ if (inpic->data[3])
+ av_image_copy_plane(outpic->data[3], outpic->linesize[3],
+ inpic->data[3], inpic->linesize[3],
+ inlink->w, inlink->h);
+ }
- process_chrominance(outpic->data[1], outpic->data[2], outpic->linesize[1],
- inpic->data[1], inpic->data[2], inpic->linesize[1],
- inlink->w >> hue->hsub, inlink->h >> hue->vsub,
- hue->hue_cos, hue->hue_sin);
+ apply_lut(hue, outpic->data[1], outpic->data[2], outpic->linesize[1],
+ inpic->data[1], inpic->data[2], inpic->linesize[1],
+ FF_CEIL_RSHIFT(inlink->w, hue->hsub),
+ FF_CEIL_RSHIFT(inlink->h, hue->vsub));
+ if (hue->brightness)
+ apply_luma_lut(hue, outpic->data[0], outpic->linesize[0],
+ inpic->data[0], inpic->linesize[0], inlink->w, inlink->h);
if (!direct)
av_frame_free(&inpic);
+
+ hue->is_first = 0;
return ff_filter_frame(outlink, outpic);
}
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
char *res, int res_len, int flags)
{
- if (!strcmp(cmd, "reinit"))
- return set_options(ctx, args);
- else
+ HueContext *hue = ctx->priv;
+ int ret;
+
+#define SET_EXPR(expr, option) \
+ do { \
+ ret = set_expr(&hue->expr##_pexpr, &hue->expr##_expr, \
+ args, option, ctx); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+ if (!strcmp(cmd, "h")) {
+ SET_EXPR(hue_deg, "h");
+ av_freep(&hue->hue_expr);
+ } else if (!strcmp(cmd, "H")) {
+ SET_EXPR(hue, "H");
+ av_freep(&hue->hue_deg_expr);
+ } else if (!strcmp(cmd, "s")) {
+ SET_EXPR(saturation, "s");
+ } else if (!strcmp(cmd, "b")) {
+ SET_EXPR(brightness, "b");
+ } else
return AVERROR(ENOSYS);
+
+ return 0;
}
static const AVFilterPad hue_inputs[] = {
@@ -369,17 +438,16 @@ static const AVFilterPad hue_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_hue = {
- .name = "hue",
- .description = NULL_IF_CONFIG_SMALL("Adjust the hue and saturation of the input video."),
-
- .priv_size = sizeof(HueContext),
-
- .init = init,
- .uninit = uninit,
- .query_formats = query_formats,
+AVFilter ff_vf_hue = {
+ .name = "hue",
+ .description = NULL_IF_CONFIG_SMALL("Adjust the hue and saturation of the input video."),
+ .priv_size = sizeof(HueContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
.process_command = process_command,
.inputs = hue_inputs,
.outputs = hue_outputs,
.priv_class = &hue_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_idet.c b/ffmpeg/libavfilter/vf_idet.c
index f61ac5a..d441a5f 100644
--- a/ffmpeg/libavfilter/vf_idet.c
+++ b/ffmpeg/libavfilter/vf_idet.c
@@ -118,8 +118,8 @@ static void filter(AVFilterContext *ctx)
int refs = idet->cur->linesize[i];
if (i && i<3) {
- w >>= idet->csp->log2_chroma_w;
- h >>= idet->csp->log2_chroma_h;
+ w = FF_CEIL_RSHIFT(w, idet->csp->log2_chroma_w);
+ h = FF_CEIL_RSHIFT(h, idet->csp->log2_chroma_h);
}
for (y = 2; y < h - 2; y++) {
@@ -206,21 +206,6 @@ static int filter_frame(AVFilterLink *link, AVFrame *picref)
return ff_filter_frame(ctx->outputs[0], av_frame_clone(idet->cur));
}
-static int request_frame(AVFilterLink *link)
-{
- AVFilterContext *ctx = link->src;
- IDETContext *idet = ctx->priv;
-
- do {
- int ret;
-
- if ((ret = ff_request_frame(link->src->inputs[0])))
- return ret;
- } while (!idet->cur);
-
- return 0;
-}
-
static av_cold void uninit(AVFilterContext *ctx)
{
IDETContext *idet = ctx->priv;
@@ -255,15 +240,15 @@ static int query_formats(AVFilterContext *ctx)
AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ444P,
- AV_NE( AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE ),
+ AV_PIX_FMT_GRAY16,
AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUVJ440P,
- AV_NE( AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUV420P10LE ),
- AV_NE( AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUV422P10LE ),
- AV_NE( AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUV444P10LE ),
- AV_NE( AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUV420P16LE ),
- AV_NE( AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUV422P16LE ),
- AV_NE( AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUV444P16LE ),
+ AV_PIX_FMT_YUV420P10,
+ AV_PIX_FMT_YUV422P10,
+ AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P16,
+ AV_PIX_FMT_YUV422P16,
+ AV_PIX_FMT_YUV444P16,
AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_NONE
};
@@ -273,17 +258,15 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static int config_output(AVFilterLink *outlink)
{
- IDETContext *idet = ctx->priv;
- static const char *shorthand[] = { "intl_thres", "prog_thres", NULL };
- int ret;
-
- idet->class = &idet_class;
- av_opt_set_defaults(idet);
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+ return 0;
+}
- if ((ret = av_opt_set_from_string(idet, args, shorthand, "=", ":")) < 0)
- return ret;
+static av_cold int init(AVFilterContext *ctx)
+{
+ IDETContext *idet = ctx->priv;
idet->last_type = UNDETERMINED;
memset(idet->history, UNDETERMINED, HIST_SIZE);
@@ -305,17 +288,16 @@ static const AVFilterPad idet_inputs[] = {
static const AVFilterPad idet_outputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .request_frame = request_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
},
{ NULL }
};
-AVFilter avfilter_vf_idet = {
+AVFilter ff_vf_idet = {
.name = "idet",
.description = NULL_IF_CONFIG_SMALL("Interlace detect Filter."),
-
.priv_size = sizeof(IDETContext),
.init = init,
.uninit = uninit,
diff --git a/ffmpeg/libavfilter/vf_il.c b/ffmpeg/libavfilter/vf_il.c
index 29c277c..e755caa 100644
--- a/ffmpeg/libavfilter/vf_il.c
+++ b/ffmpeg/libavfilter/vf_il.c
@@ -81,20 +81,6 @@ static const AVOption il_options[] = {
AVFILTER_DEFINE_CLASS(il);
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- IlContext *il = ctx->priv;
- int ret;
-
- il->class = &il_class;
- av_opt_set_defaults(il);
-
- if ((ret = av_set_options_string(il, args, "=", ":")) < 0)
- return ret;
-
- return 0;
-}
-
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats = NULL;
@@ -102,7 +88,7 @@ static int query_formats(AVFilterContext *ctx)
for (fmt = 0; fmt < AV_PIX_FMT_NB; fmt++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
- if (!(desc->flags & PIX_FMT_PAL) && !(desc->flags & PIX_FMT_HWACCEL))
+ if (!(desc->flags & AV_PIX_FMT_FLAG_PAL) && !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
ff_add_format(&formats, fmt);
}
@@ -114,17 +100,15 @@ static int config_input(AVFilterLink *inlink)
{
IlContext *il = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
- int i, ret;
+ int ret;
- for (i = 0; i < desc->nb_components; i++)
- il->nb_planes = FFMAX(il->nb_planes, desc->comp[i].plane);
- il->nb_planes++;
+ il->nb_planes = av_pix_fmt_count_planes(inlink->format);
- il->has_alpha = !!(desc->flags & PIX_FMT_ALPHA);
+ il->has_alpha = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
if ((ret = av_image_fill_linesizes(il->linesize, inlink->format, inlink->w)) < 0)
return ret;
- il->chroma_height = inlink->h >> desc->log2_chroma_h;
+ il->chroma_height = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
return 0;
}
@@ -165,7 +149,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
IlContext *il = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
- int ret, comp;
+ int comp;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
@@ -187,44 +171,42 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
}
if (il->has_alpha) {
- int comp = il->nb_planes - 1;
+ comp = il->nb_planes - 1;
interleave(out->data[comp], inpicref->data[comp],
il->linesize[comp], inlink->h,
out->linesize[comp], inpicref->linesize[comp],
il->alpha_mode, il->alpha_swap);
}
- ret = ff_filter_frame(outlink, out);
av_frame_free(&inpicref);
- return ret;
+ return ff_filter_frame(outlink, out);
}
static const AVFilterPad inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
- .config_props = config_input,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
},
{ NULL }
};
static const AVFilterPad outputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
-AVFilter avfilter_vf_il = {
+AVFilter ff_vf_il = {
.name = "il",
.description = NULL_IF_CONFIG_SMALL("Deinterleave or interleave fields."),
.priv_size = sizeof(IlContext),
- .init = init,
.query_formats = query_formats,
.inputs = inputs,
.outputs = outputs,
.priv_class = &il_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_kerndeint.c b/ffmpeg/libavfilter/vf_kerndeint.c
index 7e89648..1f8e091 100644
--- a/ffmpeg/libavfilter/vf_kerndeint.c
+++ b/ffmpeg/libavfilter/vf_kerndeint.c
@@ -59,23 +59,11 @@ static const AVOption kerndeint_options[] = {
AVFILTER_DEFINE_CLASS(kerndeint);
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- KerndeintContext *kerndeint = ctx->priv;
- const char const * shorthand[] = { "thresh", "map", "order", "sharp", "twoway", NULL };
-
- kerndeint->class = &kerndeint_class;
- av_opt_set_defaults(kerndeint);
-
- return av_opt_set_from_string(kerndeint, args, shorthand, "=", ":");
-}
-
static av_cold void uninit(AVFilterContext *ctx)
{
KerndeintContext *kerndeint = ctx->priv;
av_free(kerndeint->tmp_data[0]);
- av_opt_free(kerndeint);
}
static int query_formats(AVFilterContext *ctx)
@@ -101,7 +89,7 @@ static int config_props(AVFilterLink *inlink)
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int ret;
- kerndeint->is_packed_rgb = av_pix_fmt_desc_get(inlink->format)->flags & PIX_FMT_RGB;
+ kerndeint->is_packed_rgb = av_pix_fmt_desc_get(inlink->format)->flags & AV_PIX_FMT_FLAG_RGB;
kerndeint->vsub = desc->log2_chroma_h;
ret = av_image_alloc(kerndeint->tmp_data, kerndeint->tmp_linesize,
@@ -162,8 +150,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
av_frame_copy_props(outpic, inpic);
outpic->interlaced_frame = 0;
- for (plane = 0; inpic->data[plane] && plane < 4; plane++) {
- h = plane == 0 ? inlink->h : inlink->h >> kerndeint->vsub;
+ for (plane = 0; plane < 4 && inpic->data[plane] && inpic->linesize[plane]; plane++) {
+ h = plane == 0 ? inlink->h : FF_CEIL_RSHIFT(inlink->h, kerndeint->vsub);
bwidth = kerndeint->tmp_bwidth[plane];
srcp = srcp_saved = inpic->data[plane];
@@ -317,16 +305,14 @@ static const AVFilterPad kerndeint_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_kerndeint = {
+
+AVFilter ff_vf_kerndeint = {
.name = "kerndeint",
.description = NULL_IF_CONFIG_SMALL("Apply kernel deinterlacing to the input."),
.priv_size = sizeof(KerndeintContext),
- .init = init,
+ .priv_class = &kerndeint_class,
.uninit = uninit,
.query_formats = query_formats,
-
.inputs = kerndeint_inputs,
.outputs = kerndeint_outputs,
-
- .priv_class = &kerndeint_class,
};
diff --git a/ffmpeg/libavfilter/vf_libopencv.c b/ffmpeg/libavfilter/vf_libopencv.c
index 7174ccc..2306b09 100644
--- a/ffmpeg/libavfilter/vf_libopencv.c
+++ b/ffmpeg/libavfilter/vf_libopencv.c
@@ -23,13 +23,12 @@
* libopencv wrapper functions
*/
-/* #define DEBUG */
-
#include <opencv/cv.h>
#include <opencv/cxcore.h>
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/file.h"
+#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
@@ -70,7 +69,9 @@ static int query_formats(AVFilterContext *ctx)
}
typedef struct {
- const char *name;
+ const AVClass *class;
+ char *name;
+ char *params;
int (*init)(AVFilterContext *ctx, const char *args);
void (*uninit)(AVFilterContext *ctx);
void (*end_frame_filter)(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg);
@@ -85,8 +86,8 @@ typedef struct {
static av_cold int smooth_init(AVFilterContext *ctx, const char *args)
{
- OCVContext *ocv = ctx->priv;
- SmoothContext *smooth = ocv->priv;
+ OCVContext *s = ctx->priv;
+ SmoothContext *smooth = s->priv;
char type_str[128] = "gaussian";
smooth->param1 = 3;
@@ -95,7 +96,7 @@ static av_cold int smooth_init(AVFilterContext *ctx, const char *args)
smooth->param4 = 0.0;
if (args)
- sscanf(args, "%127[^:]:%d:%d:%lf:%lf", type_str, &smooth->param1, &smooth->param2, &smooth->param3, &smooth->param4);
+ sscanf(args, "%127[^|]|%d|%d|%lf|%lf", type_str, &smooth->param1, &smooth->param2, &smooth->param3, &smooth->param4);
if (!strcmp(type_str, "blur" )) smooth->type = CV_BLUR;
else if (!strcmp(type_str, "blur_no_scale")) smooth->type = CV_BLUR_NO_SCALE;
@@ -128,8 +129,8 @@ static av_cold int smooth_init(AVFilterContext *ctx, const char *args)
static void smooth_end_frame_filter(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg)
{
- OCVContext *ocv = ctx->priv;
- SmoothContext *smooth = ocv->priv;
+ OCVContext *s = ctx->priv;
+ SmoothContext *smooth = s->priv;
cvSmooth(inimg, outimg, smooth->type, smooth->param1, smooth->param2, smooth->param3, smooth->param4);
}
@@ -251,24 +252,25 @@ typedef struct {
static av_cold int dilate_init(AVFilterContext *ctx, const char *args)
{
- OCVContext *ocv = ctx->priv;
- DilateContext *dilate = ocv->priv;
+ OCVContext *s = ctx->priv;
+ DilateContext *dilate = s->priv;
char default_kernel_str[] = "3x3+0x0/rect";
char *kernel_str;
const char *buf = args;
int ret;
- dilate->nb_iterations = 1;
-
if (args)
- kernel_str = av_get_token(&buf, ":");
- if ((ret = parse_iplconvkernel(&dilate->kernel,
- *kernel_str ? kernel_str : default_kernel_str,
- ctx)) < 0)
+ kernel_str = av_get_token(&buf, "|");
+ else
+ kernel_str = av_strdup(default_kernel_str);
+ if (!kernel_str)
+ return AVERROR(ENOMEM);
+ if ((ret = parse_iplconvkernel(&dilate->kernel, kernel_str, ctx)) < 0)
return ret;
av_free(kernel_str);
- sscanf(buf, ":%d", &dilate->nb_iterations);
+ if (!buf || sscanf(buf, "|%d", &dilate->nb_iterations) != 1)
+ dilate->nb_iterations = 1;
av_log(ctx, AV_LOG_VERBOSE, "iterations_nb:%d\n", dilate->nb_iterations);
if (dilate->nb_iterations <= 0) {
av_log(ctx, AV_LOG_ERROR, "Invalid non-positive value '%d' for nb_iterations\n",
@@ -280,23 +282,23 @@ static av_cold int dilate_init(AVFilterContext *ctx, const char *args)
static av_cold void dilate_uninit(AVFilterContext *ctx)
{
- OCVContext *ocv = ctx->priv;
- DilateContext *dilate = ocv->priv;
+ OCVContext *s = ctx->priv;
+ DilateContext *dilate = s->priv;
cvReleaseStructuringElement(&dilate->kernel);
}
static void dilate_end_frame_filter(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg)
{
- OCVContext *ocv = ctx->priv;
- DilateContext *dilate = ocv->priv;
+ OCVContext *s = ctx->priv;
+ DilateContext *dilate = s->priv;
cvDilate(inimg, outimg, dilate->kernel, dilate->nb_iterations);
}
static void erode_end_frame_filter(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg)
{
- OCVContext *ocv = ctx->priv;
- DilateContext *dilate = ocv->priv;
+ OCVContext *s = ctx->priv;
+ DilateContext *dilate = s->priv;
cvErode(inimg, outimg, dilate->kernel, dilate->nb_iterations);
}
@@ -314,47 +316,45 @@ static OCVFilterEntry ocv_filter_entries[] = {
{ "smooth", sizeof(SmoothContext), smooth_init, NULL, smooth_end_frame_filter },
};
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
- OCVContext *ocv = ctx->priv;
- char name[128], priv_args[1024];
+ OCVContext *s = ctx->priv;
int i;
- char c;
-
- sscanf(args, "%127[^=:]%c%1023s", name, &c, priv_args);
+ if (!s->name) {
+ av_log(ctx, AV_LOG_ERROR, "No libopencv filter name specified\n");
+ return AVERROR(EINVAL);
+ }
for (i = 0; i < FF_ARRAY_ELEMS(ocv_filter_entries); i++) {
OCVFilterEntry *entry = &ocv_filter_entries[i];
- if (!strcmp(name, entry->name)) {
- ocv->name = entry->name;
- ocv->init = entry->init;
- ocv->uninit = entry->uninit;
- ocv->end_frame_filter = entry->end_frame_filter;
+ if (!strcmp(s->name, entry->name)) {
+ s->init = entry->init;
+ s->uninit = entry->uninit;
+ s->end_frame_filter = entry->end_frame_filter;
- if (!(ocv->priv = av_mallocz(entry->priv_size)))
+ if (!(s->priv = av_mallocz(entry->priv_size)))
return AVERROR(ENOMEM);
- return ocv->init(ctx, priv_args);
+ return s->init(ctx, s->params);
}
}
- av_log(ctx, AV_LOG_ERROR, "No libopencv filter named '%s'\n", name);
+ av_log(ctx, AV_LOG_ERROR, "No libopencv filter named '%s'\n", s->name);
return AVERROR(EINVAL);
}
static av_cold void uninit(AVFilterContext *ctx)
{
- OCVContext *ocv = ctx->priv;
+ OCVContext *s = ctx->priv;
- if (ocv->uninit)
- ocv->uninit(ctx);
- av_free(ocv->priv);
- memset(ocv, 0, sizeof(*ocv));
+ if (s->uninit)
+ s->uninit(ctx);
+ av_free(s->priv);
}
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
- OCVContext *ocv = ctx->priv;
+ OCVContext *s = ctx->priv;
AVFilterLink *outlink= inlink->dst->outputs[0];
AVFrame *out;
IplImage inimg, outimg;
@@ -368,7 +368,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
fill_iplimage_from_frame(&inimg , in , inlink->format);
fill_iplimage_from_frame(&outimg, out, inlink->format);
- ocv->end_frame_filter(ctx, &inimg, &outimg);
+ s->end_frame_filter(ctx, &inimg, &outimg);
fill_frame_from_iplimage(out, &outimg, inlink->format);
av_frame_free(&in);
@@ -376,10 +376,20 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
return ff_filter_frame(outlink, out);
}
+#define OFFSET(x) offsetof(OCVContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption ocv_options[] = {
+ { "filter_name", NULL, OFFSET(name), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "filter_params", NULL, OFFSET(params), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(ocv);
+
static const AVFilterPad avfilter_vf_ocv_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
@@ -393,17 +403,14 @@ static const AVFilterPad avfilter_vf_ocv_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_ocv = {
- .name = "ocv",
- .description = NULL_IF_CONFIG_SMALL("Apply transform using libopencv."),
-
- .priv_size = sizeof(OCVContext),
-
+AVFilter ff_vf_ocv = {
+ .name = "ocv",
+ .description = NULL_IF_CONFIG_SMALL("Apply transform using libopencv."),
+ .priv_size = sizeof(OCVContext),
+ .priv_class = &ocv_class,
.query_formats = query_formats,
- .init = init,
- .uninit = uninit,
-
- .inputs = avfilter_vf_ocv_inputs,
-
- .outputs = avfilter_vf_ocv_outputs,
+ .init = init,
+ .uninit = uninit,
+ .inputs = avfilter_vf_ocv_inputs,
+ .outputs = avfilter_vf_ocv_outputs,
};
diff --git a/ffmpeg/libavfilter/vf_lut.c b/ffmpeg/libavfilter/vf_lut.c
index 1738560..9f30ae0 100644
--- a/ffmpeg/libavfilter/vf_lut.c
+++ b/ffmpeg/libavfilter/vf_lut.c
@@ -24,6 +24,7 @@
* value, and apply it to input video.
*/
+#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
@@ -80,36 +81,36 @@ typedef struct {
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption options[] = {
- {"c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS},
- {"c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS},
- {"c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS},
- {"c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS},
- {"y", "set Y expression", OFFSET(comp_expr_str[Y]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS},
- {"u", "set U expression", OFFSET(comp_expr_str[U]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS},
- {"v", "set V expression", OFFSET(comp_expr_str[V]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS},
- {"r", "set R expression", OFFSET(comp_expr_str[R]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS},
- {"g", "set G expression", OFFSET(comp_expr_str[G]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS},
- {"b", "set B expression", OFFSET(comp_expr_str[B]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS},
- {"a", "set A expression", OFFSET(comp_expr_str[A]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS},
- {NULL},
+ { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
+ { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
+ { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
+ { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
+ { "y", "set Y expression", OFFSET(comp_expr_str[Y]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
+ { "u", "set U expression", OFFSET(comp_expr_str[U]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
+ { "v", "set V expression", OFFSET(comp_expr_str[V]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
+ { "r", "set R expression", OFFSET(comp_expr_str[R]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
+ { "g", "set G expression", OFFSET(comp_expr_str[G]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
+ { "b", "set B expression", OFFSET(comp_expr_str[B]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
+ { "a", "set A expression", OFFSET(comp_expr_str[A]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
+ { NULL }
};
static av_cold void uninit(AVFilterContext *ctx)
{
- LutContext *lut = ctx->priv;
+ LutContext *s = ctx->priv;
int i;
for (i = 0; i < 4; i++) {
- av_expr_free(lut->comp_expr[i]);
- lut->comp_expr[i] = NULL;
- av_freep(&lut->comp_expr_str[i]);
+ av_expr_free(s->comp_expr[i]);
+ s->comp_expr[i] = NULL;
+ av_freep(&s->comp_expr_str[i]);
}
}
#define YUV_FORMATS \
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, \
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, \
- AV_PIX_FMT_YUVA420P, \
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P, \
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, \
AV_PIX_FMT_YUVJ440P
@@ -124,10 +125,11 @@ static const enum AVPixelFormat all_pix_fmts[] = { RGB_FORMATS, YUV_FORMATS, AV_
static int query_formats(AVFilterContext *ctx)
{
- LutContext *lut = ctx->priv;
+ LutContext *s = ctx->priv;
- const enum AVPixelFormat *pix_fmts = lut->is_rgb ? rgb_pix_fmts :
- lut->is_yuv ? yuv_pix_fmts : all_pix_fmts;
+ const enum AVPixelFormat *pix_fmts = s->is_rgb ? rgb_pix_fmts :
+ s->is_yuv ? yuv_pix_fmts :
+ all_pix_fmts;
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
@@ -138,9 +140,9 @@ static int query_formats(AVFilterContext *ctx)
*/
static double clip(void *opaque, double val)
{
- LutContext *lut = opaque;
- double minval = lut->var_values[VAR_MINVAL];
- double maxval = lut->var_values[VAR_MAXVAL];
+ LutContext *s = opaque;
+ double minval = s->var_values[VAR_MINVAL];
+ double maxval = s->var_values[VAR_MAXVAL];
return av_clip(val, minval, maxval);
}
@@ -151,10 +153,10 @@ static double clip(void *opaque, double val)
*/
static double compute_gammaval(void *opaque, double gamma)
{
- LutContext *lut = opaque;
- double val = lut->var_values[VAR_CLIPVAL];
- double minval = lut->var_values[VAR_MINVAL];
- double maxval = lut->var_values[VAR_MAXVAL];
+ LutContext *s = opaque;
+ double val = s->var_values[VAR_CLIPVAL];
+ double minval = s->var_values[VAR_MINVAL];
+ double maxval = s->var_values[VAR_MAXVAL];
return pow((val-minval)/(maxval-minval), gamma) * (maxval-minval)+minval;
}
@@ -174,17 +176,17 @@ static const char * const funcs1_names[] = {
static int config_props(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
- LutContext *lut = ctx->priv;
+ LutContext *s = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
uint8_t rgba_map[4]; /* component index -> RGBA color index map */
int min[4], max[4];
int val, color, ret;
- lut->hsub = desc->log2_chroma_w;
- lut->vsub = desc->log2_chroma_h;
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
- lut->var_values[VAR_W] = inlink->w;
- lut->var_values[VAR_H] = inlink->h;
+ s->var_values[VAR_W] = inlink->w;
+ s->var_values[VAR_H] = inlink->h;
switch (inlink->format) {
case AV_PIX_FMT_YUV410P:
@@ -194,6 +196,8 @@ static int config_props(AVFilterLink *inlink)
case AV_PIX_FMT_YUV440P:
case AV_PIX_FMT_YUV444P:
case AV_PIX_FMT_YUVA420P:
+ case AV_PIX_FMT_YUVA422P:
+ case AV_PIX_FMT_YUVA444P:
min[Y] = min[U] = min[V] = 16;
max[Y] = 235;
max[U] = max[V] = 240;
@@ -204,49 +208,51 @@ static int config_props(AVFilterLink *inlink)
max[0] = max[1] = max[2] = max[3] = 255;
}
- lut->is_yuv = lut->is_rgb = 0;
- if (ff_fmt_is_in(inlink->format, yuv_pix_fmts)) lut->is_yuv = 1;
- else if (ff_fmt_is_in(inlink->format, rgb_pix_fmts)) lut->is_rgb = 1;
+ s->is_yuv = s->is_rgb = 0;
+ if (ff_fmt_is_in(inlink->format, yuv_pix_fmts)) s->is_yuv = 1;
+ else if (ff_fmt_is_in(inlink->format, rgb_pix_fmts)) s->is_rgb = 1;
- if (lut->is_rgb) {
+ if (s->is_rgb) {
ff_fill_rgba_map(rgba_map, inlink->format);
- lut->step = av_get_bits_per_pixel(desc) >> 3;
+ s->step = av_get_bits_per_pixel(desc) >> 3;
}
for (color = 0; color < desc->nb_components; color++) {
double res;
- int comp = lut->is_rgb ? rgba_map[color] : color;
+ int comp = s->is_rgb ? rgba_map[color] : color;
/* create the parsed expression */
- ret = av_expr_parse(&lut->comp_expr[color], lut->comp_expr_str[color],
+ av_expr_free(s->comp_expr[color]);
+ s->comp_expr[color] = NULL;
+ ret = av_expr_parse(&s->comp_expr[color], s->comp_expr_str[color],
var_names, funcs1_names, funcs1, NULL, NULL, 0, ctx);
if (ret < 0) {
av_log(ctx, AV_LOG_ERROR,
"Error when parsing the expression '%s' for the component %d and color %d.\n",
- lut->comp_expr_str[comp], comp, color);
+ s->comp_expr_str[comp], comp, color);
return AVERROR(EINVAL);
}
/* compute the lut */
- lut->var_values[VAR_MAXVAL] = max[color];
- lut->var_values[VAR_MINVAL] = min[color];
+ s->var_values[VAR_MAXVAL] = max[color];
+ s->var_values[VAR_MINVAL] = min[color];
for (val = 0; val < 256; val++) {
- lut->var_values[VAR_VAL] = val;
- lut->var_values[VAR_CLIPVAL] = av_clip(val, min[color], max[color]);
- lut->var_values[VAR_NEGVAL] =
- av_clip(min[color] + max[color] - lut->var_values[VAR_VAL],
+ s->var_values[VAR_VAL] = val;
+ s->var_values[VAR_CLIPVAL] = av_clip(val, min[color], max[color]);
+ s->var_values[VAR_NEGVAL] =
+ av_clip(min[color] + max[color] - s->var_values[VAR_VAL],
min[color], max[color]);
- res = av_expr_eval(lut->comp_expr[color], lut->var_values, lut);
+ res = av_expr_eval(s->comp_expr[color], s->var_values, s);
if (isnan(res)) {
av_log(ctx, AV_LOG_ERROR,
"Error when evaluating the expression '%s' for the value %d for the component %d.\n",
- lut->comp_expr_str[color], val, comp);
+ s->comp_expr_str[color], val, comp);
return AVERROR(EINVAL);
}
- lut->lut[comp][val] = av_clip((int)res, min[color], max[color]);
- av_log(ctx, AV_LOG_DEBUG, "val[%d][%d] = %d\n", comp, val, lut->lut[comp][val]);
+ s->lut[comp][val] = av_clip((int)res, min[color], max[color]);
+ av_log(ctx, AV_LOG_DEBUG, "val[%d][%d] = %d\n", comp, val, s->lut[comp][val]);
}
}
@@ -256,58 +262,60 @@ static int config_props(AVFilterLink *inlink)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
- LutContext *lut = ctx->priv;
+ LutContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out;
uint8_t *inrow, *outrow, *inrow0, *outrow0;
- int i, j, plane;
+ int i, j, plane, direct = 0;
- out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
- if (!out) {
- av_frame_free(&in);
- return AVERROR(ENOMEM);
+ if (av_frame_is_writable(in)) {
+ direct = 1;
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
}
- av_frame_copy_props(out, in);
- if (lut->is_rgb) {
+ if (s->is_rgb) {
/* packed */
inrow0 = in ->data[0];
outrow0 = out->data[0];
for (i = 0; i < in->height; i ++) {
int w = inlink->w;
- const uint8_t (*tab)[256] = (const uint8_t (*)[256])lut->lut;
+ const uint8_t (*tab)[256] = (const uint8_t (*)[256])s->lut;
inrow = inrow0;
outrow = outrow0;
for (j = 0; j < w; j++) {
- outrow[0] = tab[0][inrow[0]];
- if (lut->step>1) {
- outrow[1] = tab[1][inrow[1]];
- if (lut->step>2) {
- outrow[2] = tab[2][inrow[2]];
- if (lut->step>3) {
- outrow[3] = tab[3][inrow[3]];
- }
- }
+ switch (s->step) {
+ case 4: outrow[3] = tab[3][inrow[3]]; // Fall-through
+ case 3: outrow[2] = tab[2][inrow[2]]; // Fall-through
+ case 2: outrow[1] = tab[1][inrow[1]]; // Fall-through
+ default: outrow[0] = tab[0][inrow[0]];
}
- outrow += lut->step;
- inrow += lut->step;
+ outrow += s->step;
+ inrow += s->step;
}
inrow0 += in ->linesize[0];
outrow0 += out->linesize[0];
}
} else {
/* planar */
- for (plane = 0; plane < 4 && in->data[plane]; plane++) {
- int vsub = plane == 1 || plane == 2 ? lut->vsub : 0;
- int hsub = plane == 1 || plane == 2 ? lut->hsub : 0;
+ for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
+ int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
+ int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
+ int h = FF_CEIL_RSHIFT(inlink->h, vsub);
+ int w = FF_CEIL_RSHIFT(inlink->w, hsub);
inrow = in ->data[plane];
outrow = out->data[plane];
- for (i = 0; i < (in->height + (1<<vsub) - 1)>>vsub; i ++) {
- const uint8_t *tab = lut->lut[plane];
- int w = (inlink->w + (1<<hsub) - 1)>>hsub;
+ for (i = 0; i < h; i++) {
+ const uint8_t *tab = s->lut[plane];
for (j = 0; j < w; j++)
outrow[j] = tab[inrow[j]];
inrow += in ->linesize[plane];
@@ -316,36 +324,39 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
}
- av_frame_free(&in);
+ if (!direct)
+ av_frame_free(&in);
+
return ff_filter_frame(outlink, out);
}
static const AVFilterPad inputs[] = {
- { .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame,
- .config_props = config_props,
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
},
- { .name = NULL}
+ { NULL }
};
static const AVFilterPad outputs[] = {
- { .name = "default",
- .type = AVMEDIA_TYPE_VIDEO, },
- { .name = NULL}
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
};
+
#define DEFINE_LUT_FILTER(name_, description_) \
- AVFilter avfilter_vf_##name_ = { \
+ AVFilter ff_vf_##name_ = { \
.name = #name_, \
.description = NULL_IF_CONFIG_SMALL(description_), \
.priv_size = sizeof(LutContext), \
- \
+ .priv_class = &name_ ## _class, \
.init = name_##_init, \
.uninit = uninit, \
.query_formats = query_formats, \
- \
.inputs = inputs, \
.outputs = outputs, \
- .priv_class = &name_##_class, \
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, \
}
#if CONFIG_LUT_FILTER
@@ -353,17 +364,8 @@ static const AVFilterPad outputs[] = {
#define lut_options options
AVFILTER_DEFINE_CLASS(lut);
-static int lut_init(AVFilterContext *ctx, const char *args)
+static int lut_init(AVFilterContext *ctx)
{
- LutContext *lut = ctx->priv;
- int ret;
-
- lut->class = &lut_class;
- av_opt_set_defaults(lut);
-
- if (args && (ret = av_set_options_string(lut, args, "=", ":")) < 0)
- return ret;
-
return 0;
}
@@ -375,17 +377,11 @@ DEFINE_LUT_FILTER(lut, "Compute and apply a lookup table to the RGB/YUV input vi
#define lutyuv_options options
AVFILTER_DEFINE_CLASS(lutyuv);
-static int lutyuv_init(AVFilterContext *ctx, const char *args)
+static av_cold int lutyuv_init(AVFilterContext *ctx)
{
- LutContext *lut = ctx->priv;
- int ret;
-
- lut->class = &lutyuv_class;
- lut->is_yuv = 1;
- av_opt_set_defaults(lut);
+ LutContext *s = ctx->priv;
- if (args && (ret = av_set_options_string(lut, args, "=", ":")) < 0)
- return ret;
+ s->is_yuv = 1;
return 0;
}
@@ -398,17 +394,11 @@ DEFINE_LUT_FILTER(lutyuv, "Compute and apply a lookup table to the YUV input vid
#define lutrgb_options options
AVFILTER_DEFINE_CLASS(lutrgb);
-static int lutrgb_init(AVFilterContext *ctx, const char *args)
+static av_cold int lutrgb_init(AVFilterContext *ctx)
{
- LutContext *lut = ctx->priv;
- int ret;
+ LutContext *s = ctx->priv;
- lut->class = &lutrgb_class;
- lut->is_rgb = 1;
- av_opt_set_defaults(lut);
-
- if (args && (ret = av_set_options_string(lut, args, "=", ":")) < 0)
- return ret;
+ s->is_rgb = 1;
return 0;
}
@@ -418,26 +408,30 @@ DEFINE_LUT_FILTER(lutrgb, "Compute and apply a lookup table to the RGB input vid
#if CONFIG_NEGATE_FILTER
-#define negate_options options
+static const AVOption negate_options[] = {
+ { "negate_alpha", NULL, OFFSET(negate_alpha), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
+ { NULL }
+};
+
AVFILTER_DEFINE_CLASS(negate);
-static int negate_init(AVFilterContext *ctx, const char *args)
+static av_cold int negate_init(AVFilterContext *ctx)
{
- LutContext *lut = ctx->priv;
- char lut_params[64];
-
- if (args)
- sscanf(args, "%d", &lut->negate_alpha);
-
- av_log(ctx, AV_LOG_DEBUG, "negate_alpha:%d\n", lut->negate_alpha);
+ LutContext *s = ctx->priv;
+ int i;
- snprintf(lut_params, sizeof(lut_params), "c0=negval:c1=negval:c2=negval:a=%s",
- lut->negate_alpha ? "negval" : "val");
+ av_log(ctx, AV_LOG_DEBUG, "negate_alpha:%d\n", s->negate_alpha);
- lut->class = &negate_class;
- av_opt_set_defaults(lut);
+ for (i = 0; i < 4; i++) {
+ s->comp_expr_str[i] = av_strdup((i == 3 && !s->negate_alpha) ?
+ "val" : "negval");
+ if (!s->comp_expr_str[i]) {
+ uninit(ctx);
+ return AVERROR(ENOMEM);
+ }
+ }
- return av_set_options_string(lut, lut_params, "=", ":");
+ return 0;
}
DEFINE_LUT_FILTER(negate, "Negate input video.");
diff --git a/ffmpeg/libavfilter/vf_mp.c b/ffmpeg/libavfilter/vf_mp.c
index 4bb5e40..2c145ab 100644
--- a/ffmpeg/libavfilter/vf_mp.c
+++ b/ffmpeg/libavfilter/vf_mp.c
@@ -32,12 +32,12 @@
#include "libavutil/pixdesc.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
#include "libmpcodecs/vf.h"
#include "libmpcodecs/img_format.h"
#include "libmpcodecs/cpudetect.h"
#include "libmpcodecs/av_helpers.h"
-#include "libmpcodecs/vf_scale.h"
#include "libmpcodecs/libvo/fastmemcpy.h"
#include "libswscale/swscale.h"
@@ -111,8 +111,11 @@ static const struct {
{IMGFMT_444P, AV_PIX_FMT_YUVJ444P},
{IMGFMT_440P, AV_PIX_FMT_YUVJ440P},
+#if FF_API_XVMC
{IMGFMT_XVMC_MOCO_MPEG2, AV_PIX_FMT_XVMC_MPEG2_MC},
{IMGFMT_XVMC_IDCT_MPEG2, AV_PIX_FMT_XVMC_MPEG2_IDCT},
+#endif /* FF_API_XVMC */
+
{IMGFMT_VDPAU_MPEG1, AV_PIX_FMT_VDPAU_MPEG1},
{IMGFMT_VDPAU_MPEG2, AV_PIX_FMT_VDPAU_MPEG2},
{IMGFMT_VDPAU_H264, AV_PIX_FMT_VDPAU_H264},
@@ -122,58 +125,22 @@ static const struct {
{0, AV_PIX_FMT_NONE}
};
-extern const vf_info_t ff_vf_info_detc;
-extern const vf_info_t ff_vf_info_dint;
-extern const vf_info_t ff_vf_info_divtc;
-extern const vf_info_t ff_vf_info_down3dright;
extern const vf_info_t ff_vf_info_eq2;
extern const vf_info_t ff_vf_info_eq;
-extern const vf_info_t ff_vf_info_fil;
-//extern const vf_info_t ff_vf_info_filmdint;
extern const vf_info_t ff_vf_info_fspp;
extern const vf_info_t ff_vf_info_ilpack;
-extern const vf_info_t ff_vf_info_ivtc;
-extern const vf_info_t ff_vf_info_mcdeint;
-extern const vf_info_t ff_vf_info_noise;
-extern const vf_info_t ff_vf_info_ow;
-extern const vf_info_t ff_vf_info_perspective;
-extern const vf_info_t ff_vf_info_phase;
extern const vf_info_t ff_vf_info_pp7;
-extern const vf_info_t ff_vf_info_pullup;
-extern const vf_info_t ff_vf_info_qp;
-extern const vf_info_t ff_vf_info_sab;
extern const vf_info_t ff_vf_info_softpulldown;
-extern const vf_info_t ff_vf_info_spp;
-extern const vf_info_t ff_vf_info_telecine;
-extern const vf_info_t ff_vf_info_tinterlace;
extern const vf_info_t ff_vf_info_uspp;
static const vf_info_t* const filters[]={
- &ff_vf_info_detc,
- &ff_vf_info_dint,
- &ff_vf_info_divtc,
- &ff_vf_info_down3dright,
&ff_vf_info_eq2,
&ff_vf_info_eq,
- &ff_vf_info_fil,
-// &ff_vf_info_filmdint, cmmx.h vd.h ‘opt_screen_size_x’
&ff_vf_info_fspp,
&ff_vf_info_ilpack,
- &ff_vf_info_ivtc,
- &ff_vf_info_mcdeint,
- &ff_vf_info_noise,
- &ff_vf_info_ow,
- &ff_vf_info_perspective,
- &ff_vf_info_phase,
&ff_vf_info_pp7,
- &ff_vf_info_pullup,
- &ff_vf_info_qp,
- &ff_vf_info_sab,
&ff_vf_info_softpulldown,
- &ff_vf_info_spp,
- &ff_vf_info_telecine,
- &ff_vf_info_tinterlace,
&ff_vf_info_uspp,
NULL
@@ -210,68 +177,25 @@ enum AVPixelFormat ff_mp2ff_pix_fmt(int mp){
return mp == conversion_map[i].fmt ? conversion_map[i].pix_fmt : AV_PIX_FMT_NONE;
}
-static void ff_sws_getFlagsAndFilterFromCmdLine(int *flags, SwsFilter **srcFilterParam, SwsFilter **dstFilterParam)
-{
- static int firstTime=1;
- *flags=0;
-
-#if ARCH_X86
- if(ff_gCpuCaps.hasMMX)
- __asm__ volatile("emms\n\t"::: "memory"); //FIXME this should not be required but it IS (even for non-MMX versions)
-#endif
- if(firstTime)
- {
- firstTime=0;
- *flags= SWS_PRINT_INFO;
- }
- else if( ff_mp_msg_test(MSGT_VFILTER,MSGL_DBG2) ) *flags= SWS_PRINT_INFO;
-
- switch(SWS_BILINEAR)
- {
- case 0: *flags|= SWS_FAST_BILINEAR; break;
- case 1: *flags|= SWS_BILINEAR; break;
- case 2: *flags|= SWS_BICUBIC; break;
- case 3: *flags|= SWS_X; break;
- case 4: *flags|= SWS_POINT; break;
- case 5: *flags|= SWS_AREA; break;
- case 6: *flags|= SWS_BICUBLIN; break;
- case 7: *flags|= SWS_GAUSS; break;
- case 8: *flags|= SWS_SINC; break;
- case 9: *flags|= SWS_LANCZOS; break;
- case 10:*flags|= SWS_SPLINE; break;
- default:*flags|= SWS_BILINEAR; break;
- }
-
- *srcFilterParam= NULL;
- *dstFilterParam= NULL;
-}
-
-//exact copy from vf_scale.c
-// will use sws_flags & src_filter (from cmd line)
-struct SwsContext *ff_sws_getContextFromCmdLine(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat)
-{
- int flags, i;
- SwsFilter *dstFilterParam, *srcFilterParam;
- enum AVPixelFormat dfmt, sfmt;
-
- for(i=0; conversion_map[i].fmt && dstFormat != conversion_map[i].fmt; i++);
- dfmt= conversion_map[i].pix_fmt;
- for(i=0; conversion_map[i].fmt && srcFormat != conversion_map[i].fmt; i++);
- sfmt= conversion_map[i].pix_fmt;
-
- if (srcFormat == IMGFMT_RGB8 || srcFormat == IMGFMT_BGR8) sfmt = AV_PIX_FMT_PAL8;
- ff_sws_getFlagsAndFilterFromCmdLine(&flags, &srcFilterParam, &dstFilterParam);
-
- return sws_getContext(srcW, srcH, sfmt, dstW, dstH, dfmt, flags , srcFilterParam, dstFilterParam, NULL);
-}
-
typedef struct {
+ const AVClass *class;
vf_instance_t vf;
vf_instance_t next_vf;
AVFilterContext *avfctx;
int frame_returned;
+ char *filter;
+ enum AVPixelFormat in_pix_fmt;
} MPContext;
+#define OFFSET(x) offsetof(MPContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption mp_options[] = {
+ { "filter", "set MPlayer filter name and parameters", OFFSET(filter), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(mp);
+
void ff_mp_msg(int mod, int lev, const char *format, ... ){
va_list va;
va_start(va, format);
@@ -534,10 +458,8 @@ mp_image_t* ff_vf_get_image(vf_instance_t* vf, unsigned int outfmt, int mp_imgty
return mpi;
}
-static void dummy_free(void *opaque, uint8_t *data){}
-
int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts){
- MPContext *m= (void*)vf;
+ MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
AVFilterLink *outlink = m->avfctx->outputs[0];
AVFrame *picref = av_frame_alloc();
int i;
@@ -557,19 +479,25 @@ int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts){
for(i=0; conversion_map[i].fmt && mpi->imgfmt != conversion_map[i].fmt; i++);
picref->format = conversion_map[i].pix_fmt;
+ for(i=0; conversion_map[i].fmt && m->in_pix_fmt != conversion_map[i].pix_fmt; i++);
+ if (mpi->imgfmt == conversion_map[i].fmt)
+ picref->format = conversion_map[i].pix_fmt;
+
memcpy(picref->linesize, mpi->stride, FFMIN(sizeof(picref->linesize), sizeof(mpi->stride)));
for(i=0; i<4 && mpi->stride[i]; i++){
- picref->buf[i] = av_buffer_create(mpi->planes[i], mpi->stride[i], dummy_free, NULL,
- (mpi->flags & MP_IMGFLAG_PRESERVE) ? AV_BUFFER_FLAG_READONLY : 0);
- if (!picref->buf[i])
- goto fail;
- picref->data[i] = picref->buf[i]->data;
+ picref->data[i] = mpi->planes[i];
}
if(pts != MP_NOPTS_VALUE)
picref->pts= pts * av_q2d(outlink->time_base);
+ if(1) { // mp buffers are currently unsupported in libavfilter, we thus must copy
+ AVFrame *tofree = picref;
+ picref = av_frame_clone(picref);
+ av_frame_free(&tofree);
+ }
+
ff_filter_frame(outlink, picref);
m->frame_returned++;
@@ -609,13 +537,13 @@ int ff_vf_next_config(struct vf_instance *vf,
}
int ff_vf_next_control(struct vf_instance *vf, int request, void* data){
- MPContext *m= (void*)vf;
+ MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
av_log(m->avfctx, AV_LOG_DEBUG, "Received control %d\n", request);
return 0;
}
static int vf_default_query_format(struct vf_instance *vf, unsigned int fmt){
- MPContext *m= (void*)vf;
+ MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
int i;
av_log(m->avfctx, AV_LOG_DEBUG, "query %X\n", fmt);
@@ -627,14 +555,29 @@ static int vf_default_query_format(struct vf_instance *vf, unsigned int fmt){
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
MPContext *m = ctx->priv;
+ int cpu_flags = av_get_cpu_flags();
char name[256];
+ const char *args;
int i;
+ ff_gCpuCaps.hasMMX = cpu_flags & AV_CPU_FLAG_MMX;
+ ff_gCpuCaps.hasMMX2 = cpu_flags & AV_CPU_FLAG_MMX2;
+ ff_gCpuCaps.hasSSE = cpu_flags & AV_CPU_FLAG_SSE;
+ ff_gCpuCaps.hasSSE2 = cpu_flags & AV_CPU_FLAG_SSE2;
+ ff_gCpuCaps.hasSSE3 = cpu_flags & AV_CPU_FLAG_SSE3;
+ ff_gCpuCaps.hasSSSE3 = cpu_flags & AV_CPU_FLAG_SSSE3;
+ ff_gCpuCaps.hasSSE4 = cpu_flags & AV_CPU_FLAG_SSE4;
+ ff_gCpuCaps.hasSSE42 = cpu_flags & AV_CPU_FLAG_SSE42;
+ ff_gCpuCaps.hasAVX = cpu_flags & AV_CPU_FLAG_AVX;
+ ff_gCpuCaps.has3DNow = cpu_flags & AV_CPU_FLAG_3DNOW;
+ ff_gCpuCaps.has3DNowExt = cpu_flags & AV_CPU_FLAG_3DNOWEXT;
+
m->avfctx= ctx;
+ args = m->filter;
if(!args || 1!=sscanf(args, "%255[^:=]", name)){
av_log(ctx, AV_LOG_ERROR, "Invalid parameter.\n");
return AVERROR(EINVAL);
@@ -793,11 +736,17 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++);
ff_mp_image_setfmt(mpi,conversion_map[i].fmt);
+ m->in_pix_fmt = inlink->format;
memcpy(mpi->planes, inpic->data, FFMIN(sizeof(inpic->data) , sizeof(mpi->planes)));
memcpy(mpi->stride, inpic->linesize, FFMIN(sizeof(inpic->linesize), sizeof(mpi->stride)));
- //FIXME pass interleced & tff flags around
+ if (inpic->interlaced_frame)
+ mpi->fields |= MP_IMGFIELD_INTERLACED;
+ if (inpic->top_field_first)
+ mpi->fields |= MP_IMGFIELD_TOP_FIRST;
+ if (inpic->repeat_pict)
+ mpi->fields |= MP_IMGFIELD_REPEAT_FIRST;
// mpi->flags|=MP_IMGFLAG_ALLOCATED; ?
mpi->flags |= MP_IMGFLAG_READABLE;
@@ -832,13 +781,14 @@ static const AVFilterPad mp_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_mp = {
- .name = "mp",
- .description = NULL_IF_CONFIG_SMALL("Apply a libmpcodecs filter to the input video."),
- .init = init,
- .uninit = uninit,
- .priv_size = sizeof(MPContext),
+AVFilter ff_vf_mp = {
+ .name = "mp",
+ .description = NULL_IF_CONFIG_SMALL("Apply a libmpcodecs filter to the input video."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(MPContext),
.query_formats = query_formats,
.inputs = mp_inputs,
.outputs = mp_outputs,
+ .priv_class = &mp_class,
};
diff --git a/ffmpeg/libavfilter/vf_noise.c b/ffmpeg/libavfilter/vf_noise.c
index 7095324..c29afa2 100644
--- a/ffmpeg/libavfilter/vf_noise.c
+++ b/ffmpeg/libavfilter/vf_noise.c
@@ -5,17 +5,17 @@
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
+ * modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with FFmpeg; if not, write to the Free Software
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -29,25 +29,24 @@
#include "libavutil/lfg.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
+#include "libavutil/x86/asm.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
-#define MAX_NOISE 4096
+#define MAX_NOISE 5120
#define MAX_SHIFT 1024
#define MAX_RES (MAX_NOISE-MAX_SHIFT)
#define NOISE_UNIFORM 1
#define NOISE_TEMPORAL 2
-#define NOISE_QUALITY 4
#define NOISE_AVERAGED 8
#define NOISE_PATTERN 16
typedef struct {
int strength;
unsigned flags;
- int shiftptr;
AVLFG lfg;
int seed;
int8_t *noise;
@@ -57,14 +56,20 @@ typedef struct {
typedef struct {
const AVClass *class;
int nb_planes;
- int linesize[4];
+ int bytewidth[4];
int height[4];
FilterParams all;
FilterParams param[4];
int rand_shift[MAX_RES];
int rand_shift_init;
+ void (*line_noise)(uint8_t *dst, const uint8_t *src, int8_t *noise, int len, int shift);
+ void (*line_noise_avg)(uint8_t *dst, const uint8_t *src, int len, int8_t **shift);
} NoiseContext;
+typedef struct ThreadData {
+ AVFrame *in, *out;
+} ThreadData;
+
#define OFFSET(x) offsetof(NoiseContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
@@ -76,7 +81,6 @@ typedef struct {
{#name"f", "set component #"#x" flags", OFFSET(param.flags), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, 31, FLAGS, #name"_flags"}, \
{"a", "averaged noise", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_AVERAGED}, 0, 0, FLAGS, #name"_flags"}, \
{"p", "(semi)regular pattern", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_PATTERN}, 0, 0, FLAGS, #name"_flags"}, \
- {"q", "high quality", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_QUALITY}, 0, 0, FLAGS, #name"_flags"}, \
{"t", "temporal noise", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_TEMPORAL}, 0, 0, FLAGS, #name"_flags"}, \
{"u", "uniform noise", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_UNIFORM}, 0, 0, FLAGS, #name"_flags"},
@@ -94,7 +98,7 @@ AVFILTER_DEFINE_CLASS(noise);
static const int8_t patt[4] = { -1, 0, 1, 0 };
#define RAND_N(range) ((int) ((double) range * av_lfg_get(lfg) / (UINT_MAX + 1.0)))
-static int init_noise(NoiseContext *n, int comp)
+static av_cold int init_noise(NoiseContext *n, int comp)
{
int8_t *noise = av_malloc(MAX_NOISE * sizeof(int8_t));
FilterParams *fp = &n->param[comp];
@@ -128,8 +132,8 @@ static int init_noise(NoiseContext *n, int comp)
} else {
double x1, x2, w, y1;
do {
- x1 = 2.0 * av_lfg_get(lfg) / (float)RAND_MAX - 1.0;
- x2 = 2.0 * av_lfg_get(lfg) / (float)RAND_MAX - 1.0;
+ x1 = 2.0 * av_lfg_get(lfg) / (float)UINT_MAX - 1.0;
+ x2 = 2.0 * av_lfg_get(lfg) / (float)UINT_MAX - 1.0;
w = x1 * x1 + x2 * x2;
} while (w >= 1.0);
@@ -160,37 +164,6 @@ static int init_noise(NoiseContext *n, int comp)
}
fp->noise = noise;
- fp->shiftptr = 0;
- return 0;
-}
-
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- NoiseContext *n = ctx->priv;
- int ret, i;
-
- n->class = &noise_class;
- av_opt_set_defaults(n);
-
- if ((ret = av_set_options_string(n, args, "=", ":")) < 0)
- return ret;
-
- for (i = 0; i < 4; i++) {
- if (n->all.seed >= 0)
- n->param[i].seed = n->all.seed;
- else
- n->param[i].seed = 123457;
- if (n->all.strength)
- n->param[i].strength = n->all.strength;
- if (n->all.flags)
- n->param[i].flags = n->all.flags;
- }
-
- for (i = 0; i < 4; i++) {
- if (n->param[i].strength && ((ret = init_noise(n, i)) < 0))
- return ret;
- }
-
return 0;
}
@@ -201,7 +174,7 @@ static int query_formats(AVFilterContext *ctx)
for (fmt = 0; fmt < AV_PIX_FMT_NB; fmt++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
- if (desc->flags & PIX_FMT_PLANAR && !((desc->comp[0].depth_minus1 + 1) & 7))
+ if (desc->flags & AV_PIX_FMT_FLAG_PLANAR && !((desc->comp[0].depth_minus1 + 1) & 7))
ff_add_format(&formats, fmt);
}
@@ -213,22 +186,20 @@ static int config_input(AVFilterLink *inlink)
{
NoiseContext *n = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
- int i, ret;
+ int ret;
- for (i = 0; i < desc->nb_components; i++)
- n->nb_planes = FFMAX(n->nb_planes, desc->comp[i].plane);
- n->nb_planes++;
+ n->nb_planes = av_pix_fmt_count_planes(inlink->format);
- if ((ret = av_image_fill_linesizes(n->linesize, inlink->format, inlink->w)) < 0)
+ if ((ret = av_image_fill_linesizes(n->bytewidth, inlink->format, inlink->w)) < 0)
return ret;
- n->height[1] = n->height[2] = inlink->h >> desc->log2_chroma_h;
+ n->height[1] = n->height[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
n->height[0] = n->height[3] = inlink->h;
return 0;
}
-static void line_noise(uint8_t *dst, const uint8_t *src, int8_t *noise,
+static inline void line_noise_c(uint8_t *dst, const uint8_t *src, int8_t *noise,
int len, int shift)
{
int i;
@@ -241,7 +212,69 @@ static void line_noise(uint8_t *dst, const uint8_t *src, int8_t *noise,
}
}
-static void line_noise_avg(uint8_t *dst, const uint8_t *src,
+#define ASMALIGN(ZEROBITS) ".p2align " #ZEROBITS "\n\t"
+
+static void line_noise_mmx(uint8_t *dst, const uint8_t *src,
+ int8_t *noise, int len, int shift)
+{
+#if HAVE_MMX_INLINE
+ x86_reg mmx_len= len&(~7);
+ noise+=shift;
+
+ __asm__ volatile(
+ "mov %3, %%"REG_a" \n\t"
+ "pcmpeqb %%mm7, %%mm7 \n\t"
+ "psllw $15, %%mm7 \n\t"
+ "packsswb %%mm7, %%mm7 \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movq (%0, %%"REG_a"), %%mm0 \n\t"
+ "movq (%1, %%"REG_a"), %%mm1 \n\t"
+ "pxor %%mm7, %%mm0 \n\t"
+ "paddsb %%mm1, %%mm0 \n\t"
+ "pxor %%mm7, %%mm0 \n\t"
+ "movq %%mm0, (%2, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ :: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len)
+ : "%"REG_a
+ );
+ if (mmx_len!=len)
+ line_noise_c(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0);
+#endif
+}
+
+static void line_noise_mmxext(uint8_t *dst, const uint8_t *src,
+ int8_t *noise, int len, int shift)
+{
+#if HAVE_MMXEXT_INLINE
+ x86_reg mmx_len= len&(~7);
+ noise+=shift;
+
+ __asm__ volatile(
+ "mov %3, %%"REG_a" \n\t"
+ "pcmpeqb %%mm7, %%mm7 \n\t"
+ "psllw $15, %%mm7 \n\t"
+ "packsswb %%mm7, %%mm7 \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movq (%0, %%"REG_a"), %%mm0 \n\t"
+ "movq (%1, %%"REG_a"), %%mm1 \n\t"
+ "pxor %%mm7, %%mm0 \n\t"
+ "paddsb %%mm1, %%mm0 \n\t"
+ "pxor %%mm7, %%mm0 \n\t"
+ "movntq %%mm0, (%2, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ :: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len)
+ : "%"REG_a
+ );
+ if (mmx_len != len)
+ line_noise_c(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0);
+#endif
+}
+
+static inline void line_noise_avg_c(uint8_t *dst, const uint8_t *src,
int len, int8_t **shift)
{
int i;
@@ -253,57 +286,109 @@ static void line_noise_avg(uint8_t *dst, const uint8_t *src,
}
}
+static inline void line_noise_avg_mmx(uint8_t *dst, const uint8_t *src,
+ int len, int8_t **shift)
+{
+#if HAVE_MMX_INLINE
+ x86_reg mmx_len= len&(~7);
+
+ __asm__ volatile(
+ "mov %5, %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movq (%1, %%"REG_a"), %%mm1 \n\t"
+ "movq (%0, %%"REG_a"), %%mm0 \n\t"
+ "paddb (%2, %%"REG_a"), %%mm1 \n\t"
+ "paddb (%3, %%"REG_a"), %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm1, %%mm3 \n\t"
+ "punpcklbw %%mm0, %%mm0 \n\t"
+ "punpckhbw %%mm2, %%mm2 \n\t"
+ "punpcklbw %%mm1, %%mm1 \n\t"
+ "punpckhbw %%mm3, %%mm3 \n\t"
+ "pmulhw %%mm0, %%mm1 \n\t"
+ "pmulhw %%mm2, %%mm3 \n\t"
+ "paddw %%mm1, %%mm1 \n\t"
+ "paddw %%mm3, %%mm3 \n\t"
+ "paddw %%mm0, %%mm1 \n\t"
+ "paddw %%mm2, %%mm3 \n\t"
+ "psrlw $8, %%mm1 \n\t"
+ "psrlw $8, %%mm3 \n\t"
+ "packuswb %%mm3, %%mm1 \n\t"
+ "movq %%mm1, (%4, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ :: "r" (src+mmx_len), "r" (shift[0]+mmx_len), "r" (shift[1]+mmx_len), "r" (shift[2]+mmx_len),
+ "r" (dst+mmx_len), "g" (-mmx_len)
+ : "%"REG_a
+ );
+
+ if (mmx_len != len){
+ int8_t *shift2[3]={shift[0]+mmx_len, shift[1]+mmx_len, shift[2]+mmx_len};
+ line_noise_avg_c(dst+mmx_len, src+mmx_len, len-mmx_len, shift2);
+ }
+#endif
+}
+
static void noise(uint8_t *dst, const uint8_t *src,
int dst_linesize, int src_linesize,
- int width, int height, NoiseContext *n, int comp)
+ int width, int start, int end, NoiseContext *n, int comp)
{
- int8_t *noise = n->param[comp].noise;
- int flags = n->param[comp].flags;
- AVLFG *lfg = &n->param[comp].lfg;
+ FilterParams *p = &n->param[comp];
+ int8_t *noise = p->noise;
+ const int flags = p->flags;
+ AVLFG *lfg = &p->lfg;
int shift, y;
if (!noise) {
- if (dst != src) {
- for (y = 0; y < height; y++) {
- memcpy(dst, src, width);
- dst += dst_linesize;
- src += src_linesize;
- }
- }
-
+ if (dst != src)
+ av_image_copy_plane(dst, dst_linesize, src, src_linesize, width, end - start);
return;
}
- for (y = 0; y < height; y++) {
+ for (y = start; y < end; y++) {
+ const int ix = y & (MAX_RES - 1);
if (flags & NOISE_TEMPORAL)
shift = av_lfg_get(lfg) & (MAX_SHIFT - 1);
else
- shift = n->rand_shift[y];
-
- if (!(flags & NOISE_QUALITY))
- shift &= ~7;
+ shift = n->rand_shift[ix];
if (flags & NOISE_AVERAGED) {
- line_noise_avg(dst, src, width, n->param[comp].prev_shift[y]);
- n->param[comp].prev_shift[y][n->param[comp].shiftptr] = noise + shift;
+ n->line_noise_avg(dst, src, width, p->prev_shift[ix]);
+ p->prev_shift[ix][shift & 3] = noise + shift;
} else {
- line_noise(dst, src, noise, width, shift);
+ n->line_noise(dst, src, noise, width, shift);
}
dst += dst_linesize;
src += src_linesize;
}
+}
- n->param[comp].shiftptr++;
- if (n->param[comp].shiftptr == 3)
- n->param[comp].shiftptr = 0;
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ NoiseContext *s = ctx->priv;
+ ThreadData *td = arg;
+ int plane;
+
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ const int height = s->height[plane];
+ const int start = (height * jobnr ) / nb_jobs;
+ const int end = (height * (jobnr+1)) / nb_jobs;
+ noise(td->out->data[plane] + start * td->out->linesize[plane],
+ td->in->data[plane] + start * td->in->linesize[plane],
+ td->out->linesize[plane], td->in->linesize[plane],
+ s->bytewidth[plane], start, end, s, plane);
+ }
+ return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
- NoiseContext *n = inlink->dst->priv;
- AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ NoiseContext *n = ctx->priv;
+ ThreadData td;
AVFrame *out;
- int ret, i;
if (av_frame_is_writable(inpicref)) {
out = inpicref;
@@ -316,14 +401,50 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
av_frame_copy_props(out, inpicref);
}
- for (i = 0; i < n->nb_planes; i++)
- noise(out->data[i], inpicref->data[i], out->linesize[i],
- inpicref->linesize[i], n->linesize[i], n->height[i], n, i);
+ td.in = inpicref; td.out = out;
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(n->height[0], ctx->graph->nb_threads));
+ emms_c();
- ret = ff_filter_frame(outlink, out);
if (inpicref != out)
av_frame_free(&inpicref);
- return ret;
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ NoiseContext *n = ctx->priv;
+ int ret, i;
+ int cpu_flags = av_get_cpu_flags();
+
+ for (i = 0; i < 4; i++) {
+ if (n->all.seed >= 0)
+ n->param[i].seed = n->all.seed;
+ else
+ n->param[i].seed = 123457;
+ if (n->all.strength)
+ n->param[i].strength = n->all.strength;
+ if (n->all.flags)
+ n->param[i].flags = n->all.flags;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (n->param[i].strength && ((ret = init_noise(n, i)) < 0))
+ return ret;
+ }
+
+ n->line_noise = line_noise_c;
+ n->line_noise_avg = line_noise_avg_c;
+
+ if (HAVE_MMX_INLINE &&
+ cpu_flags & AV_CPU_FLAG_MMX) {
+ n->line_noise = line_noise_mmx;
+ n->line_noise_avg = line_noise_avg_mmx;
+ }
+ if (HAVE_MMXEXT_INLINE &&
+ cpu_flags & AV_CPU_FLAG_MMXEXT)
+ n->line_noise = line_noise_mmxext;
+
+ return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
@@ -333,29 +454,27 @@ static av_cold void uninit(AVFilterContext *ctx)
for (i = 0; i < 4; i++)
av_freep(&n->param[i].noise);
- av_opt_free(n);
}
static const AVFilterPad noise_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
- .config_props = config_input,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
},
{ NULL }
};
static const AVFilterPad noise_outputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
-AVFilter avfilter_vf_noise = {
+AVFilter ff_vf_noise = {
.name = "noise",
.description = NULL_IF_CONFIG_SMALL("Add noise."),
.priv_size = sizeof(NoiseContext),
@@ -365,4 +484,5 @@ AVFilter avfilter_vf_noise = {
.inputs = noise_inputs,
.outputs = noise_outputs,
.priv_class = &noise_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};
diff --git a/ffmpeg/libavfilter/vf_null.c b/ffmpeg/libavfilter/vf_null.c
index eafa268..2355615 100644
--- a/ffmpeg/libavfilter/vf_null.c
+++ b/ffmpeg/libavfilter/vf_null.c
@@ -28,9 +28,8 @@
static const AVFilterPad avfilter_vf_null_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
@@ -43,13 +42,9 @@ static const AVFilterPad avfilter_vf_null_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_null = {
- .name = "null",
+AVFilter ff_vf_null = {
+ .name = "null",
.description = NULL_IF_CONFIG_SMALL("Pass the source unchanged to the output."),
-
- .priv_size = 0,
-
- .inputs = avfilter_vf_null_inputs,
-
- .outputs = avfilter_vf_null_outputs,
+ .inputs = avfilter_vf_null_inputs,
+ .outputs = avfilter_vf_null_outputs,
};
diff --git a/ffmpeg/libavfilter/vf_overlay.c b/ffmpeg/libavfilter/vf_overlay.c
index e7d213c..9047dee 100644
--- a/ffmpeg/libavfilter/vf_overlay.c
+++ b/ffmpeg/libavfilter/vf_overlay.c
@@ -25,20 +25,17 @@
* overlay one video on top of another
*/
-/* #define DEBUG */
-
#include "avfilter.h"
#include "formats.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/avstring.h"
-#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
-#include "libavutil/timestamp.h"
+#include "libavutil/opt.h"
#include "internal.h"
-#include "bufferqueue.h"
+#include "dualinput.h"
#include "drawutils.h"
#include "video.h"
@@ -47,6 +44,13 @@ static const char *const var_names[] = {
"main_h", "H", ///< height of the main video
"overlay_w", "w", ///< width of the overlay video
"overlay_h", "h", ///< height of the overlay video
+ "hsub",
+ "vsub",
+ "x",
+ "y",
+ "n", ///< number of frame
+ "pos", ///< position in the file
+ "t", ///< timestamp expressed in seconds
NULL
};
@@ -55,6 +59,13 @@ enum var_name {
VAR_MAIN_H, VAR_MH,
VAR_OVERLAY_W, VAR_OW,
VAR_OVERLAY_H, VAR_OH,
+ VAR_HSUB,
+ VAR_VSUB,
+ VAR_X,
+ VAR_Y,
+ VAR_N,
+ VAR_POS,
+ VAR_T,
VAR_VARS_NB
};
@@ -75,8 +86,6 @@ typedef struct {
int x, y; ///< position of overlayed picture
int allow_packed_rgb;
- uint8_t frame_requested;
- uint8_t overlay_eof;
uint8_t main_is_packed_rgb;
uint8_t main_rgba_map[4];
uint8_t main_has_alpha;
@@ -84,73 +93,95 @@ typedef struct {
uint8_t overlay_rgba_map[4];
uint8_t overlay_has_alpha;
enum OverlayFormat { OVERLAY_FORMAT_YUV420, OVERLAY_FORMAT_YUV444, OVERLAY_FORMAT_RGB, OVERLAY_FORMAT_NB} format;
+ enum EvalMode { EVAL_MODE_INIT, EVAL_MODE_FRAME, EVAL_MODE_NB } eval_mode;
- AVFrame *overpicref;
- struct FFBufQueue queue_main;
- struct FFBufQueue queue_over;
+ FFDualInputContext dinput;
int main_pix_step[4]; ///< steps per pixel for each plane of the main output
int overlay_pix_step[4]; ///< steps per pixel for each plane of the overlay
int hsub, vsub; ///< chroma subsampling values
- int shortest; ///< terminate stream when the shortest input terminates
+ double var_values[VAR_VARS_NB];
char *x_expr, *y_expr;
+ AVExpr *x_pexpr, *y_pexpr;
} OverlayContext;
-#define OFFSET(x) offsetof(OverlayContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ OverlayContext *s = ctx->priv;
-static const AVOption overlay_options[] = {
- { "x", "set the x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "y", "set the y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "rgb", "force packed RGB in input and output (deprecated)", OFFSET(allow_packed_rgb), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
- { "shortest", "force termination when the shortest input terminates", OFFSET(shortest), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
+ ff_dualinput_uninit(&s->dinput);
+ av_expr_free(s->x_pexpr); s->x_pexpr = NULL;
+ av_expr_free(s->y_pexpr); s->y_pexpr = NULL;
+}
- { "format", "set output format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=OVERLAY_FORMAT_YUV420}, 0, OVERLAY_FORMAT_NB-1, FLAGS, "format" },
- { "yuv420", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV420}, .flags = FLAGS, .unit = "format" },
- { "yuv444", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV444}, .flags = FLAGS, .unit = "format" },
- { "rgb", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_RGB}, .flags = FLAGS, .unit = "format" },
+static inline int normalize_xy(double d, int chroma_sub)
+{
+ if (isnan(d))
+ return INT_MAX;
+ return (int)d & ~((1 << chroma_sub) - 1);
+}
- { NULL }
-};
+static void eval_expr(AVFilterContext *ctx)
+{
+ OverlayContext *s = ctx->priv;
-AVFILTER_DEFINE_CLASS(overlay);
+ s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
+ s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, NULL);
+ s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
+ s->x = normalize_xy(s->var_values[VAR_X], s->hsub);
+ s->y = normalize_xy(s->var_values[VAR_Y], s->vsub);
+}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static int set_expr(AVExpr **pexpr, const char *expr, const char *option, void *log_ctx)
{
- OverlayContext *over = ctx->priv;
- static const char *shorthand[] = { "x", "y", NULL };
int ret;
-
- over->class = &overlay_class;
- av_opt_set_defaults(over);
-
- ret = av_opt_set_from_string(over, args, shorthand, "=", ":");
- if (ret < 0)
+ AVExpr *old = NULL;
+
+ if (*pexpr)
+ old = *pexpr;
+ ret = av_expr_parse(pexpr, expr, var_names,
+ NULL, NULL, NULL, NULL, 0, log_ctx);
+ if (ret < 0) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Error when evaluating the expression '%s' for %s\n",
+ expr, option);
+ *pexpr = old;
return ret;
-
- if (over->allow_packed_rgb) {
- av_log(ctx, AV_LOG_WARNING,
- "The rgb option is deprecated and is overriding the format option, use format instead\n");
- over->format = OVERLAY_FORMAT_RGB;
}
+
+ av_expr_free(old);
return 0;
}
-static av_cold void uninit(AVFilterContext *ctx)
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
{
- OverlayContext *over = ctx->priv;
+ OverlayContext *s = ctx->priv;
+ int ret;
- av_opt_free(over);
+ if (!strcmp(cmd, "x"))
+ ret = set_expr(&s->x_pexpr, args, cmd, ctx);
+ else if (!strcmp(cmd, "y"))
+ ret = set_expr(&s->y_pexpr, args, cmd, ctx);
+ else
+ ret = AVERROR(ENOSYS);
- av_frame_free(&over->overpicref);
- ff_bufqueue_discard_all(&over->queue_main);
- ff_bufqueue_discard_all(&over->queue_over);
+ if (ret < 0)
+ return ret;
+
+ if (s->eval_mode == EVAL_MODE_INIT) {
+ eval_expr(ctx);
+ av_log(ctx, AV_LOG_VERBOSE, "x:%f xi:%d y:%f yi:%d\n",
+ s->var_values[VAR_X], s->x,
+ s->var_values[VAR_Y], s->y);
+ }
+ return ret;
}
static int query_formats(AVFilterContext *ctx)
{
- OverlayContext *over = ctx->priv;
+ OverlayContext *s = ctx->priv;
/* overlay formats contains alpha, for avoiding conversion with alpha information loss */
static const enum AVPixelFormat main_pix_fmts_yuv420[] = {
@@ -182,7 +213,7 @@ static int query_formats(AVFilterContext *ctx)
AVFilterFormats *main_formats;
AVFilterFormats *overlay_formats;
- switch (over->format) {
+ switch (s->format) {
case OVERLAY_FORMAT_YUV420:
main_formats = ff_make_format_list(main_pix_fmts_yuv420);
overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv420);
@@ -214,86 +245,75 @@ static const enum AVPixelFormat alpha_pix_fmts[] = {
static int config_input_main(AVFilterLink *inlink)
{
- OverlayContext *over = inlink->dst->priv;
+ OverlayContext *s = inlink->dst->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
- av_image_fill_max_pixsteps(over->main_pix_step, NULL, pix_desc);
+ av_image_fill_max_pixsteps(s->main_pix_step, NULL, pix_desc);
- over->hsub = pix_desc->log2_chroma_w;
- over->vsub = pix_desc->log2_chroma_h;
+ s->hsub = pix_desc->log2_chroma_w;
+ s->vsub = pix_desc->log2_chroma_h;
- over->main_is_packed_rgb =
- ff_fill_rgba_map(over->main_rgba_map, inlink->format) >= 0;
- over->main_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts);
+ s->main_is_packed_rgb =
+ ff_fill_rgba_map(s->main_rgba_map, inlink->format) >= 0;
+ s->main_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts);
return 0;
}
static int config_input_overlay(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
- OverlayContext *over = inlink->dst->priv;
- char *expr;
- double var_values[VAR_VARS_NB], res;
+ OverlayContext *s = inlink->dst->priv;
int ret;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
- av_image_fill_max_pixsteps(over->overlay_pix_step, NULL, pix_desc);
+ av_image_fill_max_pixsteps(s->overlay_pix_step, NULL, pix_desc);
/* Finish the configuration by evaluating the expressions
now when both inputs are configured. */
- var_values[VAR_MAIN_W ] = var_values[VAR_MW] = ctx->inputs[MAIN ]->w;
- var_values[VAR_MAIN_H ] = var_values[VAR_MH] = ctx->inputs[MAIN ]->h;
- var_values[VAR_OVERLAY_W] = var_values[VAR_OW] = ctx->inputs[OVERLAY]->w;
- var_values[VAR_OVERLAY_H] = var_values[VAR_OH] = ctx->inputs[OVERLAY]->h;
-
- if ((ret = av_expr_parse_and_eval(&res, (expr = over->x_expr), var_names, var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
- goto fail;
- over->x = res;
- if ((ret = av_expr_parse_and_eval(&res, (expr = over->y_expr), var_names, var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)))
- goto fail;
- over->y = res;
- /* x may depend on y */
- if ((ret = av_expr_parse_and_eval(&res, (expr = over->x_expr), var_names, var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
- goto fail;
- over->x = res;
-
- over->overlay_is_packed_rgb =
- ff_fill_rgba_map(over->overlay_rgba_map, inlink->format) >= 0;
- over->overlay_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts);
+ s->var_values[VAR_MAIN_W ] = s->var_values[VAR_MW] = ctx->inputs[MAIN ]->w;
+ s->var_values[VAR_MAIN_H ] = s->var_values[VAR_MH] = ctx->inputs[MAIN ]->h;
+ s->var_values[VAR_OVERLAY_W] = s->var_values[VAR_OW] = ctx->inputs[OVERLAY]->w;
+ s->var_values[VAR_OVERLAY_H] = s->var_values[VAR_OH] = ctx->inputs[OVERLAY]->h;
+ s->var_values[VAR_HSUB] = 1<<pix_desc->log2_chroma_w;
+ s->var_values[VAR_VSUB] = 1<<pix_desc->log2_chroma_h;
+ s->var_values[VAR_X] = NAN;
+ s->var_values[VAR_Y] = NAN;
+ s->var_values[VAR_N] = 0;
+ s->var_values[VAR_T] = NAN;
+ s->var_values[VAR_POS] = NAN;
+
+ if ((ret = set_expr(&s->x_pexpr, s->x_expr, "x", ctx)) < 0 ||
+ (ret = set_expr(&s->y_pexpr, s->y_expr, "y", ctx)) < 0)
+ return ret;
+
+ s->overlay_is_packed_rgb =
+ ff_fill_rgba_map(s->overlay_rgba_map, inlink->format) >= 0;
+ s->overlay_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts);
+
+ if (s->eval_mode == EVAL_MODE_INIT) {
+ eval_expr(ctx);
+ av_log(ctx, AV_LOG_VERBOSE, "x:%f xi:%d y:%f yi:%d\n",
+ s->var_values[VAR_X], s->x,
+ s->var_values[VAR_Y], s->y);
+ }
av_log(ctx, AV_LOG_VERBOSE,
- "main w:%d h:%d fmt:%s overlay x:%d y:%d w:%d h:%d fmt:%s\n",
+ "main w:%d h:%d fmt:%s overlay w:%d h:%d fmt:%s\n",
ctx->inputs[MAIN]->w, ctx->inputs[MAIN]->h,
av_get_pix_fmt_name(ctx->inputs[MAIN]->format),
- over->x, over->y,
ctx->inputs[OVERLAY]->w, ctx->inputs[OVERLAY]->h,
av_get_pix_fmt_name(ctx->inputs[OVERLAY]->format));
-
- if (over->x < 0 || over->y < 0 ||
- over->x + var_values[VAR_OVERLAY_W] > var_values[VAR_MAIN_W] ||
- over->y + var_values[VAR_OVERLAY_H] > var_values[VAR_MAIN_H]) {
- av_log(ctx, AV_LOG_WARNING,
- "Overlay area with coordinates x1:%d y1:%d x2:%d y2:%d "
- "is not completely contained within the output with size %dx%d\n",
- over->x, over->y,
- (int)(over->x + var_values[VAR_OVERLAY_W]),
- (int)(over->y + var_values[VAR_OVERLAY_H]),
- (int)var_values[VAR_MAIN_W], (int)var_values[VAR_MAIN_H]);
- }
return 0;
-
-fail:
- av_log(NULL, AV_LOG_ERROR,
- "Error when evaluating the expression '%s'\n", expr);
- return ret;
}
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
+ OverlayContext *s = ctx->priv;
+ int ret;
+
+ if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
+ return ret;
outlink->w = ctx->inputs[MAIN]->w;
outlink->h = ctx->inputs[MAIN]->h;
@@ -316,33 +336,33 @@ static int config_output(AVFilterLink *outlink)
* Blend image in src to destination buffer dst at position (x, y).
*/
static void blend_image(AVFilterContext *ctx,
- AVFrame *dst, AVFrame *src,
+ AVFrame *dst, const AVFrame *src,
int x, int y)
{
- OverlayContext *over = ctx->priv;
+ OverlayContext *s = ctx->priv;
int i, imax, j, jmax, k, kmax;
const int src_w = src->width;
const int src_h = src->height;
const int dst_w = dst->width;
const int dst_h = dst->height;
- if (x >= dst_w || x+dst_w < 0 ||
- y >= dst_h || y+dst_h < 0)
+ if (x >= dst_w || x+src_w < 0 ||
+ y >= dst_h || y+src_h < 0)
return; /* no intersection */
- if (over->main_is_packed_rgb) {
+ if (s->main_is_packed_rgb) {
uint8_t alpha; ///< the amount of overlay to blend on to main
- const int dr = over->main_rgba_map[R];
- const int dg = over->main_rgba_map[G];
- const int db = over->main_rgba_map[B];
- const int da = over->main_rgba_map[A];
- const int dstep = over->main_pix_step[0];
- const int sr = over->overlay_rgba_map[R];
- const int sg = over->overlay_rgba_map[G];
- const int sb = over->overlay_rgba_map[B];
- const int sa = over->overlay_rgba_map[A];
- const int sstep = over->overlay_pix_step[0];
- const int main_has_alpha = over->main_has_alpha;
+ const int dr = s->main_rgba_map[R];
+ const int dg = s->main_rgba_map[G];
+ const int db = s->main_rgba_map[B];
+ const int da = s->main_rgba_map[A];
+ const int dstep = s->main_pix_step[0];
+ const int sr = s->overlay_rgba_map[R];
+ const int sg = s->overlay_rgba_map[G];
+ const int sb = s->overlay_rgba_map[B];
+ const int sa = s->overlay_rgba_map[A];
+ const int sstep = s->overlay_pix_step[0];
+ const int main_has_alpha = s->main_has_alpha;
uint8_t *s, *sp, *d, *dp;
i = FFMAX(-y, 0);
@@ -398,7 +418,7 @@ static void blend_image(AVFilterContext *ctx,
sp += src->linesize[0];
}
} else {
- const int main_has_alpha = over->main_has_alpha;
+ const int main_has_alpha = s->main_has_alpha;
if (main_has_alpha) {
uint8_t alpha; ///< the amount of overlay to blend on to main
uint8_t *s, *sa, *d, *da;
@@ -436,12 +456,12 @@ static void blend_image(AVFilterContext *ctx,
}
}
for (i = 0; i < 3; i++) {
- int hsub = i ? over->hsub : 0;
- int vsub = i ? over->vsub : 0;
- int src_wp = FFALIGN(src_w, 1<<hsub) >> hsub;
- int src_hp = FFALIGN(src_h, 1<<vsub) >> vsub;
- int dst_wp = FFALIGN(dst_w, 1<<hsub) >> hsub;
- int dst_hp = FFALIGN(dst_h, 1<<vsub) >> vsub;
+ int hsub = i ? s->hsub : 0;
+ int vsub = i ? s->vsub : 0;
+ int src_wp = FF_CEIL_RSHIFT(src_w, hsub);
+ int src_hp = FF_CEIL_RSHIFT(src_h, vsub);
+ int dst_wp = FF_CEIL_RSHIFT(dst_w, hsub);
+ int dst_hp = FF_CEIL_RSHIFT(dst_h, vsub);
int yp = y>>vsub;
int xp = x>>hsub;
uint8_t *s, *sp, *d, *dp, *a, *ap;
@@ -503,149 +523,91 @@ static void blend_image(AVFilterContext *ctx,
}
}
-static int try_filter_frame(AVFilterContext *ctx, AVFrame *mainpic)
+static AVFrame *do_blend(AVFilterContext *ctx, AVFrame *mainpic,
+ const AVFrame *second)
{
- OverlayContext *over = ctx->priv;
- AVFilterLink *outlink = ctx->outputs[0];
- AVFrame *next_overpic;
- int ret;
-
- /* Discard obsolete overlay frames: if there is a next overlay frame with pts
- * before the main frame, we can drop the current overlay. */
- while (1) {
- next_overpic = ff_bufqueue_peek(&over->queue_over, 0);
- if (!next_overpic || av_compare_ts(next_overpic->pts, ctx->inputs[OVERLAY]->time_base,
- mainpic->pts , ctx->inputs[MAIN]->time_base) > 0)
- break;
- ff_bufqueue_get(&over->queue_over);
- av_frame_free(&over->overpicref);
- over->overpicref = next_overpic;
- }
+ OverlayContext *s = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ /* TODO: reindent */
+ if (s->eval_mode == EVAL_MODE_FRAME) {
+ int64_t pos = av_frame_get_pkt_pos(mainpic);
+
+ s->var_values[VAR_N] = inlink->frame_count;
+ s->var_values[VAR_T] = mainpic->pts == AV_NOPTS_VALUE ?
+ NAN : mainpic->pts * av_q2d(inlink->time_base);
+ s->var_values[VAR_POS] = pos == -1 ? NAN : pos;
+
+ eval_expr(ctx);
+ av_log(ctx, AV_LOG_DEBUG, "n:%f t:%f pos:%f x:%f xi:%d y:%f yi:%d\n",
+ s->var_values[VAR_N], s->var_values[VAR_T], s->var_values[VAR_POS],
+ s->var_values[VAR_X], s->x,
+ s->var_values[VAR_Y], s->y);
+ }
- /* If there is no next frame and no EOF and the overlay frame is before
- * the main frame, we can not know yet if it will be superseded. */
- if (!over->queue_over.available && !over->overlay_eof &&
- (!over->overpicref || av_compare_ts(over->overpicref->pts, ctx->inputs[OVERLAY]->time_base,
- mainpic->pts , ctx->inputs[MAIN]->time_base) < 0))
- return AVERROR(EAGAIN);
-
- /* At this point, we know that the current overlay frame extends to the
- * time of the main frame. */
- av_dlog(ctx, "main_pts:%s main_pts_time:%s",
- av_ts2str(mainpic->pts), av_ts2timestr(mainpic->pts, &outlink->time_base));
- if (over->overpicref)
- av_dlog(ctx, " over_pts:%s over_pts_time:%s",
- av_ts2str(over->overpicref->pts), av_ts2timestr(over->overpicref->pts, &outlink->time_base));
- av_dlog(ctx, "\n");
-
- if (over->overpicref)
- blend_image(ctx, mainpic, over->overpicref, over->x, over->y);
- ret = ff_filter_frame(ctx->outputs[0], mainpic);
- av_assert1(ret != AVERROR(EAGAIN));
- over->frame_requested = 0;
- return ret;
+ blend_image(ctx, mainpic, second, s->x, s->y);
+ return mainpic;
}
-static int try_filter_next_frame(AVFilterContext *ctx)
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
- OverlayContext *over = ctx->priv;
- AVFrame *next_mainpic = ff_bufqueue_peek(&over->queue_main, 0);
- int ret;
-
- if (!next_mainpic)
- return AVERROR(EAGAIN);
- if ((ret = try_filter_frame(ctx, next_mainpic)) == AVERROR(EAGAIN))
- return ret;
- ff_bufqueue_get(&over->queue_main);
- return ret;
+ OverlayContext *s = inlink->dst->priv;
+ return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
}
-static int flush_frames(AVFilterContext *ctx)
+static int request_frame(AVFilterLink *outlink)
{
- int ret;
-
- while (!(ret = try_filter_next_frame(ctx)));
- return ret == AVERROR(EAGAIN) ? 0 : ret;
+ OverlayContext *s = outlink->src->priv;
+ return ff_dualinput_request_frame(&s->dinput, outlink);
}
-static int filter_frame_main(AVFilterLink *inlink, AVFrame *inpicref)
+static av_cold int init(AVFilterContext *ctx)
{
- AVFilterContext *ctx = inlink->dst;
- OverlayContext *over = ctx->priv;
- int ret;
+ OverlayContext *s = ctx->priv;
- if ((ret = flush_frames(ctx)) < 0)
- return ret;
- if ((ret = try_filter_frame(ctx, inpicref)) < 0) {
- if (ret != AVERROR(EAGAIN))
- return ret;
- ff_bufqueue_add(ctx, &over->queue_main, inpicref);
+ if (s->allow_packed_rgb) {
+ av_log(ctx, AV_LOG_WARNING,
+ "The rgb option is deprecated and is overriding the format option, use format instead\n");
+ s->format = OVERLAY_FORMAT_RGB;
}
-
- if (!over->overpicref)
- return 0;
- flush_frames(ctx);
-
+ s->dinput.process = do_blend;
return 0;
}
-static int filter_frame_over(AVFilterLink *inlink, AVFrame *inpicref)
-{
- AVFilterContext *ctx = inlink->dst;
- OverlayContext *over = ctx->priv;
- int ret;
+#define OFFSET(x) offsetof(OverlayContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
- if ((ret = flush_frames(ctx)) < 0)
- return ret;
- ff_bufqueue_add(ctx, &over->queue_over, inpicref);
- ret = try_filter_next_frame(ctx);
- return ret == AVERROR(EAGAIN) ? 0 : ret;
-}
+static const AVOption overlay_options[] = {
+ { "x", "set the x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "y", "set the y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_FRAME}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
+ { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
+ { "frame", "eval expressions per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
+ { "rgb", "force packed RGB in input and output (deprecated)", OFFSET(allow_packed_rgb), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
+ { "shortest", "force termination when the shortest input terminates", OFFSET(dinput.shortest), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
+ { "format", "set output format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=OVERLAY_FORMAT_YUV420}, 0, OVERLAY_FORMAT_NB-1, FLAGS, "format" },
+ { "yuv420", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV420}, .flags = FLAGS, .unit = "format" },
+ { "yuv444", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV444}, .flags = FLAGS, .unit = "format" },
+ { "rgb", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_RGB}, .flags = FLAGS, .unit = "format" },
+ { "repeatlast", "repeat overlay of the last overlay frame", OFFSET(dinput.repeatlast), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
+ { NULL }
+};
-static int request_frame(AVFilterLink *outlink)
-{
- AVFilterContext *ctx = outlink->src;
- OverlayContext *over = ctx->priv;
- int input, ret;
-
- if (!try_filter_next_frame(ctx))
- return 0;
- over->frame_requested = 1;
- while (over->frame_requested) {
- /* TODO if we had a frame duration, we could guess more accurately */
- input = !over->overlay_eof && (over->queue_main.available ||
- over->queue_over.available < 2) ?
- OVERLAY : MAIN;
- ret = ff_request_frame(ctx->inputs[input]);
- /* EOF on main is reported immediately */
- if (ret == AVERROR_EOF && input == OVERLAY) {
- over->overlay_eof = 1;
- if (over->shortest)
- return ret;
- if ((ret = try_filter_next_frame(ctx)) != AVERROR(EAGAIN))
- return ret;
- ret = 0; /* continue requesting frames on main */
- }
- if (ret < 0)
- return ret;
- }
- return 0;
-}
+AVFILTER_DEFINE_CLASS(overlay);
static const AVFilterPad avfilter_vf_overlay_inputs[] = {
{
.name = "main",
.type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
.config_props = config_input_main,
- .filter_frame = filter_frame_main,
+ .filter_frame = filter_frame,
.needs_writable = 1,
},
{
.name = "overlay",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input_overlay,
- .filter_frame = filter_frame_over,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -660,18 +622,16 @@ static const AVFilterPad avfilter_vf_overlay_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_overlay = {
- .name = "overlay",
- .description = NULL_IF_CONFIG_SMALL("Overlay a video source on top of the input."),
-
- .init = init,
- .uninit = uninit,
-
- .priv_size = sizeof(OverlayContext),
-
+AVFilter ff_vf_overlay = {
+ .name = "overlay",
+ .description = NULL_IF_CONFIG_SMALL("Overlay a video source on top of the input."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(OverlayContext),
+ .priv_class = &overlay_class,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_overlay_inputs,
- .outputs = avfilter_vf_overlay_outputs,
- .priv_class = &overlay_class,
+ .process_command = process_command,
+ .inputs = avfilter_vf_overlay_inputs,
+ .outputs = avfilter_vf_overlay_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
diff --git a/ffmpeg/libavfilter/vf_pad.c b/ffmpeg/libavfilter/vf_pad.c
index ed979bb..2962e20 100644
--- a/ffmpeg/libavfilter/vf_pad.c
+++ b/ffmpeg/libavfilter/vf_pad.c
@@ -35,9 +35,10 @@
#include "libavutil/colorspace.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
-#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/mathematics.h"
+#include "libavutil/opt.h"
+
#include "drawutils.h"
static const char *const var_names[] = {
@@ -82,52 +83,25 @@ typedef struct {
int x, y; ///< offsets of the input area with respect to the padded area
int in_w, in_h; ///< width and height for the padded input video, which has to be aligned to the chroma values in order to avoid chroma issues
- char *w_expr; ///< width expression string
- char *h_expr; ///< height expression string
- char *x_expr; ///< width expression string
- char *y_expr; ///< height expression string
- char *color_str;
+ char *w_expr; ///< width expression string
+ char *h_expr; ///< height expression string
+ char *x_expr; ///< width expression string
+ char *y_expr; ///< height expression string
uint8_t rgba_color[4]; ///< color for the padding area
FFDrawContext draw;
FFDrawColor color;
} PadContext;
-#define OFFSET(x) offsetof(PadContext, x)
-#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
-
-static const AVOption pad_options[] = {
- { "width", "set the pad area width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "w", "set the pad area width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "height", "set the pad area height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "h", "set the pad area height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "x", "set the x offset expression for the input image position", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "y", "set the y offset expression for the input image position", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "color", "set the color of the padded area border", OFFSET(color_str), AV_OPT_TYPE_STRING, {.str = "black"}, .flags = FLAGS },
- {NULL}
-};
-
-AVFILTER_DEFINE_CLASS(pad);
-
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- PadContext *pad = ctx->priv;
-
- if (av_parse_color(pad->rgba_color, pad->color_str, -1, ctx) < 0)
- return AVERROR(EINVAL);
-
- return 0;
-}
-
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
- PadContext *pad = ctx->priv;
+ PadContext *s = ctx->priv;
int ret;
double var_values[VARS_NB], res;
char *expr;
- ff_draw_init(&pad->draw, inlink->format, 0);
- ff_draw_color(&pad->draw, &pad->color, pad->rgba_color);
+ ff_draw_init(&s->draw, inlink->format, 0);
+ ff_draw_color(&s->draw, &s->color, s->rgba_color);
var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
@@ -137,72 +111,72 @@ static int config_input(AVFilterLink *inlink)
var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
(double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
- var_values[VAR_HSUB] = 1 << pad->draw.hsub_max;
- var_values[VAR_VSUB] = 1 << pad->draw.vsub_max;
+ var_values[VAR_HSUB] = 1 << s->draw.hsub_max;
+ var_values[VAR_VSUB] = 1 << s->draw.vsub_max;
/* evaluate width and height */
- av_expr_parse_and_eval(&res, (expr = pad->w_expr),
+ av_expr_parse_and_eval(&res, (expr = s->w_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx);
- pad->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
- if ((ret = av_expr_parse_and_eval(&res, (expr = pad->h_expr),
+ s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto eval_fail;
- pad->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
+ s->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
/* evaluate the width again, as it may depend on the evaluated output height */
- if ((ret = av_expr_parse_and_eval(&res, (expr = pad->w_expr),
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto eval_fail;
- pad->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
+ s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
/* evaluate x and y */
- av_expr_parse_and_eval(&res, (expr = pad->x_expr),
+ av_expr_parse_and_eval(&res, (expr = s->x_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx);
- pad->x = var_values[VAR_X] = res;
- if ((ret = av_expr_parse_and_eval(&res, (expr = pad->y_expr),
+ s->x = var_values[VAR_X] = res;
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->y_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto eval_fail;
- pad->y = var_values[VAR_Y] = res;
+ s->y = var_values[VAR_Y] = res;
/* evaluate x again, as it may depend on the evaluated y value */
- if ((ret = av_expr_parse_and_eval(&res, (expr = pad->x_expr),
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto eval_fail;
- pad->x = var_values[VAR_X] = res;
+ s->x = var_values[VAR_X] = res;
/* sanity check params */
- if (pad->w < 0 || pad->h < 0 || pad->x < 0 || pad->y < 0) {
+ if (s->w < 0 || s->h < 0 || s->x < 0 || s->y < 0) {
av_log(ctx, AV_LOG_ERROR, "Negative values are not acceptable.\n");
return AVERROR(EINVAL);
}
- if (!pad->w)
- pad->w = inlink->w;
- if (!pad->h)
- pad->h = inlink->h;
+ if (!s->w)
+ s->w = inlink->w;
+ if (!s->h)
+ s->h = inlink->h;
- pad->w = ff_draw_round_to_sub(&pad->draw, 0, -1, pad->w);
- pad->h = ff_draw_round_to_sub(&pad->draw, 1, -1, pad->h);
- pad->x = ff_draw_round_to_sub(&pad->draw, 0, -1, pad->x);
- pad->y = ff_draw_round_to_sub(&pad->draw, 1, -1, pad->y);
- pad->in_w = ff_draw_round_to_sub(&pad->draw, 0, -1, inlink->w);
- pad->in_h = ff_draw_round_to_sub(&pad->draw, 1, -1, inlink->h);
+ s->w = ff_draw_round_to_sub(&s->draw, 0, -1, s->w);
+ s->h = ff_draw_round_to_sub(&s->draw, 1, -1, s->h);
+ s->x = ff_draw_round_to_sub(&s->draw, 0, -1, s->x);
+ s->y = ff_draw_round_to_sub(&s->draw, 1, -1, s->y);
+ s->in_w = ff_draw_round_to_sub(&s->draw, 0, -1, inlink->w);
+ s->in_h = ff_draw_round_to_sub(&s->draw, 1, -1, inlink->h);
av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d x:%d y:%d color:0x%02X%02X%02X%02X\n",
- inlink->w, inlink->h, pad->w, pad->h, pad->x, pad->y,
- pad->rgba_color[0], pad->rgba_color[1], pad->rgba_color[2], pad->rgba_color[3]);
+ inlink->w, inlink->h, s->w, s->h, s->x, s->y,
+ s->rgba_color[0], s->rgba_color[1], s->rgba_color[2], s->rgba_color[3]);
- if (pad->x < 0 || pad->y < 0 ||
- pad->w <= 0 || pad->h <= 0 ||
- (unsigned)pad->x + (unsigned)inlink->w > pad->w ||
- (unsigned)pad->y + (unsigned)inlink->h > pad->h) {
+ if (s->x < 0 || s->y < 0 ||
+ s->w <= 0 || s->h <= 0 ||
+ (unsigned)s->x + (unsigned)inlink->w > s->w ||
+ (unsigned)s->y + (unsigned)inlink->h > s->h) {
av_log(ctx, AV_LOG_ERROR,
"Input area %d:%d:%d:%d not within the padded area 0:0:%d:%d or zero-sized\n",
- pad->x, pad->y, pad->x + inlink->w, pad->y + inlink->h, pad->w, pad->h);
+ s->x, s->y, s->x + inlink->w, s->y + inlink->h, s->w, s->h);
return AVERROR(EINVAL);
}
@@ -217,20 +191,20 @@ eval_fail:
static int config_output(AVFilterLink *outlink)
{
- PadContext *pad = outlink->src->priv;
+ PadContext *s = outlink->src->priv;
- outlink->w = pad->w;
- outlink->h = pad->h;
+ outlink->w = s->w;
+ outlink->h = s->h;
return 0;
}
static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
{
- PadContext *pad = inlink->dst->priv;
+ PadContext *s = inlink->dst->priv;
AVFrame *frame = ff_get_video_buffer(inlink->dst->outputs[0],
- w + (pad->w - pad->in_w),
- h + (pad->h - pad->in_h));
+ w + (s->w - s->in_w),
+ h + (s->h - s->in_h));
int plane;
if (!frame)
@@ -239,11 +213,11 @@ static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
frame->width = w;
frame->height = h;
- for (plane = 0; plane < 4 && frame->data[plane]; plane++) {
- int hsub = pad->draw.hsub[plane];
- int vsub = pad->draw.vsub[plane];
- frame->data[plane] += (pad->x >> hsub) * pad->draw.pixelstep[plane] +
- (pad->y >> vsub) * frame->linesize[plane];
+ for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
+ int hsub = s->draw.hsub[plane];
+ int vsub = s->draw.vsub[plane];
+ frame->data[plane] += (s->x >> hsub) * s->draw.pixelstep[plane] +
+ (s->y >> vsub) * frame->linesize[plane];
}
return frame;
@@ -277,7 +251,7 @@ static int buffer_needs_copy(PadContext *s, AVFrame *frame, AVBufferRef *buf)
(s->y >> vsub) * frame->linesize[planes[i]];
ptrdiff_t req_end = ((s->w - s->x - frame->width) >> hsub) *
s->draw.pixelstep[planes[i]] +
- (s->y >> vsub) * frame->linesize[planes[i]];
+ ((s->h - s->y - frame->height) >> vsub) * frame->linesize[planes[i]];
if (frame->linesize[planes[i]] < (s->w >> hsub) * s->draw.pixelstep[planes[i]])
return 1;
@@ -285,7 +259,6 @@ static int buffer_needs_copy(PadContext *s, AVFrame *frame, AVBufferRef *buf)
(buf->data + buf->size) - end < req_end)
return 1;
-#define SIGN(x) ((x) > 0 ? 1 : -1)
for (j = 0; j < FF_ARRAY_ELEMS(planes) && planes[j] >= 0; j++) {
int vsub1 = s->draw.vsub[planes[j]];
uint8_t *start1 = frame->data[planes[j]];
@@ -294,8 +267,8 @@ static int buffer_needs_copy(PadContext *s, AVFrame *frame, AVBufferRef *buf)
if (i == j)
continue;
- if (SIGN(start - end1) != SIGN(start - end1 - req_start) ||
- SIGN(end - start1) != SIGN(end - start1 + req_end))
+ if (FFSIGN(start - end1) != FFSIGN(start - end1 - req_start) ||
+ FFSIGN(end - start1) != FFSIGN(end - start1 + req_end))
return 1;
}
}
@@ -318,15 +291,15 @@ static int frame_needs_copy(PadContext *s, AVFrame *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
- PadContext *pad = inlink->dst->priv;
+ PadContext *s = inlink->dst->priv;
AVFrame *out;
- int needs_copy = frame_needs_copy(pad, in);
+ int needs_copy = frame_needs_copy(s, in);
if (needs_copy) {
av_log(inlink->dst, AV_LOG_DEBUG, "Direct padding impossible allocating new frame\n");
out = ff_get_video_buffer(inlink->dst->outputs[0],
- FFMAX(inlink->w, pad->w),
- FFMAX(inlink->h, pad->h));
+ FFMAX(inlink->w, s->w),
+ FFMAX(inlink->h, s->h));
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
@@ -337,51 +310,67 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
int i;
out = in;
- for (i = 0; i < 4 && out->data[i]; i++) {
- int hsub = pad->draw.hsub[i];
- int vsub = pad->draw.vsub[i];
- out->data[i] -= (pad->x >> hsub) * pad->draw.pixelstep[i] +
- (pad->y >> vsub) * out->linesize[i];
+ for (i = 0; i < 4 && out->data[i] && out->linesize[i]; i++) {
+ int hsub = s->draw.hsub[i];
+ int vsub = s->draw.vsub[i];
+ out->data[i] -= (s->x >> hsub) * s->draw.pixelstep[i] +
+ (s->y >> vsub) * out->linesize[i];
}
}
/* top bar */
- if (pad->y) {
- ff_fill_rectangle(&pad->draw, &pad->color,
+ if (s->y) {
+ ff_fill_rectangle(&s->draw, &s->color,
out->data, out->linesize,
- 0, 0, pad->w, pad->y);
+ 0, 0, s->w, s->y);
}
/* bottom bar */
- if (pad->h > pad->y + pad->in_h) {
- ff_fill_rectangle(&pad->draw, &pad->color,
+ if (s->h > s->y + s->in_h) {
+ ff_fill_rectangle(&s->draw, &s->color,
out->data, out->linesize,
- 0, pad->y + pad->in_h, pad->w, pad->h - pad->y - pad->in_h);
+ 0, s->y + s->in_h, s->w, s->h - s->y - s->in_h);
}
/* left border */
- ff_fill_rectangle(&pad->draw, &pad->color, out->data, out->linesize,
- 0, pad->y, pad->x, in->height);
+ ff_fill_rectangle(&s->draw, &s->color, out->data, out->linesize,
+ 0, s->y, s->x, in->height);
if (needs_copy) {
- ff_copy_rectangle2(&pad->draw,
+ ff_copy_rectangle2(&s->draw,
out->data, out->linesize, in->data, in->linesize,
- pad->x, pad->y, 0, 0, in->width, in->height);
+ s->x, s->y, 0, 0, in->width, in->height);
}
/* right border */
- ff_fill_rectangle(&pad->draw, &pad->color, out->data, out->linesize,
- pad->x + pad->in_w, pad->y, pad->w - pad->x - pad->in_w,
+ ff_fill_rectangle(&s->draw, &s->color, out->data, out->linesize,
+ s->x + s->in_w, s->y, s->w - s->x - s->in_w,
in->height);
- out->width = pad->w;
- out->height = pad->h;
+ out->width = s->w;
+ out->height = s->h;
if (in != out)
av_frame_free(&in);
return ff_filter_frame(inlink->dst->outputs[0], out);
}
+#define OFFSET(x) offsetof(PadContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption pad_options[] = {
+ { "width", "set the pad area width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "w", "set the pad area width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "height", "set the pad area height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "h", "set the pad area height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "x", "set the x offset expression for the input image position", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "y", "set the y offset expression for the input image position", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "color", "set the color of the padded area border", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str = "black"}, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(pad);
+
static const AVFilterPad avfilter_vf_pad_inputs[] = {
{
.name = "default",
@@ -402,19 +391,12 @@ static const AVFilterPad avfilter_vf_pad_outputs[] = {
{ NULL }
};
-static const char *const shorthand[] = { "width", "height", "x", "y", "color", NULL };
-
-AVFilter avfilter_vf_pad = {
+AVFilter ff_vf_pad = {
.name = "pad",
- .description = NULL_IF_CONFIG_SMALL("Pad input image to width:height[:x:y[:color]] (default x and y: 0, default color: black)."),
-
+ .description = NULL_IF_CONFIG_SMALL("Pad the input video."),
.priv_size = sizeof(PadContext),
- .init = init,
+ .priv_class = &pad_class,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_pad_inputs,
-
- .outputs = avfilter_vf_pad_outputs,
- .priv_class = &pad_class,
- .shorthand = shorthand,
+ .inputs = avfilter_vf_pad_inputs,
+ .outputs = avfilter_vf_pad_outputs,
};
diff --git a/ffmpeg/libavfilter/vf_pixdesctest.c b/ffmpeg/libavfilter/vf_pixdesctest.c
index 42afc63..54ddf89 100644
--- a/ffmpeg/libavfilter/vf_pixdesctest.c
+++ b/ffmpeg/libavfilter/vf_pixdesctest.c
@@ -46,6 +46,7 @@ static int config_props(AVFilterLink *inlink)
priv->pix_desc = av_pix_fmt_desc_get(inlink->format);
+ av_freep(&priv->line);
if (!(priv->line = av_malloc(sizeof(*priv->line) * inlink->w)))
return AVERROR(ENOMEM);
@@ -58,6 +59,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
int i, c, w = inlink->w, h = inlink->h;
+ const int cw = FF_CEIL_RSHIFT(w, priv->pix_desc->log2_chroma_w);
+ const int ch = FF_CEIL_RSHIFT(h, priv->pix_desc->log2_chroma_h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
@@ -68,23 +71,22 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
av_frame_copy_props(out, in);
for (i = 0; i < 4; i++) {
- int h = outlink->h;
- h = i == 1 || i == 2 ? h>>priv->pix_desc->log2_chroma_h : h;
+ const int h1 = i == 1 || i == 2 ? ch : h;
if (out->data[i]) {
uint8_t *data = out->data[i] +
- (out->linesize[i] > 0 ? 0 : out->linesize[i] * (h-1));
- memset(data, 0, FFABS(out->linesize[i]) * h);
+ (out->linesize[i] > 0 ? 0 : out->linesize[i] * (h1-1));
+ memset(data, 0, FFABS(out->linesize[i]) * h1);
}
}
/* copy palette */
- if (priv->pix_desc->flags & PIX_FMT_PAL ||
- priv->pix_desc->flags & PIX_FMT_PSEUDOPAL)
+ if (priv->pix_desc->flags & AV_PIX_FMT_FLAG_PAL ||
+ priv->pix_desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);
for (c = 0; c < priv->pix_desc->nb_components; c++) {
- int w1 = c == 1 || c == 2 ? w>>priv->pix_desc->log2_chroma_w : w;
- int h1 = c == 1 || c == 2 ? h>>priv->pix_desc->log2_chroma_h : h;
+ const int w1 = c == 1 || c == 2 ? cw : w;
+ const int h1 = c == 1 || c == 2 ? ch : h;
for (i = 0; i < h1; i++) {
av_read_image_line(priv->line,
@@ -123,14 +125,11 @@ static const AVFilterPad avfilter_vf_pixdesctest_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_pixdesctest = {
+AVFilter ff_vf_pixdesctest = {
.name = "pixdesctest",
.description = NULL_IF_CONFIG_SMALL("Test pixel format definitions."),
-
- .priv_size = sizeof(PixdescTestContext),
- .uninit = uninit,
-
- .inputs = avfilter_vf_pixdesctest_inputs,
-
- .outputs = avfilter_vf_pixdesctest_outputs,
+ .priv_size = sizeof(PixdescTestContext),
+ .uninit = uninit,
+ .inputs = avfilter_vf_pixdesctest_inputs,
+ .outputs = avfilter_vf_pixdesctest_outputs,
};
diff --git a/ffmpeg/libavfilter/vf_pp.c b/ffmpeg/libavfilter/vf_pp.c
index b7f35d3..c72fdc6 100644
--- a/ffmpeg/libavfilter/vf_pp.c
+++ b/ffmpeg/libavfilter/vf_pp.c
@@ -31,21 +31,29 @@
#include "libpostproc/postprocess.h"
typedef struct {
+ const AVClass *class;
+ char *subfilters;
int mode_id;
pp_mode *modes[PP_QUALITY_MAX + 1];
void *pp_ctx;
} PPFilterContext;
-static av_cold int pp_init(AVFilterContext *ctx, const char *args)
+#define OFFSET(x) offsetof(PPFilterContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption pp_options[] = {
+ { "subfilters", "set postprocess subfilters", OFFSET(subfilters), AV_OPT_TYPE_STRING, {.str="de"}, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(pp);
+
+static av_cold int pp_init(AVFilterContext *ctx)
{
int i;
PPFilterContext *pp = ctx->priv;
- if (!args || !*args)
- args = "de";
-
for (i = 0; i <= PP_QUALITY_MAX; i++) {
- pp->modes[i] = pp_get_mode_by_name_and_quality(args, i);
+ pp->modes[i] = pp_get_mode_by_name_and_quality(pp->subfilters, i);
if (!pp->modes[i])
return AVERROR_EXTERNAL;
}
@@ -117,6 +125,8 @@ static int pp_filter_frame(AVFilterLink *inlink, AVFrame *inbuf)
return AVERROR(ENOMEM);
}
av_frame_copy_props(outbuf, inbuf);
+ outbuf->width = inbuf->width;
+ outbuf->height = inbuf->height;
qp_table = av_frame_get_qp_table(inbuf, &qstride, &qp_type);
pp_postprocess((const uint8_t **)inbuf->data, inbuf->linesize,
@@ -161,7 +171,7 @@ static const AVFilterPad pp_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_pp = {
+AVFilter ff_vf_pp = {
.name = "pp",
.description = NULL_IF_CONFIG_SMALL("Filter video using libpostproc."),
.priv_size = sizeof(PPFilterContext),
@@ -171,4 +181,6 @@ AVFilter avfilter_vf_pp = {
.inputs = pp_inputs,
.outputs = pp_outputs,
.process_command = pp_process_command,
+ .priv_class = &pp_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_removelogo.c b/ffmpeg/libavfilter/vf_removelogo.c
index e3da197..01a585c 100644
--- a/ffmpeg/libavfilter/vf_removelogo.c
+++ b/ffmpeg/libavfilter/vf_removelogo.c
@@ -70,6 +70,7 @@
*/
#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
@@ -79,6 +80,8 @@
#include "lswsutils.h"
typedef struct {
+ const AVClass *class;
+ char *filename;
/* Stores our collection of masks. The first is for an array of
the second for the y axis, and the third for the x axis. */
int ***mask;
@@ -91,6 +94,16 @@ typedef struct {
FFBoundingBox half_mask_bbox;
} RemovelogoContext;
+#define OFFSET(x) offsetof(RemovelogoContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption removelogo_options[] = {
+ { "filename", "set bitmap filename", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "f", "set bitmap filename", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(removelogo);
+
/**
* Choose a slightly larger mask size to improve performance.
*
@@ -141,7 +154,7 @@ static void convert_mask_to_strength_mask(uint8_t *data, int linesize,
while (1) {
/* If this doesn't get set by the end of this pass, then we're done. */
int has_anything_changed = 0;
- uint8_t *current_pixel0 = data, *current_pixel;
+ uint8_t *current_pixel0 = data + 1 + linesize, *current_pixel;
current_pass++;
for (y = 1; y < h-1; y++) {
@@ -161,8 +174,8 @@ static void convert_mask_to_strength_mask(uint8_t *data, int linesize,
if ( *current_pixel >= current_pass &&
*(current_pixel + 1) >= current_pass &&
*(current_pixel - 1) >= current_pass &&
- *(current_pixel + w) >= current_pass &&
- *(current_pixel - w) >= current_pass) {
+ *(current_pixel + linesize) >= current_pass &&
+ *(current_pixel - linesize) >= current_pass) {
/* Increment the value since it still has not been
* eroded, as evidenced by the if statement that
* just evaluated to true. */
@@ -222,8 +235,8 @@ static int load_mask(uint8_t **mask, int *w, int *h,
av_image_copy_plane(*mask, *w, gray_data[0], gray_linesize[0], *w, *h);
end:
- av_free(src_data[0]);
- av_free(gray_data[0]);
+ av_freep(&src_data[0]);
+ av_freep(&gray_data[0]);
return ret;
}
@@ -264,46 +277,46 @@ static void generate_half_size_image(const uint8_t *src_data, int src_linesize,
src_w/2, src_h/2, 0, max_mask_size);
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
- RemovelogoContext *removelogo = ctx->priv;
+ RemovelogoContext *s = ctx->priv;
int ***mask;
int ret = 0;
int a, b, c, w, h;
int full_max_mask_size, half_max_mask_size;
- if (!args) {
- av_log(ctx, AV_LOG_ERROR, "An image file must be specified as argument\n");
+ if (!s->filename) {
+ av_log(ctx, AV_LOG_ERROR, "The bitmap file name is mandatory\n");
return AVERROR(EINVAL);
}
/* Load our mask image. */
- if ((ret = load_mask(&removelogo->full_mask_data, &w, &h, args, ctx)) < 0)
+ if ((ret = load_mask(&s->full_mask_data, &w, &h, s->filename, ctx)) < 0)
return ret;
- removelogo->mask_w = w;
- removelogo->mask_h = h;
+ s->mask_w = w;
+ s->mask_h = h;
- convert_mask_to_strength_mask(removelogo->full_mask_data, w, w, h,
+ convert_mask_to_strength_mask(s->full_mask_data, w, w, h,
16, &full_max_mask_size);
/* Create the scaled down mask image for the chroma planes. */
- if (!(removelogo->half_mask_data = av_mallocz(w/2 * h/2)))
+ if (!(s->half_mask_data = av_mallocz(w/2 * h/2)))
return AVERROR(ENOMEM);
- generate_half_size_image(removelogo->full_mask_data, w,
- removelogo->half_mask_data, w/2,
+ generate_half_size_image(s->full_mask_data, w,
+ s->half_mask_data, w/2,
w, h, &half_max_mask_size);
- removelogo->max_mask_size = FFMAX(full_max_mask_size, half_max_mask_size);
+ s->max_mask_size = FFMAX(full_max_mask_size, half_max_mask_size);
/* Create a circular mask for each size up to max_mask_size. When
the filter is applied, the mask size is determined on a pixel
by pixel basis, with pixels nearer the edge of the logo getting
smaller mask sizes. */
- mask = (int ***)av_malloc(sizeof(int **) * (removelogo->max_mask_size + 1));
+ mask = (int ***)av_malloc(sizeof(int **) * (s->max_mask_size + 1));
if (!mask)
return AVERROR(ENOMEM);
- for (a = 0; a <= removelogo->max_mask_size; a++) {
+ for (a = 0; a <= s->max_mask_size; a++) {
mask[a] = (int **)av_malloc(sizeof(int *) * ((a * 2) + 1));
if (!mask[a])
return AVERROR(ENOMEM);
@@ -319,17 +332,17 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
}
}
}
- removelogo->mask = mask;
+ s->mask = mask;
/* Calculate our bounding rectangles, which determine in what
* region the logo resides for faster processing. */
- ff_calculate_bounding_box(&removelogo->full_mask_bbox, removelogo->full_mask_data, w, w, h, 0);
- ff_calculate_bounding_box(&removelogo->half_mask_bbox, removelogo->half_mask_data, w/2, w/2, h/2, 0);
+ ff_calculate_bounding_box(&s->full_mask_bbox, s->full_mask_data, w, w, h, 0);
+ ff_calculate_bounding_box(&s->half_mask_bbox, s->half_mask_data, w/2, w/2, h/2, 0);
#define SHOW_LOGO_INFO(mask_type) \
av_log(ctx, AV_LOG_VERBOSE, #mask_type " x1:%d x2:%d y1:%d y2:%d max_mask_size:%d\n", \
- removelogo->mask_type##_mask_bbox.x1, removelogo->mask_type##_mask_bbox.x2, \
- removelogo->mask_type##_mask_bbox.y1, removelogo->mask_type##_mask_bbox.y2, \
+ s->mask_type##_mask_bbox.x1, s->mask_type##_mask_bbox.x2, \
+ s->mask_type##_mask_bbox.y1, s->mask_type##_mask_bbox.y2, \
mask_type##_max_mask_size);
SHOW_LOGO_INFO(full);
SHOW_LOGO_INFO(half);
@@ -340,12 +353,12 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
static int config_props_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
- RemovelogoContext *removelogo = ctx->priv;
+ RemovelogoContext *s = ctx->priv;
- if (inlink->w != removelogo->mask_w || inlink->h != removelogo->mask_h) {
+ if (inlink->w != s->mask_w || inlink->h != s->mask_h) {
av_log(ctx, AV_LOG_INFO,
"Mask image size %dx%d does not match with the input video size %dx%d\n",
- removelogo->mask_w, removelogo->mask_h, inlink->w, inlink->h);
+ s->mask_w, s->mask_h, inlink->w, inlink->h);
return AVERROR(EINVAL);
}
@@ -475,7 +488,7 @@ static void blur_image(int ***mask,
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
- RemovelogoContext *removelogo = inlink->dst->priv;
+ RemovelogoContext *s = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *outpicref;
int direct = 0;
@@ -492,21 +505,21 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
av_frame_copy_props(outpicref, inpicref);
}
- blur_image(removelogo->mask,
+ blur_image(s->mask,
inpicref ->data[0], inpicref ->linesize[0],
outpicref->data[0], outpicref->linesize[0],
- removelogo->full_mask_data, inlink->w,
- inlink->w, inlink->h, direct, &removelogo->full_mask_bbox);
- blur_image(removelogo->mask,
+ s->full_mask_data, inlink->w,
+ inlink->w, inlink->h, direct, &s->full_mask_bbox);
+ blur_image(s->mask,
inpicref ->data[1], inpicref ->linesize[1],
outpicref->data[1], outpicref->linesize[1],
- removelogo->half_mask_data, inlink->w/2,
- inlink->w/2, inlink->h/2, direct, &removelogo->half_mask_bbox);
- blur_image(removelogo->mask,
+ s->half_mask_data, inlink->w/2,
+ inlink->w/2, inlink->h/2, direct, &s->half_mask_bbox);
+ blur_image(s->mask,
inpicref ->data[2], inpicref ->linesize[2],
outpicref->data[2], outpicref->linesize[2],
- removelogo->half_mask_data, inlink->w/2,
- inlink->w/2, inlink->h/2, direct, &removelogo->half_mask_bbox);
+ s->half_mask_data, inlink->w/2,
+ inlink->w/2, inlink->h/2, direct, &s->half_mask_bbox);
if (!direct)
av_frame_free(&inpicref);
@@ -514,35 +527,34 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
return ff_filter_frame(outlink, outpicref);
}
-static void uninit(AVFilterContext *ctx)
+static av_cold void uninit(AVFilterContext *ctx)
{
- RemovelogoContext *removelogo = ctx->priv;
+ RemovelogoContext *s = ctx->priv;
int a, b;
- av_freep(&removelogo->full_mask_data);
- av_freep(&removelogo->half_mask_data);
+ av_freep(&s->full_mask_data);
+ av_freep(&s->half_mask_data);
- if (removelogo->mask) {
+ if (s->mask) {
/* Loop through each mask. */
- for (a = 0; a <= removelogo->max_mask_size; a++) {
+ for (a = 0; a <= s->max_mask_size; a++) {
/* Loop through each scanline in a mask. */
for (b = -a; b <= a; b++) {
- av_free(removelogo->mask[a][b + a]); /* Free a scanline. */
+ av_freep(&s->mask[a][b + a]); /* Free a scanline. */
}
- av_free(removelogo->mask[a]);
+ av_freep(&s->mask[a]);
}
/* Free the array of pointers pointing to the masks. */
- av_freep(&removelogo->mask);
+ av_freep(&s->mask);
}
}
static const AVFilterPad removelogo_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .config_props = config_props_input,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props_input,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -555,7 +567,7 @@ static const AVFilterPad removelogo_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_removelogo = {
+AVFilter ff_vf_removelogo = {
.name = "removelogo",
.description = NULL_IF_CONFIG_SMALL("Remove a TV logo based on a mask image."),
.priv_size = sizeof(RemovelogoContext),
@@ -564,4 +576,6 @@ AVFilter avfilter_vf_removelogo = {
.query_formats = query_formats,
.inputs = removelogo_inputs,
.outputs = removelogo_outputs,
+ .priv_class = &removelogo_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_scale.c b/ffmpeg/libavfilter/vf_scale.c
index 4cc6aab..2e692cf 100644
--- a/ffmpeg/libavfilter/vf_scale.c
+++ b/ffmpeg/libavfilter/vf_scale.c
@@ -51,6 +51,8 @@ static const char *const var_names[] = {
"dar",
"hsub",
"vsub",
+ "ohsub",
+ "ovsub",
NULL
};
@@ -64,6 +66,8 @@ enum var_name {
VAR_DAR,
VAR_HSUB,
VAR_VSUB,
+ VAR_OHSUB,
+ VAR_OVSUB,
VARS_NB
};
@@ -71,6 +75,7 @@ typedef struct {
const AVClass *class;
struct SwsContext *sws; ///< software scaler context
struct SwsContext *isws[2]; ///< software scaler context for interlaced material
+ AVDictionary *opts;
/**
* New dimensions. Special values are:
@@ -78,7 +83,6 @@ typedef struct {
* -1 = keep original aspect
*/
int w, h;
- char *flags_str; ///sws flags string
char *size_str;
unsigned int flags; ///sws flags
@@ -90,45 +94,26 @@ typedef struct {
char *w_expr; ///< width expression string
char *h_expr; ///< height expression string
-} ScaleContext;
+ char *flags_str;
-#define OFFSET(x) offsetof(ScaleContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+ char *in_color_matrix;
+ char *out_color_matrix;
-static const AVOption scale_options[] = {
- { "w", "set width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
- { "width", "set width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
- { "h", "set height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
- { "height", "set height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
- { "flags", "set libswscale flags", OFFSET(flags_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, INT_MAX, FLAGS },
- { "interl", "set interlacing", OFFSET(interlaced), AV_OPT_TYPE_INT, {.i64 = 0 }, -1, 1, FLAGS },
- { "size", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
- { "s", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
- { NULL },
-};
+ int in_range;
+ int out_range;
-AVFILTER_DEFINE_CLASS(scale);
+ int out_h_chr_pos;
+ int out_v_chr_pos;
+ int in_h_chr_pos;
+ int in_v_chr_pos;
-static av_cold int init(AVFilterContext *ctx, const char *args)
+ int force_original_aspect_ratio;
+} ScaleContext;
+
+static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
{
ScaleContext *scale = ctx->priv;
- static const char *shorthand[] = { "w", "h", NULL };
int ret;
- const char *args0 = args;
-
- scale->class = &scale_class;
- av_opt_set_defaults(scale);
-
- if (args && (scale->size_str = av_get_token(&args, ":"))) {
- if (av_parse_video_size(&scale->w, &scale->h, scale->size_str) < 0) {
- av_freep(&scale->size_str);
- args = args0;
- } else if (*args)
- args++;
- }
-
- if ((ret = av_opt_set_from_string(scale, args, shorthand, "=", ":")) < 0)
- return ret;
if (scale->size_str && (scale->w_expr || scale->h_expr)) {
av_log(ctx, AV_LOG_ERROR,
@@ -136,6 +121,9 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
return AVERROR(EINVAL);
}
+ if (scale->w_expr && !scale->h_expr)
+ FFSWAP(char *, scale->w_expr, scale->size_str);
+
if (scale->size_str) {
char buf[32];
if ((ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str)) < 0) {
@@ -156,7 +144,8 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n",
scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced);
- scale->flags = SWS_BILINEAR;
+ scale->flags = 0;
+
if (scale->flags_str) {
const AVClass *class = sws_get_class();
const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0,
@@ -165,6 +154,8 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
if (ret < 0)
return ret;
}
+ scale->opts = *opts;
+ *opts = NULL;
return 0;
}
@@ -176,7 +167,7 @@ static av_cold void uninit(AVFilterContext *ctx)
sws_freeContext(scale->isws[0]);
sws_freeContext(scale->isws[1]);
scale->sws = NULL;
- av_opt_free(scale);
+ av_dict_free(&scale->opts);
}
static int query_formats(AVFilterContext *ctx)
@@ -188,7 +179,8 @@ static int query_formats(AVFilterContext *ctx)
if (ctx->inputs[0]) {
formats = NULL;
for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++)
- if ( sws_isSupportedInput(pix_fmt)
+ if ((sws_isSupportedInput(pix_fmt) ||
+ sws_isSupportedEndiannessConversion(pix_fmt))
&& (ret = ff_add_format(&formats, pix_fmt)) < 0) {
ff_formats_unref(&formats);
return ret;
@@ -198,7 +190,8 @@ static int query_formats(AVFilterContext *ctx)
if (ctx->outputs[0]) {
formats = NULL;
for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++)
- if ( (sws_isSupportedOutput(pix_fmt) || pix_fmt == AV_PIX_FMT_PAL8)
+ if ((sws_isSupportedOutput(pix_fmt) || pix_fmt == AV_PIX_FMT_PAL8 ||
+ sws_isSupportedEndiannessConversion(pix_fmt))
&& (ret = ff_add_format(&formats, pix_fmt)) < 0) {
ff_formats_unref(&formats);
return ret;
@@ -209,6 +202,28 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
+static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace)
+{
+ if (!s)
+ s = "bt601";
+
+ if (s && strstr(s, "bt709")) {
+ colorspace = AVCOL_SPC_BT709;
+ } else if (s && strstr(s, "fcc")) {
+ colorspace = AVCOL_SPC_FCC;
+ } else if (s && strstr(s, "smpte240m")) {
+ colorspace = AVCOL_SPC_SMPTE240M;
+ } else if (s && (strstr(s, "bt601") || strstr(s, "bt470") || strstr(s, "smpte170m"))) {
+ colorspace = AVCOL_SPC_BT470BG;
+ }
+
+ if (colorspace < 1 || colorspace > 7) {
+ colorspace = AVCOL_SPC_BT470BG;
+ }
+
+ return sws_getCoefficients(colorspace);
+}
+
static int config_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
@@ -216,6 +231,7 @@ static int config_props(AVFilterLink *outlink)
enum AVPixelFormat outfmt = outlink->format;
ScaleContext *scale = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format);
int64_t w, h;
double var_values[VARS_NB], res;
char *expr;
@@ -231,6 +247,8 @@ static int config_props(AVFilterLink *outlink)
var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
+ var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w;
+ var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h;
/* evaluate width and height */
av_expr_parse_and_eval(&res, (expr = scale->w_expr),
@@ -269,6 +287,19 @@ static int config_props(AVFilterLink *outlink)
if (h == -1)
h = av_rescale(w, inlink->h, inlink->w);
+ if (scale->force_original_aspect_ratio) {
+ int tmp_w = av_rescale(h, inlink->w, inlink->h);
+ int tmp_h = av_rescale(w, inlink->h, inlink->w);
+
+ if (scale->force_original_aspect_ratio == 1) {
+ w = FFMIN(tmp_w, w);
+ h = FFMIN(tmp_h, h);
+ } else {
+ w = FFMAX(tmp_w, w);
+ h = FFMAX(tmp_h, h);
+ }
+ }
+
if (w > INT_MAX || h > INT_MAX ||
(h * inlink->w) > INT_MAX ||
(w * inlink->h) > INT_MAX)
@@ -279,33 +310,59 @@ static int config_props(AVFilterLink *outlink)
/* TODO: make algorithm configurable */
- scale->input_is_pal = desc->flags & PIX_FMT_PAL ||
- desc->flags & PIX_FMT_PSEUDOPAL;
+ scale->input_is_pal = desc->flags & AV_PIX_FMT_FLAG_PAL ||
+ desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL;
if (outfmt == AV_PIX_FMT_PAL8) outfmt = AV_PIX_FMT_BGR8;
- scale->output_is_pal = av_pix_fmt_desc_get(outfmt)->flags & PIX_FMT_PAL ||
- av_pix_fmt_desc_get(outfmt)->flags & PIX_FMT_PSEUDOPAL;
+ scale->output_is_pal = av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PAL ||
+ av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PSEUDOPAL;
if (scale->sws)
sws_freeContext(scale->sws);
+ if (scale->isws[0])
+ sws_freeContext(scale->isws[0]);
+ if (scale->isws[1])
+ sws_freeContext(scale->isws[1]);
+ scale->isws[0] = scale->isws[1] = scale->sws = NULL;
if (inlink->w == outlink->w && inlink->h == outlink->h &&
inlink->format == outlink->format)
- scale->sws = NULL;
+ ;
else {
- scale->sws = sws_getContext(inlink ->w, inlink ->h, inlink ->format,
- outlink->w, outlink->h, outfmt,
- scale->flags, NULL, NULL, NULL);
- if (scale->isws[0])
- sws_freeContext(scale->isws[0]);
- scale->isws[0] = sws_getContext(inlink ->w, inlink ->h/2, inlink ->format,
- outlink->w, outlink->h/2, outfmt,
- scale->flags, NULL, NULL, NULL);
- if (scale->isws[1])
- sws_freeContext(scale->isws[1]);
- scale->isws[1] = sws_getContext(inlink ->w, inlink ->h/2, inlink ->format,
- outlink->w, outlink->h/2, outfmt,
- scale->flags, NULL, NULL, NULL);
- if (!scale->sws || !scale->isws[0] || !scale->isws[1])
- return AVERROR(EINVAL);
+ struct SwsContext **swscs[3] = {&scale->sws, &scale->isws[0], &scale->isws[1]};
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ struct SwsContext **s = swscs[i];
+ *s = sws_alloc_context();
+ if (!*s)
+ return AVERROR(ENOMEM);
+
+ if (scale->opts) {
+ AVDictionaryEntry *e = NULL;
+
+ while ((e = av_dict_get(scale->opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
+ if ((ret = av_opt_set(*s, e->key, e->value, 0)) < 0)
+ return ret;
+ }
+ }
+
+ av_opt_set_int(*s, "srcw", inlink ->w, 0);
+ av_opt_set_int(*s, "srch", inlink ->h >> !!i, 0);
+ av_opt_set_int(*s, "src_format", inlink->format, 0);
+ av_opt_set_int(*s, "dstw", outlink->w, 0);
+ av_opt_set_int(*s, "dsth", outlink->h >> !!i, 0);
+ av_opt_set_int(*s, "dst_format", outfmt, 0);
+ av_opt_set_int(*s, "sws_flags", scale->flags, 0);
+
+ av_opt_set_int(*s, "src_h_chr_pos", scale->in_h_chr_pos, 0);
+ av_opt_set_int(*s, "src_v_chr_pos", scale->in_v_chr_pos, 0);
+ av_opt_set_int(*s, "dst_h_chr_pos", scale->out_h_chr_pos, 0);
+ av_opt_set_int(*s, "dst_v_chr_pos", scale->out_v_chr_pos, 0);
+
+ if ((ret = sws_init_context(*s, NULL, NULL)) < 0)
+ return ret;
+ if (!scale->interlaced)
+ break;
+ }
}
if (inlink->sample_aspect_ratio.num){
@@ -360,6 +417,7 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
AVFrame *out;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
char buf[32];
+ int in_range;
if( in->width != link->w
|| in->height != link->h
@@ -397,6 +455,45 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
if(scale->output_is_pal)
avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);
+ in_range = av_frame_get_color_range(in);
+
+ if ( scale->in_color_matrix
+ || scale->out_color_matrix
+ || scale-> in_range != AVCOL_RANGE_UNSPECIFIED
+ || in_range != AVCOL_RANGE_UNSPECIFIED
+ || scale->out_range != AVCOL_RANGE_UNSPECIFIED) {
+ int in_full, out_full, brightness, contrast, saturation;
+ const int *inv_table, *table;
+
+ sws_getColorspaceDetails(scale->sws, (int **)&inv_table, &in_full,
+ (int **)&table, &out_full,
+ &brightness, &contrast, &saturation);
+
+ if (scale->in_color_matrix)
+ inv_table = parse_yuv_type(scale->in_color_matrix, av_frame_get_colorspace(in));
+ if (scale->out_color_matrix)
+ table = parse_yuv_type(scale->out_color_matrix, AVCOL_SPC_UNSPECIFIED);
+
+ if (scale-> in_range != AVCOL_RANGE_UNSPECIFIED)
+ in_full = (scale-> in_range == AVCOL_RANGE_JPEG);
+ else if (in_range != AVCOL_RANGE_UNSPECIFIED)
+ in_full = (in_range == AVCOL_RANGE_JPEG);
+ if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
+ out_full = (scale->out_range == AVCOL_RANGE_JPEG);
+
+ sws_setColorspaceDetails(scale->sws, inv_table, in_full,
+ table, out_full,
+ brightness, contrast, saturation);
+ if (scale->isws[0])
+ sws_setColorspaceDetails(scale->isws[0], inv_table, in_full,
+ table, out_full,
+ brightness, contrast, saturation);
+ if (scale->isws[1])
+ sws_setColorspaceDetails(scale->isws[1], inv_table, in_full,
+ table, out_full,
+ brightness, contrast, saturation);
+ }
+
av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
(int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
(int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
@@ -413,10 +510,56 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
return ff_filter_frame(outlink, out);
}
+static const AVClass *child_class_next(const AVClass *prev)
+{
+ return prev ? NULL : sws_get_class();
+}
+
+#define OFFSET(x) offsetof(ScaleContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption scale_options[] = {
+ { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "height","Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "flags", "Flags to pass to libswscale", OFFSET(flags_str), AV_OPT_TYPE_STRING, { .str = "bilinear" }, .flags = FLAGS },
+ { "interl", "set interlacing", OFFSET(interlaced), AV_OPT_TYPE_INT, {.i64 = 0 }, -1, 1, FLAGS },
+ { "size", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
+ { "s", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
+ { "in_color_matrix", "set input YCbCr type", OFFSET(in_color_matrix), AV_OPT_TYPE_STRING, { .str = "auto" }, .flags = FLAGS },
+ { "out_color_matrix", "set output YCbCr type", OFFSET(out_color_matrix), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS },
+ { "in_range", "set input color range", OFFSET( in_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
+ { "out_range", "set output color range", OFFSET(out_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
+ { "auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
+ { "full", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
+ { "jpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
+ { "mpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
+ { "tv", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
+ { "pc", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
+ { "in_v_chr_pos", "input vertical chroma position in luma grid/256" , OFFSET(in_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -1}, -1, 512, FLAGS },
+ { "in_h_chr_pos", "input horizontal chroma position in luma grid/256", OFFSET(in_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -1}, -1, 512, FLAGS },
+ { "out_v_chr_pos", "output vertical chroma position in luma grid/256" , OFFSET(out_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -1}, -1, 512, FLAGS },
+ { "out_h_chr_pos", "output horizontal chroma position in luma grid/256", OFFSET(out_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -1}, -1, 512, FLAGS },
+ { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 2, FLAGS, "force_oar" },
+ { "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
+ { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
+ { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
+ { NULL }
+};
+
+static const AVClass scale_class = {
+ .class_name = "scale",
+ .item_name = av_default_item_name,
+ .option = scale_options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .child_class_next = child_class_next,
+};
+
static const AVFilterPad avfilter_vf_scale_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
@@ -431,18 +574,14 @@ static const AVFilterPad avfilter_vf_scale_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_scale = {
- .name = "scale",
- .description = NULL_IF_CONFIG_SMALL("Scale the input video to width:height size and/or convert the image format."),
-
- .init = init,
- .uninit = uninit,
-
+AVFilter ff_vf_scale = {
+ .name = "scale",
+ .description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format."),
+ .init_dict = init_dict,
+ .uninit = uninit,
.query_formats = query_formats,
-
- .priv_size = sizeof(ScaleContext),
-
- .inputs = avfilter_vf_scale_inputs,
- .outputs = avfilter_vf_scale_outputs,
- .priv_class = &scale_class,
+ .priv_size = sizeof(ScaleContext),
+ .priv_class = &scale_class,
+ .inputs = avfilter_vf_scale_inputs,
+ .outputs = avfilter_vf_scale_outputs,
};
diff --git a/ffmpeg/libavfilter/vf_setfield.c b/ffmpeg/libavfilter/vf_setfield.c
index bb97a84..eb4df74 100644
--- a/ffmpeg/libavfilter/vf_setfield.c
+++ b/ffmpeg/libavfilter/vf_setfield.c
@@ -54,23 +54,6 @@ static const AVOption setfield_options[] = {
AVFILTER_DEFINE_CLASS(setfield);
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- SetFieldContext *setfield = ctx->priv;
- static const char *shorthand[] = { "mode", NULL };
-
- setfield->class = &setfield_class;
- av_opt_set_defaults(setfield);
-
- return av_opt_set_from_string(setfield, args, shorthand, "=", ":");
-}
-
-static av_cold void uninit(AVFilterContext *ctx)
-{
- SetFieldContext *setfield = ctx->priv;
- av_opt_free(setfield);
-}
-
static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
{
SetFieldContext *setfield = inlink->dst->priv;
@@ -86,10 +69,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
static const AVFilterPad setfield_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -102,14 +84,11 @@ static const AVFilterPad setfield_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_setfield = {
- .name = "setfield",
+AVFilter ff_vf_setfield = {
+ .name = "setfield",
.description = NULL_IF_CONFIG_SMALL("Force field for the output video frame."),
- .init = init,
- .uninit = uninit,
-
- .priv_size = sizeof(SetFieldContext),
- .inputs = setfield_inputs,
- .outputs = setfield_outputs,
- .priv_class = &setfield_class,
+ .priv_size = sizeof(SetFieldContext),
+ .priv_class = &setfield_class,
+ .inputs = setfield_inputs,
+ .outputs = setfield_outputs,
};
diff --git a/ffmpeg/libavfilter/vf_showinfo.c b/ffmpeg/libavfilter/vf_showinfo.c
index dd97843..ade3e1a 100644
--- a/ffmpeg/libavfilter/vf_showinfo.c
+++ b/ffmpeg/libavfilter/vf_showinfo.c
@@ -31,22 +31,17 @@
#include "internal.h"
#include "video.h"
-typedef struct {
- unsigned int frame;
-} ShowInfoContext;
-
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
- ShowInfoContext *showinfo = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
uint32_t plane_checksum[4] = {0}, checksum = 0;
int i, plane, vsub = desc->log2_chroma_h;
- for (plane = 0; plane < 4 && frame->data[plane]; plane++) {
+ for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
int64_t linesize = av_image_get_linesize(frame->format, frame->width, plane);
uint8_t *data = frame->data[plane];
- int h = plane == 1 || plane == 2 ? inlink->h >> vsub : inlink->h;
+ int h = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;
if (linesize < 0)
return linesize;
@@ -59,10 +54,10 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
}
av_log(ctx, AV_LOG_INFO,
- "n:%d pts:%s pts_time:%s pos:%"PRId64" "
+ "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
"fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
"checksum:%08X plane_checksum:[%08X",
- showinfo->frame,
+ inlink->frame_count,
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), av_frame_get_pkt_pos(frame),
desc->name,
frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
@@ -73,20 +68,18 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
av_get_picture_type_char(frame->pict_type),
checksum, plane_checksum[0]);
- for (plane = 1; plane < 4 && frame->data[plane]; plane++)
+ for (plane = 1; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
av_log(ctx, AV_LOG_INFO, " %08X", plane_checksum[plane]);
av_log(ctx, AV_LOG_INFO, "]\n");
- showinfo->frame++;
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
static const AVFilterPad avfilter_vf_showinfo_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -99,13 +92,9 @@ static const AVFilterPad avfilter_vf_showinfo_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_showinfo = {
+AVFilter ff_vf_showinfo = {
.name = "showinfo",
.description = NULL_IF_CONFIG_SMALL("Show textual information for each video frame."),
-
- .priv_size = sizeof(ShowInfoContext),
-
- .inputs = avfilter_vf_showinfo_inputs,
-
- .outputs = avfilter_vf_showinfo_outputs,
+ .inputs = avfilter_vf_showinfo_inputs,
+ .outputs = avfilter_vf_showinfo_outputs,
};
diff --git a/ffmpeg/libavfilter/vf_smartblur.c b/ffmpeg/libavfilter/vf_smartblur.c
index f31cb8a..114ac6f 100644
--- a/ffmpeg/libavfilter/vf_smartblur.c
+++ b/ffmpeg/libavfilter/vf_smartblur.c
@@ -82,21 +82,9 @@ static const AVOption smartblur_options[] = {
AVFILTER_DEFINE_CLASS(smartblur);
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
SmartblurContext *sblur = ctx->priv;
- int ret;
- static const char *shorthand[] = {
- "luma_radius", "luma_strength", "luma_threshold",
- "chroma_radius", "chroma_strength", "chroma_threshold",
- NULL
- };
-
- sblur->class = &smartblur_class;
- av_opt_set_defaults(sblur);
-
- if ((ret = av_opt_set_from_string(sblur, args, shorthand, "=", ":")) < 0)
- return ret;
/* make chroma default to luma values, if not explicitly set */
if (sblur->chroma.radius < RADIUS_MIN)
@@ -111,7 +99,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
av_log(ctx, AV_LOG_VERBOSE,
"luma_radius:%f luma_strength:%f luma_threshold:%d "
- "chroma_radius:%f chroma_strength:%f chroma_threshold:%d ",
+ "chroma_radius:%f chroma_strength:%f chroma_threshold:%d\n",
sblur->luma.radius, sblur->luma.strength, sblur->luma.threshold,
sblur->chroma.radius, sblur->chroma.strength, sblur->chroma.threshold);
@@ -124,7 +112,6 @@ static av_cold void uninit(AVFilterContext *ctx)
sws_freeContext(sblur->luma.filter_context);
sws_freeContext(sblur->chroma.filter_context);
- av_opt_free(sblur);
}
static int query_formats(AVFilterContext *ctx)
@@ -179,7 +166,8 @@ static int config_props(AVFilterLink *inlink)
alloc_sws_context(&sblur->luma, inlink->w, inlink->h, sblur->sws_flags);
alloc_sws_context(&sblur->chroma,
- inlink->w >> sblur->hsub, inlink->h >> sblur->vsub,
+ FF_CEIL_RSHIFT(inlink->w, sblur->hsub),
+ FF_CEIL_RSHIFT(inlink->h, sblur->vsub),
sblur->sws_flags);
return 0;
@@ -254,8 +242,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
SmartblurContext *sblur = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *outpic;
- int cw = inlink->w >> sblur->hsub;
- int ch = inlink->h >> sblur->vsub;
+ int cw = FF_CEIL_RSHIFT(inlink->w, sblur->hsub);
+ int ch = FF_CEIL_RSHIFT(inlink->h, sblur->vsub);
outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!outpic) {
@@ -302,16 +290,15 @@ static const AVFilterPad smartblur_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_smartblur = {
- .name = "smartblur",
- .description = NULL_IF_CONFIG_SMALL("Blur the input video without impacting the outlines."),
-
- .priv_size = sizeof(SmartblurContext),
-
+AVFilter ff_vf_smartblur = {
+ .name = "smartblur",
+ .description = NULL_IF_CONFIG_SMALL("Blur the input video without impacting the outlines."),
+ .priv_size = sizeof(SmartblurContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = smartblur_inputs,
.outputs = smartblur_outputs,
.priv_class = &smartblur_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_stereo3d.c b/ffmpeg/libavfilter/vf_stereo3d.c
index 156470f..2140120 100644
--- a/ffmpeg/libavfilter/vf_stereo3d.c
+++ b/ffmpeg/libavfilter/vf_stereo3d.c
@@ -25,6 +25,7 @@
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
+#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
@@ -56,6 +57,8 @@ enum StereoCode {
ABOVE_BELOW_RL, // above-below (right eye above, left eye below)
ABOVE_BELOW_2_LR, // above-below with half height resolution
ABOVE_BELOW_2_RL, // above-below with half height resolution
+ ALTERNATING_LR, // alternating frames (left eye first, right eye second)
+ ALTERNATING_RL, // alternating frames (right eye first, left eye second)
STEREO_CODE_COUNT // TODO: needs autodetection
};
@@ -63,6 +66,7 @@ typedef struct StereoComponent {
enum StereoCode format;
int width, height;
int off_left, off_right;
+ int off_lstep, off_rstep;
int row_left, row_right;
} StereoComponent;
@@ -130,18 +134,27 @@ typedef struct Stereo3DContext {
StereoComponent in, out;
int width, height;
int row_step;
- int ana_matrix[3][6];
+ const int *ana_matrix[3];
+ int nb_planes;
+ int linesize[4];
+ int pheight[4];
+ int hsub, vsub;
+ int pixstep[4];
+ AVFrame *prev;
+ double ts_unit;
} Stereo3DContext;
#define OFFSET(x) offsetof(Stereo3DContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption stereo3d_options[] = {
- { "in", "set input format", OFFSET(in.format), AV_OPT_TYPE_INT, {.i64=SIDE_BY_SIDE_LR}, SIDE_BY_SIDE_LR, ABOVE_BELOW_2_RL, FLAGS, "in"},
+ { "in", "set input format", OFFSET(in.format), AV_OPT_TYPE_INT, {.i64=SIDE_BY_SIDE_LR}, SIDE_BY_SIDE_LR, STEREO_CODE_COUNT-1, FLAGS, "in"},
{ "ab2l", "above below half height left first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_2_LR}, 0, 0, FLAGS, "in" },
{ "ab2r", "above below half height right first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_2_RL}, 0, 0, FLAGS, "in" },
{ "abl", "above below left first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_LR}, 0, 0, FLAGS, "in" },
{ "abr", "above below right first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_RL}, 0, 0, FLAGS, "in" },
+ { "al", "alternating frames left first", 0, AV_OPT_TYPE_CONST, {.i64=ALTERNATING_LR}, 0, 0, FLAGS, "in" },
+ { "ar", "alternating frames right first", 0, AV_OPT_TYPE_CONST, {.i64=ALTERNATING_RL}, 0, 0, FLAGS, "in" },
{ "sbs2l", "side by side half width left first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_2_LR}, 0, 0, FLAGS, "in" },
{ "sbs2r", "side by side half width right first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_2_RL}, 0, 0, FLAGS, "in" },
{ "sbsl", "side by side left first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_LR}, 0, 0, FLAGS, "in" },
@@ -155,6 +168,8 @@ static const AVOption stereo3d_options[] = {
{ "agmd", "anaglyph green magenta dubois", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_GM_DUBOIS}, 0, 0, FLAGS, "out" },
{ "agmg", "anaglyph green magenta gray", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_GM_GRAY}, 0, 0, FLAGS, "out" },
{ "agmh", "anaglyph green magenta half color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_GM_HALF}, 0, 0, FLAGS, "out" },
+ { "al", "alternating frames left first", 0, AV_OPT_TYPE_CONST, {.i64=ALTERNATING_LR}, 0, 0, FLAGS, "out" },
+ { "ar", "alternating frames right first", 0, AV_OPT_TYPE_CONST, {.i64=ALTERNATING_RL}, 0, 0, FLAGS, "out" },
{ "arbg", "anaglyph red blue gray", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RB_GRAY}, 0, 0, FLAGS, "out" },
{ "arcc", "anaglyph red cyan color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RC_COLOR}, 0, 0, FLAGS, "out" },
{ "arcd", "anaglyph red cyan dubois", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RC_DUBOIS}, 0, 0, FLAGS, "out" },
@@ -173,16 +188,95 @@ static const AVOption stereo3d_options[] = {
{ "sbs2r", "side by side half width right first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_2_RL}, 0, 0, FLAGS, "out" },
{ "sbsl", "side by side left first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_LR}, 0, 0, FLAGS, "out" },
{ "sbsr", "side by side right first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_RL}, 0, 0, FLAGS, "out" },
- {NULL},
+ { NULL }
};
AVFILTER_DEFINE_CLASS(stereo3d);
+static const enum AVPixelFormat anaglyph_pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat other_pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGB48BE, AV_PIX_FMT_BGR48BE,
+ AV_PIX_FMT_RGB48LE, AV_PIX_FMT_BGR48LE,
+ AV_PIX_FMT_RGBA64BE, AV_PIX_FMT_BGRA64BE,
+ AV_PIX_FMT_RGBA64LE, AV_PIX_FMT_BGRA64LE,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_GBRP9BE, AV_PIX_FMT_GBRP9LE,
+ AV_PIX_FMT_GBRP10BE, AV_PIX_FMT_GBRP10LE,
+ AV_PIX_FMT_GBRP12BE, AV_PIX_FMT_GBRP12LE,
+ AV_PIX_FMT_GBRP14BE, AV_PIX_FMT_GBRP14LE,
+ AV_PIX_FMT_GBRP16BE, AV_PIX_FMT_GBRP16LE,
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA422P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV420P9LE, AV_PIX_FMT_YUVA420P9LE,
+ AV_PIX_FMT_YUV420P9BE, AV_PIX_FMT_YUVA420P9BE,
+ AV_PIX_FMT_YUV422P9LE, AV_PIX_FMT_YUVA422P9LE,
+ AV_PIX_FMT_YUV422P9BE, AV_PIX_FMT_YUVA422P9BE,
+ AV_PIX_FMT_YUV444P9LE, AV_PIX_FMT_YUVA444P9LE,
+ AV_PIX_FMT_YUV444P9BE, AV_PIX_FMT_YUVA444P9BE,
+ AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUVA420P10LE,
+ AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUVA420P10BE,
+ AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUVA422P10LE,
+ AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUVA422P10BE,
+ AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUVA444P10LE,
+ AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUVA444P10BE,
+ AV_PIX_FMT_YUV420P12BE, AV_PIX_FMT_YUV420P12LE,
+ AV_PIX_FMT_YUV422P12BE, AV_PIX_FMT_YUV422P12LE,
+ AV_PIX_FMT_YUV444P12BE, AV_PIX_FMT_YUV444P12LE,
+ AV_PIX_FMT_YUV420P14BE, AV_PIX_FMT_YUV420P14LE,
+ AV_PIX_FMT_YUV422P14BE, AV_PIX_FMT_YUV422P14LE,
+ AV_PIX_FMT_YUV444P14BE, AV_PIX_FMT_YUV444P14LE,
+ AV_PIX_FMT_YUV420P16LE, AV_PIX_FMT_YUVA420P16LE,
+ AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUVA420P16BE,
+ AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUVA422P16LE,
+ AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUVA422P16BE,
+ AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUVA444P16LE,
+ AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUVA444P16BE,
+ AV_PIX_FMT_NONE
+};
+
static int query_formats(AVFilterContext *ctx)
{
- static const enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE
- };
+ Stereo3DContext *s = ctx->priv;
+ const enum AVPixelFormat *pix_fmts;
+
+ switch (s->out.format) {
+ case ANAGLYPH_GM_COLOR:
+ case ANAGLYPH_GM_DUBOIS:
+ case ANAGLYPH_GM_GRAY:
+ case ANAGLYPH_GM_HALF:
+ case ANAGLYPH_RB_GRAY:
+ case ANAGLYPH_RC_COLOR:
+ case ANAGLYPH_RC_DUBOIS:
+ case ANAGLYPH_RC_GRAY:
+ case ANAGLYPH_RC_HALF:
+ case ANAGLYPH_RG_GRAY:
+ case ANAGLYPH_YB_COLOR:
+ case ANAGLYPH_YB_DUBOIS:
+ case ANAGLYPH_YB_GRAY:
+ case ANAGLYPH_YB_HALF:
+ pix_fmts = anaglyph_pix_fmts;
+ break;
+ default:
+ pix_fmts = other_pix_fmts;
+ }
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
@@ -195,12 +289,46 @@ static int config_output(AVFilterLink *outlink)
AVFilterLink *inlink = ctx->inputs[0];
Stereo3DContext *s = ctx->priv;
AVRational aspect = inlink->sample_aspect_ratio;
+ AVRational fps = inlink->frame_rate;
+ AVRational tb = inlink->time_base;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
+ int ret;
+
+ switch (s->in.format) {
+ case SIDE_BY_SIDE_2_LR:
+ case SIDE_BY_SIDE_LR:
+ case SIDE_BY_SIDE_2_RL:
+ case SIDE_BY_SIDE_RL:
+ if (inlink->w & 1) {
+ av_log(ctx, AV_LOG_ERROR, "width must be even\n");
+ return AVERROR_INVALIDDATA;
+ }
+ break;
+ case ABOVE_BELOW_2_LR:
+ case ABOVE_BELOW_LR:
+ case ABOVE_BELOW_2_RL:
+ case ABOVE_BELOW_RL:
+ if (s->out.format == INTERLEAVE_ROWS_LR ||
+ s->out.format == INTERLEAVE_ROWS_RL) {
+ if (inlink->h & 3) {
+ av_log(ctx, AV_LOG_ERROR, "height must be multiple of 4\n");
+ return AVERROR_INVALIDDATA;
+ }
+ }
+ if (inlink->h & 1) {
+ av_log(ctx, AV_LOG_ERROR, "height must be even\n");
+ return AVERROR_INVALIDDATA;
+ }
+ break;
+ }
s->in.width =
s->width = inlink->w;
s->in.height =
s->height = inlink->h;
s->row_step = 1;
+ s->in.off_lstep =
+ s->in.off_rstep =
s->in.off_left =
s->in.off_right =
s->in.row_left =
@@ -211,13 +339,13 @@ static int config_output(AVFilterLink *outlink)
aspect.num *= 2;
case SIDE_BY_SIDE_LR:
s->width = inlink->w / 2;
- s->in.off_right = s->width * 3;
+ s->in.off_right = s->width;
break;
case SIDE_BY_SIDE_2_RL:
aspect.num *= 2;
case SIDE_BY_SIDE_RL:
s->width = inlink->w / 2;
- s->in.off_left = s->width * 3;
+ s->in.off_left = s->width;
break;
case ABOVE_BELOW_2_LR:
aspect.den *= 2;
@@ -231,6 +359,12 @@ static int config_output(AVFilterLink *outlink)
s->in.row_left =
s->height = inlink->h / 2;
break;
+ case ALTERNATING_RL:
+ case ALTERNATING_LR:
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+ fps.den *= 2;
+ tb.num *= 2;
+ break;
default:
av_log(ctx, AV_LOG_ERROR, "input format %d is not supported\n", s->in.format);
return AVERROR(EINVAL);
@@ -238,6 +372,8 @@ static int config_output(AVFilterLink *outlink)
s->out.width = s->width;
s->out.height = s->height;
+ s->out.off_lstep =
+ s->out.off_rstep =
s->out.off_left =
s->out.off_right =
s->out.row_left =
@@ -257,29 +393,35 @@ static int config_output(AVFilterLink *outlink)
case ANAGLYPH_YB_GRAY:
case ANAGLYPH_YB_HALF:
case ANAGLYPH_YB_COLOR:
- case ANAGLYPH_YB_DUBOIS:
- memcpy(s->ana_matrix, ana_coeff[s->out.format], sizeof(s->ana_matrix));
+ case ANAGLYPH_YB_DUBOIS: {
+ uint8_t rgba_map[4];
+
+ ff_fill_rgba_map(rgba_map, outlink->format);
+ s->ana_matrix[rgba_map[0]] = &ana_coeff[s->out.format][0][0];
+ s->ana_matrix[rgba_map[1]] = &ana_coeff[s->out.format][1][0];
+ s->ana_matrix[rgba_map[2]] = &ana_coeff[s->out.format][2][0];
break;
+ }
case SIDE_BY_SIDE_2_LR:
- aspect.num /= 2;
+ aspect.den *= 2;
case SIDE_BY_SIDE_LR:
- s->out.width =
- s->out.off_right = s->width * 3;
+ s->out.width = s->width * 2;
+ s->out.off_right = s->width;
break;
case SIDE_BY_SIDE_2_RL:
- aspect.num /= 2;
+ aspect.den *= 2;
case SIDE_BY_SIDE_RL:
s->out.width = s->width * 2;
- s->out.off_left = s->width * 3;
+ s->out.off_left = s->width;
break;
case ABOVE_BELOW_2_LR:
- aspect.den /= 2;
+ aspect.num *= 2;
case ABOVE_BELOW_LR:
s->out.height = s->height * 2;
s->out.row_right = s->height;
break;
case ABOVE_BELOW_2_RL:
- aspect.den /= 2;
+ aspect.num *= 2;
case ABOVE_BELOW_RL:
s->out.height = s->height * 2;
s->out.row_left = s->height;
@@ -287,33 +429,50 @@ static int config_output(AVFilterLink *outlink)
case INTERLEAVE_ROWS_LR:
s->row_step = 2;
s->height = s->height / 2;
- s->out.off_right = s->width * 3;
- s->in.off_right += s->in.width * 3;
+ s->out.off_rstep =
+ s->in.off_rstep = 1;
break;
case INTERLEAVE_ROWS_RL:
s->row_step = 2;
s->height = s->height / 2;
- s->out.off_left = s->width * 3;
- s->in.off_left += s->in.width * 3;
+ s->out.off_lstep =
+ s->in.off_lstep = 1;
break;
case MONO_R:
s->in.off_left = s->in.off_right;
s->in.row_left = s->in.row_right;
case MONO_L:
break;
+ case ALTERNATING_RL:
+ case ALTERNATING_LR:
+ fps.num *= 2;
+ tb.den *= 2;
+ break;
default:
- av_log(ctx, AV_LOG_ERROR, "output format is not supported\n");
+ av_log(ctx, AV_LOG_ERROR, "output format %d is not supported\n", s->out.format);
return AVERROR(EINVAL);
}
outlink->w = s->out.width;
outlink->h = s->out.height;
+ outlink->frame_rate = fps;
+ outlink->time_base = tb;
outlink->sample_aspect_ratio = aspect;
+ if ((ret = av_image_fill_linesizes(s->linesize, outlink->format, s->width)) < 0)
+ return ret;
+ s->nb_planes = av_pix_fmt_count_planes(outlink->format);
+ av_image_fill_max_pixsteps(s->pixstep, NULL, desc);
+ s->ts_unit = av_q2d(av_inv_q(av_mul_q(outlink->frame_rate, outlink->time_base)));
+ s->pheight[1] = s->pheight[2] = FF_CEIL_RSHIFT(s->height, desc->log2_chroma_h);
+ s->pheight[0] = s->pheight[3] = s->height;
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
+
return 0;
}
-static inline uint8_t ana_convert(const int *coeff, uint8_t *left, uint8_t *right)
+static inline uint8_t ana_convert(const int *coeff, const uint8_t *left, const uint8_t *right)
{
int sum;
@@ -329,24 +488,59 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
AVFilterContext *ctx = inlink->dst;
Stereo3DContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
- AVFrame *out;
- int out_off_left, out_off_right;
- int in_off_left, in_off_right;
- int ret;
+ AVFrame *out, *oleft, *oright, *ileft, *iright;
+ int out_off_left[4], out_off_right[4];
+ int in_off_left[4], in_off_right[4];
+ int i;
- out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ switch (s->in.format) {
+ case ALTERNATING_LR:
+ case ALTERNATING_RL:
+ if (!s->prev) {
+ s->prev = inpicref;
+ return 0;
+ }
+ ileft = s->prev;
+ iright = inpicref;
+ if (s->in.format == ALTERNATING_RL)
+ FFSWAP(AVFrame *, ileft, iright);
+ break;
+ default:
+ ileft = iright = inpicref;
+ };
+
+ out = oleft = oright = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
+ av_frame_free(&s->prev);
av_frame_free(&inpicref);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, inpicref);
- in_off_left = s->in.row_left * inpicref->linesize[0] + s->in.off_left;
- in_off_right = s->in.row_right * inpicref->linesize[0] + s->in.off_right;
- out_off_left = s->out.row_left * out->linesize[0] + s->out.off_left;
- out_off_right = s->out.row_right * out->linesize[0] + s->out.off_right;
+ if (s->out.format == ALTERNATING_LR ||
+ s->out.format == ALTERNATING_RL) {
+ oright = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!oright) {
+ av_frame_free(&oleft);
+ av_frame_free(&s->prev);
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(oright, inpicref);
+ }
+
+ for (i = 0; i < 4; i++) {
+ int hsub = i == 1 || i == 2 ? s->hsub : 0;
+ int vsub = i == 1 || i == 2 ? s->vsub : 0;
+ in_off_left[i] = (FF_CEIL_RSHIFT(s->in.row_left, vsub) + s->in.off_lstep) * ileft->linesize[i] + FF_CEIL_RSHIFT(s->in.off_left * s->pixstep[i], hsub);
+ in_off_right[i] = (FF_CEIL_RSHIFT(s->in.row_right, vsub) + s->in.off_rstep) * iright->linesize[i] + FF_CEIL_RSHIFT(s->in.off_right * s->pixstep[i], hsub);
+ out_off_left[i] = (FF_CEIL_RSHIFT(s->out.row_left, vsub) + s->out.off_lstep) * oleft->linesize[i] + FF_CEIL_RSHIFT(s->out.off_left * s->pixstep[i], hsub);
+ out_off_right[i] = (FF_CEIL_RSHIFT(s->out.row_right, vsub) + s->out.off_rstep) * oright->linesize[i] + FF_CEIL_RSHIFT(s->out.off_right * s->pixstep[i], hsub);
+ }
switch (s->out.format) {
+ case ALTERNATING_LR:
+ case ALTERNATING_RL:
case SIDE_BY_SIDE_LR:
case SIDE_BY_SIDE_RL:
case SIDE_BY_SIDE_2_LR:
@@ -357,23 +551,28 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
case ABOVE_BELOW_2_RL:
case INTERLEAVE_ROWS_LR:
case INTERLEAVE_ROWS_RL:
- av_image_copy_plane(out->data[0] + out_off_left,
- out->linesize[0] * s->row_step,
- inpicref->data[0] + in_off_left,
- inpicref->linesize[0] * s->row_step,
- 3 * s->width, s->height);
- av_image_copy_plane(out->data[0] + out_off_right,
- out->linesize[0] * s->row_step,
- inpicref->data[0] + in_off_right,
- inpicref->linesize[0] * s->row_step,
- 3 * s->width, s->height);
+ for (i = 0; i < s->nb_planes; i++) {
+ av_image_copy_plane(oleft->data[i] + out_off_left[i],
+ oleft->linesize[i] * s->row_step,
+ ileft->data[i] + in_off_left[i],
+ ileft->linesize[i] * s->row_step,
+ s->linesize[i], s->pheight[i]);
+ av_image_copy_plane(oright->data[i] + out_off_right[i],
+ oright->linesize[i] * s->row_step,
+ iright->data[i] + in_off_right[i],
+ iright->linesize[i] * s->row_step,
+ s->linesize[i], s->pheight[i]);
+ }
break;
case MONO_L:
+ iright = ileft;
case MONO_R:
- av_image_copy_plane(out->data[0], out->linesize[0],
- inpicref->data[0] + in_off_left,
- inpicref->linesize[0],
- 3 * s->width, s->height);
+ for (i = 0; i < s->nb_planes; i++) {
+ av_image_copy_plane(out->data[i], out->linesize[i],
+ iright->data[i] + in_off_left[i],
+ iright->linesize[i],
+ s->linesize[i], s->pheight[i]);
+ }
break;
case ANAGLYPH_RB_GRAY:
case ANAGLYPH_RG_GRAY:
@@ -389,23 +588,21 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
case ANAGLYPH_YB_HALF:
case ANAGLYPH_YB_COLOR:
case ANAGLYPH_YB_DUBOIS: {
- int i, x, y, il, ir, o;
- uint8_t *src = inpicref->data[0];
+ int x, y, il, ir, o;
+ const uint8_t *lsrc = ileft->data[0];
+ const uint8_t *rsrc = iright->data[0];
uint8_t *dst = out->data[0];
int out_width = s->out.width;
- int *ana_matrix[3];
-
- for (i = 0; i < 3; i++)
- ana_matrix[i] = s->ana_matrix[i];
+ const int **ana_matrix = s->ana_matrix;
for (y = 0; y < s->out.height; y++) {
o = out->linesize[0] * y;
- il = in_off_left + y * inpicref->linesize[0];
- ir = in_off_right + y * inpicref->linesize[0];
+ il = in_off_left[0] + y * ileft->linesize[0];
+ ir = in_off_right[0] + y * iright->linesize[0];
for (x = 0; x < out_width; x++, il += 3, ir += 3, o+= 3) {
- dst[o ] = ana_convert(ana_matrix[0], src + il, src + ir);
- dst[o + 1] = ana_convert(ana_matrix[1], src + il, src + ir);
- dst[o + 2] = ana_convert(ana_matrix[2], src + il, src + ir);
+ dst[o ] = ana_convert(ana_matrix[0], lsrc + il, rsrc + ir);
+ dst[o + 1] = ana_convert(ana_matrix[1], lsrc + il, rsrc + ir);
+ dst[o + 2] = ana_convert(ana_matrix[2], lsrc + il, rsrc + ir);
}
}
break;
@@ -414,19 +611,34 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
av_assert0(0);
}
- ret = ff_filter_frame(outlink, out);
av_frame_free(&inpicref);
- if (ret < 0)
- return ret;
- return 0;
+ av_frame_free(&s->prev);
+ if (oright != oleft) {
+ if (s->out.format == ALTERNATING_LR)
+ FFSWAP(AVFrame *, oleft, oright);
+ oright->pts = outlink->frame_count * s->ts_unit;
+ ff_filter_frame(outlink, oright);
+ out = oleft;
+ oleft->pts = outlink->frame_count * s->ts_unit;
+ } else if (s->in.format == ALTERNATING_LR ||
+ s->in.format == ALTERNATING_RL) {
+ out->pts = outlink->frame_count * s->ts_unit;
+ }
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ Stereo3DContext *s = ctx->priv;
+
+ av_frame_free(&s->prev);
}
static const AVFilterPad stereo3d_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -440,15 +652,13 @@ static const AVFilterPad stereo3d_outputs[] = {
{ NULL }
};
-static const char *const shorthand[] = { "in", "out", NULL };
-
-AVFilter avfilter_vf_stereo3d = {
+AVFilter ff_vf_stereo3d = {
.name = "stereo3d",
.description = NULL_IF_CONFIG_SMALL("Convert video stereoscopic 3D view."),
.priv_size = sizeof(Stereo3DContext),
+ .uninit = uninit,
.query_formats = query_formats,
.inputs = stereo3d_inputs,
.outputs = stereo3d_outputs,
.priv_class = &stereo3d_class,
- .shorthand = shorthand,
};
diff --git a/ffmpeg/libavfilter/vf_subtitles.c b/ffmpeg/libavfilter/vf_subtitles.c
index e5d2e1c..e44f61d 100644
--- a/ffmpeg/libavfilter/vf_subtitles.c
+++ b/ffmpeg/libavfilter/vf_subtitles.c
@@ -85,17 +85,9 @@ static void ass_log(int ass_level, const char *fmt, va_list args, void *ctx)
av_log(ctx, level, "\n");
}
-static av_cold int init(AVFilterContext *ctx, const char *args, const AVClass *class)
+static av_cold int init(AVFilterContext *ctx)
{
AssContext *ass = ctx->priv;
- static const char *shorthand[] = { "filename", NULL };
- int ret;
-
- ass->class = class;
- av_opt_set_defaults(ass);
-
- if ((ret = av_opt_set_from_string(ass, args, shorthand, "=", ":")) < 0)
- return ret;
if (!ass->filename) {
av_log(ctx, AV_LOG_ERROR, "No filename provided!\n");
@@ -123,7 +115,6 @@ static av_cold void uninit(AVFilterContext *ctx)
{
AssContext *ass = ctx->priv;
- av_opt_free(ass);
if (ass->track)
ass_free_track(ass->track);
if (ass->renderer)
@@ -219,10 +210,10 @@ static const AVOption ass_options[] = {
AVFILTER_DEFINE_CLASS(ass);
-static av_cold int init_ass(AVFilterContext *ctx, const char *args)
+static av_cold int init_ass(AVFilterContext *ctx)
{
AssContext *ass = ctx->priv;
- int ret = init(ctx, args, &ass_class);
+ int ret = init(ctx);
if (ret < 0)
return ret;
@@ -237,7 +228,7 @@ static av_cold int init_ass(AVFilterContext *ctx, const char *args)
return 0;
}
-AVFilter avfilter_vf_ass = {
+AVFilter ff_vf_ass = {
.name = "ass",
.description = NULL_IF_CONFIG_SMALL("Render ASS subtitles onto input video using the libass library."),
.priv_size = sizeof(AssContext),
@@ -260,7 +251,7 @@ static const AVOption subtitles_options[] = {
AVFILTER_DEFINE_CLASS(subtitles);
-static av_cold int init_subtitles(AVFilterContext *ctx, const char *args)
+static av_cold int init_subtitles(AVFilterContext *ctx)
{
int ret, sid;
AVDictionary *codec_opts = NULL;
@@ -273,7 +264,7 @@ static av_cold int init_subtitles(AVFilterContext *ctx, const char *args)
AssContext *ass = ctx->priv;
/* Init libass */
- ret = init(ctx, args, &subtitles_class);
+ ret = init(ctx);
if (ret < 0)
return ret;
ass->track = ass_new_track(ass->library);
@@ -311,7 +302,7 @@ static av_cold int init_subtitles(AVFilterContext *ctx, const char *args)
return AVERROR(EINVAL);
}
dec_desc = avcodec_descriptor_get(dec_ctx->codec_id);
- if (dec_desc && (dec_desc->props & AV_CODEC_PROP_BITMAP_SUB)) {
+ if (dec_desc && !(dec_desc->props & AV_CODEC_PROP_TEXT_SUB)) {
av_log(ctx, AV_LOG_ERROR,
"Only text based subtitles are currently supported\n");
return AVERROR_PATCHWELCOME;
@@ -332,7 +323,7 @@ static av_cold int init_subtitles(AVFilterContext *ctx, const char *args)
pkt.size = 0;
while (av_read_frame(fmt, &pkt) >= 0) {
int i, got_subtitle;
- AVSubtitle sub;
+ AVSubtitle sub = {0};
if (pkt.stream_index == sid) {
ret = avcodec_decode_subtitle2(dec_ctx, &sub, &got_subtitle, &pkt);
@@ -361,7 +352,7 @@ end:
return ret;
}
-AVFilter avfilter_vf_subtitles = {
+AVFilter ff_vf_subtitles = {
.name = "subtitles",
.description = NULL_IF_CONFIG_SMALL("Render text subtitles onto input video using the libass library."),
.priv_size = sizeof(AssContext),
diff --git a/ffmpeg/libavfilter/vf_super2xsai.c b/ffmpeg/libavfilter/vf_super2xsai.c
index 4f25968..686dac1 100644
--- a/ffmpeg/libavfilter/vf_super2xsai.c
+++ b/ffmpeg/libavfilter/vf_super2xsai.c
@@ -342,10 +342,10 @@ static const AVFilterPad super2xsai_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_super2xsai = {
- .name = "super2xsai",
- .description = NULL_IF_CONFIG_SMALL("Scale the input by 2x using the Super2xSaI pixel art algorithm."),
- .priv_size = sizeof(Super2xSaIContext),
+AVFilter ff_vf_super2xsai = {
+ .name = "super2xsai",
+ .description = NULL_IF_CONFIG_SMALL("Scale the input by 2x using the Super2xSaI pixel art algorithm."),
+ .priv_size = sizeof(Super2xSaIContext),
.query_formats = query_formats,
.inputs = super2xsai_inputs,
.outputs = super2xsai_outputs,
diff --git a/ffmpeg/libavfilter/vf_swapuv.c b/ffmpeg/libavfilter/vf_swapuv.c
index 2ca97f9..71ae243 100644
--- a/ffmpeg/libavfilter/vf_swapuv.c
+++ b/ffmpeg/libavfilter/vf_swapuv.c
@@ -54,7 +54,7 @@ static int is_planar_yuv(const AVPixFmtDescriptor *desc)
{
int i;
- if (desc->flags & ~(PIX_FMT_BE | PIX_FMT_PLANAR | PIX_FMT_ALPHA) ||
+ if (desc->flags & ~(AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA) ||
desc->nb_components < 3 ||
(desc->comp[1].depth_minus1 != desc->comp[2].depth_minus1))
return 0;
@@ -101,10 +101,9 @@ static const AVFilterPad swapuv_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_swapuv = {
- .name = "swapuv",
- .description = NULL_IF_CONFIG_SMALL("Swap U and V components."),
- .priv_size = 0,
+AVFilter ff_vf_swapuv = {
+ .name = "swapuv",
+ .description = NULL_IF_CONFIG_SMALL("Swap U and V components."),
.query_formats = query_formats,
.inputs = swapuv_inputs,
.outputs = swapuv_outputs,
diff --git a/ffmpeg/libavfilter/vf_thumbnail.c b/ffmpeg/libavfilter/vf_thumbnail.c
index b62bef6..1883154 100644
--- a/ffmpeg/libavfilter/vf_thumbnail.c
+++ b/ffmpeg/libavfilter/vf_thumbnail.c
@@ -27,6 +27,7 @@
* @see http://notbrainsurgery.livejournal.com/29773.html
*/
+#include "libavutil/opt.h"
#include "avfilter.h"
#include "internal.h"
@@ -38,27 +39,27 @@ struct thumb_frame {
};
typedef struct {
+ const AVClass *class;
int n; ///< current frame
int n_frames; ///< number of frames for analysis
struct thumb_frame *frames; ///< the n_frames frames
AVRational tb; ///< copy of the input timebase to ease access
} ThumbContext;
-static av_cold int init(AVFilterContext *ctx, const char *args)
+#define OFFSET(x) offsetof(ThumbContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption thumbnail_options[] = {
+ { "n", "set the frames batch size", OFFSET(n_frames), AV_OPT_TYPE_INT, {.i64=100}, 2, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(thumbnail);
+
+static av_cold int init(AVFilterContext *ctx)
{
ThumbContext *thumb = ctx->priv;
- if (!args) {
- thumb->n_frames = 100;
- } else {
- int n = sscanf(args, "%d", &thumb->n_frames);
- if (n != 1 || thumb->n_frames < 2) {
- thumb->n_frames = 0;
- av_log(ctx, AV_LOG_ERROR,
- "Invalid number of frames specified (minimum is 2).\n");
- return AVERROR(EINVAL);
- }
- }
thumb->frames = av_calloc(thumb->n_frames, sizeof(*thumb->frames));
if (!thumb->frames) {
av_log(ctx, AV_LOG_ERROR,
@@ -208,11 +209,10 @@ static int query_formats(AVFilterContext *ctx)
static const AVFilterPad thumbnail_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_props,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -226,7 +226,7 @@ static const AVFilterPad thumbnail_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_thumbnail = {
+AVFilter ff_vf_thumbnail = {
.name = "thumbnail",
.description = NULL_IF_CONFIG_SMALL("Select the most representative frame in a given sequence of consecutive frames."),
.priv_size = sizeof(ThumbContext),
@@ -235,4 +235,5 @@ AVFilter avfilter_vf_thumbnail = {
.query_formats = query_formats,
.inputs = thumbnail_inputs,
.outputs = thumbnail_outputs,
+ .priv_class = &thumbnail_class,
};
diff --git a/ffmpeg/libavfilter/vf_tile.c b/ffmpeg/libavfilter/vf_tile.c
index b45cbb8..786f4f6 100644
--- a/ffmpeg/libavfilter/vf_tile.c
+++ b/ffmpeg/libavfilter/vf_tile.c
@@ -41,6 +41,7 @@ typedef struct {
FFDrawContext draw;
FFDrawColor blank;
AVFrame *out_ref;
+ uint8_t rgba_color[4];
} TileContext;
#define REASONABLE_SIZE 1024
@@ -51,18 +52,19 @@ typedef struct {
static const AVOption tile_options[] = {
{ "layout", "set grid size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE,
{.str = "6x5"}, 0, 0, FLAGS },
+ { "nb_frames", "set maximum number of frame to render", OFFSET(nb_frames),
+ AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
{ "margin", "set outer border margin in pixels", OFFSET(margin),
AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1024, FLAGS },
{ "padding", "set inner border thickness in pixels", OFFSET(padding),
AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1024, FLAGS },
- { "nb_frames", "set maximum number of frame to render", OFFSET(nb_frames),
- AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
- {NULL},
+ { "color", "set the color of the unused area", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str = "black"}, .flags = FLAGS },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(tile);
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
TileContext *tile = ctx->priv;
@@ -113,8 +115,9 @@ static int config_props(AVFilterLink *outlink)
outlink->frame_rate = av_mul_q(inlink->frame_rate,
(AVRational){ 1, tile->nb_frames });
ff_draw_init(&tile->draw, inlink->format, 0);
- /* TODO make the color an option, or find an unified way of choosing it */
- ff_draw_color(&tile->draw, &tile->blank, (uint8_t[]){ 0, 0, 0, -1 });
+ ff_draw_color(&tile->draw, &tile->blank, tile->rgba_color);
+
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
return 0;
}
@@ -169,8 +172,10 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
if (!tile->current) {
tile->out_ref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
- if (!tile->out_ref)
+ if (!tile->out_ref) {
+ av_frame_free(&picref);
return AVERROR(ENOMEM);
+ }
av_frame_copy_props(tile->out_ref, picref);
tile->out_ref->width = outlink->w;
tile->out_ref->height = outlink->h;
@@ -203,16 +208,9 @@ static int request_frame(AVFilterLink *outlink)
AVFilterLink *inlink = ctx->inputs[0];
int r;
- while (1) {
- r = ff_request_frame(inlink);
- if (r < 0) {
- if (r == AVERROR_EOF && tile->current)
- r = end_last_frame(ctx);
- break;
- }
- if (!tile->current) /* done */
- break;
- }
+ r = ff_request_frame(inlink);
+ if (r == AVERROR_EOF && tile->current)
+ r = end_last_frame(ctx);
return r;
}
@@ -235,10 +233,7 @@ static const AVFilterPad tile_outputs[] = {
{ NULL }
};
-static const char *const shorthand[] =
- { "layout", "nb_frames", "margin", "padding", NULL };
-
-AVFilter avfilter_vf_tile = {
+AVFilter ff_vf_tile = {
.name = "tile",
.description = NULL_IF_CONFIG_SMALL("Tile several successive frames together."),
.init = init,
@@ -247,5 +242,4 @@ AVFilter avfilter_vf_tile = {
.inputs = tile_inputs,
.outputs = tile_outputs,
.priv_class = &tile_class,
- .shorthand = shorthand,
};
diff --git a/ffmpeg/libavfilter/vf_tinterlace.c b/ffmpeg/libavfilter/vf_tinterlace.c
index bce6301..db82393 100644
--- a/ffmpeg/libavfilter/vf_tinterlace.c
+++ b/ffmpeg/libavfilter/vf_tinterlace.c
@@ -87,8 +87,10 @@ static enum AVPixelFormat full_scale_yuvj_pix_fmts[] = {
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
- AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
AV_PIX_FMT_GRAY8, FULL_SCALE_YUVJ_FORMATS,
AV_PIX_FMT_NONE
};
@@ -97,25 +99,12 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- TInterlaceContext *tinterlace = ctx->priv;
- static const char *shorthand[] = { "mode", NULL };
-
- tinterlace->class = &tinterlace_class;
- av_opt_set_defaults(tinterlace);
-
- return av_opt_set_from_string(tinterlace, args, shorthand, "=", ":");
-}
-
static av_cold void uninit(AVFilterContext *ctx)
{
TInterlaceContext *tinterlace = ctx->priv;
av_frame_free(&tinterlace->cur );
av_frame_free(&tinterlace->next);
-
- av_opt_free(tinterlace);
av_freep(&tinterlace->black_data[0]);
}
@@ -127,6 +116,7 @@ static int config_out_props(AVFilterLink *outlink)
TInterlaceContext *tinterlace = ctx->priv;
tinterlace->vsub = desc->log2_chroma_h;
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
outlink->w = inlink->w;
outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD ?
inlink->h*2 : inlink->h;
@@ -143,7 +133,7 @@ static int config_out_props(AVFilterLink *outlink)
/* fill black picture with black */
for (i = 0; i < 4 && tinterlace->black_data[i]; i++) {
- int h = i == 1 || i == 2 ? outlink->h >> desc->log2_chroma_h : outlink->h;
+ int h = i == 1 || i == 2 ? FF_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h) : outlink->h;
memset(tinterlace->black_data[i], black[i],
tinterlace->black_linesize[i] * h);
}
@@ -188,7 +178,7 @@ void copy_picture_field(uint8_t *dst[4], int dst_linesize[4],
int h, i;
for (plane = 0; plane < desc->nb_components; plane++) {
- int lines = plane == 1 || plane == 2 ? src_h >> vsub : src_h;
+ int lines = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(src_h, vsub) : src_h;
int linesize = av_image_get_linesize(format, w, plane);
uint8_t *dstp = dst[plane];
const uint8_t *srcp = src[plane];
@@ -196,7 +186,7 @@ void copy_picture_field(uint8_t *dst[4], int dst_linesize[4],
if (linesize < 0)
return;
- lines /= k;
+ lines = (lines + (src_field == FIELD_UPPER)) / k;
if (src_field == FIELD_LOWER)
srcp += src_linesize[plane];
if (interleave && dst_field == FIELD_LOWER)
@@ -214,7 +204,7 @@ void copy_picture_field(uint8_t *dst[4], int dst_linesize[4],
if (h == 1) srcp_below = srcp; // there is no line below
for (i = 0; i < linesize; i++) {
// this calculation is an integer representation of
- // '0.5 * current + 0.25 * above + 0.25 + below'
+ // '0.5 * current + 0.25 * above + 0.25 * below'
// '1 +' is for rounding. */
dstp[i] = (1 + srcp[i] + srcp[i] + srcp_above[i] + srcp_below[i]) >> 2;
}
@@ -273,12 +263,16 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
case MODE_DROP_ODD: /* only output even frames, odd frames are dropped; height unchanged, half framerate */
case MODE_DROP_EVEN: /* only output odd frames, even frames are dropped; height unchanged, half framerate */
out = av_frame_clone(tinterlace->mode == MODE_DROP_EVEN ? cur : next);
+ if (!out)
+ return AVERROR(ENOMEM);
av_frame_free(&tinterlace->next);
break;
case MODE_PAD: /* expand each frame to double height, but pad alternate
* lines with black; framerate unchanged */
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
av_frame_copy_props(out, cur);
out->height = outlink->h;
@@ -362,21 +356,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
return ret;
}
-static int request_frame(AVFilterLink *outlink)
-{
- TInterlaceContext *tinterlace = outlink->src->priv;
- AVFilterLink *inlink = outlink->src->inputs[0];
-
- do {
- int ret;
-
- if ((ret = ff_request_frame(inlink)) < 0)
- return ret;
- } while (!tinterlace->cur);
-
- return 0;
-}
-
static const AVFilterPad tinterlace_inputs[] = {
{
.name = "default",
@@ -388,19 +367,17 @@ static const AVFilterPad tinterlace_inputs[] = {
static const AVFilterPad tinterlace_outputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_out_props,
- .request_frame = request_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_out_props,
},
{ NULL }
};
-AVFilter avfilter_vf_tinterlace = {
+AVFilter ff_vf_tinterlace = {
.name = "tinterlace",
.description = NULL_IF_CONFIG_SMALL("Perform temporal field interlacing."),
.priv_size = sizeof(TInterlaceContext),
- .init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = tinterlace_inputs,
diff --git a/ffmpeg/libavfilter/vf_transpose.c b/ffmpeg/libavfilter/vf_transpose.c
index ed87017..7e471d4 100644
--- a/ffmpeg/libavfilter/vf_transpose.c
+++ b/ffmpeg/libavfilter/vf_transpose.c
@@ -28,10 +28,10 @@
#include <stdio.h>
#include "libavutil/intreadwrite.h"
-#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
+#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
@@ -43,36 +43,22 @@ typedef enum {
TRANSPOSE_PT_TYPE_PORTRAIT,
} PassthroughType;
+enum TransposeDir {
+ TRANSPOSE_CCLOCK_FLIP,
+ TRANSPOSE_CLOCK,
+ TRANSPOSE_CCLOCK,
+ TRANSPOSE_CLOCK_FLIP,
+};
+
typedef struct {
const AVClass *class;
int hsub, vsub;
int pixsteps[4];
- /* 0 Rotate by 90 degrees counterclockwise and vflip. */
- /* 1 Rotate by 90 degrees clockwise. */
- /* 2 Rotate by 90 degrees counterclockwise. */
- /* 3 Rotate by 90 degrees clockwise and vflip. */
- int dir;
PassthroughType passthrough; ///< landscape passthrough mode enabled
+ enum TransposeDir dir;
} TransContext;
-#define OFFSET(x) offsetof(TransContext, x)
-#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
-
-static const AVOption transpose_options[] = {
- { "dir", "set transpose direction", OFFSET(dir), AV_OPT_TYPE_INT, {.i64=0}, 0, 7, FLAGS },
-
- { "passthrough", "do not apply transposition if the input matches the specified geometry",
- OFFSET(passthrough), AV_OPT_TYPE_INT, {.i64=TRANSPOSE_PT_TYPE_NONE}, 0, INT_MAX, FLAGS, "passthrough" },
- { "none", "always apply transposition", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_NONE}, INT_MIN, INT_MAX, FLAGS, "passthrough" },
- { "portrait", "preserve portrait geometry", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_PORTRAIT}, INT_MIN, INT_MAX, FLAGS, "passthrough" },
- { "landscape", "preserve landscape geometry", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_LANDSCAPE}, INT_MIN, INT_MAX, FLAGS, "passthrough" },
-
- { NULL },
-};
-
-AVFILTER_DEFINE_CLASS(transpose);
-
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *pix_fmts = NULL;
@@ -80,9 +66,9 @@ static int query_formats(AVFilterContext *ctx)
for (fmt = 0; fmt < AV_PIX_FMT_NB; fmt++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
- if (!(desc->flags & PIX_FMT_PAL ||
- desc->flags & PIX_FMT_HWACCEL ||
- desc->flags & PIX_FMT_BITSTREAM ||
+ if (!(desc->flags & AV_PIX_FMT_FLAG_PAL ||
+ desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
+ desc->flags & AV_PIX_FMT_FLAG_BITSTREAM ||
desc->log2_chroma_w != desc->log2_chroma_h))
ff_add_format(&pix_fmts, fmt);
}
@@ -146,44 +132,34 @@ static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
ff_default_get_video_buffer(inlink, w, h);
}
-static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+typedef struct ThreadData {
+ AVFrame *in, *out;
+} ThreadData;
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr,
+ int nb_jobs)
{
- TransContext *trans = inlink->dst->priv;
- AVFilterLink *outlink = inlink->dst->outputs[0];
- AVFrame *out;
+ TransContext *trans = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *out = td->out;
+ AVFrame *in = td->in;
int plane;
- if (trans->passthrough)
- return ff_filter_frame(outlink, in);
-
- out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
- if (!out) {
- av_frame_free(&in);
- return AVERROR(ENOMEM);
- }
-
- out->pts = in->pts;
-
- if (in->sample_aspect_ratio.num == 0) {
- out->sample_aspect_ratio = in->sample_aspect_ratio;
- } else {
- out->sample_aspect_ratio.num = in->sample_aspect_ratio.den;
- out->sample_aspect_ratio.den = in->sample_aspect_ratio.num;
- }
-
for (plane = 0; out->data[plane]; plane++) {
int hsub = plane == 1 || plane == 2 ? trans->hsub : 0;
int vsub = plane == 1 || plane == 2 ? trans->vsub : 0;
int pixstep = trans->pixsteps[plane];
int inh = in->height >> vsub;
- int outw = out->width >> hsub;
- int outh = out->height >> vsub;
+ int outw = FF_CEIL_RSHIFT(out->width, hsub);
+ int outh = FF_CEIL_RSHIFT(out->height, vsub);
+ int start = (outh * jobnr ) / nb_jobs;
+ int end = (outh * (jobnr+1)) / nb_jobs;
uint8_t *dst, *src;
int dstlinesize, srclinesize;
int x, y;
- dst = out->data[plane];
dstlinesize = out->linesize[plane];
+ dst = out->data[plane] + start * dstlinesize;
src = in->data[plane];
srclinesize = in->linesize[plane];
@@ -193,55 +169,114 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
if (trans->dir&2) {
- dst += out->linesize[plane] * (outh-1);
+ dst = out->data[plane] + dstlinesize * (outh-start-1);
dstlinesize *= -1;
}
- for (y = 0; y < outh; y++) {
- switch (pixstep) {
- case 1:
+ switch (pixstep) {
+ case 1:
+ for (y = start; y < end; y++, dst += dstlinesize)
for (x = 0; x < outw; x++)
dst[x] = src[x*srclinesize + y];
- break;
- case 2:
+ break;
+ case 2:
+ for (y = start; y < end; y++, dst += dstlinesize) {
for (x = 0; x < outw; x++)
*((uint16_t *)(dst + 2*x)) = *((uint16_t *)(src + x*srclinesize + y*2));
- break;
- case 3:
+ }
+ break;
+ case 3:
+ for (y = start; y < end; y++, dst += dstlinesize) {
for (x = 0; x < outw; x++) {
int32_t v = AV_RB24(src + x*srclinesize + y*3);
AV_WB24(dst + 3*x, v);
}
- break;
- case 4:
+ }
+ break;
+ case 4:
+ for (y = start; y < end; y++, dst += dstlinesize) {
for (x = 0; x < outw; x++)
*((uint32_t *)(dst + 4*x)) = *((uint32_t *)(src + x*srclinesize + y*4));
- break;
- case 6:
+ }
+ break;
+ case 6:
+ for (y = start; y < end; y++, dst += dstlinesize) {
for (x = 0; x < outw; x++) {
int64_t v = AV_RB48(src + x*srclinesize + y*6);
AV_WB48(dst + 6*x, v);
}
- break;
- case 8:
+ }
+ break;
+ case 8:
+ for (y = start; y < end; y++, dst += dstlinesize) {
for (x = 0; x < outw; x++)
*((uint64_t *)(dst + 8*x)) = *((uint64_t *)(src + x*srclinesize + y*8));
- break;
}
- dst += dstlinesize;
+ break;
}
}
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ TransContext *trans = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ThreadData td;
+ AVFrame *out;
+
+ if (trans->passthrough)
+ return ff_filter_frame(outlink, in);
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ if (in->sample_aspect_ratio.num == 0) {
+ out->sample_aspect_ratio = in->sample_aspect_ratio;
+ } else {
+ out->sample_aspect_ratio.num = in->sample_aspect_ratio.den;
+ out->sample_aspect_ratio.den = in->sample_aspect_ratio.num;
+ }
+
+ td.in = in, td.out = out;
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ctx->graph->nb_threads));
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
+#define OFFSET(x) offsetof(TransContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption transpose_options[] = {
+ { "dir", "set transpose direction", OFFSET(dir), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 7, FLAGS, "dir" },
+ { "cclock_flip", "rotate counter-clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK_FLIP }, .unit = "dir" },
+ { "clock", "rotate clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK }, .unit = "dir" },
+ { "cclock", "rotate counter-clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK }, .unit = "dir" },
+ { "clock_flip", "rotate clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK_FLIP }, .unit = "dir" },
+
+ { "passthrough", "do not apply transposition if the input matches the specified geometry",
+ OFFSET(passthrough), AV_OPT_TYPE_INT, {.i64=TRANSPOSE_PT_TYPE_NONE}, 0, INT_MAX, FLAGS, "passthrough" },
+ { "none", "always apply transposition", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_NONE}, INT_MIN, INT_MAX, FLAGS, "passthrough" },
+ { "portrait", "preserve portrait geometry", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_PORTRAIT}, INT_MIN, INT_MAX, FLAGS, "passthrough" },
+ { "landscape", "preserve landscape geometry", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_LANDSCAPE}, INT_MIN, INT_MAX, FLAGS, "passthrough" },
+
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(transpose);
+
static const AVFilterPad avfilter_vf_transpose_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer= get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .get_video_buffer = get_video_buffer,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -255,18 +290,13 @@ static const AVFilterPad avfilter_vf_transpose_outputs[] = {
{ NULL }
};
-static const char *const shorthand[] = { "dir", "passthrough", NULL };
-
-AVFilter avfilter_vf_transpose = {
- .name = "transpose",
- .description = NULL_IF_CONFIG_SMALL("Transpose input video."),
-
- .priv_size = sizeof(TransContext),
-
+AVFilter ff_vf_transpose = {
+ .name = "transpose",
+ .description = NULL_IF_CONFIG_SMALL("Transpose input video."),
+ .priv_size = sizeof(TransContext),
+ .priv_class = &transpose_class,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_transpose_inputs,
- .outputs = avfilter_vf_transpose_outputs,
- .priv_class = &transpose_class,
- .shorthand = shorthand,
+ .inputs = avfilter_vf_transpose_inputs,
+ .outputs = avfilter_vf_transpose_outputs,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
};
diff --git a/ffmpeg/libavfilter/vf_unsharp.c b/ffmpeg/libavfilter/vf_unsharp.c
index 84a14ee..b9f6821 100644
--- a/ffmpeg/libavfilter/vf_unsharp.c
+++ b/ffmpeg/libavfilter/vf_unsharp.c
@@ -36,69 +36,21 @@
* http://www.engin.umd.umich.edu/~jwvm/ece581/21_GBlur.pdf
*/
-#include <float.h> /* DBL_MAX */
-
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "libavutil/common.h"
+#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
-
-#define MIN_MATRIX_SIZE 3
-#define MAX_MATRIX_SIZE 63
-
-/* right-shift and round-up */
-#define SHIFTUP(x,shift) (-((-(x))>>(shift)))
-
-typedef struct FilterParam {
- int msize_x; ///< matrix width
- int msize_y; ///< matrix height
- int amount; ///< effect amount
- int steps_x; ///< horizontal step count
- int steps_y; ///< vertical step count
- int scalebits; ///< bits to shift pixel
- int32_t halfscale; ///< amount to add to pixel
- uint32_t *sc[MAX_MATRIX_SIZE - 1]; ///< finite state machine storage
-} FilterParam;
-
-typedef struct {
- const AVClass *class;
- FilterParam luma; ///< luma parameters (width, height, amount)
- FilterParam chroma; ///< chroma parameters (width, height, amount)
- int hsub, vsub;
- int luma_msize_x, luma_msize_y, chroma_msize_x, chroma_msize_y;
- double luma_amount, chroma_amount;
-} UnsharpContext;
-
-#define OFFSET(x) offsetof(UnsharpContext, x)
-#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
-
-static const AVOption unsharp_options[] = {
- { "luma_msize_x", "set luma matrix x size", OFFSET(luma_msize_x), AV_OPT_TYPE_INT, {.i64=5}, 3, 63, .flags=FLAGS },
- { "lx", "set luma matrix x size", OFFSET(luma_msize_x), AV_OPT_TYPE_INT, {.i64=5}, 3, 63, .flags=FLAGS },
- { "luma_msize_y", "set luma matrix y size", OFFSET(luma_msize_y), AV_OPT_TYPE_INT, {.i64=5}, 3, 63, .flags=FLAGS },
- { "ly", "set luma matrix y size", OFFSET(luma_msize_y), AV_OPT_TYPE_INT, {.i64=5}, 3, 63, .flags=FLAGS },
- { "luma_amount", "set luma effect amount", OFFSET(luma_amount), AV_OPT_TYPE_DOUBLE, {.dbl=1.0}, -DBL_MAX, DBL_MAX, .flags=FLAGS },
- { "la", "set luma effect amount", OFFSET(luma_amount), AV_OPT_TYPE_DOUBLE, {.dbl=1.0}, -DBL_MAX, DBL_MAX, .flags=FLAGS },
-
- { "chroma_msize_x", "set chroma matrix x size", OFFSET(chroma_msize_x), AV_OPT_TYPE_INT, {.i64=5}, 3, 63, .flags=FLAGS },
- { "cx", "set chroma matrix x size", OFFSET(chroma_msize_x), AV_OPT_TYPE_INT, {.i64=5}, 3, 63, .flags=FLAGS },
- { "chroma_msize_y", "set chroma matrix y size", OFFSET(chroma_msize_y), AV_OPT_TYPE_INT, {.i64=5}, 3, 63, .flags=FLAGS },
- { "cy" , "set chroma matrix y size", OFFSET(chroma_msize_y), AV_OPT_TYPE_INT, {.i64=5}, 3, 63, .flags=FLAGS },
- { "chroma_amount", "set chroma effect strenght", OFFSET(chroma_amount), AV_OPT_TYPE_DOUBLE, {.dbl=0.0}, -DBL_MAX, DBL_MAX, .flags=FLAGS },
- { "ca", "set chroma effect strenght", OFFSET(chroma_amount), AV_OPT_TYPE_DOUBLE, {.dbl=0.0}, -DBL_MAX, DBL_MAX, .flags=FLAGS },
-
- { NULL }
-};
-
-AVFILTER_DEFINE_CLASS(unsharp);
+#include "unsharp.h"
+#include "unsharp_opencl.h"
static void apply_unsharp( uint8_t *dst, int dst_stride,
const uint8_t *src, int src_stride,
- int width, int height, FilterParam *fp)
+ int width, int height, UnsharpFilterParam *fp)
{
uint32_t **sc = fp->sc;
uint32_t sr[MAX_MATRIX_SIZE - 1], tmp1, tmp2;
@@ -113,11 +65,7 @@ static void apply_unsharp( uint8_t *dst, int dst_stride,
const int32_t halfscale = fp->halfscale;
if (!amount) {
- if (dst_stride == src_stride)
- memcpy(dst, src, src_stride * height);
- else
- for (y = 0; y < height; y++, dst += dst_stride, src += src_stride)
- memcpy(dst, src, width);
+ av_image_copy_plane(dst, dst_stride, src, src_stride, width, height);
return;
}
@@ -154,7 +102,25 @@ static void apply_unsharp( uint8_t *dst, int dst_stride,
}
}
-static void set_filter_param(FilterParam *fp, int msize_x, int msize_y, double amount)
+static int apply_unsharp_c(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
+{
+ AVFilterLink *inlink = ctx->inputs[0];
+ UnsharpContext *unsharp = ctx->priv;
+ int i, plane_w[3], plane_h[3];
+ UnsharpFilterParam *fp[3];
+ plane_w[0] = inlink->w;
+ plane_w[1] = plane_w[2] = FF_CEIL_RSHIFT(inlink->w, unsharp->hsub);
+ plane_h[0] = inlink->h;
+ plane_h[1] = plane_h[2] = FF_CEIL_RSHIFT(inlink->h, unsharp->vsub);
+ fp[0] = &unsharp->luma;
+ fp[1] = fp[2] = &unsharp->chroma;
+ for (i = 0; i < 3; i++) {
+ apply_unsharp(out->data[i], out->linesize[i], in->data[i], in->linesize[i], plane_w[i], plane_h[i], fp[i]);
+ }
+ return 0;
+}
+
+static void set_filter_param(UnsharpFilterParam *fp, int msize_x, int msize_y, float amount)
{
fp->msize_x = msize_x;
fp->msize_y = msize_y;
@@ -166,13 +132,26 @@ static void set_filter_param(FilterParam *fp, int msize_x, int msize_y, double a
fp->halfscale = 1 << (fp->scalebits - 1);
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
+ int ret = 0;
UnsharpContext *unsharp = ctx->priv;
- set_filter_param(&unsharp->luma, unsharp->luma_msize_x, unsharp->luma_msize_y, unsharp->luma_amount);
- set_filter_param(&unsharp->chroma, unsharp->chroma_msize_x, unsharp->chroma_msize_y, unsharp->chroma_amount);
+ set_filter_param(&unsharp->luma, unsharp->lmsize_x, unsharp->lmsize_y, unsharp->lamount);
+ set_filter_param(&unsharp->chroma, unsharp->cmsize_x, unsharp->cmsize_y, unsharp->camount);
+
+ unsharp->apply_unsharp = apply_unsharp_c;
+ if (!CONFIG_OPENCL && unsharp->opencl) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL support was not enabled in this build, cannot be selected\n");
+ return AVERROR(EINVAL);
+ }
+ if (CONFIG_OPENCL && unsharp->opencl) {
+ unsharp->apply_unsharp = ff_opencl_apply_unsharp;
+ ret = ff_opencl_unsharp_init(ctx);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
@@ -189,7 +168,7 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
-static int init_filter_param(AVFilterContext *ctx, FilterParam *fp, const char *effect_type, int width)
+static int init_filter_param(AVFilterContext *ctx, UnsharpFilterParam *fp, const char *effect_type, int width)
{
int z;
const char *effect = fp->amount == 0 ? "none" : fp->amount < 0 ? "blur" : "sharpen";
@@ -223,14 +202,14 @@ static int config_props(AVFilterLink *link)
ret = init_filter_param(link->dst, &unsharp->luma, "luma", link->w);
if (ret < 0)
return ret;
- ret = init_filter_param(link->dst, &unsharp->chroma, "chroma", SHIFTUP(link->w, unsharp->hsub));
+ ret = init_filter_param(link->dst, &unsharp->chroma, "chroma", FF_CEIL_RSHIFT(link->w, unsharp->hsub));
if (ret < 0)
return ret;
return 0;
}
-static void free_filter_param(FilterParam *fp)
+static void free_filter_param(UnsharpFilterParam *fp)
{
int z;
@@ -242,6 +221,10 @@ static av_cold void uninit(AVFilterContext *ctx)
{
UnsharpContext *unsharp = ctx->priv;
+ if (CONFIG_OPENCL && unsharp->opencl) {
+ ff_opencl_unsharp_uninit(ctx);
+ }
+
free_filter_param(&unsharp->luma);
free_filter_param(&unsharp->chroma);
}
@@ -251,8 +234,7 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
UnsharpContext *unsharp = link->dst->priv;
AVFilterLink *outlink = link->dst->outputs[0];
AVFrame *out;
- int cw = SHIFTUP(link->w, unsharp->hsub);
- int ch = SHIFTUP(link->h, unsharp->vsub);
+ int ret = 0;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
@@ -260,15 +242,44 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
+ if (CONFIG_OPENCL && unsharp->opencl) {
+ ret = ff_opencl_unsharp_process_inout_buf(link->dst, in, out);
+ if (ret < 0)
+ goto end;
+ }
- apply_unsharp(out->data[0], out->linesize[0], in->data[0], in->linesize[0], link->w, link->h, &unsharp->luma);
- apply_unsharp(out->data[1], out->linesize[1], in->data[1], in->linesize[1], cw, ch, &unsharp->chroma);
- apply_unsharp(out->data[2], out->linesize[2], in->data[2], in->linesize[2], cw, ch, &unsharp->chroma);
-
+ ret = unsharp->apply_unsharp(link->dst, in, out);
+end:
av_frame_free(&in);
+
+ if (ret < 0)
+ return ret;
return ff_filter_frame(outlink, out);
}
+#define OFFSET(x) offsetof(UnsharpContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+#define MIN_SIZE 3
+#define MAX_SIZE 63
+static const AVOption unsharp_options[] = {
+ { "luma_msize_x", "set luma matrix horizontal size", OFFSET(lmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "lx", "set luma matrix horizontal size", OFFSET(lmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "luma_msize_y", "set luma matrix vertical size", OFFSET(lmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "ly", "set luma matrix vertical size", OFFSET(lmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "luma_amount", "set luma effect strength", OFFSET(lamount), AV_OPT_TYPE_FLOAT, { .dbl = 1 }, -2, 5, FLAGS },
+ { "la", "set luma effect strength", OFFSET(lamount), AV_OPT_TYPE_FLOAT, { .dbl = 1 }, -2, 5, FLAGS },
+ { "chroma_msize_x", "set chroma matrix horizontal size", OFFSET(cmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "cx", "set chroma matrix horizontal size", OFFSET(cmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "chroma_msize_y", "set chroma matrix vertical size", OFFSET(cmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "cy", "set chroma matrix vertical size", OFFSET(cmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "chroma_amount", "set chroma effect strength", OFFSET(camount), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, -2, 5, FLAGS },
+ { "ca", "set chroma effect strength", OFFSET(camount), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, -2, 5, FLAGS },
+ { "opencl", "use OpenCL filtering capabilities", OFFSET(opencl), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(unsharp);
+
static const AVFilterPad avfilter_vf_unsharp_inputs[] = {
{
.name = "default",
@@ -287,26 +298,15 @@ static const AVFilterPad avfilter_vf_unsharp_outputs[] = {
{ NULL }
};
-static const char *const shorthand[] = {
- "luma_msize_x", "luma_msize_y", "luma_amount",
- "chroma_msize_x", "chroma_msize_y", "chroma_amount",
- NULL
-};
-
-AVFilter avfilter_vf_unsharp = {
- .name = "unsharp",
- .description = NULL_IF_CONFIG_SMALL("Sharpen or blur the input video."),
-
- .priv_size = sizeof(UnsharpContext),
-
- .init = init,
- .uninit = uninit,
+AVFilter ff_vf_unsharp = {
+ .name = "unsharp",
+ .description = NULL_IF_CONFIG_SMALL("Sharpen or blur the input video."),
+ .priv_size = sizeof(UnsharpContext),
+ .priv_class = &unsharp_class,
+ .init = init,
+ .uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_unsharp_inputs,
-
- .outputs = avfilter_vf_unsharp_outputs,
-
- .priv_class = &unsharp_class,
- .shorthand = shorthand,
+ .inputs = avfilter_vf_unsharp_inputs,
+ .outputs = avfilter_vf_unsharp_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/ffmpeg/libavfilter/vf_vflip.c b/ffmpeg/libavfilter/vf_vflip.c
index 28fa800..f6908e4 100644
--- a/ffmpeg/libavfilter/vf_vflip.c
+++ b/ffmpeg/libavfilter/vf_vflip.c
@@ -55,9 +55,10 @@ static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h)
for (i = 0; i < 4; i ++) {
int vsub = i == 1 || i == 2 ? flip->vsub : 0;
+ int height = FF_CEIL_RSHIFT(h, vsub);
if (frame->data[i]) {
- frame->data[i] += (((h + (1<<vsub) - 1) >> vsub) - 1) * frame->linesize[i];
+ frame->data[i] += (height - 1) * frame->linesize[i];
frame->linesize[i] = -frame->linesize[i];
}
}
@@ -72,9 +73,10 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
for (i = 0; i < 4; i ++) {
int vsub = i == 1 || i == 2 ? flip->vsub : 0;
+ int height = FF_CEIL_RSHIFT(link->h, vsub);
if (frame->data[i]) {
- frame->data[i] += (((link->h + (1<<vsub)-1)>> vsub)-1) * frame->linesize[i];
+ frame->data[i] += (height - 1) * frame->linesize[i];
frame->linesize[i] = -frame->linesize[i];
}
}
@@ -100,12 +102,10 @@ static const AVFilterPad avfilter_vf_vflip_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vf_vflip = {
- .name = "vflip",
+AVFilter ff_vf_vflip = {
+ .name = "vflip",
.description = NULL_IF_CONFIG_SMALL("Flip the input video vertically."),
-
- .priv_size = sizeof(FlipContext),
-
- .inputs = avfilter_vf_vflip_inputs,
- .outputs = avfilter_vf_vflip_outputs,
+ .priv_size = sizeof(FlipContext),
+ .inputs = avfilter_vf_vflip_inputs,
+ .outputs = avfilter_vf_vflip_outputs,
};
diff --git a/ffmpeg/libavfilter/vf_yadif.c b/ffmpeg/libavfilter/vf_yadif.c
index 80076f7..40383a4 100644
--- a/ffmpeg/libavfilter/vf_yadif.c
+++ b/ffmpeg/libavfilter/vf_yadif.c
@@ -22,22 +22,60 @@
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/x86/asm.h"
+#include "libavutil/x86/cpu.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "yadif.h"
-#undef NDEBUG
-#include <assert.h>
+
+typedef struct ThreadData {
+ AVFrame *frame;
+ int plane;
+ int w, h;
+ int parity;
+ int tff;
+} ThreadData;
+
+typedef struct YADIFContext {
+ const AVClass *class;
+
+ enum YADIFMode mode;
+ enum YADIFParity parity;
+ enum YADIFDeint deint;
+
+ int frame_pending;
+
+ AVFrame *cur;
+ AVFrame *next;
+ AVFrame *prev;
+ AVFrame *out;
+
+ /**
+ * Required alignment for filter_line
+ */
+ void (*filter_line)(void *dst,
+ void *prev, void *cur, void *next,
+ int w, int prefs, int mrefs, int parity, int mode);
+ void (*filter_edges)(void *dst, void *prev, void *cur, void *next,
+ int w, int prefs, int mrefs, int parity, int mode);
+
+ const AVPixFmtDescriptor *csp;
+ int eof;
+ uint8_t *temp_line;
+ int temp_line_size;
+} YADIFContext;
#define CHECK(j)\
- { int score = FFABS(cur[mrefs - 1 + (j)] - cur[prefs - 1 - (j)])\
- + FFABS(cur[mrefs +(j)] - cur[prefs -(j)])\
- + FFABS(cur[mrefs + 1 + (j)] - cur[prefs + 1 - (j)]);\
+ { int score = FFABS(cur[mrefs - 1 + j] - cur[prefs - 1 - j])\
+ + FFABS(cur[mrefs + j] - cur[prefs - j])\
+ + FFABS(cur[mrefs + 1 + j] - cur[prefs + 1 - j]);\
if (score < spatial_score) {\
spatial_score= score;\
- spatial_pred= (cur[mrefs +(j)] + cur[prefs -(j)])>>1;\
+ spatial_pred= (cur[mrefs + j] + cur[prefs - j])>>1;\
/* The is_not_edge argument here controls when the code will enter a branch
* which reads up to and including x-3 and x+3. */
@@ -60,7 +98,7 @@
CHECK( 1) CHECK( 2) }} }} \
}\
\
- if (mode < 2) { \
+ if (!(mode&2)) { \
int b = (prev2[2 * mrefs] + next2[2 * mrefs])>>1; \
int f = (prev2[2 * prefs] + next2[2 * prefs])>>1; \
int max = FFMAX3(d - e, d - c, FFMIN(b - c, f - e)); \
@@ -103,6 +141,7 @@ static void filter_line_c(void *dst1,
FILTER(0, w, 1)
}
+#define MAX_ALIGN 8
static void filter_edges(void *dst1, void *prev1, void *cur1, void *next1,
int w, int prefs, int mrefs, int parity, int mode)
{
@@ -118,13 +157,14 @@ static void filter_edges(void *dst1, void *prev1, void *cur1, void *next1,
* for is_not_edge should let the compiler ignore the whole branch. */
FILTER(0, 3, 0)
- dst = (uint8_t*)dst1 + w - 3;
- prev = (uint8_t*)prev1 + w - 3;
- cur = (uint8_t*)cur1 + w - 3;
- next = (uint8_t*)next1 + w - 3;
+ dst = (uint8_t*)dst1 + w - (MAX_ALIGN-1);
+ prev = (uint8_t*)prev1 + w - (MAX_ALIGN-1);
+ cur = (uint8_t*)cur1 + w - (MAX_ALIGN-1);
+ next = (uint8_t*)next1 + w - (MAX_ALIGN-1);
prev2 = (uint8_t*)(parity ? prev : cur);
next2 = (uint8_t*)(parity ? cur : next);
+ FILTER(w - (MAX_ALIGN-1), w - 3, 1)
FILTER(w - 3, w, 0)
}
@@ -162,60 +202,77 @@ static void filter_edges_16bit(void *dst1, void *prev1, void *cur1, void *next1,
FILTER(0, 3, 0)
- dst = (uint16_t*)dst1 + w - 3;
- prev = (uint16_t*)prev1 + w - 3;
- cur = (uint16_t*)cur1 + w - 3;
- next = (uint16_t*)next1 + w - 3;
+ dst = (uint16_t*)dst1 + w - (MAX_ALIGN/2-1);
+ prev = (uint16_t*)prev1 + w - (MAX_ALIGN/2-1);
+ cur = (uint16_t*)cur1 + w - (MAX_ALIGN/2-1);
+ next = (uint16_t*)next1 + w - (MAX_ALIGN/2-1);
prev2 = (uint16_t*)(parity ? prev : cur);
next2 = (uint16_t*)(parity ? cur : next);
+ FILTER(w - (MAX_ALIGN/2-1), w - 3, 1)
FILTER(w - 3, w, 0)
}
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ YADIFContext *s = ctx->priv;
+ ThreadData *td = arg;
+ int refs = s->cur->linesize[td->plane];
+ int df = (s->csp->comp[td->plane].depth_minus1 + 8) / 8;
+ int pix_3 = 3 * df;
+ int slice_start = (td->h * jobnr ) / nb_jobs;
+ int slice_end = (td->h * (jobnr+1)) / nb_jobs;
+ int y;
+
+ /* filtering reads 3 pixels to the left/right; to avoid invalid reads,
+ * we need to call the c variant which avoids this for border pixels
+ */
+ for (y = slice_start; y < slice_end; y++) {
+ if ((y ^ td->parity) & 1) {
+ uint8_t *prev = &s->prev->data[td->plane][y * refs];
+ uint8_t *cur = &s->cur ->data[td->plane][y * refs];
+ uint8_t *next = &s->next->data[td->plane][y * refs];
+ uint8_t *dst = &td->frame->data[td->plane][y * td->frame->linesize[td->plane]];
+ int mode = y == 1 || y + 2 == td->h ? 2 : s->mode;
+ s->filter_line(dst + pix_3, prev + pix_3, cur + pix_3,
+ next + pix_3, td->w - (3 + MAX_ALIGN/df-1),
+ y + 1 < td->h ? refs : -refs,
+ y ? -refs : refs,
+ td->parity ^ td->tff, mode);
+ s->filter_edges(dst, prev, cur, next, td->w,
+ y + 1 < td->h ? refs : -refs,
+ y ? -refs : refs,
+ td->parity ^ td->tff, mode);
+ } else {
+ memcpy(&td->frame->data[td->plane][y * td->frame->linesize[td->plane]],
+ &s->cur->data[td->plane][y * refs], td->w * df);
+ }
+ }
+ return 0;
+}
+
static void filter(AVFilterContext *ctx, AVFrame *dstpic,
int parity, int tff)
{
YADIFContext *yadif = ctx->priv;
- int y, i;
+ ThreadData td = { .frame = dstpic, .parity = parity, .tff = tff };
+ int i;
for (i = 0; i < yadif->csp->nb_components; i++) {
int w = dstpic->width;
int h = dstpic->height;
- int refs = yadif->cur->linesize[i];
- int df = (yadif->csp->comp[i].depth_minus1 + 8) / 8;
- int pix_3 = 3 * df;
if (i == 1 || i == 2) {
- /* Why is this not part of the per-plane description thing? */
- w >>= yadif->csp->log2_chroma_w;
- h >>= yadif->csp->log2_chroma_h;
+ w = FF_CEIL_RSHIFT(w, yadif->csp->log2_chroma_w);
+ h = FF_CEIL_RSHIFT(h, yadif->csp->log2_chroma_h);
}
- /* filtering reads 3 pixels to the left/right; to avoid invalid reads,
- * we need to call the c variant which avoids this for border pixels
- */
-
- for (y = 0; y < h; y++) {
- if ((y ^ parity) & 1) {
- uint8_t *prev = &yadif->prev->data[i][y * refs];
- uint8_t *cur = &yadif->cur ->data[i][y * refs];
- uint8_t *next = &yadif->next->data[i][y * refs];
- uint8_t *dst = &dstpic->data[i][y * dstpic->linesize[i]];
- int mode = y == 1 || y + 2 == h ? 2 : yadif->mode;
- yadif->filter_line(dst + pix_3, prev + pix_3, cur + pix_3,
- next + pix_3, w - 6,
- y + 1 < h ? refs : -refs,
- y ? -refs : refs,
- parity ^ tff, mode);
- yadif->filter_edges(dst, prev, cur, next, w,
- y + 1 < h ? refs : -refs,
- y ? -refs : refs,
- parity ^ tff, mode);
- } else {
- memcpy(&dstpic->data[i][y * dstpic->linesize[i]],
- &yadif->cur->data[i][y * refs], w * df);
- }
- }
+
+ td.w = w;
+ td.h = h;
+ td.plane = i;
+
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(h, ctx->graph->nb_threads));
}
emms_c();
@@ -261,6 +318,29 @@ static int return_frame(AVFilterContext *ctx, int is_second)
return ret;
}
+static int checkstride(YADIFContext *yadif, const AVFrame *a, const AVFrame *b)
+{
+ int i;
+ for (i = 0; i < yadif->csp->nb_components; i++)
+ if (a->linesize[i] != b->linesize[i])
+ return 1;
+ return 0;
+}
+
+static void fixstride(AVFilterLink *link, AVFrame *f)
+{
+ AVFrame *dst = ff_default_get_video_buffer(link, f->width, f->height);
+ if(!dst)
+ return;
+ av_frame_copy_props(dst, f);
+ av_image_copy(dst->data, dst->linesize,
+ (const uint8_t **)f->data, f->linesize,
+ dst->format, dst->width, dst->height);
+ av_frame_unref(f);
+ av_frame_move_ref(f, dst);
+ av_frame_free(&dst);
+}
+
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
AVFilterContext *ctx = link->dst;
@@ -280,7 +360,20 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
if (!yadif->cur)
return 0;
- if (yadif->deint && !yadif->cur->interlaced_frame) {
+ if (checkstride(yadif, yadif->next, yadif->cur)) {
+ av_log(ctx, AV_LOG_VERBOSE, "Reallocating frame due to differing stride\n");
+ fixstride(link, yadif->next);
+ }
+ if (checkstride(yadif, yadif->next, yadif->cur))
+ fixstride(link, yadif->cur);
+ if (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))
+ fixstride(link, yadif->prev);
+ if (checkstride(yadif, yadif->next, yadif->cur) || (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to reallocate frame\n");
+ return -1;
+ }
+
+ if ((yadif->deint && !yadif->cur->interlaced_frame) || ctx->is_disabled) {
yadif->out = av_frame_clone(yadif->cur);
if (!yadif->out)
return AVERROR(ENOMEM);
@@ -344,32 +437,6 @@ static int request_frame(AVFilterLink *link)
return 0;
}
-#define OFFSET(x) offsetof(YADIFContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
-
-#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }
-
-static const AVOption yadif_options[] = {
- { "mode", "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=YADIF_MODE_SEND_FRAME}, 0, 3, FLAGS, "mode"},
- CONST("send_frame", "send one frame for each frame", YADIF_MODE_SEND_FRAME, "mode"),
- CONST("send_field", "send one frame for each field", YADIF_MODE_SEND_FIELD, "mode"),
- CONST("send_frame_nospatial", "send one frame for each frame, but skip spatial interlacing check", YADIF_MODE_SEND_FRAME_NOSPATIAL, "mode"),
- CONST("send_field_nospatial", "send one frame for each field, but skip spatial interlacing check", YADIF_MODE_SEND_FIELD_NOSPATIAL, "mode"),
-
- { "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=YADIF_PARITY_AUTO}, -1, 1, FLAGS, "parity" },
- CONST("tff", "assume top field first", YADIF_PARITY_TFF, "parity"),
- CONST("bff", "assume bottom field first", YADIF_PARITY_BFF, "parity"),
- CONST("auto", "auto detect parity", YADIF_PARITY_AUTO, "parity"),
-
- { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=YADIF_DEINT_ALL}, 0, 1, FLAGS, "deint" },
- CONST("all", "deinterlace all frames", YADIF_DEINT_ALL, "deint"),
- CONST("interlaced", "only deinterlace frames marked as interlaced", YADIF_DEINT_INTERLACED, "deint"),
-
- {NULL},
-};
-
-AVFILTER_DEFINE_CLASS(yadif);
-
static av_cold void uninit(AVFilterContext *ctx)
{
YADIFContext *yadif = ctx->priv;
@@ -391,27 +458,29 @@ static int query_formats(AVFilterContext *ctx)
AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ444P,
- AV_NE( AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE ),
+ AV_PIX_FMT_GRAY16,
AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUVJ440P,
- AV_NE( AV_PIX_FMT_YUV420P9BE, AV_PIX_FMT_YUV420P9LE ),
- AV_NE( AV_PIX_FMT_YUV422P9BE, AV_PIX_FMT_YUV422P9LE ),
- AV_NE( AV_PIX_FMT_YUV444P9BE, AV_PIX_FMT_YUV444P9LE ),
- AV_NE( AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUV420P10LE ),
- AV_NE( AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUV422P10LE ),
- AV_NE( AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUV444P10LE ),
- AV_NE( AV_PIX_FMT_YUV420P12BE, AV_PIX_FMT_YUV420P12LE ),
- AV_NE( AV_PIX_FMT_YUV422P12BE, AV_PIX_FMT_YUV422P12LE ),
- AV_NE( AV_PIX_FMT_YUV444P12BE, AV_PIX_FMT_YUV444P12LE ),
- AV_NE( AV_PIX_FMT_YUV420P14BE, AV_PIX_FMT_YUV420P14LE ),
- AV_NE( AV_PIX_FMT_YUV422P14BE, AV_PIX_FMT_YUV422P14LE ),
- AV_NE( AV_PIX_FMT_YUV444P14BE, AV_PIX_FMT_YUV444P14LE ),
- AV_NE( AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUV420P16LE ),
- AV_NE( AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUV422P16LE ),
- AV_NE( AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUV444P16LE ),
+ AV_PIX_FMT_YUV420P9,
+ AV_PIX_FMT_YUV422P9,
+ AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10,
+ AV_PIX_FMT_YUV422P10,
+ AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P12,
+ AV_PIX_FMT_YUV422P12,
+ AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_YUV420P14,
+ AV_PIX_FMT_YUV422P14,
+ AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_YUV420P16,
+ AV_PIX_FMT_YUV422P16,
+ AV_PIX_FMT_YUV444P16,
AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_YUVA422P,
AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_GBRAP,
AV_PIX_FMT_NONE
};
@@ -420,20 +489,13 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
-{
- YADIFContext *yadif = ctx->priv;
-
- av_log(ctx, AV_LOG_VERBOSE, "mode:%d parity:%d deint:%d\n",
- yadif->mode, yadif->parity, yadif->deint);
-
- return 0;
-}
-
static int config_props(AVFilterLink *link)
{
AVFilterContext *ctx = link->src;
YADIFContext *s = link->src->priv;
+ int cpu_flags = av_get_cpu_flags();
+ int bit_depth = (!s->csp) ? 8
+ : s->csp->comp[0].depth_minus1 + 1;
link->time_base.num = link->src->inputs[0]->time_base.num;
link->time_base.den = link->src->inputs[0]->time_base.den * 2;
@@ -457,45 +519,88 @@ static int config_props(AVFilterLink *link)
s->filter_edges = filter_edges;
}
- if (ARCH_X86)
- ff_yadif_init_x86(s);
-
+#if HAVE_YASM
+ if (bit_depth >= 15) {
+ if (EXTERNAL_SSE4(cpu_flags))
+ s->filter_line = ff_yadif_filter_line_16bit_sse4;
+ else if (EXTERNAL_SSSE3(cpu_flags))
+ s->filter_line = ff_yadif_filter_line_16bit_ssse3;
+ else if (EXTERNAL_SSE2(cpu_flags))
+ s->filter_line = ff_yadif_filter_line_16bit_sse2;
+#if ARCH_X86_32
+ else if (EXTERNAL_MMXEXT(cpu_flags))
+ s->filter_line = ff_yadif_filter_line_16bit_mmxext;
+#endif /* ARCH_X86_32 */
+ } else if ( bit_depth >= 9 && bit_depth <= 14) {
+ if (EXTERNAL_SSSE3(cpu_flags))
+ s->filter_line = ff_yadif_filter_line_10bit_ssse3;
+ else if (EXTERNAL_SSE2(cpu_flags))
+ s->filter_line = ff_yadif_filter_line_10bit_sse2;
+#if ARCH_X86_32
+ else if (EXTERNAL_MMXEXT(cpu_flags))
+ s->filter_line = ff_yadif_filter_line_10bit_mmxext;
+#endif /* ARCH_X86_32 */
+ } else {
+ if (EXTERNAL_SSSE3(cpu_flags))
+ s->filter_line = ff_yadif_filter_line_ssse3;
+ else if (EXTERNAL_SSE2(cpu_flags))
+ s->filter_line = ff_yadif_filter_line_sse2;
+#if ARCH_X86_32
+ else if (EXTERNAL_MMXEXT(cpu_flags))
+ s->filter_line = ff_yadif_filter_line_mmxext;
+#endif /* ARCH_X86_32 */
+ }
+#endif /* HAVE_YASM */
return 0;
}
-static const AVFilterPad avfilter_vf_yadif_inputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame,
- },
- { NULL }
-};
-static const AVFilterPad avfilter_vf_yadif_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .request_frame = request_frame,
- .config_props = config_props,
- },
+#define OFFSET(x) offsetof(YADIFContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }
+
+static const AVOption yadif_options[] = {
+ { "mode", "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=YADIF_MODE_SEND_FRAME}, 0, 3, FLAGS, "mode"},
+ CONST("send_frame", "send one frame for each frame", YADIF_MODE_SEND_FRAME, "mode"),
+ CONST("send_field", "send one frame for each field", YADIF_MODE_SEND_FIELD, "mode"),
+ CONST("send_frame_nospatial", "send one frame for each frame, but skip spatial interlacing check", YADIF_MODE_SEND_FRAME_NOSPATIAL, "mode"),
+ CONST("send_field_nospatial", "send one frame for each field, but skip spatial interlacing check", YADIF_MODE_SEND_FIELD_NOSPATIAL, "mode"),
+
+ { "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=YADIF_PARITY_AUTO}, -1, 1, FLAGS, "parity" },
+ CONST("tff", "assume top field first", YADIF_PARITY_TFF, "parity"),
+ CONST("bff", "assume bottom field first", YADIF_PARITY_BFF, "parity"),
+ CONST("auto", "auto detect parity", YADIF_PARITY_AUTO, "parity"),
+
+ { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=YADIF_DEINT_ALL}, 0, 1, FLAGS, "deint" },
+ CONST("all", "deinterlace all frames", YADIF_DEINT_ALL, "deint"),
+ CONST("interlaced", "only deinterlace frames marked as interlaced", YADIF_DEINT_INTERLACED, "deint"),
+
{ NULL }
};
-static const char *const shorthand[] = { "mode", "parity", "deint", NULL };
+AVFILTER_DEFINE_CLASS(yadif);
-AVFilter avfilter_vf_yadif = {
+AVFilter ff_vf_yadif = {
.name = "yadif",
.description = NULL_IF_CONFIG_SMALL("Deinterlace the input image."),
-
.priv_size = sizeof(YADIFContext),
- .init = init,
+ .priv_class = &yadif_class,
.uninit = uninit,
.query_formats = query_formats,
- .inputs = avfilter_vf_yadif_inputs,
- .outputs = avfilter_vf_yadif_outputs,
+ .inputs = (const AVFilterPad[]) {{ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { .name = NULL}},
+
+ .outputs = (const AVFilterPad[]) {{ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { .name = NULL}},
- .priv_class = &yadif_class,
- .shorthand = shorthand,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};
diff --git a/ffmpeg/libavfilter/video.c b/ffmpeg/libavfilter/video.c
index b274070..74c9161 100644
--- a/ffmpeg/libavfilter/video.c
+++ b/ffmpeg/libavfilter/video.c
@@ -45,32 +45,6 @@ AVFrame *ff_default_get_video_buffer(AVFilterLink *link, int w, int h)
AVFrame *frame = av_frame_alloc();
int ret;
-#if 0 //POOL
- AVFilterPool *pool = link->pool;
- if (pool) {
- for (i = 0; i < POOL_SIZE; i++) {
- picref = pool->pic[i];
- if (picref && picref->buf->format == link->format && picref->buf->w == w && picref->buf->h == h) {
- AVFilterBuffer *pic = picref->buf;
- pool->pic[i] = NULL;
- pool->count--;
- av_assert0(!picref->video->qp_table);
- picref->video->w = w;
- picref->video->h = h;
- picref->perms = full_perms;
- picref->format = link->format;
- pic->refcount = 1;
- memcpy(picref->data, pic->data, sizeof(picref->data));
- memcpy(picref->linesize, pic->linesize, sizeof(picref->linesize));
- pool->refcount++;
- return picref;
- }
- }
- } else {
- pool = link->pool = av_mallocz(sizeof(AVFilterPool));
- pool->refcount = 1;
- }
-#endif
if (!frame)
return NULL;
@@ -82,14 +56,6 @@ AVFrame *ff_default_get_video_buffer(AVFilterLink *link, int w, int h)
if (ret < 0)
av_frame_free(&frame);
-#if 0 //POOL
- memset(data[0], 128, i);
-
- picref->buf->priv = pool;
- picref->buf->free = NULL;
- pool->refcount++;
-#endif
-
return frame;
}
diff --git a/ffmpeg/libavfilter/vsink_nullsink.c b/ffmpeg/libavfilter/vsink_nullsink.c
index d498aab..281721b 100644
--- a/ffmpeg/libavfilter/vsink_nullsink.c
+++ b/ffmpeg/libavfilter/vsink_nullsink.c
@@ -35,7 +35,7 @@ static const AVFilterPad avfilter_vsink_nullsink_inputs[] = {
{ NULL },
};
-AVFilter avfilter_vsink_nullsink = {
+AVFilter ff_vsink_nullsink = {
.name = "nullsink",
.description = NULL_IF_CONFIG_SMALL("Do absolutely nothing with the input video."),
diff --git a/ffmpeg/libavfilter/vsrc_cellauto.c b/ffmpeg/libavfilter/vsrc_cellauto.c
index c783efd..95eabc1 100644
--- a/ffmpeg/libavfilter/vsrc_cellauto.c
+++ b/ffmpeg/libavfilter/vsrc_cellauto.c
@@ -47,8 +47,7 @@ typedef struct {
int buf_prev_row_idx, buf_row_idx;
uint8_t rule;
uint64_t pts;
- AVRational time_base;
- char *rate; ///< video frame rate
+ AVRational frame_rate;
double random_fill_ratio;
uint32_t random_seed;
int stitch, scroll, start_full;
@@ -65,8 +64,8 @@ static const AVOption cellauto_options[] = {
{ "f", "read initial pattern from file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "pattern", "set initial pattern", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "p", "set initial pattern", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
- { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, FLAGS },
- { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, FLAGS },
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
{ "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS },
{ "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS },
{ "rule", "set rule", OFFSET(rule), AV_OPT_TYPE_INT, {.i64 = 110}, 0, 255, FLAGS },
@@ -78,7 +77,7 @@ static const AVOption cellauto_options[] = {
{ "start_full", "start filling the whole video", OFFSET(start_full), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
{ "full", "start filling the whole video", OFFSET(start_full), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS },
{ "stitch", "stitch boundaries", OFFSET(stitch), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS },
- { NULL },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(cellauto);
@@ -160,29 +159,14 @@ static int init_pattern_from_file(AVFilterContext *ctx)
return init_pattern_from_string(ctx);
}
-static int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
CellAutoContext *cellauto = ctx->priv;
- AVRational frame_rate;
int ret;
- cellauto->class = &cellauto_class;
- av_opt_set_defaults(cellauto);
-
- if ((ret = av_set_options_string(cellauto, args, "=", ":")) < 0)
- return ret;
-
- if ((ret = av_parse_video_rate(&frame_rate, cellauto->rate)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: %s\n", cellauto->rate);
- return AVERROR(EINVAL);
- }
-
if (!cellauto->w && !cellauto->filename && !cellauto->pattern)
av_opt_set(cellauto, "size", "320x518", 0);
- cellauto->time_base.num = frame_rate.den;
- cellauto->time_base.den = frame_rate.num;
-
if (cellauto->filename && cellauto->pattern) {
av_log(ctx, AV_LOG_ERROR, "Only one of the filename or pattern options can be used\n");
return AVERROR(EINVAL);
@@ -215,7 +199,7 @@ static int init(AVFilterContext *ctx, const char *args)
av_log(ctx, AV_LOG_VERBOSE,
"s:%dx%d r:%d/%d rule:%d stitch:%d scroll:%d full:%d seed:%u\n",
- cellauto->w, cellauto->h, frame_rate.num, frame_rate.den,
+ cellauto->w, cellauto->h, cellauto->frame_rate.num, cellauto->frame_rate.den,
cellauto->rule, cellauto->stitch, cellauto->scroll, cellauto->start_full,
cellauto->random_seed);
return 0;
@@ -236,7 +220,7 @@ static int config_props(AVFilterLink *outlink)
outlink->w = cellauto->w;
outlink->h = cellauto->h;
- outlink->time_base = cellauto->time_base;
+ outlink->time_base = av_inv_q(cellauto->frame_rate);
return 0;
}
@@ -304,6 +288,8 @@ static int request_frame(AVFilterLink *outlink)
{
CellAutoContext *cellauto = outlink->src->priv;
AVFrame *picref = ff_get_video_buffer(outlink, cellauto->w, cellauto->h);
+ if (!picref)
+ return AVERROR(ENOMEM);
picref->sample_aspect_ratio = (AVRational) {1, 1};
if (cellauto->generation == 0 && cellauto->start_full) {
int i;
@@ -330,22 +316,22 @@ static int query_formats(AVFilterContext *ctx)
static const AVFilterPad cellauto_outputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .request_frame = request_frame,
- .config_props = config_props,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_props,
},
{ NULL }
};
-AVFilter avfilter_vsrc_cellauto = {
- .name = "cellauto",
- .description = NULL_IF_CONFIG_SMALL("Create pattern generated by an elementary cellular automaton."),
- .priv_size = sizeof(CellAutoContext),
- .init = init,
- .uninit = uninit,
+AVFilter ff_vsrc_cellauto = {
+ .name = "cellauto",
+ .description = NULL_IF_CONFIG_SMALL("Create pattern generated by an elementary cellular automaton."),
+ .priv_size = sizeof(CellAutoContext),
+ .priv_class = &cellauto_class,
+ .init = init,
+ .uninit = uninit,
.query_formats = query_formats,
.inputs = NULL,
.outputs = cellauto_outputs,
- .priv_class = &cellauto_class,
};
diff --git a/ffmpeg/libavfilter/vsrc_life.c b/ffmpeg/libavfilter/vsrc_life.c
index 584d466..029e1bb 100644
--- a/ffmpeg/libavfilter/vsrc_life.c
+++ b/ffmpeg/libavfilter/vsrc_life.c
@@ -60,15 +60,11 @@ typedef struct {
uint16_t stay_rule; ///< encode the behavior for filled cells
uint16_t born_rule; ///< encode the behavior for empty cells
uint64_t pts;
- AVRational time_base;
- char *rate; ///< video frame rate
+ AVRational frame_rate;
double random_fill_ratio;
uint32_t random_seed;
int stitch;
int mold;
- char *life_color_str;
- char *death_color_str;
- char *mold_color_str;
uint8_t life_color[4];
uint8_t death_color[4];
uint8_t mold_color[4];
@@ -85,8 +81,8 @@ static const AVOption life_options[] = {
{ "f", "set source file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS },
{ "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS },
- { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, FLAGS },
- { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, FLAGS },
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
{ "rule", "set rule", OFFSET(rule_str), AV_OPT_TYPE_STRING, {.str = "B3/S23"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "random_fill_ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl=1/M_PHI}, 0, 1, FLAGS },
{ "ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl=1/M_PHI}, 0, 1, FLAGS },
@@ -94,10 +90,10 @@ static const AVOption life_options[] = {
{ "seed", "set the seed for filling the initial grid randomly", OFFSET(random_seed), AV_OPT_TYPE_INT, {.i64=-1}, -1, UINT32_MAX, FLAGS },
{ "stitch", "stitch boundaries", OFFSET(stitch), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
{ "mold", "set mold speed for dead cells", OFFSET(mold), AV_OPT_TYPE_INT, {.i64=0}, 0, 0xFF, FLAGS },
- { "life_color", "set life color", OFFSET( life_color_str), AV_OPT_TYPE_STRING, {.str="white"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "death_color", "set death color", OFFSET(death_color_str), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "mold_color", "set mold color", OFFSET( mold_color_str), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS },
- { NULL },
+ { "life_color", "set life color", OFFSET( life_color), AV_OPT_TYPE_COLOR, {.str="white"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "death_color", "set death color", OFFSET(death_color), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "mold_color", "set mold color", OFFSET( mold_color), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(life);
@@ -198,8 +194,8 @@ static int init_pattern_from_file(AVFilterContext *ctx)
life->h = h;
}
- if (!(life->buf[0] = av_mallocz(sizeof(char) * life->h * life->w)) ||
- !(life->buf[1] = av_mallocz(sizeof(char) * life->h * life->w))) {
+ if (!(life->buf[0] = av_calloc(life->h * life->w, sizeof(*life->buf[0]))) ||
+ !(life->buf[1] = av_calloc(life->h * life->w, sizeof(*life->buf[1])))) {
av_free(life->buf[0]);
av_free(life->buf[1]);
return AVERROR(ENOMEM);
@@ -221,56 +217,27 @@ static int init_pattern_from_file(AVFilterContext *ctx)
return 0;
}
-static int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
LifeContext *life = ctx->priv;
- AVRational frame_rate;
int ret;
- life->class = &life_class;
- av_opt_set_defaults(life);
-
- if ((ret = av_set_options_string(life, args, "=", ":")) < 0)
- return ret;
-
- if ((ret = av_parse_video_rate(&frame_rate, life->rate)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: %s\n", life->rate);
- return AVERROR(EINVAL);
- }
- av_freep(&life->rate);
-
if (!life->w && !life->filename)
av_opt_set(life, "size", "320x240", 0);
if ((ret = parse_rule(&life->born_rule, &life->stay_rule, life->rule_str, ctx)) < 0)
return ret;
-#define PARSE_COLOR(name) do { \
- if ((ret = av_parse_color(life->name ## _color, life->name ## _color_str, -1, ctx))) { \
- av_log(ctx, AV_LOG_ERROR, "Invalid " #name " color '%s'\n", \
- life->name ## _color_str); \
- return ret; \
- } \
- av_freep(&life->name ## _color_str); \
-} while (0)
-
- PARSE_COLOR(life);
- PARSE_COLOR(death);
- PARSE_COLOR(mold);
-
if (!life->mold && memcmp(life->mold_color, "\x00\x00\x00", 3))
av_log(ctx, AV_LOG_WARNING,
"Mold color is set while mold isn't, ignoring the color.\n");
- life->time_base.num = frame_rate.den;
- life->time_base.den = frame_rate.num;
-
if (!life->filename) {
/* fill the grid randomly */
int i;
- if (!(life->buf[0] = av_mallocz(sizeof(char) * life->h * life->w)) ||
- !(life->buf[1] = av_mallocz(sizeof(char) * life->h * life->w))) {
+ if (!(life->buf[0] = av_calloc(life->h * life->w, sizeof(*life->buf[0]))) ||
+ !(life->buf[1] = av_calloc(life->h * life->w, sizeof(*life->buf[1])))) {
av_free(life->buf[0]);
av_free(life->buf[1]);
return AVERROR(ENOMEM);
@@ -293,7 +260,7 @@ static int init(AVFilterContext *ctx, const char *args)
av_log(ctx, AV_LOG_VERBOSE,
"s:%dx%d r:%d/%d rule:%s stay_rule:%d born_rule:%d stitch:%d seed:%u\n",
- life->w, life->h, frame_rate.num, frame_rate.den,
+ life->w, life->h, life->frame_rate.num, life->frame_rate.den,
life->rule_str, life->stay_rule, life->born_rule, life->stitch,
life->random_seed);
return 0;
@@ -315,7 +282,7 @@ static int config_props(AVFilterLink *outlink)
outlink->w = life->w;
outlink->h = life->h;
- outlink->time_base = life->time_base;
+ outlink->time_base = av_inv_q(life->frame_rate);
return 0;
}
@@ -431,6 +398,8 @@ static int request_frame(AVFilterLink *outlink)
{
LifeContext *life = outlink->src->priv;
AVFrame *picref = ff_get_video_buffer(outlink, life->w, life->h);
+ if (!picref)
+ return AVERROR(ENOMEM);
picref->sample_aspect_ratio = (AVRational) {1, 1};
picref->pts = life->pts++;
@@ -468,14 +437,14 @@ static const AVFilterPad life_outputs[] = {
{ NULL}
};
-AVFilter avfilter_vsrc_life = {
- .name = "life",
- .description = NULL_IF_CONFIG_SMALL("Create life."),
- .priv_size = sizeof(LifeContext),
- .init = init,
- .uninit = uninit,
+AVFilter ff_vsrc_life = {
+ .name = "life",
+ .description = NULL_IF_CONFIG_SMALL("Create life."),
+ .priv_size = sizeof(LifeContext),
+ .priv_class = &life_class,
+ .init = init,
+ .uninit = uninit,
.query_formats = query_formats,
.inputs = NULL,
.outputs = life_outputs,
- .priv_class = &life_class,
};
diff --git a/ffmpeg/libavfilter/vsrc_mandelbrot.c b/ffmpeg/libavfilter/vsrc_mandelbrot.c
index 945c707..19dddf9 100644
--- a/ffmpeg/libavfilter/vsrc_mandelbrot.c
+++ b/ffmpeg/libavfilter/vsrc_mandelbrot.c
@@ -41,6 +41,8 @@
enum Outer{
ITERATION_COUNT,
NORMALIZED_ITERATION_COUNT,
+ WHITE,
+ OUTZ,
};
enum Inner{
@@ -58,9 +60,8 @@ typedef struct Point {
typedef struct {
const AVClass *class;
int w, h;
- AVRational time_base;
+ AVRational frame_rate;
uint64_t pts;
- char *rate;
int maxiter;
double start_x;
double start_y;
@@ -76,6 +77,10 @@ typedef struct {
Point *next_cache;
double (*zyklus)[2];
uint32_t dither;
+
+ double morphxf;
+ double morphyf;
+ double morphamp;
} MBContext;
#define OFFSET(x) offsetof(MBContext, x)
@@ -84,8 +89,8 @@ typedef struct {
static const AVOption mandelbrot_options[] = {
{"size", "set frame size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="640x480"}, CHAR_MIN, CHAR_MAX, FLAGS },
{"s", "set frame size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="640x480"}, CHAR_MIN, CHAR_MAX, FLAGS },
- {"rate", "set frame rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str="25"}, CHAR_MIN, CHAR_MAX, FLAGS },
- {"r", "set frame rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str="25"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ {"rate", "set frame rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ {"r", "set frame rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, CHAR_MIN, CHAR_MAX, FLAGS },
{"maxiter", "set max iterations number", OFFSET(maxiter), AV_OPT_TYPE_INT, {.i64=7189}, 1, INT_MAX, FLAGS },
{"start_x", "set the initial x position", OFFSET(start_x), AV_OPT_TYPE_DOUBLE, {.dbl=-0.743643887037158704752191506114774}, -100, 100, FLAGS },
{"start_y", "set the initial y position", OFFSET(start_y), AV_OPT_TYPE_DOUBLE, {.dbl=-0.131825904205311970493132056385139}, -100, 100, FLAGS },
@@ -93,10 +98,15 @@ static const AVOption mandelbrot_options[] = {
{"end_scale", "set the terminal scale value", OFFSET(end_scale), AV_OPT_TYPE_DOUBLE, {.dbl=0.3}, 0, FLT_MAX, FLAGS },
{"end_pts", "set the terminal pts value", OFFSET(end_pts), AV_OPT_TYPE_DOUBLE, {.dbl=400}, 0, INT64_MAX, FLAGS },
{"bailout", "set the bailout value", OFFSET(bailout), AV_OPT_TYPE_DOUBLE, {.dbl=10}, 0, FLT_MAX, FLAGS },
+ {"morphxf", "set morph x frequency", OFFSET(morphxf), AV_OPT_TYPE_DOUBLE, {.dbl=0.01}, -FLT_MAX, FLT_MAX, FLAGS },
+ {"morphyf", "set morph y frequency", OFFSET(morphyf), AV_OPT_TYPE_DOUBLE, {.dbl=0.0123}, -FLT_MAX, FLT_MAX, FLAGS },
+ {"morphamp", "set morph amplitude", OFFSET(morphamp), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -FLT_MAX, FLT_MAX, FLAGS },
{"outer", "set outer coloring mode", OFFSET(outer), AV_OPT_TYPE_INT, {.i64=NORMALIZED_ITERATION_COUNT}, 0, INT_MAX, FLAGS, "outer" },
{"iteration_count", "set iteration count mode", 0, AV_OPT_TYPE_CONST, {.i64=ITERATION_COUNT}, INT_MIN, INT_MAX, FLAGS, "outer" },
{"normalized_iteration_count", "set normalized iteration count mode", 0, AV_OPT_TYPE_CONST, {.i64=NORMALIZED_ITERATION_COUNT}, INT_MIN, INT_MAX, FLAGS, "outer" },
+ {"white", "set white mode", 0, AV_OPT_TYPE_CONST, {.i64=WHITE}, INT_MIN, INT_MAX, FLAGS, "outer" },
+ {"outz", "set outz mode", 0, AV_OPT_TYPE_CONST, {.i64=OUTZ}, INT_MIN, INT_MAX, FLAGS, "outer" },
{"inner", "set inner coloring mode", OFFSET(inner), AV_OPT_TYPE_INT, {.i64=MINCOL}, 0, INT_MAX, FLAGS, "inner" },
{"black", "set black mode", 0, AV_OPT_TYPE_CONST, {.i64=BLACK}, INT_MIN, INT_MAX, FLAGS, "inner"},
@@ -109,29 +119,15 @@ static const AVOption mandelbrot_options[] = {
AVFILTER_DEFINE_CLASS(mandelbrot);
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
MBContext *mb = ctx->priv;
- AVRational rate_q;
- int err;
-
- mb->class = &mandelbrot_class;
- av_opt_set_defaults(mb);
- if ((err = (av_set_options_string(mb, args, "=", ":"))) < 0)
- return err;
mb->bailout *= mb->bailout;
mb->start_scale /=mb->h;
mb->end_scale /=mb->h;
- if (av_parse_video_rate(&rate_q, mb->rate) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: %s\n", mb->rate);
- return AVERROR(EINVAL);
- }
- mb->time_base.num = rate_q.den;
- mb->time_base.den = rate_q.num;
-
mb->cache_allocated = mb->w * mb->h * 3;
mb->cache_used = 0;
mb->point_cache= av_malloc(sizeof(*mb->point_cache)*mb->cache_allocated);
@@ -145,7 +141,6 @@ static av_cold void uninit(AVFilterContext *ctx)
{
MBContext *mb = ctx->priv;
- av_freep(&mb->rate);
av_freep(&mb->point_cache);
av_freep(&mb-> next_cache);
av_freep(&mb->zyklus);
@@ -172,13 +167,15 @@ static int config_props(AVFilterLink *inlink)
inlink->w = mb->w;
inlink->h = mb->h;
- inlink->time_base = mb->time_base;
+ inlink->time_base = av_inv_q(mb->frame_rate);
return 0;
}
static void fill_from_cache(AVFilterContext *ctx, uint32_t *color, int *in_cidx, int *out_cidx, double py, double scale){
MBContext *mb = ctx->priv;
+ if(mb->morphamp)
+ return;
for(; *in_cidx < mb->cache_used; (*in_cidx)++){
Point *p= &mb->point_cache[*in_cidx];
int x;
@@ -276,13 +273,18 @@ static void draw_mandelbrot(AVFilterContext *ctx, uint32_t *color, int linesize,
if(color[x + y*linesize] & 0xFF000000)
continue;
- if(interpol(mb, color, x, y, linesize)){
- if(next_cidx < mb->cache_allocated){
- mb->next_cache[next_cidx ].p[0]= cr;
- mb->next_cache[next_cidx ].p[1]= ci;
- mb->next_cache[next_cidx++].val = color[x + y*linesize];
+ if(!mb->morphamp){
+ if(interpol(mb, color, x, y, linesize)){
+ if(next_cidx < mb->cache_allocated){
+ mb->next_cache[next_cidx ].p[0]= cr;
+ mb->next_cache[next_cidx ].p[1]= ci;
+ mb->next_cache[next_cidx++].val = color[x + y*linesize];
+ }
+ continue;
}
- continue;
+ }else{
+ zr += cos(pts * mb->morphxf) * mb->morphamp;
+ zi += sin(pts * mb->morphyf) * mb->morphamp;
}
use_zyklus= (x==0 || mb->inner!=BLACK ||color[x-1 + y*linesize] == 0xFF000000);
@@ -328,10 +330,22 @@ static void draw_mandelbrot(AVFilterContext *ctx, uint32_t *color, int linesize,
zi= mb->zyklus[i][1];
if(zr*zr + zi*zi > mb->bailout){
switch(mb->outer){
- case ITERATION_COUNT: zr = i; break;
- case NORMALIZED_ITERATION_COUNT: zr= i + log2(log(mb->bailout) / log(zr*zr + zi*zi)); break;
+ case ITERATION_COUNT:
+ zr = i;
+ c = lrintf((sin(zr)+1)*127) + lrintf((sin(zr/1.234)+1)*127)*256*256 + lrintf((sin(zr/100)+1)*127)*256;
+ break;
+ case NORMALIZED_ITERATION_COUNT:
+ zr = i + log2(log(mb->bailout) / log(zr*zr + zi*zi));
+ c = lrintf((sin(zr)+1)*127) + lrintf((sin(zr/1.234)+1)*127)*256*256 + lrintf((sin(zr/100)+1)*127)*256;
+ break;
+ case WHITE:
+ c = 0xFFFFFF;
+ break;
+ case OUTZ:
+ zr /= mb->bailout;
+ zi /= mb->bailout;
+ c = (((int)(zr*128+128))&0xFF)*256 + (((int)(zi*128+128))&0xFF);
}
- c= lrintf((sin(zr)+1)*127) + lrintf((sin(zr/1.234)+1)*127)*256*256 + lrintf((sin(zr/100)+1)*127)*256;
break;
}
}
@@ -383,6 +397,9 @@ static int request_frame(AVFilterLink *link)
{
MBContext *mb = link->src->priv;
AVFrame *picref = ff_get_video_buffer(link, mb->w, mb->h);
+ if (!picref)
+ return AVERROR(ENOMEM);
+
picref->sample_aspect_ratio = (AVRational) {1, 1};
picref->pts = mb->pts++;
@@ -397,19 +414,17 @@ static const AVFilterPad mandelbrot_outputs[] = {
.request_frame = request_frame,
.config_props = config_props,
},
- { NULL },
+ { NULL }
};
-AVFilter avfilter_vsrc_mandelbrot = {
- .name = "mandelbrot",
- .description = NULL_IF_CONFIG_SMALL("Render a Mandelbrot fractal."),
-
- .priv_size = sizeof(MBContext),
- .init = init,
- .uninit = uninit,
-
+AVFilter ff_vsrc_mandelbrot = {
+ .name = "mandelbrot",
+ .description = NULL_IF_CONFIG_SMALL("Render a Mandelbrot fractal."),
+ .priv_size = sizeof(MBContext),
+ .priv_class = &mandelbrot_class,
+ .init = init,
+ .uninit = uninit,
.query_formats = query_formats,
.inputs = NULL,
.outputs = mandelbrot_outputs,
- .priv_class = &mandelbrot_class,
};
diff --git a/ffmpeg/libavfilter/vsrc_mptestsrc.c b/ffmpeg/libavfilter/vsrc_mptestsrc.c
index 05a1af6..d045704 100644
--- a/ffmpeg/libavfilter/vsrc_mptestsrc.c
+++ b/ffmpeg/libavfilter/vsrc_mptestsrc.c
@@ -52,37 +52,34 @@ enum test_type {
typedef struct MPTestContext {
const AVClass *class;
- unsigned int frame_nb;
- AVRational time_base;
- int64_t pts, max_pts;
+ AVRational frame_rate;
+ int64_t pts, max_pts, duration;
int hsub, vsub;
- char *size, *rate, *duration;
enum test_type test;
} MPTestContext;
#define OFFSET(x) offsetof(MPTestContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption mptestsrc_options[]= {
- { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, FLAGS },
- { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, FLAGS },
- { "duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
- { "d", "set video duration", OFFSET(duration), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
+ { "duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },
+ { "d", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },
{ "test", "set test to perform", OFFSET(test), AV_OPT_TYPE_INT, {.i64=TEST_ALL}, 0, INT_MAX, FLAGS, "test" },
{ "t", "set test to perform", OFFSET(test), AV_OPT_TYPE_INT, {.i64=TEST_ALL}, 0, INT_MAX, FLAGS, "test" },
- { "dc_luma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_DC_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" },
- { "dc_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_DC_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" },
- { "freq_luma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_FREQ_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" },
- { "freq_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_FREQ_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" },
- { "amp_luma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_AMP_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" },
- { "amp_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_AMP_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" },
- { "cbp", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_CBP}, INT_MIN, INT_MAX, FLAGS, "test" },
- { "mv", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_MV}, INT_MIN, INT_MAX, FLAGS, "test" },
- { "ring1", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_RING1}, INT_MIN, INT_MAX, FLAGS, "test" },
- { "ring2", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_RING2}, INT_MIN, INT_MAX, FLAGS, "test" },
- { "all", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_ALL}, INT_MIN, INT_MAX, FLAGS, "test" },
-
- { NULL },
+ { "dc_luma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_DC_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "dc_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_DC_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "freq_luma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_FREQ_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "freq_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_FREQ_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "amp_luma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_AMP_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "amp_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_AMP_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "cbp", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_CBP}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "mv", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_MV}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "ring1", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_RING1}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "ring2", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_RING2}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "all", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_ALL}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { NULL }
};
AVFILTER_DEFINE_CLASS(mptestsrc);
@@ -256,39 +253,17 @@ static void ring2_test(uint8_t *dst, int dst_linesize, int off)
}
}
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
MPTestContext *test = ctx->priv;
- AVRational frame_rate_q;
- int64_t duration = -1;
- int ret;
-
- test->class = &mptestsrc_class;
- av_opt_set_defaults(test);
-
- if ((ret = (av_set_options_string(test, args, "=", ":"))) < 0)
- return ret;
-
- if ((ret = av_parse_video_rate(&frame_rate_q, test->rate)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: '%s'\n", test->rate);
- return ret;
- }
- if ((test->duration) && (ret = av_parse_time(&duration, test->duration, 1)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid duration: '%s'\n", test->duration);
- return ret;
- }
-
- test->time_base.num = frame_rate_q.den;
- test->time_base.den = frame_rate_q.num;
- test->max_pts = duration >= 0 ?
- av_rescale_q(duration, AV_TIME_BASE_Q, test->time_base) : -1;
- test->frame_nb = 0;
+ test->max_pts = test->duration >= 0 ?
+ av_rescale_q(test->duration, AV_TIME_BASE_Q, av_inv_q(test->frame_rate)) : -1;
test->pts = 0;
av_log(ctx, AV_LOG_VERBOSE, "rate:%d/%d duration:%f\n",
- frame_rate_q.num, frame_rate_q.den,
- duration < 0 ? -1 : test->max_pts * av_q2d(test->time_base));
+ test->frame_rate.num, test->frame_rate.den,
+ test->duration < 0 ? -1 : test->max_pts * av_q2d(av_inv_q(test->frame_rate)));
init_idct();
return 0;
@@ -305,7 +280,7 @@ static int config_props(AVFilterLink *outlink)
outlink->w = WIDTH;
outlink->h = HEIGHT;
- outlink->time_base = test->time_base;
+ outlink->time_base = av_inv_q(test->frame_rate);
return 0;
}
@@ -324,19 +299,26 @@ static int request_frame(AVFilterLink *outlink)
{
MPTestContext *test = outlink->src->priv;
AVFrame *picref;
- int w = WIDTH, h = HEIGHT, ch = h>>test->vsub;
- unsigned int frame = test->frame_nb;
+ int w = WIDTH, h = HEIGHT,
+ cw = FF_CEIL_RSHIFT(w, test->hsub), ch = FF_CEIL_RSHIFT(h, test->vsub);
+ unsigned int frame = outlink->frame_count;
enum test_type tt = test->test;
+ int i;
if (test->max_pts >= 0 && test->pts > test->max_pts)
return AVERROR_EOF;
picref = ff_get_video_buffer(outlink, w, h);
+ if (!picref)
+ return AVERROR(ENOMEM);
picref->pts = test->pts++;
// clean image
- memset(picref->data[0], 0, picref->linesize[0] * h);
- memset(picref->data[1], 128, picref->linesize[1] * ch);
- memset(picref->data[2], 128, picref->linesize[2] * ch);
+ for (i = 0; i < h; i++)
+ memset(picref->data[0] + i*picref->linesize[0], 0, w);
+ for (i = 0; i < ch; i++) {
+ memset(picref->data[1] + i*picref->linesize[1], 128, cw);
+ memset(picref->data[2] + i*picref->linesize[2], 128, cw);
+ }
if (tt == TEST_ALL && frame%30) /* draw a black frame at the beginning of each test */
tt = (frame/30)%(TEST_NB-1);
@@ -354,7 +336,6 @@ static int request_frame(AVFilterLink *outlink)
case TEST_RING2: ring2_test(picref->data[0], picref->linesize[0], frame%30); break;
}
- test->frame_nb++;
return ff_filter_frame(outlink, picref);
}
@@ -368,15 +349,13 @@ static const AVFilterPad mptestsrc_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vsrc_mptestsrc = {
- .name = "mptestsrc",
- .description = NULL_IF_CONFIG_SMALL("Generate various test pattern."),
- .priv_size = sizeof(MPTestContext),
- .init = init,
-
- .query_formats = query_formats,
-
- .inputs = NULL,
- .outputs = mptestsrc_outputs,
- .priv_class = &mptestsrc_class,
+AVFilter ff_vsrc_mptestsrc = {
+ .name = "mptestsrc",
+ .description = NULL_IF_CONFIG_SMALL("Generate various test pattern."),
+ .priv_size = sizeof(MPTestContext),
+ .priv_class = &mptestsrc_class,
+ .init = init,
+ .query_formats = query_formats,
+ .inputs = NULL,
+ .outputs = mptestsrc_outputs,
};
diff --git a/ffmpeg/libavfilter/vsrc_testsrc.c b/ffmpeg/libavfilter/vsrc_testsrc.c
index 22f163c..0ad1474 100644
--- a/ffmpeg/libavfilter/vsrc_testsrc.c
+++ b/ffmpeg/libavfilter/vsrc_testsrc.c
@@ -30,11 +30,12 @@
* rgbtestsrc is ported from MPlayer libmpcodecs/vf_rgbtest.c by
* Michael Niedermayer.
*
- * smptebars is by Paul B Mahol.
+ * smptebars and smptehdbars are by Paul B Mahol.
*/
#include <float.h>
+#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
@@ -52,87 +53,53 @@ typedef struct {
unsigned int nb_frame;
AVRational time_base, frame_rate;
int64_t pts;
- char *frame_rate_str; ///< video frame rate
- char *duration_str; ///< total duration of the generated video
int64_t duration; ///< duration expressed in microseconds
AVRational sar; ///< sample aspect ratio
- int nb_decimals;
int draw_once; ///< draw only the first frame, always put out the same picture
+ int draw_once_reset; ///< draw only the first frame or in case of reset
AVFrame *picref; ///< cached reference containing the painted picture
void (* fill_picture_fn)(AVFilterContext *ctx, AVFrame *frame);
+ /* only used by testsrc */
+ int nb_decimals;
+
/* only used by color */
- char *color_str;
FFDrawContext draw;
FFDrawColor color;
uint8_t color_rgba[4];
/* only used by rgbtest */
uint8_t rgba_map[4];
+
+ /* only used by haldclut */
+ int level;
} TestSourceContext;
#define OFFSET(x) offsetof(TestSourceContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
-static const AVOption options[] = {
- { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS },
- { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS },
- { "rate", "set video rate", OFFSET(frame_rate_str), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, FLAGS },
- { "r", "set video rate", OFFSET(frame_rate_str), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, FLAGS },
- { "duration", "set video duration", OFFSET(duration_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
- { "d", "set video duration", OFFSET(duration_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+#define SIZE_OPTIONS \
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS },\
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS },\
+
+#define COMMON_OPTIONS_NOSIZE \
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },\
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },\
+ { "duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },\
+ { "d", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },\
{ "sar", "set video sample aspect ratio", OFFSET(sar), AV_OPT_TYPE_RATIONAL, {.dbl= 1}, 0, INT_MAX, FLAGS },
- /* only used by color */
- { "color", "set color", OFFSET(color_str), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
- { "c", "set color", OFFSET(color_str), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+#define COMMON_OPTIONS SIZE_OPTIONS COMMON_OPTIONS_NOSIZE
- /* only used by testsrc */
- { "decimals", "set number of decimals to show", OFFSET(nb_decimals), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX, FLAGS },
- { "n", "set number of decimals to show", OFFSET(nb_decimals), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX, FLAGS },
- { NULL },
+static const AVOption options[] = {
+ COMMON_OPTIONS
+ { NULL }
};
-static av_cold int init(AVFilterContext *ctx, const char *args)
+static av_cold int init(AVFilterContext *ctx)
{
TestSourceContext *test = ctx->priv;
- int ret = 0;
-
- av_opt_set_defaults(test);
-
- if ((ret = (av_set_options_string(test, args, "=", ":"))) < 0)
- return ret;
-
- if ((ret = av_parse_video_rate(&test->frame_rate, test->frame_rate_str)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: '%s'\n", test->frame_rate_str);
- return ret;
- }
-
- test->duration = -1;
- if (test->duration_str &&
- (ret = av_parse_time(&test->duration, test->duration_str, 1)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid duration: '%s'\n", test->duration_str);
- return ret;
- }
-
- if (test->nb_decimals && strcmp(ctx->filter->name, "testsrc")) {
- av_log(ctx, AV_LOG_WARNING,
- "Option 'decimals' is ignored with source '%s'\n",
- ctx->filter->name);
- }
-
- if (test->color_str) {
- if (!strcmp(ctx->filter->name, "color")) {
- ret = av_parse_color(test->color_rgba, test->color_str, -1, ctx);
- if (ret < 0)
- return ret;
- } else {
- av_log(ctx, AV_LOG_WARNING,
- "Option 'color' is ignored with source '%s'\n",
- ctx->filter->name);
- }
- }
test->time_base = av_inv_q(test->frame_rate);
test->nb_frame = 0;
@@ -149,7 +116,6 @@ static av_cold void uninit(AVFilterContext *ctx)
{
TestSourceContext *test = ctx->priv;
- av_opt_free(test);
av_frame_free(&test->picref);
}
@@ -176,6 +142,10 @@ static int request_frame(AVFilterLink *outlink)
return AVERROR_EOF;
if (test->draw_once) {
+ if (test->draw_once_reset) {
+ av_frame_free(&test->picref);
+ test->draw_once_reset = 0;
+ }
if (!test->picref) {
test->picref =
ff_get_video_buffer(outlink, test->w, test->h);
@@ -205,7 +175,13 @@ static int request_frame(AVFilterLink *outlink)
#if CONFIG_COLOR_FILTER
-#define color_options options
+static const AVOption color_options[] = {
+ { "color", "set color", OFFSET(color_rgba), AV_OPT_TYPE_COLOR, {.str = "black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "c", "set color", OFFSET(color_rgba), AV_OPT_TYPE_COLOR, {.str = "black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ COMMON_OPTIONS
+ { NULL }
+};
+
AVFILTER_DEFINE_CLASS(color);
static void color_fill_picture(AVFilterContext *ctx, AVFrame *picref)
@@ -216,14 +192,12 @@ static void color_fill_picture(AVFilterContext *ctx, AVFrame *picref)
0, 0, test->w, test->h);
}
-static av_cold int color_init(AVFilterContext *ctx, const char *args)
+static av_cold int color_init(AVFilterContext *ctx)
{
TestSourceContext *test = ctx->priv;
- test->class = &color_class;
test->fill_picture_fn = color_fill_picture;
test->draw_once = 1;
- av_opt_set(test, "color", "black", 0);
- return init(ctx, args);
+ return init(ctx);
}
static int color_query_formats(AVFilterContext *ctx)
@@ -249,11 +223,31 @@ static int color_config_props(AVFilterLink *inlink)
if ((ret = config_props(inlink)) < 0)
return ret;
- av_log(ctx, AV_LOG_VERBOSE, "color:0x%02x%02x%02x%02x\n",
- test->color_rgba[0], test->color_rgba[1], test->color_rgba[2], test->color_rgba[3]);
return 0;
}
+static int color_process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ TestSourceContext *test = ctx->priv;
+ int ret;
+
+ if (!strcmp(cmd, "color") || !strcmp(cmd, "c")) {
+ uint8_t color_rgba[4];
+
+ ret = av_parse_color(color_rgba, args, -1, ctx);
+ if (ret < 0)
+ return ret;
+
+ memcpy(test->color_rgba, color_rgba, sizeof(color_rgba));
+ ff_draw_color(&test->draw, &test->color, test->color_rgba);
+ test->draw_once_reset = 1;
+ return 0;
+ }
+
+ return AVERROR(ENOSYS);
+}
+
static const AVFilterPad color_outputs[] = {
{
.name = "default",
@@ -264,21 +258,149 @@ static const AVFilterPad color_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vsrc_color = {
- .name = "color",
- .description = NULL_IF_CONFIG_SMALL("Provide an uniformly colored input."),
+AVFilter ff_vsrc_color = {
+ .name = "color",
+ .description = NULL_IF_CONFIG_SMALL("Provide an uniformly colored input."),
+ .priv_class = &color_class,
+ .priv_size = sizeof(TestSourceContext),
+ .init = color_init,
+ .uninit = uninit,
+ .query_formats = color_query_formats,
+ .inputs = NULL,
+ .outputs = color_outputs,
+ .process_command = color_process_command,
+};
- .priv_size = sizeof(TestSourceContext),
- .init = color_init,
- .uninit = uninit,
+#endif /* CONFIG_COLOR_FILTER */
- .query_formats = color_query_formats,
- .inputs = NULL,
- .outputs = color_outputs,
- .priv_class = &color_class,
+#if CONFIG_HALDCLUTSRC_FILTER
+
+static const AVOption haldclutsrc_options[] = {
+ { "level", "set level", OFFSET(level), AV_OPT_TYPE_INT, {.i64 = 6}, 2, 8, FLAGS },
+ COMMON_OPTIONS_NOSIZE
+ { NULL }
};
-#endif /* CONFIG_COLOR_FILTER */
+AVFILTER_DEFINE_CLASS(haldclutsrc);
+
+static void haldclutsrc_fill_picture(AVFilterContext *ctx, AVFrame *frame)
+{
+ int i, j, k, x = 0, y = 0, is16bit = 0, step;
+ uint32_t alpha = 0;
+ const TestSourceContext *hc = ctx->priv;
+ int level = hc->level;
+ float scale;
+ const int w = frame->width;
+ const int h = frame->height;
+ const uint8_t *data = frame->data[0];
+ const int linesize = frame->linesize[0];
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+ uint8_t rgba_map[4];
+
+ av_assert0(w == h && w == level*level*level);
+
+ ff_fill_rgba_map(rgba_map, frame->format);
+
+ switch (frame->format) {
+ case AV_PIX_FMT_RGB48:
+ case AV_PIX_FMT_BGR48:
+ case AV_PIX_FMT_RGBA64:
+ case AV_PIX_FMT_BGRA64:
+ is16bit = 1;
+ alpha = 0xffff;
+ break;
+ case AV_PIX_FMT_RGBA:
+ case AV_PIX_FMT_BGRA:
+ case AV_PIX_FMT_ARGB:
+ case AV_PIX_FMT_ABGR:
+ alpha = 0xff;
+ break;
+ }
+
+ step = av_get_padded_bits_per_pixel(desc) >> (3 + is16bit);
+ scale = ((float)(1 << (8*(is16bit+1))) - 1) / (level*level - 1);
+
+#define LOAD_CLUT(nbits) do { \
+ uint##nbits##_t *dst = ((uint##nbits##_t *)(data + y*linesize)) + x*step; \
+ dst[rgba_map[0]] = av_clip_uint##nbits(i * scale); \
+ dst[rgba_map[1]] = av_clip_uint##nbits(j * scale); \
+ dst[rgba_map[2]] = av_clip_uint##nbits(k * scale); \
+ if (step == 4) \
+ dst[rgba_map[3]] = alpha; \
+} while (0)
+
+ level *= level;
+ for (k = 0; k < level; k++) {
+ for (j = 0; j < level; j++) {
+ for (i = 0; i < level; i++) {
+ if (!is16bit)
+ LOAD_CLUT(8);
+ else
+ LOAD_CLUT(16);
+ if (++x == w) {
+ x = 0;
+ y++;
+ }
+ }
+ }
+ }
+}
+
+static av_cold int haldclutsrc_init(AVFilterContext *ctx)
+{
+ TestSourceContext *hc = ctx->priv;
+ hc->fill_picture_fn = haldclutsrc_fill_picture;
+ hc->draw_once = 1;
+ return init(ctx);
+}
+
+static int haldclutsrc_query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_RGB48, AV_PIX_FMT_BGR48,
+ AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
+ AV_PIX_FMT_NONE,
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int haldclutsrc_config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ TestSourceContext *hc = ctx->priv;
+
+ hc->w = hc->h = hc->level * hc->level * hc->level;
+ return config_props(outlink);
+}
+
+static const AVFilterPad haldclutsrc_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = haldclutsrc_config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vsrc_haldclutsrc = {
+ .name = "haldclutsrc",
+ .description = NULL_IF_CONFIG_SMALL("Provide an identity Hald CLUT."),
+ .priv_class = &haldclutsrc_class,
+ .priv_size = sizeof(TestSourceContext),
+ .init = haldclutsrc_init,
+ .uninit = uninit,
+ .query_formats = haldclutsrc_query_formats,
+ .inputs = NULL,
+ .outputs = haldclutsrc_outputs,
+};
+#endif /* CONFIG_HALDCLUTSRC_FILTER */
#if CONFIG_NULLSRC_FILTER
@@ -287,13 +409,12 @@ AVFILTER_DEFINE_CLASS(nullsrc);
static void nullsrc_fill_picture(AVFilterContext *ctx, AVFrame *picref) { }
-static av_cold int nullsrc_init(AVFilterContext *ctx, const char *args)
+static av_cold int nullsrc_init(AVFilterContext *ctx)
{
TestSourceContext *test = ctx->priv;
- test->class = &nullsrc_class;
test->fill_picture_fn = nullsrc_fill_picture;
- return init(ctx, args);
+ return init(ctx);
}
static const AVFilterPad nullsrc_outputs[] = {
@@ -306,22 +427,28 @@ static const AVFilterPad nullsrc_outputs[] = {
{ NULL },
};
-AVFilter avfilter_vsrc_nullsrc = {
+AVFilter ff_vsrc_nullsrc = {
.name = "nullsrc",
.description = NULL_IF_CONFIG_SMALL("Null video source, return unprocessed video frames."),
- .init = nullsrc_init,
- .uninit = uninit,
- .priv_size = sizeof(TestSourceContext),
- .inputs = NULL,
- .outputs = nullsrc_outputs,
- .priv_class = &nullsrc_class,
+ .init = nullsrc_init,
+ .uninit = uninit,
+ .priv_size = sizeof(TestSourceContext),
+ .priv_class = &nullsrc_class,
+ .inputs = NULL,
+ .outputs = nullsrc_outputs,
};
#endif /* CONFIG_NULLSRC_FILTER */
#if CONFIG_TESTSRC_FILTER
-#define testsrc_options options
+static const AVOption testsrc_options[] = {
+ COMMON_OPTIONS
+ { "decimals", "set number of decimals to show", OFFSET(nb_decimals), AV_OPT_TYPE_INT, {.i64=0}, 0, 17, FLAGS },
+ { "n", "set number of decimals to show", OFFSET(nb_decimals), AV_OPT_TYPE_INT, {.i64=0}, 0, 17, FLAGS },
+ { NULL }
+};
+
AVFILTER_DEFINE_CLASS(testsrc);
/**
@@ -336,8 +463,8 @@ AVFILTER_DEFINE_CLASS(testsrc);
* @param w width of the rectangle to draw, expressed as a number of segment_width units
* @param h height of the rectangle to draw, expressed as a number of segment_width units
*/
-static void draw_rectangle(unsigned val, uint8_t *dst, int dst_linesize, unsigned segment_width,
- unsigned x, unsigned y, unsigned w, unsigned h)
+static void draw_rectangle(unsigned val, uint8_t *dst, int dst_linesize, int segment_width,
+ int x, int y, int w, int h)
{
int i;
int step = 3;
@@ -351,8 +478,8 @@ static void draw_rectangle(unsigned val, uint8_t *dst, int dst_linesize, unsigne
}
}
-static void draw_digit(int digit, uint8_t *dst, unsigned dst_linesize,
- unsigned segment_width)
+static void draw_digit(int digit, uint8_t *dst, int dst_linesize,
+ int segment_width)
{
#define TOP_HBAR 1
#define MID_HBAR 2
@@ -446,7 +573,7 @@ static void test_fill_picture(AVFilterContext *ctx, AVFrame *frame)
}
/* draw sliding color line */
- p0 = p = data + frame->linesize[0] * height * 3/4;
+ p0 = p = data + frame->linesize[0] * (height * 3/4);
grad = (256 * test->nb_frame * test->time_base.num / test->time_base.den) %
GRADIENT_SIZE;
rgrad = 0;
@@ -483,11 +610,16 @@ static void test_fill_picture(AVFilterContext *ctx, AVFrame *frame)
/* draw digits */
seg_size = width / 80;
if (seg_size >= 1 && height >= 13 * seg_size) {
+ int64_t p10decimals = 1;
double time = av_q2d(test->time_base) * test->nb_frame *
pow(10, test->nb_decimals);
- if (time > INT_MAX)
+ if (time >= INT_MAX)
return;
- second = (int)time;
+
+ for (x = 0; x < test->nb_decimals; x++)
+ p10decimals *= 10;
+
+ second = av_rescale_rnd(test->nb_frame * test->time_base.num, p10decimals, test->time_base.den, AV_ROUND_ZERO);
x = width - (width - seg_size * 64) / 2;
y = (height - seg_size * 13) / 2;
p = data + (x*3 + y * frame->linesize[0]);
@@ -501,13 +633,12 @@ static void test_fill_picture(AVFilterContext *ctx, AVFrame *frame)
}
}
-static av_cold int test_init(AVFilterContext *ctx, const char *args)
+static av_cold int test_init(AVFilterContext *ctx)
{
TestSourceContext *test = ctx->priv;
- test->class = &testsrc_class;
test->fill_picture_fn = test_fill_picture;
- return init(ctx, args);
+ return init(ctx);
}
static int test_query_formats(AVFilterContext *ctx)
@@ -529,18 +660,16 @@ static const AVFilterPad avfilter_vsrc_testsrc_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vsrc_testsrc = {
- .name = "testsrc",
- .description = NULL_IF_CONFIG_SMALL("Generate test pattern."),
- .priv_size = sizeof(TestSourceContext),
- .init = test_init,
- .uninit = uninit,
-
- .query_formats = test_query_formats,
-
- .inputs = NULL,
- .outputs = avfilter_vsrc_testsrc_outputs,
- .priv_class = &testsrc_class,
+AVFilter ff_vsrc_testsrc = {
+ .name = "testsrc",
+ .description = NULL_IF_CONFIG_SMALL("Generate test pattern."),
+ .priv_size = sizeof(TestSourceContext),
+ .priv_class = &testsrc_class,
+ .init = test_init,
+ .uninit = uninit,
+ .query_formats = test_query_formats,
+ .inputs = NULL,
+ .outputs = avfilter_vsrc_testsrc_outputs,
};
#endif /* CONFIG_TESTSRC_FILTER */
@@ -606,14 +735,13 @@ static void rgbtest_fill_picture(AVFilterContext *ctx, AVFrame *frame)
}
}
-static av_cold int rgbtest_init(AVFilterContext *ctx, const char *args)
+static av_cold int rgbtest_init(AVFilterContext *ctx)
{
TestSourceContext *test = ctx->priv;
test->draw_once = 1;
- test->class = &rgbtestsrc_class;
test->fill_picture_fn = rgbtest_fill_picture;
- return init(ctx, args);
+ return init(ctx);
}
static int rgbtest_query_formats(AVFilterContext *ctx)
@@ -648,146 +776,296 @@ static const AVFilterPad avfilter_vsrc_rgbtestsrc_outputs[] = {
{ NULL }
};
-AVFilter avfilter_vsrc_rgbtestsrc = {
- .name = "rgbtestsrc",
- .description = NULL_IF_CONFIG_SMALL("Generate RGB test pattern."),
- .priv_size = sizeof(TestSourceContext),
- .init = rgbtest_init,
- .uninit = uninit,
-
- .query_formats = rgbtest_query_formats,
-
- .inputs = NULL,
-
- .outputs = avfilter_vsrc_rgbtestsrc_outputs,
- .priv_class = &rgbtestsrc_class,
+AVFilter ff_vsrc_rgbtestsrc = {
+ .name = "rgbtestsrc",
+ .description = NULL_IF_CONFIG_SMALL("Generate RGB test pattern."),
+ .priv_size = sizeof(TestSourceContext),
+ .priv_class = &rgbtestsrc_class,
+ .init = rgbtest_init,
+ .uninit = uninit,
+ .query_formats = rgbtest_query_formats,
+ .inputs = NULL,
+ .outputs = avfilter_vsrc_rgbtestsrc_outputs,
};
#endif /* CONFIG_RGBTESTSRC_FILTER */
-#if CONFIG_SMPTEBARS_FILTER
-
-#define smptebars_options options
-AVFILTER_DEFINE_CLASS(smptebars);
+#if CONFIG_SMPTEBARS_FILTER || CONFIG_SMPTEHDBARS_FILTER
static const uint8_t rainbow[7][4] = {
- { 191, 191, 191, 255 }, /* gray */
- { 191, 191, 0, 255 }, /* yellow */
- { 0, 191, 191, 255 }, /* cyan */
- { 0, 191, 0, 255 }, /* green */
- { 191, 0, 191, 255 }, /* magenta */
- { 191, 0, 0, 255 }, /* red */
- { 0, 0, 191, 255 }, /* blue */
+ { 180, 128, 128, 255 }, /* gray */
+ { 168, 44, 136, 255 }, /* yellow */
+ { 145, 147, 44, 255 }, /* cyan */
+ { 133, 63, 52, 255 }, /* green */
+ { 63, 193, 204, 255 }, /* magenta */
+ { 51, 109, 212, 255 }, /* red */
+ { 28, 212, 120, 255 }, /* blue */
};
static const uint8_t wobnair[7][4] = {
- { 0, 0, 191, 255 }, /* blue */
- { 19, 19, 19, 255 }, /* 7.5% intensity black */
- { 191, 0, 191, 255 }, /* magenta */
- { 19, 19, 19, 255 }, /* 7.5% intensity black */
- { 0, 191, 191, 255 }, /* cyan */
- { 19, 19, 19, 255 }, /* 7.5% intensity black */
- { 191, 191, 191, 255 }, /* gray */
+ { 32, 240, 118, 255 }, /* blue */
+ { 19, 128, 128, 255 }, /* 7.5% intensity black */
+ { 54, 184, 198, 255 }, /* magenta */
+ { 19, 128, 128, 255 }, /* 7.5% intensity black */
+ { 188, 154, 16, 255 }, /* cyan */
+ { 19, 128, 128, 255 }, /* 7.5% intensity black */
+ { 191, 128, 128, 255 }, /* gray */
};
-static const uint8_t white[4] = { 255, 255, 255, 255 };
-static const uint8_t black[4] = { 19, 19, 19, 255 }; /* 7.5% intensity black */
+static const uint8_t white[4] = { 235, 128, 128, 255 };
+static const uint8_t black[4] = { 19, 128, 128, 255 }; /* 7.5% intensity black */
/* pluge pulses */
-static const uint8_t neg4ire[4] = { 9, 9, 9, 255 }; /* 3.5% intensity black */
-static const uint8_t pos4ire[4] = { 29, 29, 29, 255 }; /* 11.5% intensity black */
+static const uint8_t neg4ire[4] = { 9, 128, 128, 255 }; /* 3.5% intensity black */
+static const uint8_t pos4ire[4] = { 29, 128, 128, 255 }; /* 11.5% intensity black */
/* fudged Q/-I */
-static const uint8_t i_pixel[4] = { 0, 68, 130, 255 };
-static const uint8_t q_pixel[4] = { 67, 0, 130, 255 };
+static const uint8_t i_pixel[4] = { 61, 153, 99, 255 };
+static const uint8_t q_pixel[4] = { 35, 174, 152, 255 };
+
+static const uint8_t gray40[4] = { 104, 128, 128, 255 };
+static const uint8_t gray15[4] = { 49, 128, 128, 255 };
+static const uint8_t cyan[4] = { 188, 154, 16, 255 };
+static const uint8_t yellow[4] = { 219, 16, 138, 255 };
+static const uint8_t blue[4] = { 32, 240, 118, 255 };
+static const uint8_t red[4] = { 63, 102, 240, 255 };
+static const uint8_t black0[4] = { 16, 128, 128, 255 };
+static const uint8_t black2[4] = { 20, 128, 128, 255 };
+static const uint8_t black4[4] = { 25, 128, 128, 255 };
+static const uint8_t neg2[4] = { 12, 128, 128, 255 };
+
+static void draw_bar(TestSourceContext *test, const uint8_t color[4],
+ unsigned x, unsigned y, unsigned w, unsigned h,
+ AVFrame *frame)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+ uint8_t *p, *p0;
+ int plane;
+
+ x = FFMIN(x, test->w - 1);
+ y = FFMIN(y, test->h - 1);
+ w = FFMIN(w, test->w - x);
+ h = FFMIN(h, test->h - y);
+
+ av_assert0(x + w <= test->w);
+ av_assert0(y + h <= test->h);
+
+ for (plane = 0; frame->data[plane]; plane++) {
+ const int c = color[plane];
+ const int linesize = frame->linesize[plane];
+ int i, px, py, pw, ph;
+
+ if (plane == 1 || plane == 2) {
+ px = x >> desc->log2_chroma_w;
+ pw = w >> desc->log2_chroma_w;
+ py = y >> desc->log2_chroma_h;
+ ph = h >> desc->log2_chroma_h;
+ } else {
+ px = x;
+ pw = w;
+ py = y;
+ ph = h;
+ }
+
+ p0 = p = frame->data[plane] + py * linesize + px;
+ memset(p, c, pw);
+ p += linesize;
+ for (i = 1; i < ph; i++, p += linesize)
+ memcpy(p, p0, pw);
+ }
+}
+
+static int smptebars_query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_NONE,
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static const AVFilterPad smptebars_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+#if CONFIG_SMPTEBARS_FILTER
+
+#define smptebars_options options
+AVFILTER_DEFINE_CLASS(smptebars);
static void smptebars_fill_picture(AVFilterContext *ctx, AVFrame *picref)
{
TestSourceContext *test = ctx->priv;
- FFDrawColor color;
- int r_w, r_h, w_h, p_w, p_h, i, x = 0;
+ int r_w, r_h, w_h, p_w, p_h, i, tmp, x = 0;
+ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(picref->format);
- r_w = (test->w + 6) / 7;
- r_h = test->h * 2 / 3;
- w_h = test->h * 3 / 4 - r_h;
- p_w = r_w * 5 / 4;
- p_h = test->h - w_h - r_h;
+ av_frame_set_colorspace(picref, AVCOL_SPC_BT470BG);
-#define DRAW_COLOR(rgba, x, y, w, h) \
- ff_draw_color(&test->draw, &color, rgba); \
- ff_fill_rectangle(&test->draw, &color, \
- picref->data, picref->linesize, x, y, w, h) \
+ r_w = FFALIGN((test->w + 6) / 7, 1 << pixdesc->log2_chroma_w);
+ r_h = FFALIGN(test->h * 2 / 3, 1 << pixdesc->log2_chroma_h);
+ w_h = FFALIGN(test->h * 3 / 4 - r_h, 1 << pixdesc->log2_chroma_h);
+ p_w = FFALIGN(r_w * 5 / 4, 1 << pixdesc->log2_chroma_w);
+ p_h = test->h - w_h - r_h;
for (i = 0; i < 7; i++) {
- DRAW_COLOR(rainbow[i], x, 0, FFMIN(r_w, test->w - x), r_h);
- DRAW_COLOR(wobnair[i], x, r_h, FFMIN(r_w, test->w - x), w_h);
+ draw_bar(test, rainbow[i], x, 0, r_w, r_h, picref);
+ draw_bar(test, wobnair[i], x, r_h, r_w, w_h, picref);
x += r_w;
}
x = 0;
- DRAW_COLOR(i_pixel, x, r_h + w_h, p_w, p_h);
+ draw_bar(test, i_pixel, x, r_h + w_h, p_w, p_h, picref);
x += p_w;
- DRAW_COLOR(white, x, r_h + w_h, p_w, p_h);
+ draw_bar(test, white, x, r_h + w_h, p_w, p_h, picref);
x += p_w;
- DRAW_COLOR(q_pixel, x, r_h + w_h, p_w, p_h);
+ draw_bar(test, q_pixel, x, r_h + w_h, p_w, p_h, picref);
x += p_w;
- DRAW_COLOR(black, x, r_h + w_h, 5 * r_w - x, p_h);
- x += 5 * r_w - x;
- DRAW_COLOR(neg4ire, x, r_h + w_h, r_w / 3, p_h);
- x += r_w / 3;
- DRAW_COLOR(black, x, r_h + w_h, r_w / 3, p_h);
- x += r_w / 3;
- DRAW_COLOR(pos4ire, x, r_h + w_h, r_w / 3, p_h);
- x += r_w / 3;
- DRAW_COLOR(black, x, r_h + w_h, test->w - x, p_h);
+ tmp = FFALIGN(5 * r_w - x, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, black, x, r_h + w_h, tmp, p_h, picref);
+ x += tmp;
+ tmp = FFALIGN(r_w / 3, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, neg4ire, x, r_h + w_h, tmp, p_h, picref);
+ x += tmp;
+ draw_bar(test, black, x, r_h + w_h, tmp, p_h, picref);
+ x += tmp;
+ draw_bar(test, pos4ire, x, r_h + w_h, tmp, p_h, picref);
+ x += tmp;
+ draw_bar(test, black, x, r_h + w_h, test->w - x, p_h, picref);
}
-static av_cold int smptebars_init(AVFilterContext *ctx, const char *args)
+static av_cold int smptebars_init(AVFilterContext *ctx)
{
TestSourceContext *test = ctx->priv;
- test->class = &smptebars_class;
test->fill_picture_fn = smptebars_fill_picture;
test->draw_once = 1;
- return init(ctx, args);
+ return init(ctx);
}
-static int smptebars_query_formats(AVFilterContext *ctx)
-{
- ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
- return 0;
-}
+AVFilter ff_vsrc_smptebars = {
+ .name = "smptebars",
+ .description = NULL_IF_CONFIG_SMALL("Generate SMPTE color bars."),
+ .priv_size = sizeof(TestSourceContext),
+ .priv_class = &smptebars_class,
+ .init = smptebars_init,
+ .uninit = uninit,
+ .query_formats = smptebars_query_formats,
+ .inputs = NULL,
+ .outputs = smptebars_outputs,
+};
+
+#endif /* CONFIG_SMPTEBARS_FILTER */
+
+#if CONFIG_SMPTEHDBARS_FILTER
-static int smptebars_config_props(AVFilterLink *outlink)
+#define smptehdbars_options options
+AVFILTER_DEFINE_CLASS(smptehdbars);
+
+static void smptehdbars_fill_picture(AVFilterContext *ctx, AVFrame *picref)
{
- AVFilterContext *ctx = outlink->src;
TestSourceContext *test = ctx->priv;
+ int d_w, r_w, r_h, l_w, i, tmp, x = 0, y = 0;
+ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(picref->format);
- ff_draw_init(&test->draw, outlink->format, 0);
+ av_frame_set_colorspace(picref, AVCOL_SPC_BT709);
- return config_props(outlink);
+ d_w = FFALIGN(test->w / 8, 1 << pixdesc->log2_chroma_w);
+ r_h = FFALIGN(test->h * 7 / 12, 1 << pixdesc->log2_chroma_h);
+ draw_bar(test, gray40, x, 0, d_w, r_h, picref);
+ x += d_w;
+
+ r_w = FFALIGN((((test->w + 3) / 4) * 3) / 7, 1 << pixdesc->log2_chroma_w);
+ for (i = 0; i < 7; i++) {
+ draw_bar(test, rainbow[i], x, 0, r_w, r_h, picref);
+ x += r_w;
+ }
+ draw_bar(test, gray40, x, 0, test->w - x, r_h, picref);
+ y = r_h;
+ r_h = FFALIGN(test->h / 12, 1 << pixdesc->log2_chroma_h);
+ draw_bar(test, cyan, 0, y, d_w, r_h, picref);
+ x = d_w;
+ draw_bar(test, i_pixel, x, y, r_w, r_h, picref);
+ x += r_w;
+ tmp = r_w * 6;
+ draw_bar(test, rainbow[0], x, y, tmp, r_h, picref);
+ x += tmp;
+ l_w = x;
+ draw_bar(test, blue, x, y, test->w - x, r_h, picref);
+ y += r_h;
+ draw_bar(test, yellow, 0, y, d_w, r_h, picref);
+ x = d_w;
+ draw_bar(test, q_pixel, x, y, r_w, r_h, picref);
+ x += r_w;
+
+ for (i = 0; i < tmp; i += 1 << pixdesc->log2_chroma_w) {
+ uint8_t yramp[4] = {0};
+
+ yramp[0] = i * 255 / tmp;
+ yramp[1] = 128;
+ yramp[2] = 128;
+ yramp[3] = 255;
+
+ draw_bar(test, yramp, x, y, 1 << pixdesc->log2_chroma_w, r_h, picref);
+ x += 1 << pixdesc->log2_chroma_w;
+ }
+ draw_bar(test, red, x, y, test->w - x, r_h, picref);
+ y += r_h;
+ draw_bar(test, gray15, 0, y, d_w, test->h - y, picref);
+ x = d_w;
+ tmp = FFALIGN(r_w * 3 / 2, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, black0, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ tmp = FFALIGN(r_w * 2, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, white, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ tmp = FFALIGN(r_w * 5 / 6, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, black0, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ tmp = FFALIGN(r_w / 3, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, neg2, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ draw_bar(test, black0, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ draw_bar(test, black2, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ draw_bar(test, black0, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ draw_bar(test, black4, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ r_w = l_w - x;
+ draw_bar(test, black0, x, y, r_w, test->h - y, picref);
+ x += r_w;
+ draw_bar(test, gray15, x, y, test->w - x, test->h - y, picref);
}
-static const AVFilterPad smptebars_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .request_frame = request_frame,
- .config_props = smptebars_config_props,
- },
- { NULL }
-};
+static av_cold int smptehdbars_init(AVFilterContext *ctx)
+{
+ TestSourceContext *test = ctx->priv;
-AVFilter avfilter_vsrc_smptebars = {
- .name = "smptebars",
- .description = NULL_IF_CONFIG_SMALL("Generate SMPTE color bars."),
- .priv_size = sizeof(TestSourceContext),
- .init = smptebars_init,
- .uninit = uninit,
+ test->fill_picture_fn = smptehdbars_fill_picture;
+ test->draw_once = 1;
+ return init(ctx);
+}
+AVFilter ff_vsrc_smptehdbars = {
+ .name = "smptehdbars",
+ .description = NULL_IF_CONFIG_SMALL("Generate SMPTE HD color bars."),
+ .priv_size = sizeof(TestSourceContext),
+ .priv_class = &smptehdbars_class,
+ .init = smptehdbars_init,
+ .uninit = uninit,
.query_formats = smptebars_query_formats,
.inputs = NULL,
.outputs = smptebars_outputs,
- .priv_class = &smptebars_class,
};
-#endif /* CONFIG_SMPTEBARS_FILTER */
+#endif /* CONFIG_SMPTEHDBARS_FILTER */
+#endif /* CONFIG_SMPTEBARS_FILTER || CONFIG_SMPTEHDBARS_FILTER */
diff --git a/ffmpeg/libavfilter/x86/Makefile b/ffmpeg/libavfilter/x86/Makefile
index cd97347..be4ad83 100644
--- a/ffmpeg/libavfilter/x86/Makefile
+++ b/ffmpeg/libavfilter/x86/Makefile
@@ -1,8 +1,11 @@
-OBJS-$(CONFIG_GRADFUN_FILTER) += x86/vf_gradfun.o
+OBJS-$(CONFIG_GRADFUN_FILTER) += x86/vf_gradfun_init.o
OBJS-$(CONFIG_HQDN3D_FILTER) += x86/vf_hqdn3d_init.o
+OBJS-$(CONFIG_PULLUP_FILTER) += x86/vf_pullup_init.o
+OBJS-$(CONFIG_SPP_FILTER) += x86/vf_spp.o
OBJS-$(CONFIG_VOLUME_FILTER) += x86/af_volume_init.o
-OBJS-$(CONFIG_YADIF_FILTER) += x86/vf_yadif_init.o
+YASM-OBJS-$(CONFIG_GRADFUN_FILTER) += x86/vf_gradfun.o
YASM-OBJS-$(CONFIG_HQDN3D_FILTER) += x86/vf_hqdn3d.o
+YASM-OBJS-$(CONFIG_PULLUP_FILTER) += x86/vf_pullup.o
YASM-OBJS-$(CONFIG_VOLUME_FILTER) += x86/af_volume.o
YASM-OBJS-$(CONFIG_YADIF_FILTER) += x86/vf_yadif.o x86/yadif-16.o x86/yadif-10.o
diff --git a/ffmpeg/libavfilter/x86/af_volume_init.c b/ffmpeg/libavfilter/x86/af_volume_init.c
index beee8ca..57c7eab 100644
--- a/ffmpeg/libavfilter/x86/af_volume_init.c
+++ b/ffmpeg/libavfilter/x86/af_volume_init.c
@@ -17,6 +17,7 @@
*/
#include "config.h"
+#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/samplefmt.h"
#include "libavutil/x86/cpu.h"
@@ -32,26 +33,26 @@ void ff_scale_samples_s32_ssse3_atom(uint8_t *dst, const uint8_t *src, int len,
void ff_scale_samples_s32_avx(uint8_t *dst, const uint8_t *src, int len,
int volume);
-void ff_volume_init_x86(VolumeContext *vol)
+av_cold void ff_volume_init_x86(VolumeContext *vol)
{
- int mm_flags = av_get_cpu_flags();
+ int cpu_flags = av_get_cpu_flags();
enum AVSampleFormat sample_fmt = av_get_packed_sample_fmt(vol->sample_fmt);
if (sample_fmt == AV_SAMPLE_FMT_S16) {
- if (EXTERNAL_SSE2(mm_flags) && vol->volume_i < 32768) {
+ if (EXTERNAL_SSE2(cpu_flags) && vol->volume_i < 32768) {
vol->scale_samples = ff_scale_samples_s16_sse2;
vol->samples_align = 8;
}
} else if (sample_fmt == AV_SAMPLE_FMT_S32) {
- if (EXTERNAL_SSE2(mm_flags)) {
+ if (EXTERNAL_SSE2(cpu_flags)) {
vol->scale_samples = ff_scale_samples_s32_sse2;
vol->samples_align = 4;
}
- if (EXTERNAL_SSSE3(mm_flags) && mm_flags & AV_CPU_FLAG_ATOM) {
+ if (EXTERNAL_SSSE3(cpu_flags) && cpu_flags & AV_CPU_FLAG_ATOM) {
vol->scale_samples = ff_scale_samples_s32_ssse3_atom;
vol->samples_align = 4;
}
- if (EXTERNAL_AVX(mm_flags)) {
+ if (EXTERNAL_AVX(cpu_flags)) {
vol->scale_samples = ff_scale_samples_s32_avx;
vol->samples_align = 8;
}
diff --git a/ffmpeg/libavfilter/x86/vf_gradfun.c b/ffmpeg/libavfilter/x86/vf_gradfun.c
deleted file mode 100644
index 214e764..0000000
--- a/ffmpeg/libavfilter/x86/vf_gradfun.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Copyright (C) 2009 Loren Merritt <lorenm@u.washignton.edu>
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "libavutil/attributes.h"
-#include "libavutil/cpu.h"
-#include "libavutil/mem.h"
-#include "libavutil/x86/asm.h"
-#include "libavfilter/gradfun.h"
-
-#if HAVE_INLINE_ASM
-
-DECLARE_ALIGNED(16, static const uint16_t, pw_7f)[8] = {0x7F,0x7F,0x7F,0x7F,0x7F,0x7F,0x7F,0x7F};
-DECLARE_ALIGNED(16, static const uint16_t, pw_ff)[8] = {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
-
-#if HAVE_MMXEXT_INLINE
-static void gradfun_filter_line_mmxext(uint8_t *dst, const uint8_t *src, const uint16_t *dc,
- int width, int thresh,
- const uint16_t *dithers)
-{
- intptr_t x;
- if (width & 3) {
- x = width & ~3;
- ff_gradfun_filter_line_c(dst + x, src + x, dc + x / 2, width - x, thresh, dithers);
- width = x;
- }
- x = -width;
- __asm__ volatile(
- "movd %4, %%mm5 \n"
- "pxor %%mm7, %%mm7 \n"
- "pshufw $0, %%mm5, %%mm5 \n"
- "movq %6, %%mm6 \n"
- "movq (%5), %%mm3 \n"
- "movq 8(%5), %%mm4 \n"
-
- "1: \n"
- "movd (%2,%0), %%mm0 \n"
- "movd (%3,%0), %%mm1 \n"
- "punpcklbw %%mm7, %%mm0 \n"
- "punpcklwd %%mm1, %%mm1 \n"
- "psllw $7, %%mm0 \n"
- "pxor %%mm2, %%mm2 \n"
- "psubw %%mm0, %%mm1 \n" // delta = dc - pix
- "psubw %%mm1, %%mm2 \n"
- "pmaxsw %%mm1, %%mm2 \n"
- "pmulhuw %%mm5, %%mm2 \n" // m = abs(delta) * thresh >> 16
- "psubw %%mm6, %%mm2 \n"
- "pminsw %%mm7, %%mm2 \n" // m = -max(0, 127-m)
- "pmullw %%mm2, %%mm2 \n"
- "paddw %%mm3, %%mm0 \n" // pix += dither
- "psllw $2, %%mm1 \n" // m = m*m*delta >> 14
- "pmulhw %%mm2, %%mm1 \n"
- "paddw %%mm1, %%mm0 \n" // pix += m
- "psraw $7, %%mm0 \n"
- "packuswb %%mm0, %%mm0 \n"
- "movd %%mm0, (%1,%0) \n" // dst = clip(pix>>7)
- "add $4, %0 \n"
- "jnl 2f \n"
-
- "movd (%2,%0), %%mm0 \n"
- "movd (%3,%0), %%mm1 \n"
- "punpcklbw %%mm7, %%mm0 \n"
- "punpcklwd %%mm1, %%mm1 \n"
- "psllw $7, %%mm0 \n"
- "pxor %%mm2, %%mm2 \n"
- "psubw %%mm0, %%mm1 \n" // delta = dc - pix
- "psubw %%mm1, %%mm2 \n"
- "pmaxsw %%mm1, %%mm2 \n"
- "pmulhuw %%mm5, %%mm2 \n" // m = abs(delta) * thresh >> 16
- "psubw %%mm6, %%mm2 \n"
- "pminsw %%mm7, %%mm2 \n" // m = -max(0, 127-m)
- "pmullw %%mm2, %%mm2 \n"
- "paddw %%mm4, %%mm0 \n" // pix += dither
- "psllw $2, %%mm1 \n" // m = m*m*delta >> 14
- "pmulhw %%mm2, %%mm1 \n"
- "paddw %%mm1, %%mm0 \n" // pix += m
- "psraw $7, %%mm0 \n"
- "packuswb %%mm0, %%mm0 \n"
- "movd %%mm0, (%1,%0) \n" // dst = clip(pix>>7)
- "add $4, %0 \n"
- "jl 1b \n"
-
- "2: \n"
- "emms \n"
- :"+r"(x)
- :"r"(dst+width), "r"(src+width), "r"(dc+width/2),
- "rm"(thresh), "r"(dithers), "m"(*pw_7f)
- :"memory"
- );
-}
-#endif
-
-#if HAVE_SSSE3_INLINE
-static void gradfun_filter_line_ssse3(uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers)
-{
- intptr_t x;
- if (width & 7) {
- // could be 10% faster if I somehow eliminated this
- x = width & ~7;
- ff_gradfun_filter_line_c(dst + x, src + x, dc + x / 2, width - x, thresh, dithers);
- width = x;
- }
- x = -width;
- __asm__ volatile(
- "movd %4, %%xmm5 \n"
- "pxor %%xmm7, %%xmm7 \n"
- "pshuflw $0,%%xmm5, %%xmm5 \n"
- "movdqa %6, %%xmm6 \n"
- "punpcklqdq %%xmm5, %%xmm5 \n"
- "movdqa %5, %%xmm4 \n"
- "1: \n"
- "movq (%2,%0), %%xmm0 \n"
- "movq (%3,%0), %%xmm1 \n"
- "punpcklbw %%xmm7, %%xmm0 \n"
- "punpcklwd %%xmm1, %%xmm1 \n"
- "psllw $7, %%xmm0 \n"
- "psubw %%xmm0, %%xmm1 \n" // delta = dc - pix
- "pabsw %%xmm1, %%xmm2 \n"
- "pmulhuw %%xmm5, %%xmm2 \n" // m = abs(delta) * thresh >> 16
- "psubw %%xmm6, %%xmm2 \n"
- "pminsw %%xmm7, %%xmm2 \n" // m = -max(0, 127-m)
- "pmullw %%xmm2, %%xmm2 \n"
- "psllw $2, %%xmm1 \n"
- "paddw %%xmm4, %%xmm0 \n" // pix += dither
- "pmulhw %%xmm2, %%xmm1 \n" // m = m*m*delta >> 14
- "paddw %%xmm1, %%xmm0 \n" // pix += m
- "psraw $7, %%xmm0 \n"
- "packuswb %%xmm0, %%xmm0 \n"
- "movq %%xmm0, (%1,%0) \n" // dst = clip(pix>>7)
- "add $8, %0 \n"
- "jl 1b \n"
- :"+&r"(x)
- :"r"(dst+width), "r"(src+width), "r"(dc+width/2),
- "rm"(thresh), "m"(*dithers), "m"(*pw_7f)
- :"memory"
- );
-}
-#endif /* HAVE_SSSE3_INLINE */
-
-#if HAVE_SSE2_INLINE
-static void gradfun_blur_line_sse2(uint16_t *dc, uint16_t *buf, const uint16_t *buf1, const uint8_t *src, int src_linesize, int width)
-{
-#define BLURV(load)\
- intptr_t x = -2*width;\
- __asm__ volatile(\
- "movdqa %6, %%xmm7 \n"\
- "1: \n"\
- load" (%4,%0), %%xmm0 \n"\
- load" (%5,%0), %%xmm1 \n"\
- "movdqa %%xmm0, %%xmm2 \n"\
- "movdqa %%xmm1, %%xmm3 \n"\
- "psrlw $8, %%xmm0 \n"\
- "psrlw $8, %%xmm1 \n"\
- "pand %%xmm7, %%xmm2 \n"\
- "pand %%xmm7, %%xmm3 \n"\
- "paddw %%xmm1, %%xmm0 \n"\
- "paddw %%xmm3, %%xmm2 \n"\
- "paddw %%xmm2, %%xmm0 \n"\
- "paddw (%2,%0), %%xmm0 \n"\
- "movdqa (%1,%0), %%xmm1 \n"\
- "movdqa %%xmm0, (%1,%0) \n"\
- "psubw %%xmm1, %%xmm0 \n"\
- "movdqa %%xmm0, (%3,%0) \n"\
- "add $16, %0 \n"\
- "jl 1b \n"\
- :"+&r"(x)\
- :"r"(buf+width),\
- "r"(buf1+width),\
- "r"(dc+width),\
- "r"(src+width*2),\
- "r"(src+width*2+src_linesize),\
- "m"(*pw_ff)\
- :"memory"\
- );
- if (((intptr_t) src | src_linesize) & 15) {
- BLURV("movdqu");
- } else {
- BLURV("movdqa");
- }
-}
-#endif /* HAVE_SSE2_INLINE */
-
-#endif /* HAVE_INLINE_ASM */
-
-av_cold void ff_gradfun_init_x86(GradFunContext *gf)
-{
- int cpu_flags = av_get_cpu_flags();
-
-#if HAVE_MMXEXT_INLINE
- if (cpu_flags & AV_CPU_FLAG_MMXEXT)
- gf->filter_line = gradfun_filter_line_mmxext;
-#endif
-#if HAVE_SSSE3_INLINE
- if (cpu_flags & AV_CPU_FLAG_SSSE3)
- gf->filter_line = gradfun_filter_line_ssse3;
-#endif
-#if HAVE_SSE2_INLINE
- if (cpu_flags & AV_CPU_FLAG_SSE2)
- gf->blur_line = gradfun_blur_line_sse2;
-#endif
-}
diff --git a/ffmpeg/libavfilter/x86/vf_hqdn3d_init.c b/ffmpeg/libavfilter/x86/vf_hqdn3d_init.c
index 4abb878..b63916b 100644
--- a/ffmpeg/libavfilter/x86/vf_hqdn3d_init.c
+++ b/ffmpeg/libavfilter/x86/vf_hqdn3d_init.c
@@ -25,17 +25,25 @@
#include "libavfilter/vf_hqdn3d.h"
#include "config.h"
-void ff_hqdn3d_row_8_x86(uint8_t *src, uint8_t *dst, uint16_t *line_ant, uint16_t *frame_ant, ptrdiff_t w, int16_t *spatial, int16_t *temporal);
-void ff_hqdn3d_row_9_x86(uint8_t *src, uint8_t *dst, uint16_t *line_ant, uint16_t *frame_ant, ptrdiff_t w, int16_t *spatial, int16_t *temporal);
-void ff_hqdn3d_row_10_x86(uint8_t *src, uint8_t *dst, uint16_t *line_ant, uint16_t *frame_ant, ptrdiff_t w, int16_t *spatial, int16_t *temporal);
-void ff_hqdn3d_row_16_x86(uint8_t *src, uint8_t *dst, uint16_t *line_ant, uint16_t *frame_ant, ptrdiff_t w, int16_t *spatial, int16_t *temporal);
+void ff_hqdn3d_row_8_x86(uint8_t *src, uint8_t *dst, uint16_t *line_ant,
+ uint16_t *frame_ant, ptrdiff_t w, int16_t *spatial,
+ int16_t *temporal);
+void ff_hqdn3d_row_9_x86(uint8_t *src, uint8_t *dst, uint16_t *line_ant,
+ uint16_t *frame_ant, ptrdiff_t w, int16_t *spatial,
+ int16_t *temporal);
+void ff_hqdn3d_row_10_x86(uint8_t *src, uint8_t *dst, uint16_t *line_ant,
+ uint16_t *frame_ant, ptrdiff_t w, int16_t *spatial,
+ int16_t *temporal);
+void ff_hqdn3d_row_16_x86(uint8_t *src, uint8_t *dst, uint16_t *line_ant,
+ uint16_t *frame_ant, ptrdiff_t w, int16_t *spatial,
+ int16_t *temporal);
av_cold void ff_hqdn3d_init_x86(HQDN3DContext *hqdn3d)
{
#if HAVE_YASM
- hqdn3d->denoise_row[ 8] = ff_hqdn3d_row_8_x86;
- hqdn3d->denoise_row[ 9] = ff_hqdn3d_row_9_x86;
+ hqdn3d->denoise_row[8] = ff_hqdn3d_row_8_x86;
+ hqdn3d->denoise_row[9] = ff_hqdn3d_row_9_x86;
hqdn3d->denoise_row[10] = ff_hqdn3d_row_10_x86;
hqdn3d->denoise_row[16] = ff_hqdn3d_row_16_x86;
-#endif
+#endif /* HAVE_YASM */
}
diff --git a/ffmpeg/libavfilter/x86/vf_yadif_init.c b/ffmpeg/libavfilter/x86/vf_yadif_init.c
deleted file mode 100644
index 58f2fc6..0000000
--- a/ffmpeg/libavfilter/x86/vf_yadif_init.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include "libavutil/attributes.h"
-#include "libavutil/cpu.h"
-#include "libavutil/mem.h"
-#include "libavutil/x86/asm.h"
-#include "libavutil/x86/cpu.h"
-#include "libavcodec/x86/dsputil_mmx.h"
-#include "libavfilter/yadif.h"
-
-void ff_yadif_filter_line_mmxext(void *dst, void *prev, void *cur,
- void *next, int w, int prefs,
- int mrefs, int parity, int mode);
-void ff_yadif_filter_line_sse2(void *dst, void *prev, void *cur,
- void *next, int w, int prefs,
- int mrefs, int parity, int mode);
-void ff_yadif_filter_line_ssse3(void *dst, void *prev, void *cur,
- void *next, int w, int prefs,
- int mrefs, int parity, int mode);
-
-void ff_yadif_filter_line_16bit_mmxext(void *dst, void *prev, void *cur,
- void *next, int w, int prefs,
- int mrefs, int parity, int mode);
-void ff_yadif_filter_line_16bit_sse2(void *dst, void *prev, void *cur,
- void *next, int w, int prefs,
- int mrefs, int parity, int mode);
-void ff_yadif_filter_line_16bit_ssse3(void *dst, void *prev, void *cur,
- void *next, int w, int prefs,
- int mrefs, int parity, int mode);
-void ff_yadif_filter_line_16bit_sse4(void *dst, void *prev, void *cur,
- void *next, int w, int prefs,
- int mrefs, int parity, int mode);
-
-void ff_yadif_filter_line_10bit_mmxext(void *dst, void *prev, void *cur,
- void *next, int w, int prefs,
- int mrefs, int parity, int mode);
-void ff_yadif_filter_line_10bit_sse2(void *dst, void *prev, void *cur,
- void *next, int w, int prefs,
- int mrefs, int parity, int mode);
-void ff_yadif_filter_line_10bit_ssse3(void *dst, void *prev, void *cur,
- void *next, int w, int prefs,
- int mrefs, int parity, int mode);
-
-av_cold void ff_yadif_init_x86(YADIFContext *yadif)
-{
- int cpu_flags = av_get_cpu_flags();
- int bit_depth = (!yadif->csp) ? 8
- : yadif->csp->comp[0].depth_minus1 + 1;
-
-#if HAVE_YASM
- if (bit_depth >= 15) {
-#if ARCH_X86_32
- if (EXTERNAL_MMXEXT(cpu_flags))
- yadif->filter_line = ff_yadif_filter_line_16bit_mmxext;
-#endif /* ARCH_X86_32 */
- if (EXTERNAL_SSE2(cpu_flags))
- yadif->filter_line = ff_yadif_filter_line_16bit_sse2;
- if (EXTERNAL_SSSE3(cpu_flags))
- yadif->filter_line = ff_yadif_filter_line_16bit_ssse3;
- if (EXTERNAL_SSE4(cpu_flags))
- yadif->filter_line = ff_yadif_filter_line_16bit_sse4;
- } else if ( bit_depth >= 9 && bit_depth <= 14) {
-#if ARCH_X86_32
- if (EXTERNAL_MMXEXT(cpu_flags))
- yadif->filter_line = ff_yadif_filter_line_10bit_mmxext;
-#endif /* ARCH_X86_32 */
- if (EXTERNAL_SSE2(cpu_flags))
- yadif->filter_line = ff_yadif_filter_line_10bit_sse2;
- if (EXTERNAL_SSSE3(cpu_flags))
- yadif->filter_line = ff_yadif_filter_line_10bit_ssse3;
- } else {
-#if ARCH_X86_32
- if (EXTERNAL_MMXEXT(cpu_flags))
- yadif->filter_line = ff_yadif_filter_line_mmxext;
-#endif /* ARCH_X86_32 */
- if (EXTERNAL_SSE2(cpu_flags))
- yadif->filter_line = ff_yadif_filter_line_sse2;
- if (EXTERNAL_SSSE3(cpu_flags))
- yadif->filter_line = ff_yadif_filter_line_ssse3;
- }
-#endif /* HAVE_YASM */
-}
diff --git a/ffmpeg/libavfilter/yadif.h b/ffmpeg/libavfilter/yadif.h
index 3ddf005..5afe014 100644
--- a/ffmpeg/libavfilter/yadif.h
+++ b/ffmpeg/libavfilter/yadif.h
@@ -19,7 +19,6 @@
#ifndef AVFILTER_YADIF_H
#define AVFILTER_YADIF_H
-#include "libavutil/pixdesc.h"
#include "avfilter.h"
enum YADIFMode {
@@ -40,35 +39,37 @@ enum YADIFDeint {
YADIF_DEINT_INTERLACED = 1, ///< only deinterlace frames marked as interlaced
};
-typedef struct YADIFContext {
- const AVClass *class;
+void ff_yadif_filter_line_mmxext(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+void ff_yadif_filter_line_sse2(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+void ff_yadif_filter_line_ssse3(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
- enum YADIFMode mode;
- enum YADIFParity parity;
- enum YADIFDeint deint;
+void ff_yadif_filter_line_16bit_mmxext(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+void ff_yadif_filter_line_16bit_sse2(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+void ff_yadif_filter_line_16bit_ssse3(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+void ff_yadif_filter_line_16bit_sse4(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
- int frame_pending;
-
- AVFrame *cur;
- AVFrame *next;
- AVFrame *prev;
- AVFrame *out;
-
- /**
- * Required alignment for filter_line
- */
- void (*filter_line)(void *dst,
- void *prev, void *cur, void *next,
- int w, int prefs, int mrefs, int parity, int mode);
- void (*filter_edges)(void *dst, void *prev, void *cur, void *next,
- int w, int prefs, int mrefs, int parity, int mode);
-
- const AVPixFmtDescriptor *csp;
- int eof;
- uint8_t *temp_line;
- int temp_line_size;
-} YADIFContext;
-
-void ff_yadif_init_x86(YADIFContext *yadif);
+void ff_yadif_filter_line_10bit_mmxext(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+void ff_yadif_filter_line_10bit_sse2(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+void ff_yadif_filter_line_10bit_ssse3(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
#endif /* AVFILTER_YADIF_H */