From 150c9823e71a161e97003849cf8b2f55b21520bd Mon Sep 17 00:00:00 2001 From: Tim Redfern Date: Mon, 26 Aug 2013 15:10:18 +0100 Subject: adding ffmpeg specific version --- ffmpeg1/libavfilter/Makefile | 226 +++ ffmpeg1/libavfilter/af_aconvert.c | 185 ++ ffmpeg1/libavfilter/af_afade.c | 307 +++ ffmpeg1/libavfilter/af_aformat.c | 155 ++ ffmpeg1/libavfilter/af_amerge.c | 355 ++++ ffmpeg1/libavfilter/af_amix.c | 567 ++++++ ffmpeg1/libavfilter/af_anull.c | 58 + ffmpeg1/libavfilter/af_apad.c | 163 ++ ffmpeg1/libavfilter/af_aresample.c | 288 +++ ffmpeg1/libavfilter/af_asetnsamples.c | 211 ++ ffmpeg1/libavfilter/af_ashowinfo.c | 132 ++ ffmpeg1/libavfilter/af_astreamsync.c | 220 ++ ffmpeg1/libavfilter/af_asyncts.c | 307 +++ ffmpeg1/libavfilter/af_atempo.c | 1172 +++++++++++ ffmpeg1/libavfilter/af_biquads.c | 627 ++++++ ffmpeg1/libavfilter/af_channelmap.c | 416 ++++ ffmpeg1/libavfilter/af_channelsplit.c | 153 ++ ffmpeg1/libavfilter/af_earwax.c | 169 ++ ffmpeg1/libavfilter/af_join.c | 522 +++++ ffmpeg1/libavfilter/af_pan.c | 407 ++++ ffmpeg1/libavfilter/af_resample.c | 305 +++ ffmpeg1/libavfilter/af_silencedetect.c | 184 ++ ffmpeg1/libavfilter/af_volume.c | 305 +++ ffmpeg1/libavfilter/af_volume.h | 55 + ffmpeg1/libavfilter/af_volumedetect.c | 161 ++ ffmpeg1/libavfilter/all_channel_layouts.inc | 68 + ffmpeg1/libavfilter/allfilters.c | 202 ++ ffmpeg1/libavfilter/asink_anullsink.c | 48 + ffmpeg1/libavfilter/asrc_abuffer.h | 91 + ffmpeg1/libavfilter/asrc_aevalsrc.c | 263 +++ ffmpeg1/libavfilter/asrc_anullsrc.c | 141 ++ ffmpeg1/libavfilter/asrc_flite.c | 291 +++ ffmpeg1/libavfilter/asrc_sine.c | 230 +++ ffmpeg1/libavfilter/audio.c | 184 ++ ffmpeg1/libavfilter/audio.h | 83 + ffmpeg1/libavfilter/avcodec.c | 157 ++ ffmpeg1/libavfilter/avcodec.h | 110 + ffmpeg1/libavfilter/avf_concat.c | 425 ++++ ffmpeg1/libavfilter/avf_showspectrum.c | 515 +++++ ffmpeg1/libavfilter/avf_showwaves.c | 271 +++ ffmpeg1/libavfilter/avfilter.c | 777 +++++++ ffmpeg1/libavfilter/avfilter.h | 895 ++++++++ 
ffmpeg1/libavfilter/avfiltergraph.c | 1073 ++++++++++ ffmpeg1/libavfilter/avfiltergraph.h | 280 +++ ffmpeg1/libavfilter/bbox.c | 75 + ffmpeg1/libavfilter/bbox.h | 44 + ffmpeg1/libavfilter/buffer.c | 167 ++ ffmpeg1/libavfilter/bufferqueue.h | 121 ++ ffmpeg1/libavfilter/buffersink.c | 558 +++++ ffmpeg1/libavfilter/buffersink.h | 186 ++ ffmpeg1/libavfilter/buffersrc.c | 581 ++++++ ffmpeg1/libavfilter/buffersrc.h | 148 ++ ffmpeg1/libavfilter/drawutils.c | 552 +++++ ffmpeg1/libavfilter/drawutils.h | 155 ++ ffmpeg1/libavfilter/f_ebur128.c | 822 ++++++++ ffmpeg1/libavfilter/f_perms.c | 187 ++ ffmpeg1/libavfilter/f_select.c | 511 +++++ ffmpeg1/libavfilter/f_sendcmd.c | 598 ++++++ ffmpeg1/libavfilter/f_setpts.c | 269 +++ ffmpeg1/libavfilter/f_settb.c | 225 +++ ffmpeg1/libavfilter/fifo.c | 305 +++ ffmpeg1/libavfilter/filtfmts.c | 128 ++ ffmpeg1/libavfilter/formats.c | 661 ++++++ ffmpeg1/libavfilter/formats.h | 270 +++ ffmpeg1/libavfilter/gradfun.h | 47 + ffmpeg1/libavfilter/graphdump.c | 164 ++ ffmpeg1/libavfilter/graphparser.c | 601 ++++++ ffmpeg1/libavfilter/internal.h | 328 +++ ffmpeg1/libavfilter/lavfutils.c | 95 + ffmpeg1/libavfilter/lavfutils.h | 43 + ffmpeg1/libavfilter/libavfilter.pc | 14 + ffmpeg1/libavfilter/libavfilter.v | 5 + ffmpeg1/libavfilter/libmpcodecs/av_helpers.h | 27 + ffmpeg1/libavfilter/libmpcodecs/cpudetect.h | 60 + ffmpeg1/libavfilter/libmpcodecs/help_mp.h | 2126 ++++++++++++++++++++ ffmpeg1/libavfilter/libmpcodecs/img_format.c | 233 +++ ffmpeg1/libavfilter/libmpcodecs/img_format.h | 300 +++ ffmpeg1/libavfilter/libmpcodecs/libvo/fastmemcpy.h | 99 + ffmpeg1/libavfilter/libmpcodecs/libvo/video_out.h | 281 +++ ffmpeg1/libavfilter/libmpcodecs/mp_image.c | 253 +++ ffmpeg1/libavfilter/libmpcodecs/mp_image.h | 159 ++ ffmpeg1/libavfilter/libmpcodecs/mp_msg.h | 166 ++ ffmpeg1/libavfilter/libmpcodecs/mpbswap.h | 34 + ffmpeg1/libavfilter/libmpcodecs/mpc_info.h | 43 + ffmpeg1/libavfilter/libmpcodecs/pullup.c | 823 ++++++++ ffmpeg1/libavfilter/libmpcodecs/pullup.h 
| 102 + ffmpeg1/libavfilter/libmpcodecs/vf.h | 169 ++ ffmpeg1/libavfilter/libmpcodecs/vf_detc.c | 453 +++++ ffmpeg1/libavfilter/libmpcodecs/vf_dint.c | 214 ++ ffmpeg1/libavfilter/libmpcodecs/vf_divtc.c | 722 +++++++ ffmpeg1/libavfilter/libmpcodecs/vf_down3dright.c | 166 ++ ffmpeg1/libavfilter/libmpcodecs/vf_eq.c | 240 +++ ffmpeg1/libavfilter/libmpcodecs/vf_eq2.c | 519 +++++ ffmpeg1/libavfilter/libmpcodecs/vf_fil.c | 116 ++ ffmpeg1/libavfilter/libmpcodecs/vf_filmdint.c | 1461 ++++++++++++++ ffmpeg1/libavfilter/libmpcodecs/vf_fspp.c | 2118 +++++++++++++++++++ ffmpeg1/libavfilter/libmpcodecs/vf_ilpack.c | 458 +++++ ffmpeg1/libavfilter/libmpcodecs/vf_ivtc.c | 550 +++++ ffmpeg1/libavfilter/libmpcodecs/vf_mcdeint.c | 340 ++++ ffmpeg1/libavfilter/libmpcodecs/vf_noise.c | 475 +++++ ffmpeg1/libavfilter/libmpcodecs/vf_ow.c | 322 +++ ffmpeg1/libavfilter/libmpcodecs/vf_perspective.c | 345 ++++ ffmpeg1/libavfilter/libmpcodecs/vf_phase.c | 303 +++ ffmpeg1/libavfilter/libmpcodecs/vf_pp7.c | 491 +++++ ffmpeg1/libavfilter/libmpcodecs/vf_pullup.c | 316 +++ ffmpeg1/libavfilter/libmpcodecs/vf_qp.c | 178 ++ ffmpeg1/libavfilter/libmpcodecs/vf_sab.c | 324 +++ ffmpeg1/libavfilter/libmpcodecs/vf_scale.h | 34 + ffmpeg1/libavfilter/libmpcodecs/vf_softpulldown.c | 163 ++ ffmpeg1/libavfilter/libmpcodecs/vf_spp.c | 621 ++++++ ffmpeg1/libavfilter/libmpcodecs/vf_telecine.c | 158 ++ ffmpeg1/libavfilter/libmpcodecs/vf_tinterlace.c | 235 +++ ffmpeg1/libavfilter/libmpcodecs/vf_uspp.c | 393 ++++ ffmpeg1/libavfilter/libmpcodecs/vfcap.h | 56 + ffmpeg1/libavfilter/lswsutils.c | 50 + ffmpeg1/libavfilter/lswsutils.h | 38 + ffmpeg1/libavfilter/split.c | 134 ++ ffmpeg1/libavfilter/src_movie.c | 620 ++++++ ffmpeg1/libavfilter/transform.c | 202 ++ ffmpeg1/libavfilter/transform.h | 127 ++ ffmpeg1/libavfilter/version.h | 67 + ffmpeg1/libavfilter/vf_alphaextract.c | 131 ++ ffmpeg1/libavfilter/vf_alphamerge.c | 208 ++ ffmpeg1/libavfilter/vf_aspect.c | 214 ++ ffmpeg1/libavfilter/vf_bbox.c | 117 ++ 
ffmpeg1/libavfilter/vf_blackdetect.c | 219 ++ ffmpeg1/libavfilter/vf_blackframe.c | 160 ++ ffmpeg1/libavfilter/vf_blend.c | 471 +++++ ffmpeg1/libavfilter/vf_boxblur.c | 396 ++++ ffmpeg1/libavfilter/vf_colormatrix.c | 387 ++++ ffmpeg1/libavfilter/vf_copy.c | 71 + ffmpeg1/libavfilter/vf_crop.c | 354 ++++ ffmpeg1/libavfilter/vf_cropdetect.c | 248 +++ ffmpeg1/libavfilter/vf_curves.c | 363 ++++ ffmpeg1/libavfilter/vf_decimate.c | 259 +++ ffmpeg1/libavfilter/vf_delogo.c | 275 +++ ffmpeg1/libavfilter/vf_deshake.c | 585 ++++++ ffmpeg1/libavfilter/vf_drawbox.c | 187 ++ ffmpeg1/libavfilter/vf_drawtext.c | 1039 ++++++++++ ffmpeg1/libavfilter/vf_edgedetect.c | 330 +++ ffmpeg1/libavfilter/vf_fade.c | 248 +++ ffmpeg1/libavfilter/vf_field.c | 131 ++ ffmpeg1/libavfilter/vf_fieldorder.c | 216 ++ ffmpeg1/libavfilter/vf_format.c | 169 ++ ffmpeg1/libavfilter/vf_fps.c | 298 +++ ffmpeg1/libavfilter/vf_framestep.c | 124 ++ ffmpeg1/libavfilter/vf_frei0r.c | 526 +++++ ffmpeg1/libavfilter/vf_geq.c | 249 +++ ffmpeg1/libavfilter/vf_gradfun.c | 270 +++ ffmpeg1/libavfilter/vf_hflip.c | 176 ++ ffmpeg1/libavfilter/vf_histeq.c | 297 +++ ffmpeg1/libavfilter/vf_histogram.c | 345 ++++ ffmpeg1/libavfilter/vf_hqdn3d.c | 376 ++++ ffmpeg1/libavfilter/vf_hqdn3d.h | 41 + ffmpeg1/libavfilter/vf_hue.c | 385 ++++ ffmpeg1/libavfilter/vf_idet.c | 326 +++ ffmpeg1/libavfilter/vf_il.c | 230 +++ ffmpeg1/libavfilter/vf_kerndeint.c | 332 +++ ffmpeg1/libavfilter/vf_libopencv.c | 409 ++++ ffmpeg1/libavfilter/vf_lut.c | 445 ++++ ffmpeg1/libavfilter/vf_mp.c | 844 ++++++++ ffmpeg1/libavfilter/vf_noise.c | 368 ++++ ffmpeg1/libavfilter/vf_null.c | 55 + ffmpeg1/libavfilter/vf_overlay.c | 677 +++++++ ffmpeg1/libavfilter/vf_pad.c | 420 ++++ ffmpeg1/libavfilter/vf_pixdesctest.c | 136 ++ ffmpeg1/libavfilter/vf_pp.c | 174 ++ ffmpeg1/libavfilter/vf_removelogo.c | 567 ++++++ ffmpeg1/libavfilter/vf_scale.c | 448 +++++ ffmpeg1/libavfilter/vf_setfield.c | 115 ++ ffmpeg1/libavfilter/vf_showinfo.c | 111 + 
ffmpeg1/libavfilter/vf_smartblur.c | 317 +++ ffmpeg1/libavfilter/vf_stereo3d.c | 454 +++++ ffmpeg1/libavfilter/vf_subtitles.c | 375 ++++ ffmpeg1/libavfilter/vf_super2xsai.c | 352 ++++ ffmpeg1/libavfilter/vf_swapuv.c | 111 + ffmpeg1/libavfilter/vf_thumbnail.c | 238 +++ ffmpeg1/libavfilter/vf_tile.c | 251 +++ ffmpeg1/libavfilter/vf_tinterlace.c | 409 ++++ ffmpeg1/libavfilter/vf_transpose.c | 272 +++ ffmpeg1/libavfilter/vf_unsharp.c | 312 +++ ffmpeg1/libavfilter/vf_vflip.c | 111 + ffmpeg1/libavfilter/vf_yadif.c | 501 +++++ ffmpeg1/libavfilter/video.c | 157 ++ ffmpeg1/libavfilter/video.h | 41 + ffmpeg1/libavfilter/vsink_nullsink.c | 46 + ffmpeg1/libavfilter/vsrc_cellauto.c | 351 ++++ ffmpeg1/libavfilter/vsrc_life.c | 481 +++++ ffmpeg1/libavfilter/vsrc_mandelbrot.c | 415 ++++ ffmpeg1/libavfilter/vsrc_mptestsrc.c | 382 ++++ ffmpeg1/libavfilter/vsrc_testsrc.c | 793 ++++++++ ffmpeg1/libavfilter/x86/Makefile | 8 + ffmpeg1/libavfilter/x86/af_volume.asm | 140 ++ ffmpeg1/libavfilter/x86/af_volume_init.c | 59 + ffmpeg1/libavfilter/x86/vf_gradfun.c | 217 ++ ffmpeg1/libavfilter/x86/vf_hqdn3d.asm | 106 + ffmpeg1/libavfilter/x86/vf_hqdn3d_init.c | 41 + ffmpeg1/libavfilter/x86/vf_yadif.asm | 252 +++ ffmpeg1/libavfilter/x86/vf_yadif_init.c | 100 + ffmpeg1/libavfilter/x86/yadif-10.asm | 282 +++ ffmpeg1/libavfilter/x86/yadif-16.asm | 347 ++++ ffmpeg1/libavfilter/yadif.h | 74 + 202 files changed, 63015 insertions(+) create mode 100644 ffmpeg1/libavfilter/Makefile create mode 100644 ffmpeg1/libavfilter/af_aconvert.c create mode 100644 ffmpeg1/libavfilter/af_afade.c create mode 100644 ffmpeg1/libavfilter/af_aformat.c create mode 100644 ffmpeg1/libavfilter/af_amerge.c create mode 100644 ffmpeg1/libavfilter/af_amix.c create mode 100644 ffmpeg1/libavfilter/af_anull.c create mode 100644 ffmpeg1/libavfilter/af_apad.c create mode 100644 ffmpeg1/libavfilter/af_aresample.c create mode 100644 ffmpeg1/libavfilter/af_asetnsamples.c create mode 100644 ffmpeg1/libavfilter/af_ashowinfo.c create mode 
100644 ffmpeg1/libavfilter/af_astreamsync.c create mode 100644 ffmpeg1/libavfilter/af_asyncts.c create mode 100644 ffmpeg1/libavfilter/af_atempo.c create mode 100644 ffmpeg1/libavfilter/af_biquads.c create mode 100644 ffmpeg1/libavfilter/af_channelmap.c create mode 100644 ffmpeg1/libavfilter/af_channelsplit.c create mode 100644 ffmpeg1/libavfilter/af_earwax.c create mode 100644 ffmpeg1/libavfilter/af_join.c create mode 100644 ffmpeg1/libavfilter/af_pan.c create mode 100644 ffmpeg1/libavfilter/af_resample.c create mode 100644 ffmpeg1/libavfilter/af_silencedetect.c create mode 100644 ffmpeg1/libavfilter/af_volume.c create mode 100644 ffmpeg1/libavfilter/af_volume.h create mode 100644 ffmpeg1/libavfilter/af_volumedetect.c create mode 100644 ffmpeg1/libavfilter/all_channel_layouts.inc create mode 100644 ffmpeg1/libavfilter/allfilters.c create mode 100644 ffmpeg1/libavfilter/asink_anullsink.c create mode 100644 ffmpeg1/libavfilter/asrc_abuffer.h create mode 100644 ffmpeg1/libavfilter/asrc_aevalsrc.c create mode 100644 ffmpeg1/libavfilter/asrc_anullsrc.c create mode 100644 ffmpeg1/libavfilter/asrc_flite.c create mode 100644 ffmpeg1/libavfilter/asrc_sine.c create mode 100644 ffmpeg1/libavfilter/audio.c create mode 100644 ffmpeg1/libavfilter/audio.h create mode 100644 ffmpeg1/libavfilter/avcodec.c create mode 100644 ffmpeg1/libavfilter/avcodec.h create mode 100644 ffmpeg1/libavfilter/avf_concat.c create mode 100644 ffmpeg1/libavfilter/avf_showspectrum.c create mode 100644 ffmpeg1/libavfilter/avf_showwaves.c create mode 100644 ffmpeg1/libavfilter/avfilter.c create mode 100644 ffmpeg1/libavfilter/avfilter.h create mode 100644 ffmpeg1/libavfilter/avfiltergraph.c create mode 100644 ffmpeg1/libavfilter/avfiltergraph.h create mode 100644 ffmpeg1/libavfilter/bbox.c create mode 100644 ffmpeg1/libavfilter/bbox.h create mode 100644 ffmpeg1/libavfilter/buffer.c create mode 100644 ffmpeg1/libavfilter/bufferqueue.h create mode 100644 ffmpeg1/libavfilter/buffersink.c create mode 100644 
ffmpeg1/libavfilter/buffersink.h create mode 100644 ffmpeg1/libavfilter/buffersrc.c create mode 100644 ffmpeg1/libavfilter/buffersrc.h create mode 100644 ffmpeg1/libavfilter/drawutils.c create mode 100644 ffmpeg1/libavfilter/drawutils.h create mode 100644 ffmpeg1/libavfilter/f_ebur128.c create mode 100644 ffmpeg1/libavfilter/f_perms.c create mode 100644 ffmpeg1/libavfilter/f_select.c create mode 100644 ffmpeg1/libavfilter/f_sendcmd.c create mode 100644 ffmpeg1/libavfilter/f_setpts.c create mode 100644 ffmpeg1/libavfilter/f_settb.c create mode 100644 ffmpeg1/libavfilter/fifo.c create mode 100644 ffmpeg1/libavfilter/filtfmts.c create mode 100644 ffmpeg1/libavfilter/formats.c create mode 100644 ffmpeg1/libavfilter/formats.h create mode 100644 ffmpeg1/libavfilter/gradfun.h create mode 100644 ffmpeg1/libavfilter/graphdump.c create mode 100644 ffmpeg1/libavfilter/graphparser.c create mode 100644 ffmpeg1/libavfilter/internal.h create mode 100644 ffmpeg1/libavfilter/lavfutils.c create mode 100644 ffmpeg1/libavfilter/lavfutils.h create mode 100644 ffmpeg1/libavfilter/libavfilter.pc create mode 100644 ffmpeg1/libavfilter/libavfilter.v create mode 100644 ffmpeg1/libavfilter/libmpcodecs/av_helpers.h create mode 100644 ffmpeg1/libavfilter/libmpcodecs/cpudetect.h create mode 100644 ffmpeg1/libavfilter/libmpcodecs/help_mp.h create mode 100644 ffmpeg1/libavfilter/libmpcodecs/img_format.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/img_format.h create mode 100644 ffmpeg1/libavfilter/libmpcodecs/libvo/fastmemcpy.h create mode 100644 ffmpeg1/libavfilter/libmpcodecs/libvo/video_out.h create mode 100644 ffmpeg1/libavfilter/libmpcodecs/mp_image.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/mp_image.h create mode 100644 ffmpeg1/libavfilter/libmpcodecs/mp_msg.h create mode 100644 ffmpeg1/libavfilter/libmpcodecs/mpbswap.h create mode 100644 ffmpeg1/libavfilter/libmpcodecs/mpc_info.h create mode 100644 ffmpeg1/libavfilter/libmpcodecs/pullup.c create mode 100644 
ffmpeg1/libavfilter/libmpcodecs/pullup.h create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf.h create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_detc.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_dint.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_divtc.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_down3dright.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_eq.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_eq2.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_fil.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_filmdint.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_fspp.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_ilpack.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_ivtc.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_mcdeint.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_noise.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_ow.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_perspective.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_phase.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_pp7.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_pullup.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_qp.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_sab.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_scale.h create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_softpulldown.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_spp.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_telecine.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_tinterlace.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vf_uspp.c create mode 100644 ffmpeg1/libavfilter/libmpcodecs/vfcap.h create mode 100644 ffmpeg1/libavfilter/lswsutils.c create mode 100644 ffmpeg1/libavfilter/lswsutils.h create mode 100644 ffmpeg1/libavfilter/split.c create mode 100644 ffmpeg1/libavfilter/src_movie.c create mode 100644 
ffmpeg1/libavfilter/transform.c create mode 100644 ffmpeg1/libavfilter/transform.h create mode 100644 ffmpeg1/libavfilter/version.h create mode 100644 ffmpeg1/libavfilter/vf_alphaextract.c create mode 100644 ffmpeg1/libavfilter/vf_alphamerge.c create mode 100644 ffmpeg1/libavfilter/vf_aspect.c create mode 100644 ffmpeg1/libavfilter/vf_bbox.c create mode 100644 ffmpeg1/libavfilter/vf_blackdetect.c create mode 100644 ffmpeg1/libavfilter/vf_blackframe.c create mode 100644 ffmpeg1/libavfilter/vf_blend.c create mode 100644 ffmpeg1/libavfilter/vf_boxblur.c create mode 100644 ffmpeg1/libavfilter/vf_colormatrix.c create mode 100644 ffmpeg1/libavfilter/vf_copy.c create mode 100644 ffmpeg1/libavfilter/vf_crop.c create mode 100644 ffmpeg1/libavfilter/vf_cropdetect.c create mode 100644 ffmpeg1/libavfilter/vf_curves.c create mode 100644 ffmpeg1/libavfilter/vf_decimate.c create mode 100644 ffmpeg1/libavfilter/vf_delogo.c create mode 100644 ffmpeg1/libavfilter/vf_deshake.c create mode 100644 ffmpeg1/libavfilter/vf_drawbox.c create mode 100644 ffmpeg1/libavfilter/vf_drawtext.c create mode 100644 ffmpeg1/libavfilter/vf_edgedetect.c create mode 100644 ffmpeg1/libavfilter/vf_fade.c create mode 100644 ffmpeg1/libavfilter/vf_field.c create mode 100644 ffmpeg1/libavfilter/vf_fieldorder.c create mode 100644 ffmpeg1/libavfilter/vf_format.c create mode 100644 ffmpeg1/libavfilter/vf_fps.c create mode 100644 ffmpeg1/libavfilter/vf_framestep.c create mode 100644 ffmpeg1/libavfilter/vf_frei0r.c create mode 100644 ffmpeg1/libavfilter/vf_geq.c create mode 100644 ffmpeg1/libavfilter/vf_gradfun.c create mode 100644 ffmpeg1/libavfilter/vf_hflip.c create mode 100644 ffmpeg1/libavfilter/vf_histeq.c create mode 100644 ffmpeg1/libavfilter/vf_histogram.c create mode 100644 ffmpeg1/libavfilter/vf_hqdn3d.c create mode 100644 ffmpeg1/libavfilter/vf_hqdn3d.h create mode 100644 ffmpeg1/libavfilter/vf_hue.c create mode 100644 ffmpeg1/libavfilter/vf_idet.c create mode 100644 ffmpeg1/libavfilter/vf_il.c create 
mode 100644 ffmpeg1/libavfilter/vf_kerndeint.c create mode 100644 ffmpeg1/libavfilter/vf_libopencv.c create mode 100644 ffmpeg1/libavfilter/vf_lut.c create mode 100644 ffmpeg1/libavfilter/vf_mp.c create mode 100644 ffmpeg1/libavfilter/vf_noise.c create mode 100644 ffmpeg1/libavfilter/vf_null.c create mode 100644 ffmpeg1/libavfilter/vf_overlay.c create mode 100644 ffmpeg1/libavfilter/vf_pad.c create mode 100644 ffmpeg1/libavfilter/vf_pixdesctest.c create mode 100644 ffmpeg1/libavfilter/vf_pp.c create mode 100644 ffmpeg1/libavfilter/vf_removelogo.c create mode 100644 ffmpeg1/libavfilter/vf_scale.c create mode 100644 ffmpeg1/libavfilter/vf_setfield.c create mode 100644 ffmpeg1/libavfilter/vf_showinfo.c create mode 100644 ffmpeg1/libavfilter/vf_smartblur.c create mode 100644 ffmpeg1/libavfilter/vf_stereo3d.c create mode 100644 ffmpeg1/libavfilter/vf_subtitles.c create mode 100644 ffmpeg1/libavfilter/vf_super2xsai.c create mode 100644 ffmpeg1/libavfilter/vf_swapuv.c create mode 100644 ffmpeg1/libavfilter/vf_thumbnail.c create mode 100644 ffmpeg1/libavfilter/vf_tile.c create mode 100644 ffmpeg1/libavfilter/vf_tinterlace.c create mode 100644 ffmpeg1/libavfilter/vf_transpose.c create mode 100644 ffmpeg1/libavfilter/vf_unsharp.c create mode 100644 ffmpeg1/libavfilter/vf_vflip.c create mode 100644 ffmpeg1/libavfilter/vf_yadif.c create mode 100644 ffmpeg1/libavfilter/video.c create mode 100644 ffmpeg1/libavfilter/video.h create mode 100644 ffmpeg1/libavfilter/vsink_nullsink.c create mode 100644 ffmpeg1/libavfilter/vsrc_cellauto.c create mode 100644 ffmpeg1/libavfilter/vsrc_life.c create mode 100644 ffmpeg1/libavfilter/vsrc_mandelbrot.c create mode 100644 ffmpeg1/libavfilter/vsrc_mptestsrc.c create mode 100644 ffmpeg1/libavfilter/vsrc_testsrc.c create mode 100644 ffmpeg1/libavfilter/x86/Makefile create mode 100644 ffmpeg1/libavfilter/x86/af_volume.asm create mode 100644 ffmpeg1/libavfilter/x86/af_volume_init.c create mode 100644 ffmpeg1/libavfilter/x86/vf_gradfun.c create mode 
100644 ffmpeg1/libavfilter/x86/vf_hqdn3d.asm create mode 100644 ffmpeg1/libavfilter/x86/vf_hqdn3d_init.c create mode 100644 ffmpeg1/libavfilter/x86/vf_yadif.asm create mode 100644 ffmpeg1/libavfilter/x86/vf_yadif_init.c create mode 100644 ffmpeg1/libavfilter/x86/yadif-10.asm create mode 100644 ffmpeg1/libavfilter/x86/yadif-16.asm create mode 100644 ffmpeg1/libavfilter/yadif.h (limited to 'ffmpeg1/libavfilter') diff --git a/ffmpeg1/libavfilter/Makefile b/ffmpeg1/libavfilter/Makefile new file mode 100644 index 0000000..690b1cb --- /dev/null +++ b/ffmpeg1/libavfilter/Makefile @@ -0,0 +1,226 @@ +include $(SUBDIR)../config.mak + +NAME = avfilter +FFLIBS = avutil +FFLIBS-$(CONFIG_ACONVERT_FILTER) += swresample +FFLIBS-$(CONFIG_AMOVIE_FILTER) += avformat avcodec +FFLIBS-$(CONFIG_ARESAMPLE_FILTER) += swresample +FFLIBS-$(CONFIG_ASYNCTS_FILTER) += avresample +FFLIBS-$(CONFIG_ATEMPO_FILTER) += avcodec +FFLIBS-$(CONFIG_DECIMATE_FILTER) += avcodec +FFLIBS-$(CONFIG_DESHAKE_FILTER) += avcodec +FFLIBS-$(CONFIG_MOVIE_FILTER) += avformat avcodec +FFLIBS-$(CONFIG_MP_FILTER) += avcodec +FFLIBS-$(CONFIG_PAN_FILTER) += swresample +FFLIBS-$(CONFIG_PP_FILTER) += postproc +FFLIBS-$(CONFIG_REMOVELOGO_FILTER) += avformat avcodec swscale +FFLIBS-$(CONFIG_RESAMPLE_FILTER) += avresample +FFLIBS-$(CONFIG_SCALE_FILTER) += swscale +FFLIBS-$(CONFIG_SHOWSPECTRUM_FILTER) += avcodec +FFLIBS-$(CONFIG_SMARTBLUR_FILTER) += swscale +FFLIBS-$(CONFIG_SUBTITLES_FILTER) += avformat avcodec + +HEADERS = asrc_abuffer.h \ + avcodec.h \ + avfilter.h \ + avfiltergraph.h \ + buffersink.h \ + buffersrc.h \ + version.h \ + +OBJS = allfilters.o \ + audio.o \ + avfilter.o \ + avfiltergraph.o \ + buffer.o \ + buffersink.o \ + buffersrc.o \ + drawutils.o \ + fifo.o \ + formats.o \ + graphdump.o \ + graphparser.o \ + transform.o \ + video.o \ + + +OBJS-$(CONFIG_AVCODEC) += avcodec.o +OBJS-$(CONFIG_AVFORMAT) += lavfutils.o +OBJS-$(CONFIG_SWSCALE) += lswsutils.o + +OBJS-$(CONFIG_ACONVERT_FILTER) += af_aconvert.o 
+OBJS-$(CONFIG_AFADE_FILTER) += af_afade.o +OBJS-$(CONFIG_AFORMAT_FILTER) += af_aformat.o +OBJS-$(CONFIG_ALLPASS_FILTER) += af_biquads.o +OBJS-$(CONFIG_AMERGE_FILTER) += af_amerge.o +OBJS-$(CONFIG_AMIX_FILTER) += af_amix.o +OBJS-$(CONFIG_ANULL_FILTER) += af_anull.o +OBJS-$(CONFIG_APAD_FILTER) += af_apad.o +OBJS-$(CONFIG_APERMS_FILTER) += f_perms.o +OBJS-$(CONFIG_ARESAMPLE_FILTER) += af_aresample.o +OBJS-$(CONFIG_ASELECT_FILTER) += f_select.o +OBJS-$(CONFIG_ASENDCMD_FILTER) += f_sendcmd.o +OBJS-$(CONFIG_ASETNSAMPLES_FILTER) += af_asetnsamples.o +OBJS-$(CONFIG_ASETPTS_FILTER) += f_setpts.o +OBJS-$(CONFIG_ASETTB_FILTER) += f_settb.o +OBJS-$(CONFIG_ASHOWINFO_FILTER) += af_ashowinfo.o +OBJS-$(CONFIG_ASPLIT_FILTER) += split.o +OBJS-$(CONFIG_ASTREAMSYNC_FILTER) += af_astreamsync.o +OBJS-$(CONFIG_ASYNCTS_FILTER) += af_asyncts.o +OBJS-$(CONFIG_ATEMPO_FILTER) += af_atempo.o +OBJS-$(CONFIG_BANDPASS_FILTER) += af_biquads.o +OBJS-$(CONFIG_BANDREJECT_FILTER) += af_biquads.o +OBJS-$(CONFIG_BASS_FILTER) += af_biquads.o +OBJS-$(CONFIG_BIQUAD_FILTER) += af_biquads.o +OBJS-$(CONFIG_CHANNELMAP_FILTER) += af_channelmap.o +OBJS-$(CONFIG_CHANNELSPLIT_FILTER) += af_channelsplit.o +OBJS-$(CONFIG_EARWAX_FILTER) += af_earwax.o +OBJS-$(CONFIG_EBUR128_FILTER) += f_ebur128.o +OBJS-$(CONFIG_EQUALIZER_FILTER) += af_biquads.o +OBJS-$(CONFIG_HIGHPASS_FILTER) += af_biquads.o +OBJS-$(CONFIG_JOIN_FILTER) += af_join.o +OBJS-$(CONFIG_LOWPASS_FILTER) += af_biquads.o +OBJS-$(CONFIG_PAN_FILTER) += af_pan.o +OBJS-$(CONFIG_RESAMPLE_FILTER) += af_resample.o +OBJS-$(CONFIG_SILENCEDETECT_FILTER) += af_silencedetect.o +OBJS-$(CONFIG_TREBLE_FILTER) += af_biquads.o +OBJS-$(CONFIG_VOLUME_FILTER) += af_volume.o +OBJS-$(CONFIG_VOLUMEDETECT_FILTER) += af_volumedetect.o + +OBJS-$(CONFIG_AEVALSRC_FILTER) += asrc_aevalsrc.o +OBJS-$(CONFIG_ANULLSRC_FILTER) += asrc_anullsrc.o +OBJS-$(CONFIG_FLITE_FILTER) += asrc_flite.o +OBJS-$(CONFIG_SINE_FILTER) += asrc_sine.o + +OBJS-$(CONFIG_ANULLSINK_FILTER) += asink_anullsink.o + 
+OBJS-$(CONFIG_ASS_FILTER) += vf_subtitles.o +OBJS-$(CONFIG_ALPHAEXTRACT_FILTER) += vf_alphaextract.o +OBJS-$(CONFIG_ALPHAMERGE_FILTER) += vf_alphamerge.o +OBJS-$(CONFIG_BBOX_FILTER) += bbox.o vf_bbox.o +OBJS-$(CONFIG_BLACKDETECT_FILTER) += vf_blackdetect.o +OBJS-$(CONFIG_BLACKFRAME_FILTER) += vf_blackframe.o +OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o +OBJS-$(CONFIG_BOXBLUR_FILTER) += vf_boxblur.o +OBJS-$(CONFIG_COLORMATRIX_FILTER) += vf_colormatrix.o +OBJS-$(CONFIG_COPY_FILTER) += vf_copy.o +OBJS-$(CONFIG_CROP_FILTER) += vf_crop.o +OBJS-$(CONFIG_CROPDETECT_FILTER) += vf_cropdetect.o +OBJS-$(CONFIG_CURVES_FILTER) += vf_curves.o +OBJS-$(CONFIG_DECIMATE_FILTER) += vf_decimate.o +OBJS-$(CONFIG_DELOGO_FILTER) += vf_delogo.o +OBJS-$(CONFIG_DESHAKE_FILTER) += vf_deshake.o +OBJS-$(CONFIG_DRAWBOX_FILTER) += vf_drawbox.o +OBJS-$(CONFIG_DRAWTEXT_FILTER) += vf_drawtext.o +OBJS-$(CONFIG_EDGEDETECT_FILTER) += vf_edgedetect.o +OBJS-$(CONFIG_FADE_FILTER) += vf_fade.o +OBJS-$(CONFIG_FIELD_FILTER) += vf_field.o +OBJS-$(CONFIG_FIELDORDER_FILTER) += vf_fieldorder.o +OBJS-$(CONFIG_FORMAT_FILTER) += vf_format.o +OBJS-$(CONFIG_FRAMESTEP_FILTER) += vf_framestep.o +OBJS-$(CONFIG_FPS_FILTER) += vf_fps.o +OBJS-$(CONFIG_FREI0R_FILTER) += vf_frei0r.o +OBJS-$(CONFIG_GEQ_FILTER) += vf_geq.o +OBJS-$(CONFIG_GRADFUN_FILTER) += vf_gradfun.o +OBJS-$(CONFIG_HFLIP_FILTER) += vf_hflip.o +OBJS-$(CONFIG_HISTEQ_FILTER) += vf_histeq.o +OBJS-$(CONFIG_HISTOGRAM_FILTER) += vf_histogram.o +OBJS-$(CONFIG_HQDN3D_FILTER) += vf_hqdn3d.o +OBJS-$(CONFIG_HUE_FILTER) += vf_hue.o +OBJS-$(CONFIG_IDET_FILTER) += vf_idet.o +OBJS-$(CONFIG_IL_FILTER) += vf_il.o +OBJS-$(CONFIG_KERNDEINT_FILTER) += vf_kerndeint.o +OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o +OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o +OBJS-$(CONFIG_LUTYUV_FILTER) += vf_lut.o +OBJS-$(CONFIG_MP_FILTER) += vf_mp.o +OBJS-$(CONFIG_NEGATE_FILTER) += vf_lut.o +OBJS-$(CONFIG_NOFORMAT_FILTER) += vf_format.o +OBJS-$(CONFIG_NOISE_FILTER) += vf_noise.o 
+OBJS-$(CONFIG_NULL_FILTER) += vf_null.o +OBJS-$(CONFIG_OCV_FILTER) += vf_libopencv.o +OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o +OBJS-$(CONFIG_PAD_FILTER) += vf_pad.o +OBJS-$(CONFIG_PERMS_FILTER) += f_perms.o +OBJS-$(CONFIG_PIXDESCTEST_FILTER) += vf_pixdesctest.o +OBJS-$(CONFIG_PP_FILTER) += vf_pp.o +OBJS-$(CONFIG_REMOVELOGO_FILTER) += bbox.o lswsutils.o lavfutils.o vf_removelogo.o +OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o +OBJS-$(CONFIG_SELECT_FILTER) += f_select.o +OBJS-$(CONFIG_SENDCMD_FILTER) += f_sendcmd.o +OBJS-$(CONFIG_SETDAR_FILTER) += vf_aspect.o +OBJS-$(CONFIG_SETFIELD_FILTER) += vf_setfield.o +OBJS-$(CONFIG_SETPTS_FILTER) += f_setpts.o +OBJS-$(CONFIG_SETSAR_FILTER) += vf_aspect.o +OBJS-$(CONFIG_SETTB_FILTER) += f_settb.o +OBJS-$(CONFIG_SHOWINFO_FILTER) += vf_showinfo.o +OBJS-$(CONFIG_SMARTBLUR_FILTER) += vf_smartblur.o +OBJS-$(CONFIG_SPLIT_FILTER) += split.o +OBJS-$(CONFIG_STEREO3D_FILTER) += vf_stereo3d.o +OBJS-$(CONFIG_SUBTITLES_FILTER) += vf_subtitles.o +OBJS-$(CONFIG_SUPER2XSAI_FILTER) += vf_super2xsai.o +OBJS-$(CONFIG_SWAPUV_FILTER) += vf_swapuv.o +OBJS-$(CONFIG_THUMBNAIL_FILTER) += vf_thumbnail.o +OBJS-$(CONFIG_TILE_FILTER) += vf_tile.o +OBJS-$(CONFIG_TINTERLACE_FILTER) += vf_tinterlace.o +OBJS-$(CONFIG_TRANSPOSE_FILTER) += vf_transpose.o +OBJS-$(CONFIG_UNSHARP_FILTER) += vf_unsharp.o +OBJS-$(CONFIG_VFLIP_FILTER) += vf_vflip.o +OBJS-$(CONFIG_YADIF_FILTER) += vf_yadif.o + +OBJS-$(CONFIG_CELLAUTO_FILTER) += vsrc_cellauto.o +OBJS-$(CONFIG_COLOR_FILTER) += vsrc_testsrc.o +OBJS-$(CONFIG_FREI0R_SRC_FILTER) += vf_frei0r.o +OBJS-$(CONFIG_LIFE_FILTER) += vsrc_life.o +OBJS-$(CONFIG_MANDELBROT_FILTER) += vsrc_mandelbrot.o +OBJS-$(CONFIG_MPTESTSRC_FILTER) += vsrc_mptestsrc.o +OBJS-$(CONFIG_NULLSRC_FILTER) += vsrc_testsrc.o +OBJS-$(CONFIG_RGBTESTSRC_FILTER) += vsrc_testsrc.o +OBJS-$(CONFIG_SMPTEBARS_FILTER) += vsrc_testsrc.o +OBJS-$(CONFIG_TESTSRC_FILTER) += vsrc_testsrc.o + +OBJS-$(CONFIG_NULLSINK_FILTER) += vsink_nullsink.o + +OBJS-$(CONFIG_MP_FILTER) 
+= libmpcodecs/mp_image.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/img_format.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_detc.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_dint.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_divtc.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_down3dright.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_eq2.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_eq.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_fil.o +#OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_filmdint.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_fspp.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_ilpack.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_ivtc.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_mcdeint.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_noise.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_ow.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_perspective.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_phase.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_pp7.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_pullup.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_qp.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_sab.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_softpulldown.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_spp.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_telecine.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_tinterlace.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_uspp.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/pullup.o + +# multimedia filters +OBJS-$(CONFIG_CONCAT_FILTER) += avf_concat.o +OBJS-$(CONFIG_SHOWSPECTRUM_FILTER) += avf_showspectrum.o +OBJS-$(CONFIG_SHOWWAVES_FILTER) += avf_showwaves.o + +# multimedia sources +OBJS-$(CONFIG_AMOVIE_FILTER) += src_movie.o +OBJS-$(CONFIG_MOVIE_FILTER) += src_movie.o + +TOOLS = graph2dot +TESTPROGS = drawutils filtfmts formats + +clean:: + $(RM) $(CLEANSUFFIXES:%=libavfilter/libmpcodecs/%) diff --git a/ffmpeg1/libavfilter/af_aconvert.c b/ffmpeg1/libavfilter/af_aconvert.c new file mode 100644 index 0000000..c05e571 --- /dev/null 
+++ b/ffmpeg1/libavfilter/af_aconvert.c @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram + * Copyright (c) 2011 Stefano Sabatini + * Copyright (c) 2011 Mina Nagy Zaki + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * sample format and channel layout conversion audio filter + */ + +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libswresample/swresample.h" +#include "avfilter.h" +#include "audio.h" +#include "internal.h" + +typedef struct { + enum AVSampleFormat out_sample_fmt; + int64_t out_chlayout; + struct SwrContext *swr; +} AConvertContext; + +static av_cold int init(AVFilterContext *ctx, const char *args0) +{ + AConvertContext *aconvert = ctx->priv; + char *arg, *ptr = NULL; + int ret = 0; + char *args = av_strdup(args0); + + aconvert->out_sample_fmt = AV_SAMPLE_FMT_NONE; + aconvert->out_chlayout = 0; + + if ((arg = av_strtok(args, ":", &ptr)) && strcmp(arg, "auto")) { + if ((ret = ff_parse_sample_format(&aconvert->out_sample_fmt, arg, ctx)) < 0) + goto end; + } + if ((arg = av_strtok(NULL, ":", &ptr)) && strcmp(arg, "auto")) { + if ((ret = ff_parse_channel_layout(&aconvert->out_chlayout, arg, ctx)) < 0) + goto end; + } + +end: + av_freep(&args); + return ret; +} 
+ +static av_cold void uninit(AVFilterContext *ctx) +{ + AConvertContext *aconvert = ctx->priv; + swr_free(&aconvert->swr); +} + +static int query_formats(AVFilterContext *ctx) +{ + AVFilterFormats *formats = NULL; + AConvertContext *aconvert = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; + AVFilterChannelLayouts *layouts; + + ff_formats_ref(ff_all_formats(AVMEDIA_TYPE_AUDIO), + &inlink->out_formats); + if (aconvert->out_sample_fmt != AV_SAMPLE_FMT_NONE) { + formats = NULL; + ff_add_format(&formats, aconvert->out_sample_fmt); + ff_formats_ref(formats, &outlink->in_formats); + } else + ff_formats_ref(ff_all_formats(AVMEDIA_TYPE_AUDIO), + &outlink->in_formats); + + ff_channel_layouts_ref(ff_all_channel_layouts(), + &inlink->out_channel_layouts); + if (aconvert->out_chlayout != 0) { + layouts = NULL; + ff_add_channel_layout(&layouts, aconvert->out_chlayout); + ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts); + } else + ff_channel_layouts_ref(ff_all_channel_layouts(), + &outlink->in_channel_layouts); + + return 0; +} + +static int config_output(AVFilterLink *outlink) +{ + int ret; + AVFilterContext *ctx = outlink->src; + AVFilterLink *inlink = ctx->inputs[0]; + AConvertContext *aconvert = ctx->priv; + char buf1[64], buf2[64]; + + /* if not specified in args, use the format and layout of the output */ + if (aconvert->out_sample_fmt == AV_SAMPLE_FMT_NONE) + aconvert->out_sample_fmt = outlink->format; + if (aconvert->out_chlayout == 0) + aconvert->out_chlayout = outlink->channel_layout; + + aconvert->swr = swr_alloc_set_opts(aconvert->swr, + aconvert->out_chlayout, aconvert->out_sample_fmt, inlink->sample_rate, + inlink->channel_layout, inlink->format, inlink->sample_rate, + 0, ctx); + if (!aconvert->swr) + return AVERROR(ENOMEM); + ret = swr_init(aconvert->swr); + if (ret < 0) + return ret; + + av_get_channel_layout_string(buf1, sizeof(buf1), + -1, inlink ->channel_layout); + 
av_get_channel_layout_string(buf2, sizeof(buf2), + -1, outlink->channel_layout); + av_log(ctx, AV_LOG_VERBOSE, + "fmt:%s cl:%s -> fmt:%s cl:%s\n", + av_get_sample_fmt_name(inlink ->format), buf1, + av_get_sample_fmt_name(outlink->format), buf2); + + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref) +{ + AConvertContext *aconvert = inlink->dst->priv; + const int n = insamplesref->nb_samples; + AVFilterLink *const outlink = inlink->dst->outputs[0]; + AVFrame *outsamplesref = ff_get_audio_buffer(outlink, n); + int ret; + + swr_convert(aconvert->swr, outsamplesref->extended_data, n, + (void *)insamplesref->extended_data, n); + + av_frame_copy_props(outsamplesref, insamplesref); + av_frame_set_channels(outsamplesref, outlink->channels); + outsamplesref->channel_layout = outlink->channel_layout; + + ret = ff_filter_frame(outlink, outsamplesref); + av_frame_free(&insamplesref); + return ret; +} + +static const AVFilterPad aconvert_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad aconvert_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_output, + }, + { NULL } +}; + +AVFilter avfilter_af_aconvert = { + .name = "aconvert", + .description = NULL_IF_CONFIG_SMALL("Convert the input audio to sample_fmt:channel_layout."), + .priv_size = sizeof(AConvertContext), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .inputs = aconvert_inputs, + .outputs = aconvert_outputs, +}; diff --git a/ffmpeg1/libavfilter/af_afade.c b/ffmpeg1/libavfilter/af_afade.c new file mode 100644 index 0000000..1134849 --- /dev/null +++ b/ffmpeg1/libavfilter/af_afade.c @@ -0,0 +1,307 @@ +/* + * Copyright (c) 2013 Paul B Mahol + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * fade audio filter + */ + +#include "libavutil/opt.h" +#include "audio.h" +#include "avfilter.h" +#include "internal.h" + +typedef struct { + const AVClass *class; + int type; + int curve; + int nb_samples; + int64_t start_sample; + double duration; + double start_time; + + void (*fade_samples)(uint8_t **dst, uint8_t * const *src, + int nb_samples, int channels, int direction, + int64_t start, int range, int curve); +} AudioFadeContext; + +enum CurveType { TRI, QSIN, ESIN, HSIN, LOG, PAR, QUA, CUB, SQU, CBR }; + +#define OFFSET(x) offsetof(AudioFadeContext, x) +#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption afade_options[] = { + { "type", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS, "type" }, + { "t", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS, "type" }, + { "in", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "type" }, + { "out", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "type" }, + { "start_sample", "set expression of sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, FLAGS }, + { "ss", "set expression of sample to start fading", 
OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, FLAGS }, + { "nb_samples", "set expression for fade duration in samples", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX, FLAGS }, + { "ns", "set expression for fade duration in samples", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX, FLAGS }, + { "start_time", "set expression of second to start fading", OFFSET(start_time), AV_OPT_TYPE_DOUBLE, {.dbl = 0. }, 0, 7*24*60*60,FLAGS }, + { "st", "set expression of second to start fading", OFFSET(start_time), AV_OPT_TYPE_DOUBLE, {.dbl = 0. }, 0, 7*24*60*60,FLAGS }, + { "duration", "set expression for fade duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl = 0. }, 0, 24*60*60, FLAGS }, + { "d", "set expression for fade duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl = 0. }, 0, 24*60*60, FLAGS }, + { "curve", "set expression for fade curve", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, TRI, CBR, FLAGS, "curve" }, + { "c", "set expression for fade curve", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, TRI, CBR, FLAGS, "curve" }, + { "tri", "linear slope", 0, AV_OPT_TYPE_CONST, {.i64 = TRI }, 0, 0, FLAGS, "curve" }, + { "qsin", "quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN }, 0, 0, FLAGS, "curve" }, + { "esin", "exponential sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = ESIN }, 0, 0, FLAGS, "curve" }, + { "hsin", "half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN }, 0, 0, FLAGS, "curve" }, + { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64 = LOG }, 0, 0, FLAGS, "curve" }, + { "par", "inverted parabola", 0, AV_OPT_TYPE_CONST, {.i64 = PAR }, 0, 0, FLAGS, "curve" }, + { "qua", "quadratic", 0, AV_OPT_TYPE_CONST, {.i64 = QUA }, 0, 0, FLAGS, "curve" }, + { "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, FLAGS, "curve" }, + { "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, FLAGS, "curve" }, + { "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR 
}, 0, 0, FLAGS, "curve" }, + {NULL}, +}; + +AVFILTER_DEFINE_CLASS(afade); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + AudioFadeContext *afade = ctx->priv; + int ret; + + afade->class = &afade_class; + av_opt_set_defaults(afade); + + if ((ret = av_set_options_string(afade, args, "=", ":")) < 0) + return ret; + + if (INT64_MAX - afade->nb_samples < afade->start_sample) + return AVERROR(EINVAL); + + return 0; +} + +static int query_formats(AVFilterContext *ctx) +{ + AVFilterFormats *formats; + AVFilterChannelLayouts *layouts; + static const enum AVSampleFormat sample_fmts[] = { + AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P, + AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P, + AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP, + AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP, + AV_SAMPLE_FMT_NONE + }; + + layouts = ff_all_channel_layouts(); + if (!layouts) + return AVERROR(ENOMEM); + ff_set_common_channel_layouts(ctx, layouts); + + formats = ff_make_format_list(sample_fmts); + if (!formats) + return AVERROR(ENOMEM); + ff_set_common_formats(ctx, formats); + + formats = ff_all_samplerates(); + if (!formats) + return AVERROR(ENOMEM); + ff_set_common_samplerates(ctx, formats); + + return 0; +} + +static double fade_gain(int curve, int64_t index, int range) +{ + double gain; + + gain = FFMAX(0.0, FFMIN(1.0, 1.0 * index / range)); + + switch (curve) { + case QSIN: + gain = sin(gain * M_PI / 2.0); + break; + case ESIN: + gain = 1.0 - cos(M_PI / 4.0 * (pow(2.0*gain - 1, 3) + 1)); + break; + case HSIN: + gain = (1.0 - cos(gain * M_PI)) / 2.0; + break; + case LOG: + gain = pow(0.1, (1 - gain) * 5.0); + break; + case PAR: + gain = (1 - (1 - gain) * (1 - gain)); + break; + case QUA: + gain *= gain; + break; + case CUB: + gain = gain * gain * gain; + break; + case SQU: + gain = sqrt(gain); + break; + case CBR: + gain = cbrt(gain); + break; + } + + return gain; +} + +#define FADE_PLANAR(name, type) \ +static void fade_samples_## name ##p(uint8_t **dst, uint8_t * const *src, \ + int 
nb_samples, int channels, int dir, \ + int64_t start, int range, int curve) \ +{ \ + int i, c; \ + \ + for (i = 0; i < nb_samples; i++) { \ + double gain = fade_gain(curve, start + i * dir, range); \ + for (c = 0; c < channels; c++) { \ + type *d = (type *)dst[c]; \ + const type *s = (type *)src[c]; \ + \ + d[i] = s[i] * gain; \ + } \ + } \ +} + +#define FADE(name, type) \ +static void fade_samples_## name (uint8_t **dst, uint8_t * const *src, \ + int nb_samples, int channels, int dir, \ + int64_t start, int range, int curve) \ +{ \ + type *d = (type *)dst[0]; \ + const type *s = (type *)src[0]; \ + int i, c, k = 0; \ + \ + for (i = 0; i < nb_samples; i++) { \ + double gain = fade_gain(curve, start + i * dir, range); \ + for (c = 0; c < channels; c++, k++) \ + d[k] = s[k] * gain; \ + } \ +} + +FADE_PLANAR(dbl, double) +FADE_PLANAR(flt, float) +FADE_PLANAR(s16, int16_t) +FADE_PLANAR(s32, int32_t) + +FADE(dbl, double) +FADE(flt, float) +FADE(s16, int16_t) +FADE(s32, int32_t) + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AudioFadeContext *afade = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + + switch (inlink->format) { + case AV_SAMPLE_FMT_DBL: afade->fade_samples = fade_samples_dbl; break; + case AV_SAMPLE_FMT_DBLP: afade->fade_samples = fade_samples_dblp; break; + case AV_SAMPLE_FMT_FLT: afade->fade_samples = fade_samples_flt; break; + case AV_SAMPLE_FMT_FLTP: afade->fade_samples = fade_samples_fltp; break; + case AV_SAMPLE_FMT_S16: afade->fade_samples = fade_samples_s16; break; + case AV_SAMPLE_FMT_S16P: afade->fade_samples = fade_samples_s16p; break; + case AV_SAMPLE_FMT_S32: afade->fade_samples = fade_samples_s32; break; + case AV_SAMPLE_FMT_S32P: afade->fade_samples = fade_samples_s32p; break; + } + + if (afade->duration) + afade->nb_samples = afade->duration * inlink->sample_rate; + if (afade->start_time) + afade->start_sample = afade->start_time * inlink->sample_rate; + + return 0; +} + +static int 
filter_frame(AVFilterLink *inlink, AVFrame *buf) +{ + AudioFadeContext *afade = inlink->dst->priv; + AVFilterLink *outlink = inlink->dst->outputs[0]; + int nb_samples = buf->nb_samples; + AVFrame *out_buf; + int64_t cur_sample = av_rescale_q(buf->pts, (AVRational){1, outlink->sample_rate}, outlink->time_base); + + if ((!afade->type && (afade->start_sample + afade->nb_samples < cur_sample)) || + ( afade->type && (cur_sample + afade->nb_samples < afade->start_sample))) + return ff_filter_frame(outlink, buf); + + if (av_frame_is_writable(buf)) { + out_buf = buf; + } else { + out_buf = ff_get_audio_buffer(inlink, nb_samples); + if (!out_buf) + return AVERROR(ENOMEM); + out_buf->pts = buf->pts; + } + + if ((!afade->type && (cur_sample + nb_samples < afade->start_sample)) || + ( afade->type && (afade->start_sample + afade->nb_samples < cur_sample))) { + av_samples_set_silence(out_buf->extended_data, 0, nb_samples, + av_frame_get_channels(out_buf), out_buf->format); + } else { + int64_t start; + + if (!afade->type) + start = cur_sample - afade->start_sample; + else + start = afade->start_sample + afade->nb_samples - cur_sample; + + afade->fade_samples(out_buf->extended_data, buf->extended_data, + nb_samples, av_frame_get_channels(buf), + afade->type ? 
-1 : 1, start, + afade->nb_samples, afade->curve); + } + + if (buf != out_buf) + av_frame_free(&buf); + + return ff_filter_frame(outlink, out_buf); +} + +static const AVFilterPad avfilter_af_afade_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad avfilter_af_afade_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_output, + }, + { NULL } +}; + +AVFilter avfilter_af_afade = { + .name = "afade", + .description = NULL_IF_CONFIG_SMALL("Fade in/out input audio."), + .query_formats = query_formats, + .priv_size = sizeof(AudioFadeContext), + .init = init, + .inputs = avfilter_af_afade_inputs, + .outputs = avfilter_af_afade_outputs, + .priv_class = &afade_class, +}; diff --git a/ffmpeg1/libavfilter/af_aformat.c b/ffmpeg1/libavfilter/af_aformat.c new file mode 100644 index 0000000..9ac381f --- /dev/null +++ b/ffmpeg1/libavfilter/af_aformat.c @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2011 Mina Nagy Zaki + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * format audio filter + */ + +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/common.h" +#include "libavutil/opt.h" + +#include "audio.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" + +typedef struct AFormatContext { + const AVClass *class; + + AVFilterFormats *formats; + AVFilterFormats *sample_rates; + AVFilterChannelLayouts *channel_layouts; + + char *formats_str; + char *sample_rates_str; + char *channel_layouts_str; +} AFormatContext; + +#define OFFSET(x) offsetof(AFormatContext, x) +#define A AV_OPT_FLAG_AUDIO_PARAM +#define F AV_OPT_FLAG_FILTERING_PARAM +static const AVOption aformat_options[] = { + { "sample_fmts", "A comma-separated list of sample formats.", OFFSET(formats_str), AV_OPT_TYPE_STRING, .flags = A|F }, + { "sample_rates", "A comma-separated list of sample rates.", OFFSET(sample_rates_str), AV_OPT_TYPE_STRING, .flags = A|F }, + { "channel_layouts", "A comma-separated list of channel layouts.", OFFSET(channel_layouts_str), AV_OPT_TYPE_STRING, .flags = A|F }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(aformat); + +#define PARSE_FORMATS(str, type, list, add_to_list, get_fmt, none, desc) \ +do { \ + char *next, *cur = str; \ + while (cur) { \ + type fmt; \ + next = strchr(cur, ','); \ + if (next) \ + *next++ = 0; \ + \ + if ((fmt = get_fmt(cur)) == none) { \ + av_log(ctx, AV_LOG_ERROR, "Error parsing " desc ": %s.\n", cur);\ + ret = AVERROR(EINVAL); \ + goto fail; \ + } \ + add_to_list(&list, fmt); \ + \ + cur = next; \ + } \ +} while (0) + +static int get_sample_rate(const char *samplerate) +{ + int ret = strtol(samplerate, NULL, 0); + return FFMAX(ret, 0); +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + 
AFormatContext *s = ctx->priv; + int ret; + + if (!args) { + av_log(ctx, AV_LOG_ERROR, "No parameters supplied.\n"); + return AVERROR(EINVAL); + } + + s->class = &aformat_class; + av_opt_set_defaults(s); + + if ((ret = av_set_options_string(s, args, "=", ":")) < 0) + return ret; + + PARSE_FORMATS(s->formats_str, enum AVSampleFormat, s->formats, + ff_add_format, av_get_sample_fmt, AV_SAMPLE_FMT_NONE, "sample format"); + PARSE_FORMATS(s->sample_rates_str, int, s->sample_rates, ff_add_format, + get_sample_rate, 0, "sample rate"); + PARSE_FORMATS(s->channel_layouts_str, uint64_t, s->channel_layouts, + ff_add_channel_layout, av_get_channel_layout, 0, + "channel layout"); + +fail: + av_opt_free(s); + return ret; +} + +static int query_formats(AVFilterContext *ctx) +{ + AFormatContext *s = ctx->priv; + + ff_set_common_formats(ctx, s->formats ? s->formats : + ff_all_formats(AVMEDIA_TYPE_AUDIO)); + ff_set_common_samplerates(ctx, s->sample_rates ? s->sample_rates : + ff_all_samplerates()); + ff_set_common_channel_layouts(ctx, s->channel_layouts ? 
s->channel_layouts : + ff_all_channel_counts()); + + return 0; +} + +static const AVFilterPad avfilter_af_aformat_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL } +}; + +static const AVFilterPad avfilter_af_aformat_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO + }, + { NULL } +}; + +AVFilter avfilter_af_aformat = { + .name = "aformat", + .description = NULL_IF_CONFIG_SMALL("Convert the input audio to one of the specified formats."), + .init = init, + .query_formats = query_formats, + .priv_size = sizeof(AFormatContext), + + .inputs = avfilter_af_aformat_inputs, + .outputs = avfilter_af_aformat_outputs, + .priv_class = &aformat_class, +}; diff --git a/ffmpeg1/libavfilter/af_amerge.c b/ffmpeg1/libavfilter/af_amerge.c new file mode 100644 index 0000000..28c3682 --- /dev/null +++ b/ffmpeg1/libavfilter/af_amerge.c @@ -0,0 +1,355 @@ +/* + * Copyright (c) 2011 Nicolas George + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Audio merging filter + */ + +#include "libavutil/avstring.h" +#include "libavutil/bprint.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "libswresample/swresample.h" // only for SWR_CH_MAX +#include "avfilter.h" +#include "audio.h" +#include "bufferqueue.h" +#include "internal.h" + +typedef struct { + const AVClass *class; + int nb_inputs; + int route[SWR_CH_MAX]; /**< channels routing, see copy_samples */ + int bps; + struct amerge_input { + struct FFBufQueue queue; + int nb_ch; /**< number of channels for the input */ + int nb_samples; + int pos; + } *in; +} AMergeContext; + +#define OFFSET(x) offsetof(AMergeContext, x) +#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption amerge_options[] = { + { "inputs", "specify the number of inputs", OFFSET(nb_inputs), + AV_OPT_TYPE_INT, { .i64 = 2 }, 2, SWR_CH_MAX, FLAGS }, + {0} +}; + +AVFILTER_DEFINE_CLASS(amerge); + +static av_cold void uninit(AVFilterContext *ctx) +{ + AMergeContext *am = ctx->priv; + int i; + + for (i = 0; i < am->nb_inputs; i++) { + if (am->in) + ff_bufqueue_discard_all(&am->in[i].queue); + if (ctx->input_pads) + av_freep(&ctx->input_pads[i].name); + } + av_freep(&am->in); +} + +static int query_formats(AVFilterContext *ctx) +{ + AMergeContext *am = ctx->priv; + int64_t inlayout[SWR_CH_MAX], outlayout = 0; + AVFilterFormats *formats; + AVFilterChannelLayouts *layouts; + int i, overlap = 0, nb_ch = 0; + + for (i = 0; i < am->nb_inputs; i++) { + if (!ctx->inputs[i]->in_channel_layouts || + !ctx->inputs[i]->in_channel_layouts->nb_channel_layouts) { + av_log(ctx, AV_LOG_ERROR, + "No channel layout for input %d\n", i + 1); + return AVERROR(EINVAL); + } + inlayout[i] = 
ctx->inputs[i]->in_channel_layouts->channel_layouts[0]; + if (ctx->inputs[i]->in_channel_layouts->nb_channel_layouts > 1) { + char buf[256]; + av_get_channel_layout_string(buf, sizeof(buf), 0, inlayout[i]); + av_log(ctx, AV_LOG_INFO, "Using \"%s\" for input %d\n", buf, i + 1); + } + am->in[i].nb_ch = av_get_channel_layout_nb_channels(inlayout[i]); + if (outlayout & inlayout[i]) + overlap++; + outlayout |= inlayout[i]; + nb_ch += am->in[i].nb_ch; + } + if (nb_ch > SWR_CH_MAX) { + av_log(ctx, AV_LOG_ERROR, "Too many channels (max %d)\n", SWR_CH_MAX); + return AVERROR(EINVAL); + } + if (overlap) { + av_log(ctx, AV_LOG_WARNING, + "Input channel layouts overlap: " + "output layout will be determined by the number of distinct input channels\n"); + for (i = 0; i < nb_ch; i++) + am->route[i] = i; + outlayout = av_get_default_channel_layout(nb_ch); + if (!outlayout) + outlayout = ((int64_t)1 << nb_ch) - 1; + } else { + int *route[SWR_CH_MAX]; + int c, out_ch_number = 0; + + route[0] = am->route; + for (i = 1; i < am->nb_inputs; i++) + route[i] = route[i - 1] + am->in[i - 1].nb_ch; + for (c = 0; c < 64; c++) + for (i = 0; i < am->nb_inputs; i++) + if ((inlayout[i] >> c) & 1) + *(route[i]++) = out_ch_number++; + } + formats = ff_make_format_list(ff_packed_sample_fmts_array); + ff_set_common_formats(ctx, formats); + for (i = 0; i < am->nb_inputs; i++) { + layouts = NULL; + ff_add_channel_layout(&layouts, inlayout[i]); + ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts); + } + layouts = NULL; + ff_add_channel_layout(&layouts, outlayout); + ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts); + ff_set_common_samplerates(ctx, ff_all_samplerates()); + return 0; +} + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AMergeContext *am = ctx->priv; + AVBPrint bp; + int i; + + for (i = 1; i < am->nb_inputs; i++) { + if (ctx->inputs[i]->sample_rate != ctx->inputs[0]->sample_rate) { + av_log(ctx, 
AV_LOG_ERROR, + "Inputs must have the same sample rate " + "%d for in%d vs %d\n", + ctx->inputs[i]->sample_rate, i, ctx->inputs[0]->sample_rate); + return AVERROR(EINVAL); + } + } + am->bps = av_get_bytes_per_sample(ctx->outputs[0]->format); + outlink->sample_rate = ctx->inputs[0]->sample_rate; + outlink->time_base = ctx->inputs[0]->time_base; + + av_bprint_init(&bp, 0, 1); + for (i = 0; i < am->nb_inputs; i++) { + av_bprintf(&bp, "%sin%d:", i ? " + " : "", i); + av_bprint_channel_layout(&bp, -1, ctx->inputs[i]->channel_layout); + } + av_bprintf(&bp, " -> out:"); + av_bprint_channel_layout(&bp, -1, ctx->outputs[0]->channel_layout); + av_log(ctx, AV_LOG_VERBOSE, "%s\n", bp.str); + + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AMergeContext *am = ctx->priv; + int i, ret; + + for (i = 0; i < am->nb_inputs; i++) + if (!am->in[i].nb_samples) + if ((ret = ff_request_frame(ctx->inputs[i])) < 0) + return ret; + return 0; +} + +/** + * Copy samples from several input streams to one output stream. 
+ * @param nb_inputs number of inputs + * @param in inputs; used only for the nb_ch field; + * @param route routing values; + * input channel i goes to output channel route[i]; + * i < in[0].nb_ch are the channels from the first output; + * i >= in[0].nb_ch are the channels from the second output + * @param ins pointer to the samples of each inputs, in packed format; + * will be left at the end of the copied samples + * @param outs pointer to the samples of the output, in packet format; + * must point to a buffer big enough; + * will be left at the end of the copied samples + * @param ns number of samples to copy + * @param bps bytes per sample + */ +static inline void copy_samples(int nb_inputs, struct amerge_input in[], + int *route, uint8_t *ins[], + uint8_t **outs, int ns, int bps) +{ + int *route_cur; + int i, c, nb_ch = 0; + + for (i = 0; i < nb_inputs; i++) + nb_ch += in[i].nb_ch; + while (ns--) { + route_cur = route; + for (i = 0; i < nb_inputs; i++) { + for (c = 0; c < in[i].nb_ch; c++) { + memcpy((*outs) + bps * *(route_cur++), ins[i], bps); + ins[i] += bps; + } + } + *outs += nb_ch * bps; + } +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) +{ + AVFilterContext *ctx = inlink->dst; + AMergeContext *am = ctx->priv; + AVFilterLink *const outlink = ctx->outputs[0]; + int input_number; + int nb_samples, ns, i; + AVFrame *outbuf, *inbuf[SWR_CH_MAX]; + uint8_t *ins[SWR_CH_MAX], *outs; + + for (input_number = 0; input_number < am->nb_inputs; input_number++) + if (inlink == ctx->inputs[input_number]) + break; + av_assert1(input_number < am->nb_inputs); + if (ff_bufqueue_is_full(&am->in[input_number].queue)) { + av_log(ctx, AV_LOG_ERROR, "Buffer queue overflow\n"); + av_frame_free(&insamples); + return AVERROR(ENOMEM); + } + ff_bufqueue_add(ctx, &am->in[input_number].queue, av_frame_clone(insamples)); + am->in[input_number].nb_samples += insamples->nb_samples; + av_frame_free(&insamples); + nb_samples = am->in[0].nb_samples; + for (i = 1; i 
< am->nb_inputs; i++) + nb_samples = FFMIN(nb_samples, am->in[i].nb_samples); + if (!nb_samples) + return 0; + + outbuf = ff_get_audio_buffer(ctx->outputs[0], nb_samples); + outs = outbuf->data[0]; + for (i = 0; i < am->nb_inputs; i++) { + inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0); + ins[i] = inbuf[i]->data[0] + + am->in[i].pos * am->in[i].nb_ch * am->bps; + } + av_frame_copy_props(outbuf, inbuf[0]); + outbuf->pts = inbuf[0]->pts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE : + inbuf[0]->pts + + av_rescale_q(am->in[0].pos, + (AVRational){ 1, ctx->inputs[0]->sample_rate }, + ctx->outputs[0]->time_base); + + outbuf->nb_samples = nb_samples; + outbuf->channel_layout = outlink->channel_layout; + av_frame_set_channels(outbuf, outlink->channels); + + while (nb_samples) { + ns = nb_samples; + for (i = 0; i < am->nb_inputs; i++) + ns = FFMIN(ns, inbuf[i]->nb_samples - am->in[i].pos); + /* Unroll the most common sample formats: speed +~350% for the loop, + +~13% overall (including two common decoders) */ + switch (am->bps) { + case 1: + copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 1); + break; + case 2: + copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 2); + break; + case 4: + copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 4); + break; + default: + copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, am->bps); + break; + } + + nb_samples -= ns; + for (i = 0; i < am->nb_inputs; i++) { + am->in[i].nb_samples -= ns; + am->in[i].pos += ns; + if (am->in[i].pos == inbuf[i]->nb_samples) { + am->in[i].pos = 0; + av_frame_free(&inbuf[i]); + ff_bufqueue_get(&am->in[i].queue); + inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0); + ins[i] = inbuf[i] ? 
inbuf[i]->data[0] : NULL; + } + } + } + return ff_filter_frame(ctx->outputs[0], outbuf); +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + AMergeContext *am = ctx->priv; + int ret, i; + + am->class = &amerge_class; + av_opt_set_defaults(am); + ret = av_set_options_string(am, args, "=", ":"); + if (ret < 0) { + av_log(ctx, AV_LOG_ERROR, "Error parsing options: '%s'\n", args); + return ret; + } + am->in = av_calloc(am->nb_inputs, sizeof(*am->in)); + if (!am->in) + return AVERROR(ENOMEM); + for (i = 0; i < am->nb_inputs; i++) { + char *name = av_asprintf("in%d", i); + AVFilterPad pad = { + .name = name, + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + }; + if (!name) + return AVERROR(ENOMEM); + ff_insert_inpad(ctx, i, &pad); + } + return 0; +} + +static const AVFilterPad amerge_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_output, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_af_amerge = { + .name = "amerge", + .description = NULL_IF_CONFIG_SMALL("Merge two or more audio streams into " + "a single multi-channel stream."), + .priv_size = sizeof(AMergeContext), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .inputs = NULL, + .outputs = amerge_outputs, + .priv_class = &amerge_class, +}; diff --git a/ffmpeg1/libavfilter/af_amix.c b/ffmpeg1/libavfilter/af_amix.c new file mode 100644 index 0000000..dcb24b0 --- /dev/null +++ b/ffmpeg1/libavfilter/af_amix.c @@ -0,0 +1,567 @@ +/* + * Audio Mix Filter + * Copyright (c) 2012 Justin Ruggles + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Audio Mix Filter + * + * Mixes audio from multiple sources into a single output. The channel layout, + * sample rate, and sample format will be the same for all inputs and the + * output. + */ + +#include "libavutil/audio_fifo.h" +#include "libavutil/avassert.h" +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/common.h" +#include "libavutil/float_dsp.h" +#include "libavutil/mathematics.h" +#include "libavutil/opt.h" +#include "libavutil/samplefmt.h" + +#include "audio.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" + +#define INPUT_OFF 0 /**< input has reached EOF */ +#define INPUT_ON 1 /**< input is active */ +#define INPUT_INACTIVE 2 /**< input is on, but is currently inactive */ + +#define DURATION_LONGEST 0 +#define DURATION_SHORTEST 1 +#define DURATION_FIRST 2 + + +typedef struct FrameInfo { + int nb_samples; + int64_t pts; + struct FrameInfo *next; +} FrameInfo; + +/** + * Linked list used to store timestamps and frame sizes of all frames in the + * FIFO for the first input. + * + * This is needed to keep timestamps synchronized for the case where multiple + * input frames are pushed to the filter for processing before a frame is + * requested by the output link. 
+ */ +typedef struct FrameList { + int nb_frames; + int nb_samples; + FrameInfo *list; + FrameInfo *end; +} FrameList; + +static void frame_list_clear(FrameList *frame_list) +{ + if (frame_list) { + while (frame_list->list) { + FrameInfo *info = frame_list->list; + frame_list->list = info->next; + av_free(info); + } + frame_list->nb_frames = 0; + frame_list->nb_samples = 0; + frame_list->end = NULL; + } +} + +static int frame_list_next_frame_size(FrameList *frame_list) +{ + if (!frame_list->list) + return 0; + return frame_list->list->nb_samples; +} + +static int64_t frame_list_next_pts(FrameList *frame_list) +{ + if (!frame_list->list) + return AV_NOPTS_VALUE; + return frame_list->list->pts; +} + +static void frame_list_remove_samples(FrameList *frame_list, int nb_samples) +{ + if (nb_samples >= frame_list->nb_samples) { + frame_list_clear(frame_list); + } else { + int samples = nb_samples; + while (samples > 0) { + FrameInfo *info = frame_list->list; + av_assert0(info != NULL); + if (info->nb_samples <= samples) { + samples -= info->nb_samples; + frame_list->list = info->next; + if (!frame_list->list) + frame_list->end = NULL; + frame_list->nb_frames--; + frame_list->nb_samples -= info->nb_samples; + av_free(info); + } else { + info->nb_samples -= samples; + info->pts += samples; + frame_list->nb_samples -= samples; + samples = 0; + } + } + } +} + +static int frame_list_add_frame(FrameList *frame_list, int nb_samples, int64_t pts) +{ + FrameInfo *info = av_malloc(sizeof(*info)); + if (!info) + return AVERROR(ENOMEM); + info->nb_samples = nb_samples; + info->pts = pts; + info->next = NULL; + + if (!frame_list->list) { + frame_list->list = info; + frame_list->end = info; + } else { + av_assert0(frame_list->end != NULL); + frame_list->end->next = info; + frame_list->end = info; + } + frame_list->nb_frames++; + frame_list->nb_samples += nb_samples; + + return 0; +} + + +typedef struct MixContext { + const AVClass *class; /**< class for AVOptions */ + 
AVFloatDSPContext fdsp; + + int nb_inputs; /**< number of inputs */ + int active_inputs; /**< number of input currently active */ + int duration_mode; /**< mode for determining duration */ + float dropout_transition; /**< transition time when an input drops out */ + + int nb_channels; /**< number of channels */ + int sample_rate; /**< sample rate */ + int planar; + AVAudioFifo **fifos; /**< audio fifo for each input */ + uint8_t *input_state; /**< current state of each input */ + float *input_scale; /**< mixing scale factor for each input */ + float scale_norm; /**< normalization factor for all inputs */ + int64_t next_pts; /**< calculated pts for next output frame */ + FrameList *frame_list; /**< list of frame info for the first input */ +} MixContext; + +#define OFFSET(x) offsetof(MixContext, x) +#define A AV_OPT_FLAG_AUDIO_PARAM +#define F AV_OPT_FLAG_FILTERING_PARAM +static const AVOption amix_options[] = { + { "inputs", "Number of inputs.", + OFFSET(nb_inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, 32, A|F }, + { "duration", "How to determine the end-of-stream.", + OFFSET(duration_mode), AV_OPT_TYPE_INT, { .i64 = DURATION_LONGEST }, 0, 2, A|F, "duration" }, + { "longest", "Duration of longest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_LONGEST }, INT_MIN, INT_MAX, A|F, "duration" }, + { "shortest", "Duration of shortest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_SHORTEST }, INT_MIN, INT_MAX, A|F, "duration" }, + { "first", "Duration of first input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_FIRST }, INT_MIN, INT_MAX, A|F, "duration" }, + { "dropout_transition", "Transition time, in seconds, for volume " + "renormalization when an input stream ends.", + OFFSET(dropout_transition), AV_OPT_TYPE_FLOAT, { .dbl = 2.0 }, 0, INT_MAX, A|F }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(amix); + +/** + * Update the scaling factors to apply to each input during mixing. 
+ * + * This balances the full volume range between active inputs and handles + * volume transitions when EOF is encountered on an input but mixing continues + * with the remaining inputs. + */ +static void calculate_scales(MixContext *s, int nb_samples) +{ + int i; + + if (s->scale_norm > s->active_inputs) { + s->scale_norm -= nb_samples / (s->dropout_transition * s->sample_rate); + s->scale_norm = FFMAX(s->scale_norm, s->active_inputs); + } + + for (i = 0; i < s->nb_inputs; i++) { + if (s->input_state[i] == INPUT_ON) + s->input_scale[i] = 1.0f / s->scale_norm; + else + s->input_scale[i] = 0.0f; + } +} + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + MixContext *s = ctx->priv; + int i; + char buf[64]; + + s->planar = av_sample_fmt_is_planar(outlink->format); + s->sample_rate = outlink->sample_rate; + outlink->time_base = (AVRational){ 1, outlink->sample_rate }; + s->next_pts = AV_NOPTS_VALUE; + + s->frame_list = av_mallocz(sizeof(*s->frame_list)); + if (!s->frame_list) + return AVERROR(ENOMEM); + + s->fifos = av_mallocz(s->nb_inputs * sizeof(*s->fifos)); + if (!s->fifos) + return AVERROR(ENOMEM); + + s->nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout); + for (i = 0; i < s->nb_inputs; i++) { + s->fifos[i] = av_audio_fifo_alloc(outlink->format, s->nb_channels, 1024); + if (!s->fifos[i]) + return AVERROR(ENOMEM); + } + + s->input_state = av_malloc(s->nb_inputs); + if (!s->input_state) + return AVERROR(ENOMEM); + memset(s->input_state, INPUT_ON, s->nb_inputs); + s->active_inputs = s->nb_inputs; + + s->input_scale = av_mallocz(s->nb_inputs * sizeof(*s->input_scale)); + if (!s->input_scale) + return AVERROR(ENOMEM); + s->scale_norm = s->active_inputs; + calculate_scales(s, 0); + + av_get_channel_layout_string(buf, sizeof(buf), -1, outlink->channel_layout); + + av_log(ctx, AV_LOG_VERBOSE, + "inputs:%d fmt:%s srate:%d cl:%s\n", s->nb_inputs, + av_get_sample_fmt_name(outlink->format), 
outlink->sample_rate, buf); + + return 0; +} + +/** + * Read samples from the input FIFOs, mix, and write to the output link. + */ +static int output_frame(AVFilterLink *outlink, int nb_samples) +{ + AVFilterContext *ctx = outlink->src; + MixContext *s = ctx->priv; + AVFrame *out_buf, *in_buf; + int i; + + calculate_scales(s, nb_samples); + + out_buf = ff_get_audio_buffer(outlink, nb_samples); + if (!out_buf) + return AVERROR(ENOMEM); + + in_buf = ff_get_audio_buffer(outlink, nb_samples); + if (!in_buf) { + av_frame_free(&out_buf); + return AVERROR(ENOMEM); + } + + for (i = 0; i < s->nb_inputs; i++) { + if (s->input_state[i] == INPUT_ON) { + int planes, plane_size, p; + + av_audio_fifo_read(s->fifos[i], (void **)in_buf->extended_data, + nb_samples); + + planes = s->planar ? s->nb_channels : 1; + plane_size = nb_samples * (s->planar ? 1 : s->nb_channels); + plane_size = FFALIGN(plane_size, 16); + + for (p = 0; p < planes; p++) { + s->fdsp.vector_fmac_scalar((float *)out_buf->extended_data[p], + (float *) in_buf->extended_data[p], + s->input_scale[i], plane_size); + } + } + } + av_frame_free(&in_buf); + + out_buf->pts = s->next_pts; + if (s->next_pts != AV_NOPTS_VALUE) + s->next_pts += nb_samples; + + return ff_filter_frame(outlink, out_buf); +} + +/** + * Returns the smallest number of samples available in the input FIFOs other + * than that of the first input. + */ +static int get_available_samples(MixContext *s) +{ + int i; + int available_samples = INT_MAX; + + av_assert0(s->nb_inputs > 1); + + for (i = 1; i < s->nb_inputs; i++) { + int nb_samples; + if (s->input_state[i] == INPUT_OFF) + continue; + nb_samples = av_audio_fifo_size(s->fifos[i]); + available_samples = FFMIN(available_samples, nb_samples); + } + if (available_samples == INT_MAX) + return 0; + return available_samples; +} + +/** + * Requests a frame, if needed, from each input link other than the first. 
+ */ +static int request_samples(AVFilterContext *ctx, int min_samples) +{ + MixContext *s = ctx->priv; + int i, ret; + + av_assert0(s->nb_inputs > 1); + + for (i = 1; i < s->nb_inputs; i++) { + ret = 0; + if (s->input_state[i] == INPUT_OFF) + continue; + while (!ret && av_audio_fifo_size(s->fifos[i]) < min_samples) + ret = ff_request_frame(ctx->inputs[i]); + if (ret == AVERROR_EOF) { + if (av_audio_fifo_size(s->fifos[i]) == 0) { + s->input_state[i] = INPUT_OFF; + continue; + } + } else if (ret < 0) + return ret; + } + return 0; +} + +/** + * Calculates the number of active inputs and determines EOF based on the + * duration option. + * + * @return 0 if mixing should continue, or AVERROR_EOF if mixing should stop. + */ +static int calc_active_inputs(MixContext *s) +{ + int i; + int active_inputs = 0; + for (i = 0; i < s->nb_inputs; i++) + active_inputs += !!(s->input_state[i] != INPUT_OFF); + s->active_inputs = active_inputs; + + if (!active_inputs || + (s->duration_mode == DURATION_FIRST && s->input_state[0] == INPUT_OFF) || + (s->duration_mode == DURATION_SHORTEST && active_inputs != s->nb_inputs)) + return AVERROR_EOF; + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + MixContext *s = ctx->priv; + int ret; + int wanted_samples, available_samples; + + ret = calc_active_inputs(s); + if (ret < 0) + return ret; + + if (s->input_state[0] == INPUT_OFF) { + ret = request_samples(ctx, 1); + if (ret < 0) + return ret; + + ret = calc_active_inputs(s); + if (ret < 0) + return ret; + + available_samples = get_available_samples(s); + if (!available_samples) + return AVERROR(EAGAIN); + + return output_frame(outlink, available_samples); + } + + if (s->frame_list->nb_frames == 0) { + ret = ff_request_frame(ctx->inputs[0]); + if (ret == AVERROR_EOF) { + s->input_state[0] = INPUT_OFF; + if (s->nb_inputs == 1) + return AVERROR_EOF; + else + return AVERROR(EAGAIN); + } else if (ret < 0) + return ret; + } + 
av_assert0(s->frame_list->nb_frames > 0); + + wanted_samples = frame_list_next_frame_size(s->frame_list); + + if (s->active_inputs > 1) { + ret = request_samples(ctx, wanted_samples); + if (ret < 0) + return ret; + + ret = calc_active_inputs(s); + if (ret < 0) + return ret; + } + + if (s->active_inputs > 1) { + available_samples = get_available_samples(s); + if (!available_samples) + return AVERROR(EAGAIN); + available_samples = FFMIN(available_samples, wanted_samples); + } else { + available_samples = wanted_samples; + } + + s->next_pts = frame_list_next_pts(s->frame_list); + frame_list_remove_samples(s->frame_list, available_samples); + + return output_frame(outlink, available_samples); +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *buf) +{ + AVFilterContext *ctx = inlink->dst; + MixContext *s = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; + int i, ret = 0; + + for (i = 0; i < ctx->nb_inputs; i++) + if (ctx->inputs[i] == inlink) + break; + if (i >= ctx->nb_inputs) { + av_log(ctx, AV_LOG_ERROR, "unknown input link\n"); + ret = AVERROR(EINVAL); + goto fail; + } + + if (i == 0) { + int64_t pts = av_rescale_q(buf->pts, inlink->time_base, + outlink->time_base); + ret = frame_list_add_frame(s->frame_list, buf->nb_samples, pts); + if (ret < 0) + goto fail; + } + + ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data, + buf->nb_samples); + +fail: + av_frame_free(&buf); + + return ret; +} + +static int init(AVFilterContext *ctx, const char *args) +{ + MixContext *s = ctx->priv; + int i, ret; + + s->class = &amix_class; + av_opt_set_defaults(s); + + if ((ret = av_set_options_string(s, args, "=", ":")) < 0) + return ret; + av_opt_free(s); + + for (i = 0; i < s->nb_inputs; i++) { + char name[32]; + AVFilterPad pad = { 0 }; + + snprintf(name, sizeof(name), "input%d", i); + pad.type = AVMEDIA_TYPE_AUDIO; + pad.name = av_strdup(name); + pad.filter_frame = filter_frame; + + ff_insert_inpad(ctx, i, &pad); + } + + 
avpriv_float_dsp_init(&s->fdsp, 0); + + return 0; +} + +static void uninit(AVFilterContext *ctx) +{ + int i; + MixContext *s = ctx->priv; + + if (s->fifos) { + for (i = 0; i < s->nb_inputs; i++) + av_audio_fifo_free(s->fifos[i]); + av_freep(&s->fifos); + } + frame_list_clear(s->frame_list); + av_freep(&s->frame_list); + av_freep(&s->input_state); + av_freep(&s->input_scale); + + for (i = 0; i < ctx->nb_inputs; i++) + av_freep(&ctx->input_pads[i].name); +} + +static int query_formats(AVFilterContext *ctx) +{ + AVFilterFormats *formats = NULL; + ff_add_format(&formats, AV_SAMPLE_FMT_FLT); + ff_add_format(&formats, AV_SAMPLE_FMT_FLTP); + ff_set_common_formats(ctx, formats); + ff_set_common_channel_layouts(ctx, ff_all_channel_layouts()); + ff_set_common_samplerates(ctx, ff_all_samplerates()); + return 0; +} + +static const AVFilterPad avfilter_af_amix_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_output, + .request_frame = request_frame + }, + { NULL } +}; + +AVFilter avfilter_af_amix = { + .name = "amix", + .description = NULL_IF_CONFIG_SMALL("Audio mixing."), + .priv_size = sizeof(MixContext), + + .init = init, + .uninit = uninit, + .query_formats = query_formats, + + .inputs = NULL, + .outputs = avfilter_af_amix_outputs, + .priv_class = &amix_class, +}; diff --git a/ffmpeg1/libavfilter/af_anull.c b/ffmpeg1/libavfilter/af_anull.c new file mode 100644 index 0000000..c61da3b --- /dev/null +++ b/ffmpeg1/libavfilter/af_anull.c @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * null audio filter + */ + +#include "audio.h" +#include "avfilter.h" +#include "internal.h" +#include "libavutil/internal.h" + +static const AVFilterPad avfilter_af_anull_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .get_audio_buffer = ff_null_get_audio_buffer, + }, + { NULL } +}; + +static const AVFilterPad avfilter_af_anull_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL } +}; + +AVFilter avfilter_af_anull = { + .name = "anull", + .description = NULL_IF_CONFIG_SMALL("Pass the source unchanged to the output."), + + .priv_size = 0, + + .query_formats = ff_query_formats_all, + + .inputs = avfilter_af_anull_inputs, + + .outputs = avfilter_af_anull_outputs, +}; diff --git a/ffmpeg1/libavfilter/af_apad.c b/ffmpeg1/libavfilter/af_apad.c new file mode 100644 index 0000000..b4a0fc8 --- /dev/null +++ b/ffmpeg1/libavfilter/af_apad.c @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2012 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +/** + * @file + * audio pad filter. + * + * Based on af_aresample.c + */ + +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "libavutil/samplefmt.h" +#include "libavutil/avassert.h" +#include "avfilter.h" +#include "audio.h" +#include "internal.h" + +typedef struct { + const AVClass *class; + int64_t next_pts; + + int packet_size; + int64_t pad_len; + int64_t whole_len; +} APadContext; + +#define OFFSET(x) offsetof(APadContext, x) +#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption apad_options[] = { + { "packet_size", "set silence packet size", OFFSET(packet_size), AV_OPT_TYPE_INT, { .i64 = 4096 }, 0, INT_MAX, A }, + { "pad_len", "number of samples of silence to add", OFFSET(pad_len), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, A }, + { "whole_len", "target number of samples in the audio stream", OFFSET(whole_len), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, A }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(apad); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + int ret; + APadContext *apad = ctx->priv; + + apad->class = &apad_class; + apad->next_pts = AV_NOPTS_VALUE; + + av_opt_set_defaults(apad); + + if ((ret = av_opt_set_from_string(apad, args, NULL, "=", ":")) < 0) + return ret; + + if (apad->whole_len && apad->pad_len) { + av_log(ctx, AV_LOG_ERROR, "Both whole and pad length are set, this is not possible\n"); + return AVERROR(EINVAL); + } + + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *frame) +{ + AVFilterContext *ctx = inlink->dst; + APadContext *apad = ctx->priv; + + if (apad->whole_len) + apad->whole_len -= 
frame->nb_samples; + + apad->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base); + return ff_filter_frame(ctx->outputs[0], frame); +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + APadContext *apad = ctx->priv; + int ret; + + ret = ff_request_frame(ctx->inputs[0]); + + if (ret == AVERROR_EOF) { + int n_out = apad->packet_size; + AVFrame *outsamplesref; + + if (apad->whole_len > 0) { + apad->pad_len = apad->whole_len; + apad->whole_len = 0; + } + if (apad->pad_len > 0) { + n_out = FFMIN(n_out, apad->pad_len); + apad->pad_len -= n_out; + } + + if(!n_out) + return AVERROR_EOF; + + outsamplesref = ff_get_audio_buffer(outlink, n_out); + if (!outsamplesref) + return AVERROR(ENOMEM); + + av_assert0(outsamplesref->sample_rate == outlink->sample_rate); + av_assert0(outsamplesref->nb_samples == n_out); + + av_samples_set_silence(outsamplesref->extended_data, 0, + n_out, + av_frame_get_channels(outsamplesref), + outsamplesref->format); + + outsamplesref->pts = apad->next_pts; + if (apad->next_pts != AV_NOPTS_VALUE) + apad->next_pts += av_rescale_q(n_out, (AVRational){1, outlink->sample_rate}, outlink->time_base); + + return ff_filter_frame(outlink, outsamplesref); + } + return ret; +} + +static const AVFilterPad apad_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + }, + { NULL }, +}; + +static const AVFilterPad apad_outputs[] = { + { + .name = "default", + .request_frame = request_frame, + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL }, +}; + +AVFilter avfilter_af_apad = { + .name = "apad", + .description = NULL_IF_CONFIG_SMALL("Pad audio with silence."), + .init = init, + .priv_size = sizeof(APadContext), + .inputs = apad_inputs, + .outputs = apad_outputs, + .priv_class = &apad_class, +}; diff --git a/ffmpeg1/libavfilter/af_aresample.c b/ffmpeg1/libavfilter/af_aresample.c new file mode 100644 index 0000000..80351c3 
--- /dev/null +++ b/ffmpeg1/libavfilter/af_aresample.c @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2011 Stefano Sabatini + * Copyright (c) 2011 Mina Nagy Zaki + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * resampling audio filter + */ + +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "libavutil/samplefmt.h" +#include "libavutil/avassert.h" +#include "libswresample/swresample.h" +#include "avfilter.h" +#include "audio.h" +#include "internal.h" + +typedef struct { + double ratio; + struct SwrContext *swr; + int64_t next_pts; + int req_fullfilled; +} AResampleContext; + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + AResampleContext *aresample = ctx->priv; + int ret = 0; + char *argd = av_strdup(args); + + aresample->next_pts = AV_NOPTS_VALUE; + aresample->swr = swr_alloc(); + if (!aresample->swr) { + ret = AVERROR(ENOMEM); + goto end; + } + + if (args) { + char *ptr = argd, *token; + + while (token = av_strtok(ptr, ":", &ptr)) { + char *value; + av_strtok(token, "=", &value); + + if (value) { + if ((ret = av_opt_set(aresample->swr, token, value, 0)) < 0) + goto end; + } else { + int out_rate; + if ((ret = ff_parse_sample_rate(&out_rate, token, ctx)) 
< 0) + goto end; + if ((ret = av_opt_set_int(aresample->swr, "osr", out_rate, 0)) < 0) + goto end; + } + } + } +end: + av_free(argd); + return ret; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + AResampleContext *aresample = ctx->priv; + swr_free(&aresample->swr); +} + +static int query_formats(AVFilterContext *ctx) +{ + AResampleContext *aresample = ctx->priv; + int out_rate = av_get_int(aresample->swr, "osr", NULL); + uint64_t out_layout = av_get_int(aresample->swr, "ocl", NULL); + enum AVSampleFormat out_format = av_get_int(aresample->swr, "osf", NULL); + + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; + + AVFilterFormats *in_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO); + AVFilterFormats *out_formats; + AVFilterFormats *in_samplerates = ff_all_samplerates(); + AVFilterFormats *out_samplerates; + AVFilterChannelLayouts *in_layouts = ff_all_channel_counts(); + AVFilterChannelLayouts *out_layouts; + + ff_formats_ref (in_formats, &inlink->out_formats); + ff_formats_ref (in_samplerates, &inlink->out_samplerates); + ff_channel_layouts_ref(in_layouts, &inlink->out_channel_layouts); + + if(out_rate > 0) { + out_samplerates = ff_make_format_list((int[]){ out_rate, -1 }); + } else { + out_samplerates = ff_all_samplerates(); + } + ff_formats_ref(out_samplerates, &outlink->in_samplerates); + + if(out_format != AV_SAMPLE_FMT_NONE) { + out_formats = ff_make_format_list((int[]){ out_format, -1 }); + } else + out_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO); + ff_formats_ref(out_formats, &outlink->in_formats); + + if(out_layout) { + out_layouts = avfilter_make_format64_list((int64_t[]){ out_layout, -1 }); + } else + out_layouts = ff_all_channel_counts(); + ff_channel_layouts_ref(out_layouts, &outlink->in_channel_layouts); + + return 0; +} + + +static int config_output(AVFilterLink *outlink) +{ + int ret; + AVFilterContext *ctx = outlink->src; + AVFilterLink *inlink = ctx->inputs[0]; + AResampleContext *aresample = ctx->priv; + 
int out_rate; + uint64_t out_layout; + enum AVSampleFormat out_format; + char inchl_buf[128], outchl_buf[128]; + + aresample->swr = swr_alloc_set_opts(aresample->swr, + outlink->channel_layout, outlink->format, outlink->sample_rate, + inlink->channel_layout, inlink->format, inlink->sample_rate, + 0, ctx); + if (!aresample->swr) + return AVERROR(ENOMEM); + if (!inlink->channel_layout) + av_opt_set_int(aresample->swr, "ich", inlink->channels, 0); + if (!outlink->channel_layout) + av_opt_set_int(aresample->swr, "och", outlink->channels, 0); + + ret = swr_init(aresample->swr); + if (ret < 0) + return ret; + + out_rate = av_get_int(aresample->swr, "osr", NULL); + out_layout = av_get_int(aresample->swr, "ocl", NULL); + out_format = av_get_int(aresample->swr, "osf", NULL); + outlink->time_base = (AVRational) {1, out_rate}; + + av_assert0(outlink->sample_rate == out_rate); + av_assert0(outlink->channel_layout == out_layout || !outlink->channel_layout); + av_assert0(outlink->format == out_format); + + aresample->ratio = (double)outlink->sample_rate / inlink->sample_rate; + + av_get_channel_layout_string(inchl_buf, sizeof(inchl_buf), inlink ->channels, inlink ->channel_layout); + av_get_channel_layout_string(outchl_buf, sizeof(outchl_buf), outlink->channels, outlink->channel_layout); + + av_log(ctx, AV_LOG_VERBOSE, "ch:%d chl:%s fmt:%s r:%dHz -> ch:%d chl:%s fmt:%s r:%dHz\n", + inlink ->channels, inchl_buf, av_get_sample_fmt_name(inlink->format), inlink->sample_rate, + outlink->channels, outchl_buf, av_get_sample_fmt_name(outlink->format), outlink->sample_rate); + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref) +{ + AResampleContext *aresample = inlink->dst->priv; + const int n_in = insamplesref->nb_samples; + int n_out = n_in * aresample->ratio * 2 + 256; + AVFilterLink *const outlink = inlink->dst->outputs[0]; + AVFrame *outsamplesref = ff_get_audio_buffer(outlink, n_out); + int ret; + + if(!outsamplesref) + return AVERROR(ENOMEM); + + 
av_frame_copy_props(outsamplesref, insamplesref); + outsamplesref->format = outlink->format; + av_frame_set_channels(outsamplesref, outlink->channels); + outsamplesref->channel_layout = outlink->channel_layout; + outsamplesref->sample_rate = outlink->sample_rate; + + if(insamplesref->pts != AV_NOPTS_VALUE) { + int64_t inpts = av_rescale(insamplesref->pts, inlink->time_base.num * (int64_t)outlink->sample_rate * inlink->sample_rate, inlink->time_base.den); + int64_t outpts= swr_next_pts(aresample->swr, inpts); + aresample->next_pts = + outsamplesref->pts = ROUNDED_DIV(outpts, inlink->sample_rate); + } else { + outsamplesref->pts = AV_NOPTS_VALUE; + } + n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out, + (void *)insamplesref->extended_data, n_in); + if (n_out <= 0) { + av_frame_free(&outsamplesref); + av_frame_free(&insamplesref); + return 0; + } + + outsamplesref->nb_samples = n_out; + + ret = ff_filter_frame(outlink, outsamplesref); + aresample->req_fullfilled= 1; + av_frame_free(&insamplesref); + return ret; +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AResampleContext *aresample = ctx->priv; + AVFilterLink *const inlink = outlink->src->inputs[0]; + int ret; + + aresample->req_fullfilled = 0; + do{ + ret = ff_request_frame(ctx->inputs[0]); + }while(!aresample->req_fullfilled && ret>=0); + + if (ret == AVERROR_EOF) { + AVFrame *outsamplesref; + int n_out = 4096; + + outsamplesref = ff_get_audio_buffer(outlink, n_out); + if (!outsamplesref) + return AVERROR(ENOMEM); + n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out, 0, 0); + if (n_out <= 0) { + av_frame_free(&outsamplesref); + return (n_out == 0) ? 
AVERROR_EOF : n_out; + } + + outsamplesref->sample_rate = outlink->sample_rate; + outsamplesref->nb_samples = n_out; +#if 0 + outsamplesref->pts = aresample->next_pts; + if(aresample->next_pts != AV_NOPTS_VALUE) + aresample->next_pts += av_rescale_q(n_out, (AVRational){1 ,outlink->sample_rate}, outlink->time_base); +#else + outsamplesref->pts = swr_next_pts(aresample->swr, INT64_MIN); + outsamplesref->pts = ROUNDED_DIV(outsamplesref->pts, inlink->sample_rate); +#endif + + return ff_filter_frame(outlink, outsamplesref); + } + return ret; +} + +static const AVFilterPad aresample_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + }, + { NULL }, +}; + +static const AVFilterPad aresample_outputs[] = { + { + .name = "default", + .config_props = config_output, + .request_frame = request_frame, + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL }, +}; + +AVFilter avfilter_af_aresample = { + .name = "aresample", + .description = NULL_IF_CONFIG_SMALL("Resample audio data."), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .priv_size = sizeof(AResampleContext), + .inputs = aresample_inputs, + .outputs = aresample_outputs, +}; diff --git a/ffmpeg1/libavfilter/af_asetnsamples.c b/ffmpeg1/libavfilter/af_asetnsamples.c new file mode 100644 index 0000000..08e5279 --- /dev/null +++ b/ffmpeg1/libavfilter/af_asetnsamples.c @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2012 Andrey Utkin + * Copyright (c) 2012 Stefano Sabatini + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Filter that changes number of samples on single output operation + */ + +#include "libavutil/audio_fifo.h" +#include "libavutil/avassert.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "avfilter.h" +#include "audio.h" +#include "internal.h" +#include "formats.h" + +typedef struct { + const AVClass *class; + int nb_out_samples; ///< how many samples to output + AVAudioFifo *fifo; ///< samples are queued here + int64_t next_out_pts; + int req_fullfilled; + int pad; +} ASNSContext; + +#define OFFSET(x) offsetof(ASNSContext, x) +#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption asetnsamples_options[] = { +{ "pad", "pad last frame with zeros", OFFSET(pad), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS }, +{ "p", "pad last frame with zeros", OFFSET(pad), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS }, +{ "nb_out_samples", "set the number of per-frame output samples", OFFSET(nb_out_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS }, +{ "n", "set the number of per-frame output samples", OFFSET(nb_out_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS }, +{ NULL } +}; + +AVFILTER_DEFINE_CLASS(asetnsamples); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + ASNSContext *asns = ctx->priv; + int err; + + asns->class = &asetnsamples_class; + av_opt_set_defaults(asns); + + if ((err = av_set_options_string(asns, args, "=", ":")) < 0) + return err; + + asns->next_out_pts = AV_NOPTS_VALUE; + av_log(ctx, AV_LOG_VERBOSE, "nb_out_samples:%d pad:%d\n", asns->nb_out_samples, asns->pad); + + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + ASNSContext *asns = 
ctx->priv; + av_audio_fifo_free(asns->fifo); +} + +static int config_props_output(AVFilterLink *outlink) +{ + ASNSContext *asns = outlink->src->priv; + int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout); + + asns->fifo = av_audio_fifo_alloc(outlink->format, nb_channels, asns->nb_out_samples); + if (!asns->fifo) + return AVERROR(ENOMEM); + + return 0; +} + +static int push_samples(AVFilterLink *outlink) +{ + ASNSContext *asns = outlink->src->priv; + AVFrame *outsamples = NULL; + int ret, nb_out_samples, nb_pad_samples; + + if (asns->pad) { + nb_out_samples = av_audio_fifo_size(asns->fifo) ? asns->nb_out_samples : 0; + nb_pad_samples = nb_out_samples - FFMIN(nb_out_samples, av_audio_fifo_size(asns->fifo)); + } else { + nb_out_samples = FFMIN(asns->nb_out_samples, av_audio_fifo_size(asns->fifo)); + nb_pad_samples = 0; + } + + if (!nb_out_samples) + return 0; + + outsamples = ff_get_audio_buffer(outlink, nb_out_samples); + av_assert0(outsamples); + + av_audio_fifo_read(asns->fifo, + (void **)outsamples->extended_data, nb_out_samples); + + if (nb_pad_samples) + av_samples_set_silence(outsamples->extended_data, nb_out_samples - nb_pad_samples, + nb_pad_samples, av_get_channel_layout_nb_channels(outlink->channel_layout), + outlink->format); + outsamples->nb_samples = nb_out_samples; + outsamples->channel_layout = outlink->channel_layout; + outsamples->sample_rate = outlink->sample_rate; + outsamples->pts = asns->next_out_pts; + + if (asns->next_out_pts != AV_NOPTS_VALUE) + asns->next_out_pts += nb_out_samples; + + ret = ff_filter_frame(outlink, outsamples); + if (ret < 0) + return ret; + asns->req_fullfilled = 1; + return nb_out_samples; +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) +{ + AVFilterContext *ctx = inlink->dst; + ASNSContext *asns = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; + int ret; + int nb_samples = insamples->nb_samples; + + if (av_audio_fifo_space(asns->fifo) < nb_samples) { + av_log(ctx, 
AV_LOG_DEBUG, "No space for %d samples, stretching audio fifo\n", nb_samples); + ret = av_audio_fifo_realloc(asns->fifo, av_audio_fifo_size(asns->fifo) + nb_samples); + if (ret < 0) { + av_log(ctx, AV_LOG_ERROR, + "Stretching audio fifo failed, discarded %d samples\n", nb_samples); + return -1; + } + } + av_audio_fifo_write(asns->fifo, (void **)insamples->extended_data, nb_samples); + if (asns->next_out_pts == AV_NOPTS_VALUE) + asns->next_out_pts = insamples->pts; + av_frame_free(&insamples); + + while (av_audio_fifo_size(asns->fifo) >= asns->nb_out_samples) + push_samples(outlink); + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + ASNSContext *asns = outlink->src->priv; + AVFilterLink *inlink = outlink->src->inputs[0]; + int ret; + + asns->req_fullfilled = 0; + do { + ret = ff_request_frame(inlink); + } while (!asns->req_fullfilled && ret >= 0); + + if (ret == AVERROR_EOF) { + do { + ret = push_samples(outlink); + } while (ret > 0); + } + + return ret; +} + +static const AVFilterPad asetnsamples_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + .needs_writable = 1, + }, + { NULL } +}; + +static const AVFilterPad asetnsamples_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .request_frame = request_frame, + .config_props = config_props_output, + }, + { NULL } +}; + +AVFilter avfilter_af_asetnsamples = { + .name = "asetnsamples", + .description = NULL_IF_CONFIG_SMALL("Set the number of samples for each output audio frames."), + .priv_size = sizeof(ASNSContext), + .init = init, + .uninit = uninit, + .inputs = asetnsamples_inputs, + .outputs = asetnsamples_outputs, + .priv_class = &asetnsamples_class, +}; diff --git a/ffmpeg1/libavfilter/af_ashowinfo.c b/ffmpeg1/libavfilter/af_ashowinfo.c new file mode 100644 index 0000000..f53584e --- /dev/null +++ b/ffmpeg1/libavfilter/af_ashowinfo.c @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2011 Stefano Sabatini + * + * This file is part of 
FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * filter for showing textual audio frame information + */ + +#include +#include + +#include "libavutil/adler32.h" +#include "libavutil/channel_layout.h" +#include "libavutil/common.h" +#include "libavutil/mem.h" +#include "libavutil/timestamp.h" +#include "libavutil/samplefmt.h" + +#include "audio.h" +#include "avfilter.h" +#include "internal.h" + +typedef struct AShowInfoContext { + /** + * Scratch space for individual plane checksums for planar audio + */ + uint32_t *plane_checksums; + + /** + * Frame counter + */ + uint64_t frame; +} AShowInfoContext; + +static void uninit(AVFilterContext *ctx) +{ + AShowInfoContext *s = ctx->priv; + av_freep(&s->plane_checksums); +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *buf) +{ + AVFilterContext *ctx = inlink->dst; + AShowInfoContext *s = ctx->priv; + char chlayout_str[128]; + uint32_t checksum = 0; + int channels = av_get_channel_layout_nb_channels(buf->channel_layout); + int planar = av_sample_fmt_is_planar(buf->format); + int block_align = av_get_bytes_per_sample(buf->format) * (planar ? 1 : channels); + int data_size = buf->nb_samples * block_align; + int planes = planar ? 
channels : 1; + int i; + void *tmp_ptr = av_realloc(s->plane_checksums, channels * sizeof(*s->plane_checksums)); + + if (!tmp_ptr) + return AVERROR(ENOMEM); + s->plane_checksums = tmp_ptr; + + for (i = 0; i < planes; i++) { + uint8_t *data = buf->extended_data[i]; + + s->plane_checksums[i] = av_adler32_update(0, data, data_size); + checksum = i ? av_adler32_update(checksum, data, data_size) : + s->plane_checksums[0]; + } + + av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), -1, + buf->channel_layout); + + av_log(ctx, AV_LOG_INFO, + "n:%"PRIu64" pts:%s pts_time:%s pos:%"PRId64" " + "fmt:%s channels:%d chlayout:%s rate:%d nb_samples:%d " + "checksum:%08X ", + s->frame, + av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base), + av_frame_get_pkt_pos(buf), + av_get_sample_fmt_name(buf->format), av_frame_get_channels(buf), chlayout_str, + buf->sample_rate, buf->nb_samples, + checksum); + + av_log(ctx, AV_LOG_INFO, "plane_checksums: [ "); + for (i = 0; i < planes; i++) + av_log(ctx, AV_LOG_INFO, "%08X ", s->plane_checksums[i]); + av_log(ctx, AV_LOG_INFO, "]\n"); + + s->frame++; + return ff_filter_frame(inlink->dst->outputs[0], buf); +} + +static const AVFilterPad inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .get_audio_buffer = ff_null_get_audio_buffer, + .filter_frame = filter_frame, + }, + { NULL }, +}; + +static const AVFilterPad outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL }, +}; + +AVFilter avfilter_af_ashowinfo = { + .name = "ashowinfo", + .description = NULL_IF_CONFIG_SMALL("Show textual information for each audio frame."), + .priv_size = sizeof(AShowInfoContext), + .uninit = uninit, + .inputs = inputs, + .outputs = outputs, +}; diff --git a/ffmpeg1/libavfilter/af_astreamsync.c b/ffmpeg1/libavfilter/af_astreamsync.c new file mode 100644 index 0000000..79f703a --- /dev/null +++ b/ffmpeg1/libavfilter/af_astreamsync.c @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2011 Nicolas George + * 
+ * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Stream (de)synchronization filter + */ + +#include "libavutil/eval.h" +#include "avfilter.h" +#include "audio.h" +#include "internal.h" + +#define QUEUE_SIZE 16 + +static const char * const var_names[] = { + "b1", "b2", + "s1", "s2", + "t1", "t2", + NULL +}; + +enum var_name { + VAR_B1, VAR_B2, + VAR_S1, VAR_S2, + VAR_T1, VAR_T2, + VAR_NB +}; + +typedef struct { + AVExpr *expr; + double var_values[VAR_NB]; + struct buf_queue { + AVFrame *buf[QUEUE_SIZE]; + unsigned tail, nb; + /* buf[tail] is the oldest, + buf[(tail + nb) % QUEUE_SIZE] is where the next is added */ + } queue[2]; + int req[2]; + int next_out; + int eof; /* bitmask, one bit for each stream */ +} AStreamSyncContext; + +static const char *default_expr = "t1-t2"; + +static av_cold int init(AVFilterContext *ctx, const char *args0) +{ + AStreamSyncContext *as = ctx->priv; + const char *expr = args0 ? 
args0 : default_expr; + int r, i; + + r = av_expr_parse(&as->expr, expr, var_names, + NULL, NULL, NULL, NULL, 0, ctx); + if (r < 0) { + av_log(ctx, AV_LOG_ERROR, "Error in expression \"%s\"\n", expr); + return r; + } + for (i = 0; i < 42; i++) + av_expr_eval(as->expr, as->var_values, NULL); /* exercize prng */ + return 0; +} + +static int query_formats(AVFilterContext *ctx) +{ + int i; + AVFilterFormats *formats, *rates; + AVFilterChannelLayouts *layouts; + + for (i = 0; i < 2; i++) { + formats = ctx->inputs[i]->in_formats; + ff_formats_ref(formats, &ctx->inputs[i]->out_formats); + ff_formats_ref(formats, &ctx->outputs[i]->in_formats); + rates = ff_all_samplerates(); + ff_formats_ref(rates, &ctx->inputs[i]->out_samplerates); + ff_formats_ref(rates, &ctx->outputs[i]->in_samplerates); + layouts = ctx->inputs[i]->in_channel_layouts; + ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts); + ff_channel_layouts_ref(layouts, &ctx->outputs[i]->in_channel_layouts); + } + return 0; +} + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + int id = outlink == ctx->outputs[1]; + + outlink->sample_rate = ctx->inputs[id]->sample_rate; + outlink->time_base = ctx->inputs[id]->time_base; + return 0; +} + +static int send_out(AVFilterContext *ctx, int out_id) +{ + AStreamSyncContext *as = ctx->priv; + struct buf_queue *queue = &as->queue[out_id]; + AVFrame *buf = queue->buf[queue->tail]; + int ret; + + queue->buf[queue->tail] = NULL; + as->var_values[VAR_B1 + out_id]++; + as->var_values[VAR_S1 + out_id] += buf->nb_samples; + if (buf->pts != AV_NOPTS_VALUE) + as->var_values[VAR_T1 + out_id] = + av_q2d(ctx->outputs[out_id]->time_base) * buf->pts; + as->var_values[VAR_T1 + out_id] += buf->nb_samples / + (double)ctx->inputs[out_id]->sample_rate; + ret = ff_filter_frame(ctx->outputs[out_id], buf); + queue->nb--; + queue->tail = (queue->tail + 1) % QUEUE_SIZE; + if (as->req[out_id]) + as->req[out_id]--; + return ret; +} + +static 
void send_next(AVFilterContext *ctx) +{ + AStreamSyncContext *as = ctx->priv; + int i; + + while (1) { + if (!as->queue[as->next_out].nb) + break; + send_out(ctx, as->next_out); + if (!as->eof) + as->next_out = av_expr_eval(as->expr, as->var_values, NULL) >= 0; + } + for (i = 0; i < 2; i++) + if (as->queue[i].nb == QUEUE_SIZE) + send_out(ctx, i); +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AStreamSyncContext *as = ctx->priv; + int id = outlink == ctx->outputs[1]; + + as->req[id]++; + while (as->req[id] && !(as->eof & (1 << id))) { + if (as->queue[as->next_out].nb) { + send_next(ctx); + } else { + as->eof |= 1 << as->next_out; + ff_request_frame(ctx->inputs[as->next_out]); + if (as->eof & (1 << as->next_out)) + as->next_out = !as->next_out; + } + } + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) +{ + AVFilterContext *ctx = inlink->dst; + AStreamSyncContext *as = ctx->priv; + int id = inlink == ctx->inputs[1]; + + as->queue[id].buf[(as->queue[id].tail + as->queue[id].nb++) % QUEUE_SIZE] = + insamples; + as->eof &= ~(1 << id); + send_next(ctx); + return 0; +} + +static const AVFilterPad astreamsync_inputs[] = { + { + .name = "in1", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + },{ + .name = "in2", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad astreamsync_outputs[] = { + { + .name = "out1", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_output, + .request_frame = request_frame, + },{ + .name = "out2", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_output, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_af_astreamsync = { + .name = "astreamsync", + .description = NULL_IF_CONFIG_SMALL("Copy two streams of audio data " + "in a configurable order."), + .priv_size = sizeof(AStreamSyncContext), + .init = init, + .query_formats = query_formats, + .inputs = 
astreamsync_inputs, + .outputs = astreamsync_outputs, +}; diff --git a/ffmpeg1/libavfilter/af_asyncts.c b/ffmpeg1/libavfilter/af_asyncts.c new file mode 100644 index 0000000..c2441a4 --- /dev/null +++ b/ffmpeg1/libavfilter/af_asyncts.c @@ -0,0 +1,307 @@ +/* + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavresample/avresample.h" +#include "libavutil/audio_fifo.h" +#include "libavutil/common.h" +#include "libavutil/mathematics.h" +#include "libavutil/opt.h" +#include "libavutil/samplefmt.h" + +#include "audio.h" +#include "avfilter.h" +#include "internal.h" + +typedef struct ASyncContext { + const AVClass *class; + + AVAudioResampleContext *avr; + int64_t pts; ///< timestamp in samples of the first sample in fifo + int min_delta; ///< pad/trim min threshold in samples + int first_frame; ///< 1 until filter_frame() has processed at least 1 frame with a pts != AV_NOPTS_VALUE + int64_t first_pts; ///< user-specified first expected pts, in samples + + /* options */ + int resample; + float min_delta_sec; + int max_comp; + + /* set by filter_frame() to signal an output frame to request_frame() */ + int got_output; +} ASyncContext; + +#define OFFSET(x) offsetof(ASyncContext, x) +#define A AV_OPT_FLAG_AUDIO_PARAM +#define F 
AV_OPT_FLAG_FILTERING_PARAM +static const AVOption asyncts_options[] = { + { "compensate", "Stretch/squeeze the data to make it match the timestamps", OFFSET(resample), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, A|F }, + { "min_delta", "Minimum difference between timestamps and audio data " + "(in seconds) to trigger padding/trimmin the data.", OFFSET(min_delta_sec), AV_OPT_TYPE_FLOAT, { .dbl = 0.1 }, 0, INT_MAX, A|F }, + { "max_comp", "Maximum compensation in samples per second.", OFFSET(max_comp), AV_OPT_TYPE_INT, { .i64 = 500 }, 0, INT_MAX, A|F }, + { "first_pts", "Assume the first pts should be this value.", OFFSET(first_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, A|F }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(asyncts); + +static int init(AVFilterContext *ctx, const char *args) +{ + ASyncContext *s = ctx->priv; + int ret; + + s->class = &asyncts_class; + av_opt_set_defaults(s); + + if ((ret = av_set_options_string(s, args, "=", ":")) < 0) + return ret; + av_opt_free(s); + + s->pts = AV_NOPTS_VALUE; + s->first_frame = 1; + + return 0; +} + +static void uninit(AVFilterContext *ctx) +{ + ASyncContext *s = ctx->priv; + + if (s->avr) { + avresample_close(s->avr); + avresample_free(&s->avr); + } +} + +static int config_props(AVFilterLink *link) +{ + ASyncContext *s = link->src->priv; + int ret; + + s->min_delta = s->min_delta_sec * link->sample_rate; + link->time_base = (AVRational){1, link->sample_rate}; + + s->avr = avresample_alloc_context(); + if (!s->avr) + return AVERROR(ENOMEM); + + av_opt_set_int(s->avr, "in_channel_layout", link->channel_layout, 0); + av_opt_set_int(s->avr, "out_channel_layout", link->channel_layout, 0); + av_opt_set_int(s->avr, "in_sample_fmt", link->format, 0); + av_opt_set_int(s->avr, "out_sample_fmt", link->format, 0); + av_opt_set_int(s->avr, "in_sample_rate", link->sample_rate, 0); + av_opt_set_int(s->avr, "out_sample_rate", link->sample_rate, 0); + + if (s->resample) + av_opt_set_int(s->avr, 
"force_resampling", 1, 0); + + if ((ret = avresample_open(s->avr)) < 0) + return ret; + + return 0; +} + +/* get amount of data currently buffered, in samples */ +static int64_t get_delay(ASyncContext *s) +{ + return avresample_available(s->avr) + avresample_get_delay(s->avr); +} + +static void handle_trimming(AVFilterContext *ctx) +{ + ASyncContext *s = ctx->priv; + + if (s->pts < s->first_pts) { + int delta = FFMIN(s->first_pts - s->pts, avresample_available(s->avr)); + av_log(ctx, AV_LOG_VERBOSE, "Trimming %d samples from start\n", + delta); + avresample_read(s->avr, NULL, delta); + s->pts += delta; + } else if (s->first_frame) + s->pts = s->first_pts; +} + +static int request_frame(AVFilterLink *link) +{ + AVFilterContext *ctx = link->src; + ASyncContext *s = ctx->priv; + int ret = 0; + int nb_samples; + + s->got_output = 0; + while (ret >= 0 && !s->got_output) + ret = ff_request_frame(ctx->inputs[0]); + + /* flush the fifo */ + if (ret == AVERROR_EOF) { + if (s->first_pts != AV_NOPTS_VALUE) + handle_trimming(ctx); + + if (nb_samples = get_delay(s)) { + AVFrame *buf = ff_get_audio_buffer(link, nb_samples); + if (!buf) + return AVERROR(ENOMEM); + ret = avresample_convert(s->avr, buf->extended_data, + buf->linesize[0], nb_samples, NULL, 0, 0); + if (ret <= 0) { + av_frame_free(&buf); + return (ret < 0) ? ret : AVERROR_EOF; + } + + buf->pts = s->pts; + return ff_filter_frame(link, buf); + } + } + + return ret; +} + +static int write_to_fifo(ASyncContext *s, AVFrame *buf) +{ + int ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data, + buf->linesize[0], buf->nb_samples); + av_frame_free(&buf); + return ret; +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *buf) +{ + AVFilterContext *ctx = inlink->dst; + ASyncContext *s = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; + int nb_channels = av_get_channel_layout_nb_channels(buf->channel_layout); + int64_t pts = (buf->pts == AV_NOPTS_VALUE) ? 
buf->pts : + av_rescale_q(buf->pts, inlink->time_base, outlink->time_base); + int out_size, ret; + int64_t delta; + + /* buffer data until we get the next timestamp */ + if (s->pts == AV_NOPTS_VALUE || pts == AV_NOPTS_VALUE) { + if (pts != AV_NOPTS_VALUE) { + s->pts = pts - get_delay(s); + } + return write_to_fifo(s, buf); + } + + if (s->first_pts != AV_NOPTS_VALUE) { + handle_trimming(ctx); + if (!avresample_available(s->avr)) + return write_to_fifo(s, buf); + } + + /* when we have two timestamps, compute how many samples would we have + * to add/remove to get proper sync between data and timestamps */ + delta = pts - s->pts - get_delay(s); + out_size = avresample_available(s->avr); + + if (labs(delta) > s->min_delta || + (s->first_frame && delta && s->first_pts != AV_NOPTS_VALUE)) { + av_log(ctx, AV_LOG_VERBOSE, "Discontinuity - %"PRId64" samples.\n", delta); + out_size = av_clipl_int32((int64_t)out_size + delta); + } else { + if (s->resample) { + int comp = av_clip(delta, -s->max_comp, s->max_comp); + av_log(ctx, AV_LOG_VERBOSE, "Compensating %d samples per second.\n", comp); + avresample_set_compensation(s->avr, comp, inlink->sample_rate); + } + delta = 0; + } + + if (out_size > 0) { + AVFrame *buf_out = ff_get_audio_buffer(outlink, out_size); + if (!buf_out) { + ret = AVERROR(ENOMEM); + goto fail; + } + + if (s->first_frame && delta > 0) { + int ch; + + av_samples_set_silence(buf_out->extended_data, 0, delta, + nb_channels, buf->format); + + for (ch = 0; ch < nb_channels; ch++) + buf_out->extended_data[ch] += delta; + + avresample_read(s->avr, buf_out->extended_data, out_size); + + for (ch = 0; ch < nb_channels; ch++) + buf_out->extended_data[ch] -= delta; + } else { + avresample_read(s->avr, buf_out->extended_data, out_size); + + if (delta > 0) { + av_samples_set_silence(buf_out->extended_data, out_size - delta, + delta, nb_channels, buf->format); + } + } + buf_out->pts = s->pts; + ret = ff_filter_frame(outlink, buf_out); + if (ret < 0) + goto fail; + 
s->got_output = 1; + } else if (avresample_available(s->avr)) { + av_log(ctx, AV_LOG_WARNING, "Non-monotonous timestamps, dropping " + "whole buffer.\n"); + } + + /* drain any remaining buffered data */ + avresample_read(s->avr, NULL, avresample_available(s->avr)); + + s->pts = pts - avresample_get_delay(s->avr); + ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data, + buf->linesize[0], buf->nb_samples); + + s->first_frame = 0; +fail: + av_frame_free(&buf); + + return ret; +} + +static const AVFilterPad avfilter_af_asyncts_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame + }, + { NULL } +}; + +static const AVFilterPad avfilter_af_asyncts_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_props, + .request_frame = request_frame + }, + { NULL } +}; + +AVFilter avfilter_af_asyncts = { + .name = "asyncts", + .description = NULL_IF_CONFIG_SMALL("Sync audio data to timestamps"), + + .init = init, + .uninit = uninit, + + .priv_size = sizeof(ASyncContext), + + .inputs = avfilter_af_asyncts_inputs, + .outputs = avfilter_af_asyncts_outputs, + .priv_class = &asyncts_class, +}; diff --git a/ffmpeg1/libavfilter/af_atempo.c b/ffmpeg1/libavfilter/af_atempo.c new file mode 100644 index 0000000..9547969 --- /dev/null +++ b/ffmpeg1/libavfilter/af_atempo.c @@ -0,0 +1,1172 @@ +/* + * Copyright (c) 2012 Pavel Koshevoy + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * tempo scaling audio filter -- an implementation of WSOLA algorithm + * + * Based on MIT licensed yaeAudioTempoFilter.h and yaeAudioFragment.h + * from Apprentice Video player by Pavel Koshevoy. + * https://sourceforge.net/projects/apprenticevideo/ + * + * An explanation of SOLA algorithm is available at + * http://www.surina.net/article/time-and-pitch-scaling.html + * + * WSOLA is very similar to SOLA, only one major difference exists between + * these algorithms. SOLA shifts audio fragments along the output stream, + * where as WSOLA shifts audio fragments along the input stream. + * + * The advantage of WSOLA algorithm is that the overlap region size is + * always the same, therefore the blending function is constant and + * can be precomputed. 
+ */ + +#include +#include "libavcodec/avfft.h" +#include "libavutil/avassert.h" +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/eval.h" +#include "libavutil/opt.h" +#include "libavutil/samplefmt.h" +#include "avfilter.h" +#include "audio.h" +#include "internal.h" + +/** + * A fragment of audio waveform + */ +typedef struct { + // index of the first sample of this fragment in the overall waveform; + // 0: input sample position + // 1: output sample position + int64_t position[2]; + + // original packed multi-channel samples: + uint8_t *data; + + // number of samples in this fragment: + int nsamples; + + // rDFT transform of the down-mixed mono fragment, used for + // fast waveform alignment via correlation in frequency domain: + FFTSample *xdat; +} AudioFragment; + +/** + * Filter state machine states + */ +typedef enum { + YAE_LOAD_FRAGMENT, + YAE_ADJUST_POSITION, + YAE_RELOAD_FRAGMENT, + YAE_OUTPUT_OVERLAP_ADD, + YAE_FLUSH_OUTPUT, +} FilterState; + +/** + * Filter state machine + */ +typedef struct { + // ring-buffer of input samples, necessary because some times + // input fragment position may be adjusted backwards: + uint8_t *buffer; + + // ring-buffer maximum capacity, expressed in sample rate time base: + int ring; + + // ring-buffer house keeping: + int size; + int head; + int tail; + + // 0: input sample position corresponding to the ring buffer tail + // 1: output sample position + int64_t position[2]; + + // sample format: + enum AVSampleFormat format; + + // number of channels: + int channels; + + // row of bytes to skip from one sample to next, across multple channels; + // stride = (number-of-channels * bits-per-sample-per-channel) / 8 + int stride; + + // fragment window size, power-of-two integer: + int window; + + // Hann window coefficients, for feathering + // (blending) the overlapping fragment region: + float *hann; + + // tempo scaling factor: + double tempo; + + // cumulative alignment drift: + int 
drift; + + // current/previous fragment ring-buffer: + AudioFragment frag[2]; + + // current fragment index: + uint64_t nfrag; + + // current state: + FilterState state; + + // for fast correlation calculation in frequency domain: + RDFTContext *real_to_complex; + RDFTContext *complex_to_real; + FFTSample *correlation; + + // for managing AVFilterPad.request_frame and AVFilterPad.filter_frame + int request_fulfilled; + AVFrame *dst_buffer; + uint8_t *dst; + uint8_t *dst_end; + uint64_t nsamples_in; + uint64_t nsamples_out; +} ATempoContext; + +/** + * Reset filter to initial state, do not deallocate existing local buffers. + */ +static void yae_clear(ATempoContext *atempo) +{ + atempo->size = 0; + atempo->head = 0; + atempo->tail = 0; + + atempo->drift = 0; + atempo->nfrag = 0; + atempo->state = YAE_LOAD_FRAGMENT; + + atempo->position[0] = 0; + atempo->position[1] = 0; + + atempo->frag[0].position[0] = 0; + atempo->frag[0].position[1] = 0; + atempo->frag[0].nsamples = 0; + + atempo->frag[1].position[0] = 0; + atempo->frag[1].position[1] = 0; + atempo->frag[1].nsamples = 0; + + // shift left position of 1st fragment by half a window + // so that no re-normalization would be required for + // the left half of the 1st fragment: + atempo->frag[0].position[0] = -(int64_t)(atempo->window / 2); + atempo->frag[0].position[1] = -(int64_t)(atempo->window / 2); + + av_frame_free(&atempo->dst_buffer); + atempo->dst = NULL; + atempo->dst_end = NULL; + + atempo->request_fulfilled = 0; + atempo->nsamples_in = 0; + atempo->nsamples_out = 0; +} + +/** + * Reset filter to initial state and deallocate all buffers. 
+ */ +static void yae_release_buffers(ATempoContext *atempo) +{ + yae_clear(atempo); + + av_freep(&atempo->frag[0].data); + av_freep(&atempo->frag[1].data); + av_freep(&atempo->frag[0].xdat); + av_freep(&atempo->frag[1].xdat); + + av_freep(&atempo->buffer); + av_freep(&atempo->hann); + av_freep(&atempo->correlation); + + av_rdft_end(atempo->real_to_complex); + atempo->real_to_complex = NULL; + + av_rdft_end(atempo->complex_to_real); + atempo->complex_to_real = NULL; +} + +/* av_realloc is not aligned enough; fortunately, the data does not need to + * be preserved */ +#define RE_MALLOC_OR_FAIL(field, field_size) \ + do { \ + av_freep(&field); \ + field = av_malloc(field_size); \ + if (!field) { \ + yae_release_buffers(atempo); \ + return AVERROR(ENOMEM); \ + } \ + } while (0) + +/** + * Prepare filter for processing audio data of given format, + * sample rate and number of channels. + */ +static int yae_reset(ATempoContext *atempo, + enum AVSampleFormat format, + int sample_rate, + int channels) +{ + const int sample_size = av_get_bytes_per_sample(format); + uint32_t nlevels = 0; + uint32_t pot; + int i; + + atempo->format = format; + atempo->channels = channels; + atempo->stride = sample_size * channels; + + // pick a segment window size: + atempo->window = sample_rate / 24; + + // adjust window size to be a power-of-two integer: + nlevels = av_log2(atempo->window); + pot = 1 << nlevels; + av_assert0(pot <= atempo->window); + + if (pot < atempo->window) { + atempo->window = pot * 2; + nlevels++; + } + + // initialize audio fragment buffers: + RE_MALLOC_OR_FAIL(atempo->frag[0].data, atempo->window * atempo->stride); + RE_MALLOC_OR_FAIL(atempo->frag[1].data, atempo->window * atempo->stride); + RE_MALLOC_OR_FAIL(atempo->frag[0].xdat, atempo->window * sizeof(FFTComplex)); + RE_MALLOC_OR_FAIL(atempo->frag[1].xdat, atempo->window * sizeof(FFTComplex)); + + // initialize rDFT contexts: + av_rdft_end(atempo->real_to_complex); + atempo->real_to_complex = NULL; + + 
av_rdft_end(atempo->complex_to_real); + atempo->complex_to_real = NULL; + + atempo->real_to_complex = av_rdft_init(nlevels + 1, DFT_R2C); + if (!atempo->real_to_complex) { + yae_release_buffers(atempo); + return AVERROR(ENOMEM); + } + + atempo->complex_to_real = av_rdft_init(nlevels + 1, IDFT_C2R); + if (!atempo->complex_to_real) { + yae_release_buffers(atempo); + return AVERROR(ENOMEM); + } + + RE_MALLOC_OR_FAIL(atempo->correlation, atempo->window * sizeof(FFTComplex)); + + atempo->ring = atempo->window * 3; + RE_MALLOC_OR_FAIL(atempo->buffer, atempo->ring * atempo->stride); + + // initialize the Hann window function: + RE_MALLOC_OR_FAIL(atempo->hann, atempo->window * sizeof(float)); + + for (i = 0; i < atempo->window; i++) { + double t = (double)i / (double)(atempo->window - 1); + double h = 0.5 * (1.0 - cos(2.0 * M_PI * t)); + atempo->hann[i] = (float)h; + } + + yae_clear(atempo); + return 0; +} + +static int yae_set_tempo(AVFilterContext *ctx, const char *arg_tempo) +{ + ATempoContext *atempo = ctx->priv; + char *tail = NULL; + double tempo = av_strtod(arg_tempo, &tail); + + if (tail && *tail) { + av_log(ctx, AV_LOG_ERROR, "Invalid tempo value '%s'\n", arg_tempo); + return AVERROR(EINVAL); + } + + if (tempo < 0.5 || tempo > 2.0) { + av_log(ctx, AV_LOG_ERROR, "Tempo value %f exceeds [0.5, 2.0] range\n", + tempo); + return AVERROR(EINVAL); + } + + atempo->tempo = tempo; + return 0; +} + +inline static AudioFragment *yae_curr_frag(ATempoContext *atempo) +{ + return &atempo->frag[atempo->nfrag % 2]; +} + +inline static AudioFragment *yae_prev_frag(ATempoContext *atempo) +{ + return &atempo->frag[(atempo->nfrag + 1) % 2]; +} + +/** + * A helper macro for initializing complex data buffer with scalar data + * of a given type. 
+ */ +#define yae_init_xdat(scalar_type, scalar_max) \ + do { \ + const uint8_t *src_end = src + \ + frag->nsamples * atempo->channels * sizeof(scalar_type); \ + \ + FFTSample *xdat = frag->xdat; \ + scalar_type tmp; \ + \ + if (atempo->channels == 1) { \ + for (; src < src_end; xdat++) { \ + tmp = *(const scalar_type *)src; \ + src += sizeof(scalar_type); \ + \ + *xdat = (FFTSample)tmp; \ + } \ + } else { \ + FFTSample s, max, ti, si; \ + int i; \ + \ + for (; src < src_end; xdat++) { \ + tmp = *(const scalar_type *)src; \ + src += sizeof(scalar_type); \ + \ + max = (FFTSample)tmp; \ + s = FFMIN((FFTSample)scalar_max, \ + (FFTSample)fabsf(max)); \ + \ + for (i = 1; i < atempo->channels; i++) { \ + tmp = *(const scalar_type *)src; \ + src += sizeof(scalar_type); \ + \ + ti = (FFTSample)tmp; \ + si = FFMIN((FFTSample)scalar_max, \ + (FFTSample)fabsf(ti)); \ + \ + if (s < si) { \ + s = si; \ + max = ti; \ + } \ + } \ + \ + *xdat = max; \ + } \ + } \ + } while (0) + +/** + * Initialize complex data buffer of a given audio fragment + * with down-mixed mono data of appropriate scalar type. + */ +static void yae_downmix(ATempoContext *atempo, AudioFragment *frag) +{ + // shortcuts: + const uint8_t *src = frag->data; + + // init complex data buffer used for FFT and Correlation: + memset(frag->xdat, 0, sizeof(FFTComplex) * atempo->window); + + if (atempo->format == AV_SAMPLE_FMT_U8) { + yae_init_xdat(uint8_t, 127); + } else if (atempo->format == AV_SAMPLE_FMT_S16) { + yae_init_xdat(int16_t, 32767); + } else if (atempo->format == AV_SAMPLE_FMT_S32) { + yae_init_xdat(int, 2147483647); + } else if (atempo->format == AV_SAMPLE_FMT_FLT) { + yae_init_xdat(float, 1); + } else if (atempo->format == AV_SAMPLE_FMT_DBL) { + yae_init_xdat(double, 1); + } +} + +/** + * Populate the internal data buffer on as-needed basis. + * + * @return + * 0 if requested data was already available or was successfully loaded, + * AVERROR(EAGAIN) if more input data is required. 
+ */ +static int yae_load_data(ATempoContext *atempo, + const uint8_t **src_ref, + const uint8_t *src_end, + int64_t stop_here) +{ + // shortcut: + const uint8_t *src = *src_ref; + const int read_size = stop_here - atempo->position[0]; + + if (stop_here <= atempo->position[0]) { + return 0; + } + + // samples are not expected to be skipped: + av_assert0(read_size <= atempo->ring); + + while (atempo->position[0] < stop_here && src < src_end) { + int src_samples = (src_end - src) / atempo->stride; + + // load data piece-wise, in order to avoid complicating the logic: + int nsamples = FFMIN(read_size, src_samples); + int na; + int nb; + + nsamples = FFMIN(nsamples, atempo->ring); + na = FFMIN(nsamples, atempo->ring - atempo->tail); + nb = FFMIN(nsamples - na, atempo->ring); + + if (na) { + uint8_t *a = atempo->buffer + atempo->tail * atempo->stride; + memcpy(a, src, na * atempo->stride); + + src += na * atempo->stride; + atempo->position[0] += na; + + atempo->size = FFMIN(atempo->size + na, atempo->ring); + atempo->tail = (atempo->tail + na) % atempo->ring; + atempo->head = + atempo->size < atempo->ring ? + atempo->tail - atempo->size : + atempo->tail; + } + + if (nb) { + uint8_t *b = atempo->buffer; + memcpy(b, src, nb * atempo->stride); + + src += nb * atempo->stride; + atempo->position[0] += nb; + + atempo->size = FFMIN(atempo->size + nb, atempo->ring); + atempo->tail = (atempo->tail + nb) % atempo->ring; + atempo->head = + atempo->size < atempo->ring ? + atempo->tail - atempo->size : + atempo->tail; + } + } + + // pass back the updated source buffer pointer: + *src_ref = src; + + // sanity check: + av_assert0(atempo->position[0] <= stop_here); + + return atempo->position[0] == stop_here ? 0 : AVERROR(EAGAIN); +} + +/** + * Populate current audio fragment data buffer. + * + * @return + * 0 when the fragment is ready, + * AVERROR(EAGAIN) if more input data is required. 
+ */ +static int yae_load_frag(ATempoContext *atempo, + const uint8_t **src_ref, + const uint8_t *src_end) +{ + // shortcuts: + AudioFragment *frag = yae_curr_frag(atempo); + uint8_t *dst; + int64_t missing, start, zeros; + uint32_t nsamples; + const uint8_t *a, *b; + int i0, i1, n0, n1, na, nb; + + int64_t stop_here = frag->position[0] + atempo->window; + if (src_ref && yae_load_data(atempo, src_ref, src_end, stop_here) != 0) { + return AVERROR(EAGAIN); + } + + // calculate the number of samples we don't have: + missing = + stop_here > atempo->position[0] ? + stop_here - atempo->position[0] : 0; + + nsamples = + missing < (int64_t)atempo->window ? + (uint32_t)(atempo->window - missing) : 0; + + // setup the output buffer: + frag->nsamples = nsamples; + dst = frag->data; + + start = atempo->position[0] - atempo->size; + zeros = 0; + + if (frag->position[0] < start) { + // what we don't have we substitute with zeros: + zeros = FFMIN(start - frag->position[0], (int64_t)nsamples); + av_assert0(zeros != nsamples); + + memset(dst, 0, zeros * atempo->stride); + dst += zeros * atempo->stride; + } + + if (zeros == nsamples) { + return 0; + } + + // get the remaining data from the ring buffer: + na = (atempo->head < atempo->tail ? + atempo->tail - atempo->head : + atempo->ring - atempo->head); + + nb = atempo->head < atempo->tail ? 0 : atempo->tail; + + // sanity check: + av_assert0(nsamples <= zeros + na + nb); + + a = atempo->buffer + atempo->head * atempo->stride; + b = atempo->buffer; + + i0 = frag->position[0] + zeros - start; + i1 = i0 < na ? 0 : i0 - na; + + n0 = i0 < na ? FFMIN(na - i0, (int)(nsamples - zeros)) : 0; + n1 = nsamples - zeros - n0; + + if (n0) { + memcpy(dst, a + i0 * atempo->stride, n0 * atempo->stride); + dst += n0 * atempo->stride; + } + + if (n1) { + memcpy(dst, b + i1 * atempo->stride, n1 * atempo->stride); + } + + return 0; +} + +/** + * Prepare for loading next audio fragment. 
+ */ +static void yae_advance_to_next_frag(ATempoContext *atempo) +{ + const double fragment_step = atempo->tempo * (double)(atempo->window / 2); + + const AudioFragment *prev; + AudioFragment *frag; + + atempo->nfrag++; + prev = yae_prev_frag(atempo); + frag = yae_curr_frag(atempo); + + frag->position[0] = prev->position[0] + (int64_t)fragment_step; + frag->position[1] = prev->position[1] + atempo->window / 2; + frag->nsamples = 0; +} + +/** + * Calculate cross-correlation via rDFT. + * + * Multiply two vectors of complex numbers (result of real_to_complex rDFT) + * and transform back via complex_to_real rDFT. + */ +static void yae_xcorr_via_rdft(FFTSample *xcorr, + RDFTContext *complex_to_real, + const FFTComplex *xa, + const FFTComplex *xb, + const int window) +{ + FFTComplex *xc = (FFTComplex *)xcorr; + int i; + + // NOTE: first element requires special care -- Given Y = rDFT(X), + // Im(Y[0]) and Im(Y[N/2]) are always zero, therefore av_rdft_calc + // stores Re(Y[N/2]) in place of Im(Y[0]). + + xc->re = xa->re * xb->re; + xc->im = xa->im * xb->im; + xa++; + xb++; + xc++; + + for (i = 1; i < window; i++, xa++, xb++, xc++) { + xc->re = (xa->re * xb->re + xa->im * xb->im); + xc->im = (xa->im * xb->re - xa->re * xb->im); + } + + // apply inverse rDFT: + av_rdft_calc(complex_to_real, xcorr); +} + +/** + * Calculate alignment offset for given fragment + * relative to the previous fragment. + * + * @return alignment offset of current fragment relative to previous. 
 */
static int yae_align(AudioFragment *frag,
                     const AudioFragment *prev,
                     const int window,
                     const int delta_max,
                     const int drift,
                     FFTSample *correlation,
                     RDFTContext *complex_to_real)
{
    int best_offset = -drift;
    FFTSample best_metric = -FLT_MAX;
    FFTSample *xcorr;

    int i0;
    int i1;
    int i;

    // cross-correlate the down-mixed fragments in the frequency domain:
    yae_xcorr_via_rdft(correlation,
                       complex_to_real,
                       (const FFTComplex *)prev->xdat,
                       (const FFTComplex *)frag->xdat,
                       window);

    // identify search window boundaries:
    i0 = FFMAX(window / 2 - delta_max - drift, 0);
    i0 = FFMIN(i0, window);

    i1 = FFMIN(window / 2 + delta_max - drift, window - window / 16);
    i1 = FFMAX(i1, 0);

    // identify cross-correlation peaks within search window:
    xcorr = correlation + i0;

    for (i = i0; i < i1; i++, xcorr++) {
        FFTSample metric = *xcorr;

        // normalize: weight the raw correlation so that offsets near the
        // window edges (and large accumulated drift) are penalized:
        FFTSample drifti = (FFTSample)(drift + i);
        metric *= drifti * (FFTSample)(i - i0) * (FFTSample)(i1 - i);

        if (metric > best_metric) {
            best_metric = metric;
            best_offset = i - window / 2;
        }
    }

    return best_offset;
}

/**
 * Adjust current fragment position for better alignment
 * with previous fragment.
 *
 * @return alignment correction.
 */
static int yae_adjust_position(ATempoContext *atempo)
{
    const AudioFragment *prev = yae_prev_frag(atempo);
    AudioFragment *frag = yae_curr_frag(atempo);

    const int delta_max = atempo->window / 2;
    const int correction = yae_align(frag,
                                     prev,
                                     atempo->window,
                                     delta_max,
                                     atempo->drift,
                                     atempo->correlation,
                                     atempo->complex_to_real);

    if (correction) {
        // adjust fragment position:
        frag->position[0] -= correction;

        // clear so that the fragment can be reloaded:
        frag->nsamples = 0;

        // update cumulative correction drift counter:
        atempo->drift += correction;
    }

    return correction;
}

/**
 * A helper macro for blending the overlap region of previous
 * and current audio fragment.
 */
#define yae_blend(scalar_type)                                          \
    do {                                                                \
        const scalar_type *aaa = (const scalar_type *)a;                \
        const scalar_type *bbb = (const scalar_type *)b;                \
                                                                        \
        scalar_type *out = (scalar_type *)dst;                          \
        scalar_type *out_end = (scalar_type *)dst_end;                  \
        int64_t i;                                                      \
                                                                        \
        for (i = 0; i < overlap && out < out_end;                       \
             i++, atempo->position[1]++, wa++, wb++) {                  \
            float w0 = *wa;                                             \
            float w1 = *wb;                                             \
            int j;                                                      \
                                                                        \
            for (j = 0; j < atempo->channels;                           \
                 j++, aaa++, bbb++, out++) {                            \
                float t0 = (float)*aaa;                                 \
                float t1 = (float)*bbb;                                 \
                                                                        \
                *out =                                                  \
                    frag->position[0] + i < 0 ?                         \
                    *aaa :                                              \
                    (scalar_type)(t0 * w0 + t1 * w1);                   \
            }                                                           \
        }                                                               \
        dst = (uint8_t *)out;                                           \
    } while (0)

/**
 * Blend the overlap region of previous and current audio fragment
 * and output the results to the given destination buffer.
 *
 * @return
 *   0 if the overlap region was completely stored in the dst buffer,
 *   AVERROR(EAGAIN) if more destination buffer space is required.
 */
static int yae_overlap_add(ATempoContext *atempo,
                           uint8_t **dst_ref,
                           uint8_t *dst_end)
{
    // shortcuts:
    const AudioFragment *prev = yae_prev_frag(atempo);
    const AudioFragment *frag = yae_curr_frag(atempo);

    // [start_here, stop_here) is the output-timeline span where the
    // Hann windows of the two fragments overlap:
    const int64_t start_here = FFMAX(atempo->position[1],
                                     frag->position[1]);

    const int64_t stop_here = FFMIN(prev->position[1] + prev->nsamples,
                                    frag->position[1] + frag->nsamples);

    const int64_t overlap = stop_here - start_here;

    // relative offsets within the previous/current fragment:
    const int64_t ia = start_here - prev->position[1];
    const int64_t ib = start_here - frag->position[1];

    const float *wa = atempo->hann + ia;
    const float *wb = atempo->hann + ib;

    const uint8_t *a = prev->data + ia * atempo->stride;
    const uint8_t *b = frag->data + ib * atempo->stride;

    uint8_t *dst = *dst_ref;

    av_assert0(start_here <= stop_here &&
               frag->position[1] <= start_here &&
               overlap <= frag->nsamples);

    // dispatch on sample format; yae_blend advances dst and position[1]:
    if (atempo->format == AV_SAMPLE_FMT_U8) {
        yae_blend(uint8_t);
    } else if (atempo->format == AV_SAMPLE_FMT_S16) {
        yae_blend(int16_t);
    } else if (atempo->format == AV_SAMPLE_FMT_S32) {
        yae_blend(int);
    } else if (atempo->format == AV_SAMPLE_FMT_FLT) {
        yae_blend(float);
    } else if (atempo->format == AV_SAMPLE_FMT_DBL) {
        yae_blend(double);
    }

    // pass-back the updated destination buffer pointer:
    *dst_ref = dst;

    return atempo->position[1] == stop_here ? 0 : AVERROR(EAGAIN);
}

/**
 * Feed as much data to the filter as it is able to consume
 * and receive as much processed data in the destination buffer
 * as it is able to produce or store.
 */
static void
yae_apply(ATempoContext *atempo,
          const uint8_t **src_ref,
          const uint8_t *src_end,
          uint8_t **dst_ref,
          uint8_t *dst_end)
{
    // WSOLA state machine; each pass either makes progress or
    // breaks out waiting for more input/output space:
    while (1) {
        if (atempo->state == YAE_LOAD_FRAGMENT) {
            // load additional data for the current fragment:
            if (yae_load_frag(atempo, src_ref, src_end) != 0) {
                break;
            }

            // down-mix to mono:
            yae_downmix(atempo, yae_curr_frag(atempo));

            // apply rDFT:
            av_rdft_calc(atempo->real_to_complex, yae_curr_frag(atempo)->xdat);

            // must load the second fragment before alignment can start:
            if (!atempo->nfrag) {
                yae_advance_to_next_frag(atempo);
                continue;
            }

            atempo->state = YAE_ADJUST_POSITION;
        }

        if (atempo->state == YAE_ADJUST_POSITION) {
            // adjust position for better alignment:
            if (yae_adjust_position(atempo)) {
                // reload the fragment at the corrected position, so that the
                // Hann window blending would not require normalization:
                atempo->state = YAE_RELOAD_FRAGMENT;
            } else {
                atempo->state = YAE_OUTPUT_OVERLAP_ADD;
            }
        }

        if (atempo->state == YAE_RELOAD_FRAGMENT) {
            // load additional data if necessary due to position adjustment:
            if (yae_load_frag(atempo, src_ref, src_end) != 0) {
                break;
            }

            // down-mix to mono:
            yae_downmix(atempo, yae_curr_frag(atempo));

            // apply rDFT:
            av_rdft_calc(atempo->real_to_complex, yae_curr_frag(atempo)->xdat);

            atempo->state = YAE_OUTPUT_OVERLAP_ADD;
        }

        if (atempo->state == YAE_OUTPUT_OVERLAP_ADD) {
            // overlap-add and output the result:
            if (yae_overlap_add(atempo, dst_ref, dst_end) != 0) {
                break;
            }

            // advance to the next fragment, repeat:
            yae_advance_to_next_frag(atempo);
            atempo->state = YAE_LOAD_FRAGMENT;
        }
    }
}

/**
 * Flush any buffered data from the filter.
 *
 * @return
 *   0 if all data was completely stored in the dst buffer,
 *   AVERROR(EAGAIN) if more destination buffer space is required.
 */
static int yae_flush(ATempoContext *atempo,
                     uint8_t **dst_ref,
                     uint8_t *dst_end)
{
    AudioFragment *frag = yae_curr_frag(atempo);
    int64_t overlap_end;
    int64_t start_here;
    int64_t stop_here;
    int64_t offset;

    const uint8_t *src;
    uint8_t *dst;

    int src_size;
    int dst_size;
    int nbytes;

    atempo->state = YAE_FLUSH_OUTPUT;

    if (atempo->position[0] == frag->position[0] + frag->nsamples &&
        atempo->position[1] == frag->position[1] + frag->nsamples) {
        // the current fragment is already flushed:
        return 0;
    }

    if (frag->position[0] + frag->nsamples < atempo->position[0]) {
        // finish loading the current (possibly partial) fragment:
        // (NULL src_ref -> use only already-buffered data)
        yae_load_frag(atempo, NULL, NULL);

        if (atempo->nfrag) {
            // down-mix to mono:
            yae_downmix(atempo, frag);

            // apply rDFT:
            av_rdft_calc(atempo->real_to_complex, frag->xdat);

            // align current fragment to previous fragment:
            if (yae_adjust_position(atempo)) {
                // reload the current fragment due to adjusted position:
                yae_load_frag(atempo, NULL, NULL);
            }
        }
    }

    // flush the overlap region:
    overlap_end = frag->position[1] + FFMIN(atempo->window / 2,
                                            frag->nsamples);

    while (atempo->position[1] < overlap_end) {
        if (yae_overlap_add(atempo, dst_ref, dst_end) != 0) {
            return AVERROR(EAGAIN);
        }
    }

    // flush the remainder of the current fragment:
    start_here = FFMAX(atempo->position[1], overlap_end);
    stop_here = frag->position[1] + frag->nsamples;
    offset = start_here - frag->position[1];
    av_assert0(start_here <= stop_here && frag->position[1] <= start_here);

    src = frag->data + offset * atempo->stride;
    dst = (uint8_t *)*dst_ref;

    src_size = (int)(stop_here - start_here) * atempo->stride;
    dst_size = dst_end - dst;
    nbytes = FFMIN(src_size, dst_size);

    memcpy(dst, src, nbytes);
    dst += nbytes;

    atempo->position[1] += (nbytes / atempo->stride);

    // pass-back the updated destination buffer pointer:
    *dst_ref = (uint8_t *)dst;

    return atempo->position[1] == stop_here ? 0 : AVERROR(EAGAIN);
}

/* Filter init: set defaults and parse the optional tempo argument. */
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    ATempoContext *atempo = ctx->priv;

    // NOTE: this assumes that the caller has memset ctx->priv to 0:
    atempo->format = AV_SAMPLE_FMT_NONE;
    atempo->tempo = 1.0;
    atempo->state = YAE_LOAD_FRAGMENT;

    return args ? yae_set_tempo(ctx, args) : 0;
}

/* Filter uninit: release all internal buffers. */
static av_cold void uninit(AVFilterContext *ctx)
{
    ATempoContext *atempo = ctx->priv;
    yae_release_buffers(atempo);
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterFormats *formats = NULL;

    // WSOLA necessitates an internal sliding window ring buffer
    // for incoming audio stream.
    //
    // Planar sample formats are too cumbersome to store in a ring buffer,
    // therefore planar sample formats are not supported.
    //
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_U8,
        AV_SAMPLE_FMT_S16,
        AV_SAMPLE_FMT_S32,
        AV_SAMPLE_FMT_FLT,
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };

    layouts = ff_all_channel_layouts();
    if (!layouts) {
        return AVERROR(ENOMEM);
    }
    ff_set_common_channel_layouts(ctx, layouts);

    formats = ff_make_format_list(sample_fmts);
    if (!formats) {
        return AVERROR(ENOMEM);
    }
    ff_set_common_formats(ctx, formats);

    formats = ff_all_samplerates();
    if (!formats) {
        return AVERROR(ENOMEM);
    }
    ff_set_common_samplerates(ctx, formats);

    return 0;
}

/* Re-initialize the WSOLA engine for the negotiated input format. */
static int config_props(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ATempoContext *atempo = ctx->priv;

    enum AVSampleFormat format = inlink->format;
    int sample_rate = (int)inlink->sample_rate;
    int channels = av_get_channel_layout_nb_channels(inlink->channel_layout);

    return yae_reset(atempo, format, sample_rate, channels);
}

/*
 * Finalize and send the pending output frame downstream.
 * On success ownership of dst_buffer passes to ff_filter_frame and the
 * dst pointers are cleared so a fresh buffer is allocated next time.
 */
static int push_samples(ATempoContext *atempo,
                        AVFilterLink *outlink,
                        int n_out)
{
    int ret;

    atempo->dst_buffer->sample_rate = outlink->sample_rate;
    atempo->dst_buffer->nb_samples = n_out;

    // adjust the PTS: derive it from the running output sample count,
    // rescaled from the 1/sample_rate timebase to the link timebase:
    atempo->dst_buffer->pts =
        av_rescale_q(atempo->nsamples_out,
                     (AVRational){ 1, outlink->sample_rate },
                     outlink->time_base);

    ret = ff_filter_frame(outlink, atempo->dst_buffer);
    if (ret < 0)
        return ret;
    // NOTE(review): on error the dst_buffer pointer is left set even though
    // ff_filter_frame may have consumed the frame -- verify ownership rules
    // for this libavfilter version.
    atempo->dst_buffer = NULL;
    atempo->dst = NULL;
    atempo->dst_end = NULL;

    atempo->nsamples_out += n_out;
    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *src_buffer)
{
    AVFilterContext *ctx = inlink->dst;
    ATempoContext *atempo = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    int ret = 0;
    int n_in = src_buffer->nb_samples;
    // estimated output size for this input chunk at the current tempo:
    int n_out = (int)(0.5 + ((double)n_in) / atempo->tempo);

    const uint8_t *src = src_buffer->data[0];
    const uint8_t *src_end = src + n_in * atempo->stride;

    while (src < src_end) {
        if (!atempo->dst_buffer) {
atempo->dst_buffer = ff_get_audio_buffer(outlink, n_out); + av_frame_copy_props(atempo->dst_buffer, src_buffer); + + atempo->dst = atempo->dst_buffer->data[0]; + atempo->dst_end = atempo->dst + n_out * atempo->stride; + } + + yae_apply(atempo, &src, src_end, &atempo->dst, atempo->dst_end); + + if (atempo->dst == atempo->dst_end) { + ret = push_samples(atempo, outlink, n_out); + if (ret < 0) + goto end; + atempo->request_fulfilled = 1; + } + } + + atempo->nsamples_in += n_in; +end: + av_frame_free(&src_buffer); + return ret; +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + ATempoContext *atempo = ctx->priv; + int ret; + + atempo->request_fulfilled = 0; + do { + ret = ff_request_frame(ctx->inputs[0]); + } + while (!atempo->request_fulfilled && ret >= 0); + + if (ret == AVERROR_EOF) { + // flush the filter: + int n_max = atempo->ring; + int n_out; + int err = AVERROR(EAGAIN); + + while (err == AVERROR(EAGAIN)) { + if (!atempo->dst_buffer) { + atempo->dst_buffer = ff_get_audio_buffer(outlink, n_max); + + atempo->dst = atempo->dst_buffer->data[0]; + atempo->dst_end = atempo->dst + n_max * atempo->stride; + } + + err = yae_flush(atempo, &atempo->dst, atempo->dst_end); + + n_out = ((atempo->dst - atempo->dst_buffer->data[0]) / + atempo->stride); + + if (n_out) { + ret = push_samples(atempo, outlink, n_out); + } + } + + av_frame_free(&atempo->dst_buffer); + atempo->dst = NULL; + atempo->dst_end = NULL; + + return AVERROR_EOF; + } + + return ret; +} + +static int process_command(AVFilterContext *ctx, + const char *cmd, + const char *arg, + char *res, + int res_len, + int flags) +{ + return !strcmp(cmd, "tempo") ? 
           yae_set_tempo(ctx, arg) : AVERROR(ENOSYS);
}

static const AVFilterPad atempo_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
    { NULL }
};

static const AVFilterPad atempo_outputs[] = {
    {
        .name          = "default",
        .request_frame = request_frame,
        .type          = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter avfilter_af_atempo = {
    .name            = "atempo",
    .description     = NULL_IF_CONFIG_SMALL("Adjust audio tempo."),
    .init            = init,
    .uninit          = uninit,
    .query_formats   = query_formats,
    .process_command = process_command,
    .priv_size       = sizeof(ATempoContext),
    .inputs          = atempo_inputs,
    .outputs         = atempo_outputs,
};
diff --git a/ffmpeg1/libavfilter/af_biquads.c b/ffmpeg1/libavfilter/af_biquads.c
new file mode 100644
index 0000000..0bd61fd
--- /dev/null
+++ b/ffmpeg1/libavfilter/af_biquads.c
@@ -0,0 +1,627 @@
/*
 * Copyright (c) 2013 Paul B Mahol
 * Copyright (c) 2006-2008 Rob Sykes
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * 2-pole filters designed by Robert Bristow-Johnson
 * see http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt
 *
 * 1-pole filters based on code (c) 2000 Chris Bagwell
 * Algorithms: Recursive single pole low/high pass filter
 * Reference: The Scientist and Engineer's Guide to Digital Signal Processing
 *
 * low-pass: output[N] = input[N] * A + output[N-1] * B
 *   X  = exp(-2.0 * pi * Fc)
 *   A  = 1 - X
 *   B  = X
 *   Fc = cutoff freq / sample rate
 *
 * Mimics an RC low-pass filter:
 *
 *     ---/\/\/\/\----------->
 *                   |
 *                  --- C
 *                  ---
 *                   |
 *                   |
 *                   V
 *
 * high-pass: output[N] = A0 * input[N] + A1 * input[N-1] + B1 * output[N-1]
 *   X  = exp(-2.0 * pi * Fc)
 *   A0 = (1 + X) / 2
 *   A1 = -(1 + X) / 2
 *   B1 = X
 *   Fc = cutoff freq / sample rate
 *
 * Mimics an RC high-pass filter:
 *
 *         || C
 *     ----||--------->
 *         ||    |
 *               <
 *               > R
 *               <
 *               |
 *               V
 */

#include "libavutil/opt.h"
#include "libavutil/avassert.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"

// supported filter shapes; also used as the per-filter "name_" token
// by DEFINE_BIQUAD_FILTER below:
enum FilterType {
    biquad,
    equalizer,
    bass,
    treble,
    band,
    bandpass,
    bandreject,
    allpass,
    highpass,
    lowpass,
};

// interpretation of the "width" option:
enum WidthType {
    NONE,
    HZ,
    OCTAVE,
    QFACTOR,
    SLOPE,
};

// per-channel IIR delay line: last two inputs and outputs:
typedef struct ChanCache {
    double i1, i2;
    double o1, o2;
} ChanCache;

typedef struct {
    const AVClass *class;

    enum FilterType filter_type;
    enum WidthType width_type;
    int poles;
    int csg;

    double gain;
    double frequency;
    double width;

    // transfer function coefficients (normalized by a0 in config_output):
    double a0, a1, a2;
    double b0, b1, b2;

    ChanCache *cache;

    void (*filter)(const void *ibuf, void *obuf, int len,
                   double *i1, double *i2, double *o1, double *o2,
                   double b0, double b1, double b2, double a1, double a2);
} BiquadsContext;

static av_cold int
init(AVFilterContext *ctx, const char *args)
{
    BiquadsContext *p = ctx->priv;
    int ret;

    av_opt_set_defaults(p);

    if ((ret = av_set_options_string(p, args, "=", ":")) < 0)
        return ret;

    // the raw "biquad" filter takes coefficients directly and has no
    // frequency/width options to validate:
    if (p->filter_type != biquad) {
        if (p->frequency <= 0 || p->width <= 0) {
            av_log(ctx, AV_LOG_ERROR, "Invalid frequency %f and/or width %f <= 0\n",
                   p->frequency, p->width);
            return AVERROR(EINVAL);
        }
    }

    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    // planar formats only: the filter processes one channel at a time:
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_S16P,
        AV_SAMPLE_FMT_S32P,
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };

    layouts = ff_all_channel_layouts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ff_set_common_channel_layouts(ctx, layouts);

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_set_common_formats(ctx, formats);

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    ff_set_common_samplerates(ctx, formats);

    return 0;
}

/*
 * Direct form I biquad, unrolled two samples per iteration so the
 * i1/i2 and o1/o2 delay slots swap roles instead of being shuffled.
 * Output is clamped to [min, max] with a warning on clipping.
 */
#define BIQUAD_FILTER(name, type, min, max)                                   \
static void biquad_## name (const void *input, void *output, int len,         \
                            double *in1, double *in2,                         \
                            double *out1, double *out2,                       \
                            double b0, double b1, double b2,                  \
                            double a1, double a2)                             \
{                                                                             \
    const type *ibuf = input;                                                 \
    type *obuf = output;                                                      \
    double i1 = *in1;                                                         \
    double i2 = *in2;                                                         \
    double o1 = *out1;                                                        \
    double o2 = *out2;                                                        \
    int i;                                                                    \
    a1 = -a1;                                                                 \
    a2 = -a2;                                                                 \
                                                                              \
    for (i = 0; i+1 < len; i++) {                                             \
        o2 = i2 * b2 + i1 * b1 + ibuf[i] * b0 + o2 * a2 + o1 * a1;            \
        i2 = ibuf[i];                                                         \
        if (o2 < min) {                                                       \
            av_log(NULL, AV_LOG_WARNING, "clipping\n");                       \
            obuf[i] = min;                                                    \
        } else if (o2 > max) {                                                \
            av_log(NULL, AV_LOG_WARNING, "clipping\n");                       \
            obuf[i] = max;                                                    \
        } else {                                                              \
            obuf[i] = o2;                                                     \
        }                                                                     \
        i++;                                                                  \
        o1 = i1 * b2 + i2 * b1 + ibuf[i] * b0 + o1 * a2 + o2 * a1;            \
        i1 = ibuf[i];                                                         \
        if (o1 < min) {                                                       \
            av_log(NULL, AV_LOG_WARNING, "clipping\n");                       \
            obuf[i] = min;                                                    \
        } else if (o1 > max) {                                                \
            av_log(NULL, AV_LOG_WARNING, "clipping\n");                       \
            obuf[i] = max;                                                    \
        } else {                                                              \
            obuf[i] = o1;                                                     \
        }                                                                     \
    }                                                                         \
    if (i < len) {                                                            \
        /* odd trailing sample */                                             \
        double o0 = ibuf[i] * b0 + i1 * b1 + i2 * b2 + o1 * a1 + o2 * a2;     \
        i2 = i1;                                                              \
        i1 = ibuf[i];                                                         \
        o2 = o1;                                                              \
        o1 = o0;                                                              \
        if (o0 < min) {                                                       \
            av_log(NULL, AV_LOG_WARNING, "clipping\n");                       \
            obuf[i] = min;                                                    \
        } else if (o0 > max) {                                                \
            av_log(NULL, AV_LOG_WARNING, "clipping\n");                       \
            obuf[i] = max;                                                    \
        } else {                                                              \
            obuf[i] = o0;                                                     \
        }                                                                     \
    }                                                                         \
    *in1  = i1;                                                               \
    *in2  = i2;                                                               \
    *out1 = o1;                                                               \
    *out2 = o2;                                                               \
}

BIQUAD_FILTER(s16, int16_t, INT16_MIN, INT16_MAX)
BIQUAD_FILTER(s32, int32_t, INT32_MIN, INT32_MAX)
BIQUAD_FILTER(flt, float,   -1., 1.)
BIQUAD_FILTER(dbl, double,  -1., 1.)

/*
 * Compute the RBJ Audio-EQ-Cookbook coefficients for the selected
 * filter type, normalize them by a0 and pick the per-format kernel.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx    = outlink->src;
    BiquadsContext *p       = ctx->priv;
    AVFilterLink *inlink    = ctx->inputs[0];
    double A = exp(p->gain / 40 * log(10.));
    double w0 = 2 * M_PI * p->frequency / inlink->sample_rate;
    double alpha;

    if (w0 > M_PI) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid frequency %f. Frequency must be less than half the sample-rate %d.\n",
               p->frequency, inlink->sample_rate);
        return AVERROR(EINVAL);
    }

    // translate the chosen bandwidth specification into the cookbook alpha:
    switch (p->width_type) {
    case NONE:
        alpha = 0.0;
        break;
    case HZ:
        alpha = sin(w0) / (2 * p->frequency / p->width);
        break;
    case OCTAVE:
        alpha = sin(w0) * sinh(log(2.) / 2 * p->width * w0 / sin(w0));
        break;
    case QFACTOR:
        alpha = sin(w0) / (2 * p->width);
        break;
    case SLOPE:
        alpha = sin(w0) / 2 * sqrt((A + 1 / A) * (1 / p->width - 1) + 2);
        break;
    default:
        av_assert0(0);
    }

    switch (p->filter_type) {
    case biquad:
        // user-supplied coefficients, nothing to compute:
        break;
    case equalizer:
        p->a0 =   1 + alpha / A;
        p->a1 =  -2 * cos(w0);
        p->a2 =   1 - alpha / A;
        p->b0 =   1 + alpha * A;
        p->b1 =  -2 * cos(w0);
        p->b2 =   1 - alpha * A;
        break;
    case bass:
        p->a0 =          (A + 1) + (A - 1) * cos(w0) + 2 * sqrt(A) * alpha;
        p->a1 =    -2 * ((A - 1) + (A + 1) * cos(w0));
        p->a2 =          (A + 1) + (A - 1) * cos(w0) - 2 * sqrt(A) * alpha;
        p->b0 =  A * ((A + 1) - (A - 1) * cos(w0) + 2 * sqrt(A) * alpha);
        p->b1 = 2 * A * ((A - 1) - (A + 1) * cos(w0));
        p->b2 =  A * ((A + 1) - (A - 1) * cos(w0) - 2 * sqrt(A) * alpha);
        break;
    case treble:
        p->a0 =          (A + 1) - (A - 1) * cos(w0) + 2 * sqrt(A) * alpha;
        p->a1 =     2 * ((A - 1) - (A + 1) * cos(w0));
        p->a2 =          (A + 1) - (A - 1) * cos(w0) - 2 * sqrt(A) * alpha;
        p->b0 =  A * ((A + 1) + (A - 1) * cos(w0) + 2 * sqrt(A) * alpha);
        p->b1 =-2 * A * ((A - 1) + (A + 1) * cos(w0));
        p->b2 =  A * ((A + 1) + (A - 1) * cos(w0) - 2 * sqrt(A) * alpha);
        break;
    case bandpass:
        if (p->csg) {
            // constant skirt gain (peak gain = Q):
            p->a0 =  1 + alpha;
            p->a1 = -2 * cos(w0);
            p->a2 =  1 - alpha;
            p->b0 =  sin(w0) / 2;
            p->b1 =  0;
            p->b2 = -sin(w0) / 2;
        } else {
            // constant 0 dB peak gain:
            p->a0 =  1 + alpha;
            p->a1 = -2 * cos(w0);
            p->a2 =  1 - alpha;
            p->b0 =  alpha;
            p->b1 =  0;
            p->b2 = -alpha;
        }
        break;
    case bandreject:
        p->a0 =  1 + alpha;
        p->a1 = -2 * cos(w0);
        p->a2 =  1 - alpha;
        p->b0 =  1;
        p->b1 = -2 * cos(w0);
        p->b2 =  1;
        break;
    case lowpass:
        if (p->poles == 1) {
            // single-pole RC-style low-pass (see file header):
            p->a0 = 1;
            p->a1 = -exp(-w0);
            p->a2 = 0;
            p->b0 = 1 + p->a1;
            p->b1 = 0;
            p->b2 = 0;
        } else {
            p->a0 =  1 + alpha;
            p->a1 = -2 * cos(w0);
            p->a2 =  1 - alpha;
            p->b0 = (1 - cos(w0)) / 2;
            p->b1 =  1 - cos(w0);
            p->b2 = (1 - cos(w0)) / 2;
        }
        break;
    case highpass:
        if (p->poles == 1) {
            // single-pole RC-style high-pass (see file header):
            p->a0 = 1;
            p->a1 = -exp(-w0);
            p->a2 = 0;
            p->b0 = (1 - p->a1) / 2;
            p->b1 = -p->b0;
            p->b2 = 0;
        } else {
            p->a0 =   1 + alpha;
            p->a1 =  -2 * cos(w0);
            p->a2 =   1 - alpha;
            p->b0 =  (1 + cos(w0)) / 2;
            p->b1 = -(1 + cos(w0));
            p->b2 =  (1 + cos(w0)) / 2;
        }
        break;
    case allpass:
        p->a0 =  1 + alpha;
        p->a1 = -2 * cos(w0);
        p->a2 =  1 - alpha;
        p->b0 =  1 - alpha;
        p->b1 = -2 * cos(w0);
        p->b2 =  1 + alpha;
        break;
    default:
        av_assert0(0);
    }

    // normalize so that a0 == 1:
    p->a1 /= p->a0;
    p->a2 /= p->a0;
    p->b0 /= p->a0;
    p->b1 /= p->a0;
    p->b2 /= p->a0;

    // av_realloc_f frees on failure, so overwriting p->cache is safe here:
    p->cache = av_realloc_f(p->cache, sizeof(ChanCache), inlink->channels);
    if (!p->cache)
        return AVERROR(ENOMEM);
    memset(p->cache, 0, sizeof(ChanCache) * inlink->channels);

    switch (inlink->format) {
    case AV_SAMPLE_FMT_S16P: p->filter = biquad_s16; break;
    case AV_SAMPLE_FMT_S32P: p->filter = biquad_s32; break;
    case AV_SAMPLE_FMT_FLTP: p->filter = biquad_flt; break;
    case AV_SAMPLE_FMT_DBLP: p->filter = biquad_dbl; break;
    default: av_assert0(0);
    }

    return 0;
}

/* Run the biquad kernel on every channel; filters in place when possible. */
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    BiquadsContext *p       = inlink->dst->priv;
    AVFilterLink *outlink   = inlink->dst->outputs[0];
    AVFrame *out_buf;
    int nb_samples = buf->nb_samples;
    int ch;

    if (av_frame_is_writable(buf)) {
        out_buf = buf;
    } else {
        out_buf = ff_get_audio_buffer(inlink, nb_samples);
        if (!out_buf)
            return AVERROR(ENOMEM);
        out_buf->pts = buf->pts;
    }

    for (ch = 0; ch < av_frame_get_channels(buf); ch++)
        p->filter(buf->extended_data[ch],
                  out_buf->extended_data[ch], nb_samples,
                  &p->cache[ch].i1, &p->cache[ch].i2,
                  &p->cache[ch].o1, &p->cache[ch].o2,
                  p->b0, p->b1, p->b2, p->a1, p->a2);

    if (buf != out_buf)
        av_frame_free(&buf);

    return ff_filter_frame(outlink, out_buf);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    BiquadsContext *p = ctx->priv;

    av_freep(&p->cache);
    av_opt_free(p);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};

#define OFFSET(x) offsetof(BiquadsContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/*
 * Stamp out one AVFilter per FilterType: a per-filter AVClass, an init
 * wrapper that records the filter type, and the AVFilter definition.
 */
#define DEFINE_BIQUAD_FILTER(name_, description_)                       \
AVFILTER_DEFINE_CLASS(name_);                                           \
static av_cold int name_##_init(AVFilterContext *ctx, const char *args) \
{                                                                       \
    BiquadsContext *p = ctx->priv;                                      \
    p->class = &name_##_class;                                          \
    p->filter_type = name_;                                             \
    return init(ctx, args);                                             \
}                                                                       \
                                                                        \
AVFilter avfilter_af_##name_ = {                                        \
    .name          = #name_,                                            \
    .description   = NULL_IF_CONFIG_SMALL(description_),                \
    .priv_size     = sizeof(BiquadsContext),                            \
    .init          = name_##_init,                                      \
    .uninit        = uninit,                                            \
    .query_formats = query_formats,                                     \
    .inputs        = inputs,                                            \
    .outputs       = outputs,                                           \
    .priv_class    = &name_##_class,                                    \
}

#if CONFIG_EQUALIZER_FILTER
static const AVOption equalizer_options[] = {
    {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 999999, FLAGS},
    {"f",         "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 999999, FLAGS},
    {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"},
    {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"},
    {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
    {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
    {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
    {"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 999, FLAGS},
    {"w",     "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 999, FLAGS},
    {"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
    {"g",    "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
    {NULL},
};

DEFINE_BIQUAD_FILTER(equalizer, "Apply two-pole peaking equalization (EQ) filter.");
#endif  /* CONFIG_EQUALIZER_FILTER */
#if CONFIG_BASS_FILTER
static const AVOption bass_options[] = {
    {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=100}, 0, 999999, FLAGS},
    {"f",         "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=100}, 0, 999999, FLAGS},
    {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"},
    {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"},
    {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
    {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
    {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
    {"width", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
    {"w",     "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
    {"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
    {"g",    "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
    {NULL},
};

DEFINE_BIQUAD_FILTER(bass, "Boost or cut lower frequencies.");
#endif  /* CONFIG_BASS_FILTER */
#if CONFIG_TREBLE_FILTER
static const AVOption treble_options[] = {
    {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
    {"f",         "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
    {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"},
    {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"},
    {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
    {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
    {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
    {"width", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
    {"w",     "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
    {"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
    {"g",    "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
    {NULL},
};

DEFINE_BIQUAD_FILTER(treble, "Boost or cut upper frequencies.");
#endif  /* CONFIG_TREBLE_FILTER */
#if CONFIG_BANDPASS_FILTER
static const AVOption bandpass_options[] = {
    {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
    {"f",         "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
    {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"},
    {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"},
    {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
    {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
    {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
    {"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
    {"w",     "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
    {"csg",   "use constant skirt gain", OFFSET(csg), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
    {NULL},
};

DEFINE_BIQUAD_FILTER(bandpass, "Apply a two-pole Butterworth band-pass filter.");
#endif  /* CONFIG_BANDPASS_FILTER */
#if CONFIG_BANDREJECT_FILTER
static const AVOption bandreject_options[] = {
    {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
    {"f",         "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
    {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"},
    {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"},
    {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
    {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
    {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
    {"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
    {"w",     "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
    {NULL},
};

DEFINE_BIQUAD_FILTER(bandreject, "Apply a two-pole Butterworth band-reject filter.");
#endif  /* CONFIG_BANDREJECT_FILTER */
#if CONFIG_LOWPASS_FILTER
static const AVOption lowpass_options[] = {
    {"frequency", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=500}, 0, 999999, FLAGS},
    {"f",         "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=500}, 0, 999999, FLAGS},
    {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"},
    {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"},
    {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
    {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
    {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
    {"width", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
    {"w",     "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
    {"poles", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
    {"p",     "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
    {NULL},
};

DEFINE_BIQUAD_FILTER(lowpass, "Apply a low-pass filter with 3dB point frequency.");
#endif  /* CONFIG_LOWPASS_FILTER */
#if CONFIG_HIGHPASS_FILTER
static const AVOption highpass_options[] = {
    {"frequency", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
    {"f",         "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
    {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"},
    {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"},
    {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
    {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
    {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
    {"width", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
    {"w",     "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
    {"poles", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
    {"p",     "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
    {NULL},
};

DEFINE_BIQUAD_FILTER(highpass, "Apply a high-pass filter with 3dB point frequency.");
#endif  /* CONFIG_HIGHPASS_FILTER */
#if CONFIG_ALLPASS_FILTER
static const AVOption allpass_options[] = {
    {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
    {"f",         "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
    {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=HZ}, HZ, SLOPE, FLAGS, "width_type"},
    {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"},
    {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
    {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS,
"width_type"}, + {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"}, + {"width", "set filter-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=707.1}, 0, 99999, FLAGS}, + {"w", "set filter-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=707.1}, 0, 99999, FLAGS}, + {NULL}, +}; + +DEFINE_BIQUAD_FILTER(allpass, "Apply a two-pole all-pass filter."); +#endif /* CONFIG_ALLPASS_FILTER */ +#if CONFIG_BIQUAD_FILTER +static const AVOption biquad_options[] = { + {"a0", NULL, OFFSET(a0), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS}, + {"a1", NULL, OFFSET(a1), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS}, + {"a2", NULL, OFFSET(a2), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS}, + {"b0", NULL, OFFSET(b0), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS}, + {"b1", NULL, OFFSET(b1), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS}, + {"b2", NULL, OFFSET(b2), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS}, + {NULL}, +}; + +DEFINE_BIQUAD_FILTER(biquad, "Apply a biquad IIR filter with the given coefficients."); +#endif /* CONFIG_BIQUAD_FILTER */ diff --git a/ffmpeg1/libavfilter/af_channelmap.c b/ffmpeg1/libavfilter/af_channelmap.c new file mode 100644 index 0000000..e73c4bc --- /dev/null +++ b/ffmpeg1/libavfilter/af_channelmap.c @@ -0,0 +1,416 @@ +/* + * Copyright (c) 2012 Google, Inc. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * audio channel mapping filter + */ + +#include + +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/common.h" +#include "libavutil/mathematics.h" +#include "libavutil/opt.h" +#include "libavutil/samplefmt.h" + +#include "audio.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" + +struct ChannelMap { + uint64_t in_channel; + uint64_t out_channel; + int in_channel_idx; + int out_channel_idx; +}; + +enum MappingMode { + MAP_NONE, + MAP_ONE_INT, + MAP_ONE_STR, + MAP_PAIR_INT_INT, + MAP_PAIR_INT_STR, + MAP_PAIR_STR_INT, + MAP_PAIR_STR_STR +}; + +#define MAX_CH 64 +typedef struct ChannelMapContext { + const AVClass *class; + AVFilterChannelLayouts *channel_layouts; + char *mapping_str; + char *channel_layout_str; + uint64_t output_layout; + struct ChannelMap map[MAX_CH]; + int nch; + enum MappingMode mode; +} ChannelMapContext; + +#define OFFSET(x) offsetof(ChannelMapContext, x) +#define A AV_OPT_FLAG_AUDIO_PARAM +#define F AV_OPT_FLAG_FILTERING_PARAM +static const AVOption options[] = { + { "map", "A comma-separated list of input channel numbers in output order.", + OFFSET(mapping_str), AV_OPT_TYPE_STRING, .flags = A|F }, + { "channel_layout", "Output channel layout.", + OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A|F }, + { NULL }, +}; + +static const AVClass channelmap_class = { + .class_name = "channel map filter", + .item_name = av_default_item_name, + .option = options, + .version = LIBAVUTIL_VERSION_INT, +}; + +static char* split(char *message, char delim) { + char *next = strchr(message, delim); + if (next) + *next++ = '\0'; + return next; +} + +static int get_channel_idx(char **map, int *ch, char delim, int max_ch) +{ + char *next = split(*map, 
delim); + int len; + int n = 0; + if (!next && delim == '-') + return AVERROR(EINVAL); + len = strlen(*map); + sscanf(*map, "%d%n", ch, &n); + if (n != len) + return AVERROR(EINVAL); + if (*ch < 0 || *ch > max_ch) + return AVERROR(EINVAL); + *map = next; + return 0; +} + +static int get_channel(char **map, uint64_t *ch, char delim) +{ + char *next = split(*map, delim); + if (!next && delim == '-') + return AVERROR(EINVAL); + *ch = av_get_channel_layout(*map); + if (av_get_channel_layout_nb_channels(*ch) != 1) + return AVERROR(EINVAL); + *map = next; + return 0; +} + +static av_cold int channelmap_init(AVFilterContext *ctx, const char *args) +{ + ChannelMapContext *s = ctx->priv; + int ret; + char *mapping; + int map_entries = 0; + char buf[256]; + enum MappingMode mode; + uint64_t out_ch_mask = 0; + int i; + + if (!args) { + av_log(ctx, AV_LOG_ERROR, "No parameters supplied.\n"); + return AVERROR(EINVAL); + } + + s->class = &channelmap_class; + av_opt_set_defaults(s); + + if ((ret = av_set_options_string(s, args, "=", ":")) < 0) + return ret; + + mapping = s->mapping_str; + + if (!mapping) { + mode = MAP_NONE; + } else { + char *dash = strchr(mapping, '-'); + if (!dash) { // short mapping + if (av_isdigit(*mapping)) + mode = MAP_ONE_INT; + else + mode = MAP_ONE_STR; + } else if (av_isdigit(*mapping)) { + if (av_isdigit(*(dash+1))) + mode = MAP_PAIR_INT_INT; + else + mode = MAP_PAIR_INT_STR; + } else { + if (av_isdigit(*(dash+1))) + mode = MAP_PAIR_STR_INT; + else + mode = MAP_PAIR_STR_STR; + } + } + + if (mode != MAP_NONE) { + char *comma = mapping; + map_entries = 1; + while ((comma = strchr(comma, ','))) { + if (*++comma) // Allow trailing comma + map_entries++; + } + } + + if (map_entries > MAX_CH) { + av_log(ctx, AV_LOG_ERROR, "Too many channels mapped: '%d'.\n", map_entries); + ret = AVERROR(EINVAL); + goto fail; + } + + for (i = 0; i < map_entries; i++) { + int in_ch_idx = -1, out_ch_idx = -1; + uint64_t in_ch = 0, out_ch = 0; + static const char err[] = 
"Failed to parse channel map\n"; + switch (mode) { + case MAP_ONE_INT: + if (get_channel_idx(&mapping, &in_ch_idx, ',', MAX_CH) < 0) { + ret = AVERROR(EINVAL); + av_log(ctx, AV_LOG_ERROR, err); + goto fail; + } + s->map[i].in_channel_idx = in_ch_idx; + s->map[i].out_channel_idx = i; + break; + case MAP_ONE_STR: + if (!get_channel(&mapping, &in_ch, ',')) { + av_log(ctx, AV_LOG_ERROR, err); + ret = AVERROR(EINVAL); + goto fail; + } + s->map[i].in_channel = in_ch; + s->map[i].out_channel_idx = i; + break; + case MAP_PAIR_INT_INT: + if (get_channel_idx(&mapping, &in_ch_idx, '-', MAX_CH) < 0 || + get_channel_idx(&mapping, &out_ch_idx, ',', MAX_CH) < 0) { + av_log(ctx, AV_LOG_ERROR, err); + ret = AVERROR(EINVAL); + goto fail; + } + s->map[i].in_channel_idx = in_ch_idx; + s->map[i].out_channel_idx = out_ch_idx; + break; + case MAP_PAIR_INT_STR: + if (get_channel_idx(&mapping, &in_ch_idx, '-', MAX_CH) < 0 || + get_channel(&mapping, &out_ch, ',') < 0 || + out_ch & out_ch_mask) { + av_log(ctx, AV_LOG_ERROR, err); + ret = AVERROR(EINVAL); + goto fail; + } + s->map[i].in_channel_idx = in_ch_idx; + s->map[i].out_channel = out_ch; + out_ch_mask |= out_ch; + break; + case MAP_PAIR_STR_INT: + if (get_channel(&mapping, &in_ch, '-') < 0 || + get_channel_idx(&mapping, &out_ch_idx, ',', MAX_CH) < 0) { + av_log(ctx, AV_LOG_ERROR, err); + ret = AVERROR(EINVAL); + goto fail; + } + s->map[i].in_channel = in_ch; + s->map[i].out_channel_idx = out_ch_idx; + break; + case MAP_PAIR_STR_STR: + if (get_channel(&mapping, &in_ch, '-') < 0 || + get_channel(&mapping, &out_ch, ',') < 0 || + out_ch & out_ch_mask) { + av_log(ctx, AV_LOG_ERROR, err); + ret = AVERROR(EINVAL); + goto fail; + } + s->map[i].in_channel = in_ch; + s->map[i].out_channel = out_ch; + out_ch_mask |= out_ch; + break; + } + } + s->mode = mode; + s->nch = map_entries; + s->output_layout = out_ch_mask ? 
out_ch_mask : + av_get_default_channel_layout(map_entries); + + if (s->channel_layout_str) { + uint64_t fmt; + if ((fmt = av_get_channel_layout(s->channel_layout_str)) == 0) { + av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout: '%s'.\n", + s->channel_layout_str); + ret = AVERROR(EINVAL); + goto fail; + } + if (mode == MAP_NONE) { + int i; + s->nch = av_get_channel_layout_nb_channels(fmt); + for (i = 0; i < s->nch; i++) { + s->map[i].in_channel_idx = i; + s->map[i].out_channel_idx = i; + } + } else if (out_ch_mask && out_ch_mask != fmt) { + av_get_channel_layout_string(buf, sizeof(buf), 0, out_ch_mask); + av_log(ctx, AV_LOG_ERROR, + "Output channel layout '%s' does not match the list of channel mapped: '%s'.\n", + s->channel_layout_str, buf); + ret = AVERROR(EINVAL); + goto fail; + } else if (s->nch != av_get_channel_layout_nb_channels(fmt)) { + av_log(ctx, AV_LOG_ERROR, + "Output channel layout %s does not match the number of channels mapped %d.\n", + s->channel_layout_str, s->nch); + ret = AVERROR(EINVAL); + goto fail; + } + s->output_layout = fmt; + } + ff_add_channel_layout(&s->channel_layouts, s->output_layout); + + if (mode == MAP_PAIR_INT_STR || mode == MAP_PAIR_STR_STR) { + for (i = 0; i < s->nch; i++) { + s->map[i].out_channel_idx = av_get_channel_layout_channel_index( + s->output_layout, s->map[i].out_channel); + } + } + +fail: + av_opt_free(s); + return ret; +} + +static int channelmap_query_formats(AVFilterContext *ctx) +{ + ChannelMapContext *s = ctx->priv; + + ff_set_common_formats(ctx, ff_planar_sample_fmts()); + ff_set_common_samplerates(ctx, ff_all_samplerates()); + ff_channel_layouts_ref(ff_all_channel_layouts(), &ctx->inputs[0]->out_channel_layouts); + ff_channel_layouts_ref(s->channel_layouts, &ctx->outputs[0]->in_channel_layouts); + + return 0; +} + +static int channelmap_filter_frame(AVFilterLink *inlink, AVFrame *buf) +{ + AVFilterContext *ctx = inlink->dst; + AVFilterLink *outlink = ctx->outputs[0]; + const ChannelMapContext *s = 
ctx->priv; + const int nch_in = av_get_channel_layout_nb_channels(inlink->channel_layout); + const int nch_out = s->nch; + int ch; + uint8_t *source_planes[MAX_CH]; + + memcpy(source_planes, buf->extended_data, + nch_in * sizeof(source_planes[0])); + + if (nch_out > nch_in) { + if (nch_out > FF_ARRAY_ELEMS(buf->data)) { + uint8_t **new_extended_data = + av_mallocz(nch_out * sizeof(*buf->extended_data)); + if (!new_extended_data) { + av_frame_free(&buf); + return AVERROR(ENOMEM); + } + if (buf->extended_data == buf->data) { + buf->extended_data = new_extended_data; + } else { + av_free(buf->extended_data); + buf->extended_data = new_extended_data; + } + } else if (buf->extended_data != buf->data) { + av_free(buf->extended_data); + buf->extended_data = buf->data; + } + } + + for (ch = 0; ch < nch_out; ch++) { + buf->extended_data[s->map[ch].out_channel_idx] = + source_planes[s->map[ch].in_channel_idx]; + } + + if (buf->data != buf->extended_data) + memcpy(buf->data, buf->extended_data, + FFMIN(FF_ARRAY_ELEMS(buf->data), nch_out) * sizeof(buf->data[0])); + + return ff_filter_frame(outlink, buf); +} + +static int channelmap_config_input(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->dst; + ChannelMapContext *s = ctx->priv; + int i, err = 0; + const char *channel_name; + char layout_name[256]; + + if (s->mode == MAP_PAIR_STR_INT || s->mode == MAP_PAIR_STR_STR) { + for (i = 0; i < s->nch; i++) { + s->map[i].in_channel_idx = av_get_channel_layout_channel_index( + inlink->channel_layout, s->map[i].in_channel); + if (s->map[i].in_channel_idx < 0) { + channel_name = av_get_channel_name(s->map[i].in_channel); + av_get_channel_layout_string(layout_name, sizeof(layout_name), + 0, inlink->channel_layout); + av_log(ctx, AV_LOG_ERROR, + "input channel '%s' not available from input layout '%s'\n", + channel_name, layout_name); + err = AVERROR(EINVAL); + } + } + } + + return err; +} + +static const AVFilterPad avfilter_af_channelmap_inputs[] = { + { + .name = "default", + 
.type = AVMEDIA_TYPE_AUDIO, + .filter_frame = channelmap_filter_frame, + .config_props = channelmap_config_input, + .needs_writable = 1, + }, + { NULL } +}; + +static const AVFilterPad avfilter_af_channelmap_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO + }, + { NULL } +}; + +AVFilter avfilter_af_channelmap = { + .name = "channelmap", + .description = NULL_IF_CONFIG_SMALL("Remap audio channels."), + .init = channelmap_init, + .query_formats = channelmap_query_formats, + .priv_size = sizeof(ChannelMapContext), + + .inputs = avfilter_af_channelmap_inputs, + .outputs = avfilter_af_channelmap_outputs, + .priv_class = &channelmap_class, +}; diff --git a/ffmpeg1/libavfilter/af_channelsplit.c b/ffmpeg1/libavfilter/af_channelsplit.c new file mode 100644 index 0000000..9bcdc54 --- /dev/null +++ b/ffmpeg1/libavfilter/af_channelsplit.c @@ -0,0 +1,153 @@ +/* + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Channel split filter + * + * Split an audio stream into per-channel streams. 
+ */ + +#include "libavutil/channel_layout.h" +#include "libavutil/internal.h" +#include "libavutil/opt.h" + +#include "audio.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" + +typedef struct ChannelSplitContext { + const AVClass *class; + + uint64_t channel_layout; + char *channel_layout_str; +} ChannelSplitContext; + +#define OFFSET(x) offsetof(ChannelSplitContext, x) +#define A AV_OPT_FLAG_AUDIO_PARAM +#define F AV_OPT_FLAG_FILTERING_PARAM +static const AVOption channelsplit_options[] = { + { "channel_layout", "Input channel layout.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, { .str = "stereo" }, .flags = A|F }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(channelsplit); + +static int init(AVFilterContext *ctx, const char *arg) +{ + ChannelSplitContext *s = ctx->priv; + int nb_channels; + int ret = 0, i; + + s->class = &channelsplit_class; + av_opt_set_defaults(s); + if ((ret = av_set_options_string(s, arg, "=", ":")) < 0) + return ret; + if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) { + av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n", + s->channel_layout_str); + ret = AVERROR(EINVAL); + goto fail; + } + + nb_channels = av_get_channel_layout_nb_channels(s->channel_layout); + for (i = 0; i < nb_channels; i++) { + uint64_t channel = av_channel_layout_extract_channel(s->channel_layout, i); + AVFilterPad pad = { 0 }; + + pad.type = AVMEDIA_TYPE_AUDIO; + pad.name = av_get_channel_name(channel); + + ff_insert_outpad(ctx, i, &pad); + } + +fail: + av_opt_free(s); + return ret; +} + +static int query_formats(AVFilterContext *ctx) +{ + ChannelSplitContext *s = ctx->priv; + AVFilterChannelLayouts *in_layouts = NULL; + int i; + + ff_set_common_formats (ctx, ff_planar_sample_fmts()); + ff_set_common_samplerates(ctx, ff_all_samplerates()); + + ff_add_channel_layout(&in_layouts, s->channel_layout); + ff_channel_layouts_ref(in_layouts, &ctx->inputs[0]->out_channel_layouts); + + for (i = 0; i < ctx->nb_outputs; 
i++) { + AVFilterChannelLayouts *out_layouts = NULL; + uint64_t channel = av_channel_layout_extract_channel(s->channel_layout, i); + + ff_add_channel_layout(&out_layouts, channel); + ff_channel_layouts_ref(out_layouts, &ctx->outputs[i]->in_channel_layouts); + } + + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *buf) +{ + AVFilterContext *ctx = inlink->dst; + int i, ret = 0; + + for (i = 0; i < ctx->nb_outputs; i++) { + AVFrame *buf_out = av_frame_clone(buf); + + if (!buf_out) { + ret = AVERROR(ENOMEM); + break; + } + + buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i]; + buf_out->channel_layout = + av_channel_layout_extract_channel(buf->channel_layout, i); + + ret = ff_filter_frame(ctx->outputs[i], buf_out); + if (ret < 0) + break; + } + av_frame_free(&buf); + return ret; +} + +static const AVFilterPad avfilter_af_channelsplit_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + }, + { NULL } +}; + +AVFilter avfilter_af_channelsplit = { + .name = "channelsplit", + .description = NULL_IF_CONFIG_SMALL("Split audio into per-channel streams"), + .priv_size = sizeof(ChannelSplitContext), + + .init = init, + .query_formats = query_formats, + + .inputs = avfilter_af_channelsplit_inputs, + .outputs = NULL, + .priv_class = &channelsplit_class, +}; diff --git a/ffmpeg1/libavfilter/af_earwax.c b/ffmpeg1/libavfilter/af_earwax.c new file mode 100644 index 0000000..b1d3d6f --- /dev/null +++ b/ffmpeg1/libavfilter/af_earwax.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2011 Mina Nagy Zaki + * Copyright (c) 2000 Edward Beingessner And Sundry Contributors. + * This source code is freely redistributable and may be used for any purpose. + * This copyright notice must be maintained. Edward Beingessner And Sundry + * Contributors are not responsible for the consequences of using this + * software. + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Stereo Widening Effect. Adds audio cues to move stereo image in + * front of the listener. Adapted from the libsox earwax effect. + */ + +#include "libavutil/channel_layout.h" +#include "avfilter.h" +#include "audio.h" +#include "formats.h" + +#define NUMTAPS 64 + +static const int8_t filt[NUMTAPS] = { +/* 30° 330° */ + 4, -6, /* 32 tap stereo FIR filter. */ + 4, -11, /* One side filters as if the */ + -1, -5, /* signal was from 30 degrees */ + 3, 3, /* from the ear, the other as */ + -2, 5, /* if 330 degrees. */ + -5, 0, + 9, 1, + 6, 3, /* Input */ + -4, -1, /* Left Right */ + -5, -3, /* __________ __________ */ + -2, -5, /* | | | | */ + -7, 1, /* .---| Hh,0(f) | | Hh,0(f) |---. 
*/ + 6, -7, /* / |__________| |__________| \ */ + 30, -29, /* / \ / \ */ + 12, -3, /* / X \ */ + -11, 4, /* / / \ \ */ + -3, 7, /* ____V_____ __________V V__________ _____V____ */ + -20, 23, /* | | | | | | | | */ + 2, 0, /* | Hh,30(f) | | Hh,330(f)| | Hh,330(f)| | Hh,30(f) | */ + 1, -6, /* |__________| |__________| |__________| |__________| */ + -14, -5, /* \ ___ / \ ___ / */ + 15, -18, /* \ / \ / _____ \ / \ / */ + 6, 7, /* `->| + |<--' / \ `-->| + |<-' */ + 15, -10, /* \___/ _/ \_ \___/ */ + -14, 22, /* \ / \ / \ / */ + -7, -2, /* `--->| | | |<---' */ + -4, 9, /* \_/ \_/ */ + 6, -12, /* */ + 6, -6, /* Headphones */ + 0, -11, + 0, -5, + 4, 0}; + +typedef struct { + int16_t taps[NUMTAPS * 2]; +} EarwaxContext; + +static int query_formats(AVFilterContext *ctx) +{ + static const int sample_rates[] = { 44100, -1 }; + + AVFilterFormats *formats = NULL; + AVFilterChannelLayouts *layout = NULL; + + ff_add_format(&formats, AV_SAMPLE_FMT_S16); + ff_set_common_formats(ctx, formats); + ff_add_channel_layout(&layout, AV_CH_LAYOUT_STEREO); + ff_set_common_channel_layouts(ctx, layout); + ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates)); + + return 0; +} + +//FIXME: replace with DSPContext.scalarproduct_int16 +static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin, int16_t *out) +{ + int32_t sample; + int16_t j; + + while (in < endin) { + sample = 32; + for (j = 0; j < NUMTAPS; j++) + sample += in[j] * filt[j]; + *out = sample >> 6; + out++; + in++; + } + + return out; +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) +{ + AVFilterLink *outlink = inlink->dst->outputs[0]; + int16_t *taps, *endin, *in, *out; + AVFrame *outsamples = ff_get_audio_buffer(inlink, insamples->nb_samples); + int ret; + + if (!outsamples) { + av_frame_free(&insamples); + return AVERROR(ENOMEM); + } + av_frame_copy_props(outsamples, insamples); + + taps = ((EarwaxContext *)inlink->dst->priv)->taps; + out = (int16_t *)outsamples->data[0]; + in = 
(int16_t *)insamples ->data[0]; + + // copy part of new input and process with saved input + memcpy(taps+NUMTAPS, in, NUMTAPS * sizeof(*taps)); + out = scalarproduct(taps, taps + NUMTAPS, out); + + // process current input + endin = in + insamples->nb_samples * 2 - NUMTAPS; + scalarproduct(in, endin, out); + + // save part of input for next round + memcpy(taps, endin, NUMTAPS * sizeof(*taps)); + + ret = ff_filter_frame(outlink, outsamples); + av_frame_free(&insamples); + return ret; +} + +static const AVFilterPad earwax_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad earwax_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL } +}; + +AVFilter avfilter_af_earwax = { + .name = "earwax", + .description = NULL_IF_CONFIG_SMALL("Widen the stereo image."), + .query_formats = query_formats, + .priv_size = sizeof(EarwaxContext), + .inputs = earwax_inputs, + .outputs = earwax_outputs, +}; diff --git a/ffmpeg1/libavfilter/af_join.c b/ffmpeg1/libavfilter/af_join.c new file mode 100644 index 0000000..8dffda0 --- /dev/null +++ b/ffmpeg1/libavfilter/af_join.c @@ -0,0 +1,522 @@ +/* + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Audio join filter + * + * Join multiple audio inputs as different channels in + * a single output + */ + +#include "libavutil/avassert.h" +#include "libavutil/channel_layout.h" +#include "libavutil/common.h" +#include "libavutil/opt.h" + +#include "audio.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" + +typedef struct ChannelMap { + int input; ///< input stream index + int in_channel_idx; ///< index of in_channel in the input stream data + uint64_t in_channel; ///< layout describing the input channel + uint64_t out_channel; ///< layout describing the output channel +} ChannelMap; + +typedef struct JoinContext { + const AVClass *class; + + int inputs; + char *map; + char *channel_layout_str; + uint64_t channel_layout; + + int nb_channels; + ChannelMap *channels; + + /** + * Temporary storage for input frames, until we get one on each input. + */ + AVFrame **input_frames; + + /** + * Temporary storage for buffer references, for assembling the output frame. 
+ */ + AVBufferRef **buffers; +} JoinContext; + +#define OFFSET(x) offsetof(JoinContext, x) +#define A AV_OPT_FLAG_AUDIO_PARAM +#define F AV_OPT_FLAG_FILTERING_PARAM +static const AVOption join_options[] = { + { "inputs", "Number of input streams.", OFFSET(inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, A|F }, + { "channel_layout", "Channel layout of the " + "output stream.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, A|F }, + { "map", "A comma-separated list of channels maps in the format " + "'input_stream.input_channel-output_channel.", + OFFSET(map), AV_OPT_TYPE_STRING, .flags = A|F }, + { NULL }, +}; + +static const AVClass join_class = { + .class_name = "join filter", + .item_name = av_default_item_name, + .option = join_options, + .version = LIBAVUTIL_VERSION_INT, +}; + +static int filter_frame(AVFilterLink *link, AVFrame *frame) +{ + AVFilterContext *ctx = link->dst; + JoinContext *s = ctx->priv; + int i; + + for (i = 0; i < ctx->nb_inputs; i++) + if (link == ctx->inputs[i]) + break; + av_assert0(i < ctx->nb_inputs); + av_assert0(!s->input_frames[i]); + s->input_frames[i] = frame; + + return 0; +} + +static int parse_maps(AVFilterContext *ctx) +{ + JoinContext *s = ctx->priv; + char *cur = s->map; + + while (cur && *cur) { + char *sep, *next, *p; + uint64_t in_channel = 0, out_channel = 0; + int input_idx, out_ch_idx, in_ch_idx; + + next = strchr(cur, ','); + if (next) + *next++ = 0; + + /* split the map into input and output parts */ + if (!(sep = strchr(cur, '-'))) { + av_log(ctx, AV_LOG_ERROR, "Missing separator '-' in channel " + "map '%s'\n", cur); + return AVERROR(EINVAL); + } + *sep++ = 0; + +#define PARSE_CHANNEL(str, var, inout) \ + if (!(var = av_get_channel_layout(str))) { \ + av_log(ctx, AV_LOG_ERROR, "Invalid " inout " channel: %s.\n", str);\ + return AVERROR(EINVAL); \ + } \ + if (av_get_channel_layout_nb_channels(var) != 1) { \ + av_log(ctx, AV_LOG_ERROR, "Channel map describes more than one " \ + inout " 
channel.\n"); \ + return AVERROR(EINVAL); \ + } + + /* parse output channel */ + PARSE_CHANNEL(sep, out_channel, "output"); + if (!(out_channel & s->channel_layout)) { + av_log(ctx, AV_LOG_ERROR, "Output channel '%s' is not present in " + "requested channel layout.\n", sep); + return AVERROR(EINVAL); + } + + out_ch_idx = av_get_channel_layout_channel_index(s->channel_layout, + out_channel); + if (s->channels[out_ch_idx].input >= 0) { + av_log(ctx, AV_LOG_ERROR, "Multiple maps for output channel " + "'%s'.\n", sep); + return AVERROR(EINVAL); + } + + /* parse input channel */ + input_idx = strtol(cur, &cur, 0); + if (input_idx < 0 || input_idx >= s->inputs) { + av_log(ctx, AV_LOG_ERROR, "Invalid input stream index: %d.\n", + input_idx); + return AVERROR(EINVAL); + } + + if (*cur) + cur++; + + in_ch_idx = strtol(cur, &p, 0); + if (p == cur) { + /* channel specifier is not a number, + * try to parse as channel name */ + PARSE_CHANNEL(cur, in_channel, "input"); + } + + s->channels[out_ch_idx].input = input_idx; + if (in_channel) + s->channels[out_ch_idx].in_channel = in_channel; + else + s->channels[out_ch_idx].in_channel_idx = in_ch_idx; + + cur = next; + } + return 0; +} + +static int join_init(AVFilterContext *ctx, const char *args) +{ + JoinContext *s = ctx->priv; + int ret, i; + + s->class = &join_class; + av_opt_set_defaults(s); + if ((ret = av_set_options_string(s, args, "=", ":")) < 0) + return ret; + + if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) { + av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n", + s->channel_layout_str); + ret = AVERROR(EINVAL); + goto fail; + } + + s->nb_channels = av_get_channel_layout_nb_channels(s->channel_layout); + s->channels = av_mallocz(sizeof(*s->channels) * s->nb_channels); + s->buffers = av_mallocz(sizeof(*s->buffers) * s->nb_channels); + s->input_frames = av_mallocz(sizeof(*s->input_frames) * s->inputs); + if (!s->channels || !s->buffers|| !s->input_frames) { + ret = AVERROR(ENOMEM); 
+ goto fail; + } + + for (i = 0; i < s->nb_channels; i++) { + s->channels[i].out_channel = av_channel_layout_extract_channel(s->channel_layout, i); + s->channels[i].input = -1; + } + + if ((ret = parse_maps(ctx)) < 0) + goto fail; + + for (i = 0; i < s->inputs; i++) { + char name[32]; + AVFilterPad pad = { 0 }; + + snprintf(name, sizeof(name), "input%d", i); + pad.type = AVMEDIA_TYPE_AUDIO; + pad.name = av_strdup(name); + pad.filter_frame = filter_frame; + + pad.needs_fifo = 1; + + ff_insert_inpad(ctx, i, &pad); + } + +fail: + av_opt_free(s); + return ret; +} + +static void join_uninit(AVFilterContext *ctx) +{ + JoinContext *s = ctx->priv; + int i; + + for (i = 0; i < ctx->nb_inputs; i++) { + av_freep(&ctx->input_pads[i].name); + av_frame_free(&s->input_frames[i]); + } + + av_freep(&s->channels); + av_freep(&s->buffers); + av_freep(&s->input_frames); +} + +static int join_query_formats(AVFilterContext *ctx) +{ + JoinContext *s = ctx->priv; + AVFilterChannelLayouts *layouts = NULL; + int i; + + ff_add_channel_layout(&layouts, s->channel_layout); + ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts); + + for (i = 0; i < ctx->nb_inputs; i++) + ff_channel_layouts_ref(ff_all_channel_layouts(), + &ctx->inputs[i]->out_channel_layouts); + + ff_set_common_formats (ctx, ff_planar_sample_fmts()); + ff_set_common_samplerates(ctx, ff_all_samplerates()); + + return 0; +} + +static void guess_map_matching(AVFilterContext *ctx, ChannelMap *ch, + uint64_t *inputs) +{ + int i; + + for (i = 0; i < ctx->nb_inputs; i++) { + AVFilterLink *link = ctx->inputs[i]; + + if (ch->out_channel & link->channel_layout && + !(ch->out_channel & inputs[i])) { + ch->input = i; + ch->in_channel = ch->out_channel; + inputs[i] |= ch->out_channel; + return; + } + } +} + +static void guess_map_any(AVFilterContext *ctx, ChannelMap *ch, + uint64_t *inputs) +{ + int i; + + for (i = 0; i < ctx->nb_inputs; i++) { + AVFilterLink *link = ctx->inputs[i]; + + if ((inputs[i] & link->channel_layout) 
!= link->channel_layout) { + uint64_t unused = link->channel_layout & ~inputs[i]; + + ch->input = i; + ch->in_channel = av_channel_layout_extract_channel(unused, 0); + inputs[i] |= ch->in_channel; + return; + } + } +} + +static int join_config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + JoinContext *s = ctx->priv; + uint64_t *inputs; // nth element tracks which channels are used from nth input + int i, ret = 0; + + /* initialize inputs to user-specified mappings */ + if (!(inputs = av_mallocz(sizeof(*inputs) * ctx->nb_inputs))) + return AVERROR(ENOMEM); + for (i = 0; i < s->nb_channels; i++) { + ChannelMap *ch = &s->channels[i]; + AVFilterLink *inlink; + + if (ch->input < 0) + continue; + + inlink = ctx->inputs[ch->input]; + + if (!ch->in_channel) + ch->in_channel = av_channel_layout_extract_channel(inlink->channel_layout, + ch->in_channel_idx); + + if (!(ch->in_channel & inlink->channel_layout)) { + av_log(ctx, AV_LOG_ERROR, "Requested channel %s is not present in " + "input stream #%d.\n", av_get_channel_name(ch->in_channel), + ch->input); + ret = AVERROR(EINVAL); + goto fail; + } + + inputs[ch->input] |= ch->in_channel; + } + + /* guess channel maps when not explicitly defined */ + /* first try unused matching channels */ + for (i = 0; i < s->nb_channels; i++) { + ChannelMap *ch = &s->channels[i]; + + if (ch->input < 0) + guess_map_matching(ctx, ch, inputs); + } + + /* if the above failed, try to find _any_ unused input channel */ + for (i = 0; i < s->nb_channels; i++) { + ChannelMap *ch = &s->channels[i]; + + if (ch->input < 0) + guess_map_any(ctx, ch, inputs); + + if (ch->input < 0) { + av_log(ctx, AV_LOG_ERROR, "Could not find input channel for " + "output channel '%s'.\n", + av_get_channel_name(ch->out_channel)); + goto fail; + } + + ch->in_channel_idx = av_get_channel_layout_channel_index(ctx->inputs[ch->input]->channel_layout, + ch->in_channel); + } + + /* print mappings */ + av_log(ctx, AV_LOG_VERBOSE, "mappings: "); + for (i 
= 0; i < s->nb_channels; i++) { + ChannelMap *ch = &s->channels[i]; + av_log(ctx, AV_LOG_VERBOSE, "%d.%s => %s ", ch->input, + av_get_channel_name(ch->in_channel), + av_get_channel_name(ch->out_channel)); + } + av_log(ctx, AV_LOG_VERBOSE, "\n"); + + for (i = 0; i < ctx->nb_inputs; i++) { + if (!inputs[i]) + av_log(ctx, AV_LOG_WARNING, "No channels are used from input " + "stream %d.\n", i); + } + +fail: + av_freep(&inputs); + return ret; +} + +static int join_request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + JoinContext *s = ctx->priv; + AVFrame *frame; + int linesize = INT_MAX; + int nb_samples = 0; + int nb_buffers = 0; + int i, j, ret; + + /* get a frame on each input */ + for (i = 0; i < ctx->nb_inputs; i++) { + AVFilterLink *inlink = ctx->inputs[i]; + + if (!s->input_frames[i] && + (ret = ff_request_frame(inlink)) < 0) + return ret; + + /* request the same number of samples on all inputs */ + if (i == 0) { + nb_samples = s->input_frames[0]->nb_samples; + + for (j = 1; !i && j < ctx->nb_inputs; j++) + ctx->inputs[j]->request_samples = nb_samples; + } + } + + /* setup the output frame */ + frame = av_frame_alloc(); + if (!frame) + return AVERROR(ENOMEM); + if (s->nb_channels > FF_ARRAY_ELEMS(frame->data)) { + frame->extended_data = av_mallocz(s->nb_channels * + sizeof(*frame->extended_data)); + if (!frame->extended_data) { + ret = AVERROR(ENOMEM); + goto fail; + } + } + + /* copy the data pointers */ + for (i = 0; i < s->nb_channels; i++) { + ChannelMap *ch = &s->channels[i]; + AVFrame *cur = s->input_frames[ch->input]; + AVBufferRef *buf; + + frame->extended_data[i] = cur->extended_data[ch->in_channel_idx]; + linesize = FFMIN(linesize, cur->linesize[0]); + + /* add the buffer where this plan is stored to the list if it's + * not already there */ + buf = av_frame_get_plane_buffer(cur, ch->in_channel_idx); + if (!buf) { + ret = AVERROR(EINVAL); + goto fail; + } + for (j = 0; j < nb_buffers; j++) + if (s->buffers[j]->buffer == 
buf->buffer) + break; + if (j == i) + s->buffers[nb_buffers++] = buf; + } + + /* create references to the buffers we copied to output */ + if (nb_buffers > FF_ARRAY_ELEMS(frame->buf)) { + frame->nb_extended_buf = nb_buffers - FF_ARRAY_ELEMS(frame->buf); + frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) * + frame->nb_extended_buf); + if (!frame->extended_buf) { + frame->nb_extended_buf = 0; + ret = AVERROR(ENOMEM); + goto fail; + } + } + for (i = 0; i < FFMIN(FF_ARRAY_ELEMS(frame->buf), nb_buffers); i++) { + frame->buf[i] = av_buffer_ref(s->buffers[i]); + if (!frame->buf[i]) { + ret = AVERROR(ENOMEM); + goto fail; + } + } + for (i = 0; i < frame->nb_extended_buf; i++) { + frame->extended_buf[i] = av_buffer_ref(s->buffers[i + + FF_ARRAY_ELEMS(frame->buf)]); + if (!frame->extended_buf[i]) { + ret = AVERROR(ENOMEM); + goto fail; + } + } + + frame->nb_samples = nb_samples; + frame->channel_layout = outlink->channel_layout; + frame->sample_rate = outlink->sample_rate; + frame->pts = s->input_frames[0]->pts; + frame->linesize[0] = linesize; + if (frame->data != frame->extended_data) { + memcpy(frame->data, frame->extended_data, sizeof(*frame->data) * + FFMIN(FF_ARRAY_ELEMS(frame->data), s->nb_channels)); + } + + ret = ff_filter_frame(outlink, frame); + + for (i = 0; i < ctx->nb_inputs; i++) + av_frame_free(&s->input_frames[i]); + + return ret; + +fail: + av_frame_free(&frame); + return ret; +} + +static const AVFilterPad avfilter_af_join_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = join_config_output, + .request_frame = join_request_frame, + }, + { NULL } +}; + +AVFilter avfilter_af_join = { + .name = "join", + .description = NULL_IF_CONFIG_SMALL("Join multiple audio streams into " + "multi-channel output"), + .priv_size = sizeof(JoinContext), + + .init = join_init, + .uninit = join_uninit, + .query_formats = join_query_formats, + + .inputs = NULL, + .outputs = avfilter_af_join_outputs, + .priv_class = &join_class, 
+}; diff --git a/ffmpeg1/libavfilter/af_pan.c b/ffmpeg1/libavfilter/af_pan.c new file mode 100644 index 0000000..ae2e0aa --- /dev/null +++ b/ffmpeg1/libavfilter/af_pan.c @@ -0,0 +1,407 @@ +/* + * Copyright (c) 2002 Anders Johansson + * Copyright (c) 2011 Clément BÅ“sch + * Copyright (c) 2011 Nicolas George + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Audio panning filter (channels mixing) + * Original code written by Anders Johansson for MPlayer, + * reimplemented for FFmpeg. 
+ */ + +#include +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "libswresample/swresample.h" +#include "audio.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" + +#define MAX_CHANNELS 63 + +typedef struct PanContext { + int64_t out_channel_layout; + double gain[MAX_CHANNELS][MAX_CHANNELS]; + int64_t need_renorm; + int need_renumber; + int nb_input_channels; + int nb_output_channels; + + int pure_gains; + /* channel mapping specific */ + int channel_map[SWR_CH_MAX]; + struct SwrContext *swr; +} PanContext; + +static int parse_channel_name(char **arg, int *rchannel, int *rnamed) +{ + char buf[8]; + int len, i, channel_id = 0; + int64_t layout, layout0; + + /* try to parse a channel name, e.g. "FL" */ + if (sscanf(*arg, "%7[A-Z]%n", buf, &len)) { + layout0 = layout = av_get_channel_layout(buf); + /* channel_id <- first set bit in layout */ + for (i = 32; i > 0; i >>= 1) { + if (layout >= (int64_t)1 << i) { + channel_id += i; + layout >>= i; + } + } + /* reject layouts that are not a single channel */ + if (channel_id >= MAX_CHANNELS || layout0 != (int64_t)1 << channel_id) + return AVERROR(EINVAL); + *rchannel = channel_id; + *rnamed = 1; + *arg += len; + return 0; + } + /* try to parse a channel number, e.g. 
"c2" */ + if (sscanf(*arg, "c%d%n", &channel_id, &len) && + channel_id >= 0 && channel_id < MAX_CHANNELS) { + *rchannel = channel_id; + *rnamed = 0; + *arg += len; + return 0; + } + return AVERROR(EINVAL); +} + +static void skip_spaces(char **arg) +{ + int len = 0; + + sscanf(*arg, " %n", &len); + *arg += len; +} + +static av_cold int init(AVFilterContext *ctx, const char *args0) +{ + PanContext *const pan = ctx->priv; + char *arg, *arg0, *tokenizer, *args = av_strdup(args0); + int out_ch_id, in_ch_id, len, named, ret; + int nb_in_channels[2] = { 0, 0 }; // number of unnamed and named input channels + double gain; + + if (!args0) { + av_log(ctx, AV_LOG_ERROR, + "pan filter needs a channel layout and a set " + "of channels definitions as parameter\n"); + return AVERROR(EINVAL); + } + if (!args) + return AVERROR(ENOMEM); + arg = av_strtok(args, ":", &tokenizer); + ret = ff_parse_channel_layout(&pan->out_channel_layout, arg, ctx); + if (ret < 0) + goto fail; + pan->nb_output_channels = av_get_channel_layout_nb_channels(pan->out_channel_layout); + + /* parse channel specifications */ + while ((arg = arg0 = av_strtok(NULL, ":", &tokenizer))) { + /* channel name */ + if (parse_channel_name(&arg, &out_ch_id, &named)) { + av_log(ctx, AV_LOG_ERROR, + "Expected out channel name, got \"%.8s\"\n", arg); + ret = AVERROR(EINVAL); + goto fail; + } + if (named) { + if (!((pan->out_channel_layout >> out_ch_id) & 1)) { + av_log(ctx, AV_LOG_ERROR, + "Channel \"%.8s\" does not exist in the chosen layout\n", arg0); + ret = AVERROR(EINVAL); + goto fail; + } + /* get the channel number in the output channel layout: + * out_channel_layout & ((1 << out_ch_id) - 1) are all the + * channels that come before out_ch_id, + * so their count is the index of out_ch_id */ + out_ch_id = av_get_channel_layout_nb_channels(pan->out_channel_layout & (((int64_t)1 << out_ch_id) - 1)); + } + if (out_ch_id < 0 || out_ch_id >= pan->nb_output_channels) { + av_log(ctx, AV_LOG_ERROR, + "Invalid out channel name 
\"%.8s\"\n", arg0); + ret = AVERROR(EINVAL); + goto fail; + } + skip_spaces(&arg); + if (*arg == '=') { + arg++; + } else if (*arg == '<') { + pan->need_renorm |= (int64_t)1 << out_ch_id; + arg++; + } else { + av_log(ctx, AV_LOG_ERROR, + "Syntax error after channel name in \"%.8s\"\n", arg0); + ret = AVERROR(EINVAL); + goto fail; + } + /* gains */ + while (1) { + gain = 1; + if (sscanf(arg, "%lf%n *%n", &gain, &len, &len)) + arg += len; + if (parse_channel_name(&arg, &in_ch_id, &named)){ + av_log(ctx, AV_LOG_ERROR, + "Expected in channel name, got \"%.8s\"\n", arg); + ret = AVERROR(EINVAL); + goto fail; + } + nb_in_channels[named]++; + if (nb_in_channels[!named]) { + av_log(ctx, AV_LOG_ERROR, + "Can not mix named and numbered channels\n"); + ret = AVERROR(EINVAL); + goto fail; + } + pan->gain[out_ch_id][in_ch_id] = gain; + skip_spaces(&arg); + if (!*arg) + break; + if (*arg != '+') { + av_log(ctx, AV_LOG_ERROR, "Syntax error near \"%.8s\"\n", arg); + ret = AVERROR(EINVAL); + goto fail; + } + arg++; + } + } + pan->need_renumber = !!nb_in_channels[1]; + + ret = 0; +fail: + av_free(args); + return ret; +} + +static int are_gains_pure(const PanContext *pan) +{ + int i, j; + + for (i = 0; i < MAX_CHANNELS; i++) { + int nb_gain = 0; + + for (j = 0; j < MAX_CHANNELS; j++) { + double gain = pan->gain[i][j]; + + /* channel mapping is effective only if 0% or 100% of a channel is + * selected... */ + if (gain != 0. && gain != 1.) 
+ return 0; + /* ...and if the output channel is only composed of one input */ + if (gain && nb_gain++) + return 0; + } + } + return 1; +} + +static int query_formats(AVFilterContext *ctx) +{ + PanContext *pan = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; + AVFilterFormats *formats = NULL; + AVFilterChannelLayouts *layouts; + + pan->pure_gains = are_gains_pure(pan); + /* libswr supports any sample and packing formats */ + ff_set_common_formats(ctx, ff_all_formats(AVMEDIA_TYPE_AUDIO)); + + formats = ff_all_samplerates(); + if (!formats) + return AVERROR(ENOMEM); + ff_set_common_samplerates(ctx, formats); + + // inlink supports any channel layout + layouts = ff_all_channel_layouts(); + ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts); + + // outlink supports only requested output channel layout + layouts = NULL; + ff_add_channel_layout(&layouts, pan->out_channel_layout); + ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts); + return 0; +} + +static int config_props(AVFilterLink *link) +{ + AVFilterContext *ctx = link->dst; + PanContext *pan = ctx->priv; + char buf[1024], *cur; + int i, j, k, r; + double t; + + pan->nb_input_channels = av_get_channel_layout_nb_channels(link->channel_layout); + if (pan->need_renumber) { + // input channels were given by their name: renumber them + for (i = j = 0; i < MAX_CHANNELS; i++) { + if ((link->channel_layout >> i) & 1) { + for (k = 0; k < pan->nb_output_channels; k++) + pan->gain[k][j] = pan->gain[k][i]; + j++; + } + } + } + + // sanity check; can't be done in query_formats since the inlink + // channel layout is unknown at that time + if (pan->nb_input_channels > SWR_CH_MAX || + pan->nb_output_channels > SWR_CH_MAX) { + av_log(ctx, AV_LOG_ERROR, + "libswresample support a maximum of %d channels. 
" + "Feel free to ask for a higher limit.\n", SWR_CH_MAX); + return AVERROR_PATCHWELCOME; + } + + // init libswresample context + pan->swr = swr_alloc_set_opts(pan->swr, + pan->out_channel_layout, link->format, link->sample_rate, + link->channel_layout, link->format, link->sample_rate, + 0, ctx); + if (!pan->swr) + return AVERROR(ENOMEM); + + // gains are pure, init the channel mapping + if (pan->pure_gains) { + + // get channel map from the pure gains + for (i = 0; i < pan->nb_output_channels; i++) { + int ch_id = -1; + for (j = 0; j < pan->nb_input_channels; j++) { + if (pan->gain[i][j]) { + ch_id = j; + break; + } + } + pan->channel_map[i] = ch_id; + } + + av_opt_set_int(pan->swr, "icl", pan->out_channel_layout, 0); + av_opt_set_int(pan->swr, "uch", pan->nb_output_channels, 0); + swr_set_channel_mapping(pan->swr, pan->channel_map); + } else { + // renormalize + for (i = 0; i < pan->nb_output_channels; i++) { + if (!((pan->need_renorm >> i) & 1)) + continue; + t = 0; + for (j = 0; j < pan->nb_input_channels; j++) + t += pan->gain[i][j]; + if (t > -1E-5 && t < 1E-5) { + // t is almost 0 but not exactly, this is probably a mistake + if (t) + av_log(ctx, AV_LOG_WARNING, + "Degenerate coefficients while renormalizing\n"); + continue; + } + for (j = 0; j < pan->nb_input_channels; j++) + pan->gain[i][j] /= t; + } + av_opt_set_int(pan->swr, "icl", link->channel_layout, 0); + av_opt_set_int(pan->swr, "ocl", pan->out_channel_layout, 0); + swr_set_matrix(pan->swr, pan->gain[0], pan->gain[1] - pan->gain[0]); + } + + r = swr_init(pan->swr); + if (r < 0) + return r; + + // summary + for (i = 0; i < pan->nb_output_channels; i++) { + cur = buf; + for (j = 0; j < pan->nb_input_channels; j++) { + r = snprintf(cur, buf + sizeof(buf) - cur, "%s%.3g i%d", + j ? 
" + " : "", pan->gain[i][j], j); + cur += FFMIN(buf + sizeof(buf) - cur, r); + } + av_log(ctx, AV_LOG_VERBOSE, "o%d = %s\n", i, buf); + } + // add channel mapping summary if possible + if (pan->pure_gains) { + av_log(ctx, AV_LOG_INFO, "Pure channel mapping detected:"); + for (i = 0; i < pan->nb_output_channels; i++) + if (pan->channel_map[i] < 0) + av_log(ctx, AV_LOG_INFO, " M"); + else + av_log(ctx, AV_LOG_INFO, " %d", pan->channel_map[i]); + av_log(ctx, AV_LOG_INFO, "\n"); + return 0; + } + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) +{ + int ret; + int n = insamples->nb_samples; + AVFilterLink *const outlink = inlink->dst->outputs[0]; + AVFrame *outsamples = ff_get_audio_buffer(outlink, n); + PanContext *pan = inlink->dst->priv; + + swr_convert(pan->swr, outsamples->data, n, (void *)insamples->data, n); + av_frame_copy_props(outsamples, insamples); + outsamples->channel_layout = outlink->channel_layout; + av_frame_set_channels(outsamples, outlink->channels); + + ret = ff_filter_frame(outlink, outsamples); + av_frame_free(&insamples); + return ret; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + PanContext *pan = ctx->priv; + swr_free(&pan->swr); +} + +static const AVFilterPad pan_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_props, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad pan_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL } +}; + +AVFilter avfilter_af_pan = { + .name = "pan", + .description = NULL_IF_CONFIG_SMALL("Remix channels with coefficients (panning)."), + .priv_size = sizeof(PanContext), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .inputs = pan_inputs, + .outputs = pan_outputs, +}; diff --git a/ffmpeg1/libavfilter/af_resample.c b/ffmpeg1/libavfilter/af_resample.c new file mode 100644 index 0000000..f82a970 --- /dev/null +++ b/ffmpeg1/libavfilter/af_resample.c 
@@ -0,0 +1,305 @@ +/* + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * sample format and channel layout conversion audio filter + */ + +#include "libavutil/avassert.h" +#include "libavutil/avstring.h" +#include "libavutil/common.h" +#include "libavutil/dict.h" +#include "libavutil/mathematics.h" +#include "libavutil/opt.h" + +#include "libavresample/avresample.h" + +#include "audio.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" + +typedef struct ResampleContext { + AVAudioResampleContext *avr; + AVDictionary *options; + + int64_t next_pts; + + /* set by filter_frame() to signal an output frame to request_frame() */ + int got_output; +} ResampleContext; + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + ResampleContext *s = ctx->priv; + + if (args) { + int ret = av_dict_parse_string(&s->options, args, "=", ":", 0); + if (ret < 0) { + av_log(ctx, AV_LOG_ERROR, "error setting option string: %s\n", args); + return ret; + } + + /* do not allow the user to override basic format options */ + av_dict_set(&s->options, "in_channel_layout", NULL, 0); + av_dict_set(&s->options, "out_channel_layout", NULL, 0); + av_dict_set(&s->options, "in_sample_fmt", NULL, 0); + av_dict_set(&s->options, 
"out_sample_fmt", NULL, 0); + av_dict_set(&s->options, "in_sample_rate", NULL, 0); + av_dict_set(&s->options, "out_sample_rate", NULL, 0); + } + + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + ResampleContext *s = ctx->priv; + + if (s->avr) { + avresample_close(s->avr); + avresample_free(&s->avr); + } + av_dict_free(&s->options); +} + +static int query_formats(AVFilterContext *ctx) +{ + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; + + AVFilterFormats *in_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO); + AVFilterFormats *out_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO); + AVFilterFormats *in_samplerates = ff_all_samplerates(); + AVFilterFormats *out_samplerates = ff_all_samplerates(); + AVFilterChannelLayouts *in_layouts = ff_all_channel_layouts(); + AVFilterChannelLayouts *out_layouts = ff_all_channel_layouts(); + + ff_formats_ref(in_formats, &inlink->out_formats); + ff_formats_ref(out_formats, &outlink->in_formats); + + ff_formats_ref(in_samplerates, &inlink->out_samplerates); + ff_formats_ref(out_samplerates, &outlink->in_samplerates); + + ff_channel_layouts_ref(in_layouts, &inlink->out_channel_layouts); + ff_channel_layouts_ref(out_layouts, &outlink->in_channel_layouts); + + return 0; +} + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AVFilterLink *inlink = ctx->inputs[0]; + ResampleContext *s = ctx->priv; + char buf1[64], buf2[64]; + int ret; + + if (s->avr) { + avresample_close(s->avr); + avresample_free(&s->avr); + } + + if (inlink->channel_layout == outlink->channel_layout && + inlink->sample_rate == outlink->sample_rate && + (inlink->format == outlink->format || + (av_get_channel_layout_nb_channels(inlink->channel_layout) == 1 && + av_get_channel_layout_nb_channels(outlink->channel_layout) == 1 && + av_get_planar_sample_fmt(inlink->format) == + av_get_planar_sample_fmt(outlink->format)))) + return 0; + + if (!(s->avr = avresample_alloc_context())) + 
return AVERROR(ENOMEM); + + if (s->options) { + AVDictionaryEntry *e = NULL; + while ((e = av_dict_get(s->options, "", e, AV_DICT_IGNORE_SUFFIX))) + av_log(ctx, AV_LOG_VERBOSE, "lavr option: %s=%s\n", e->key, e->value); + + av_opt_set_dict(s->avr, &s->options); + } + + av_opt_set_int(s->avr, "in_channel_layout", inlink ->channel_layout, 0); + av_opt_set_int(s->avr, "out_channel_layout", outlink->channel_layout, 0); + av_opt_set_int(s->avr, "in_sample_fmt", inlink ->format, 0); + av_opt_set_int(s->avr, "out_sample_fmt", outlink->format, 0); + av_opt_set_int(s->avr, "in_sample_rate", inlink ->sample_rate, 0); + av_opt_set_int(s->avr, "out_sample_rate", outlink->sample_rate, 0); + + if ((ret = avresample_open(s->avr)) < 0) + return ret; + + outlink->time_base = (AVRational){ 1, outlink->sample_rate }; + s->next_pts = AV_NOPTS_VALUE; + + av_get_channel_layout_string(buf1, sizeof(buf1), + -1, inlink ->channel_layout); + av_get_channel_layout_string(buf2, sizeof(buf2), + -1, outlink->channel_layout); + av_log(ctx, AV_LOG_VERBOSE, + "fmt:%s srate:%d cl:%s -> fmt:%s srate:%d cl:%s\n", + av_get_sample_fmt_name(inlink ->format), inlink ->sample_rate, buf1, + av_get_sample_fmt_name(outlink->format), outlink->sample_rate, buf2); + + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + ResampleContext *s = ctx->priv; + int ret = 0; + + s->got_output = 0; + while (ret >= 0 && !s->got_output) + ret = ff_request_frame(ctx->inputs[0]); + + /* flush the lavr delay buffer */ + if (ret == AVERROR_EOF && s->avr) { + AVFrame *frame; + int nb_samples = av_rescale_rnd(avresample_get_delay(s->avr), + outlink->sample_rate, + ctx->inputs[0]->sample_rate, + AV_ROUND_UP); + + if (!nb_samples) + return ret; + + frame = ff_get_audio_buffer(outlink, nb_samples); + if (!frame) + return AVERROR(ENOMEM); + + ret = avresample_convert(s->avr, frame->extended_data, + frame->linesize[0], nb_samples, + NULL, 0, 0); + if (ret <= 0) { + 
av_frame_free(&frame); + return (ret == 0) ? AVERROR_EOF : ret; + } + + frame->pts = s->next_pts; + return ff_filter_frame(outlink, frame); + } + return ret; +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *in) +{ + AVFilterContext *ctx = inlink->dst; + ResampleContext *s = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; + int ret; + + if (s->avr) { + AVFrame *out; + int delay, nb_samples; + + /* maximum possible samples lavr can output */ + delay = avresample_get_delay(s->avr); + nb_samples = av_rescale_rnd(in->nb_samples + delay, + outlink->sample_rate, inlink->sample_rate, + AV_ROUND_UP); + + out = ff_get_audio_buffer(outlink, nb_samples); + if (!out) { + ret = AVERROR(ENOMEM); + goto fail; + } + + ret = avresample_convert(s->avr, out->extended_data, out->linesize[0], + nb_samples, in->extended_data, in->linesize[0], + in->nb_samples); + if (ret <= 0) { + av_frame_free(&out); + if (ret < 0) + goto fail; + } + + av_assert0(!avresample_available(s->avr)); + + if (s->next_pts == AV_NOPTS_VALUE) { + if (in->pts == AV_NOPTS_VALUE) { + av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, " + "assuming 0.\n"); + s->next_pts = 0; + } else + s->next_pts = av_rescale_q(in->pts, inlink->time_base, + outlink->time_base); + } + + if (ret > 0) { + out->nb_samples = ret; + if (in->pts != AV_NOPTS_VALUE) { + out->pts = av_rescale_q(in->pts, inlink->time_base, + outlink->time_base) - + av_rescale(delay, outlink->sample_rate, + inlink->sample_rate); + } else + out->pts = s->next_pts; + + s->next_pts = out->pts + out->nb_samples; + + ret = ff_filter_frame(outlink, out); + s->got_output = 1; + } + +fail: + av_frame_free(&in); + } else { + in->format = outlink->format; + ret = ff_filter_frame(outlink, in); + s->got_output = 1; + } + + return ret; +} + +static const AVFilterPad avfilter_af_resample_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad 
avfilter_af_resample_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_output, + .request_frame = request_frame + }, + { NULL } +}; + +AVFilter avfilter_af_resample = { + .name = "resample", + .description = NULL_IF_CONFIG_SMALL("Audio resampling and conversion."), + .priv_size = sizeof(ResampleContext), + + .init = init, + .uninit = uninit, + .query_formats = query_formats, + + .inputs = avfilter_af_resample_inputs, + .outputs = avfilter_af_resample_outputs, +}; diff --git a/ffmpeg1/libavfilter/af_silencedetect.c b/ffmpeg1/libavfilter/af_silencedetect.c new file mode 100644 index 0000000..dbd9f5f --- /dev/null +++ b/ffmpeg1/libavfilter/af_silencedetect.c @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2012 Clément BÅ“sch + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Audio silence detector + */ + +#include /* DBL_MAX */ + +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "libavutil/timestamp.h" +#include "audio.h" +#include "formats.h" +#include "avfilter.h" +#include "internal.h" + +typedef struct { + const AVClass *class; + double noise; ///< noise amplitude ratio + double duration; ///< minimum duration of silence until notification + int64_t nb_null_samples; ///< current number of continuous zero samples + int64_t start; ///< if silence is detected, this value contains the time of the first zero sample + int last_sample_rate; ///< last sample rate to check for sample rate changes +} SilenceDetectContext; + +#define OFFSET(x) offsetof(SilenceDetectContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM +static const AVOption silencedetect_options[] = { + { "n", "set noise tolerance", OFFSET(noise), AV_OPT_TYPE_DOUBLE, {.dbl=0.001}, 0, DBL_MAX, FLAGS }, + { "noise", "set noise tolerance", OFFSET(noise), AV_OPT_TYPE_DOUBLE, {.dbl=0.001}, 0, DBL_MAX, FLAGS }, + { "d", "set minimum duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl=2.}, 0, 24*60*60, FLAGS }, + { "duration", "set minimum duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl=2.}, 0, 24*60*60, FLAGS }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(silencedetect); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + int ret; + SilenceDetectContext *silence = ctx->priv; + + silence->class = &silencedetect_class; + av_opt_set_defaults(silence); + + if ((ret = av_set_options_string(silence, args, "=", ":")) < 0) + return ret; + + av_opt_free(silence); + + return 0; +} + +static char *get_metadata_val(AVFrame *insamples, const char *key) 
+{ + AVDictionaryEntry *e = av_dict_get(insamples->metadata, key, NULL, 0); + return e && e->value ? e->value : NULL; +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) +{ + int i; + SilenceDetectContext *silence = inlink->dst->priv; + const int nb_channels = av_get_channel_layout_nb_channels(inlink->channel_layout); + const int srate = inlink->sample_rate; + const int nb_samples = insamples->nb_samples * nb_channels; + const int64_t nb_samples_notify = srate * silence->duration * nb_channels; + + // scale number of null samples to the new sample rate + if (silence->last_sample_rate && silence->last_sample_rate != srate) + silence->nb_null_samples = + srate * silence->nb_null_samples / silence->last_sample_rate; + silence->last_sample_rate = srate; + + // TODO: support more sample formats + // TODO: document metadata + if (insamples->format == AV_SAMPLE_FMT_DBL) { + double *p = (double *)insamples->data[0]; + + for (i = 0; i < nb_samples; i++, p++) { + if (*p < silence->noise && *p > -silence->noise) { + if (!silence->start) { + silence->nb_null_samples++; + if (silence->nb_null_samples >= nb_samples_notify) { + silence->start = insamples->pts - (int64_t)(silence->duration / av_q2d(inlink->time_base) + .5); + av_dict_set(&insamples->metadata, "lavfi.silence_start", + av_ts2timestr(silence->start, &inlink->time_base), 0); + av_log(silence, AV_LOG_INFO, "silence_start: %s\n", + get_metadata_val(insamples, "lavfi.silence_start")); + } + } + } else { + if (silence->start) { + av_dict_set(&insamples->metadata, "lavfi.silence_end", + av_ts2timestr(insamples->pts, &inlink->time_base), 0); + av_dict_set(&insamples->metadata, "lavfi.silence_duration", + av_ts2timestr(insamples->pts - silence->start, &inlink->time_base), 0); + av_log(silence, AV_LOG_INFO, + "silence_end: %s | silence_duration: %s\n", + get_metadata_val(insamples, "lavfi.silence_end"), + get_metadata_val(insamples, "lavfi.silence_duration")); + } + silence->nb_null_samples = 
silence->start = 0; + } + } + } + + return ff_filter_frame(inlink->dst->outputs[0], insamples); +} + +static int query_formats(AVFilterContext *ctx) +{ + AVFilterFormats *formats = NULL; + AVFilterChannelLayouts *layouts = NULL; + static const enum AVSampleFormat sample_fmts[] = { + AV_SAMPLE_FMT_DBL, + AV_SAMPLE_FMT_NONE + }; + + layouts = ff_all_channel_layouts(); + if (!layouts) + return AVERROR(ENOMEM); + ff_set_common_channel_layouts(ctx, layouts); + + formats = ff_make_format_list(sample_fmts); + if (!formats) + return AVERROR(ENOMEM); + ff_set_common_formats(ctx, formats); + + formats = ff_all_samplerates(); + if (!formats) + return AVERROR(ENOMEM); + ff_set_common_samplerates(ctx, formats); + + return 0; +} + +static const AVFilterPad silencedetect_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .get_audio_buffer = ff_null_get_audio_buffer, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad silencedetect_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL } +}; + +AVFilter avfilter_af_silencedetect = { + .name = "silencedetect", + .description = NULL_IF_CONFIG_SMALL("Detect silence."), + .priv_size = sizeof(SilenceDetectContext), + .init = init, + .query_formats = query_formats, + .inputs = silencedetect_inputs, + .outputs = silencedetect_outputs, + .priv_class = &silencedetect_class, +}; diff --git a/ffmpeg1/libavfilter/af_volume.c b/ffmpeg1/libavfilter/af_volume.c new file mode 100644 index 0000000..447e8d5 --- /dev/null +++ b/ffmpeg1/libavfilter/af_volume.c @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2011 Stefano Sabatini + * Copyright (c) 2012 Justin Ruggles + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * audio volume filter + */ + +#include "libavutil/channel_layout.h" +#include "libavutil/common.h" +#include "libavutil/eval.h" +#include "libavutil/float_dsp.h" +#include "libavutil/opt.h" +#include "audio.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" +#include "af_volume.h" + +static const char *precision_str[] = { + "fixed", "float", "double" +}; + +#define OFFSET(x) offsetof(VolumeContext, x) +#define A AV_OPT_FLAG_AUDIO_PARAM +#define F AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption volume_options[] = { + { "volume", "set volume adjustment", + OFFSET(volume), AV_OPT_TYPE_DOUBLE, { .dbl = 1.0 }, 0, 0x7fffff, A|F }, + { "precision", "select mathematical precision", + OFFSET(precision), AV_OPT_TYPE_INT, { .i64 = PRECISION_FLOAT }, PRECISION_FIXED, PRECISION_DOUBLE, A|F, "precision" }, + { "fixed", "select 8-bit fixed-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FIXED }, INT_MIN, INT_MAX, A|F, "precision" }, + { "float", "select 32-bit floating-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FLOAT }, INT_MIN, INT_MAX, A|F, "precision" }, + { "double", "select 64-bit floating-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_DOUBLE }, INT_MIN, INT_MAX, A|F, "precision" }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(volume); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + VolumeContext *vol = ctx->priv; + + if (vol->precision == PRECISION_FIXED) { + vol->volume_i = (int)(vol->volume * 256 + 0.5); + vol->volume = 
vol->volume_i / 256.0; + av_log(ctx, AV_LOG_VERBOSE, "volume:(%d/256)(%f)(%1.2fdB) precision:fixed\n", + vol->volume_i, vol->volume, 20.0*log(vol->volume)/M_LN10); + } else { + av_log(ctx, AV_LOG_VERBOSE, "volume:(%f)(%1.2fdB) precision:%s\n", + vol->volume, 20.0*log(vol->volume)/M_LN10, + precision_str[vol->precision]); + } + + return 0; +} + +static int query_formats(AVFilterContext *ctx) +{ + VolumeContext *vol = ctx->priv; + AVFilterFormats *formats = NULL; + AVFilterChannelLayouts *layouts; + static const enum AVSampleFormat sample_fmts[][7] = { + /* PRECISION_FIXED */ + { + AV_SAMPLE_FMT_U8, + AV_SAMPLE_FMT_U8P, + AV_SAMPLE_FMT_S16, + AV_SAMPLE_FMT_S16P, + AV_SAMPLE_FMT_S32, + AV_SAMPLE_FMT_S32P, + AV_SAMPLE_FMT_NONE + }, + /* PRECISION_FLOAT */ + { + AV_SAMPLE_FMT_FLT, + AV_SAMPLE_FMT_FLTP, + AV_SAMPLE_FMT_NONE + }, + /* PRECISION_DOUBLE */ + { + AV_SAMPLE_FMT_DBL, + AV_SAMPLE_FMT_DBLP, + AV_SAMPLE_FMT_NONE + } + }; + + layouts = ff_all_channel_layouts(); + if (!layouts) + return AVERROR(ENOMEM); + ff_set_common_channel_layouts(ctx, layouts); + + formats = ff_make_format_list(sample_fmts[vol->precision]); + if (!formats) + return AVERROR(ENOMEM); + ff_set_common_formats(ctx, formats); + + formats = ff_all_samplerates(); + if (!formats) + return AVERROR(ENOMEM); + ff_set_common_samplerates(ctx, formats); + + return 0; +} + +static inline void scale_samples_u8(uint8_t *dst, const uint8_t *src, + int nb_samples, int volume) +{ + int i; + for (i = 0; i < nb_samples; i++) + dst[i] = av_clip_uint8(((((int64_t)src[i] - 128) * volume + 128) >> 8) + 128); +} + +static inline void scale_samples_u8_small(uint8_t *dst, const uint8_t *src, + int nb_samples, int volume) +{ + int i; + for (i = 0; i < nb_samples; i++) + dst[i] = av_clip_uint8((((src[i] - 128) * volume + 128) >> 8) + 128); +} + +static inline void scale_samples_s16(uint8_t *dst, const uint8_t *src, + int nb_samples, int volume) +{ + int i; + int16_t *smp_dst = (int16_t *)dst; + const int16_t *smp_src = 
(const int16_t *)src; + for (i = 0; i < nb_samples; i++) + smp_dst[i] = av_clip_int16(((int64_t)smp_src[i] * volume + 128) >> 8); +} + +static inline void scale_samples_s16_small(uint8_t *dst, const uint8_t *src, + int nb_samples, int volume) +{ + int i; + int16_t *smp_dst = (int16_t *)dst; + const int16_t *smp_src = (const int16_t *)src; + for (i = 0; i < nb_samples; i++) + smp_dst[i] = av_clip_int16((smp_src[i] * volume + 128) >> 8); +} + +static inline void scale_samples_s32(uint8_t *dst, const uint8_t *src, + int nb_samples, int volume) +{ + int i; + int32_t *smp_dst = (int32_t *)dst; + const int32_t *smp_src = (const int32_t *)src; + for (i = 0; i < nb_samples; i++) + smp_dst[i] = av_clipl_int32((((int64_t)smp_src[i] * volume + 128) >> 8)); +} + +static void volume_init(VolumeContext *vol) +{ + vol->samples_align = 1; + + switch (av_get_packed_sample_fmt(vol->sample_fmt)) { + case AV_SAMPLE_FMT_U8: + if (vol->volume_i < 0x1000000) + vol->scale_samples = scale_samples_u8_small; + else + vol->scale_samples = scale_samples_u8; + break; + case AV_SAMPLE_FMT_S16: + if (vol->volume_i < 0x10000) + vol->scale_samples = scale_samples_s16_small; + else + vol->scale_samples = scale_samples_s16; + break; + case AV_SAMPLE_FMT_S32: + vol->scale_samples = scale_samples_s32; + break; + case AV_SAMPLE_FMT_FLT: + avpriv_float_dsp_init(&vol->fdsp, 0); + vol->samples_align = 4; + break; + case AV_SAMPLE_FMT_DBL: + avpriv_float_dsp_init(&vol->fdsp, 0); + vol->samples_align = 8; + break; + } + + if (ARCH_X86) + ff_volume_init_x86(vol); +} + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + VolumeContext *vol = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + + vol->sample_fmt = inlink->format; + vol->channels = av_get_channel_layout_nb_channels(inlink->channel_layout); + vol->planes = av_sample_fmt_is_planar(inlink->format) ? 
vol->channels : 1; + + volume_init(vol); + + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *buf) +{ + VolumeContext *vol = inlink->dst->priv; + AVFilterLink *outlink = inlink->dst->outputs[0]; + int nb_samples = buf->nb_samples; + AVFrame *out_buf; + + if (vol->volume == 1.0 || vol->volume_i == 256) + return ff_filter_frame(outlink, buf); + + /* do volume scaling in-place if input buffer is writable */ + if (av_frame_is_writable(buf)) { + out_buf = buf; + } else { + out_buf = ff_get_audio_buffer(inlink, nb_samples); + if (!out_buf) + return AVERROR(ENOMEM); + out_buf->pts = buf->pts; + } + + if (vol->precision != PRECISION_FIXED || vol->volume_i > 0) { + int p, plane_samples; + + if (av_sample_fmt_is_planar(buf->format)) + plane_samples = FFALIGN(nb_samples, vol->samples_align); + else + plane_samples = FFALIGN(nb_samples * vol->channels, vol->samples_align); + + if (vol->precision == PRECISION_FIXED) { + for (p = 0; p < vol->planes; p++) { + vol->scale_samples(out_buf->extended_data[p], + buf->extended_data[p], plane_samples, + vol->volume_i); + } + } else if (av_get_packed_sample_fmt(vol->sample_fmt) == AV_SAMPLE_FMT_FLT) { + for (p = 0; p < vol->planes; p++) { + vol->fdsp.vector_fmul_scalar((float *)out_buf->extended_data[p], + (const float *)buf->extended_data[p], + vol->volume, plane_samples); + } + } else { + for (p = 0; p < vol->planes; p++) { + vol->fdsp.vector_dmul_scalar((double *)out_buf->extended_data[p], + (const double *)buf->extended_data[p], + vol->volume, plane_samples); + } + } + } + + if (buf != out_buf) + av_frame_free(&buf); + + return ff_filter_frame(outlink, out_buf); +} + +static const AVFilterPad avfilter_af_volume_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad avfilter_af_volume_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_output, + }, + { NULL } +}; + +static const 
char *const shorthand[] = { "volume", "precision", NULL }; + +AVFilter avfilter_af_volume = { + .name = "volume", + .description = NULL_IF_CONFIG_SMALL("Change input volume."), + .query_formats = query_formats, + .priv_size = sizeof(VolumeContext), + .init = init, + .inputs = avfilter_af_volume_inputs, + .outputs = avfilter_af_volume_outputs, + .priv_class = &volume_class, + .shorthand = shorthand, +}; diff --git a/ffmpeg1/libavfilter/af_volume.h b/ffmpeg1/libavfilter/af_volume.h new file mode 100644 index 0000000..bd7932e --- /dev/null +++ b/ffmpeg1/libavfilter/af_volume.h @@ -0,0 +1,55 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * audio volume filter + */ + +#ifndef AVFILTER_AF_VOLUME_H +#define AVFILTER_AF_VOLUME_H + +#include "libavutil/common.h" +#include "libavutil/float_dsp.h" +#include "libavutil/opt.h" +#include "libavutil/samplefmt.h" + +enum PrecisionType { + PRECISION_FIXED = 0, + PRECISION_FLOAT, + PRECISION_DOUBLE, +}; + +typedef struct VolumeContext { + const AVClass *class; + AVFloatDSPContext fdsp; + enum PrecisionType precision; + double volume; + int volume_i; + int channels; + int planes; + enum AVSampleFormat sample_fmt; + + void (*scale_samples)(uint8_t *dst, const uint8_t *src, int nb_samples, + int volume); + int samples_align; +} VolumeContext; + +void ff_volume_init_x86(VolumeContext *vol); + +#endif /* AVFILTER_AF_VOLUME_H */ diff --git a/ffmpeg1/libavfilter/af_volumedetect.c b/ffmpeg1/libavfilter/af_volumedetect.c new file mode 100644 index 0000000..79d992e --- /dev/null +++ b/ffmpeg1/libavfilter/af_volumedetect.c @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2012 Nicolas George + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/channel_layout.h" +#include "libavutil/avassert.h" +#include "audio.h" +#include "avfilter.h" +#include "internal.h" + +typedef struct { + /** + * Number of samples at each PCM value. + * histogram[0x8000 + i] is the number of samples at value i. + * The extra element is there for symmetry. + */ + uint64_t histogram[0x10001]; +} VolDetectContext; + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVSampleFormat sample_fmts[] = { + AV_SAMPLE_FMT_S16, + AV_SAMPLE_FMT_S16P, + AV_SAMPLE_FMT_NONE + }; + AVFilterFormats *formats; + + if (!(formats = ff_make_format_list(sample_fmts))) + return AVERROR(ENOMEM); + ff_set_common_formats(ctx, formats); + + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *samples) +{ + AVFilterContext *ctx = inlink->dst; + VolDetectContext *vd = ctx->priv; + int64_t layout = samples->channel_layout; + int nb_samples = samples->nb_samples; + int nb_channels = av_get_channel_layout_nb_channels(layout); + int nb_planes = nb_channels; + int plane, i; + int16_t *pcm; + + if (!av_sample_fmt_is_planar(samples->format)) { + nb_samples *= nb_channels; + nb_planes = 1; + } + for (plane = 0; plane < nb_planes; plane++) { + pcm = (int16_t *)samples->extended_data[plane]; + for (i = 0; i < nb_samples; i++) + vd->histogram[pcm[i] + 0x8000]++; + } + + return ff_filter_frame(inlink->dst->outputs[0], samples); +} + +#define MAX_DB 91 + +static inline double logdb(uint64_t v) +{ + double d = v / (double)(0x8000 * 0x8000); + if (!v) + return MAX_DB; + return log(d) * -4.3429448190325182765112891891660508229; /* -10/log(10) */ +} + +static void print_stats(AVFilterContext *ctx) +{ + VolDetectContext *vd = ctx->priv; + int i, max_volume, shift; + uint64_t nb_samples = 0, power 
= 0, nb_samples_shift = 0, sum = 0; + uint64_t histdb[MAX_DB + 1] = { 0 }; + + for (i = 0; i < 0x10000; i++) + nb_samples += vd->histogram[i]; + av_log(ctx, AV_LOG_INFO, "n_samples: %"PRId64"\n", nb_samples); + if (!nb_samples) + return; + + /* If nb_samples > 1<<34, there is a risk of overflow in the + multiplication or the sum: shift all histogram values to avoid that. + The total number of samples must be recomputed to avoid rounding + errors. */ + shift = av_log2(nb_samples >> 33); + for (i = 0; i < 0x10000; i++) { + nb_samples_shift += vd->histogram[i] >> shift; + power += (i - 0x8000) * (i - 0x8000) * (vd->histogram[i] >> shift); + } + if (!nb_samples_shift) + return; + power = (power + nb_samples_shift / 2) / nb_samples_shift; + av_assert0(power <= 0x8000 * 0x8000); + av_log(ctx, AV_LOG_INFO, "mean_volume: %.1f dB\n", -logdb(power)); + + max_volume = 0x8000; + while (max_volume > 0 && !vd->histogram[0x8000 + max_volume] && + !vd->histogram[0x8000 - max_volume]) + max_volume--; + av_log(ctx, AV_LOG_INFO, "max_volume: %.1f dB\n", -logdb(max_volume * max_volume)); + + for (i = 0; i < 0x10000; i++) + histdb[(int)logdb((i - 0x8000) * (i - 0x8000))] += vd->histogram[i]; + for (i = 0; i <= MAX_DB && !histdb[i]; i++); + for (; i <= MAX_DB && sum < nb_samples / 1000; i++) { + av_log(ctx, AV_LOG_INFO, "histogram_%ddb: %"PRId64"\n", i, histdb[i]); + sum += histdb[i]; + } +} + +static void uninit(AVFilterContext *ctx) +{ + print_stats(ctx); +} + +static const AVFilterPad volumedetect_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .get_audio_buffer = ff_null_get_audio_buffer, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad volumedetect_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL } +}; + +AVFilter avfilter_af_volumedetect = { + .name = "volumedetect", + .description = NULL_IF_CONFIG_SMALL("Detect audio volume."), + + .priv_size = sizeof(VolDetectContext), + .query_formats = 
query_formats, + .uninit = uninit, + .inputs = volumedetect_inputs, + .outputs = volumedetect_outputs, +}; diff --git a/ffmpeg1/libavfilter/all_channel_layouts.inc b/ffmpeg1/libavfilter/all_channel_layouts.inc new file mode 100644 index 0000000..878e1f5 --- /dev/null +++ b/ffmpeg1/libavfilter/all_channel_layouts.inc @@ -0,0 +1,68 @@ +AV_CH_FRONT_CENTER, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY, +AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER, +AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, 
+AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, 
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, 
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, diff --git a/ffmpeg1/libavfilter/allfilters.c b/ffmpeg1/libavfilter/allfilters.c new file mode 100644 index 0000000..45a67e5 --- /dev/null +++ b/ffmpeg1/libavfilter/allfilters.c @@ -0,0 +1,202 @@ +/* + * filter registration + * Copyright (c) 2008 Vitor Sessak + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "avfilter.h" +#include "config.h" + + +#define REGISTER_FILTER(X, x, y) \ + { \ + extern AVFilter avfilter_##y##_##x; \ + if (CONFIG_##X##_FILTER) \ + avfilter_register(&avfilter_##y##_##x); \ + } + +#define REGISTER_FILTER_UNCONDITIONAL(x) \ + { \ + extern AVFilter avfilter_##x; \ + avfilter_register(&avfilter_##x); \ + } + +void avfilter_register_all(void) +{ + static int initialized; + + if (initialized) + return; + initialized = 1; + + REGISTER_FILTER(ACONVERT, aconvert, af); + REGISTER_FILTER(AFADE, afade, af); + REGISTER_FILTER(AFORMAT, aformat, af); + REGISTER_FILTER(ALLPASS, allpass, af); + REGISTER_FILTER(AMERGE, amerge, af); + REGISTER_FILTER(AMIX, amix, af); + REGISTER_FILTER(ANULL, anull, af); + REGISTER_FILTER(APAD, apad, af); + REGISTER_FILTER(APERMS, aperms, af); + REGISTER_FILTER(ARESAMPLE, aresample, af); + REGISTER_FILTER(ASELECT, aselect, af); + REGISTER_FILTER(ASENDCMD, asendcmd, af); + REGISTER_FILTER(ASETNSAMPLES, asetnsamples, af); + REGISTER_FILTER(ASETPTS, asetpts, af); + REGISTER_FILTER(ASETTB, asettb, af); + REGISTER_FILTER(ASHOWINFO, ashowinfo, af); + REGISTER_FILTER(ASPLIT, asplit, af); + REGISTER_FILTER(ASTREAMSYNC, astreamsync, af); + REGISTER_FILTER(ASYNCTS, asyncts, af); + REGISTER_FILTER(ATEMPO, atempo, af); + REGISTER_FILTER(BANDPASS, bandpass, af); + REGISTER_FILTER(BANDREJECT, bandreject, af); + REGISTER_FILTER(BASS, bass, af); + REGISTER_FILTER(BIQUAD, biquad, af); + REGISTER_FILTER(CHANNELMAP, channelmap, af); + REGISTER_FILTER(CHANNELSPLIT, channelsplit, af); + REGISTER_FILTER(EARWAX, earwax, af); + REGISTER_FILTER(EBUR128, ebur128, af); + REGISTER_FILTER(EQUALIZER, equalizer, af); + REGISTER_FILTER(HIGHPASS, highpass, af); + REGISTER_FILTER(JOIN, join, af); + REGISTER_FILTER(LOWPASS, lowpass, 
af); + REGISTER_FILTER(PAN, pan, af); + REGISTER_FILTER(RESAMPLE, resample, af); + REGISTER_FILTER(SILENCEDETECT, silencedetect, af); + REGISTER_FILTER(TREBLE, treble, af); + REGISTER_FILTER(VOLUME, volume, af); + REGISTER_FILTER(VOLUMEDETECT, volumedetect, af); + + REGISTER_FILTER(AEVALSRC, aevalsrc, asrc); + REGISTER_FILTER(ANULLSRC, anullsrc, asrc); + REGISTER_FILTER(FLITE, flite, asrc); + REGISTER_FILTER(SINE, sine, asrc); + + REGISTER_FILTER(ANULLSINK, anullsink, asink); + + REGISTER_FILTER(ALPHAEXTRACT, alphaextract, vf); + REGISTER_FILTER(ALPHAMERGE, alphamerge, vf); + REGISTER_FILTER(ASS, ass, vf); + REGISTER_FILTER(BBOX, bbox, vf); + REGISTER_FILTER(BLACKDETECT, blackdetect, vf); + REGISTER_FILTER(BLACKFRAME, blackframe, vf); + REGISTER_FILTER(BLEND, blend, vf); + REGISTER_FILTER(BOXBLUR, boxblur, vf); + REGISTER_FILTER(COLORMATRIX, colormatrix, vf); + REGISTER_FILTER(COPY, copy, vf); + REGISTER_FILTER(CROP, crop, vf); + REGISTER_FILTER(CROPDETECT, cropdetect, vf); + REGISTER_FILTER(CURVES, curves, vf); + REGISTER_FILTER(DECIMATE, decimate, vf); + REGISTER_FILTER(DELOGO, delogo, vf); + REGISTER_FILTER(DESHAKE, deshake, vf); + REGISTER_FILTER(DRAWBOX, drawbox, vf); + REGISTER_FILTER(DRAWTEXT, drawtext, vf); + REGISTER_FILTER(EDGEDETECT, edgedetect, vf); + REGISTER_FILTER(FADE, fade, vf); + REGISTER_FILTER(FIELD, field, vf); + REGISTER_FILTER(FIELDORDER, fieldorder, vf); + REGISTER_FILTER(FORMAT, format, vf); + REGISTER_FILTER(FPS, fps, vf); + REGISTER_FILTER(FRAMESTEP, framestep, vf); + REGISTER_FILTER(FREI0R, frei0r, vf); + REGISTER_FILTER(GEQ, geq, vf); + REGISTER_FILTER(GRADFUN, gradfun, vf); + REGISTER_FILTER(HFLIP, hflip, vf); + REGISTER_FILTER(HISTEQ, histeq, vf); + REGISTER_FILTER(HISTOGRAM, histogram, vf); + REGISTER_FILTER(HQDN3D, hqdn3d, vf); + REGISTER_FILTER(HUE, hue, vf); + REGISTER_FILTER(IDET, idet, vf); + REGISTER_FILTER(IL, il, vf); + REGISTER_FILTER(KERNDEINT, kerndeint, vf); + REGISTER_FILTER(LUT, lut, vf); + REGISTER_FILTER(LUTRGB, 
lutrgb, vf); + REGISTER_FILTER(LUTYUV, lutyuv, vf); + REGISTER_FILTER(MP, mp, vf); + REGISTER_FILTER(NEGATE, negate, vf); + REGISTER_FILTER(NOFORMAT, noformat, vf); + REGISTER_FILTER(NOISE, noise, vf); + REGISTER_FILTER(NULL, null, vf); + REGISTER_FILTER(OCV, ocv, vf); + REGISTER_FILTER(OVERLAY, overlay, vf); + REGISTER_FILTER(PAD, pad, vf); + REGISTER_FILTER(PERMS, perms, vf); + REGISTER_FILTER(PIXDESCTEST, pixdesctest, vf); + REGISTER_FILTER(PP, pp, vf); + REGISTER_FILTER(REMOVELOGO, removelogo, vf); + REGISTER_FILTER(SCALE, scale, vf); + REGISTER_FILTER(SELECT, select, vf); + REGISTER_FILTER(SENDCMD, sendcmd, vf); + REGISTER_FILTER(SETDAR, setdar, vf); + REGISTER_FILTER(SETFIELD, setfield, vf); + REGISTER_FILTER(SETPTS, setpts, vf); + REGISTER_FILTER(SETSAR, setsar, vf); + REGISTER_FILTER(SETTB, settb, vf); + REGISTER_FILTER(SHOWINFO, showinfo, vf); + REGISTER_FILTER(SMARTBLUR, smartblur, vf); + REGISTER_FILTER(SPLIT, split, vf); + REGISTER_FILTER(STEREO3D, stereo3d, vf); + REGISTER_FILTER(SUBTITLES, subtitles, vf); + REGISTER_FILTER(SUPER2XSAI, super2xsai, vf); + REGISTER_FILTER(SWAPUV, swapuv, vf); + REGISTER_FILTER(THUMBNAIL, thumbnail, vf); + REGISTER_FILTER(TILE, tile, vf); + REGISTER_FILTER(TINTERLACE, tinterlace, vf); + REGISTER_FILTER(TRANSPOSE, transpose, vf); + REGISTER_FILTER(UNSHARP, unsharp, vf); + REGISTER_FILTER(VFLIP, vflip, vf); + REGISTER_FILTER(YADIF, yadif, vf); + + REGISTER_FILTER(CELLAUTO, cellauto, vsrc); + REGISTER_FILTER(COLOR, color, vsrc); + REGISTER_FILTER(FREI0R, frei0r_src, vsrc); + REGISTER_FILTER(LIFE, life, vsrc); + REGISTER_FILTER(MANDELBROT, mandelbrot, vsrc); + REGISTER_FILTER(MPTESTSRC, mptestsrc, vsrc); + REGISTER_FILTER(NULLSRC, nullsrc, vsrc); + REGISTER_FILTER(RGBTESTSRC, rgbtestsrc, vsrc); + REGISTER_FILTER(SMPTEBARS, smptebars, vsrc); + REGISTER_FILTER(TESTSRC, testsrc, vsrc); + + REGISTER_FILTER(NULLSINK, nullsink, vsink); + + /* multimedia filters */ + REGISTER_FILTER(CONCAT, concat, avf); + 
REGISTER_FILTER(SHOWSPECTRUM, showspectrum, avf); + REGISTER_FILTER(SHOWWAVES, showwaves, avf); + + /* multimedia sources */ + REGISTER_FILTER(AMOVIE, amovie, avsrc); + REGISTER_FILTER(MOVIE, movie, avsrc); + +#if FF_API_AVFILTERBUFFER + REGISTER_FILTER_UNCONDITIONAL(vsink_ffbuffersink); + REGISTER_FILTER_UNCONDITIONAL(asink_ffabuffersink); +#endif + + /* those filters are part of public or internal API => registered + * unconditionally */ + REGISTER_FILTER_UNCONDITIONAL(asrc_abuffer); + REGISTER_FILTER_UNCONDITIONAL(vsrc_buffer); + REGISTER_FILTER_UNCONDITIONAL(asink_abuffer); + REGISTER_FILTER_UNCONDITIONAL(vsink_buffer); + REGISTER_FILTER_UNCONDITIONAL(af_afifo); + REGISTER_FILTER_UNCONDITIONAL(vf_fifo); +} diff --git a/ffmpeg1/libavfilter/asink_anullsink.c b/ffmpeg1/libavfilter/asink_anullsink.c new file mode 100644 index 0000000..8015da2 --- /dev/null +++ b/ffmpeg1/libavfilter/asink_anullsink.c @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/internal.h" +#include "avfilter.h" +#include "internal.h" + +static int null_filter_frame(AVFilterLink *link, AVFrame *frame) +{ + av_frame_free(&frame); + return 0; +} + +static const AVFilterPad avfilter_asink_anullsink_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = null_filter_frame, + }, + { NULL }, +}; + +AVFilter avfilter_asink_anullsink = { + .name = "anullsink", + .description = NULL_IF_CONFIG_SMALL("Do absolutely nothing with the input audio."), + + .priv_size = 0, + + .inputs = avfilter_asink_anullsink_inputs, + .outputs = NULL, +}; diff --git a/ffmpeg1/libavfilter/asrc_abuffer.h b/ffmpeg1/libavfilter/asrc_abuffer.h new file mode 100644 index 0000000..aa34461 --- /dev/null +++ b/ffmpeg1/libavfilter/asrc_abuffer.h @@ -0,0 +1,91 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_ASRC_ABUFFER_H +#define AVFILTER_ASRC_ABUFFER_H + +#include "avfilter.h" + +/** + * @file + * memory buffer source for audio + * + * @deprecated use buffersrc.h instead. + */ + +/** + * Queue an audio buffer to the audio buffer source. + * + * @param abuffersrc audio source buffer context + * @param data pointers to the samples planes + * @param linesize linesizes of each audio buffer plane + * @param nb_samples number of samples per channel + * @param sample_fmt sample format of the audio data + * @param ch_layout channel layout of the audio data + * @param planar flag to indicate if audio data is planar or packed + * @param pts presentation timestamp of the audio buffer + * @param flags unused + * + * @deprecated use av_buffersrc_add_ref() instead. + */ +attribute_deprecated +int av_asrc_buffer_add_samples(AVFilterContext *abuffersrc, + uint8_t *data[8], int linesize[8], + int nb_samples, int sample_rate, + int sample_fmt, int64_t ch_layout, int planar, + int64_t pts, int av_unused flags); + +/** + * Queue an audio buffer to the audio buffer source. + * + * This is similar to av_asrc_buffer_add_samples(), but the samples + * are stored in a buffer with known size. + * + * @param abuffersrc audio source buffer context + * @param buf pointer to the samples data, packed is assumed + * @param size the size in bytes of the buffer, it must contain an + * integer number of samples + * @param sample_fmt sample format of the audio data + * @param ch_layout channel layout of the audio data + * @param pts presentation timestamp of the audio buffer + * @param flags unused + * + * @deprecated use av_buffersrc_add_ref() instead. 
+ */ +attribute_deprecated +int av_asrc_buffer_add_buffer(AVFilterContext *abuffersrc, + uint8_t *buf, int buf_size, + int sample_rate, + int sample_fmt, int64_t ch_layout, int planar, + int64_t pts, int av_unused flags); + +/** + * Queue an audio buffer to the audio buffer source. + * + * @param abuffersrc audio source buffer context + * @param samplesref buffer ref to queue + * @param flags unused + * + * @deprecated use av_buffersrc_add_ref() instead. + */ +attribute_deprecated +int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *abuffersrc, + AVFilterBufferRef *samplesref, + int av_unused flags); + +#endif /* AVFILTER_ASRC_ABUFFER_H */ diff --git a/ffmpeg1/libavfilter/asrc_aevalsrc.c b/ffmpeg1/libavfilter/asrc_aevalsrc.c new file mode 100644 index 0000000..409399f --- /dev/null +++ b/ffmpeg1/libavfilter/asrc_aevalsrc.c @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2011 Stefano Sabatini + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * eval audio source + */ + +#include "libavutil/avassert.h" +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/eval.h" +#include "libavutil/opt.h" +#include "libavutil/parseutils.h" +#include "avfilter.h" +#include "audio.h" +#include "internal.h" + +static const char * const var_names[] = { + "n", ///< number of frame + "t", ///< timestamp expressed in seconds + "s", ///< sample rate + NULL +}; + +enum var_name { + VAR_N, + VAR_T, + VAR_S, + VAR_VARS_NB +}; + +typedef struct { + const AVClass *class; + char *sample_rate_str; + int sample_rate; + int64_t chlayout; + char *chlayout_str; + int nb_channels; + int64_t pts; + AVExpr *expr[8]; + char *expr_str[8]; + int nb_samples; ///< number of samples per requested frame + char *duration_str; ///< total duration of the generated audio + double duration; + uint64_t n; + double var_values[VAR_VARS_NB]; +} EvalContext; + +#define OFFSET(x) offsetof(EvalContext, x) +#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption aevalsrc_options[]= { + { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS }, + { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS }, + { "sample_rate", "set the sample rate", OFFSET(sample_rate_str), AV_OPT_TYPE_STRING, {.str = "44100"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "s", "set the sample rate", OFFSET(sample_rate_str), AV_OPT_TYPE_STRING, {.str = "44100"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "duration", "set audio duration", OFFSET(duration_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "d", "set audio duration", 
OFFSET(duration_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "channel_layout", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "c", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, +{NULL}, +}; + +AVFILTER_DEFINE_CLASS(aevalsrc); + +static int init(AVFilterContext *ctx, const char *args) +{ + EvalContext *eval = ctx->priv; + char *args1 = av_strdup(args); + char *expr, *buf, *bufptr; + int ret, i; + + eval->class = &aevalsrc_class; + av_opt_set_defaults(eval); + + if (!args1) { + av_log(ctx, AV_LOG_ERROR, "Argument is empty\n"); + ret = args ? AVERROR(ENOMEM) : AVERROR(EINVAL); + goto end; + } + + /* parse expressions */ + buf = args1; + i = 0; + while (expr = av_strtok(buf, ":", &bufptr)) { + ret = av_expr_parse(&eval->expr[i], expr, var_names, + NULL, NULL, NULL, NULL, 0, ctx); + if (ret < 0) + goto end; + i++; + if (bufptr && *bufptr == ':') { /* found last expression */ + bufptr++; + break; + } + buf = NULL; + } + eval->nb_channels = i; + + if (bufptr && (ret = av_set_options_string(eval, bufptr, "=", ":")) < 0) + goto end; + + if (eval->chlayout_str) { + int n; + ret = ff_parse_channel_layout(&eval->chlayout, eval->chlayout_str, ctx); + if (ret < 0) + goto end; + + n = av_get_channel_layout_nb_channels(eval->chlayout); + if (n != eval->nb_channels) { + av_log(ctx, AV_LOG_ERROR, + "Mismatch between the specified number of channels '%d' " + "and the number of channels '%d' in the specified channel layout '%s'\n", + eval->nb_channels, n, eval->chlayout_str); + ret = AVERROR(EINVAL); + goto end; + } + } else { + /* guess channel layout from nb expressions/channels */ + eval->chlayout = av_get_default_channel_layout(eval->nb_channels); + if (!eval->chlayout) { + av_log(ctx, AV_LOG_ERROR, "Invalid number of channels '%d' provided\n", + eval->nb_channels); + ret = AVERROR(EINVAL); + goto end; + } + } + + if ((ret = ff_parse_sample_rate(&eval->sample_rate, 
eval->sample_rate_str, ctx))) + goto end; + + eval->duration = -1; + if (eval->duration_str) { + int64_t us = -1; + if ((ret = av_parse_time(&us, eval->duration_str, 1)) < 0) { + av_log(ctx, AV_LOG_ERROR, "Invalid duration: '%s'\n", eval->duration_str); + goto end; + } + eval->duration = (double)us / 1000000; + } + eval->n = 0; + +end: + av_free(args1); + return ret; +} + +static void uninit(AVFilterContext *ctx) +{ + EvalContext *eval = ctx->priv; + int i; + + for (i = 0; i < 8; i++) { + av_expr_free(eval->expr[i]); + eval->expr[i] = NULL; + } + av_freep(&eval->chlayout_str); + av_freep(&eval->duration_str); + av_freep(&eval->sample_rate_str); +} + +static int config_props(AVFilterLink *outlink) +{ + EvalContext *eval = outlink->src->priv; + char buf[128]; + + outlink->time_base = (AVRational){1, eval->sample_rate}; + outlink->sample_rate = eval->sample_rate; + + eval->var_values[VAR_S] = eval->sample_rate; + + av_get_channel_layout_string(buf, sizeof(buf), 0, eval->chlayout); + + av_log(outlink->src, AV_LOG_VERBOSE, + "sample_rate:%d chlayout:%s duration:%f\n", + eval->sample_rate, buf, eval->duration); + + return 0; +} + +static int query_formats(AVFilterContext *ctx) +{ + EvalContext *eval = ctx->priv; + static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE }; + int64_t chlayouts[] = { eval->chlayout, -1 }; + int sample_rates[] = { eval->sample_rate, -1 }; + + ff_set_common_formats (ctx, ff_make_format_list(sample_fmts)); + ff_set_common_channel_layouts(ctx, avfilter_make_format64_list(chlayouts)); + ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates)); + + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + EvalContext *eval = outlink->src->priv; + AVFrame *samplesref; + int i, j; + double t = eval->n * (double)1/eval->sample_rate; + + if (eval->duration >= 0 && t >= eval->duration) + return AVERROR_EOF; + + samplesref = ff_get_audio_buffer(outlink, eval->nb_samples); + + /* evaluate expression 
for each single sample and for each channel */ + for (i = 0; i < eval->nb_samples; i++, eval->n++) { + eval->var_values[VAR_N] = eval->n; + eval->var_values[VAR_T] = eval->var_values[VAR_N] * (double)1/eval->sample_rate; + + for (j = 0; j < eval->nb_channels; j++) { + *((double *) samplesref->extended_data[j] + i) = + av_expr_eval(eval->expr[j], eval->var_values, NULL); + } + } + + samplesref->pts = eval->pts; + samplesref->sample_rate = eval->sample_rate; + eval->pts += eval->nb_samples; + + return ff_filter_frame(outlink, samplesref); +} + +static const AVFilterPad aevalsrc_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_props, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_asrc_aevalsrc = { + .name = "aevalsrc", + .description = NULL_IF_CONFIG_SMALL("Generate an audio signal generated by an expression."), + + .query_formats = query_formats, + .init = init, + .uninit = uninit, + .priv_size = sizeof(EvalContext), + .inputs = NULL, + .outputs = aevalsrc_outputs, + .priv_class = &aevalsrc_class, +}; diff --git a/ffmpeg1/libavfilter/asrc_anullsrc.c b/ffmpeg1/libavfilter/asrc_anullsrc.c new file mode 100644 index 0000000..f8e6ac5 --- /dev/null +++ b/ffmpeg1/libavfilter/asrc_anullsrc.c @@ -0,0 +1,141 @@ +/* + * Copyright 2010 S.N. Hemanth Meenakshisundaram + * Copyright 2010 Stefano Sabatini + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * null audio source + */ + +#include +#include + +#include "libavutil/channel_layout.h" +#include "libavutil/internal.h" +#include "libavutil/opt.h" +#include "audio.h" +#include "avfilter.h" +#include "internal.h" + +typedef struct { + const AVClass *class; + char *channel_layout_str; + uint64_t channel_layout; + char *sample_rate_str; + int sample_rate; + int nb_samples; ///< number of samples per requested frame + int64_t pts; +} ANullContext; + +#define OFFSET(x) offsetof(ANullContext, x) +#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption anullsrc_options[]= { + { "channel_layout", "set channel_layout", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, FLAGS }, + { "cl", "set channel_layout", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, FLAGS }, + { "sample_rate", "set sample rate", OFFSET(sample_rate_str) , AV_OPT_TYPE_STRING, {.str = "44100"}, 0, 0, FLAGS }, + { "r", "set sample rate", OFFSET(sample_rate_str) , AV_OPT_TYPE_STRING, {.str = "44100"}, 0, 0, FLAGS }, + { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS }, + { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(anullsrc); + +static int init(AVFilterContext *ctx, const char *args) +{ + ANullContext *null = ctx->priv; + int ret; + + null->class = &anullsrc_class; + av_opt_set_defaults(null); + + if ((ret = (av_set_options_string(null, args, "=", ":"))) < 0) + return ret; + + if ((ret = ff_parse_sample_rate(&null->sample_rate, + null->sample_rate_str, ctx)) < 0) + return 
ret; + + if ((ret = ff_parse_channel_layout(&null->channel_layout, + null->channel_layout_str, ctx)) < 0) + return ret; + + return 0; +} + +static int config_props(AVFilterLink *outlink) +{ + ANullContext *null = outlink->src->priv; + char buf[128]; + int chans_nb; + + outlink->sample_rate = null->sample_rate; + outlink->channel_layout = null->channel_layout; + + chans_nb = av_get_channel_layout_nb_channels(null->channel_layout); + av_get_channel_layout_string(buf, sizeof(buf), chans_nb, null->channel_layout); + av_log(outlink->src, AV_LOG_VERBOSE, + "sample_rate:%d channel_layout:'%s' nb_samples:%d\n", + null->sample_rate, buf, null->nb_samples); + + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + int ret; + ANullContext *null = outlink->src->priv; + AVFrame *samplesref; + + samplesref = ff_get_audio_buffer(outlink, null->nb_samples); + samplesref->pts = null->pts; + samplesref->channel_layout = null->channel_layout; + samplesref->sample_rate = outlink->sample_rate; + + ret = ff_filter_frame(outlink, av_frame_clone(samplesref)); + av_frame_free(&samplesref); + + null->pts += null->nb_samples; + return ret; +} + +static const AVFilterPad avfilter_asrc_anullsrc_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_props, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_asrc_anullsrc = { + .name = "anullsrc", + .description = NULL_IF_CONFIG_SMALL("Null audio source, return empty audio frames."), + + .init = init, + .priv_size = sizeof(ANullContext), + + .inputs = NULL, + + .outputs = avfilter_asrc_anullsrc_outputs, + .priv_class = &anullsrc_class, +}; diff --git a/ffmpeg1/libavfilter/asrc_flite.c b/ffmpeg1/libavfilter/asrc_flite.c new file mode 100644 index 0000000..c13eb8b --- /dev/null +++ b/ffmpeg1/libavfilter/asrc_flite.c @@ -0,0 +1,291 @@ +/* + * Copyright (c) 2012 Stefano Sabatini + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * flite voice synth source + */ + +#include +#include "libavutil/channel_layout.h" +#include "libavutil/file.h" +#include "libavutil/opt.h" +#include "avfilter.h" +#include "audio.h" +#include "formats.h" +#include "internal.h" + +typedef struct { + const AVClass *class; + char *voice_str; + char *textfile; + char *text; + cst_wave *wave; + int16_t *wave_samples; + int wave_nb_samples; + int list_voices; + cst_voice *voice; + struct voice_entry *voice_entry; + int64_t pts; + int frame_nb_samples; ///< number of samples per frame +} FliteContext; + +#define OFFSET(x) offsetof(FliteContext, x) +#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption flite_options[] = { + { "list_voices", "list voices and exit", OFFSET(list_voices), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS }, + { "nb_samples", "set number of samples per frame", OFFSET(frame_nb_samples), AV_OPT_TYPE_INT, {.i64=512}, 0, INT_MAX, FLAGS }, + { "n", "set number of samples per frame", OFFSET(frame_nb_samples), AV_OPT_TYPE_INT, {.i64=512}, 0, INT_MAX, FLAGS }, + { "text", "set text to speak", OFFSET(text), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "textfile", "set filename of the text to speak", 
OFFSET(textfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "v", "set voice", OFFSET(voice_str), AV_OPT_TYPE_STRING, {.str="kal"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "voice", "set voice", OFFSET(voice_str), AV_OPT_TYPE_STRING, {.str="kal"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { NULL } +}; + +AVFILTER_DEFINE_CLASS(flite); + +static volatile int flite_inited = 0; + +/* declare functions for all the supported voices */ +#define DECLARE_REGISTER_VOICE_FN(name) \ + cst_voice *register_cmu_us_## name(const char *); \ + void unregister_cmu_us_## name(cst_voice *); +DECLARE_REGISTER_VOICE_FN(awb); +DECLARE_REGISTER_VOICE_FN(kal); +DECLARE_REGISTER_VOICE_FN(kal16); +DECLARE_REGISTER_VOICE_FN(rms); +DECLARE_REGISTER_VOICE_FN(slt); + +struct voice_entry { + const char *name; + cst_voice * (*register_fn)(const char *); + void (*unregister_fn)(cst_voice *); + cst_voice *voice; + unsigned usage_count; +} voice_entry; + +#define MAKE_VOICE_STRUCTURE(voice_name) { \ + .name = #voice_name, \ + .register_fn = register_cmu_us_ ## voice_name, \ + .unregister_fn = unregister_cmu_us_ ## voice_name, \ +} +static struct voice_entry voice_entries[] = { + MAKE_VOICE_STRUCTURE(awb), + MAKE_VOICE_STRUCTURE(kal), + MAKE_VOICE_STRUCTURE(kal16), + MAKE_VOICE_STRUCTURE(rms), + MAKE_VOICE_STRUCTURE(slt), +}; + +static void list_voices(void *log_ctx, const char *sep) +{ + int i, n = FF_ARRAY_ELEMS(voice_entries); + for (i = 0; i < n; i++) + av_log(log_ctx, AV_LOG_INFO, "%s%s", + voice_entries[i].name, i < (n-1) ? 
sep : "\n"); +} + +static int select_voice(struct voice_entry **entry_ret, const char *voice_name, void *log_ctx) +{ + int i; + + for (i = 0; i < FF_ARRAY_ELEMS(voice_entries); i++) { + struct voice_entry *entry = &voice_entries[i]; + if (!strcmp(entry->name, voice_name)) { + if (!entry->voice) + entry->voice = entry->register_fn(NULL); + if (!entry->voice) { + av_log(log_ctx, AV_LOG_ERROR, + "Could not register voice '%s'\n", voice_name); + return AVERROR_UNKNOWN; + } + entry->usage_count++; + *entry_ret = entry; + return 0; + } + } + + av_log(log_ctx, AV_LOG_ERROR, "Could not find voice '%s'\n", voice_name); + av_log(log_ctx, AV_LOG_INFO, "Choose between the voices: "); + list_voices(log_ctx, ", "); + + return AVERROR(EINVAL); +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + FliteContext *flite = ctx->priv; + int ret = 0; + + flite->class = &flite_class; + av_opt_set_defaults(flite); + + if ((ret = av_set_options_string(flite, args, "=", ":")) < 0) + return ret; + + if (flite->list_voices) { + list_voices(ctx, "\n"); + return AVERROR_EXIT; + } + + if (!flite_inited) { + if (flite_init() < 0) { + av_log(ctx, AV_LOG_ERROR, "flite initialization failed\n"); + return AVERROR_UNKNOWN; + } + flite_inited++; + } + + if ((ret = select_voice(&flite->voice_entry, flite->voice_str, ctx)) < 0) + return ret; + flite->voice = flite->voice_entry->voice; + + if (flite->textfile && flite->text) { + av_log(ctx, AV_LOG_ERROR, + "Both text and textfile options set: only one must be specified\n"); + return AVERROR(EINVAL); + } + + if (flite->textfile) { + uint8_t *textbuf; + size_t textbuf_size; + + if ((ret = av_file_map(flite->textfile, &textbuf, &textbuf_size, 0, ctx)) < 0) { + av_log(ctx, AV_LOG_ERROR, + "The text file '%s' could not be read: %s\n", + flite->textfile, av_err2str(ret)); + return ret; + } + + if (!(flite->text = av_malloc(textbuf_size+1))) + return AVERROR(ENOMEM); + memcpy(flite->text, textbuf, textbuf_size); + flite->text[textbuf_size] = 
0; + av_file_unmap(textbuf, textbuf_size); + } + + if (!flite->text) { + av_log(ctx, AV_LOG_ERROR, + "No speech text specified, specify the 'text' or 'textfile' option\n"); + return AVERROR(EINVAL); + } + + /* synth all the file data in block */ + flite->wave = flite_text_to_wave(flite->text, flite->voice); + flite->wave_samples = flite->wave->samples; + flite->wave_nb_samples = flite->wave->num_samples; + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + FliteContext *flite = ctx->priv; + + av_opt_free(flite); + + if (!--flite->voice_entry->usage_count) + flite->voice_entry->unregister_fn(flite->voice); + flite->voice = NULL; + flite->voice_entry = NULL; + delete_wave(flite->wave); + flite->wave = NULL; +} + +static int query_formats(AVFilterContext *ctx) +{ + FliteContext *flite = ctx->priv; + + AVFilterChannelLayouts *chlayouts = NULL; + int64_t chlayout = av_get_default_channel_layout(flite->wave->num_channels); + AVFilterFormats *sample_formats = NULL; + AVFilterFormats *sample_rates = NULL; + + ff_add_channel_layout(&chlayouts, chlayout); + ff_set_common_channel_layouts(ctx, chlayouts); + ff_add_format(&sample_formats, AV_SAMPLE_FMT_S16); + ff_set_common_formats(ctx, sample_formats); + ff_add_format(&sample_rates, flite->wave->sample_rate); + ff_set_common_samplerates (ctx, sample_rates); + + return 0; +} + +static int config_props(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + FliteContext *flite = ctx->priv; + + outlink->sample_rate = flite->wave->sample_rate; + outlink->time_base = (AVRational){1, flite->wave->sample_rate}; + + av_log(ctx, AV_LOG_VERBOSE, "voice:%s fmt:%s sample_rate:%d\n", + flite->voice_str, + av_get_sample_fmt_name(outlink->format), outlink->sample_rate); + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFrame *samplesref; + FliteContext *flite = outlink->src->priv; + int nb_samples = FFMIN(flite->wave_nb_samples, flite->frame_nb_samples); + + if (!nb_samples) + return 
AVERROR_EOF; + + samplesref = ff_get_audio_buffer(outlink, nb_samples); + if (!samplesref) + return AVERROR(ENOMEM); + + memcpy(samplesref->data[0], flite->wave_samples, + nb_samples * flite->wave->num_channels * 2); + samplesref->pts = flite->pts; + av_frame_set_pkt_pos(samplesref, -1); + av_frame_set_sample_rate(samplesref, flite->wave->sample_rate); + flite->pts += nb_samples; + flite->wave_samples += nb_samples * flite->wave->num_channels; + flite->wave_nb_samples -= nb_samples; + + return ff_filter_frame(outlink, samplesref); +} + +static const AVFilterPad flite_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_props, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_asrc_flite = { + .name = "flite", + .description = NULL_IF_CONFIG_SMALL("Synthesize voice from text using libflite."), + .query_formats = query_formats, + .init = init, + .uninit = uninit, + .priv_size = sizeof(FliteContext), + .inputs = NULL, + .outputs = flite_outputs, + .priv_class = &flite_class, +}; diff --git a/ffmpeg1/libavfilter/asrc_sine.c b/ffmpeg1/libavfilter/asrc_sine.c new file mode 100644 index 0000000..82a2bef --- /dev/null +++ b/ffmpeg1/libavfilter/asrc_sine.c @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2013 Nicolas George + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +#include "libavutil/avassert.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "audio.h" +#include "avfilter.h" +#include "internal.h" + +typedef struct { + const AVClass *class; + double frequency; + double beep_factor; + int samples_per_frame; + int sample_rate; + int64_t duration; + int16_t *sin; + int64_t pts; + uint32_t phi; ///< current phase of the sine (2pi = 1<<32) + uint32_t dphi; ///< phase increment between two samples + unsigned beep_period; + unsigned beep_index; + unsigned beep_length; + uint32_t phi_beep; ///< current phase of the beep + uint32_t dphi_beep; ///< phase increment of the beep +} SineContext; + +#define CONTEXT SineContext +#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +#define OPT_GENERIC(name, field, def, min, max, descr, type, deffield, ...) \ + { name, descr, offsetof(CONTEXT, field), AV_OPT_TYPE_ ## type, \ + { .deffield = def }, min, max, FLAGS, __VA_ARGS__ } + +#define OPT_INT(name, field, def, min, max, descr, ...) \ + OPT_GENERIC(name, field, def, min, max, descr, INT, i64, __VA_ARGS__) + +#define OPT_DBL(name, field, def, min, max, descr, ...) \ + OPT_GENERIC(name, field, def, min, max, descr, DOUBLE, dbl, __VA_ARGS__) + +#define OPT_DUR(name, field, def, min, max, descr, ...) 
\ + OPT_GENERIC(name, field, def, min, max, descr, DURATION, str, __VA_ARGS__) + +static const AVOption sine_options[] = { + OPT_DBL("frequency", frequency, 440, 0, DBL_MAX, "set the sine frequency"), + OPT_DBL("f", frequency, 440, 0, DBL_MAX, "set the sine frequency"), + OPT_DBL("beep_factor", beep_factor, 0, 0, DBL_MAX, "set the beep fequency factor"), + OPT_DBL("b", beep_factor, 0, 0, DBL_MAX, "set the beep fequency factor"), + OPT_INT("sample_rate", sample_rate, 44100, 1, INT_MAX, "set the sample rate"), + OPT_INT("r", sample_rate, 44100, 1, INT_MAX, "set the sample rate"), + OPT_DUR("duration", duration, 0, 0, INT64_MAX, "set the audio duration"), + OPT_DUR("d", duration, 0, 0, INT64_MAX, "set the audio duration"), + OPT_INT("samples_per_frame", samples_per_frame, 1024, 0, INT_MAX, "set the number of samples per frame"), + {NULL}, +}; + +AVFILTER_DEFINE_CLASS(sine); + +#define LOG_PERIOD 15 +#define AMPLITUDE 4095 +#define AMPLITUDE_SHIFT 3 + +static void make_sin_table(int16_t *sin) +{ + unsigned half_pi = 1 << (LOG_PERIOD - 2); + unsigned ampls = AMPLITUDE << AMPLITUDE_SHIFT; + uint64_t unit2 = (uint64_t)(ampls * ampls) << 32; + unsigned step, i, c, s, k, new_k, n2; + + /* Principle: if u = exp(i*a1) and v = exp(i*a2), then + exp(i*(a1+a2)/2) = (u+v) / length(u+v) */ + sin[0] = 0; + sin[half_pi] = ampls; + for (step = half_pi; step > 1; step /= 2) { + /* k = (1 << 16) * amplitude / length(u+v) + In exact values, k is constant at a given step */ + k = 0x10000; + for (i = 0; i < half_pi / 2; i += step) { + s = sin[i] + sin[i + step]; + c = sin[half_pi - i] + sin[half_pi - i - step]; + n2 = s * s + c * c; + /* Newton's method to solve n² * k² = unit² */ + while (1) { + new_k = (k + unit2 / ((uint64_t)k * n2) + 1) >> 1; + if (k == new_k) + break; + k = new_k; + } + sin[i + step / 2] = (k * s + 0x7FFF) >> 16; + sin[half_pi - i - step / 2] = (k * c + 0x8000) >> 16; + } + } + /* Unshift amplitude */ + for (i = 0; i <= half_pi; i++) + sin[i] = (sin[i] + (1 << 
(AMPLITUDE_SHIFT - 1))) >> AMPLITUDE_SHIFT; + /* Use symmetries to fill the other three quarters */ + for (i = 0; i < half_pi; i++) + sin[half_pi * 2 - i] = sin[i]; + for (i = 0; i < 2 * half_pi; i++) + sin[i + 2 * half_pi] = -sin[i]; +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + SineContext *sine = ctx->priv; + static const char *shorthand[] = { "frequency", "beep_factor", NULL }; + int ret; + + sine->class = &sine_class; + av_opt_set_defaults(sine); + + if ((ret = av_opt_set_from_string(sine, args, shorthand, "=", ":")) < 0) + return ret; + if (!(sine->sin = av_malloc(sizeof(*sine->sin) << LOG_PERIOD))) + return AVERROR(ENOMEM); + sine->dphi = ldexp(sine->frequency, 32) / sine->sample_rate + 0.5; + make_sin_table(sine->sin); + + if (sine->beep_factor) { + sine->beep_period = sine->sample_rate; + sine->beep_length = sine->beep_period / 25; + sine->dphi_beep = ldexp(sine->beep_factor * sine->frequency, 32) / + sine->sample_rate + 0.5; + } + + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + SineContext *sine = ctx->priv; + + av_freep(&sine->sin); +} + +static av_cold int query_formats(AVFilterContext *ctx) +{ + SineContext *sine = ctx->priv; + static const int64_t chlayouts[] = { AV_CH_LAYOUT_MONO, -1 }; + int sample_rates[] = { sine->sample_rate, -1 }; + static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, + AV_SAMPLE_FMT_NONE }; + + ff_set_common_formats (ctx, ff_make_format_list(sample_fmts)); + ff_set_common_channel_layouts(ctx, avfilter_make_format64_list(chlayouts)); + ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates)); + return 0; +} + +static av_cold int config_props(AVFilterLink *outlink) +{ + SineContext *sine = outlink->src->priv; + sine->duration = av_rescale(sine->duration, sine->sample_rate, AV_TIME_BASE); + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + SineContext *sine = outlink->src->priv; + AVFrame *frame; + int i, nb_samples = 
sine->samples_per_frame; + int16_t *samples; + + if (sine->duration) { + nb_samples = FFMIN(nb_samples, sine->duration - sine->pts); + av_assert1(nb_samples >= 0); + if (!nb_samples) + return AVERROR_EOF; + } + if (!(frame = ff_get_audio_buffer(outlink, nb_samples))) + return AVERROR(ENOMEM); + samples = (int16_t *)frame->data[0]; + + for (i = 0; i < nb_samples; i++) { + samples[i] = sine->sin[sine->phi >> (32 - LOG_PERIOD)]; + sine->phi += sine->dphi; + if (sine->beep_index < sine->beep_length) { + samples[i] += sine->sin[sine->phi_beep >> (32 - LOG_PERIOD)] << 1; + sine->phi_beep += sine->dphi_beep; + } + if (++sine->beep_index == sine->beep_period) + sine->beep_index = 0; + } + + frame->pts = sine->pts; + sine->pts += nb_samples; + return ff_filter_frame(outlink, frame); +} + +static const AVFilterPad sine_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .request_frame = request_frame, + .config_props = config_props, + }, + { NULL } +}; + +AVFilter avfilter_asrc_sine = { + .name = "sine", + .description = NULL_IF_CONFIG_SMALL("Generate sine wave audio signal."), + .query_formats = query_formats, + .init = init, + .uninit = uninit, + .priv_size = sizeof(SineContext), + .inputs = NULL, + .outputs = sine_outputs, + .priv_class = &sine_class, +}; diff --git a/ffmpeg1/libavfilter/audio.c b/ffmpeg1/libavfilter/audio.c new file mode 100644 index 0000000..1075217 --- /dev/null +++ b/ffmpeg1/libavfilter/audio.c @@ -0,0 +1,184 @@ +/* + * Copyright (c) Stefano Sabatini | stefasab at gmail.com + * Copyright (c) S.N. Hemanth Meenakshisundaram | smeenaks at ucsd.edu + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/avassert.h" +#include "libavutil/channel_layout.h" +#include "libavutil/common.h" +#include "libavcodec/avcodec.h" + +#include "audio.h" +#include "avfilter.h" +#include "internal.h" + +int avfilter_ref_get_channels(AVFilterBufferRef *ref) +{ + return ref->audio ? ref->audio->channels : 0; +} + +AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples) +{ + return ff_get_audio_buffer(link->dst->outputs[0], nb_samples); +} + +AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples) +{ + AVFrame *frame = av_frame_alloc(); + int channels = link->channels; + int buf_size, ret; + + av_assert0(channels == av_get_channel_layout_nb_channels(link->channel_layout) || !av_get_channel_layout_nb_channels(link->channel_layout)); + + if (!frame) + return NULL; + + buf_size = av_samples_get_buffer_size(NULL, channels, nb_samples, + link->format, 0); + if (buf_size < 0) + goto fail; + + frame->buf[0] = av_buffer_alloc(buf_size); + if (!frame->buf[0]) + goto fail; + + frame->nb_samples = nb_samples; + ret = avcodec_fill_audio_frame(frame, channels, link->format, + frame->buf[0]->data, buf_size, 0); + if (ret < 0) + goto fail; + + av_samples_set_silence(frame->extended_data, 0, nb_samples, channels, + link->format); + + frame->nb_samples = nb_samples; + frame->format = link->format; + av_frame_set_channels(frame, link->channels); + frame->channel_layout = link->channel_layout; + frame->sample_rate = link->sample_rate; + + return frame; + +fail: + 
av_buffer_unref(&frame->buf[0]); + av_frame_free(&frame); + return NULL; +} + +AVFrame *ff_get_audio_buffer(AVFilterLink *link, int nb_samples) +{ + AVFrame *ret = NULL; + + if (link->dstpad->get_audio_buffer) + ret = link->dstpad->get_audio_buffer(link, nb_samples); + + if (!ret) + ret = ff_default_get_audio_buffer(link, nb_samples); + + return ret; +} + +#if FF_API_AVFILTERBUFFER +AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data, + int linesize,int perms, + int nb_samples, + enum AVSampleFormat sample_fmt, + int channels, + uint64_t channel_layout) +{ + int planes; + AVFilterBuffer *samples = av_mallocz(sizeof(*samples)); + AVFilterBufferRef *samplesref = av_mallocz(sizeof(*samplesref)); + + if (!samples || !samplesref) + goto fail; + + av_assert0(channels); + av_assert0(channel_layout == 0 || + channels == av_get_channel_layout_nb_channels(channel_layout)); + + samplesref->buf = samples; + samplesref->buf->free = ff_avfilter_default_free_buffer; + if (!(samplesref->audio = av_mallocz(sizeof(*samplesref->audio)))) + goto fail; + + samplesref->audio->nb_samples = nb_samples; + samplesref->audio->channel_layout = channel_layout; + samplesref->audio->channels = channels; + + planes = av_sample_fmt_is_planar(sample_fmt) ? 
channels : 1; + + /* make sure the buffer gets read permission or it's useless for output */ + samplesref->perms = perms | AV_PERM_READ; + + samples->refcount = 1; + samplesref->type = AVMEDIA_TYPE_AUDIO; + samplesref->format = sample_fmt; + + memcpy(samples->data, data, + FFMIN(FF_ARRAY_ELEMS(samples->data), planes)*sizeof(samples->data[0])); + memcpy(samplesref->data, samples->data, sizeof(samples->data)); + + samples->linesize[0] = samplesref->linesize[0] = linesize; + + if (planes > FF_ARRAY_ELEMS(samples->data)) { + samples-> extended_data = av_mallocz(sizeof(*samples->extended_data) * + planes); + samplesref->extended_data = av_mallocz(sizeof(*samplesref->extended_data) * + planes); + + if (!samples->extended_data || !samplesref->extended_data) + goto fail; + + memcpy(samples-> extended_data, data, sizeof(*data)*planes); + memcpy(samplesref->extended_data, data, sizeof(*data)*planes); + } else { + samples->extended_data = samples->data; + samplesref->extended_data = samplesref->data; + } + + samplesref->pts = AV_NOPTS_VALUE; + + return samplesref; + +fail: + if (samples && samples->extended_data != samples->data) + av_freep(&samples->extended_data); + if (samplesref) { + av_freep(&samplesref->audio); + if (samplesref->extended_data != samplesref->data) + av_freep(&samplesref->extended_data); + } + av_freep(&samplesref); + av_freep(&samples); + return NULL; +} + +AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data, + int linesize,int perms, + int nb_samples, + enum AVSampleFormat sample_fmt, + uint64_t channel_layout) +{ + int channels = av_get_channel_layout_nb_channels(channel_layout); + return avfilter_get_audio_buffer_ref_from_arrays_channels(data, linesize, perms, + nb_samples, sample_fmt, + channels, channel_layout); +} +#endif diff --git a/ffmpeg1/libavfilter/audio.h b/ffmpeg1/libavfilter/audio.h new file mode 100644 index 0000000..3335c96 --- /dev/null +++ b/ffmpeg1/libavfilter/audio.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 
Stefano Sabatini | stefasab at gmail.com + * Copyright (c) S.N. Hemanth Meenakshisundaram | smeenaks at ucsd.edu + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_AUDIO_H +#define AVFILTER_AUDIO_H + +#include "avfilter.h" +#include "internal.h" + +static const enum AVSampleFormat ff_packed_sample_fmts_array[] = { + AV_SAMPLE_FMT_U8, + AV_SAMPLE_FMT_S16, + AV_SAMPLE_FMT_S32, + AV_SAMPLE_FMT_FLT, + AV_SAMPLE_FMT_DBL, + AV_SAMPLE_FMT_NONE +}; + +static const enum AVSampleFormat ff_planar_sample_fmts_array[] = { + AV_SAMPLE_FMT_U8P, + AV_SAMPLE_FMT_S16P, + AV_SAMPLE_FMT_S32P, + AV_SAMPLE_FMT_FLTP, + AV_SAMPLE_FMT_DBLP, + AV_SAMPLE_FMT_NONE +}; + +/** default handler for get_audio_buffer() for audio inputs */ +AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples); + +/** get_audio_buffer() handler for filters which simply pass audio along */ +AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples); + +/** + * Request an audio samples buffer with a specific set of permissions. + * + * @param link the output link to the filter from which the buffer will + * be requested + * @param nb_samples the number of samples per channel + * @return A reference to the samples. 
This must be unreferenced with + * avfilter_unref_buffer when you are finished with it. + */ +AVFrame *ff_get_audio_buffer(AVFilterLink *link, int nb_samples); + +/** + * Send a buffer of audio samples to the next filter. + * + * @param link the output link over which the audio samples are being sent + * @param samplesref a reference to the buffer of audio samples being sent. The + * receiving filter will free this reference when it no longer + * needs it or pass it on to the next filter. + * + * @return >= 0 on success, a negative AVERROR on error. The receiving filter + * is responsible for unreferencing samplesref in case of error. + */ +int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref); + +/** + * Send a buffer of audio samples to the next link, without checking + * min_samples. + */ +int ff_filter_samples_framed(AVFilterLink *link, + AVFilterBufferRef *samplesref); + +#endif /* AVFILTER_AUDIO_H */ diff --git a/ffmpeg1/libavfilter/avcodec.c b/ffmpeg1/libavfilter/avcodec.c new file mode 100644 index 0000000..605e5d2 --- /dev/null +++ b/ffmpeg1/libavfilter/avcodec.c @@ -0,0 +1,157 @@ +/* + * Copyright 2011 Stefano Sabatini | stefasab at gmail.com + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * libavcodec/libavfilter gluing utilities + */ + +#include "avcodec.h" +#include "libavutil/avassert.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" + +#if FF_API_AVFILTERBUFFER +AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, + int perms) +{ + AVFilterBufferRef *picref = + avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize, perms, + frame->width, frame->height, + frame->format); + if (!picref) + return NULL; + if (avfilter_copy_frame_props(picref, frame) < 0) { + picref->buf->data[0] = NULL; + avfilter_unref_bufferp(&picref); + } + return picref; +} + +AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame, + int perms) +{ + AVFilterBufferRef *samplesref; + int channels = av_frame_get_channels(frame); + int64_t layout = av_frame_get_channel_layout(frame); + + if (layout && av_get_channel_layout_nb_channels(layout) != av_frame_get_channels(frame)) { + av_log(0, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n"); + return NULL; + } + + samplesref = avfilter_get_audio_buffer_ref_from_arrays_channels( + (uint8_t **)frame->extended_data, frame->linesize[0], perms, + frame->nb_samples, frame->format, channels, layout); + if (!samplesref) + return NULL; + if (avfilter_copy_frame_props(samplesref, frame) < 0) { + samplesref->buf->data[0] = NULL; + avfilter_unref_bufferp(&samplesref); + } + return samplesref; +} + +AVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type, + const AVFrame *frame, + int perms) +{ + switch (type) { + case AVMEDIA_TYPE_VIDEO: + return avfilter_get_video_buffer_ref_from_frame(frame, perms); + case AVMEDIA_TYPE_AUDIO: + return 
avfilter_get_audio_buffer_ref_from_frame(frame, perms); + default: + return NULL; + } +} + +int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src) +{ + int planes, nb_channels; + + if (!dst) + return AVERROR(EINVAL); + /* abort in case the src is NULL and dst is not, avoid inconsistent state in dst */ + av_assert0(src); + + memcpy(dst->data, src->data, sizeof(dst->data)); + memcpy(dst->linesize, src->linesize, sizeof(dst->linesize)); + + dst->pts = src->pts; + dst->format = src->format; + av_frame_set_pkt_pos(dst, src->pos); + + switch (src->type) { + case AVMEDIA_TYPE_VIDEO: + av_assert0(src->video); + dst->width = src->video->w; + dst->height = src->video->h; + dst->sample_aspect_ratio = src->video->sample_aspect_ratio; + dst->interlaced_frame = src->video->interlaced; + dst->top_field_first = src->video->top_field_first; + dst->key_frame = src->video->key_frame; + dst->pict_type = src->video->pict_type; + break; + case AVMEDIA_TYPE_AUDIO: + av_assert0(src->audio); + nb_channels = av_get_channel_layout_nb_channels(src->audio->channel_layout); + planes = av_sample_fmt_is_planar(src->format) ? 
nb_channels : 1; + + if (planes > FF_ARRAY_ELEMS(dst->data)) { + dst->extended_data = av_mallocz(planes * sizeof(*dst->extended_data)); + if (!dst->extended_data) + return AVERROR(ENOMEM); + memcpy(dst->extended_data, src->extended_data, + planes * sizeof(*dst->extended_data)); + } else + dst->extended_data = dst->data; + dst->nb_samples = src->audio->nb_samples; + av_frame_set_sample_rate (dst, src->audio->sample_rate); + av_frame_set_channel_layout(dst, src->audio->channel_layout); + av_frame_set_channels (dst, src->audio->channels); + break; + default: + return AVERROR(EINVAL); + } + + return 0; +} +#endif + +#if FF_API_FILL_FRAME +int avfilter_fill_frame_from_audio_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *samplesref) +{ + return avfilter_copy_buf_props(frame, samplesref); +} + +int avfilter_fill_frame_from_video_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *picref) +{ + return avfilter_copy_buf_props(frame, picref); +} + +int avfilter_fill_frame_from_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *ref) +{ + return avfilter_copy_buf_props(frame, ref); +} +#endif diff --git a/ffmpeg1/libavfilter/avcodec.h b/ffmpeg1/libavfilter/avcodec.h new file mode 100644 index 0000000..ae55df7 --- /dev/null +++ b/ffmpeg1/libavfilter/avcodec.h @@ -0,0 +1,110 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_AVCODEC_H +#define AVFILTER_AVCODEC_H + +/** + * @file + * libavcodec/libavfilter gluing utilities + * + * This should be included in an application ONLY if the installed + * libavfilter has been compiled with libavcodec support, otherwise + * symbols defined below will not be available. + */ + +#include "avfilter.h" + +#if FF_API_AVFILTERBUFFER +/** + * Create and return a picref reference from the data and properties + * contained in frame. + * + * @param perms permissions to assign to the new buffer reference + * @deprecated avfilter APIs work natively with AVFrame instead. + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, int perms); + + +/** + * Create and return a picref reference from the data and properties + * contained in frame. + * + * @param perms permissions to assign to the new buffer reference + * @deprecated avfilter APIs work natively with AVFrame instead. + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame, + int perms); + +/** + * Create and return a buffer reference from the data and properties + * contained in frame. + * + * @param perms permissions to assign to the new buffer reference + * @deprecated avfilter APIs work natively with AVFrame instead. + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type, + const AVFrame *frame, + int perms); +#endif + +#if FF_API_FILL_FRAME +/** + * Fill an AVFrame with the information stored in samplesref. 
+ * + * @param frame an already allocated AVFrame + * @param samplesref an audio buffer reference + * @return 0 in case of success, a negative AVERROR code in case of + * failure + * @deprecated Use avfilter_copy_buf_props() instead. + */ +attribute_deprecated +int avfilter_fill_frame_from_audio_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *samplesref); + +/** + * Fill an AVFrame with the information stored in picref. + * + * @param frame an already allocated AVFrame + * @param picref a video buffer reference + * @return 0 in case of success, a negative AVERROR code in case of + * failure + * @deprecated Use avfilter_copy_buf_props() instead. + */ +attribute_deprecated +int avfilter_fill_frame_from_video_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *picref); + +/** + * Fill an AVFrame with information stored in ref. + * + * @param frame an already allocated AVFrame + * @param ref a video or audio buffer reference + * @return 0 in case of success, a negative AVERROR code in case of + * failure + * @deprecated Use avfilter_copy_buf_props() instead. + */ +attribute_deprecated +int avfilter_fill_frame_from_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *ref); +#endif + +#endif /* AVFILTER_AVCODEC_H */ diff --git a/ffmpeg1/libavfilter/avf_concat.c b/ffmpeg1/libavfilter/avf_concat.c new file mode 100644 index 0000000..2b3640b --- /dev/null +++ b/ffmpeg1/libavfilter/avf_concat.c @@ -0,0 +1,425 @@ +/* + * Copyright (c) 2012 Nicolas George + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * See the GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * concat audio-video filter + */ + +#include "libavutil/avassert.h" +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "avfilter.h" +#define FF_BUFQUEUE_SIZE 256 +#include "bufferqueue.h" +#include "internal.h" +#include "video.h" +#include "audio.h" + +#define TYPE_ALL 2 + +typedef struct { + const AVClass *class; + unsigned nb_streams[TYPE_ALL]; /**< number of out streams of each type */ + unsigned nb_segments; + unsigned cur_idx; /**< index of the first input of current segment */ + int64_t delta_ts; /**< timestamp to add to produce output timestamps */ + unsigned nb_in_active; /**< number of active inputs in current segment */ + unsigned unsafe; + struct concat_in { + int64_t pts; + int64_t nb_frames; + unsigned eof; + struct FFBufQueue queue; + } *in; +} ConcatContext; + +#define OFFSET(x) offsetof(ConcatContext, x) +#define A AV_OPT_FLAG_AUDIO_PARAM +#define F AV_OPT_FLAG_FILTERING_PARAM +#define V AV_OPT_FLAG_VIDEO_PARAM + +static const AVOption concat_options[] = { + { "n", "specify the number of segments", OFFSET(nb_segments), + AV_OPT_TYPE_INT, { .i64 = 2 }, 2, INT_MAX, V|A|F}, + { "v", "specify the number of video streams", + OFFSET(nb_streams[AVMEDIA_TYPE_VIDEO]), + AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, V|F }, + { "a", "specify the number of audio streams", + OFFSET(nb_streams[AVMEDIA_TYPE_AUDIO]), + AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A|F}, + { "unsafe", "enable unsafe mode", + OFFSET(unsafe), + AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A|A|F}, + { 0 } +}; + +AVFILTER_DEFINE_CLASS(concat); + +static int query_formats(AVFilterContext *ctx) +{ + ConcatContext *cat = ctx->priv; + 
unsigned type, nb_str, idx0 = 0, idx, str, seg; + AVFilterFormats *formats, *rates = NULL; + AVFilterChannelLayouts *layouts = NULL; + + for (type = 0; type < TYPE_ALL; type++) { + nb_str = cat->nb_streams[type]; + for (str = 0; str < nb_str; str++) { + idx = idx0; + + /* Set the output formats */ + formats = ff_all_formats(type); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &ctx->outputs[idx]->in_formats); + if (type == AVMEDIA_TYPE_AUDIO) { + rates = ff_all_samplerates(); + if (!rates) + return AVERROR(ENOMEM); + ff_formats_ref(rates, &ctx->outputs[idx]->in_samplerates); + layouts = ff_all_channel_layouts(); + if (!layouts) + return AVERROR(ENOMEM); + ff_channel_layouts_ref(layouts, &ctx->outputs[idx]->in_channel_layouts); + } + + /* Set the same formats for each corresponding input */ + for (seg = 0; seg < cat->nb_segments; seg++) { + ff_formats_ref(formats, &ctx->inputs[idx]->out_formats); + if (type == AVMEDIA_TYPE_AUDIO) { + ff_formats_ref(rates, &ctx->inputs[idx]->out_samplerates); + ff_channel_layouts_ref(layouts, &ctx->inputs[idx]->out_channel_layouts); + } + idx += ctx->nb_outputs; + } + + idx0++; + } + } + return 0; +} + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + ConcatContext *cat = ctx->priv; + unsigned out_no = FF_OUTLINK_IDX(outlink); + unsigned in_no = out_no, seg; + AVFilterLink *inlink = ctx->inputs[in_no]; + + /* enhancement: find a common one */ + outlink->time_base = AV_TIME_BASE_Q; + outlink->w = inlink->w; + outlink->h = inlink->h; + outlink->sample_aspect_ratio = inlink->sample_aspect_ratio; + outlink->format = inlink->format; + for (seg = 1; seg < cat->nb_segments; seg++) { + inlink = ctx->inputs[in_no += ctx->nb_outputs]; + /* possible enhancement: unsafe mode, do not check */ + if (outlink->w != inlink->w || + outlink->h != inlink->h || + outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num || + outlink->sample_aspect_ratio.den != 
inlink->sample_aspect_ratio.den) { + av_log(ctx, AV_LOG_ERROR, "Input link %s parameters " + "(size %dx%d, SAR %d:%d) do not match the corresponding " + "output link %s parameters (%dx%d, SAR %d:%d)\n", + ctx->input_pads[in_no].name, inlink->w, inlink->h, + inlink->sample_aspect_ratio.num, + inlink->sample_aspect_ratio.den, + ctx->input_pads[out_no].name, outlink->w, outlink->h, + outlink->sample_aspect_ratio.num, + outlink->sample_aspect_ratio.den); + if (!cat->unsafe) + return AVERROR(EINVAL); + } + } + + return 0; +} + +static int push_frame(AVFilterContext *ctx, unsigned in_no, AVFrame *buf) +{ + ConcatContext *cat = ctx->priv; + unsigned out_no = in_no % ctx->nb_outputs; + AVFilterLink * inlink = ctx-> inputs[ in_no]; + AVFilterLink *outlink = ctx->outputs[out_no]; + struct concat_in *in = &cat->in[in_no]; + + buf->pts = av_rescale_q(buf->pts, inlink->time_base, outlink->time_base); + in->pts = buf->pts; + in->nb_frames++; + /* add duration to input PTS */ + if (inlink->sample_rate) + /* use number of audio samples */ + in->pts += av_rescale_q(buf->nb_samples, + (AVRational){ 1, inlink->sample_rate }, + outlink->time_base); + else if (in->nb_frames >= 2) + /* use mean duration */ + in->pts = av_rescale(in->pts, in->nb_frames, in->nb_frames - 1); + + buf->pts += cat->delta_ts; + return ff_filter_frame(outlink, buf); +} + +static int process_frame(AVFilterLink *inlink, AVFrame *buf) +{ + AVFilterContext *ctx = inlink->dst; + ConcatContext *cat = ctx->priv; + unsigned in_no = FF_INLINK_IDX(inlink); + + if (in_no < cat->cur_idx) { + av_log(ctx, AV_LOG_ERROR, "Frame after EOF on input %s\n", + ctx->input_pads[in_no].name); + av_frame_free(&buf); + } else if (in_no >= cat->cur_idx + ctx->nb_outputs) { + ff_bufqueue_add(ctx, &cat->in[in_no].queue, buf); + } else { + return push_frame(ctx, in_no, buf); + } + return 0; +} + +static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h) +{ + AVFilterContext *ctx = inlink->dst; + unsigned in_no = 
FF_INLINK_IDX(inlink); + AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs]; + + return ff_get_video_buffer(outlink, w, h); +} + +static AVFrame *get_audio_buffer(AVFilterLink *inlink, int nb_samples) +{ + AVFilterContext *ctx = inlink->dst; + unsigned in_no = FF_INLINK_IDX(inlink); + AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs]; + + return ff_get_audio_buffer(outlink, nb_samples); +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *buf) +{ + return process_frame(inlink, buf); +} + +static void close_input(AVFilterContext *ctx, unsigned in_no) +{ + ConcatContext *cat = ctx->priv; + + cat->in[in_no].eof = 1; + cat->nb_in_active--; + av_log(ctx, AV_LOG_VERBOSE, "EOF on %s, %d streams left in segment.\n", + ctx->input_pads[in_no].name, cat->nb_in_active); +} + +static void find_next_delta_ts(AVFilterContext *ctx, int64_t *seg_delta) +{ + ConcatContext *cat = ctx->priv; + unsigned i = cat->cur_idx; + unsigned imax = i + ctx->nb_outputs; + int64_t pts; + + pts = cat->in[i++].pts; + for (; i < imax; i++) + pts = FFMAX(pts, cat->in[i].pts); + cat->delta_ts += pts; + *seg_delta = pts; +} + +static int send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no, + int64_t seg_delta) +{ + ConcatContext *cat = ctx->priv; + AVFilterLink *outlink = ctx->outputs[out_no]; + int64_t base_pts = cat->in[in_no].pts + cat->delta_ts - seg_delta; + int64_t nb_samples, sent = 0; + int frame_nb_samples, ret; + AVRational rate_tb = { 1, ctx->inputs[in_no]->sample_rate }; + AVFrame *buf; + int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout); + + if (!rate_tb.den) + return AVERROR_BUG; + nb_samples = av_rescale_q(seg_delta - cat->in[in_no].pts, + outlink->time_base, rate_tb); + frame_nb_samples = FFMAX(9600, rate_tb.den / 5); /* arbitrary */ + while (nb_samples) { + frame_nb_samples = FFMIN(frame_nb_samples, nb_samples); + buf = ff_get_audio_buffer(outlink, frame_nb_samples); + if (!buf) + return AVERROR(ENOMEM); + 
av_samples_set_silence(buf->extended_data, 0, frame_nb_samples, + nb_channels, outlink->format); + buf->pts = base_pts + av_rescale_q(sent, rate_tb, outlink->time_base); + ret = ff_filter_frame(outlink, buf); + if (ret < 0) + return ret; + sent += frame_nb_samples; + nb_samples -= frame_nb_samples; + } + return 0; +} + +static int flush_segment(AVFilterContext *ctx) +{ + int ret; + ConcatContext *cat = ctx->priv; + unsigned str, str_max; + int64_t seg_delta; + + find_next_delta_ts(ctx, &seg_delta); + cat->cur_idx += ctx->nb_outputs; + cat->nb_in_active = ctx->nb_outputs; + av_log(ctx, AV_LOG_VERBOSE, "Segment finished at pts=%"PRId64"\n", + cat->delta_ts); + + if (cat->cur_idx < ctx->nb_inputs) { + /* pad audio streams with silence */ + str = cat->nb_streams[AVMEDIA_TYPE_VIDEO]; + str_max = str + cat->nb_streams[AVMEDIA_TYPE_AUDIO]; + for (; str < str_max; str++) { + ret = send_silence(ctx, cat->cur_idx - ctx->nb_outputs + str, str, + seg_delta); + if (ret < 0) + return ret; + } + /* flush queued buffers */ + /* possible enhancement: flush in PTS order */ + str_max = cat->cur_idx + ctx->nb_outputs; + for (str = cat->cur_idx; str < str_max; str++) { + while (cat->in[str].queue.available) { + ret = push_frame(ctx, str, ff_bufqueue_get(&cat->in[str].queue)); + if (ret < 0) + return ret; + } + } + } + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + ConcatContext *cat = ctx->priv; + unsigned out_no = FF_OUTLINK_IDX(outlink); + unsigned in_no = out_no + cat->cur_idx; + unsigned str, str_max; + int ret; + + while (1) { + if (in_no >= ctx->nb_inputs) + return AVERROR_EOF; + if (!cat->in[in_no].eof) { + ret = ff_request_frame(ctx->inputs[in_no]); + if (ret != AVERROR_EOF) + return ret; + close_input(ctx, in_no); + } + /* cycle on all inputs to finish the segment */ + /* possible enhancement: request in PTS order */ + str_max = cat->cur_idx + ctx->nb_outputs - 1; + for (str = cat->cur_idx; cat->nb_in_active; + str = 
str == str_max ? cat->cur_idx : str + 1) { + if (cat->in[str].eof) + continue; + ret = ff_request_frame(ctx->inputs[str]); + if (ret == AVERROR_EOF) + close_input(ctx, str); + else if (ret < 0) + return ret; + } + ret = flush_segment(ctx); + if (ret < 0) + return ret; + in_no += ctx->nb_outputs; + } +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + ConcatContext *cat = ctx->priv; + unsigned seg, type, str; + + /* create input pads */ + for (seg = 0; seg < cat->nb_segments; seg++) { + for (type = 0; type < TYPE_ALL; type++) { + for (str = 0; str < cat->nb_streams[type]; str++) { + AVFilterPad pad = { + .type = type, + .get_video_buffer = get_video_buffer, + .get_audio_buffer = get_audio_buffer, + .filter_frame = filter_frame, + }; + pad.name = av_asprintf("in%d:%c%d", seg, "va"[type], str); + ff_insert_inpad(ctx, ctx->nb_inputs, &pad); + } + } + } + /* create output pads */ + for (type = 0; type < TYPE_ALL; type++) { + for (str = 0; str < cat->nb_streams[type]; str++) { + AVFilterPad pad = { + .type = type, + .config_props = config_output, + .request_frame = request_frame, + }; + pad.name = av_asprintf("out:%c%d", "va"[type], str); + ff_insert_outpad(ctx, ctx->nb_outputs, &pad); + } + } + + cat->in = av_calloc(ctx->nb_inputs, sizeof(*cat->in)); + if (!cat->in) + return AVERROR(ENOMEM); + cat->nb_in_active = ctx->nb_outputs; + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + ConcatContext *cat = ctx->priv; + unsigned i; + + for (i = 0; i < ctx->nb_inputs; i++) { + av_freep(&ctx->input_pads[i].name); + ff_bufqueue_discard_all(&cat->in[i].queue); + } + for (i = 0; i < ctx->nb_outputs; i++) + av_freep(&ctx->output_pads[i].name); + av_free(cat->in); +} + +static const char *const shorthand[] = { NULL }; + +AVFilter avfilter_avf_concat = { + .name = "concat", + .description = NULL_IF_CONFIG_SMALL("Concatenate audio and video streams."), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .priv_size = 
sizeof(ConcatContext), + .inputs = NULL, + .outputs = NULL, + .priv_class = &concat_class, + .shorthand = shorthand, +}; diff --git a/ffmpeg1/libavfilter/avf_showspectrum.c b/ffmpeg1/libavfilter/avf_showspectrum.c new file mode 100644 index 0000000..364ee6c --- /dev/null +++ b/ffmpeg1/libavfilter/avf_showspectrum.c @@ -0,0 +1,515 @@ +/* + * Copyright (c) 2012 Clément BÅ“sch + * Copyright (c) 2013 Rudolf Polzer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * audio to spectrum (video) transmedia filter, based on ffplay rdft showmode + * (by Michael Niedermayer) and lavfi/avf_showwaves (by Stefano Sabatini). 
+ */ + +#include + +#include "libavcodec/avfft.h" +#include "libavutil/avassert.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "avfilter.h" +#include "internal.h" + +enum DisplayMode { COMBINED, SEPARATE, NB_MODES }; +enum DisplayScale { LINEAR, SQRT, CBRT, LOG, NB_SCALES }; +enum ColorMode { CHANNEL, INTENSITY, NB_CLMODES }; + +typedef struct { + const AVClass *class; + int w, h; + AVFrame *outpicref; + int req_fullfilled; + int nb_display_channels; + int channel_height; + int sliding; ///< 1 if sliding mode, 0 otherwise + enum DisplayMode mode; ///< channel display mode + enum ColorMode color_mode; ///< display color scheme + enum DisplayScale scale; + float saturation; ///< color saturation multiplier + int xpos; ///< x position (current column) + RDFTContext *rdft; ///< Real Discrete Fourier Transform context + int rdft_bits; ///< number of bits (RDFT window size = 1<priv; + int err; + + showspectrum->class = &showspectrum_class; + av_opt_set_defaults(showspectrum); + + if ((err = av_set_options_string(showspectrum, args, "=", ":")) < 0) + return err; + + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + ShowSpectrumContext *showspectrum = ctx->priv; + int i; + + av_freep(&showspectrum->combine_buffer); + av_rdft_end(showspectrum->rdft); + for (i = 0; i < showspectrum->nb_display_channels; i++) + av_freep(&showspectrum->rdft_data[i]); + av_freep(&showspectrum->rdft_data); + av_freep(&showspectrum->window_func_lut); + av_frame_free(&showspectrum->outpicref); +} + +static int query_formats(AVFilterContext *ctx) +{ + AVFilterFormats *formats = NULL; + AVFilterChannelLayouts *layouts = NULL; + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; + static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_NONE }; + static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_NONE }; + + /* set input audio formats */ + formats = 
ff_make_format_list(sample_fmts); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &inlink->out_formats); + + layouts = ff_all_channel_layouts(); + if (!layouts) + return AVERROR(ENOMEM); + ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts); + + formats = ff_all_samplerates(); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &inlink->out_samplerates); + + /* set output video format */ + formats = ff_make_format_list(pix_fmts); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &outlink->in_formats); + + return 0; +} + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AVFilterLink *inlink = ctx->inputs[0]; + ShowSpectrumContext *showspectrum = ctx->priv; + int i, rdft_bits, win_size, h; + + outlink->w = showspectrum->w; + outlink->h = showspectrum->h; + + h = (showspectrum->mode == COMBINED) ? outlink->h : outlink->h / inlink->channels; + showspectrum->channel_height = h; + + /* RDFT window size (precision) according to the requested output frame height */ + for (rdft_bits = 1; 1 << rdft_bits < 2 * h; rdft_bits++); + win_size = 1 << rdft_bits; + + /* (re-)configuration if the video output changed (or first init) */ + if (rdft_bits != showspectrum->rdft_bits) { + size_t rdft_size, rdft_listsize; + AVFrame *outpicref; + + av_rdft_end(showspectrum->rdft); + showspectrum->rdft = av_rdft_init(rdft_bits, DFT_R2C); + showspectrum->rdft_bits = rdft_bits; + + /* RDFT buffers: x2 for each (display) channel buffer. + * Note: we use free and malloc instead of a realloc-like function to + * make sure the buffer is aligned in memory for the FFT functions. 
*/ + for (i = 0; i < showspectrum->nb_display_channels; i++) + av_freep(&showspectrum->rdft_data[i]); + av_freep(&showspectrum->rdft_data); + showspectrum->nb_display_channels = inlink->channels; + + if (av_size_mult(sizeof(*showspectrum->rdft_data), + showspectrum->nb_display_channels, &rdft_listsize) < 0) + return AVERROR(EINVAL); + if (av_size_mult(sizeof(**showspectrum->rdft_data), + win_size, &rdft_size) < 0) + return AVERROR(EINVAL); + showspectrum->rdft_data = av_malloc(rdft_listsize); + if (!showspectrum->rdft_data) + return AVERROR(ENOMEM); + for (i = 0; i < showspectrum->nb_display_channels; i++) { + showspectrum->rdft_data[i] = av_malloc(rdft_size); + if (!showspectrum->rdft_data[i]) + return AVERROR(ENOMEM); + } + showspectrum->filled = 0; + + /* pre-calc windowing function (hann here) */ + showspectrum->window_func_lut = + av_realloc_f(showspectrum->window_func_lut, win_size, + sizeof(*showspectrum->window_func_lut)); + if (!showspectrum->window_func_lut) + return AVERROR(ENOMEM); + for (i = 0; i < win_size; i++) + showspectrum->window_func_lut[i] = .5f * (1 - cos(2*M_PI*i / (win_size-1))); + + /* prepare the initial picref buffer (black frame) */ + av_frame_free(&showspectrum->outpicref); + showspectrum->outpicref = outpicref = + ff_get_video_buffer(outlink, outlink->w, outlink->h); + if (!outpicref) + return AVERROR(ENOMEM); + outlink->sample_aspect_ratio = (AVRational){1,1}; + memset(outpicref->data[0], 0, outlink->h * outpicref->linesize[0]); + memset(outpicref->data[1], 128, outlink->h * outpicref->linesize[1]); + memset(outpicref->data[2], 128, outlink->h * outpicref->linesize[2]); + } + + if (showspectrum->xpos >= outlink->w) + showspectrum->xpos = 0; + + showspectrum->combine_buffer = + av_realloc_f(showspectrum->combine_buffer, outlink->h * 3, + sizeof(*showspectrum->combine_buffer)); + + av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d RDFT window size:%d\n", + showspectrum->w, showspectrum->h, win_size); + return 0; +} + +inline static int 
push_frame(AVFilterLink *outlink) +{ + ShowSpectrumContext *showspectrum = outlink->src->priv; + + showspectrum->xpos++; + if (showspectrum->xpos >= outlink->w) + showspectrum->xpos = 0; + showspectrum->filled = 0; + showspectrum->req_fullfilled = 1; + + return ff_filter_frame(outlink, av_frame_clone(showspectrum->outpicref)); +} + +static int request_frame(AVFilterLink *outlink) +{ + ShowSpectrumContext *showspectrum = outlink->src->priv; + AVFilterLink *inlink = outlink->src->inputs[0]; + int ret; + + showspectrum->req_fullfilled = 0; + do { + ret = ff_request_frame(inlink); + } while (!showspectrum->req_fullfilled && ret >= 0); + + if (ret == AVERROR_EOF && showspectrum->outpicref) + push_frame(outlink); + return ret; +} + +static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb_samples) +{ + int ret; + AVFilterContext *ctx = inlink->dst; + AVFilterLink *outlink = ctx->outputs[0]; + ShowSpectrumContext *showspectrum = ctx->priv; + AVFrame *outpicref = showspectrum->outpicref; + + /* nb_freq contains the power of two superior or equal to the output image + * height (or half the RDFT window size) */ + const int nb_freq = 1 << (showspectrum->rdft_bits - 1); + const int win_size = nb_freq << 1; + const double w = 1. / (sqrt(nb_freq) * 32768.); + + int ch, plane, n, y; + const int start = showspectrum->filled; + const int add_samples = FFMIN(win_size - start, nb_samples); + + /* fill RDFT input with the number of samples available */ + for (ch = 0; ch < showspectrum->nb_display_channels; ch++) { + const int16_t *p = (int16_t *)insamples->extended_data[ch]; + + p += showspectrum->consumed; + for (n = 0; n < add_samples; n++) + showspectrum->rdft_data[ch][start + n] = p[n] * showspectrum->window_func_lut[start + n]; + } + showspectrum->filled += add_samples; + + /* complete RDFT window size? 
*/ + if (showspectrum->filled == win_size) { + + /* channel height */ + int h = showspectrum->channel_height; + + /* run RDFT on each samples set */ + for (ch = 0; ch < showspectrum->nb_display_channels; ch++) + av_rdft_calc(showspectrum->rdft, showspectrum->rdft_data[ch]); + + /* fill a new spectrum column */ +#define RE(y, ch) showspectrum->rdft_data[ch][2 * y + 0] +#define IM(y, ch) showspectrum->rdft_data[ch][2 * y + 1] +#define MAGNITUDE(y, ch) hypot(RE(y, ch), IM(y, ch)) + + /* initialize buffer for combining to black */ + for (y = 0; y < outlink->h; y++) { + showspectrum->combine_buffer[3 * y ] = 0; + showspectrum->combine_buffer[3 * y + 1] = 127.5; + showspectrum->combine_buffer[3 * y + 2] = 127.5; + } + + for (ch = 0; ch < showspectrum->nb_display_channels; ch++) { + float yf, uf, vf; + + /* decide color range */ + switch (showspectrum->mode) { + case COMBINED: + // reduce range by channel count + yf = 256.0f / showspectrum->nb_display_channels; + switch (showspectrum->color_mode) { + case INTENSITY: + uf = yf; + vf = yf; + break; + case CHANNEL: + /* adjust saturation for mixed UV coloring */ + /* this factor is correct for infinite channels, an approximation otherwise */ + uf = yf * M_PI; + vf = yf * M_PI; + break; + default: + av_assert0(0); + } + break; + case SEPARATE: + // full range + yf = 256.0f; + uf = 256.0f; + vf = 256.0f; + break; + default: + av_assert0(0); + } + + if (showspectrum->color_mode == CHANNEL) { + if (showspectrum->nb_display_channels > 1) { + uf *= 0.5 * sin((2 * M_PI * ch) / showspectrum->nb_display_channels); + vf *= 0.5 * cos((2 * M_PI * ch) / showspectrum->nb_display_channels); + } else { + uf = 0.0f; + vf = 0.0f; + } + } + uf *= showspectrum->saturation; + vf *= showspectrum->saturation; + + /* draw the channel */ + for (y = 0; y < h; y++) { + int row = (showspectrum->mode == COMBINED) ? 
y : ch * h + y; + float *out = &showspectrum->combine_buffer[3 * row]; + + /* get magnitude */ + float a = w * MAGNITUDE(y, ch); + + /* apply scale */ + switch (showspectrum->scale) { + case LINEAR: + break; + case SQRT: + a = sqrt(a); + break; + case CBRT: + a = cbrt(a); + break; + case LOG: + a = 1 - log(FFMAX(FFMIN(1, a), 1e-6)) / log(1e-6); // zero = -120dBFS + break; + default: + av_assert0(0); + } + + if (showspectrum->color_mode == INTENSITY) { + float y, u, v; + int i; + + for (i = 1; i < sizeof(intensity_color_table) / sizeof(*intensity_color_table) - 1; i++) + if (intensity_color_table[i].a >= a) + break; + // i now is the first item >= the color + // now we know to interpolate between item i - 1 and i + if (a <= intensity_color_table[i - 1].a) { + y = intensity_color_table[i - 1].y; + u = intensity_color_table[i - 1].u; + v = intensity_color_table[i - 1].v; + } else if (a >= intensity_color_table[i].a) { + y = intensity_color_table[i].y; + u = intensity_color_table[i].u; + v = intensity_color_table[i].v; + } else { + float start = intensity_color_table[i - 1].a; + float end = intensity_color_table[i].a; + float lerpfrac = (a - start) / (end - start); + y = intensity_color_table[i - 1].y * (1.0f - lerpfrac) + + intensity_color_table[i].y * lerpfrac; + u = intensity_color_table[i - 1].u * (1.0f - lerpfrac) + + intensity_color_table[i].u * lerpfrac; + v = intensity_color_table[i - 1].v * (1.0f - lerpfrac) + + intensity_color_table[i].v * lerpfrac; + } + + out[0] += y * yf; + out[1] += u * uf; + out[2] += v * vf; + } else { + out[0] += a * yf; + out[1] += a * uf; + out[2] += a * vf; + } + } + } + + /* copy to output */ + if (showspectrum->sliding) { + for (plane = 0; plane < 3; plane++) { + for (y = 0; y < outlink->h; y++) { + uint8_t *p = outpicref->data[plane] + + y * outpicref->linesize[plane]; + memmove(p, p + 1, outlink->w - 1); + } + } + showspectrum->xpos = outlink->w - 1; + } + for (plane = 0; plane < 3; plane++) { + uint8_t *p = 
outpicref->data[plane] + + (outlink->h - 1) * outpicref->linesize[plane] + + showspectrum->xpos; + for (y = 0; y < outlink->h; y++) { + *p = rint(FFMAX(0, FFMIN(showspectrum->combine_buffer[3 * y + plane], 255))); + p -= outpicref->linesize[plane]; + } + } + + outpicref->pts = insamples->pts + + av_rescale_q(showspectrum->consumed, + (AVRational){ 1, inlink->sample_rate }, + outlink->time_base); + ret = push_frame(outlink); + if (ret < 0) + return ret; + } + + return add_samples; +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) +{ + AVFilterContext *ctx = inlink->dst; + ShowSpectrumContext *showspectrum = ctx->priv; + int ret = 0, left_samples = insamples->nb_samples; + + showspectrum->consumed = 0; + while (left_samples) { + int ret = plot_spectrum_column(inlink, insamples, left_samples); + if (ret < 0) + break; + showspectrum->consumed += ret; + left_samples -= ret; + } + + av_frame_free(&insamples); + return ret; +} + +static const AVFilterPad showspectrum_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad showspectrum_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_output, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_avf_showspectrum = { + .name = "showspectrum", + .description = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output."), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .priv_size = sizeof(ShowSpectrumContext), + .inputs = showspectrum_inputs, + .outputs = showspectrum_outputs, + .priv_class = &showspectrum_class, +}; diff --git a/ffmpeg1/libavfilter/avf_showwaves.c b/ffmpeg1/libavfilter/avf_showwaves.c new file mode 100644 index 0000000..095fc57 --- /dev/null +++ b/ffmpeg1/libavfilter/avf_showwaves.c @@ -0,0 +1,271 @@ +/* + * Copyright (c) 2012 Stefano Sabatini + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * audio to video multimedia filter + */ + +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "libavutil/parseutils.h" +#include "avfilter.h" +#include "formats.h" +#include "audio.h" +#include "video.h" +#include "internal.h" + +enum ShowWavesMode { + MODE_POINT, + MODE_LINE, + MODE_NB, +}; + +typedef struct { + const AVClass *class; + int w, h; + AVRational rate; + int buf_idx; + AVFrame *outpicref; + int req_fullfilled; + int n; + int sample_count_mod; + enum ShowWavesMode mode; +} ShowWavesContext; + +#define OFFSET(x) offsetof(ShowWavesContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM + +static const AVOption showwaves_options[] = { + { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS }, + { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS }, + { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS }, + { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS }, + { "n", "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS }, + + {"mode", "select display 
mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"}, + {"point", "draw a point for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT}, .flags=FLAGS, .unit="mode"}, + {"line", "draw a line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE}, .flags=FLAGS, .unit="mode"}, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(showwaves); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + ShowWavesContext *showwaves = ctx->priv; + int err; + + showwaves->class = &showwaves_class; + av_opt_set_defaults(showwaves); + showwaves->buf_idx = 0; + + if ((err = av_set_options_string(showwaves, args, "=", ":")) < 0) + return err; + + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + ShowWavesContext *showwaves = ctx->priv; + + av_frame_free(&showwaves->outpicref); +} + +static int query_formats(AVFilterContext *ctx) +{ + AVFilterFormats *formats = NULL; + AVFilterChannelLayouts *layouts = NULL; + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; + static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE }; + static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE }; + + /* set input audio formats */ + formats = ff_make_format_list(sample_fmts); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &inlink->out_formats); + + layouts = ff_all_channel_layouts(); + if (!layouts) + return AVERROR(ENOMEM); + ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts); + + formats = ff_all_samplerates(); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &inlink->out_samplerates); + + /* set output video format */ + formats = ff_make_format_list(pix_fmts); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &outlink->in_formats); + + return 0; +} + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AVFilterLink *inlink = ctx->inputs[0]; + 
ShowWavesContext *showwaves = ctx->priv; + + if (!showwaves->n) + showwaves->n = FFMAX(1, ((double)inlink->sample_rate / (showwaves->w * av_q2d(showwaves->rate))) + 0.5); + + outlink->w = showwaves->w; + outlink->h = showwaves->h; + outlink->sample_aspect_ratio = (AVRational){1,1}; + + outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n}, + (AVRational){showwaves->w,1}); + + av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n", + showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n); + return 0; +} + +inline static int push_frame(AVFilterLink *outlink) +{ + ShowWavesContext *showwaves = outlink->src->priv; + int ret; + + if ((ret = ff_filter_frame(outlink, showwaves->outpicref)) >= 0) + showwaves->req_fullfilled = 1; + showwaves->outpicref = NULL; + showwaves->buf_idx = 0; + return ret; +} + +static int request_frame(AVFilterLink *outlink) +{ + ShowWavesContext *showwaves = outlink->src->priv; + AVFilterLink *inlink = outlink->src->inputs[0]; + int ret; + + showwaves->req_fullfilled = 0; + do { + ret = ff_request_frame(inlink); + } while (!showwaves->req_fullfilled && ret >= 0); + + if (ret == AVERROR_EOF && showwaves->outpicref) + push_frame(outlink); + return ret; +} + +#define MAX_INT16 ((1<<15) -1) + +static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) +{ + AVFilterContext *ctx = inlink->dst; + AVFilterLink *outlink = ctx->outputs[0]; + ShowWavesContext *showwaves = ctx->priv; + const int nb_samples = insamples->nb_samples; + AVFrame *outpicref = showwaves->outpicref; + int linesize = outpicref ? 
outpicref->linesize[0] : 0; + int16_t *p = (int16_t *)insamples->data[0]; + int nb_channels = av_get_channel_layout_nb_channels(insamples->channel_layout); + int i, j, k, h, ret = 0; + const int n = showwaves->n; + const int x = 255 / (nb_channels * n); /* multiplication factor, pre-computed to avoid in-loop divisions */ + + /* draw data in the buffer */ + for (i = 0; i < nb_samples; i++) { + if (!showwaves->outpicref) { + showwaves->outpicref = outpicref = + ff_get_video_buffer(outlink, outlink->w, outlink->h); + if (!outpicref) + return AVERROR(ENOMEM); + outpicref->width = outlink->w; + outpicref->height = outlink->h; + outpicref->pts = insamples->pts + + av_rescale_q((p - (int16_t *)insamples->data[0]) / nb_channels, + (AVRational){ 1, inlink->sample_rate }, + outlink->time_base); + linesize = outpicref->linesize[0]; + memset(outpicref->data[0], 0, showwaves->h*linesize); + } + for (j = 0; j < nb_channels; j++) { + h = showwaves->h/2 - av_rescale(*p++, showwaves->h/2, MAX_INT16); + switch (showwaves->mode) { + case MODE_POINT: + if (h >= 0 && h < outlink->h) + *(outpicref->data[0] + showwaves->buf_idx + h * linesize) += x; + break; + + case MODE_LINE: + { + int start = showwaves->h/2, end = av_clip(h, 0, outlink->h-1); + if (start > end) FFSWAP(int16_t, start, end); + for (k = start; k < end; k++) + *(outpicref->data[0] + showwaves->buf_idx + k * linesize) += x; + break; + } + } + } + + showwaves->sample_count_mod++; + if (showwaves->sample_count_mod == n) { + showwaves->sample_count_mod = 0; + showwaves->buf_idx++; + } + if (showwaves->buf_idx == showwaves->w) + if ((ret = push_frame(outlink)) < 0) + break; + outpicref = showwaves->outpicref; + } + + av_frame_free(&insamples); + return ret; +} + +static const AVFilterPad showwaves_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad showwaves_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + 
.config_props = config_output, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_avf_showwaves = { + .name = "showwaves", + .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .priv_size = sizeof(ShowWavesContext), + .inputs = showwaves_inputs, + .outputs = showwaves_outputs, + .priv_class = &showwaves_class, +}; diff --git a/ffmpeg1/libavfilter/avfilter.c b/ffmpeg1/libavfilter/avfilter.c new file mode 100644 index 0000000..8a907dc --- /dev/null +++ b/ffmpeg1/libavfilter/avfilter.c @@ -0,0 +1,777 @@ +/* + * filter layer + * Copyright (c) 2007 Bobby Bingham + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/avassert.h" +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/common.h" +#include "libavutil/imgutils.h" +#include "libavutil/opt.h" +#include "libavutil/pixdesc.h" +#include "libavutil/rational.h" +#include "libavutil/samplefmt.h" + +#include "audio.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" +#include "audio.h" + +static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame); + +void ff_tlog_ref(void *ctx, AVFrame *ref, int end) +{ + av_unused char buf[16]; + ff_tlog(ctx, + "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64, + ref, ref->buf, ref->data[0], + ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3], + ref->pts, av_frame_get_pkt_pos(ref)); + + if (ref->width) { + ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c", + ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den, + ref->width, ref->height, + !ref->interlaced_frame ? 'P' : /* Progressive */ + ref->top_field_first ? 'T' : 'B', /* Top / Bottom */ + ref->key_frame, + av_get_picture_type_char(ref->pict_type)); + } + if (ref->nb_samples) { + ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d", + ref->channel_layout, + ref->nb_samples, + ref->sample_rate); + } + + ff_tlog(ctx, "]%s", end ? 
"\n" : ""); +} + +unsigned avfilter_version(void) { + av_assert0(LIBAVFILTER_VERSION_MICRO >= 100); + return LIBAVFILTER_VERSION_INT; +} + +const char *avfilter_configuration(void) +{ + return FFMPEG_CONFIGURATION; +} + +const char *avfilter_license(void) +{ +#define LICENSE_PREFIX "libavfilter license: " + return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1; +} + +void ff_command_queue_pop(AVFilterContext *filter) +{ + AVFilterCommand *c= filter->command_queue; + av_freep(&c->arg); + av_freep(&c->command); + filter->command_queue= c->next; + av_free(c); +} + +void ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off, + AVFilterPad **pads, AVFilterLink ***links, + AVFilterPad *newpad) +{ + unsigned i; + + idx = FFMIN(idx, *count); + + *pads = av_realloc(*pads, sizeof(AVFilterPad) * (*count + 1)); + *links = av_realloc(*links, sizeof(AVFilterLink*) * (*count + 1)); + memmove(*pads +idx+1, *pads +idx, sizeof(AVFilterPad) * (*count-idx)); + memmove(*links+idx+1, *links+idx, sizeof(AVFilterLink*) * (*count-idx)); + memcpy(*pads+idx, newpad, sizeof(AVFilterPad)); + (*links)[idx] = NULL; + + (*count)++; + for (i = idx+1; i < *count; i++) + if (*links[i]) + (*(unsigned *)((uint8_t *) *links[i] + padidx_off))++; +} + +int avfilter_link(AVFilterContext *src, unsigned srcpad, + AVFilterContext *dst, unsigned dstpad) +{ + AVFilterLink *link; + + if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad || + src->outputs[srcpad] || dst->inputs[dstpad]) + return -1; + + if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) { + av_log(src, AV_LOG_ERROR, + "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n", + src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"), + dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?")); + return AVERROR(EINVAL); + } + + src->outputs[srcpad] = + dst-> 
inputs[dstpad] = link = av_mallocz(sizeof(AVFilterLink)); + + link->src = src; + link->dst = dst; + link->srcpad = &src->output_pads[srcpad]; + link->dstpad = &dst->input_pads[dstpad]; + link->type = src->output_pads[srcpad].type; + av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1); + link->format = -1; + + return 0; +} + +void avfilter_link_free(AVFilterLink **link) +{ + if (!*link) + return; + + av_frame_free(&(*link)->partial_buf); + + av_freep(link); +} + +int avfilter_link_get_channels(AVFilterLink *link) +{ + return link->channels; +} + +void avfilter_link_set_closed(AVFilterLink *link, int closed) +{ + link->closed = closed; +} + +int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt, + unsigned filt_srcpad_idx, unsigned filt_dstpad_idx) +{ + int ret; + unsigned dstpad_idx = link->dstpad - link->dst->input_pads; + + av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' " + "between the filter '%s' and the filter '%s'\n", + filt->name, link->src->name, link->dst->name); + + link->dst->inputs[dstpad_idx] = NULL; + if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) { + /* failed to link output filter to new filter */ + link->dst->inputs[dstpad_idx] = link; + return ret; + } + + /* re-hookup the link to the new destination filter we inserted */ + link->dst = filt; + link->dstpad = &filt->input_pads[filt_srcpad_idx]; + filt->inputs[filt_srcpad_idx] = link; + + /* if any information on supported media formats already exists on the + * link, we need to preserve that */ + if (link->out_formats) + ff_formats_changeref(&link->out_formats, + &filt->outputs[filt_dstpad_idx]->out_formats); + + if (link->out_samplerates) + ff_formats_changeref(&link->out_samplerates, + &filt->outputs[filt_dstpad_idx]->out_samplerates); + if (link->out_channel_layouts) + ff_channel_layouts_changeref(&link->out_channel_layouts, + &filt->outputs[filt_dstpad_idx]->out_channel_layouts); + + return 0; +} + +int 
avfilter_config_links(AVFilterContext *filter) +{ + int (*config_link)(AVFilterLink *); + unsigned i; + int ret; + + for (i = 0; i < filter->nb_inputs; i ++) { + AVFilterLink *link = filter->inputs[i]; + AVFilterLink *inlink; + + if (!link) continue; + + inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL; + link->current_pts = AV_NOPTS_VALUE; + + switch (link->init_state) { + case AVLINK_INIT: + continue; + case AVLINK_STARTINIT: + av_log(filter, AV_LOG_INFO, "circular filter chain detected\n"); + return 0; + case AVLINK_UNINIT: + link->init_state = AVLINK_STARTINIT; + + if ((ret = avfilter_config_links(link->src)) < 0) + return ret; + + if (!(config_link = link->srcpad->config_props)) { + if (link->src->nb_inputs != 1) { + av_log(link->src, AV_LOG_ERROR, "Source filters and filters " + "with more than one input " + "must set config_props() " + "callbacks on all outputs\n"); + return AVERROR(EINVAL); + } + } else if ((ret = config_link(link)) < 0) { + av_log(link->src, AV_LOG_ERROR, + "Failed to configure output pad on %s\n", + link->src->name); + return ret; + } + + switch (link->type) { + case AVMEDIA_TYPE_VIDEO: + if (!link->time_base.num && !link->time_base.den) + link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q; + + if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den) + link->sample_aspect_ratio = inlink ? 
+ inlink->sample_aspect_ratio : (AVRational){1,1}; + + if (inlink && !link->frame_rate.num && !link->frame_rate.den) + link->frame_rate = inlink->frame_rate; + + if (inlink) { + if (!link->w) + link->w = inlink->w; + if (!link->h) + link->h = inlink->h; + } else if (!link->w || !link->h) { + av_log(link->src, AV_LOG_ERROR, + "Video source filters must set their output link's " + "width and height\n"); + return AVERROR(EINVAL); + } + break; + + case AVMEDIA_TYPE_AUDIO: + if (inlink) { + if (!link->time_base.num && !link->time_base.den) + link->time_base = inlink->time_base; + } + + if (!link->time_base.num && !link->time_base.den) + link->time_base = (AVRational) {1, link->sample_rate}; + } + + if ((config_link = link->dstpad->config_props)) + if ((ret = config_link(link)) < 0) { + av_log(link->src, AV_LOG_ERROR, + "Failed to configure input pad on %s\n", + link->dst->name); + return ret; + } + + link->init_state = AVLINK_INIT; + } + } + + return 0; +} + +void ff_tlog_link(void *ctx, AVFilterLink *link, int end) +{ + if (link->type == AVMEDIA_TYPE_VIDEO) { + ff_tlog(ctx, + "link[%p s:%dx%d fmt:%s %s->%s]%s", + link, link->w, link->h, + av_get_pix_fmt_name(link->format), + link->src ? link->src->filter->name : "", + link->dst ? link->dst->filter->name : "", + end ? "\n" : ""); + } else { + char buf[128]; + av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout); + + ff_tlog(ctx, + "link[%p r:%d cl:%s fmt:%s %s->%s]%s", + link, (int)link->sample_rate, buf, + av_get_sample_fmt_name(link->format), + link->src ? link->src->filter->name : "", + link->dst ? link->dst->filter->name : "", + end ? 
"\n" : ""); + } +} + +int ff_request_frame(AVFilterLink *link) +{ + int ret = -1; + FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1); + + if (link->closed) + return AVERROR_EOF; + if (link->srcpad->request_frame) + ret = link->srcpad->request_frame(link); + else if (link->src->inputs[0]) + ret = ff_request_frame(link->src->inputs[0]); + if (ret == AVERROR_EOF && link->partial_buf) { + AVFrame *pbuf = link->partial_buf; + link->partial_buf = NULL; + ret = ff_filter_frame_framed(link, pbuf); + } + if (ret == AVERROR_EOF) + link->closed = 1; + return ret; +} + +int ff_poll_frame(AVFilterLink *link) +{ + int i, min = INT_MAX; + + if (link->srcpad->poll_frame) + return link->srcpad->poll_frame(link); + + for (i = 0; i < link->src->nb_inputs; i++) { + int val; + if (!link->src->inputs[i]) + return -1; + val = ff_poll_frame(link->src->inputs[i]); + min = FFMIN(min, val); + } + + return min; +} + +void ff_update_link_current_pts(AVFilterLink *link, int64_t pts) +{ + if (pts == AV_NOPTS_VALUE) + return; + link->current_pts = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q); + /* TODO use duration */ + if (link->graph && link->age_index >= 0) + ff_avfilter_graph_update_heap(link->graph, link); +} + +int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags) +{ + if(!strcmp(cmd, "ping")){ + av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name); + return 0; + }else if(filter->filter->process_command) { + return filter->filter->process_command(filter, cmd, arg, res, res_len, flags); + } + return AVERROR(ENOSYS); +} + +#define MAX_REGISTERED_AVFILTERS_NB 256 + +static AVFilter *registered_avfilters[MAX_REGISTERED_AVFILTERS_NB + 1]; + +static int next_registered_avfilter_idx = 0; + +AVFilter *avfilter_get_by_name(const char *name) +{ + int i; + + for (i = 0; registered_avfilters[i]; i++) + if (!strcmp(registered_avfilters[i]->name, name)) + return 
registered_avfilters[i]; + + return NULL; +} + +int avfilter_register(AVFilter *filter) +{ + int i; + + if (next_registered_avfilter_idx == MAX_REGISTERED_AVFILTERS_NB) { + av_log(NULL, AV_LOG_ERROR, + "Maximum number of registered filters %d reached, " + "impossible to register filter with name '%s'\n", + MAX_REGISTERED_AVFILTERS_NB, filter->name); + return AVERROR(ENOMEM); + } + + for(i=0; filter->inputs && filter->inputs[i].name; i++) { + const AVFilterPad *input = &filter->inputs[i]; + av_assert0( !input->filter_frame + || (!input->start_frame && !input->end_frame)); + } + + registered_avfilters[next_registered_avfilter_idx++] = filter; + return 0; +} + +AVFilter **av_filter_next(AVFilter **filter) +{ + return filter ? ++filter : ®istered_avfilters[0]; +} + +void avfilter_uninit(void) +{ + memset(registered_avfilters, 0, sizeof(registered_avfilters)); + next_registered_avfilter_idx = 0; +} + +static int pad_count(const AVFilterPad *pads) +{ + int count; + + if (!pads) + return 0; + + for(count = 0; pads->name; count ++) pads ++; + return count; +} + +static const char *default_filter_name(void *filter_ctx) +{ + AVFilterContext *ctx = filter_ctx; + return ctx->name ? 
ctx->name : ctx->filter->name; +} + +static void *filter_child_next(void *obj, void *prev) +{ + AVFilterContext *ctx = obj; + if (!prev && ctx->filter && ctx->filter->priv_class) + return ctx->priv; + return NULL; +} + +static const AVClass *filter_child_class_next(const AVClass *prev) +{ + AVFilter **filter_ptr = NULL; + + /* find the filter that corresponds to prev */ + while (prev && *(filter_ptr = av_filter_next(filter_ptr))) + if ((*filter_ptr)->priv_class == prev) + break; + + /* could not find filter corresponding to prev */ + if (prev && !(*filter_ptr)) + return NULL; + + /* find next filter with specific options */ + while (*(filter_ptr = av_filter_next(filter_ptr))) + if ((*filter_ptr)->priv_class) + return (*filter_ptr)->priv_class; + return NULL; +} + +static const AVClass avfilter_class = { + .class_name = "AVFilter", + .item_name = default_filter_name, + .version = LIBAVUTIL_VERSION_INT, + .category = AV_CLASS_CATEGORY_FILTER, + .child_next = filter_child_next, + .child_class_next = filter_child_class_next, +}; + +const AVClass *avfilter_get_class(void) +{ + return &avfilter_class; +} + +int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name) +{ + AVFilterContext *ret; + *filter_ctx = NULL; + + if (!filter) + return AVERROR(EINVAL); + + ret = av_mallocz(sizeof(AVFilterContext)); + if (!ret) + return AVERROR(ENOMEM); + + ret->av_class = &avfilter_class; + ret->filter = filter; + ret->name = inst_name ? 
av_strdup(inst_name) : NULL; + if (filter->priv_size) { + ret->priv = av_mallocz(filter->priv_size); + if (!ret->priv) + goto err; + } + + ret->nb_inputs = pad_count(filter->inputs); + if (ret->nb_inputs ) { + ret->input_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_inputs); + if (!ret->input_pads) + goto err; + memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs); + ret->inputs = av_mallocz(sizeof(AVFilterLink*) * ret->nb_inputs); + if (!ret->inputs) + goto err; + } + + ret->nb_outputs = pad_count(filter->outputs); + if (ret->nb_outputs) { + ret->output_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_outputs); + if (!ret->output_pads) + goto err; + memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs); + ret->outputs = av_mallocz(sizeof(AVFilterLink*) * ret->nb_outputs); + if (!ret->outputs) + goto err; + } +#if FF_API_FOO_COUNT + ret->output_count = ret->nb_outputs; + ret->input_count = ret->nb_inputs; +#endif + + *filter_ctx = ret; + return 0; + +err: + av_freep(&ret->inputs); + av_freep(&ret->input_pads); + ret->nb_inputs = 0; + av_freep(&ret->outputs); + av_freep(&ret->output_pads); + ret->nb_outputs = 0; + av_freep(&ret->priv); + av_free(ret); + return AVERROR(ENOMEM); +} + +void avfilter_free(AVFilterContext *filter) +{ + int i; + AVFilterLink *link; + + if (!filter) + return; + + if (filter->filter->uninit) + filter->filter->uninit(filter); + if (filter->filter->shorthand) + av_opt_free(filter->priv); + + for (i = 0; i < filter->nb_inputs; i++) { + if ((link = filter->inputs[i])) { + if (link->src) + link->src->outputs[link->srcpad - link->src->output_pads] = NULL; + ff_formats_unref(&link->in_formats); + ff_formats_unref(&link->out_formats); + ff_formats_unref(&link->in_samplerates); + ff_formats_unref(&link->out_samplerates); + ff_channel_layouts_unref(&link->in_channel_layouts); + ff_channel_layouts_unref(&link->out_channel_layouts); + } + avfilter_link_free(&link); + } + for (i = 0; i < 
filter->nb_outputs; i++) { + if ((link = filter->outputs[i])) { + if (link->dst) + link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL; + ff_formats_unref(&link->in_formats); + ff_formats_unref(&link->out_formats); + ff_formats_unref(&link->in_samplerates); + ff_formats_unref(&link->out_samplerates); + ff_channel_layouts_unref(&link->in_channel_layouts); + ff_channel_layouts_unref(&link->out_channel_layouts); + } + avfilter_link_free(&link); + } + + av_freep(&filter->name); + av_freep(&filter->input_pads); + av_freep(&filter->output_pads); + av_freep(&filter->inputs); + av_freep(&filter->outputs); + av_freep(&filter->priv); + while(filter->command_queue){ + ff_command_queue_pop(filter); + } + av_free(filter); +} + +int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque) +{ + int ret=0; + + if (filter->filter->shorthand) { + av_assert0(filter->priv); + av_assert0(filter->filter->priv_class); + *(const AVClass **)filter->priv = filter->filter->priv_class; + av_opt_set_defaults(filter->priv); + ret = av_opt_set_from_string(filter->priv, args, + filter->filter->shorthand, "=", ":"); + if (ret < 0) + return ret; + args = NULL; + } + if (filter->filter->init_opaque) + ret = filter->filter->init_opaque(filter, args, opaque); + else if (filter->filter->init) + ret = filter->filter->init(filter, args); + return ret; +} + +const char *avfilter_pad_get_name(AVFilterPad *pads, int pad_idx) +{ + return pads[pad_idx].name; +} + +enum AVMediaType avfilter_pad_get_type(AVFilterPad *pads, int pad_idx) +{ + return pads[pad_idx].type; +} + +static int default_filter_frame(AVFilterLink *link, AVFrame *frame) +{ + return ff_filter_frame(link->dst->outputs[0], frame); +} + +static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame) +{ + int (*filter_frame)(AVFilterLink *, AVFrame *); + AVFilterPad *dst = link->dstpad; + AVFrame *out; + int ret; + AVFilterCommand *cmd= link->dst->command_queue; + int64_t pts; + + if (link->closed) { + 
av_frame_free(&frame); + return AVERROR_EOF; + } + + if (!(filter_frame = dst->filter_frame)) + filter_frame = default_filter_frame; + + /* copy the frame if needed */ + if (dst->needs_writable && !av_frame_is_writable(frame)) { + av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n"); + + /* Maybe use ff_copy_buffer_ref instead? */ + switch (link->type) { + case AVMEDIA_TYPE_VIDEO: + out = ff_get_video_buffer(link, link->w, link->h); + break; + case AVMEDIA_TYPE_AUDIO: + out = ff_get_audio_buffer(link, frame->nb_samples); + break; + default: return AVERROR(EINVAL); + } + if (!out) { + av_frame_free(&frame); + return AVERROR(ENOMEM); + } + av_frame_copy_props(out, frame); + + switch (link->type) { + case AVMEDIA_TYPE_VIDEO: + av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize, + frame->format, frame->width, frame->height); + break; + case AVMEDIA_TYPE_AUDIO: + av_samples_copy(out->extended_data, frame->extended_data, + 0, 0, frame->nb_samples, + av_get_channel_layout_nb_channels(frame->channel_layout), + frame->format); + break; + default: return AVERROR(EINVAL); + } + + av_frame_free(&frame); + } else + out = frame; + + while(cmd && cmd->time <= out->pts * av_q2d(link->time_base)){ + av_log(link->dst, AV_LOG_DEBUG, + "Processing command time:%f command:%s arg:%s\n", + cmd->time, cmd->command, cmd->arg); + avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags); + ff_command_queue_pop(link->dst); + cmd= link->dst->command_queue; + } + + pts = out->pts; + ret = filter_frame(link, out); + ff_update_link_current_pts(link, pts); + return ret; +} + +static int ff_filter_frame_needs_framing(AVFilterLink *link, AVFrame *frame) +{ + int insamples = frame->nb_samples, inpos = 0, nb_samples; + AVFrame *pbuf = link->partial_buf; + int nb_channels = av_frame_get_channels(frame); + int ret = 0; + + /* Handle framing (min_samples, max_samples) */ + while (insamples) { + if (!pbuf) { + AVRational samples_tb = { 1, 
link->sample_rate }; + pbuf = ff_get_audio_buffer(link, link->partial_buf_size); + if (!pbuf) { + av_log(link->dst, AV_LOG_WARNING, + "Samples dropped due to memory allocation failure.\n"); + return 0; + } + av_frame_copy_props(pbuf, frame); + pbuf->pts = frame->pts + + av_rescale_q(inpos, samples_tb, link->time_base); + pbuf->nb_samples = 0; + } + nb_samples = FFMIN(insamples, + link->partial_buf_size - pbuf->nb_samples); + av_samples_copy(pbuf->extended_data, frame->extended_data, + pbuf->nb_samples, inpos, + nb_samples, nb_channels, link->format); + inpos += nb_samples; + insamples -= nb_samples; + pbuf->nb_samples += nb_samples; + if (pbuf->nb_samples >= link->min_samples) { + ret = ff_filter_frame_framed(link, pbuf); + pbuf = NULL; + } + } + av_frame_free(&frame); + link->partial_buf = pbuf; + return ret; +} + +int ff_filter_frame(AVFilterLink *link, AVFrame *frame) +{ + FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1); + + /* Consistency checks */ + if (link->type == AVMEDIA_TYPE_VIDEO) { + if (strcmp(link->dst->filter->name, "scale")) { + av_assert1(frame->format == link->format); + av_assert1(frame->width == link->w); + av_assert1(frame->height == link->h); + } + } else { + av_assert1(frame->format == link->format); + av_assert1(av_frame_get_channels(frame) == link->channels); + av_assert1(frame->channel_layout == link->channel_layout); + av_assert1(frame->sample_rate == link->sample_rate); + } + + /* Go directly to actual filtering if possible */ + if (link->type == AVMEDIA_TYPE_AUDIO && + link->min_samples && + (link->partial_buf || + frame->nb_samples < link->min_samples || + frame->nb_samples > link->max_samples)) { + return ff_filter_frame_needs_framing(link, frame); + } else { + return ff_filter_frame_framed(link, frame); + } +} diff --git a/ffmpeg1/libavfilter/avfilter.h b/ffmpeg1/libavfilter/avfilter.h new file mode 100644 index 0000000..455161f --- /dev/null +++ 
b/ffmpeg1/libavfilter/avfilter.h @@ -0,0 +1,895 @@ +/* + * filter layer + * Copyright (c) 2007 Bobby Bingham + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_AVFILTER_H +#define AVFILTER_AVFILTER_H + +/** + * @file + * @ingroup lavfi + * external API header + */ + +/** + * @defgroup lavfi Libavfilter + * @{ + */ + +#include + +#include "libavutil/avutil.h" +#include "libavutil/dict.h" +#include "libavutil/frame.h" +#include "libavutil/log.h" +#include "libavutil/samplefmt.h" +#include "libavutil/pixfmt.h" +#include "libavutil/rational.h" + +#include "libavfilter/version.h" + +/** + * Return the LIBAVFILTER_VERSION_INT constant. + */ +unsigned avfilter_version(void); + +/** + * Return the libavfilter build-time configuration. + */ +const char *avfilter_configuration(void); + +/** + * Return the libavfilter license. + */ +const char *avfilter_license(void); + +/** + * Get the class for the AVFilterContext struct. 
+ */ +const AVClass *avfilter_get_class(void); + +typedef struct AVFilterContext AVFilterContext; +typedef struct AVFilterLink AVFilterLink; +typedef struct AVFilterPad AVFilterPad; +typedef struct AVFilterFormats AVFilterFormats; + +#if FF_API_AVFILTERBUFFER +/** + * A reference-counted buffer data type used by the filter system. Filters + * should not store pointers to this structure directly, but instead use the + * AVFilterBufferRef structure below. + */ +typedef struct AVFilterBuffer { + uint8_t *data[8]; ///< buffer data for each plane/channel + + /** + * pointers to the data planes/channels. + * + * For video, this should simply point to data[]. + * + * For planar audio, each channel has a separate data pointer, and + * linesize[0] contains the size of each channel buffer. + * For packed audio, there is just one data pointer, and linesize[0] + * contains the total size of the buffer for all channels. + * + * Note: Both data and extended_data will always be set, but for planar + * audio with more channels that can fit in data, extended_data must be used + * in order to access all channels. + */ + uint8_t **extended_data; + int linesize[8]; ///< number of bytes per line + + /** private data to be used by a custom free function */ + void *priv; + /** + * A pointer to the function to deallocate this buffer if the default + * function is not sufficient. This could, for example, add the memory + * back into a memory pool to be reused later without the overhead of + * reallocating it from scratch. 
+ */ + void (*free)(struct AVFilterBuffer *buf); + + int format; ///< media format + int w, h; ///< width and height of the allocated buffer + unsigned refcount; ///< number of references to this buffer +} AVFilterBuffer; + +#define AV_PERM_READ 0x01 ///< can read from the buffer +#define AV_PERM_WRITE 0x02 ///< can write to the buffer +#define AV_PERM_PRESERVE 0x04 ///< nobody else can overwrite the buffer +#define AV_PERM_REUSE 0x08 ///< can output the buffer multiple times, with the same contents each time +#define AV_PERM_REUSE2 0x10 ///< can output the buffer multiple times, modified each time +#define AV_PERM_NEG_LINESIZES 0x20 ///< the buffer requested can have negative linesizes +#define AV_PERM_ALIGN 0x40 ///< the buffer must be aligned + +#define AVFILTER_ALIGN 16 //not part of ABI + +/** + * Audio specific properties in a reference to an AVFilterBuffer. Since + * AVFilterBufferRef is common to different media formats, audio specific + * per reference properties must be separated out. + */ +typedef struct AVFilterBufferRefAudioProps { + uint64_t channel_layout; ///< channel layout of audio buffer + int nb_samples; ///< number of audio samples per channel + int sample_rate; ///< audio buffer sample rate + int channels; ///< number of channels (do not access directly) +} AVFilterBufferRefAudioProps; + +/** + * Video specific properties in a reference to an AVFilterBuffer. Since + * AVFilterBufferRef is common to different media formats, video specific + * per reference properties must be separated out. 
+ */ +typedef struct AVFilterBufferRefVideoProps { + int w; ///< image width + int h; ///< image height + AVRational sample_aspect_ratio; ///< sample aspect ratio + int interlaced; ///< is frame interlaced + int top_field_first; ///< field order + enum AVPictureType pict_type; ///< picture type of the frame + int key_frame; ///< 1 -> keyframe, 0-> not + int qp_table_linesize; ///< qp_table stride + int qp_table_size; ///< qp_table size + int8_t *qp_table; ///< array of Quantization Parameters +} AVFilterBufferRefVideoProps; + +/** + * A reference to an AVFilterBuffer. Since filters can manipulate the origin of + * a buffer to, for example, crop image without any memcpy, the buffer origin + * and dimensions are per-reference properties. Linesize is also useful for + * image flipping, frame to field filters, etc, and so is also per-reference. + * + * TODO: add anything necessary for frame reordering + */ +typedef struct AVFilterBufferRef { + AVFilterBuffer *buf; ///< the buffer that this is a reference to + uint8_t *data[8]; ///< picture/audio data for each plane + /** + * pointers to the data planes/channels. + * + * For video, this should simply point to data[]. + * + * For planar audio, each channel has a separate data pointer, and + * linesize[0] contains the size of each channel buffer. + * For packed audio, there is just one data pointer, and linesize[0] + * contains the total size of the buffer for all channels. + * + * Note: Both data and extended_data will always be set, but for planar + * audio with more channels that can fit in data, extended_data must be used + * in order to access all channels. + */ + uint8_t **extended_data; + int linesize[8]; ///< number of bytes per line + + AVFilterBufferRefVideoProps *video; ///< video buffer specific properties + AVFilterBufferRefAudioProps *audio; ///< audio buffer specific properties + + /** + * presentation timestamp. 
The time unit may change during + * filtering, as it is specified in the link and the filter code + * may need to rescale the PTS accordingly. + */ + int64_t pts; + int64_t pos; ///< byte position in stream, -1 if unknown + + int format; ///< media format + + int perms; ///< permissions, see the AV_PERM_* flags + + enum AVMediaType type; ///< media type of buffer data + + AVDictionary *metadata; ///< dictionary containing metadata key=value tags +} AVFilterBufferRef; + +/** + * Copy properties of src to dst, without copying the actual data + */ +attribute_deprecated +void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src); + +/** + * Add a new reference to a buffer. + * + * @param ref an existing reference to the buffer + * @param pmask a bitmask containing the allowable permissions in the new + * reference + * @return a new reference to the buffer with the same properties as the + * old, excluding any permissions denied by pmask + */ +attribute_deprecated +AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask); + +/** + * Remove a reference to a buffer. If this is the last reference to the + * buffer, the buffer itself is also automatically freed. + * + * @param ref reference to the buffer, may be NULL + * + * @note it is recommended to use avfilter_unref_bufferp() instead of this + * function + */ +attribute_deprecated +void avfilter_unref_buffer(AVFilterBufferRef *ref); + +/** + * Remove a reference to a buffer and set the pointer to NULL. + * If this is the last reference to the buffer, the buffer itself + * is also automatically freed. + * + * @param ref pointer to the buffer reference + */ +attribute_deprecated +void avfilter_unref_bufferp(AVFilterBufferRef **ref); +#endif + +/** + * Get the number of channels of a buffer reference. + */ +attribute_deprecated +int avfilter_ref_get_channels(AVFilterBufferRef *ref); + +#if FF_API_AVFILTERPAD_PUBLIC +/** + * A filter pad used for either input or output. 
+ * + * See doc/filter_design.txt for details on how to implement the methods. + * + * @warning this struct might be removed from public API. + * users should call avfilter_pad_get_name() and avfilter_pad_get_type() + * to access the name and type fields; there should be no need to access + * any other fields from outside of libavfilter. + */ +struct AVFilterPad { + /** + * Pad name. The name is unique among inputs and among outputs, but an + * input may have the same name as an output. This may be NULL if this + * pad has no need to ever be referenced by name. + */ + const char *name; + + /** + * AVFilterPad type. + */ + enum AVMediaType type; + + /** + * Input pads: + * Minimum required permissions on incoming buffers. Any buffer with + * insufficient permissions will be automatically copied by the filter + * system to a new buffer which provides the needed access permissions. + * + * Output pads: + * Guaranteed permissions on outgoing buffers. Any buffer pushed on the + * link must have at least these permissions; this fact is checked by + * asserts. It can be used to optimize buffer allocation. + */ + attribute_deprecated int min_perms; + + /** + * Input pads: + * Permissions which are not accepted on incoming buffers. Any buffer + * which has any of these permissions set will be automatically copied + * by the filter system to a new buffer which does not have those + * permissions. This can be used to easily disallow buffers with + * AV_PERM_REUSE. + * + * Output pads: + * Permissions which are automatically removed on outgoing buffers. It + * can be used to optimize buffer allocation. + */ + attribute_deprecated int rej_perms; + + /** + * @deprecated unused + */ + int (*start_frame)(AVFilterLink *link, AVFilterBufferRef *picref); + + /** + * Callback function to get a video buffer. If NULL, the filter system will + * use ff_default_get_video_buffer(). + * + * Input video pads only. 
+ */ + AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h); + + /** + * Callback function to get an audio buffer. If NULL, the filter system will + * use ff_default_get_audio_buffer(). + * + * Input audio pads only. + */ + AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples); + + /** + * @deprecated unused + */ + int (*end_frame)(AVFilterLink *link); + + /** + * @deprecated unused + */ + int (*draw_slice)(AVFilterLink *link, int y, int height, int slice_dir); + + /** + * Filtering callback. This is where a filter receives a frame with + * audio/video data and should do its processing. + * + * Input pads only. + * + * @return >= 0 on success, a negative AVERROR on error. This function + * must ensure that frame is properly unreferenced on error if it + * hasn't been passed on to another filter. + */ + int (*filter_frame)(AVFilterLink *link, AVFrame *frame); + + /** + * Frame poll callback. This returns the number of immediately available + * samples. It should return a positive value if the next request_frame() + * is guaranteed to return one frame (with no delay). + * + * Defaults to just calling the source poll_frame() method. + * + * Output pads only. + */ + int (*poll_frame)(AVFilterLink *link); + + /** + * Frame request callback. A call to this should result in at least one + * frame being output over the given link. This should return zero on + * success, and another value on error. + * See ff_request_frame() for the error codes with a specific + * meaning. + * + * Output pads only. + */ + int (*request_frame)(AVFilterLink *link); + + /** + * Link configuration callback. + * + * For output pads, this should set the following link properties: + * video: width, height, sample_aspect_ratio, time_base + * audio: sample_rate. + * + * This should NOT set properties such as format, channel_layout, etc which + * are negotiated between filters by the filter system using the + * query_formats() callback before this function is called. 
+ * + * For input pads, this should check the properties of the link, and update + * the filter's internal state as necessary. + * + * For both input and output pads, this should return zero on success, + * and another value on error. + */ + int (*config_props)(AVFilterLink *link); + + /** + * The filter expects a fifo to be inserted on its input link, + * typically because it has a delay. + * + * input pads only. + */ + int needs_fifo; + + int needs_writable; +}; +#endif + +/** + * Get the name of an AVFilterPad. + * + * @param pads an array of AVFilterPads + * @param pad_idx index of the pad in the array it; is the caller's + * responsibility to ensure the index is valid + * + * @return name of the pad_idx'th pad in pads + */ +const char *avfilter_pad_get_name(AVFilterPad *pads, int pad_idx); + +/** + * Get the type of an AVFilterPad. + * + * @param pads an array of AVFilterPads + * @param pad_idx index of the pad in the array; it is the caller's + * responsibility to ensure the index is valid + * + * @return type of the pad_idx'th pad in pads + */ +enum AVMediaType avfilter_pad_get_type(AVFilterPad *pads, int pad_idx); + +/** + * Filter definition. This defines the pads a filter contains, and all the + * callback functions used to interact with the filter. + */ +typedef struct AVFilter { + const char *name; ///< filter name + + /** + * A description for the filter. You should use the + * NULL_IF_CONFIG_SMALL() macro to define it. + */ + const char *description; + + const AVFilterPad *inputs; ///< NULL terminated list of inputs. NULL if none + const AVFilterPad *outputs; ///< NULL terminated list of outputs. NULL if none + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavfilter and can be changed and + * removed at will. + * New public fields should be added right above. 
+ ***************************************************************** + */ + + /** + * Filter initialization function. Args contains the user-supplied + * parameters. FIXME: maybe an AVOption-based system would be better? + */ + int (*init)(AVFilterContext *ctx, const char *args); + + /** + * Filter uninitialization function. Should deallocate any memory held + * by the filter, release any buffer references, etc. This does not need + * to deallocate the AVFilterContext->priv memory itself. + */ + void (*uninit)(AVFilterContext *ctx); + + /** + * Queries formats/layouts supported by the filter and its pads, and sets + * the in_formats/in_chlayouts for links connected to its output pads, + * and out_formats/out_chlayouts for links connected to its input pads. + * + * @return zero on success, a negative value corresponding to an + * AVERROR code otherwise + */ + int (*query_formats)(AVFilterContext *); + + int priv_size; ///< size of private data to allocate for the filter + + /** + * Make the filter instance process a command. + * + * @param cmd the command to process, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param res a buffer with size res_size where the filter(s) can return a response. This must not change when the command is not supported. + * @param flags if AVFILTER_CMD_FLAG_FAST is set and the command would be + * time consuming then a filter should treat it like an unsupported command + * + * @returns >=0 on success otherwise an error code. + * AVERROR(ENOSYS) on unsupported commands + */ + int (*process_command)(AVFilterContext *, const char *cmd, const char *arg, char *res, int res_len, int flags); + + /** + * Filter initialization function, alternative to the init() + * callback. Args contains the user-supplied parameters, opaque is + * used for providing binary data. 
+ */ + int (*init_opaque)(AVFilterContext *ctx, const char *args, void *opaque); + + const AVClass *priv_class; ///< private class, containing filter specific options + + /** + * Shorthand syntax for init arguments. + * If this field is set (even to an empty list), just before init the + * private class will be set and the arguments string will be parsed + * using av_opt_set_from_string() with "=" and ":" delimiters, and + * av_opt_free() will be called just after uninit. + */ + const char *const *shorthand; +} AVFilter; + +/** An instance of a filter */ +struct AVFilterContext { + const AVClass *av_class; ///< needed for av_log() + + AVFilter *filter; ///< the AVFilter of which this is an instance + + char *name; ///< name of this filter instance + + AVFilterPad *input_pads; ///< array of input pads + AVFilterLink **inputs; ///< array of pointers to input links +#if FF_API_FOO_COUNT + unsigned input_count; ///< @deprecated use nb_inputs +#endif + unsigned nb_inputs; ///< number of input pads + + AVFilterPad *output_pads; ///< array of output pads + AVFilterLink **outputs; ///< array of pointers to output links +#if FF_API_FOO_COUNT + unsigned output_count; ///< @deprecated use nb_outputs +#endif + unsigned nb_outputs; ///< number of output pads + + void *priv; ///< private data for use by the filter + + struct AVFilterCommand *command_queue; +}; + +/** + * A link between two filters. This contains pointers to the source and + * destination filters between which this link exists, and the indexes of + * the pads involved. In addition, this link also contains the parameters + * which have been negotiated and agreed upon between the filter, such as + * image dimensions, format, etc. 
+ */ +struct AVFilterLink { + AVFilterContext *src; ///< source filter + AVFilterPad *srcpad; ///< output pad on the source filter + + AVFilterContext *dst; ///< dest filter + AVFilterPad *dstpad; ///< input pad on the dest filter + + enum AVMediaType type; ///< filter media type + + /* These parameters apply only to video */ + int w; ///< agreed upon image width + int h; ///< agreed upon image height + AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio + /* These parameters apply only to audio */ + uint64_t channel_layout; ///< channel layout of current buffer (see libavutil/channel_layout.h) + int sample_rate; ///< samples per second + + int format; ///< agreed upon media format + + /** + * Define the time base used by the PTS of the frames/samples + * which will pass through this link. + * During the configuration stage, each filter is supposed to + * change only the output timebase, while the timebase of the + * input link is assumed to be an unchangeable property. + */ + AVRational time_base; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavfilter and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + /** + * Lists of formats and channel layouts supported by the input and output + * filters respectively. These lists are used for negotiating the format + * to actually be used, which will be loaded into the format and + * channel_layout members, above, when chosen. + * + */ + AVFilterFormats *in_formats; + AVFilterFormats *out_formats; + + /** + * Lists of channel layouts and sample rates used for automatic + * negotiation. 
+ */ + AVFilterFormats *in_samplerates; + AVFilterFormats *out_samplerates; + struct AVFilterChannelLayouts *in_channel_layouts; + struct AVFilterChannelLayouts *out_channel_layouts; + + /** + * Audio only, the destination filter sets this to a non-zero value to + * request that buffers with the given number of samples should be sent to + * it. AVFilterPad.needs_fifo must also be set on the corresponding input + * pad. + * Last buffer before EOF will be padded with silence. + */ + int request_samples; + + /** stage of the initialization of the link properties (dimensions, etc) */ + enum { + AVLINK_UNINIT = 0, ///< not started + AVLINK_STARTINIT, ///< started, but incomplete + AVLINK_INIT ///< complete + } init_state; + + struct AVFilterPool *pool; + + /** + * Graph the filter belongs to. + */ + struct AVFilterGraph *graph; + + /** + * Current timestamp of the link, as defined by the most recent + * frame(s), in AV_TIME_BASE units. + */ + int64_t current_pts; + + /** + * Index in the age array. + */ + int age_index; + + /** + * Frame rate of the stream on the link, or 1/0 if unknown; + * if left to 0/0, will be automatically be copied from the first input + * of the source filter if it exists. + * + * Sources should set it to the best estimation of the real frame rate. + * Filters should update it if necessary depending on their function. + * Sinks can use it to set a default output frame rate. + * It is similar to the r_frame_rate field in AVStream. + */ + AVRational frame_rate; + + /** + * Buffer partially filled with samples to achieve a fixed/minimum size. + */ + AVFrame *partial_buf; + + /** + * Size of the partial buffer to allocate. + * Must be between min_samples and max_samples. + */ + int partial_buf_size; + + /** + * Minimum number of samples to filter at once. If filter_frame() is + * called with fewer samples, it will accumulate them in partial_buf. + * This field and the related ones must not be changed after filtering + * has started. 
+ * If 0, all related fields are ignored. + */ + int min_samples; + + /** + * Maximum number of samples to filter at once. If filter_frame() is + * called with more samples, it will split them. + */ + int max_samples; + + /** + * The buffer reference currently being received across the link by the + * destination filter. This is used internally by the filter system to + * allow automatic copying of buffers which do not have sufficient + * permissions for the destination. This should not be accessed directly + * by the filters. + */ + AVFilterBufferRef *cur_buf_copy; + + /** + * True if the link is closed. + * If set, all attemps of start_frame, filter_frame or request_frame + * will fail with AVERROR_EOF, and if necessary the reference will be + * destroyed. + * If request_frame returns AVERROR_EOF, this flag is set on the + * corresponding link. + * It can be set also be set by either the source or the destination + * filter. + */ + int closed; + + /** + * Number of channels. + */ + int channels; +}; + +/** + * Link two filters together. + * + * @param src the source filter + * @param srcpad index of the output pad on the source filter + * @param dst the destination filter + * @param dstpad index of the input pad on the destination filter + * @return zero on success + */ +int avfilter_link(AVFilterContext *src, unsigned srcpad, + AVFilterContext *dst, unsigned dstpad); + +/** + * Free the link in *link, and set its pointer to NULL. + */ +void avfilter_link_free(AVFilterLink **link); + +/** + * Get the number of channels of a link. + */ +int avfilter_link_get_channels(AVFilterLink *link); + +/** + * Set the closed field of a link. + */ +void avfilter_link_set_closed(AVFilterLink *link, int closed); + +/** + * Negotiate the media format, dimensions, etc of all inputs to a filter. 
+ * + * @param filter the filter to negotiate the properties for its inputs + * @return zero on successful negotiation + */ +int avfilter_config_links(AVFilterContext *filter); + +#if FF_API_AVFILTERBUFFER +/** + * Create a buffer reference wrapped around an already allocated image + * buffer. + * + * @param data pointers to the planes of the image to reference + * @param linesize linesizes for the planes of the image to reference + * @param perms the required access permissions + * @param w the width of the image specified by the data and linesize arrays + * @param h the height of the image specified by the data and linesize arrays + * @param format the pixel format of the image specified by the data and linesize arrays + */ +attribute_deprecated +AVFilterBufferRef * +avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms, + int w, int h, enum AVPixelFormat format); + +/** + * Create an audio buffer reference wrapped around an already + * allocated samples buffer. + * + * See avfilter_get_audio_buffer_ref_from_arrays_channels() for a version + * that can handle unknown channel layouts. + * + * @param data pointers to the samples plane buffers + * @param linesize linesize for the samples plane buffers + * @param perms the required access permissions + * @param nb_samples number of samples per channel + * @param sample_fmt the format of each sample in the buffer to allocate + * @param channel_layout the channel layout of the buffer + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data, + int linesize, + int perms, + int nb_samples, + enum AVSampleFormat sample_fmt, + uint64_t channel_layout); +/** + * Create an audio buffer reference wrapped around an already + * allocated samples buffer. 
+ * + * @param data pointers to the samples plane buffers + * @param linesize linesize for the samples plane buffers + * @param perms the required access permissions + * @param nb_samples number of samples per channel + * @param sample_fmt the format of each sample in the buffer to allocate + * @param channels the number of channels of the buffer + * @param channel_layout the channel layout of the buffer, + * must be either 0 or consistent with channels + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data, + int linesize, + int perms, + int nb_samples, + enum AVSampleFormat sample_fmt, + int channels, + uint64_t channel_layout); + +#endif + + +#define AVFILTER_CMD_FLAG_ONE 1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically +#define AVFILTER_CMD_FLAG_FAST 2 ///< Only execute command when its fast (like a video out that supports contrast adjustment in hw) + +/** + * Make the filter instance process a command. + * It is recommended to use avfilter_graph_send_command(). + */ +int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags); + +/** Initialize the filter system. Register all builtin filters. */ +void avfilter_register_all(void); + +/** Uninitialize the filter system. Unregister all filters. */ +void avfilter_uninit(void); + +/** + * Register a filter. This is only needed if you plan to use + * avfilter_get_by_name later to lookup the AVFilter structure by name. A + * filter can still by instantiated with avfilter_open even if it is not + * registered. + * + * @param filter the filter to register + * @return 0 if the registration was successful, a negative value + * otherwise + */ +int avfilter_register(AVFilter *filter); + +/** + * Get a filter definition matching the given name. 
+ * + * @param name the filter name to find + * @return the filter definition, if any matching one is registered. + * NULL if none found. + */ +AVFilter *avfilter_get_by_name(const char *name); + +/** + * If filter is NULL, returns a pointer to the first registered filter pointer, + * if filter is non-NULL, returns the next pointer after filter. + * If the returned pointer points to NULL, the last registered filter + * was already reached. + */ +AVFilter **av_filter_next(AVFilter **filter); + +/** + * Create a filter instance. + * + * @param filter_ctx put here a pointer to the created filter context + * on success, NULL on failure + * @param filter the filter to create an instance of + * @param inst_name Name to give to the new instance. Can be NULL for none. + * @return >= 0 in case of success, a negative error code otherwise + */ +int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name); + +/** + * Initialize a filter. + * + * @param filter the filter to initialize + * @param args A string of parameters to use when initializing the filter. + * The format and meaning of this string varies by filter. + * @param opaque Any extra non-string data needed by the filter. The meaning + * of this parameter varies by filter. + * @return zero on success + */ +int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque); + +/** + * Free a filter context. + * + * @param filter the filter to free + */ +void avfilter_free(AVFilterContext *filter); + +/** + * Insert a filter in the middle of an existing link. 
+ * + * @param link the link into which the filter should be inserted + * @param filt the filter to be inserted + * @param filt_srcpad_idx the input pad on the filter to connect + * @param filt_dstpad_idx the output pad on the filter to connect + * @return zero on success + */ +int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt, + unsigned filt_srcpad_idx, unsigned filt_dstpad_idx); + +#if FF_API_AVFILTERBUFFER +/** + * Copy the frame properties of src to dst, without copying the actual + * image data. + * + * @return 0 on success, a negative number on error. + */ +attribute_deprecated +int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src); + +/** + * Copy the frame properties and data pointers of src to dst, without copying + * the actual data. + * + * @return 0 on success, a negative number on error. + */ +attribute_deprecated +int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src); +#endif + +/** + * @} + */ + +#endif /* AVFILTER_AVFILTER_H */ diff --git a/ffmpeg1/libavfilter/avfiltergraph.c b/ffmpeg1/libavfilter/avfiltergraph.c new file mode 100644 index 0000000..89cdda3 --- /dev/null +++ b/ffmpeg1/libavfilter/avfiltergraph.c @@ -0,0 +1,1073 @@ +/* + * filter graphs + * Copyright (c) 2008 Vitor Sessak + * Copyright (c) 2007 Bobby Bingham + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* NOTE(review): the include target was lost in extraction; upstream FFmpeg
 * of this era uses <string.h> here (for strcmp below) -- confirm against
 * the original patch. */
#include <string.h>

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavcodec/avcodec.h" // avcodec_find_best_pix_fmt_of_2()
#include "avfilter.h"
#include "avfiltergraph.h"
#include "formats.h"
#include "internal.h"

#define OFFSET(x) offsetof(AVFilterGraph, x)

/* Graph-level string options exposed through the AVClass/AVOption API. */
static const AVOption options[] = {
{"scale_sws_opts"     , "default scale filter options"    , OFFSET(scale_sws_opts)    , AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, 0 },
{"aresample_swr_opts" , "default aresample filter options", OFFSET(aresample_swr_opts), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, 0 },
{0}
};


static const AVClass filtergraph_class = {
    .class_name = "AVFilterGraph",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_FILTER,
};

/* Allocate an empty filter graph with its AVClass set; NULL on OOM. */
AVFilterGraph *avfilter_graph_alloc(void)
{
    AVFilterGraph *ret = av_mallocz(sizeof(*ret));
    if (!ret)
        return NULL;
    ret->av_class = &filtergraph_class;
    return ret;
}

/* Free every filter in the graph, the graph's option strings and the
 * sink-link heap, then the graph itself; *graph ends up NULL (av_freep).
 * A NULL *graph is a no-op. */
void avfilter_graph_free(AVFilterGraph **graph)
{
    if (!*graph)
        return;
    /* free back-to-front so nb_filters stays consistent while freeing */
    for (; (*graph)->nb_filters > 0; (*graph)->nb_filters--)
        avfilter_free((*graph)->filters[(*graph)->nb_filters - 1]);
    av_freep(&(*graph)->sink_links);
    av_freep(&(*graph)->scale_sws_opts);
    av_freep(&(*graph)->aresample_swr_opts);
    av_freep(&(*graph)->resample_lavr_opts);
    av_freep(&(*graph)->filters);
    av_freep(graph);
}

/* Append an already-created filter context to the graph's filter array.
 * Returns 0 on success or AVERROR(ENOMEM); on failure the old array is
 * left intact (realloc result checked before overwriting). */
int avfilter_graph_add_filter(AVFilterGraph *graph, AVFilterContext *filter)
{
    AVFilterContext **filters = av_realloc(graph->filters,
                                           sizeof(*filters) * (graph->nb_filters + 1));
    if (!filters)
        return AVERROR(ENOMEM);

    graph->filters = filters;
    graph->filters[graph->nb_filters++] = filter;

    return 0;
}

/* Open + init a filter instance and register it with the graph.
 * On any failure the partially-created context is freed and *filt_ctx
 * is reset to NULL; returns 0 or a negative AVERROR code. */
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, AVFilter *filt,
                                 const char *name, const char *args, void *opaque,
                                 AVFilterGraph *graph_ctx)
{
    int ret;

    if ((ret = avfilter_open(filt_ctx, filt, name)) < 0)
        goto fail;
    if ((ret = avfilter_init_filter(*filt_ctx, args, opaque)) < 0)
        goto fail;
    if ((ret = avfilter_graph_add_filter(graph_ctx, *filt_ctx)) < 0)
        goto fail;
    return 0;

fail:
    if (*filt_ctx)
        avfilter_free(*filt_ctx);
    *filt_ctx = NULL;
    return ret;
}

/* Store the auto-conversion policy (AVFILTER_AUTO_CONVERT_* flags). */
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
{
    graph->disable_auto_convert = flags;
}

/**
 * Check for the validity of graph.
 *
 * A graph is considered valid if all its input and output pads are
 * connected.
 *
 * @return 0 in case of success, a negative value otherwise
 */
static int graph_check_validity(AVFilterGraph *graph, AVClass *log_ctx)
{
    AVFilterContext *filt;
    int i, j;

    for (i = 0; i < graph->nb_filters; i++) {
        const AVFilterPad *pad;
        filt = graph->filters[i];

        for (j = 0; j < filt->nb_inputs; j++) {
            if (!filt->inputs[j] || !filt->inputs[j]->src) {
                pad = &filt->input_pads[j];
                av_log(log_ctx, AV_LOG_ERROR,
                       "Input pad \"%s\" with type %s of the filter instance \"%s\" of %s not connected to any source\n",
                       pad->name, av_get_media_type_string(pad->type), filt->name, filt->filter->name);
                return AVERROR(EINVAL);
            }
        }

        for (j = 0; j < filt->nb_outputs; j++) {
            if (!filt->outputs[j] || !filt->outputs[j]->dst) {
                pad = &filt->output_pads[j];
                av_log(log_ctx, AV_LOG_ERROR,
                       "Output pad \"%s\" with type %s of the filter instance \"%s\" of %s not connected to any destination\n",
                       pad->name, av_get_media_type_string(pad->type), filt->name, filt->filter->name);
                return AVERROR(EINVAL);
            }
        }
    }

    return 0;
}

/**
 * Configure all the links of graphctx.
 *
 * @return 0 in case of success, a negative value otherwise
 */
static int graph_config_links(AVFilterGraph *graph, AVClass *log_ctx)
{
    AVFilterContext *filt;
    int i, ret;

    for (i = 0; i < graph->nb_filters; i++) {
        filt = graph->filters[i];

        /* only sinks (no outputs) are visited: avfilter_config_links()
         * configures everything upstream of them recursively */
        if (!filt->nb_outputs) {
            if ((ret = avfilter_config_links(filt)))
                return ret;
        }
    }

    return 0;
}

/* Linear search for a filter instance by its instance name;
 * returns NULL when no filter with that name exists in the graph. */
AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name)
{
    int i;

    for (i = 0; i < graph->nb_filters; i++)
        if (graph->filters[i]->name && !strcmp(name, graph->filters[i]->name))
            return graph->filters[i];

    return NULL;
}

/* Normalize a channel-layout list: a non-empty explicit list must not also
 * claim "all layouts/counts" (flags cleared, with a warning), and an empty
 * list claiming only "all counts" is widened to "all layouts". */
static void sanitize_channel_layouts(void *log, AVFilterChannelLayouts *l)
{
    if (!l)
        return;
    if (l->nb_channel_layouts) {
        if (l->all_layouts || l->all_counts)
            av_log(log, AV_LOG_WARNING, "All layouts set on non-empty list\n");
        l->all_layouts = l->all_counts = 0;
    } else {
        if (l->all_counts && !l->all_layouts)
            av_log(log, AV_LOG_WARNING, "All counts without all layouts\n");
        l->all_layouts = 1;
    }
}

/* Run the filter's own query_formats() callback, sanitize the resulting
 * channel-layout lists, then fill any list the callback left unset with
 * the full set of formats (and, for audio, samplerates and layouts). */
static int filter_query_formats(AVFilterContext *ctx)
{
    int ret, i;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *chlayouts;
    AVFilterFormats *samplerates;
    /* media type taken from the first connected pad; video by default */
    enum AVMediaType type = ctx->inputs  && ctx->inputs [0] ? ctx->inputs [0]->type :
                            ctx->outputs && ctx->outputs[0] ? ctx->outputs[0]->type :
                            AVMEDIA_TYPE_VIDEO;

    if ((ret = ctx->filter->query_formats(ctx)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Query format failed for '%s': %s\n",
               ctx->name, av_err2str(ret));
        return ret;
    }

    for (i = 0; i < ctx->nb_inputs; i++)
        sanitize_channel_layouts(ctx, ctx->inputs[i]->out_channel_layouts);
    for (i = 0; i < ctx->nb_outputs; i++)
        sanitize_channel_layouts(ctx, ctx->outputs[i]->in_channel_layouts);

    formats = ff_all_formats(type);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_set_common_formats(ctx, formats);
    if (type == AVMEDIA_TYPE_AUDIO) {
        samplerates = ff_all_samplerates();
        if (!samplerates)
            return AVERROR(ENOMEM);
        ff_set_common_samplerates(ctx, samplerates);
        chlayouts = ff_all_channel_layouts();
        if (!chlayouts)
            return AVERROR(ENOMEM);
        ff_set_common_channel_layouts(ctx, chlayouts);
    }
    return 0;
}

/* Negotiate formats across the whole graph, auto-inserting scale/aresample
 * conversion filters on links whose format lists cannot be merged. */
static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
{
    int i, j, ret;
    int scaler_count = 0, resampler_count = 0;

    for (j = 0; j < 2; j++) {
        /* ask all the sub-filters for their supported media formats */
        for (i = 0; i < graph->nb_filters; i++) {
            /* Call query_formats on sources first.
               This is a temporary workaround for amerge,
               until format renegociation is implemented.
             */
            /* NOTE(review): this parses as "(!nb_inputs) == j", so pass
             * j==0 visits sources (nb_inputs == 0) and j==1 the remaining
             * filters -- correct, but worth rewriting for clarity. */
            if (!graph->filters[i]->nb_inputs == j)
                continue;
            if (graph->filters[i]->filter->query_formats)
                ret = filter_query_formats(graph->filters[i]);
            else
                ret = ff_default_query_formats(graph->filters[i]);
            if (ret < 0)
                return ret;
        }
    }

    /* go through and merge as many format lists as possible */
    for (i = 0; i < graph->nb_filters; i++) {
        AVFilterContext *filter = graph->filters[i];

        for (j = 0; j < filter->nb_inputs; j++) {
            AVFilterLink *link = filter->inputs[j];
            int convert_needed = 0;

            if (!link)
                continue;

            if (link->in_formats != link->out_formats &&
                !ff_merge_formats(link->in_formats, link->out_formats,
                                  link->type))
                convert_needed = 1;
            if (link->type == AVMEDIA_TYPE_AUDIO) {
                if (link->in_channel_layouts != link->out_channel_layouts &&
                    !ff_merge_channel_layouts(link->in_channel_layouts,
                                              link->out_channel_layouts))
                    convert_needed = 1;
                if (link->in_samplerates != link->out_samplerates &&
                    !ff_merge_samplerates(link->in_samplerates,
                                          link->out_samplerates))
                    convert_needed = 1;
            }

            if (convert_needed) {
                AVFilterContext *convert;
                AVFilter *filter;
                AVFilterLink *inlink, *outlink;
                char scale_args[256];
                char inst_name[30];

                /* couldn't merge format lists. auto-insert conversion filter */
                switch (link->type) {
                case AVMEDIA_TYPE_VIDEO:
                    if (!(filter = avfilter_get_by_name("scale"))) {
                        av_log(log_ctx, AV_LOG_ERROR, "'scale' filter "
                               "not present, cannot convert pixel formats.\n");
                        return AVERROR(EINVAL);
                    }

                    snprintf(inst_name, sizeof(inst_name), "auto-inserted scaler %d",
                             scaler_count++);
                    /* "0:0" keeps the input dimensions; sws options appended */
                    if (graph->scale_sws_opts)
                        snprintf(scale_args, sizeof(scale_args), "0:0:%s", graph->scale_sws_opts);
                    else
                        snprintf(scale_args, sizeof(scale_args), "0:0");

                    if ((ret = avfilter_graph_create_filter(&convert, filter,
                                                            inst_name, scale_args, NULL,
                                                            graph)) < 0)
                        return ret;
                    break;
                case AVMEDIA_TYPE_AUDIO:
                    if (!(filter = avfilter_get_by_name("aresample"))) {
                        av_log(log_ctx, AV_LOG_ERROR, "'aresample' filter "
                               "not present, cannot convert audio formats.\n");
                        return AVERROR(EINVAL);
                    }

                    snprintf(inst_name, sizeof(inst_name), "auto-inserted resampler %d",
                             resampler_count++);
                    /* NOTE(review): scale_args is filled below but
                     * graph->aresample_swr_opts is what is actually passed
                     * to create_filter -- the snprintf looks like dead
                     * code; confirm against upstream before removing. */
                    scale_args[0] = '\0';
                    if (graph->aresample_swr_opts)
                        snprintf(scale_args, sizeof(scale_args), "%s",
                                 graph->aresample_swr_opts);
                    if ((ret = avfilter_graph_create_filter(&convert, filter,
                                                            inst_name, graph->aresample_swr_opts,
                                                            NULL, graph)) < 0)
                        return ret;
                    break;
                default:
                    return AVERROR(EINVAL);
                }

                if ((ret = avfilter_insert_filter(link, convert, 0, 0)) < 0)
                    return ret;

                filter_query_formats(convert);
                inlink  = convert->inputs[0];
                outlink = convert->outputs[0];
                /* the inserted converter must merge with both neighbours;
                 * any failure is collected into ret as ENOSYS */
                if (!ff_merge_formats( inlink->in_formats,  inlink->out_formats,  inlink->type) ||
                    !ff_merge_formats(outlink->in_formats, outlink->out_formats, outlink->type))
                    ret |= AVERROR(ENOSYS);
                if (inlink->type == AVMEDIA_TYPE_AUDIO &&
                    (!ff_merge_samplerates(inlink->in_samplerates,
                                           inlink->out_samplerates) ||
                     !ff_merge_channel_layouts(inlink->in_channel_layouts,
                                               inlink->out_channel_layouts)))
                    ret |= AVERROR(ENOSYS);
                if (outlink->type == AVMEDIA_TYPE_AUDIO &&
                    (!ff_merge_samplerates(outlink->in_samplerates,
                                           outlink->out_samplerates) ||
                     !ff_merge_channel_layouts(outlink->in_channel_layouts,
                                               outlink->out_channel_layouts)))
                    ret |= AVERROR(ENOSYS);

                if (ret < 0) {
                    av_log(log_ctx, AV_LOG_ERROR,
                           "Impossible to convert between the formats supported by the filter "
                           "'%s' and the filter '%s'\n", link->src->name, link->dst->name);
                    return ret;
                }
            }
        }
    }

    return 0;
}

/* Collapse a link's candidate format lists to a single choice and store it
 * on the link; ref, when given and video, biases pixel-format selection
 * towards the best conversion target for ref->format. */
static int pick_format(AVFilterLink *link, AVFilterLink *ref)
{
    if (!link || !link->in_formats)
        return 0;

    if (link->type == AVMEDIA_TYPE_VIDEO) {
        if (ref && ref->type == AVMEDIA_TYPE_VIDEO) {
            /* even component count is used as the alpha heuristic here */
            int has_alpha = av_pix_fmt_desc_get(ref->format)->nb_components % 2 == 0;
            enum AVPixelFormat best = AV_PIX_FMT_NONE;
            int i;
            /* NOTE(review): loop bound reconstructed from extraction
             * garbling; "i < link->in_formats->format_count" matches the
             * body's use of formats[i]. */
            for (i = 0; i < link->in_formats->format_count; i++) {
                enum AVPixelFormat p = link->in_formats->formats[i];
                best = avcodec_find_best_pix_fmt_of_2(best, p, ref->format, has_alpha, NULL);
            }
            av_log(link->src, AV_LOG_DEBUG, "picking %s out of %d ref:%s alpha:%d\n",
                   av_get_pix_fmt_name(best), link->in_formats->format_count,
                   av_get_pix_fmt_name(ref->format), has_alpha);
            link->in_formats->formats[0] = best;
        }
    }

    link->in_formats->format_count = 1;
    link->format = link->in_formats->formats[0];

    if (link->type == AVMEDIA_TYPE_AUDIO) {
        if (!link->in_samplerates->format_count) {
            av_log(link->src, AV_LOG_ERROR, "Cannot select sample rate for"
                   " the link between filters %s and %s.\n", link->src->name,
                   link->dst->name);
            return AVERROR(EINVAL);
        }
        link->in_samplerates->format_count = 1;
        link->sample_rate = link->in_samplerates->formats[0];

        if (link->in_channel_layouts->all_layouts) {
            av_log(link->src, AV_LOG_ERROR, "Cannot select channel layout for"
                   " the link between filters %s and %s.\n", link->src->name,
                   link->dst->name);
            return AVERROR(EINVAL);
        }
        link->in_channel_layouts->nb_channel_layouts = 1;
        link->channel_layout = link->in_channel_layouts->channel_layouts[0];
        /* FF_LAYOUT2COUNT() non-zero means "channel count only, no layout" */
        if ((link->channels = FF_LAYOUT2COUNT(link->channel_layout)))
            link->channel_layout = 0;
        else
            link->channels = av_get_channel_layout_nb_channels(link->channel_layout);
    }

    ff_formats_unref(&link->in_formats);
    ff_formats_unref(&link->out_formats);
    ff_formats_unref(&link->in_samplerates);
    ff_formats_unref(&link->out_samplerates);
    ff_channel_layouts_unref(&link->in_channel_layouts);
    ff_channel_layouts_unref(&link->out_channel_layouts);

    return 0;
}

/* Shared "propagate a single chosen input format to the outputs" pass used
 * for both sample formats and sample rates; sets ret = 1 when an output
 * list was narrowed to a singleton.  Relies on i, j, k, filter and ret
 * being in scope at the expansion site. */
#define REDUCE_FORMATS(fmt_type, list_type, list, var, nb, add_format) \
do {                                                                   \
    for (i = 0; i < filter->nb_inputs; i++) {                          \
        AVFilterLink *link = filter->inputs[i];                        \
        fmt_type fmt;                                                  \
                                                                       \
        if (!link->out_ ## list || link->out_ ## list->nb != 1)        \
            continue;                                                  \
        fmt = link->out_ ## list->var[0];                              \
                                                                       \
        for (j = 0; j < filter->nb_outputs; j++) {                     \
            AVFilterLink *out_link = filter->outputs[j];               \
            list_type *fmts;                                           \
                                                                       \
            if (link->type != out_link->type ||                        \
                out_link->in_ ## list->nb == 1)                        \
                continue;                                              \
            fmts = out_link->in_ ## list;                              \
                                                                       \
            if (!out_link->in_ ## list->nb) {                          \
                add_format(&out_link->in_ ##list, fmt);                \
                break;                                                 \
            }                                                          \
                                                                       \
            for (k = 0; k < out_link->in_ ## list->nb; k++)            \
                if (fmts->var[k] == fmt) {                             \
                    fmts->var[0]  = fmt;                               \
                    fmts->nb = 1;                                      \
                    ret = 1;                                           \
                    break;                                             \
                }                                                      \
        }                                                              \
    }                                                                  \
} while (0)

/* If an input of this filter is already pinned to one format/samplerate/
 * layout, prefer the same value on the outputs.  Returns 1 when any
 * output list changed (so the caller can iterate to a fixed point). */
static int reduce_formats_on_filter(AVFilterContext *filter)
{
    int i, j, k, ret = 0;

    REDUCE_FORMATS(int, AVFilterFormats, formats, formats,
                   format_count, ff_add_format);
    REDUCE_FORMATS(int, AVFilterFormats, samplerates, formats,
                   format_count, ff_add_format);

    /* reduce channel layouts */
    for (i = 0; i < filter->nb_inputs; i++) {
        AVFilterLink *inlink = filter->inputs[i];
        uint64_t fmt;

        if (!inlink->out_channel_layouts ||
            inlink->out_channel_layouts->nb_channel_layouts != 1)
            continue;
        fmt = inlink->out_channel_layouts->channel_layouts[0];

        for (j = 0; j < filter->nb_outputs; j++) {
            AVFilterLink *outlink = filter->outputs[j];
            AVFilterChannelLayouts *fmts;

            fmts = outlink->in_channel_layouts;
            if (inlink->type != outlink->type || fmts->nb_channel_layouts == 1)
                continue;

            if (fmts->all_layouts) {
                /* Turn the infinite list into a singleton */
                fmts->all_layouts = fmts->all_counts = 0;
                ff_add_channel_layout(&outlink->in_channel_layouts, fmt);
                break;
            }

            for (k = 0; k < outlink->in_channel_layouts->nb_channel_layouts; k++) {
                if (fmts->channel_layouts[k] == fmt) {
                    fmts->channel_layouts[0]  = fmt;
                    fmts->nb_channel_layouts = 1;
                    ret = 1;
                    break;
                }
            }
        }
    }

    return ret;
}

/* Iterate reduce_formats_on_filter() over the graph to a fixed point. */
static void reduce_formats(AVFilterGraph *graph)
{
    int i, reduced;

    do {
        reduced = 0;

        for (i = 0; i < graph->nb_filters; i++)
            reduced |= reduce_formats_on_filter(graph->filters[i]);
    } while (reduced);
}

/* When an input's sample rate is pinned, move the closest rate to the
 * front of each still-open output list so pick_format() selects it. */
static void swap_samplerates_on_filter(AVFilterContext *filter)
{
    AVFilterLink *link = NULL;
    int sample_rate;
    int i, j;

    for (i = 0; i < filter->nb_inputs; i++) {
        link = filter->inputs[i];

        if (link->type == AVMEDIA_TYPE_AUDIO &&
            link->out_samplerates->format_count == 1)
            break;
    }
    if (i == filter->nb_inputs)
        return;

    sample_rate = link->out_samplerates->formats[0];

    for (i = 0; i < filter->nb_outputs; i++) {
        AVFilterLink *outlink = filter->outputs[i];
        int best_idx, best_diff = INT_MAX;

        /* format_count >= 2 guarantees the loop below runs at least once,
         * so best_idx is always set before the FFSWAP */
        if (outlink->type != AVMEDIA_TYPE_AUDIO ||
            outlink->in_samplerates->format_count < 2)
            continue;

        for (j = 0; j < outlink->in_samplerates->format_count; j++) {
            int diff = abs(sample_rate - outlink->in_samplerates->formats[j]);

            if (diff < best_diff) {
                best_diff = diff;
                best_idx  = j;
            }
        }
        FFSWAP(int, outlink->in_samplerates->formats[0],
               outlink->in_samplerates->formats[best_idx]);
    }
}

static void swap_samplerates(AVFilterGraph *graph)
{
    int i;

    for (i = 0; i < graph->nb_filters; i++)
        swap_samplerates_on_filter(graph->filters[i]);
}

#define CH_CENTER_PAIR (AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_FRONT_RIGHT_OF_CENTER)
#define CH_FRONT_PAIR  (AV_CH_FRONT_LEFT           | AV_CH_FRONT_RIGHT)
#define
        CH_STEREO_PAIR (AV_CH_STEREO_LEFT          | AV_CH_STEREO_RIGHT)
#define CH_WIDE_PAIR   (AV_CH_WIDE_LEFT            | AV_CH_WIDE_RIGHT)
#define CH_SIDE_PAIR   (AV_CH_SIDE_LEFT            | AV_CH_SIDE_RIGHT)
#define CH_DIRECT_PAIR (AV_CH_SURROUND_DIRECT_LEFT | AV_CH_SURROUND_DIRECT_RIGHT)
#define CH_BACK_PAIR   (AV_CH_BACK_LEFT            | AV_CH_BACK_RIGHT)

/* allowable substitutions for channel pairs when comparing layouts,
 * ordered by priority for both values */
static const uint64_t ch_subst[][2] = {
    { CH_FRONT_PAIR,      CH_CENTER_PAIR     },
    { CH_FRONT_PAIR,      CH_WIDE_PAIR       },
    { CH_FRONT_PAIR,      AV_CH_FRONT_CENTER },
    { CH_CENTER_PAIR,     CH_FRONT_PAIR      },
    { CH_CENTER_PAIR,     CH_WIDE_PAIR       },
    { CH_CENTER_PAIR,     AV_CH_FRONT_CENTER },
    { CH_WIDE_PAIR,       CH_FRONT_PAIR      },
    { CH_WIDE_PAIR,       CH_CENTER_PAIR     },
    { CH_WIDE_PAIR,       AV_CH_FRONT_CENTER },
    { AV_CH_FRONT_CENTER, CH_FRONT_PAIR      },
    { AV_CH_FRONT_CENTER, CH_CENTER_PAIR     },
    { AV_CH_FRONT_CENTER, CH_WIDE_PAIR       },
    { CH_SIDE_PAIR,       CH_DIRECT_PAIR     },
    { CH_SIDE_PAIR,       CH_BACK_PAIR       },
    { CH_SIDE_PAIR,       AV_CH_BACK_CENTER  },
    { CH_BACK_PAIR,       CH_DIRECT_PAIR     },
    { CH_BACK_PAIR,       CH_SIDE_PAIR       },
    { CH_BACK_PAIR,       AV_CH_BACK_CENTER  },
    { AV_CH_BACK_CENTER,  CH_BACK_PAIR       },
    { AV_CH_BACK_CENTER,  CH_DIRECT_PAIR     },
    { AV_CH_BACK_CENTER,  CH_SIDE_PAIR       },
};

/* When an input's channel layout is pinned, score every candidate output
 * layout against it (channel matches, allowed pair substitutions, LFE
 * ignored) and move the best-scoring one to the front of the list. */
static void swap_channel_layouts_on_filter(AVFilterContext *filter)
{
    AVFilterLink *link = NULL;
    int i, j, k;

    for (i = 0; i < filter->nb_inputs; i++) {
        link = filter->inputs[i];

        if (link->type == AVMEDIA_TYPE_AUDIO &&
            link->out_channel_layouts->nb_channel_layouts == 1)
            break;
    }
    if (i == filter->nb_inputs)
        return;

    for (i = 0; i < filter->nb_outputs; i++) {
        AVFilterLink *outlink = filter->outputs[i];
        int best_idx = -1, best_score = INT_MIN, best_count_diff = INT_MAX;

        if (outlink->type != AVMEDIA_TYPE_AUDIO ||
            outlink->in_channel_layouts->nb_channel_layouts < 2)
            continue;

        for (j = 0; j < outlink->in_channel_layouts->nb_channel_layouts; j++) {
            uint64_t  in_chlayout = link->out_channel_layouts->channel_layouts[0];
            uint64_t out_chlayout = outlink->in_channel_layouts->channel_layouts[j];
            int  in_channels      = av_get_channel_layout_nb_channels(in_chlayout);
            int out_channels      = av_get_channel_layout_nb_channels(out_chlayout);
            int count_diff        = out_channels - in_channels;
            int matched_channels, extra_channels;
            int score = 100000;

            if (FF_LAYOUT2COUNT(in_chlayout) || FF_LAYOUT2COUNT(out_chlayout)) {
                /* Compute score in case the input or output layout encodes
                   a channel count; in this case the score is not altered by
                   the computation afterwards, as in_chlayout and
                   out_chlayout have both been set to 0 */
                if (FF_LAYOUT2COUNT(in_chlayout))
                    in_channels = FF_LAYOUT2COUNT(in_chlayout);
                if (FF_LAYOUT2COUNT(out_chlayout))
                    out_channels = FF_LAYOUT2COUNT(out_chlayout);
                score -= 10000 + FFABS(out_channels - in_channels) +
                         (in_channels > out_channels ? 10000 : 0);
                in_chlayout = out_chlayout = 0;
                /* Let the remaining computation run, even if the score
                   value is not altered */
            }

            /* channel substitution */
            for (k = 0; k < FF_ARRAY_ELEMS(ch_subst); k++) {
                uint64_t cmp0 = ch_subst[k][0];
                uint64_t cmp1 = ch_subst[k][1];
                if (( in_chlayout & cmp0) && (!(out_chlayout & cmp0)) &&
                    (out_chlayout & cmp1) && (!( in_chlayout & cmp1))) {
                    in_chlayout  &= ~cmp0;
                    out_chlayout &= ~cmp1;
                    /* add score for channel match, minus a deduction for
                       having to do the substitution */
                    score += 10 * av_get_channel_layout_nb_channels(cmp1) - 2;
                }
            }

            /* no penalty for LFE channel mismatch */
            if ( (in_chlayout & AV_CH_LOW_FREQUENCY) &&
                (out_chlayout & AV_CH_LOW_FREQUENCY))
                score += 10;
            in_chlayout  &= ~AV_CH_LOW_FREQUENCY;
            out_chlayout &= ~AV_CH_LOW_FREQUENCY;

            matched_channels = av_get_channel_layout_nb_channels(in_chlayout &
                                                                 out_chlayout);
            extra_channels   = av_get_channel_layout_nb_channels(out_chlayout &
                                                                 (~in_chlayout));
            score += 10 * matched_channels - 5 * extra_channels;

            /* ties are broken by the smaller channel-count difference */
            if (score > best_score ||
                (count_diff < best_count_diff && score == best_score)) {
                best_score      = score;
                best_idx        = j;
                best_count_diff = count_diff;
            }
        }
        av_assert0(best_idx >= 0);
        FFSWAP(uint64_t, outlink->in_channel_layouts->channel_layouts[0],
               outlink->in_channel_layouts->channel_layouts[best_idx]);
    }

}

static void swap_channel_layouts(AVFilterGraph *graph)
{
    int i;

    for (i = 0; i < graph->nb_filters; i++)
        swap_channel_layouts_on_filter(graph->filters[i]);
}

/* When an input's sample format is pinned, move the best-matching sample
 * format (same base format, or closest bytes-per-sample without losing
 * precision) to the front of each still-open output list. */
static void swap_sample_fmts_on_filter(AVFilterContext *filter)
{
    AVFilterLink *link = NULL;
    int format, bps;
    int i, j;

    for (i = 0; i < filter->nb_inputs; i++) {
        link = filter->inputs[i];

        if (link->type == AVMEDIA_TYPE_AUDIO &&
            link->out_formats->format_count == 1)
            break;
    }
    if (i == filter->nb_inputs)
        return;

    format = link->out_formats->formats[0];
    bps    = av_get_bytes_per_sample(format);

    for (i = 0; i < filter->nb_outputs; i++) {
        AVFilterLink *outlink = filter->outputs[i];
        int best_idx = -1, best_score = INT_MIN;

        if (outlink->type != AVMEDIA_TYPE_AUDIO ||
            outlink->in_formats->format_count < 2)
            continue;

        for (j = 0; j < outlink->in_formats->format_count; j++) {
            int out_format = outlink->in_formats->formats[j];
            int out_bps    = av_get_bytes_per_sample(out_format);
            int score;

            if (av_get_packed_sample_fmt(out_format) == format ||
                av_get_planar_sample_fmt(out_format) == format) {
                best_idx = j;
                break;
            }

            /* for s32 and float prefer double to prevent loss of information */
            if (bps == 4 && out_bps == 8) {
                best_idx = j;
                break;
            }

            /* prefer closest higher or equal bps */
            score = -abs(out_bps - bps);
            if (out_bps >= bps)
                score += INT_MAX/2;

            if (score > best_score) {
                best_score = score;
                best_idx   = j;
            }
        }
        av_assert0(best_idx >= 0);
        FFSWAP(int, outlink->in_formats->formats[0],
               outlink->in_formats->formats[best_idx]);
    }
}

static void swap_sample_fmts(AVFilterGraph *graph)
{
    int i;

    for (i = 0; i <
         graph->nb_filters; i++)
        swap_sample_fmts_on_filter(graph->filters[i]);

}

/* Collapse every link in the graph to a single format: first iterate to a
 * fixed point over links already narrowed to one candidate (propagating
 * choices through filters), then force a pick on everything left. */
static int pick_formats(AVFilterGraph *graph)
{
    int i, j, ret;
    int change;

    do{
        change = 0;
        for (i = 0; i < graph->nb_filters; i++) {
            AVFilterContext *filter = graph->filters[i];
            if (filter->nb_inputs){
                for (j = 0; j < filter->nb_inputs; j++){
                    if(filter->inputs[j]->in_formats && filter->inputs[j]->in_formats->format_count == 1) {
                        if ((ret = pick_format(filter->inputs[j], NULL)) < 0)
                            return ret;
                        change = 1;
                    }
                }
            }
            if (filter->nb_outputs){
                for (j = 0; j < filter->nb_outputs; j++){
                    if(filter->outputs[j]->in_formats && filter->outputs[j]->in_formats->format_count == 1) {
                        if ((ret = pick_format(filter->outputs[j], NULL)) < 0)
                            return ret;
                        change = 1;
                    }
                }
            }
            /* once an input format is known, use it as reference for
             * still-undecided outputs of the same filter */
            if (filter->nb_inputs && filter->nb_outputs && filter->inputs[0]->format>=0) {
                for (j = 0; j < filter->nb_outputs; j++) {
                    if(filter->outputs[j]->format<0) {
                        if ((ret = pick_format(filter->outputs[j], filter->inputs[0])) < 0)
                            return ret;
                        change = 1;
                    }
                }
            }
        }
    }while(change);

    for (i = 0; i < graph->nb_filters; i++) {
        AVFilterContext *filter = graph->filters[i];

        for (j = 0; j < filter->nb_inputs; j++)
            if ((ret = pick_format(filter->inputs[j], NULL)) < 0)
                return ret;
        for (j = 0; j < filter->nb_outputs; j++)
            if ((ret = pick_format(filter->outputs[j], NULL)) < 0)
                return ret;
    }
    return 0;
}

/**
 * Configure the formats of all the links in the graph.
 */
static int graph_config_formats(AVFilterGraph *graph, AVClass *log_ctx)
{
    int ret;

    /* find supported formats from sub-filters, and merge along links */
    if ((ret = query_formats(graph, log_ctx)) < 0)
        return ret;

    /* Once everything is merged, it's possible that we'll still have
     * multiple valid media format choices. We try to minimize the amount
     * of format conversion inside filters */
    reduce_formats(graph);

    /* for audio filters, ensure the best format, sample rate and channel layout
     * is selected */
    swap_sample_fmts(graph);
    swap_samplerates(graph);
    swap_channel_layouts(graph);

    if ((ret = pick_formats(graph)) < 0)
        return ret;

    return 0;
}

/* Set the graph back-pointer on every link and build the heap of sink
 * links (age_index) used by avfilter_graph_request_oldest(). */
static int ff_avfilter_graph_config_pointers(AVFilterGraph *graph,
                                             AVClass *log_ctx)
{
    unsigned i, j;
    int sink_links_count = 0, n = 0;
    AVFilterContext *f;
    AVFilterLink **sinks;

    for (i = 0; i < graph->nb_filters; i++) {
        f = graph->filters[i];
        for (j = 0; j < f->nb_inputs; j++) {
            f->inputs[j]->graph     = graph;
            f->inputs[j]->age_index = -1;
        }
        for (j = 0; j < f->nb_outputs; j++) {
            f->outputs[j]->graph    = graph;
            f->outputs[j]->age_index= -1;
        }
        if (!f->nb_outputs) {
            /* overflow guard before accumulating the sink-link count */
            if (f->nb_inputs > INT_MAX - sink_links_count)
                return AVERROR(EINVAL);
            sink_links_count += f->nb_inputs;
        }
    }
    sinks = av_calloc(sink_links_count, sizeof(*sinks));
    if (!sinks)
        return AVERROR(ENOMEM);
    for (i = 0; i < graph->nb_filters; i++) {
        f = graph->filters[i];
        if (!f->nb_outputs) {
            for (j = 0; j < f->nb_inputs; j++) {
                sinks[n] = f->inputs[j];
                f->inputs[j]->age_index = n++;
            }
        }
    }
    av_assert0(n == sink_links_count);
    graph->sink_links       = sinks;
    graph->sink_links_count = sink_links_count;
    return 0;
}

/* Auto-insert a fifo/afifo filter in front of every input pad that
 * declares needs_fifo. */
static int graph_insert_fifos(AVFilterGraph *graph, AVClass *log_ctx)
{
    AVFilterContext *f;
    int i, j, ret;
    int fifo_count = 0;

    for (i = 0; i < graph->nb_filters; i++) {
        f = graph->filters[i];

        for (j = 0; j < f->nb_inputs; j++) {
            AVFilterLink *link = f->inputs[j];
            AVFilterContext *fifo_ctx;
            AVFilter *fifo;
            char name[32];

            if (!link->dstpad->needs_fifo)
                continue;

            fifo = f->inputs[j]->type == AVMEDIA_TYPE_VIDEO ?
+ avfilter_get_by_name("fifo") : + avfilter_get_by_name("afifo"); + + snprintf(name, sizeof(name), "auto-inserted fifo %d", fifo_count++); + + ret = avfilter_graph_create_filter(&fifo_ctx, fifo, name, NULL, + NULL, graph); + if (ret < 0) + return ret; + + ret = avfilter_insert_filter(link, fifo_ctx, 0, 0); + if (ret < 0) + return ret; + } + } + + return 0; +} + +int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx) +{ + int ret; + + if ((ret = graph_check_validity(graphctx, log_ctx))) + return ret; + if ((ret = graph_insert_fifos(graphctx, log_ctx)) < 0) + return ret; + if ((ret = graph_config_formats(graphctx, log_ctx))) + return ret; + if ((ret = graph_config_links(graphctx, log_ctx))) + return ret; + if ((ret = ff_avfilter_graph_config_pointers(graphctx, log_ctx))) + return ret; + + return 0; +} + +int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags) +{ + int i, r = AVERROR(ENOSYS); + + if(!graph) + return r; + + if((flags & AVFILTER_CMD_FLAG_ONE) && !(flags & AVFILTER_CMD_FLAG_FAST)) { + r=avfilter_graph_send_command(graph, target, cmd, arg, res, res_len, flags | AVFILTER_CMD_FLAG_FAST); + if(r != AVERROR(ENOSYS)) + return r; + } + + if(res_len && res) + res[0]= 0; + + for (i = 0; i < graph->nb_filters; i++) { + AVFilterContext *filter = graph->filters[i]; + if(!strcmp(target, "all") || (filter->name && !strcmp(target, filter->name)) || !strcmp(target, filter->filter->name)){ + r = avfilter_process_command(filter, cmd, arg, res, res_len, flags); + if(r != AVERROR(ENOSYS)) { + if((flags & AVFILTER_CMD_FLAG_ONE) || r<0) + return r; + } + } + } + + return r; +} + +int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *command, const char *arg, int flags, double ts) +{ + int i; + + if(!graph) + return 0; + + for (i = 0; i < graph->nb_filters; i++) { + AVFilterContext *filter = graph->filters[i]; + if(filter && (!strcmp(target, "all") 
|| !strcmp(target, filter->name) || !strcmp(target, filter->filter->name))){ + AVFilterCommand **queue = &filter->command_queue, *next; + while (*queue && (*queue)->time <= ts) + queue = &(*queue)->next; + next = *queue; + *queue = av_mallocz(sizeof(AVFilterCommand)); + (*queue)->command = av_strdup(command); + (*queue)->arg = av_strdup(arg); + (*queue)->time = ts; + (*queue)->flags = flags; + (*queue)->next = next; + if(flags & AVFILTER_CMD_FLAG_ONE) + return 0; + } + } + + return 0; +} + +static void heap_bubble_up(AVFilterGraph *graph, + AVFilterLink *link, int index) +{ + AVFilterLink **links = graph->sink_links; + + while (index) { + int parent = (index - 1) >> 1; + if (links[parent]->current_pts >= link->current_pts) + break; + links[index] = links[parent]; + links[index]->age_index = index; + index = parent; + } + links[index] = link; + link->age_index = index; +} + +static void heap_bubble_down(AVFilterGraph *graph, + AVFilterLink *link, int index) +{ + AVFilterLink **links = graph->sink_links; + + while (1) { + int child = 2 * index + 1; + if (child >= graph->sink_links_count) + break; + if (child + 1 < graph->sink_links_count && + links[child + 1]->current_pts < links[child]->current_pts) + child++; + if (link->current_pts < links[child]->current_pts) + break; + links[index] = links[child]; + links[index]->age_index = index; + index = child; + } + links[index] = link; + link->age_index = index; +} + +void ff_avfilter_graph_update_heap(AVFilterGraph *graph, AVFilterLink *link) +{ + heap_bubble_up (graph, link, link->age_index); + heap_bubble_down(graph, link, link->age_index); +} + + +int avfilter_graph_request_oldest(AVFilterGraph *graph) +{ + while (graph->sink_links_count) { + AVFilterLink *oldest = graph->sink_links[0]; + int r = ff_request_frame(oldest); + if (r != AVERROR_EOF) + return r; + av_log(oldest->dst, AV_LOG_DEBUG, "EOF on sink link %s:%s.\n", + oldest->dst ? oldest->dst->name : "unknown", + oldest->dstpad ? 
oldest->dstpad->name : "unknown"); + /* EOF: remove the link from the heap */ + if (oldest->age_index < --graph->sink_links_count) + heap_bubble_down(graph, graph->sink_links[graph->sink_links_count], + oldest->age_index); + oldest->age_index = -1; + } + return AVERROR_EOF; +} diff --git a/ffmpeg1/libavfilter/avfiltergraph.h b/ffmpeg1/libavfilter/avfiltergraph.h new file mode 100644 index 0000000..61110f9 --- /dev/null +++ b/ffmpeg1/libavfilter/avfiltergraph.h @@ -0,0 +1,280 @@ +/* + * Filter graphs + * copyright (c) 2007 Bobby Bingham + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVFILTER_AVFILTERGRAPH_H
#define AVFILTER_AVFILTERGRAPH_H

#include "avfilter.h"
#include "libavutil/log.h"

typedef struct AVFilterGraph {
    const AVClass *av_class;
#if FF_API_FOO_COUNT
    attribute_deprecated
    unsigned filter_count_unused;
#endif
    AVFilterContext **filters;
#if !FF_API_FOO_COUNT
    unsigned nb_filters;
#endif

    char *scale_sws_opts; ///< sws options to use for the auto-inserted scale filters
    char *resample_lavr_opts;   ///< libavresample options to use for the auto-inserted resample filters
#if FF_API_FOO_COUNT
    /* NOTE(review): nb_filters is declared in one of two positions guarded
     * by FF_API_FOO_COUNT -- presumably to keep the struct layout stable
     * across the filter_count deprecation; confirm against upstream. */
    unsigned nb_filters;
#endif
    char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters, Access ONLY through AVOptions

    /**
     * Private fields
     *
     * The following fields are for internal use only.
     * Their type, offset, number and semantic can change without notice.
     */

    AVFilterLink **sink_links;
    int sink_links_count;

    unsigned disable_auto_convert;
} AVFilterGraph;

/**
 * Allocate a filter graph.
 */
AVFilterGraph *avfilter_graph_alloc(void);

/**
 * Get a filter instance with name name from graph.
 *
 * @return the pointer to the found filter instance or NULL if it
 * cannot be found.
 */
AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name);

/**
 * Add an existing filter instance to a filter graph.
 *
 * @param graphctx the filter graph
 * @param filter the filter to be added
 */
int avfilter_graph_add_filter(AVFilterGraph *graphctx, AVFilterContext *filter);

/**
 * Create and add a filter instance into an existing graph.
 * The filter instance is created from the filter filt and inited
 * with the parameters args and opaque.
+ * + * In case of success put in *filt_ctx the pointer to the created + * filter instance, otherwise set *filt_ctx to NULL. + * + * @param name the instance name to give to the created filter instance + * @param graph_ctx the filter graph + * @return a negative AVERROR error code in case of failure, a non + * negative value otherwise + */ +int avfilter_graph_create_filter(AVFilterContext **filt_ctx, AVFilter *filt, + const char *name, const char *args, void *opaque, + AVFilterGraph *graph_ctx); + +/** + * Enable or disable automatic format conversion inside the graph. + * + * Note that format conversion can still happen inside explicitly inserted + * scale and aconvert filters. + * + * @param flags any of the AVFILTER_AUTO_CONVERT_* constants + */ +void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags); + +enum { + AVFILTER_AUTO_CONVERT_ALL = 0, /**< all automatic conversions enabled */ + AVFILTER_AUTO_CONVERT_NONE = -1, /**< all automatic conversions disabled */ +}; + +/** + * Check validity and configure all the links and formats in the graph. + * + * @param graphctx the filter graph + * @param log_ctx context used for logging + * @return 0 in case of success, a negative AVERROR code otherwise + */ +int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx); + +/** + * Free a graph, destroy its links, and set *graph to NULL. + * If *graph is NULL, do nothing. + */ +void avfilter_graph_free(AVFilterGraph **graph); + +/** + * A linked-list of the inputs/outputs of the filter chain. + * + * This is mainly useful for avfilter_graph_parse() / avfilter_graph_parse2(), + * where it is used to communicate open (unlinked) inputs and outputs from and + * to the caller. + * This struct specifies, per each not connected pad contained in the graph, the + * filter context and the pad index required for establishing a link. 
+ */ +typedef struct AVFilterInOut { + /** unique name for this input/output in the list */ + char *name; + + /** filter context associated to this input/output */ + AVFilterContext *filter_ctx; + + /** index of the filt_ctx pad to use for linking */ + int pad_idx; + + /** next input/output in the list, NULL if this is the last */ + struct AVFilterInOut *next; +} AVFilterInOut; + +/** + * Allocate a single AVFilterInOut entry. + * Must be freed with avfilter_inout_free(). + * @return allocated AVFilterInOut on success, NULL on failure. + */ +AVFilterInOut *avfilter_inout_alloc(void); + +/** + * Free the supplied list of AVFilterInOut and set *inout to NULL. + * If *inout is NULL, do nothing. + */ +void avfilter_inout_free(AVFilterInOut **inout); + +/** + * Add a graph described by a string to a graph. + * + * @param graph the filter graph where to link the parsed graph context + * @param filters string to be parsed + * @param inputs pointer to a linked list to the inputs of the graph, may be NULL. + * If non-NULL, *inputs is updated to contain the list of open inputs + * after the parsing, should be freed with avfilter_inout_free(). + * @param outputs pointer to a linked list to the outputs of the graph, may be NULL. + * If non-NULL, *outputs is updated to contain the list of open outputs + * after the parsing, should be freed with avfilter_inout_free(). + * @return non negative on success, a negative AVERROR code on error + */ +int avfilter_graph_parse(AVFilterGraph *graph, const char *filters, + AVFilterInOut **inputs, AVFilterInOut **outputs, + void *log_ctx); + +/** + * Add a graph described by a string to a graph. + * + * @param[in] graph the filter graph where to link the parsed graph context + * @param[in] filters string to be parsed + * @param[out] inputs a linked list of all free (unlinked) inputs of the + * parsed graph will be returned here. It is to be freed + * by the caller using avfilter_inout_free(). 
+ * @param[out] outputs a linked list of all free (unlinked) outputs of the + * parsed graph will be returned here. It is to be freed by the + * caller using avfilter_inout_free(). + * @return zero on success, a negative AVERROR code on error + * + * @note the difference between avfilter_graph_parse2() and + * avfilter_graph_parse() is that in avfilter_graph_parse(), the caller provides + * the lists of inputs and outputs, which therefore must be known before calling + * the function. On the other hand, avfilter_graph_parse2() \em returns the + * inputs and outputs that are left unlinked after parsing the graph and the + * caller then deals with them. Another difference is that in + * avfilter_graph_parse(), the inputs parameter describes inputs of the + * already existing part of the graph; i.e. from the point of view of + * the newly created part, they are outputs. Similarly the outputs parameter + * describes outputs of the already existing filters, which are provided as + * inputs to the parsed filters. + * avfilter_graph_parse2() takes the opposite approach -- it makes no reference + * whatsoever to already existing parts of the graph and the inputs parameter + * will on return contain inputs of the newly parsed part of the graph. + * Analogously the outputs parameter will contain outputs of the newly created + * filters. + */ +int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters, + AVFilterInOut **inputs, + AVFilterInOut **outputs); + + +/** + * Send a command to one or more filter instances. + * + * @param graph the filter graph + * @param target the filter(s) to which the command should be sent + * "all" sends to all filters + * otherwise it can be a filter or filter instance name + * which will send the command to all matching filters. 
+ * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param res a buffer with size res_len where the filter(s) can return a response. + * + * @returns >=0 on success otherwise an error code. + * AVERROR(ENOSYS) on unsupported commands + */ +int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags); + +/** + * Queue a command for one or more filter instances. + * + * @param graph the filter graph + * @param target the filter(s) to which the command should be sent + * "all" sends to all filters + * otherwise it can be a filter or filter instance name + * which will send the command to all matching filters. + * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param ts time at which the command should be sent to the filter + * + * @note As this executes commands after this function returns, no return code + * from the filter is provided, also AVFILTER_CMD_FLAG_ONE is not supported. + */ +int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts); + + +/** + * Dump a graph into a human-readable string representation. + * + * @param graph the graph to dump + * @param options formatting options; currently ignored + * @return a string, or NULL in case of memory allocation failure; + * the string must be freed using av_free + */ +char *avfilter_graph_dump(AVFilterGraph *graph, const char *options); + +/** + * Request a frame on the oldest sink link. + * + * If the request returns AVERROR_EOF, try the next. + * + * Note that this function is not meant to be the sole scheduling mechanism + * of a filtergraph, only a convenience function to help drain a filtergraph + * in a balanced way under normal circumstances. 
+ * + * Also note that AVERROR_EOF does not mean that frames did not arrive on + * some of the sinks during the process. + * When there are multiple sink links, in case the requested link + * returns an EOF, this may cause a filter to flush pending frames + * which are sent to another sink link, although unrequested. + * + * @return the return value of ff_request_frame(), + * or AVERROR_EOF if all links returned AVERROR_EOF + */ +int avfilter_graph_request_oldest(AVFilterGraph *graph); + +#endif /* AVFILTER_AVFILTERGRAPH_H */ diff --git a/ffmpeg1/libavfilter/bbox.c b/ffmpeg1/libavfilter/bbox.c new file mode 100644 index 0000000..be9b2e6 --- /dev/null +++ b/ffmpeg1/libavfilter/bbox.c @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2005 Robert Edele + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "bbox.h" + +int ff_calculate_bounding_box(FFBoundingBox *bbox, + const uint8_t *data, int linesize, int w, int h, + int min_val) +{ + int x, y; + int start_x; + int start_y; + int end_x; + int end_y; + const uint8_t *line; + + /* left bound */ + for (start_x = 0; start_x < w; start_x++) + for (y = 0; y < h; y++) + if ((data[y * linesize + start_x] > min_val)) + goto outl; +outl: + if (start_x == w) /* no points found */ + return 0; + + /* right bound */ + for (end_x = w - 1; end_x >= start_x; end_x--) + for (y = 0; y < h; y++) + if ((data[y * linesize + end_x] > min_val)) + goto outr; +outr: + + /* top bound */ + line = data; + for (start_y = 0; start_y < h; start_y++) { + for (x = 0; x < w; x++) + if (line[x] > min_val) + goto outt; + line += linesize; + } +outt: + + /* bottom bound */ + line = data + (h-1)*linesize; + for (end_y = h - 1; end_y >= start_y; end_y--) { + for (x = 0; x < w; x++) + if (line[x] > min_val) + goto outb; + line -= linesize; + } +outb: + + bbox->x1 = start_x; + bbox->y1 = start_y; + bbox->x2 = end_x; + bbox->y2 = end_y; + return 1; +} diff --git a/ffmpeg1/libavfilter/bbox.h b/ffmpeg1/libavfilter/bbox.h new file mode 100644 index 0000000..eb73154 --- /dev/null +++ b/ffmpeg1/libavfilter/bbox.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2005 Robert Edele + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BBOX_H +#define AVFILTER_BBOX_H + +#include <stdint.h> + +typedef struct { + int x1, x2, y1, y2; +} FFBoundingBox; + +/** + * Calculate the smallest rectangle that will encompass the + * region with values > min_val. + * + * @param bbox bounding box structure which is updated with the found values. + * If no pixels could be found with value > min_val, the + * structure is not modified. + * @return 1 in case at least one pixel with value > min_val was found, + * 0 otherwise + */ +int ff_calculate_bounding_box(FFBoundingBox *bbox, + const uint8_t *data, int linesize, + int w, int h, int min_val); + +#endif /* AVFILTER_BBOX_H */ diff --git a/ffmpeg1/libavfilter/buffer.c b/ffmpeg1/libavfilter/buffer.c new file mode 100644 index 0000000..29fedc4 --- /dev/null +++ b/ffmpeg1/libavfilter/buffer.c @@ -0,0 +1,167 @@ +/* + * Copyright Stefano Sabatini + * Copyright Anton Khirnov + * Copyright Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/channel_layout.h" +#include "libavutil/avassert.h" +#include "libavutil/common.h" +#include "libavutil/imgutils.h" +#include "libavcodec/avcodec.h" + +#include "avfilter.h" +#include "internal.h" +#include "audio.h" +#include "avcodec.h" + +void ff_avfilter_default_free_buffer(AVFilterBuffer *ptr) +{ + if (ptr->extended_data != ptr->data) + av_freep(&ptr->extended_data); + av_free(ptr->data[0]); + av_free(ptr); +} + +static void copy_video_props(AVFilterBufferRefVideoProps *dst, AVFilterBufferRefVideoProps *src) { + *dst = *src; + if (src->qp_table) { + int qsize = src->qp_table_size; + dst->qp_table = av_malloc(qsize); + memcpy(dst->qp_table, src->qp_table, qsize); + } +} + +AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask) +{ + AVFilterBufferRef *ret = av_malloc(sizeof(AVFilterBufferRef)); + if (!ret) + return NULL; + *ret = *ref; + + ret->metadata = NULL; + av_dict_copy(&ret->metadata, ref->metadata, 0); + + if (ref->type == AVMEDIA_TYPE_VIDEO) { + ret->video = av_malloc(sizeof(AVFilterBufferRefVideoProps)); + if (!ret->video) { + av_free(ret); + return NULL; + } + copy_video_props(ret->video, ref->video); + ret->extended_data = ret->data; + } else if (ref->type == AVMEDIA_TYPE_AUDIO) { + ret->audio = av_malloc(sizeof(AVFilterBufferRefAudioProps)); + if (!ret->audio) { + av_free(ret); + return NULL; + } + *ret->audio = *ref->audio; + + if (ref->extended_data && ref->extended_data != ref->data) { + int nb_channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout); + if (!(ret->extended_data = av_malloc(sizeof(*ret->extended_data) * + nb_channels))) { + av_freep(&ret->audio); + av_freep(&ret); + return NULL; + } + memcpy(ret->extended_data, ref->extended_data, + 
sizeof(*ret->extended_data) * nb_channels); + } else + ret->extended_data = ret->data; + } + ret->perms &= pmask; + ret->buf->refcount ++; + return ret; +} + +void avfilter_unref_buffer(AVFilterBufferRef *ref) +{ + if (!ref) + return; + av_assert0(ref->buf->refcount > 0); + if (!(--ref->buf->refcount)) + ref->buf->free(ref->buf); + if (ref->extended_data != ref->data) + av_freep(&ref->extended_data); + if (ref->video) + av_freep(&ref->video->qp_table); + av_freep(&ref->video); + av_freep(&ref->audio); + av_dict_free(&ref->metadata); + av_free(ref); +} + +void avfilter_unref_bufferp(AVFilterBufferRef **ref) +{ + avfilter_unref_buffer(*ref); + *ref = NULL; +} + +int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src) +{ + dst->pts = src->pts; + dst->pos = av_frame_get_pkt_pos(src); + dst->format = src->format; + + av_dict_free(&dst->metadata); + av_dict_copy(&dst->metadata, av_frame_get_metadata(src), 0); + + switch (dst->type) { + case AVMEDIA_TYPE_VIDEO: + dst->video->w = src->width; + dst->video->h = src->height; + dst->video->sample_aspect_ratio = src->sample_aspect_ratio; + dst->video->interlaced = src->interlaced_frame; + dst->video->top_field_first = src->top_field_first; + dst->video->key_frame = src->key_frame; + dst->video->pict_type = src->pict_type; + break; + case AVMEDIA_TYPE_AUDIO: + dst->audio->sample_rate = src->sample_rate; + dst->audio->channel_layout = src->channel_layout; + break; + default: + return AVERROR(EINVAL); + } + + return 0; +} + +void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src) +{ + // copy common properties + dst->pts = src->pts; + dst->pos = src->pos; + + switch (src->type) { + case AVMEDIA_TYPE_VIDEO: { + if (dst->video->qp_table) + av_freep(&dst->video->qp_table); + copy_video_props(dst->video, src->video); + break; + } + case AVMEDIA_TYPE_AUDIO: *dst->audio = *src->audio; break; + default: break; + } + + av_dict_free(&dst->metadata); + av_dict_copy(&dst->metadata, src->metadata, 
0); +} diff --git a/ffmpeg1/libavfilter/bufferqueue.h b/ffmpeg1/libavfilter/bufferqueue.h new file mode 100644 index 0000000..adbc0fd --- /dev/null +++ b/ffmpeg1/libavfilter/bufferqueue.h @@ -0,0 +1,121 @@ +/* + * Generic buffer queue + * Copyright (c) 2012 Nicolas George + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BUFFERQUEUE_H +#define AVFILTER_BUFFERQUEUE_H + +/** + * FFBufQueue: simple AVFrame queue API + * + * Note: this API is not thread-safe. Concurrent access to the same queue + * must be protected by a mutex or any synchronization mechanism. + */ + +/** + * Maximum size of the queue. + * + * This value can be overridden by defining it before including this + * header. + * Powers of 2 are recommended. + */ +#ifndef FF_BUFQUEUE_SIZE +#define FF_BUFQUEUE_SIZE 32 +#endif + +#include "avfilter.h" +#include "libavutil/avassert.h" + +/** + * Structure holding the queue + */ +struct FFBufQueue { + AVFrame *queue[FF_BUFQUEUE_SIZE]; + unsigned short head; + unsigned short available; /**< number of available buffers */ +}; + +#define BUCKET(i) queue->queue[(queue->head + (i)) % FF_BUFQUEUE_SIZE] + +/** + * Test if a buffer queue is full. 
+ */ +static inline int ff_bufqueue_is_full(struct FFBufQueue *queue) +{ + return queue->available == FF_BUFQUEUE_SIZE; +} + +/** + * Add a buffer to the queue. + * + * If the queue is already full, then the current last buffer is dropped + * (and unrefed) with a warning before adding the new buffer. + */ +static inline void ff_bufqueue_add(void *log, struct FFBufQueue *queue, + AVFrame *buf) +{ + if (ff_bufqueue_is_full(queue)) { + av_log(log, AV_LOG_WARNING, "Buffer queue overflow, dropping.\n"); + av_frame_free(&BUCKET(--queue->available)); + } + BUCKET(queue->available++) = buf; +} + +/** + * Get a buffer from the queue without altering it. + * + * Buffer with index 0 is the first buffer in the queue. + * Return NULL if the queue has not enough buffers. + */ +static inline AVFrame *ff_bufqueue_peek(struct FFBufQueue *queue, + unsigned index) +{ + return index < queue->available ? BUCKET(index) : NULL; +} + +/** + * Get the first buffer from the queue and remove it. + * + * Do not use on an empty queue. + */ +static inline AVFrame *ff_bufqueue_get(struct FFBufQueue *queue) +{ + AVFrame *ret = queue->queue[queue->head]; + av_assert0(queue->available); + queue->available--; + queue->queue[queue->head] = NULL; + queue->head = (queue->head + 1) % FF_BUFQUEUE_SIZE; + return ret; +} + +/** + * Unref and remove all buffers from the queue. + */ +static inline void ff_bufqueue_discard_all(struct FFBufQueue *queue) +{ + while (queue->available) { + AVFrame *buf = ff_bufqueue_get(queue); + av_frame_free(&buf); + } +} + +#undef BUCKET + +#endif /* AVFILTER_BUFFERQUEUE_H */ diff --git a/ffmpeg1/libavfilter/buffersink.c b/ffmpeg1/libavfilter/buffersink.c new file mode 100644 index 0000000..bcb6525 --- /dev/null +++ b/ffmpeg1/libavfilter/buffersink.c @@ -0,0 +1,558 @@ +/* + * Copyright (c) 2011 Stefano Sabatini + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * buffer sink + */ + +#include "libavutil/audio_fifo.h" +#include "libavutil/avassert.h" +#include "libavutil/channel_layout.h" +#include "libavutil/common.h" +#include "libavutil/mathematics.h" + +#include "audio.h" +#include "avfilter.h" +#include "buffersink.h" +#include "internal.h" + +typedef struct { + AVFifoBuffer *fifo; ///< FIFO buffer of video frame references + unsigned warning_limit; + + /* only used for video */ + enum AVPixelFormat *pixel_fmts; ///< list of accepted pixel formats, must be terminated with -1 + + /* only used for audio */ + enum AVSampleFormat *sample_fmts; ///< list of accepted sample formats, terminated by AV_SAMPLE_FMT_NONE + int64_t *channel_layouts; ///< list of accepted channel layouts, terminated by -1 + int all_channel_counts; + int *sample_rates; ///< list of accepted sample rates, terminated by -1 + + /* only used for compat API */ + AVAudioFifo *audio_fifo; ///< FIFO for audio samples + int64_t next_pts; ///< interpolating audio pts +} BufferSinkContext; + +static av_cold void uninit(AVFilterContext *ctx) +{ + BufferSinkContext *sink = ctx->priv; + AVFrame *frame; + + if (sink->audio_fifo) + av_audio_fifo_free(sink->audio_fifo); + + if (sink->fifo) { + while 
(av_fifo_size(sink->fifo) >= sizeof(AVFilterBufferRef *)) { + av_fifo_generic_read(sink->fifo, &frame, sizeof(frame), NULL); + av_frame_unref(frame); + } + av_fifo_free(sink->fifo); + sink->fifo = NULL; + } + av_freep(&sink->pixel_fmts); + av_freep(&sink->sample_fmts); + av_freep(&sink->sample_rates); + av_freep(&sink->channel_layouts); +} + +static int add_buffer_ref(AVFilterContext *ctx, AVFrame *ref) +{ + BufferSinkContext *buf = ctx->priv; + + if (av_fifo_space(buf->fifo) < sizeof(AVFilterBufferRef *)) { + /* realloc fifo size */ + if (av_fifo_realloc2(buf->fifo, av_fifo_size(buf->fifo) * 2) < 0) { + av_log(ctx, AV_LOG_ERROR, + "Cannot buffer more frames. Consume some available frames " + "before adding new ones.\n"); + return AVERROR(ENOMEM); + } + } + + /* cache frame */ + av_fifo_generic_write(buf->fifo, &ref, sizeof(AVFilterBufferRef *), NULL); + return 0; +} + +static int filter_frame(AVFilterLink *link, AVFrame *frame) +{ + AVFilterContext *ctx = link->dst; + BufferSinkContext *buf = link->dst->priv; + int ret; + + if ((ret = add_buffer_ref(ctx, frame)) < 0) + return ret; + if (buf->warning_limit && + av_fifo_size(buf->fifo) / sizeof(AVFilterBufferRef *) >= buf->warning_limit) { + av_log(ctx, AV_LOG_WARNING, + "%d buffers queued in %s, something may be wrong.\n", + buf->warning_limit, + (char *)av_x_if_null(ctx->name, ctx->filter->name)); + buf->warning_limit *= 10; + } + return 0; +} + +int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame) +{ + return av_buffersink_get_frame_flags(ctx, frame, 0); +} + +int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags) +{ + BufferSinkContext *buf = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + int ret; + AVFrame *cur_frame; + + /* no picref available, fetch it from the filterchain */ + if (!av_fifo_size(buf->fifo)) { + if (flags & AV_BUFFERSINK_FLAG_NO_REQUEST) + return AVERROR(EAGAIN); + if ((ret = ff_request_frame(inlink)) < 0) + return ret; + } + 
+ if (!av_fifo_size(buf->fifo)) + return AVERROR(EINVAL); + + if (flags & AV_BUFFERSINK_FLAG_PEEK) { + cur_frame = *((AVFrame **)av_fifo_peek2(buf->fifo, 0)); + if ((ret = av_frame_ref(frame, cur_frame)) < 0) + return ret; + } else { + av_fifo_generic_read(buf->fifo, &cur_frame, sizeof(cur_frame), NULL); + av_frame_move_ref(frame, cur_frame); + av_frame_free(&cur_frame); + } + + return 0; +} + +static int read_from_fifo(AVFilterContext *ctx, AVFrame *frame, + int nb_samples) +{ + BufferSinkContext *s = ctx->priv; + AVFilterLink *link = ctx->inputs[0]; + AVFrame *tmp; + + if (!(tmp = ff_get_audio_buffer(link, nb_samples))) + return AVERROR(ENOMEM); + av_audio_fifo_read(s->audio_fifo, (void**)tmp->extended_data, nb_samples); + + tmp->pts = s->next_pts; + s->next_pts += av_rescale_q(nb_samples, (AVRational){1, link->sample_rate}, + link->time_base); + + av_frame_move_ref(frame, tmp); + av_frame_free(&tmp); + + return 0; + +} + +int attribute_align_arg av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples) +{ + BufferSinkContext *s = ctx->priv; + AVFilterLink *link = ctx->inputs[0]; + AVFrame *cur_frame; + int ret = 0; + + if (!s->audio_fifo) { + int nb_channels = link->channels; + if (!(s->audio_fifo = av_audio_fifo_alloc(link->format, nb_channels, nb_samples))) + return AVERROR(ENOMEM); + } + + while (ret >= 0) { + if (av_audio_fifo_size(s->audio_fifo) >= nb_samples) + return read_from_fifo(ctx, frame, nb_samples); + + if (!(cur_frame = av_frame_alloc())) + return AVERROR(ENOMEM); + ret = av_buffersink_get_frame_flags(ctx, cur_frame, 0); + if (ret == AVERROR_EOF && av_audio_fifo_size(s->audio_fifo)) { + av_frame_free(&cur_frame); + return read_from_fifo(ctx, frame, av_audio_fifo_size(s->audio_fifo)); + } else if (ret < 0) { + av_frame_free(&cur_frame); + return ret; + } + + if (cur_frame->pts != AV_NOPTS_VALUE) { + s->next_pts = cur_frame->pts - + av_rescale_q(av_audio_fifo_size(s->audio_fifo), + (AVRational){ 1, link->sample_rate }, + 
link->time_base); + } + + ret = av_audio_fifo_write(s->audio_fifo, (void**)cur_frame->extended_data, + cur_frame->nb_samples); + av_frame_free(&cur_frame); + } + + return ret; + +} + +AVBufferSinkParams *av_buffersink_params_alloc(void) +{ + static const int pixel_fmts[] = { AV_PIX_FMT_NONE }; + AVBufferSinkParams *params = av_malloc(sizeof(AVBufferSinkParams)); + if (!params) + return NULL; + + params->pixel_fmts = pixel_fmts; + return params; +} + +AVABufferSinkParams *av_abuffersink_params_alloc(void) +{ + AVABufferSinkParams *params = av_mallocz(sizeof(AVABufferSinkParams)); + + if (!params) + return NULL; + return params; +} + +#define FIFO_INIT_SIZE 8 + +static av_cold int common_init(AVFilterContext *ctx) +{ + BufferSinkContext *buf = ctx->priv; + + buf->fifo = av_fifo_alloc(FIFO_INIT_SIZE*sizeof(AVFilterBufferRef *)); + if (!buf->fifo) { + av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo\n"); + return AVERROR(ENOMEM); + } + buf->warning_limit = 100; + return 0; +} + +void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size) +{ + AVFilterLink *inlink = ctx->inputs[0]; + + inlink->min_samples = inlink->max_samples = + inlink->partial_buf_size = frame_size; +} + +#if FF_API_AVFILTERBUFFER +static void compat_free_buffer(AVFilterBuffer *buf) +{ + AVFrame *frame = buf->priv; + av_frame_free(&frame); + av_free(buf); +} + +static int attribute_align_arg compat_read(AVFilterContext *ctx, AVFilterBufferRef **pbuf, int nb_samples, int flags) +{ + AVFilterBufferRef *buf; + AVFrame *frame; + int ret; + + if (!pbuf) + return ff_poll_frame(ctx->inputs[0]); + + frame = av_frame_alloc(); + if (!frame) + return AVERROR(ENOMEM); + + if (!nb_samples) + ret = av_buffersink_get_frame_flags(ctx, frame, flags); + else + ret = av_buffersink_get_samples(ctx, frame, nb_samples); + + if (ret < 0) + goto fail; + + if (ctx->inputs[0]->type == AVMEDIA_TYPE_VIDEO) { + buf = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize, + AV_PERM_READ, + 
frame->width, frame->height, + frame->format); + } else { + buf = avfilter_get_audio_buffer_ref_from_arrays(frame->extended_data, + frame->linesize[0], AV_PERM_READ, + frame->nb_samples, + frame->format, + frame->channel_layout); + } + if (!buf) { + ret = AVERROR(ENOMEM); + goto fail; + } + + avfilter_copy_frame_props(buf, frame); + + buf->buf->priv = frame; + buf->buf->free = compat_free_buffer; + + *pbuf = buf; + + return 0; +fail: + av_frame_free(&frame); + return ret; +} + +int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf) +{ + return compat_read(ctx, buf, 0, 0); +} + +int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf, + int nb_samples) +{ + return compat_read(ctx, buf, nb_samples, 0); +} + +int av_buffersink_get_buffer_ref(AVFilterContext *ctx, + AVFilterBufferRef **bufref, int flags) +{ + *bufref = NULL; + + av_assert0( !strcmp(ctx->filter->name, "buffersink") + || !strcmp(ctx->filter->name, "abuffersink") + || !strcmp(ctx->filter->name, "ffbuffersink") + || !strcmp(ctx->filter->name, "ffabuffersink")); + + return compat_read(ctx, bufref, 0, flags); +} +#endif + +AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx) +{ + av_assert0( !strcmp(ctx->filter->name, "buffersink") + || !strcmp(ctx->filter->name, "ffbuffersink")); + + return ctx->inputs[0]->frame_rate; +} + +int attribute_align_arg av_buffersink_poll_frame(AVFilterContext *ctx) +{ + BufferSinkContext *buf = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + + av_assert0( !strcmp(ctx->filter->name, "buffersink") + || !strcmp(ctx->filter->name, "abuffersink") + || !strcmp(ctx->filter->name, "ffbuffersink") + || !strcmp(ctx->filter->name, "ffabuffersink")); + + return av_fifo_size(buf->fifo)/sizeof(AVFilterBufferRef *) + ff_poll_frame(inlink); +} + +static av_cold int vsink_init(AVFilterContext *ctx, const char *args, void *opaque) +{ + BufferSinkContext *buf = ctx->priv; + AVBufferSinkParams *params = opaque; + + if (params && params->pixel_fmts) 
{ + const int *pixel_fmts = params->pixel_fmts; + + buf->pixel_fmts = ff_copy_int_list(pixel_fmts); + if (!buf->pixel_fmts) + return AVERROR(ENOMEM); + } + + return common_init(ctx); +} + +static int vsink_query_formats(AVFilterContext *ctx) +{ + BufferSinkContext *buf = ctx->priv; + + if (buf->pixel_fmts) + ff_set_common_formats(ctx, ff_make_format_list(buf->pixel_fmts)); + else + ff_default_query_formats(ctx); + + return 0; +} + +static int64_t *concat_channels_lists(const int64_t *layouts, const int *counts) +{ + int nb_layouts = 0, nb_counts = 0, i; + int64_t *list; + + if (layouts) + for (; layouts[nb_layouts] != -1; nb_layouts++); + if (counts) + for (; counts[nb_counts] != -1; nb_counts++); + if (nb_counts > INT_MAX - 1 - nb_layouts) + return NULL; + if (!(list = av_calloc(nb_layouts + nb_counts + 1, sizeof(*list)))) + return NULL; + for (i = 0; i < nb_layouts; i++) + list[i] = layouts[i]; + for (i = 0; i < nb_counts; i++) + list[nb_layouts + i] = FF_COUNT2LAYOUT(counts[i]); + list[nb_layouts + nb_counts] = -1; + return list; +} + +static av_cold int asink_init(AVFilterContext *ctx, const char *args, void *opaque) +{ + BufferSinkContext *buf = ctx->priv; + AVABufferSinkParams *params = opaque; + + if (params && params->sample_fmts) { + buf->sample_fmts = ff_copy_int_list(params->sample_fmts); + if (!buf->sample_fmts) + return AVERROR(ENOMEM); + } + if (params && params->sample_rates) { + buf->sample_rates = ff_copy_int_list(params->sample_rates); + if (!buf->sample_rates) + return AVERROR(ENOMEM); + } + if (params && (params->channel_layouts || params->channel_counts)) { + if (params->all_channel_counts) { + av_log(ctx, AV_LOG_ERROR, + "Conflicting all_channel_counts and list in parameters\n"); + return AVERROR(EINVAL); + } + buf->channel_layouts = concat_channels_lists(params->channel_layouts, + params->channel_counts); + if (!buf->channel_layouts) + return AVERROR(ENOMEM); + } + if (params) + buf->all_channel_counts = params->all_channel_counts; + return 
common_init(ctx); +} + +static int asink_query_formats(AVFilterContext *ctx) +{ + BufferSinkContext *buf = ctx->priv; + AVFilterFormats *formats = NULL; + AVFilterChannelLayouts *layouts = NULL; + + if (buf->sample_fmts) { + if (!(formats = ff_make_format_list(buf->sample_fmts))) + return AVERROR(ENOMEM); + ff_set_common_formats(ctx, formats); + } + + if (buf->channel_layouts || buf->all_channel_counts) { + layouts = buf->all_channel_counts ? ff_all_channel_counts() : + avfilter_make_format64_list(buf->channel_layouts); + if (!layouts) + return AVERROR(ENOMEM); + ff_set_common_channel_layouts(ctx, layouts); + } + + if (buf->sample_rates) { + formats = ff_make_format_list(buf->sample_rates); + if (!formats) + return AVERROR(ENOMEM); + ff_set_common_samplerates(ctx, formats); + } + + return 0; +} + +#if FF_API_AVFILTERBUFFER +static const AVFilterPad ffbuffersink_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .filter_frame = filter_frame, + }, + { NULL }, +}; + +AVFilter avfilter_vsink_ffbuffersink = { + .name = "ffbuffersink", + .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."), + .priv_size = sizeof(BufferSinkContext), + .init_opaque = vsink_init, + .uninit = uninit, + + .query_formats = vsink_query_formats, + .inputs = ffbuffersink_inputs, + .outputs = NULL, +}; + +static const AVFilterPad ffabuffersink_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + }, + { NULL }, +}; + +AVFilter avfilter_asink_ffabuffersink = { + .name = "ffabuffersink", + .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."), + .init_opaque = asink_init, + .uninit = uninit, + .priv_size = sizeof(BufferSinkContext), + .query_formats = asink_query_formats, + .inputs = ffabuffersink_inputs, + .outputs = NULL, +}; +#endif /* FF_API_AVFILTERBUFFER */ + +static const AVFilterPad 
avfilter_vsink_buffer_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .filter_frame = filter_frame, + }, + { NULL } +}; + +AVFilter avfilter_vsink_buffer = { + .name = "buffersink", + .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."), + .priv_size = sizeof(BufferSinkContext), + .init_opaque = vsink_init, + .uninit = uninit, + + .query_formats = vsink_query_formats, + .inputs = avfilter_vsink_buffer_inputs, + .outputs = NULL, +}; + +static const AVFilterPad avfilter_asink_abuffer_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + }, + { NULL } +}; + +AVFilter avfilter_asink_abuffer = { + .name = "abuffersink", + .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."), + .priv_size = sizeof(BufferSinkContext), + .init_opaque = asink_init, + .uninit = uninit, + + .query_formats = asink_query_formats, + .inputs = avfilter_asink_abuffer_inputs, + .outputs = NULL, +}; diff --git a/ffmpeg1/libavfilter/buffersink.h b/ffmpeg1/libavfilter/buffersink.h new file mode 100644 index 0000000..ce96d08 --- /dev/null +++ b/ffmpeg1/libavfilter/buffersink.h @@ -0,0 +1,186 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BUFFERSINK_H +#define AVFILTER_BUFFERSINK_H + +/** + * @file + * memory buffer sink API for audio and video + */ + +#include "avfilter.h" + +#if FF_API_AVFILTERBUFFER +/** + * Get an audio/video buffer data from buffer_sink and put it in bufref. + * + * This function works with both audio and video buffer sinks. + * + * @param buffer_sink pointer to a buffersink or abuffersink context + * @param flags a combination of AV_BUFFERSINK_FLAG_* flags + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + */ +attribute_deprecated +int av_buffersink_get_buffer_ref(AVFilterContext *buffer_sink, + AVFilterBufferRef **bufref, int flags); + +/** + * Get the number of immediately available frames. + */ +attribute_deprecated +int av_buffersink_poll_frame(AVFilterContext *ctx); + +/** + * Get a buffer with filtered data from sink and put it in buf. + * + * @param ctx pointer to a context of a buffersink or abuffersink AVFilter. + * @param buf pointer to the buffer will be written here if buf is non-NULL. buf + * must be freed by the caller using avfilter_unref_buffer(). + * Buf may also be NULL to query whether a buffer is ready to be + * output. + * + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure. + */ +attribute_deprecated +int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf); + +/** + * Same as av_buffersink_read, but with the ability to specify the number of + * samples read. This function is less efficient than av_buffersink_read(), + * because it copies the data around. + * + * @param ctx pointer to a context of the abuffersink AVFilter. + * @param buf pointer to the buffer will be written here if buf is non-NULL. 
buf + * must be freed by the caller using avfilter_unref_buffer(). buf + * will contain exactly nb_samples audio samples, except at the end + * of stream, when it can contain less than nb_samples. + * Buf may also be NULL to query whether a buffer is ready to be + * output. + * + * @warning do not mix this function with av_buffersink_read(). Use only one or + * the other with a single sink, not both. + */ +attribute_deprecated +int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf, + int nb_samples); +#endif + +/** + * Get a frame with filtered data from sink and put it in frame. + * + * @param ctx pointer to a buffersink or abuffersink filter context. + * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using av_frame_unref() / av_frame_free() + * @param flags a combination of AV_BUFFERSINK_FLAG_* flags + * + * @return >= 0 in for success, a negative AVERROR code for failure. + */ +int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags); + +/** + * Tell av_buffersink_get_buffer_ref() to read video/samples buffer + * reference, but not remove it from the buffer. This is useful if you + * need only to read a video/samples buffer, without to fetch it. + */ +#define AV_BUFFERSINK_FLAG_PEEK 1 + +/** + * Tell av_buffersink_get_buffer_ref() not to request a frame from its input. + * If a frame is already buffered, it is read (and removed from the buffer), + * but if no frame is present, return AVERROR(EAGAIN). + */ +#define AV_BUFFERSINK_FLAG_NO_REQUEST 2 + +/** + * Struct to use for initializing a buffersink context. + */ +typedef struct { + const enum AVPixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by AV_PIX_FMT_NONE +} AVBufferSinkParams; + +/** + * Create an AVBufferSinkParams structure. + * + * Must be freed with av_free(). 
+ */ +AVBufferSinkParams *av_buffersink_params_alloc(void); + +/** + * Struct to use for initializing an abuffersink context. + */ +typedef struct { + const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE + const int64_t *channel_layouts; ///< list of allowed channel layouts, terminated by -1 + const int *channel_counts; ///< list of allowed channel counts, terminated by -1 + int all_channel_counts; ///< if not 0, accept any channel count or layout + int *sample_rates; ///< list of allowed sample rates, terminated by -1 +} AVABufferSinkParams; + +/** + * Create an AVABufferSinkParams structure. + * + * Must be freed with av_free(). + */ +AVABufferSinkParams *av_abuffersink_params_alloc(void); + +/** + * Set the frame size for an audio buffer sink. + * + * All calls to av_buffersink_get_buffer_ref will return a buffer with + * exactly the specified number of samples, or AVERROR(EAGAIN) if there is + * not enough. The last buffer at EOF will be padded with 0. + */ +void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size); + +/** + * Get the frame rate of the input. + */ +AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx); + +/** + * Get a frame with filtered data from sink and put it in frame. + * + * @param ctx pointer to a context of a buffersink or abuffersink AVFilter. + * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using av_frame_unref() / av_frame_free() + * + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure. + */ +int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame); + +/** + * Same as av_buffersink_get_frame(), but with the ability to specify the number + * of samples read. This function is less efficient than + * av_buffersink_get_frame(), because it copies the data around. + * + * @param ctx pointer to a context of the abuffersink AVFilter. 
+ * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using av_frame_unref() / av_frame_free() + * frame will contain exactly nb_samples audio samples, except at + * the end of stream, when it can contain less than nb_samples. + * + * @warning do not mix this function with av_buffersink_get_frame(). Use only one or + * the other with a single sink, not both. + */ +int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples); + +#endif /* AVFILTER_BUFFERSINK_H */ diff --git a/ffmpeg1/libavfilter/buffersrc.c b/ffmpeg1/libavfilter/buffersrc.c new file mode 100644 index 0000000..cc650ff --- /dev/null +++ b/ffmpeg1/libavfilter/buffersrc.c @@ -0,0 +1,581 @@ +/* + * Copyright (c) 2008 Vitor Sessak + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * memory buffer source filter + */ + +#include "libavutil/channel_layout.h" +#include "libavutil/common.h" +#include "libavutil/fifo.h" +#include "libavutil/frame.h" +#include "libavutil/imgutils.h" +#include "libavutil/opt.h" +#include "libavutil/samplefmt.h" +#include "audio.h" +#include "avfilter.h" +#include "buffersrc.h" +#include "formats.h" +#include "internal.h" +#include "video.h" +#include "avcodec.h" + +typedef struct { + const AVClass *class; + AVFifoBuffer *fifo; + AVRational time_base; ///< time_base to set in the output link + AVRational frame_rate; ///< frame_rate to set in the output link + unsigned nb_failed_requests; + unsigned warning_limit; + + /* video only */ + int w, h; + enum AVPixelFormat pix_fmt; + AVRational pixel_aspect; + char *sws_param; + + /* audio only */ + int sample_rate; + enum AVSampleFormat sample_fmt; + char *sample_fmt_str; + int channels; + uint64_t channel_layout; + char *channel_layout_str; + + int eof; +} BufferSourceContext; + +#define CHECK_VIDEO_PARAM_CHANGE(s, c, width, height, format)\ + if (c->w != width || c->h != height || c->pix_fmt != format) {\ + av_log(s, AV_LOG_INFO, "Changing frame properties on the fly is not supported by all filters.\n");\ + } + +#define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, ch_layout, ch_count, format)\ + if (c->sample_fmt != format || c->sample_rate != srate ||\ + c->channel_layout != ch_layout || c->channels != ch_count) {\ + av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\ + return AVERROR(EINVAL);\ + } + +int av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame) +{ + return av_buffersrc_add_frame_flags(ctx, (AVFrame *)frame, + AV_BUFFERSRC_FLAG_KEEP_REF); +} + +int 
av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame) +{ + return av_buffersrc_add_frame_flags(ctx, frame, 0); +} + +static int av_buffersrc_add_frame_internal(AVFilterContext *ctx, + AVFrame *frame, int flags); + +int av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags) +{ + AVFrame *copy = NULL; + int ret = 0; + + if (frame && frame->channel_layout && + av_get_channel_layout_nb_channels(frame->channel_layout) != av_frame_get_channels(frame)) { + av_log(0, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n"); + return AVERROR(EINVAL); + } + + if (!(flags & AV_BUFFERSRC_FLAG_KEEP_REF) || !frame) + return av_buffersrc_add_frame_internal(ctx, frame, flags); + + if (!(copy = av_frame_alloc())) + return AVERROR(ENOMEM); + ret = av_frame_ref(copy, frame); + if (ret >= 0) + ret = av_buffersrc_add_frame_internal(ctx, copy, flags); + + av_frame_free(©); + return ret; +} + +static int attribute_align_arg av_buffersrc_add_frame_internal(AVFilterContext *ctx, + AVFrame *frame, int flags) +{ + BufferSourceContext *s = ctx->priv; + AVFrame *copy; + int ret; + + if (!frame) { + s->eof = 1; + return 0; + } else if (s->eof) + return AVERROR(EINVAL); + + if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) { + + switch (ctx->outputs[0]->type) { + case AVMEDIA_TYPE_VIDEO: + CHECK_VIDEO_PARAM_CHANGE(ctx, s, frame->width, frame->height, + frame->format); + break; + case AVMEDIA_TYPE_AUDIO: + /* For layouts unknown on input but known on link after negotiation. 
*/ + if (!frame->channel_layout) + frame->channel_layout = s->channel_layout; + CHECK_AUDIO_PARAM_CHANGE(ctx, s, frame->sample_rate, frame->channel_layout, + av_frame_get_channels(frame), frame->format); + break; + default: + return AVERROR(EINVAL); + } + + } + + if (!av_fifo_space(s->fifo) && + (ret = av_fifo_realloc2(s->fifo, av_fifo_size(s->fifo) + + sizeof(copy))) < 0) + return ret; + + if (!(copy = av_frame_alloc())) + return AVERROR(ENOMEM); + av_frame_move_ref(copy, frame); + + if ((ret = av_fifo_generic_write(s->fifo, ©, sizeof(copy), NULL)) < 0) { + av_frame_move_ref(frame, copy); + av_frame_free(©); + return ret; + } + + if ((flags & AV_BUFFERSRC_FLAG_PUSH)) + if ((ret = ctx->output_pads[0].request_frame(ctx->outputs[0])) < 0) + return ret; + + return 0; +} + +#if FF_API_AVFILTERBUFFER +static void compat_free_buffer(void *opaque, uint8_t *data) +{ + AVFilterBufferRef *buf = opaque; + AV_NOWARN_DEPRECATED( + avfilter_unref_buffer(buf); + ) +} + +static void compat_unref_buffer(void *opaque, uint8_t *data) +{ + AVBufferRef *buf = opaque; + AV_NOWARN_DEPRECATED( + av_buffer_unref(&buf); + ) +} + +int av_buffersrc_add_ref(AVFilterContext *ctx, AVFilterBufferRef *buf, + int flags) +{ + BufferSourceContext *s = ctx->priv; + AVFrame *frame = NULL; + AVBufferRef *dummy_buf = NULL; + int ret = 0, planes, i; + + if (!buf) { + s->eof = 1; + return 0; + } else if (s->eof) + return AVERROR(EINVAL); + + frame = av_frame_alloc(); + if (!frame) + return AVERROR(ENOMEM); + + dummy_buf = av_buffer_create(NULL, 0, compat_free_buffer, buf, + (buf->perms & AV_PERM_WRITE) ? 
0 : AV_BUFFER_FLAG_READONLY); + if (!dummy_buf) { + ret = AVERROR(ENOMEM); + goto fail; + } + + AV_NOWARN_DEPRECATED( + if ((ret = avfilter_copy_buf_props(frame, buf)) < 0) + goto fail; + ) + +#define WRAP_PLANE(ref_out, data, data_size) \ +do { \ + AVBufferRef *dummy_ref = av_buffer_ref(dummy_buf); \ + if (!dummy_ref) { \ + ret = AVERROR(ENOMEM); \ + goto fail; \ + } \ + ref_out = av_buffer_create(data, data_size, compat_unref_buffer, \ + dummy_ref, (buf->perms & AV_PERM_WRITE) ? 0 : AV_BUFFER_FLAG_READONLY); \ + if (!ref_out) { \ + av_frame_unref(frame); \ + ret = AVERROR(ENOMEM); \ + goto fail; \ + } \ +} while (0) + + if (ctx->outputs[0]->type == AVMEDIA_TYPE_VIDEO) { + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); + + planes = av_pix_fmt_count_planes(frame->format); + if (!desc || planes <= 0) { + ret = AVERROR(EINVAL); + goto fail; + } + + for (i = 0; i < planes; i++) { + int v_shift = (i == 1 || i == 2) ? desc->log2_chroma_h : 0; + int plane_size = (frame->height >> v_shift) * frame->linesize[i]; + + WRAP_PLANE(frame->buf[i], frame->data[i], plane_size); + } + } else { + int planar = av_sample_fmt_is_planar(frame->format); + int channels = av_get_channel_layout_nb_channels(frame->channel_layout); + + planes = planar ? 
channels : 1; + + if (planes > FF_ARRAY_ELEMS(frame->buf)) { + frame->nb_extended_buf = planes - FF_ARRAY_ELEMS(frame->buf); + frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) * + frame->nb_extended_buf); + if (!frame->extended_buf) { + ret = AVERROR(ENOMEM); + goto fail; + } + } + + for (i = 0; i < FFMIN(planes, FF_ARRAY_ELEMS(frame->buf)); i++) + WRAP_PLANE(frame->buf[i], frame->extended_data[i], frame->linesize[0]); + + for (i = 0; i < planes - FF_ARRAY_ELEMS(frame->buf); i++) + WRAP_PLANE(frame->extended_buf[i], + frame->extended_data[i + FF_ARRAY_ELEMS(frame->buf)], + frame->linesize[0]); + } + + ret = av_buffersrc_add_frame_flags(ctx, frame, flags); + +fail: + av_buffer_unref(&dummy_buf); + av_frame_free(&frame); + + return ret; +} + +int av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf) +{ + return av_buffersrc_add_ref(ctx, buf, 0); +} +#endif + +#define OFFSET(x) offsetof(BufferSourceContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM +static const AVOption buffer_options[] = { + { "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, FLAGS }, + { "frame_rate", NULL, OFFSET(frame_rate), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, FLAGS }, + { "video_size", NULL, OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, .flags = FLAGS }, + { "pix_fmt", NULL, OFFSET(pix_fmt), AV_OPT_TYPE_PIXEL_FMT, .flags = FLAGS }, + { "pixel_aspect", NULL, OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, FLAGS }, + { "sws_param", NULL, OFFSET(sws_param), AV_OPT_TYPE_STRING, .flags = FLAGS }, + { NULL }, +}; +#undef FLAGS + +AVFILTER_DEFINE_CLASS(buffer); + +static av_cold int init_video(AVFilterContext *ctx, const char *args) +{ + BufferSourceContext *c = ctx->priv; + char pix_fmt_str[128], *colon, *equal; + int ret, n = 0; + + c->class = &buffer_class; + + if (!args) { + av_log(ctx, AV_LOG_ERROR, "Arguments required\n"); + return AVERROR(EINVAL); + } + + colon = strchr(args, ':'); + 
equal = strchr(args, '='); + if (equal && (!colon || equal < colon)) { + av_opt_set_defaults(c); + ret = av_set_options_string(c, args, "=", ":"); + if (ret < 0) + goto fail; + } else { + if (!args || + (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d", &c->w, &c->h, pix_fmt_str, + &c->time_base.num, &c->time_base.den, + &c->pixel_aspect.num, &c->pixel_aspect.den)) != 7) { + av_log(ctx, AV_LOG_ERROR, "Expected 7 arguments, but %d found in '%s'\n", n, args); + return AVERROR(EINVAL); + } + if ((c->pix_fmt = av_get_pix_fmt(pix_fmt_str)) == AV_PIX_FMT_NONE) { + char *tail; + c->pix_fmt = strtol(pix_fmt_str, &tail, 10); + if (*tail || c->pix_fmt < 0 || c->pix_fmt >= AV_PIX_FMT_NB) { + av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", pix_fmt_str); + return AVERROR(EINVAL); + } + } + } + + if (!(c->fifo = av_fifo_alloc(sizeof(AVFrame*)))) + return AVERROR(ENOMEM); + + av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n", + c->w, c->h, av_get_pix_fmt_name(c->pix_fmt), + c->time_base.num, c->time_base.den, c->frame_rate.num, c->frame_rate.den, + c->pixel_aspect.num, c->pixel_aspect.den, (char *)av_x_if_null(c->sws_param, "")); + c->warning_limit = 100; + return 0; + +fail: + av_opt_free(c); + return ret; +} + +unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src) +{ + return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests; +} + +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM +static const AVOption abuffer_options[] = { + { "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, FLAGS }, + { "sample_rate", NULL, OFFSET(sample_rate), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS }, + { "sample_fmt", NULL, OFFSET(sample_fmt_str), AV_OPT_TYPE_STRING, .flags = FLAGS }, + { "channels", NULL, OFFSET(channels), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS }, + { "channel_layout", NULL, OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = 
FLAGS }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(abuffer); + +static av_cold int init_audio(AVFilterContext *ctx, const char *args) +{ + BufferSourceContext *s = ctx->priv; + int ret = 0; + + s->class = &abuffer_class; + av_opt_set_defaults(s); + + if ((ret = av_set_options_string(s, args, "=", ":")) < 0) + goto fail; + + s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str); + if (s->sample_fmt == AV_SAMPLE_FMT_NONE) { + av_log(ctx, AV_LOG_ERROR, "Invalid sample format '%s'\n", + s->sample_fmt_str); + ret = AVERROR(EINVAL); + goto fail; + } + + if (s->channel_layout_str) { + int n; + /* TODO reindent */ + s->channel_layout = av_get_channel_layout(s->channel_layout_str); + if (!s->channel_layout) { + av_log(ctx, AV_LOG_ERROR, "Invalid channel layout '%s'\n", + s->channel_layout_str); + ret = AVERROR(EINVAL); + goto fail; + } + n = av_get_channel_layout_nb_channels(s->channel_layout); + if (s->channels) { + if (n != s->channels) { + av_log(ctx, AV_LOG_ERROR, + "Mismatching channel count %d and layout '%s' " + "(%d channels)\n", + s->channels, s->channel_layout_str, n); + ret = AVERROR(EINVAL); + goto fail; + } + } + s->channels = n; + } else if (!s->channels) { + av_log(ctx, AV_LOG_ERROR, "Neither number of channels nor " + "channel layout specified\n"); + ret = AVERROR(EINVAL); + goto fail; + } + + if (!(s->fifo = av_fifo_alloc(sizeof(AVFrame*)))) { + ret = AVERROR(ENOMEM); + goto fail; + } + + if (!s->time_base.num) + s->time_base = (AVRational){1, s->sample_rate}; + + av_log(ctx, AV_LOG_VERBOSE, + "tb:%d/%d samplefmt:%s samplerate:%d chlayout:%s\n", + s->time_base.num, s->time_base.den, s->sample_fmt_str, + s->sample_rate, s->channel_layout_str); + s->warning_limit = 100; + +fail: + av_opt_free(s); + return ret; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + BufferSourceContext *s = ctx->priv; + while (s->fifo && av_fifo_size(s->fifo)) { + AVFrame *frame; + av_fifo_generic_read(s->fifo, &frame, sizeof(frame), NULL); + av_frame_free(&frame); + } + 
av_fifo_free(s->fifo); + s->fifo = NULL; + av_freep(&s->sws_param); +} + +static int query_formats(AVFilterContext *ctx) +{ + BufferSourceContext *c = ctx->priv; + AVFilterChannelLayouts *channel_layouts = NULL; + AVFilterFormats *formats = NULL; + AVFilterFormats *samplerates = NULL; + + switch (ctx->outputs[0]->type) { + case AVMEDIA_TYPE_VIDEO: + ff_add_format(&formats, c->pix_fmt); + ff_set_common_formats(ctx, formats); + break; + case AVMEDIA_TYPE_AUDIO: + ff_add_format(&formats, c->sample_fmt); + ff_set_common_formats(ctx, formats); + + ff_add_format(&samplerates, c->sample_rate); + ff_set_common_samplerates(ctx, samplerates); + + ff_add_channel_layout(&channel_layouts, + c->channel_layout ? c->channel_layout : + FF_COUNT2LAYOUT(c->channels)); + ff_set_common_channel_layouts(ctx, channel_layouts); + break; + default: + return AVERROR(EINVAL); + } + + return 0; +} + +static int config_props(AVFilterLink *link) +{ + BufferSourceContext *c = link->src->priv; + + switch (link->type) { + case AVMEDIA_TYPE_VIDEO: + link->w = c->w; + link->h = c->h; + link->sample_aspect_ratio = c->pixel_aspect; + break; + case AVMEDIA_TYPE_AUDIO: + if (!c->channel_layout) + c->channel_layout = link->channel_layout; + break; + default: + return AVERROR(EINVAL); + } + + link->time_base = c->time_base; + link->frame_rate = c->frame_rate; + return 0; +} + +static int request_frame(AVFilterLink *link) +{ + BufferSourceContext *c = link->src->priv; + AVFrame *frame; + + if (!av_fifo_size(c->fifo)) { + if (c->eof) + return AVERROR_EOF; + c->nb_failed_requests++; + return AVERROR(EAGAIN); + } + av_fifo_generic_read(c->fifo, &frame, sizeof(frame), NULL); + + return ff_filter_frame(link, frame); +} + +static int poll_frame(AVFilterLink *link) +{ + BufferSourceContext *c = link->src->priv; + int size = av_fifo_size(c->fifo); + if (!size && c->eof) + return AVERROR_EOF; + return size/sizeof(AVFrame*); +} + +static const AVFilterPad avfilter_vsrc_buffer_outputs[] = { + { + .name = "default", + 
.type = AVMEDIA_TYPE_VIDEO, + .request_frame = request_frame, + .poll_frame = poll_frame, + .config_props = config_props, + }, + { NULL } +}; + +AVFilter avfilter_vsrc_buffer = { + .name = "buffer", + .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."), + .priv_size = sizeof(BufferSourceContext), + .query_formats = query_formats, + + .init = init_video, + .uninit = uninit, + + .inputs = NULL, + .outputs = avfilter_vsrc_buffer_outputs, + .priv_class = &buffer_class, +}; + +static const AVFilterPad avfilter_asrc_abuffer_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .request_frame = request_frame, + .poll_frame = poll_frame, + .config_props = config_props, + }, + { NULL } +}; + +AVFilter avfilter_asrc_abuffer = { + .name = "abuffer", + .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."), + .priv_size = sizeof(BufferSourceContext), + .query_formats = query_formats, + + .init = init_audio, + .uninit = uninit, + + .inputs = NULL, + .outputs = avfilter_asrc_abuffer_outputs, + .priv_class = &abuffer_class, +}; diff --git a/ffmpeg1/libavfilter/buffersrc.h b/ffmpeg1/libavfilter/buffersrc.h new file mode 100644 index 0000000..66361b3 --- /dev/null +++ b/ffmpeg1/libavfilter/buffersrc.h @@ -0,0 +1,148 @@ +/* + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BUFFERSRC_H +#define AVFILTER_BUFFERSRC_H + +/** + * @file + * Memory buffer source API. + */ + +#include "libavcodec/avcodec.h" +#include "avfilter.h" + +enum { + + /** + * Do not check for format changes. + */ + AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1, + +#if FF_API_AVFILTERBUFFER + /** + * Ignored + */ + AV_BUFFERSRC_FLAG_NO_COPY = 2, +#endif + + /** + * Immediately push the frame to the output. + */ + AV_BUFFERSRC_FLAG_PUSH = 4, + + /** + * Keep a reference to the frame. + * If the frame if reference-counted, create a new reference; otherwise + * copy the frame data. + */ + AV_BUFFERSRC_FLAG_KEEP_REF = 8, + +}; + +/** + * Add buffer data in picref to buffer_src. + * + * @param buffer_src pointer to a buffer source context + * @param picref a buffer reference, or NULL to mark EOF + * @param flags a combination of AV_BUFFERSRC_FLAG_* + * @return >= 0 in case of success, a negative AVERROR code + * in case of failure + */ +int av_buffersrc_add_ref(AVFilterContext *buffer_src, + AVFilterBufferRef *picref, int flags); + +/** + * Get the number of failed requests. + * + * A failed request is when the request_frame method is called while no + * frame is present in the buffer. + * The number is reset when a frame is added. + */ +unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src); + +#if FF_API_AVFILTERBUFFER +/** + * Add a buffer to the filtergraph s. + * + * @param buf buffer containing frame data to be passed down the filtergraph. + * This function will take ownership of buf, the user must not free it. + * A NULL buf signals EOF -- i.e. no more frames will be sent to this filter. 
+ * + * @deprecated use av_buffersrc_write_frame() or av_buffersrc_add_frame() + */ +attribute_deprecated +int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf); +#endif + +/** + * Add a frame to the buffer source. + * + * @param s an instance of the buffersrc filter. + * @param frame frame to be added. If the frame is reference counted, this + * function will make a new reference to it. Otherwise the frame data will be + * copied. + * + * @return 0 on success, a negative AVERROR on error + * + * This function is equivalent to av_buffersrc_add_frame_flags() with the + * AV_BUFFERSRC_FLAG_KEEP_REF flag. + */ +int av_buffersrc_write_frame(AVFilterContext *s, const AVFrame *frame); + +/** + * Add a frame to the buffer source. + * + * @param s an instance of the buffersrc filter. + * @param frame frame to be added. If the frame is reference counted, this + * function will take ownership of the reference(s) and reset the frame. + * Otherwise the frame data will be copied. If this function returns an error, + * the input frame is not touched. + * + * @return 0 on success, a negative AVERROR on error. + * + * @note the difference between this function and av_buffersrc_write_frame() is + * that av_buffersrc_write_frame() creates a new reference to the input frame, + * while this function takes ownership of the reference passed to it. + * + * This function is equivalent to av_buffersrc_add_frame_flags() without the + * AV_BUFFERSRC_FLAG_KEEP_REF flag. + */ +int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame); + +/** + * Add a frame to the buffer source. + * + * By default, if the frame is reference-counted, this function will take + * ownership of the reference(s) and reset the frame. This can be controled + * using the flags. + * + * If this function returns an error, the input frame is not touched. 
+ * + * @param buffer_src pointer to a buffer source context + * @param frame a frame, or NULL to mark EOF + * @param flags a combination of AV_BUFFERSRC_FLAG_* + * @return >= 0 in case of success, a negative AVERROR code + * in case of failure + */ +int av_buffersrc_add_frame_flags(AVFilterContext *buffer_src, + AVFrame *frame, int flags); + + +#endif /* AVFILTER_BUFFERSRC_H */ diff --git a/ffmpeg1/libavfilter/drawutils.c b/ffmpeg1/libavfilter/drawutils.c new file mode 100644 index 0000000..aebc000 --- /dev/null +++ b/ffmpeg1/libavfilter/drawutils.c @@ -0,0 +1,552 @@ +/* + * Copyright 2011 Stefano Sabatini + * Copyright 2012 Nicolas George + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +#include "libavutil/avutil.h" +#include "libavutil/colorspace.h" +#include "libavutil/mem.h" +#include "libavutil/pixdesc.h" +#include "drawutils.h" +#include "formats.h" + +enum { RED = 0, GREEN, BLUE, ALPHA }; + +int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt) +{ + switch (pix_fmt) { + case AV_PIX_FMT_0RGB: + case AV_PIX_FMT_ARGB: rgba_map[ALPHA] = 0; rgba_map[RED ] = 1; rgba_map[GREEN] = 2; rgba_map[BLUE ] = 3; break; + case AV_PIX_FMT_0BGR: + case AV_PIX_FMT_ABGR: rgba_map[ALPHA] = 0; rgba_map[BLUE ] = 1; rgba_map[GREEN] = 2; rgba_map[RED ] = 3; break; + case AV_PIX_FMT_RGB0: + case AV_PIX_FMT_RGBA: + case AV_PIX_FMT_RGB24: rgba_map[RED ] = 0; rgba_map[GREEN] = 1; rgba_map[BLUE ] = 2; rgba_map[ALPHA] = 3; break; + case AV_PIX_FMT_BGRA: + case AV_PIX_FMT_BGR0: + case AV_PIX_FMT_BGR24: rgba_map[BLUE ] = 0; rgba_map[GREEN] = 1; rgba_map[RED ] = 2; rgba_map[ALPHA] = 3; break; + default: /* unsupported */ + return AVERROR(EINVAL); + } + return 0; +} + +int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, uint8_t dst_color[4], + enum AVPixelFormat pix_fmt, uint8_t rgba_color[4], + int *is_packed_rgba, uint8_t rgba_map_ptr[4]) +{ + uint8_t rgba_map[4] = {0}; + int i; + const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(pix_fmt); + int hsub = pix_desc->log2_chroma_w; + + *is_packed_rgba = ff_fill_rgba_map(rgba_map, pix_fmt) >= 0; + + if (*is_packed_rgba) { + pixel_step[0] = (av_get_bits_per_pixel(pix_desc))>>3; + for (i = 0; i < 4; i++) + dst_color[rgba_map[i]] = rgba_color[i]; + + line[0] = av_malloc(w * pixel_step[0]); + for (i = 0; i < w; i++) + memcpy(line[0] + i * pixel_step[0], dst_color, pixel_step[0]); + if (rgba_map_ptr) + memcpy(rgba_map_ptr, rgba_map, sizeof(rgba_map[0]) * 4); + 
} else { + int plane; + + dst_color[0] = RGB_TO_Y_CCIR(rgba_color[0], rgba_color[1], rgba_color[2]); + dst_color[1] = RGB_TO_U_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0); + dst_color[2] = RGB_TO_V_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0); + dst_color[3] = rgba_color[3]; + + for (plane = 0; plane < 4; plane++) { + int line_size; + int hsub1 = (plane == 1 || plane == 2) ? hsub : 0; + + pixel_step[plane] = 1; + line_size = (w >> hsub1) * pixel_step[plane]; + line[plane] = av_malloc(line_size); + memset(line[plane], dst_color[plane], line_size); + } + } + + return 0; +} + +void ff_draw_rectangle(uint8_t *dst[4], int dst_linesize[4], + uint8_t *src[4], int pixelstep[4], + int hsub, int vsub, int x, int y, int w, int h) +{ + int i, plane; + uint8_t *p; + + for (plane = 0; plane < 4 && dst[plane]; plane++) { + int hsub1 = plane == 1 || plane == 2 ? hsub : 0; + int vsub1 = plane == 1 || plane == 2 ? vsub : 0; + + p = dst[plane] + (y >> vsub1) * dst_linesize[plane]; + for (i = 0; i < (h >> vsub1); i++) { + memcpy(p + (x >> hsub1) * pixelstep[plane], + src[plane], (w >> hsub1) * pixelstep[plane]); + p += dst_linesize[plane]; + } + } +} + +void ff_copy_rectangle(uint8_t *dst[4], int dst_linesize[4], + uint8_t *src[4], int src_linesize[4], int pixelstep[4], + int hsub, int vsub, int x, int y, int y2, int w, int h) +{ + int i, plane; + uint8_t *p; + + for (plane = 0; plane < 4 && dst[plane]; plane++) { + int hsub1 = plane == 1 || plane == 2 ? hsub : 0; + int vsub1 = plane == 1 || plane == 2 ? 
vsub : 0; + + p = dst[plane] + (y >> vsub1) * dst_linesize[plane]; + for (i = 0; i < (h >> vsub1); i++) { + memcpy(p + (x >> hsub1) * pixelstep[plane], + src[plane] + src_linesize[plane]*(i+(y2>>vsub1)), (w >> hsub1) * pixelstep[plane]); + p += dst_linesize[plane]; + } + } +} + +int ff_draw_init(FFDrawContext *draw, enum AVPixelFormat format, unsigned flags) +{ + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format); + const AVComponentDescriptor *c; + unsigned i, nb_planes = 0; + int pixelstep[MAX_PLANES] = { 0 }; + + if (!desc->name) + return AVERROR(EINVAL); + if (desc->flags & ~(PIX_FMT_PLANAR | PIX_FMT_RGB | PIX_FMT_PSEUDOPAL | PIX_FMT_ALPHA)) + return AVERROR(ENOSYS); + for (i = 0; i < desc->nb_components; i++) { + c = &desc->comp[i]; + /* for now, only 8-bits formats */ + if (c->depth_minus1 != 8 - 1) + return AVERROR(ENOSYS); + if (c->plane >= MAX_PLANES) + return AVERROR(ENOSYS); + /* strange interleaving */ + if (pixelstep[c->plane] != 0 && + pixelstep[c->plane] != c->step_minus1 + 1) + return AVERROR(ENOSYS); + pixelstep[c->plane] = c->step_minus1 + 1; + if (pixelstep[c->plane] >= 8) + return AVERROR(ENOSYS); + nb_planes = FFMAX(nb_planes, c->plane + 1); + } + if ((desc->log2_chroma_w || desc->log2_chroma_h) && nb_planes < 3) + return AVERROR(ENOSYS); /* exclude NV12 and NV21 */ + memset(draw, 0, sizeof(*draw)); + draw->desc = desc; + draw->format = format; + draw->nb_planes = nb_planes; + memcpy(draw->pixelstep, pixelstep, sizeof(draw->pixelstep)); + if (nb_planes >= 3 && !(desc->flags & PIX_FMT_RGB)) { + draw->hsub[1] = draw->hsub[2] = draw->hsub_max = desc->log2_chroma_w; + draw->vsub[1] = draw->vsub[2] = draw->vsub_max = desc->log2_chroma_h; + } + for (i = 0; i < ((desc->nb_components - 1) | 1); i++) + draw->comp_mask[desc->comp[i].plane] |= + 1 << (desc->comp[i].offset_plus1 - 1); + return 0; +} + +void ff_draw_color(FFDrawContext *draw, FFDrawColor *color, const uint8_t rgba[4]) +{ + unsigned i; + uint8_t rgba_map[4]; + + if (rgba != 
color->rgba) + memcpy(color->rgba, rgba, sizeof(color->rgba)); + if ((draw->desc->flags & PIX_FMT_RGB) && draw->nb_planes == 1 && + ff_fill_rgba_map(rgba_map, draw->format) >= 0) { + for (i = 0; i < 4; i++) + color->comp[0].u8[rgba_map[i]] = rgba[i]; + } else if (draw->nb_planes == 3 || draw->nb_planes == 4) { + /* assume YUV */ + color->comp[0].u8[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]); + color->comp[1].u8[0] = RGB_TO_U_CCIR(rgba[0], rgba[1], rgba[2], 0); + color->comp[2].u8[0] = RGB_TO_V_CCIR(rgba[0], rgba[1], rgba[2], 0); + color->comp[3].u8[0] = rgba[3]; + } else if (draw->format == AV_PIX_FMT_GRAY8 || draw->format == AV_PIX_FMT_GRAY8A) { + color->comp[0].u8[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]); + color->comp[1].u8[0] = rgba[3]; + } else { + av_log(NULL, AV_LOG_WARNING, + "Color conversion not implemented for %s\n", draw->desc->name); + memset(color, 128, sizeof(*color)); + } +} + +static uint8_t *pointer_at(FFDrawContext *draw, uint8_t *data[], int linesize[], + int plane, int x, int y) +{ + return data[plane] + + (y >> draw->vsub[plane]) * linesize[plane] + + (x >> draw->hsub[plane]) * draw->pixelstep[plane]; +} + +void ff_copy_rectangle2(FFDrawContext *draw, + uint8_t *dst[], int dst_linesize[], + uint8_t *src[], int src_linesize[], + int dst_x, int dst_y, int src_x, int src_y, + int w, int h) +{ + int plane, y, wp, hp; + uint8_t *p, *q; + + for (plane = 0; plane < draw->nb_planes; plane++) { + p = pointer_at(draw, src, src_linesize, plane, src_x, src_y); + q = pointer_at(draw, dst, dst_linesize, plane, dst_x, dst_y); + wp = (w >> draw->hsub[plane]) * draw->pixelstep[plane]; + hp = (h >> draw->vsub[plane]); + for (y = 0; y < hp; y++) { + memcpy(q, p, wp); + p += src_linesize[plane]; + q += dst_linesize[plane]; + } + } +} + +void ff_fill_rectangle(FFDrawContext *draw, FFDrawColor *color, + uint8_t *dst[], int dst_linesize[], + int dst_x, int dst_y, int w, int h) +{ + int plane, x, y, wp, hp; + uint8_t *p0, *p; + + for (plane = 0; plane < 
draw->nb_planes; plane++) { + p0 = pointer_at(draw, dst, dst_linesize, plane, dst_x, dst_y); + wp = (w >> draw->hsub[plane]); + hp = (h >> draw->vsub[plane]); + if (!hp) + return; + p = p0; + /* copy first line from color */ + for (x = 0; x < wp; x++) { + memcpy(p, color->comp[plane].u8, draw->pixelstep[plane]); + p += draw->pixelstep[plane]; + } + wp *= draw->pixelstep[plane]; + /* copy next lines from first line */ + p = p0 + dst_linesize[plane]; + for (y = 1; y < hp; y++) { + memcpy(p, p0, wp); + p += dst_linesize[plane]; + } + } +} + +/** + * Clip interval [x; x+w[ within [0; wmax[. + * The resulting w may be negative if the final interval is empty. + * dx, if not null, return the difference between in and out value of x. + */ +static void clip_interval(int wmax, int *x, int *w, int *dx) +{ + if (dx) + *dx = 0; + if (*x < 0) { + if (dx) + *dx = -*x; + *w += *x; + *x = 0; + } + if (*x + *w > wmax) + *w = wmax - *x; +} + +/** + * Decompose w pixels starting at x + * into start + (w starting at x) + end + * with x and w aligned on multiples of 1<>= sub; +} + +static int component_used(FFDrawContext *draw, int plane, int comp) +{ + return (draw->comp_mask[plane] >> comp) & 1; +} + +/* If alpha is in the [ 0 ; 0x1010101 ] range, + then alpha * value is in the [ 0 ; 0xFFFFFFFF ] range, + and >> 24 gives a correct rounding. 
*/ +static void blend_line(uint8_t *dst, unsigned src, unsigned alpha, + int dx, int w, unsigned hsub, int left, int right) +{ + unsigned asrc = alpha * src; + unsigned tau = 0x1010101 - alpha; + int x; + + if (left) { + unsigned suba = (left * alpha) >> hsub; + *dst = (*dst * (0x1010101 - suba) + src * suba) >> 24; + dst += dx; + } + for (x = 0; x < w; x++) { + *dst = (*dst * tau + asrc) >> 24; + dst += dx; + } + if (right) { + unsigned suba = (right * alpha) >> hsub; + *dst = (*dst * (0x1010101 - suba) + src * suba) >> 24; + } +} + +void ff_blend_rectangle(FFDrawContext *draw, FFDrawColor *color, + uint8_t *dst[], int dst_linesize[], + int dst_w, int dst_h, + int x0, int y0, int w, int h) +{ + unsigned alpha, nb_planes, nb_comp, plane, comp; + int w_sub, h_sub, x_sub, y_sub, left, right, top, bottom, y; + uint8_t *p0, *p; + + /* TODO optimize if alpha = 0xFF */ + clip_interval(dst_w, &x0, &w, NULL); + clip_interval(dst_h, &y0, &h, NULL); + if (w <= 0 || h <= 0 || !color->rgba[3]) + return; + /* 0x10203 * alpha + 2 is in the [ 2 ; 0x1010101 - 2 ] range */ + alpha = 0x10203 * color->rgba[3] + 0x2; + nb_planes = (draw->nb_planes - 1) | 1; /* eliminate alpha */ + for (plane = 0; plane < nb_planes; plane++) { + nb_comp = draw->pixelstep[plane]; + p0 = pointer_at(draw, dst, dst_linesize, plane, x0, y0); + w_sub = w; + h_sub = h; + x_sub = x0; + y_sub = y0; + subsampling_bounds(draw->hsub[plane], &x_sub, &w_sub, &left, &right); + subsampling_bounds(draw->vsub[plane], &y_sub, &h_sub, &top, &bottom); + for (comp = 0; comp < nb_comp; comp++) { + if (!component_used(draw, plane, comp)) + continue; + p = p0 + comp; + if (top) { + blend_line(p, color->comp[plane].u8[comp], alpha >> 1, + draw->pixelstep[plane], w_sub, + draw->hsub[plane], left, right); + p += dst_linesize[plane]; + } + for (y = 0; y < h_sub; y++) { + blend_line(p, color->comp[plane].u8[comp], alpha, + draw->pixelstep[plane], w_sub, + draw->hsub[plane], left, right); + p += dst_linesize[plane]; + } + if 
(bottom) + blend_line(p, color->comp[plane].u8[comp], alpha >> 1, + draw->pixelstep[plane], w_sub, + draw->hsub[plane], left, right); + } + } +} + +static void blend_pixel(uint8_t *dst, unsigned src, unsigned alpha, + uint8_t *mask, int mask_linesize, int l2depth, + unsigned w, unsigned h, unsigned shift, unsigned xm0) +{ + unsigned xm, x, y, t = 0; + unsigned xmshf = 3 - l2depth; + unsigned xmmod = 7 >> l2depth; + unsigned mbits = (1 << (1 << l2depth)) - 1; + unsigned mmult = 255 / mbits; + + for (y = 0; y < h; y++) { + xm = xm0; + for (x = 0; x < w; x++) { + t += ((mask[xm >> xmshf] >> ((~xm & xmmod) << l2depth)) & mbits) + * mmult; + xm++; + } + mask += mask_linesize; + } + alpha = (t >> shift) * alpha; + *dst = ((0x1010101 - alpha) * *dst + alpha * src) >> 24; +} + +static void blend_line_hv(uint8_t *dst, int dst_delta, + unsigned src, unsigned alpha, + uint8_t *mask, int mask_linesize, int l2depth, int w, + unsigned hsub, unsigned vsub, + int xm, int left, int right, int hband) +{ + int x; + + if (left) { + blend_pixel(dst, src, alpha, mask, mask_linesize, l2depth, + left, hband, hsub + vsub, xm); + dst += dst_delta; + xm += left; + } + for (x = 0; x < w; x++) { + blend_pixel(dst, src, alpha, mask, mask_linesize, l2depth, + 1 << hsub, hband, hsub + vsub, xm); + dst += dst_delta; + xm += 1 << hsub; + } + if (right) + blend_pixel(dst, src, alpha, mask, mask_linesize, l2depth, + right, hband, hsub + vsub, xm); +} + +void ff_blend_mask(FFDrawContext *draw, FFDrawColor *color, + uint8_t *dst[], int dst_linesize[], int dst_w, int dst_h, + uint8_t *mask, int mask_linesize, int mask_w, int mask_h, + int l2depth, unsigned endianness, int x0, int y0) +{ + unsigned alpha, nb_planes, nb_comp, plane, comp; + int xm0, ym0, w_sub, h_sub, x_sub, y_sub, left, right, top, bottom, y; + uint8_t *p0, *p, *m; + + clip_interval(dst_w, &x0, &mask_w, &xm0); + clip_interval(dst_h, &y0, &mask_h, &ym0); + mask += ym0 * mask_linesize; + if (mask_w <= 0 || mask_h <= 0 || !color->rgba[3]) + 
return; + /* alpha is in the [ 0 ; 0x10203 ] range, + alpha * mask is in the [ 0 ; 0x1010101 - 4 ] range */ + alpha = (0x10307 * color->rgba[3] + 0x3) >> 8; + nb_planes = (draw->nb_planes - 1) | 1; /* eliminate alpha */ + for (plane = 0; plane < nb_planes; plane++) { + nb_comp = draw->pixelstep[plane]; + p0 = pointer_at(draw, dst, dst_linesize, plane, x0, y0); + w_sub = mask_w; + h_sub = mask_h; + x_sub = x0; + y_sub = y0; + subsampling_bounds(draw->hsub[plane], &x_sub, &w_sub, &left, &right); + subsampling_bounds(draw->vsub[plane], &y_sub, &h_sub, &top, &bottom); + for (comp = 0; comp < nb_comp; comp++) { + if (!component_used(draw, plane, comp)) + continue; + p = p0 + comp; + m = mask; + if (top) { + blend_line_hv(p, draw->pixelstep[plane], + color->comp[plane].u8[comp], alpha, + m, mask_linesize, l2depth, w_sub, + draw->hsub[plane], draw->vsub[plane], + xm0, left, right, top); + p += dst_linesize[plane]; + m += top * mask_linesize; + } + for (y = 0; y < h_sub; y++) { + blend_line_hv(p, draw->pixelstep[plane], + color->comp[plane].u8[comp], alpha, + m, mask_linesize, l2depth, w_sub, + draw->hsub[plane], draw->vsub[plane], + xm0, left, right, 1 << draw->vsub[plane]); + p += dst_linesize[plane]; + m += mask_linesize << draw->vsub[plane]; + } + if (bottom) + blend_line_hv(p, draw->pixelstep[plane], + color->comp[plane].u8[comp], alpha, + m, mask_linesize, l2depth, w_sub, + draw->hsub[plane], draw->vsub[plane], + xm0, left, right, bottom); + } + } +} + +int ff_draw_round_to_sub(FFDrawContext *draw, int sub_dir, int round_dir, + int value) +{ + unsigned shift = sub_dir ? draw->vsub_max : draw->hsub_max; + + if (!shift) + return value; + if (round_dir >= 0) + value += round_dir ? 
(1 << shift) - 1 : 1 << (shift - 1); + return (value >> shift) << shift; +} + +AVFilterFormats *ff_draw_supported_pixel_formats(unsigned flags) +{ + enum AVPixelFormat i, pix_fmts[AV_PIX_FMT_NB + 1]; + unsigned n = 0; + FFDrawContext draw; + + for (i = 0; i < AV_PIX_FMT_NB; i++) + if (ff_draw_init(&draw, i, flags) >= 0) + pix_fmts[n++] = i; + pix_fmts[n++] = AV_PIX_FMT_NONE; + return ff_make_format_list(pix_fmts); +} + +#ifdef TEST + +#undef printf + +int main(void) +{ + enum AVPixelFormat f; + const AVPixFmtDescriptor *desc; + FFDrawContext draw; + FFDrawColor color; + int r, i; + + for (f = 0; f < AV_PIX_FMT_NB; f++) { + desc = av_pix_fmt_desc_get(f); + if (!desc->name) + continue; + printf("Testing %s...%*s", desc->name, + (int)(16 - strlen(desc->name)), ""); + r = ff_draw_init(&draw, f, 0); + if (r < 0) { + char buf[128]; + av_strerror(r, buf, sizeof(buf)); + printf("no: %s\n", buf); + continue; + } + ff_draw_color(&draw, &color, (uint8_t[]) { 1, 0, 0, 1 }); + for (i = 0; i < sizeof(color); i++) + if (((uint8_t *)&color)[i] != 128) + break; + if (i == sizeof(color)) { + printf("fallback color\n"); + continue; + } + printf("ok\n"); + } + return 0; +} + +#endif diff --git a/ffmpeg1/libavfilter/drawutils.h b/ffmpeg1/libavfilter/drawutils.h new file mode 100644 index 0000000..5ffffe7 --- /dev/null +++ b/ffmpeg1/libavfilter/drawutils.h @@ -0,0 +1,155 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_DRAWUTILS_H +#define AVFILTER_DRAWUTILS_H + +/** + * @file + * misc drawing utilities + */ + +#include +#include "avfilter.h" +#include "libavutil/pixfmt.h" + +int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt); + +int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, + uint8_t dst_color[4], + enum AVPixelFormat pix_fmt, uint8_t rgba_color[4], + int *is_packed_rgba, uint8_t rgba_map[4]); + +void ff_draw_rectangle(uint8_t *dst[4], int dst_linesize[4], + uint8_t *src[4], int pixelstep[4], + int hsub, int vsub, int x, int y, int w, int h); + +void ff_copy_rectangle(uint8_t *dst[4], int dst_linesize[4], + uint8_t *src[4], int src_linesize[4], int pixelstep[4], + int hsub, int vsub, int x, int y, int y2, int w, int h); + +#define MAX_PLANES 4 + +typedef struct FFDrawContext { + const struct AVPixFmtDescriptor *desc; + enum AVPixelFormat format; + unsigned nb_planes; + int pixelstep[MAX_PLANES]; /*< offset between pixels */ + uint8_t comp_mask[MAX_PLANES]; /*< bitmask of used non-alpha components */ + uint8_t hsub[MAX_PLANES]; /*< horizontal subsampling */ + uint8_t vsub[MAX_PLANES]; /*< vertical subsampling */ + uint8_t hsub_max; + uint8_t vsub_max; +} FFDrawContext; + +typedef struct FFDrawColor { + uint8_t rgba[4]; + union { + uint32_t u32; + uint16_t u16; + uint8_t u8[4]; + } comp[MAX_PLANES]; +} FFDrawColor; + +/** + * Init a draw context. + * + * Only a limited number of pixel formats are supported, if format is not + * supported the function will return an error. + * No flags currently defined. + * @return 0 for success, < 0 for error + */ +int ff_draw_init(FFDrawContext *draw, enum AVPixelFormat format, unsigned flags); + +/** + * Prepare a color. 
+ */ +void ff_draw_color(FFDrawContext *draw, FFDrawColor *color, const uint8_t rgba[4]); + +/** + * Copy a rectangle from an image to another. + * + * The coordinates must be as even as the subsampling requires. + */ +void ff_copy_rectangle2(FFDrawContext *draw, + uint8_t *dst[], int dst_linesize[], + uint8_t *src[], int src_linesize[], + int dst_x, int dst_y, int src_x, int src_y, + int w, int h); + +/** + * Fill a rectangle with an uniform color. + * + * The coordinates must be as even as the subsampling requires. + * The color needs to be inited with ff_draw_color. + */ +void ff_fill_rectangle(FFDrawContext *draw, FFDrawColor *color, + uint8_t *dst[], int dst_linesize[], + int dst_x, int dst_y, int w, int h); + +/** + * Blend a rectangle with an uniform color. + */ +void ff_blend_rectangle(FFDrawContext *draw, FFDrawColor *color, + uint8_t *dst[], int dst_linesize[], + int dst_w, int dst_h, + int x0, int y0, int w, int h); + +/** + * Blend an alpha mask with an uniform color. + * + * @param draw draw context + * @param color color for the overlay; + * @param dst destination image + * @param dst_linesize line stride of the destination + * @param dst_w width of the destination image + * @param dst_h height of the destination image + * @param mask mask + * @param mask_linesize line stride of the mask + * @param mask_w width of the mask + * @param mask_h height of the mask + * @param l2depth log2 of depth of the mask (0 for 1bpp, 3 for 8bpp) + * @param endianness bit order of the mask (0: MSB to the left) + * @param x0 horizontal position of the overlay + * @param y0 vertical position of the overlay + */ +void ff_blend_mask(FFDrawContext *draw, FFDrawColor *color, + uint8_t *dst[], int dst_linesize[], int dst_w, int dst_h, + uint8_t *mask, int mask_linesize, int mask_w, int mask_h, + int l2depth, unsigned endianness, int x0, int y0); + +/** + * Round a dimension according to subsampling. 
+ * + * @param draw draw context + * @param sub_dir 0 for horizontal, 1 for vertical + * @param round_dir 0 nearest, -1 round down, +1 round up + * @param value value to round + * @return the rounded value + */ +int ff_draw_round_to_sub(FFDrawContext *draw, int sub_dir, int round_dir, + int value); + +/** + * Return the list of pixel formats supported by the draw functions. + * + * The flags are the same as ff_draw_init, i.e., none currently. + */ +AVFilterFormats *ff_draw_supported_pixel_formats(unsigned flags); + +#endif /* AVFILTER_DRAWUTILS_H */ diff --git a/ffmpeg1/libavfilter/f_ebur128.c b/ffmpeg1/libavfilter/f_ebur128.c new file mode 100644 index 0000000..8aaea73 --- /dev/null +++ b/ffmpeg1/libavfilter/f_ebur128.c @@ -0,0 +1,822 @@ +/* + * Copyright (c) 2012 Clément BÅ“sch + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +/** + * @file + * EBU R.128 implementation + * @see http://tech.ebu.ch/loudness + * @see https://www.youtube.com/watch?v=iuEtQqC-Sqo "EBU R128 Introduction - Florian Camerer" + * @todo True Peak + * @todo implement start/stop/reset through filter command injection + * @todo support other frequencies to avoid resampling + */ + +#include + +#include "libavutil/avassert.h" +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/dict.h" +#include "libavutil/xga_font_data.h" +#include "libavutil/opt.h" +#include "libavutil/timestamp.h" +#include "audio.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" + +#define MAX_CHANNELS 63 + +/* pre-filter coefficients */ +#define PRE_B0 1.53512485958697 +#define PRE_B1 -2.69169618940638 +#define PRE_B2 1.19839281085285 +#define PRE_A1 -1.69065929318241 +#define PRE_A2 0.73248077421585 + +/* RLB-filter coefficients */ +#define RLB_B0 1.0 +#define RLB_B1 -2.0 +#define RLB_B2 1.0 +#define RLB_A1 -1.99004745483398 +#define RLB_A2 0.99007225036621 + +#define ABS_THRES -70 ///< silence gate: we discard anything below this absolute (LUFS) threshold +#define ABS_UP_THRES 10 ///< upper loud limit to consider (ABS_THRES being the minimum) +#define HIST_GRAIN 100 ///< defines histogram precision +#define HIST_SIZE ((ABS_UP_THRES - ABS_THRES) * HIST_GRAIN + 1) + +/** + * An histogram is an array of HIST_SIZE hist_entry storing all the energies + * recorded (with an accuracy of 1/HIST_GRAIN) of the loudnesses from ABS_THRES + * (at 0) to ABS_UP_THRES (at HIST_SIZE-1). + * This fixed-size system avoids the need of a list of energies growing + * infinitely over the time and is thus more scalable. 
+ */ +struct hist_entry { + int count; ///< how many times the corresponding value occurred + double energy; ///< E = 10^((L + 0.691) / 10) + double loudness; ///< L = -0.691 + 10 * log10(E) +}; + +struct integrator { + double *cache[MAX_CHANNELS]; ///< window of filtered samples (N ms) + int cache_pos; ///< focus on the last added bin in the cache array + double sum[MAX_CHANNELS]; ///< sum of the last N ms filtered samples (cache content) + int filled; ///< 1 if the cache is completely filled, 0 otherwise + double rel_threshold; ///< relative threshold + double sum_kept_powers; ///< sum of the powers (weighted sums) above absolute threshold + int nb_kept_powers; ///< number of sum above absolute threshold + struct hist_entry *histogram; ///< histogram of the powers, used to compute LRA and I +}; + +struct rect { int x, y, w, h; }; + +typedef struct { + const AVClass *class; ///< AVClass context for log and options purpose + + /* video */ + int do_video; ///< 1 if video output enabled, 0 otherwise + int w, h; ///< size of the video output + struct rect text; ///< rectangle for the LU legend on the left + struct rect graph; ///< rectangle for the main graph in the center + struct rect gauge; ///< rectangle for the gauge on the right + AVFrame *outpicref; ///< output picture reference, updated regularly + int meter; ///< select a EBU mode between +9 and +18 + int scale_range; ///< the range of LU values according to the meter + int y_zero_lu; ///< the y value (pixel position) for 0 LU + int *y_line_ref; ///< y reference values for drawing the LU lines in the graph and the gauge + + /* audio */ + int nb_channels; ///< number of channels in the input + double *ch_weighting; ///< channel weighting mapping + int sample_count; ///< sample count used for refresh frequency, reset at refresh + + /* Filter caches. 
+ * The mult by 3 in the following is for X[i], X[i-1] and X[i-2] */ + double x[MAX_CHANNELS * 3]; ///< 3 input samples cache for each channel + double y[MAX_CHANNELS * 3]; ///< 3 pre-filter samples cache for each channel + double z[MAX_CHANNELS * 3]; ///< 3 RLB-filter samples cache for each channel + +#define I400_BINS (48000 * 4 / 10) +#define I3000_BINS (48000 * 3) + struct integrator i400; ///< 400ms integrator, used for Momentary loudness (M), and Integrated loudness (I) + struct integrator i3000; ///< 3s integrator, used for Short term loudness (S), and Loudness Range (LRA) + + /* I and LRA specific */ + double integrated_loudness; ///< integrated loudness in LUFS (I) + double loudness_range; ///< loudness range in LU (LRA) + double lra_low, lra_high; ///< low and high LRA values + + /* misc */ + int loglevel; ///< log level for frame logging + int metadata; ///< whether or not to inject loudness results in frames + int request_fulfilled; ///< 1 if some audio just got pushed, 0 otherwise. 
FIXME: remove me +} EBUR128Context; + +#define OFFSET(x) offsetof(EBUR128Context, x) +#define A AV_OPT_FLAG_AUDIO_PARAM +#define V AV_OPT_FLAG_VIDEO_PARAM +#define F AV_OPT_FLAG_FILTERING_PARAM +static const AVOption ebur128_options[] = { + { "video", "set video output", OFFSET(do_video), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, V|F }, + { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x480"}, 0, 0, V|F }, + { "meter", "set scale meter (+9 to +18)", OFFSET(meter), AV_OPT_TYPE_INT, {.i64 = 9}, 9, 18, V|F }, + { "framelog", "force frame logging level", OFFSET(loglevel), AV_OPT_TYPE_INT, {.i64 = -1}, INT_MIN, INT_MAX, A|V|F, "level" }, + { "info", "information logging level", 0, AV_OPT_TYPE_CONST, {.i64 = AV_LOG_INFO}, INT_MIN, INT_MAX, A|V|F, "level" }, + { "verbose", "verbose logging level", 0, AV_OPT_TYPE_CONST, {.i64 = AV_LOG_VERBOSE}, INT_MIN, INT_MAX, A|V|F, "level" }, + { "metadata", "inject metadata in the filtergraph", OFFSET(metadata), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, A|V|F }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(ebur128); + +static const uint8_t graph_colors[] = { + 0xdd, 0x66, 0x66, // value above 0LU non reached + 0x66, 0x66, 0xdd, // value below 0LU non reached + 0x96, 0x33, 0x33, // value above 0LU reached + 0x33, 0x33, 0x96, // value below 0LU reached + 0xdd, 0x96, 0x96, // value above 0LU line non reached + 0x96, 0x96, 0xdd, // value below 0LU line non reached + 0xdd, 0x33, 0x33, // value above 0LU line reached + 0x33, 0x33, 0xdd, // value below 0LU line reached +}; + +static const uint8_t *get_graph_color(const EBUR128Context *ebur128, int v, int y) +{ + const int below0 = y > ebur128->y_zero_lu; + const int reached = y >= v; + const int line = ebur128->y_line_ref[y] || y == ebur128->y_zero_lu; + const int colorid = 4*line + 2*reached + below0; + return graph_colors + 3*colorid; +} + +static inline int lu_to_y(const EBUR128Context *ebur128, double v) +{ + v += 2 * ebur128->meter; // make it in range [0;...] 
+ v = av_clipf(v, 0, ebur128->scale_range); // make sure it's in the graph scale + v = ebur128->scale_range - v; // invert value (y=0 is on top) + return v * ebur128->graph.h / ebur128->scale_range; // rescale from scale range to px height +} + +#define FONT8 0 +#define FONT16 1 + +static const uint8_t font_colors[] = { + 0xdd, 0xdd, 0x00, + 0x00, 0x96, 0x96, +}; + +static void drawtext(AVFrame *pic, int x, int y, int ftid, const uint8_t *color, const char *fmt, ...) +{ + int i; + char buf[128] = {0}; + const uint8_t *font; + int font_height; + va_list vl; + + if (ftid == FONT16) font = avpriv_vga16_font, font_height = 16; + else if (ftid == FONT8) font = avpriv_cga_font, font_height = 8; + else return; + + va_start(vl, fmt); + vsnprintf(buf, sizeof(buf), fmt, vl); + va_end(vl); + + for (i = 0; buf[i]; i++) { + int char_y, mask; + uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8)*3; + + for (char_y = 0; char_y < font_height; char_y++) { + for (mask = 0x80; mask; mask >>= 1) { + if (font[buf[i] * font_height + char_y] & mask) + memcpy(p, color, 3); + else + memcpy(p, "\x00\x00\x00", 3); + p += 3; + } + p += pic->linesize[0] - 8*3; + } + } +} + +static void drawline(AVFrame *pic, int x, int y, int len, int step) +{ + int i; + uint8_t *p = pic->data[0] + y*pic->linesize[0] + x*3; + + for (i = 0; i < len; i++) { + memcpy(p, "\x00\xff\x00", 3); + p += step; + } +} + +static int config_video_output(AVFilterLink *outlink) +{ + int i, x, y; + uint8_t *p; + AVFilterContext *ctx = outlink->src; + EBUR128Context *ebur128 = ctx->priv; + AVFrame *outpicref; + + /* check if there is enough space to represent everything decently */ + if (ebur128->w < 640 || ebur128->h < 480) { + av_log(ctx, AV_LOG_ERROR, "Video size %dx%d is too small, " + "minimum size is 640x480\n", ebur128->w, ebur128->h); + return AVERROR(EINVAL); + } + outlink->w = ebur128->w; + outlink->h = ebur128->h; + +#define PAD 8 + + /* configure text area position and size */ + ebur128->text.x = PAD; + 
ebur128->text.y = 40; + ebur128->text.w = 3 * 8; // 3 characters + ebur128->text.h = ebur128->h - PAD - ebur128->text.y; + + /* configure gauge position and size */ + ebur128->gauge.w = 20; + ebur128->gauge.h = ebur128->text.h; + ebur128->gauge.x = ebur128->w - PAD - ebur128->gauge.w; + ebur128->gauge.y = ebur128->text.y; + + /* configure graph position and size */ + ebur128->graph.x = ebur128->text.x + ebur128->text.w + PAD; + ebur128->graph.y = ebur128->gauge.y; + ebur128->graph.w = ebur128->gauge.x - ebur128->graph.x - PAD; + ebur128->graph.h = ebur128->gauge.h; + + /* graph and gauge share the LU-to-pixel code */ + av_assert0(ebur128->graph.h == ebur128->gauge.h); + + /* prepare the initial picref buffer */ + av_frame_free(&ebur128->outpicref); + ebur128->outpicref = outpicref = + ff_get_video_buffer(outlink, outlink->w, outlink->h); + if (!outpicref) + return AVERROR(ENOMEM); + outlink->sample_aspect_ratio = (AVRational){1,1}; + + /* init y references values (to draw LU lines) */ + ebur128->y_line_ref = av_calloc(ebur128->graph.h + 1, sizeof(*ebur128->y_line_ref)); + if (!ebur128->y_line_ref) + return AVERROR(ENOMEM); + + /* black background */ + memset(outpicref->data[0], 0, ebur128->h * outpicref->linesize[0]); + + /* draw LU legends */ + drawtext(outpicref, PAD, PAD+16, FONT8, font_colors+3, " LU"); + for (i = ebur128->meter; i >= -ebur128->meter * 2; i--) { + y = lu_to_y(ebur128, i); + x = PAD + (i < 10 && i > -10) * 8; + ebur128->y_line_ref[y] = i; + y -= 4; // -4 to center vertically + drawtext(outpicref, x, y + ebur128->graph.y, FONT8, font_colors+3, + "%c%d", i < 0 ? '-' : i > 0 ? 
'+' : ' ', FFABS(i)); + } + + /* draw graph */ + ebur128->y_zero_lu = lu_to_y(ebur128, 0); + p = outpicref->data[0] + ebur128->graph.y * outpicref->linesize[0] + + ebur128->graph.x * 3; + for (y = 0; y < ebur128->graph.h; y++) { + const uint8_t *c = get_graph_color(ebur128, INT_MAX, y); + + for (x = 0; x < ebur128->graph.w; x++) + memcpy(p + x*3, c, 3); + p += outpicref->linesize[0]; + } + + /* draw fancy rectangles around the graph and the gauge */ +#define DRAW_RECT(r) do { \ + drawline(outpicref, r.x, r.y - 1, r.w, 3); \ + drawline(outpicref, r.x, r.y + r.h, r.w, 3); \ + drawline(outpicref, r.x - 1, r.y, r.h, outpicref->linesize[0]); \ + drawline(outpicref, r.x + r.w, r.y, r.h, outpicref->linesize[0]); \ +} while (0) + DRAW_RECT(ebur128->graph); + DRAW_RECT(ebur128->gauge); + + return 0; +} + +static int config_audio_input(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->dst; + EBUR128Context *ebur128 = ctx->priv; + + /* force 100ms framing in case of metadata injection: the frames must have + * a granularity of the window overlap to be accurately exploited */ + if (ebur128->metadata) + inlink->min_samples = + inlink->max_samples = + inlink->partial_buf_size = inlink->sample_rate / 10; + return 0; +} + +static int config_audio_output(AVFilterLink *outlink) +{ + int i; + int idx_bitposn = 0; + AVFilterContext *ctx = outlink->src; + EBUR128Context *ebur128 = ctx->priv; + const int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout); + +#define BACK_MASK (AV_CH_BACK_LEFT |AV_CH_BACK_CENTER |AV_CH_BACK_RIGHT| \ + AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_CENTER|AV_CH_TOP_BACK_RIGHT| \ + AV_CH_SIDE_LEFT |AV_CH_SIDE_RIGHT| \ + AV_CH_SURROUND_DIRECT_LEFT |AV_CH_SURROUND_DIRECT_RIGHT) + + ebur128->nb_channels = nb_channels; + ebur128->ch_weighting = av_calloc(nb_channels, sizeof(*ebur128->ch_weighting)); + if (!ebur128->ch_weighting) + return AVERROR(ENOMEM); + + for (i = 0; i < nb_channels; i++) { + + /* find the next bit that is set starting 
from the right */ + while ((outlink->channel_layout & 1ULL<ch_weighting[i] = 0; + } else if (1ULL<ch_weighting[i] = 1.41; + } else { + ebur128->ch_weighting[i] = 1.0; + } + + idx_bitposn++; + + if (!ebur128->ch_weighting[i]) + continue; + + /* bins buffer for the two integration window (400ms and 3s) */ + ebur128->i400.cache[i] = av_calloc(I400_BINS, sizeof(*ebur128->i400.cache[0])); + ebur128->i3000.cache[i] = av_calloc(I3000_BINS, sizeof(*ebur128->i3000.cache[0])); + if (!ebur128->i400.cache[i] || !ebur128->i3000.cache[i]) + return AVERROR(ENOMEM); + } + + return 0; +} + +#define ENERGY(loudness) (pow(10, ((loudness) + 0.691) / 10.)) +#define LOUDNESS(energy) (-0.691 + 10 * log10(energy)) + +static struct hist_entry *get_histogram(void) +{ + int i; + struct hist_entry *h = av_calloc(HIST_SIZE, sizeof(*h)); + + if (!h) + return NULL; + for (i = 0; i < HIST_SIZE; i++) { + h[i].loudness = i / (double)HIST_GRAIN + ABS_THRES; + h[i].energy = ENERGY(h[i].loudness); + } + return h; +} + +/* This is currently necessary for the min/max samples to work properly. 
+ * FIXME: remove me when possible */ +static int audio_request_frame(AVFilterLink *outlink) +{ + int ret; + AVFilterContext *ctx = outlink->src; + EBUR128Context *ebur128 = ctx->priv; + + ebur128->request_fulfilled = 0; + do { + ret = ff_request_frame(ctx->inputs[0]); + } while (!ebur128->request_fulfilled && ret >= 0); + return ret; +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + int ret; + EBUR128Context *ebur128 = ctx->priv; + AVFilterPad pad; + + ebur128->class = &ebur128_class; + av_opt_set_defaults(ebur128); + + if ((ret = av_set_options_string(ebur128, args, "=", ":")) < 0) + return ret; + + if (ebur128->loglevel != AV_LOG_INFO && + ebur128->loglevel != AV_LOG_VERBOSE) { + if (ebur128->do_video || ebur128->metadata) + ebur128->loglevel = AV_LOG_VERBOSE; + else + ebur128->loglevel = AV_LOG_INFO; + } + + // if meter is +9 scale, scale range is from -18 LU to +9 LU (or 3*9) + // if meter is +18 scale, scale range is from -36 LU to +18 LU (or 3*18) + ebur128->scale_range = 3 * ebur128->meter; + + ebur128->i400.histogram = get_histogram(); + ebur128->i3000.histogram = get_histogram(); + if (!ebur128->i400.histogram || !ebur128->i3000.histogram) + return AVERROR(ENOMEM); + + ebur128->integrated_loudness = ABS_THRES; + ebur128->loudness_range = 0; + + /* insert output pads */ + if (ebur128->do_video) { + pad = (AVFilterPad){ + .name = av_strdup("out0"), + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_video_output, + }; + if (!pad.name) + return AVERROR(ENOMEM); + ff_insert_outpad(ctx, 0, &pad); + } + pad = (AVFilterPad){ + .name = av_asprintf("out%d", ebur128->do_video), + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_audio_output, + }; + if (ebur128->metadata) + pad.request_frame = audio_request_frame; + if (!pad.name) + return AVERROR(ENOMEM); + ff_insert_outpad(ctx, ebur128->do_video, &pad); + + /* summary */ + av_log(ctx, AV_LOG_VERBOSE, "EBU +%d scale\n", ebur128->meter); + + return 0; +} + +#define HIST_POS(power) 
(int)(((power) - ABS_THRES) * HIST_GRAIN) + +/* loudness and power should be set such as loudness = -0.691 + + * 10*log10(power), we just avoid doing that calculus two times */ +static int gate_update(struct integrator *integ, double power, + double loudness, int gate_thres) +{ + int ipower; + double relative_threshold; + int gate_hist_pos; + + /* update powers histograms by incrementing current power count */ + ipower = av_clip(HIST_POS(loudness), 0, HIST_SIZE - 1); + integ->histogram[ipower].count++; + + /* compute relative threshold and get its position in the histogram */ + integ->sum_kept_powers += power; + integ->nb_kept_powers++; + relative_threshold = integ->sum_kept_powers / integ->nb_kept_powers; + if (!relative_threshold) + relative_threshold = 1e-12; + integ->rel_threshold = LOUDNESS(relative_threshold) + gate_thres; + gate_hist_pos = av_clip(HIST_POS(integ->rel_threshold), 0, HIST_SIZE - 1); + + return gate_hist_pos; +} + +static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) +{ + int i, ch, idx_insample; + AVFilterContext *ctx = inlink->dst; + EBUR128Context *ebur128 = ctx->priv; + const int nb_channels = ebur128->nb_channels; + const int nb_samples = insamples->nb_samples; + const double *samples = (double *)insamples->data[0]; + AVFrame *pic = ebur128->outpicref; + + for (idx_insample = 0; idx_insample < nb_samples; idx_insample++) { + const int bin_id_400 = ebur128->i400.cache_pos; + const int bin_id_3000 = ebur128->i3000.cache_pos; + +#define MOVE_TO_NEXT_CACHED_ENTRY(time) do { \ + ebur128->i##time.cache_pos++; \ + if (ebur128->i##time.cache_pos == I##time##_BINS) { \ + ebur128->i##time.filled = 1; \ + ebur128->i##time.cache_pos = 0; \ + } \ +} while (0) + + MOVE_TO_NEXT_CACHED_ENTRY(400); + MOVE_TO_NEXT_CACHED_ENTRY(3000); + + for (ch = 0; ch < nb_channels; ch++) { + double bin; + + ebur128->x[ch * 3] = *samples++; // set X[i] + + if (!ebur128->ch_weighting[ch]) + continue; + + /* Y[i] = X[i]*b0 + X[i-1]*b1 + X[i-2]*b2 - Y[i-1]*a1 - 
Y[i-2]*a2 */ +#define FILTER(Y, X, name) do { \ + double *dst = ebur128->Y + ch*3; \ + double *src = ebur128->X + ch*3; \ + dst[2] = dst[1]; \ + dst[1] = dst[0]; \ + dst[0] = src[0]*name##_B0 + src[1]*name##_B1 + src[2]*name##_B2 \ + - dst[1]*name##_A1 - dst[2]*name##_A2; \ +} while (0) + + // TODO: merge both filters in one? + FILTER(y, x, PRE); // apply pre-filter + ebur128->x[ch * 3 + 2] = ebur128->x[ch * 3 + 1]; + ebur128->x[ch * 3 + 1] = ebur128->x[ch * 3 ]; + FILTER(z, y, RLB); // apply RLB-filter + + bin = ebur128->z[ch * 3] * ebur128->z[ch * 3]; + + /* add the new value, and limit the sum to the cache size (400ms or 3s) + * by removing the oldest one */ + ebur128->i400.sum [ch] = ebur128->i400.sum [ch] + bin - ebur128->i400.cache [ch][bin_id_400]; + ebur128->i3000.sum[ch] = ebur128->i3000.sum[ch] + bin - ebur128->i3000.cache[ch][bin_id_3000]; + + /* override old cache entry with the new value */ + ebur128->i400.cache [ch][bin_id_400 ] = bin; + ebur128->i3000.cache[ch][bin_id_3000] = bin; + } + + /* For integrated loudness, gating blocks are 400ms long with 75% + * overlap (see BS.1770-2 p5), so a re-computation is needed each 100ms + * (4800 samples at 48kHz). */ + if (++ebur128->sample_count == 4800) { + double loudness_400, loudness_3000; + double power_400 = 1e-12, power_3000 = 1e-12; + AVFilterLink *outlink = ctx->outputs[0]; + const int64_t pts = insamples->pts + + av_rescale_q(idx_insample, (AVRational){ 1, inlink->sample_rate }, + outlink->time_base); + + ebur128->sample_count = 0; + +#define COMPUTE_LOUDNESS(m, time) do { \ + if (ebur128->i##time.filled) { \ + /* weighting sum of the last